repo_name (string, lengths 8–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
canyon289/pymc | [ "4e695f635b2ead24e2e647651eadd2505ab1fa63" ] | [ "pymc3/tests/test_distributions.py" ] | [
"from __future__ import division\n\nimport itertools\nimport sys\n\nfrom .helpers import SeededTest, select_by_precision\nfrom ..vartypes import continuous_types\nfrom ..model import Model, Point, Potential, Deterministic\nfrom ..blocking import DictToVarBijection, DictToArrayBijection, ArrayOrdering\nfrom ..distributions import (\n DensityDist, Categorical, Multinomial, VonMises, Dirichlet,\n MvStudentT, MvNormal, MatrixNormal, ZeroInflatedPoisson,\n ZeroInflatedNegativeBinomial, Constant, Poisson, Bernoulli, Beta,\n BetaBinomial, HalfStudentT, StudentT, Weibull, Pareto,\n InverseGamma, Gamma, Cauchy, HalfCauchy, Lognormal, Laplace,\n NegativeBinomial, Geometric, Exponential, ExGaussian, Normal, TruncatedNormal,\n Flat, LKJCorr, Wald, ChiSquared, HalfNormal, DiscreteUniform,\n Bound, Uniform, Triangular, Binomial, SkewNormal, DiscreteWeibull,\n Gumbel, Logistic, OrderedLogistic, LogitNormal, Interpolated,\n ZeroInflatedBinomial, HalfFlat, AR1, KroneckerNormal, Rice,\n Kumaraswamy\n)\n\nfrom ..distributions import continuous\nfrom pymc3.theanof import floatX\nfrom numpy import array, inf, log, exp\nfrom numpy.testing import assert_almost_equal, assert_allclose, assert_equal\nimport numpy.random as nr\nimport numpy as np\nimport pytest\n\nfrom scipy import integrate\nimport scipy.stats.distributions as sp\nimport scipy.stats\nfrom scipy.special import logit\nimport theano\nimport theano.tensor as tt\nfrom ..math import kronecker\n\ndef get_lkj_cases():\n \"\"\"\n Log probabilities calculated using the formulas in:\n http://www.sciencedirect.com/science/article/pii/S0047259X09000876\n \"\"\"\n tri = np.array([0.7, 0.0, -0.7])\n return [\n (tri, 1, 3, 1.5963125911388549),\n (tri, 3, 3, -7.7963493376312742),\n (tri, 0, 3, -np.inf),\n (np.array([1.1, 0.0, -0.7]), 1, 3, -np.inf),\n (np.array([0.7, 0.0, -1.1]), 1, 3, -np.inf)\n ]\n\n\nLKJ_CASES = get_lkj_cases()\n\n\nclass Domain(object):\n def __init__(self, vals, dtype=None, edges=None, shape=None):\n avals = array(vals, dtype=dtype)\n if dtype is None and not str(avals.dtype).startswith('int'):\n avals = avals.astype(theano.config.floatX)\n vals = [array(v, dtype=avals.dtype) for v in vals]\n\n if edges is None:\n edges = array(vals[0]), array(vals[-1])\n vals = vals[1:-1]\n if shape is None:\n shape = avals[0].shape\n\n self.vals = vals\n self.shape = shape\n\n self.lower, self.upper = edges\n self.dtype = avals.dtype\n\n def __add__(self, other):\n return Domain(\n [v + other for v in self.vals],\n self.dtype,\n (self.lower + other, self.upper + other),\n self.shape)\n\n def __mul__(self, other):\n try:\n return Domain(\n [v * other for v in self.vals],\n self.dtype,\n (self.lower * other, self.upper * other),\n self.shape)\n except TypeError:\n return Domain(\n [v * other for v in self.vals],\n self.dtype,\n (self.lower, self.upper),\n self.shape)\n\n def __neg__(self):\n return Domain(\n [-v for v in self.vals],\n self.dtype,\n (-self.lower, -self.upper),\n self.shape)\n\n\ndef product(domains, n_samples=-1):\n \"\"\"Get an iterator over a product of domains.\n\n Args:\n domains: a dictionary of (name, object) pairs, where the objects\n must be \"domain-like\", as in, have a `.vals` property\n n_samples: int, maximum samples to return. 
-1 to return whole product\n\n Returns:\n list of the cartesian product of the domains\n \"\"\"\n try:\n names, domains = zip(*domains.items())\n except ValueError: # domains.items() is empty\n return []\n all_vals = [zip(names, val) for val in itertools.product(*[d.vals for d in domains])]\n if n_samples > 0 and len(all_vals) > n_samples:\n return (all_vals[j] for j in nr.choice(len(all_vals), n_samples, replace=False))\n return all_vals\n\n\nR = Domain([-inf, -2.1, -1, -.01, .0, .01, 1, 2.1, inf])\nRplus = Domain([0, .01, .1, .9, .99, 1, 1.5, 2, 100, inf])\nRplusbig = Domain([0, .5, .9, .99, 1, 1.5, 2, 20, inf])\nRminusbig = Domain([-inf, -2, -1.5, -1, -.99, -.9, -.5, -0.01, 0])\nUnit = Domain([0, .001, .1, .5, .75, .99, 1])\n\nCirc = Domain([-np.pi, -2.1, -1, -.01, .0, .01, 1, 2.1, np.pi])\n\nRunif = Domain([-1, -.4, 0, .4, 1])\nRdunif = Domain([-10, 0, 10.])\nRplusunif = Domain([0, .5, inf])\nRplusdunif = Domain([2, 10, 100], 'int64')\n\nI = Domain([-1000, -3, -2, -1, 0, 1, 2, 3, 1000], 'int64')\n\nNatSmall = Domain([0, 3, 4, 5, 1000], 'int64')\nNat = Domain([0, 1, 2, 3, 2000], 'int64')\nNatBig = Domain([0, 1, 2, 3, 5000, 50000], 'int64')\nPosNat = Domain([1, 2, 3, 2000], 'int64')\n\nBool = Domain([0, 0, 1, 1], 'int64')\n\n\ndef build_model(distfam, valuedomain, vardomains, extra_args=None):\n if extra_args is None:\n extra_args = {}\n with Model() as m:\n vals = {}\n for v, dom in vardomains.items():\n vals[v] = Flat(v, dtype=dom.dtype, shape=dom.shape,\n testval=dom.vals[0])\n vals.update(extra_args)\n distfam('value', shape=valuedomain.shape, transform=None, **vals)\n return m\n\n\ndef integrate_nd(f, domain, shape, dtype):\n if shape == () or shape == (1,):\n if dtype in continuous_types:\n return integrate.quad(f, domain.lower, domain.upper, epsabs=1e-8)[0]\n else:\n return sum(f(j) for j in range(domain.lower, domain.upper + 1))\n elif shape == (2,):\n def f2(a, b):\n return f([a, b])\n\n return integrate.dblquad(f2, domain.lower[0], domain.upper[0],\n lambda _: domain.lower[1],\n lambda _: domain.upper[1])[0]\n elif shape == (3,):\n def f3(a, b, c):\n return f([a, b, c])\n\n return integrate.tplquad(f3, domain.lower[0], domain.upper[0],\n lambda _: domain.lower[1],\n lambda _: domain.upper[1],\n lambda _, __: domain.lower[2],\n lambda _, __: domain.upper[2])[0]\n else:\n raise ValueError(\"Dont know how to integrate shape: \" + str(shape))\n\n\ndef multinomial_logpdf(value, n, p):\n if value.sum() == n and (0 <= value).all() and (value <= n).all():\n logpdf = scipy.special.gammaln(n + 1)\n logpdf -= scipy.special.gammaln(value + 1).sum()\n logpdf += logpow(p, value).sum()\n return logpdf\n else:\n return -inf\n\n\ndef beta_mu_sd(value, mu, sd):\n kappa = mu * (1 - mu) / sd**2 - 1\n if kappa > 0:\n return sp.beta.logpdf(value, mu * kappa, (1 - mu) * kappa)\n else:\n return -inf\n\n\nclass ProductDomain(object):\n def __init__(self, domains):\n self.vals = list(itertools.product(*[d.vals for d in domains]))\n self.shape = (len(domains),) + domains[0].shape\n self.lower = [d.lower for d in domains]\n self.upper = [d.upper for d in domains]\n self.dtype = domains[0].dtype\n\n\ndef Vector(D, n):\n return ProductDomain([D] * n)\n\n\ndef SortedVector(n):\n vals = []\n np.random.seed(42)\n for _ in range(10):\n vals.append(np.sort(np.random.randn(n)))\n return Domain(vals, edges=(None, None))\n\n\ndef UnitSortedVector(n):\n vals = []\n np.random.seed(42)\n for _ in range(10):\n vals.append(np.sort(np.random.rand(n)))\n return Domain(vals, edges=(None, None))\n\n\ndef RealMatrix(n, m):\n 
vals = []\n np.random.seed(42)\n for _ in range(10):\n vals.append(np.random.randn(n, m))\n return Domain(vals, edges=(None, None))\n\n\ndef simplex_values(n):\n if n == 1:\n yield array([1.0])\n else:\n for v in Unit.vals:\n for vals in simplex_values(n - 1):\n yield np.concatenate([[v], (1 - v) * vals])\n\n\ndef normal_logpdf_tau(value, mu, tau):\n return normal_logpdf_cov(value, mu, np.linalg.inv(tau)).sum()\n\n\ndef normal_logpdf_cov(value, mu, cov):\n return scipy.stats.multivariate_normal.logpdf(value, mu, cov).sum()\n\n\ndef normal_logpdf_chol(value, mu, chol):\n return normal_logpdf_cov(value, mu, np.dot(chol, chol.T)).sum()\n\n\ndef normal_logpdf_chol_upper(value, mu, chol):\n return normal_logpdf_cov(value, mu, np.dot(chol.T, chol)).sum()\n\n\ndef matrix_normal_logpdf_cov(value, mu, rowcov, colcov):\n return scipy.stats.matrix_normal.logpdf(value, mu, rowcov, colcov)\n\n\ndef matrix_normal_logpdf_chol(value, mu, rowchol, colchol):\n return matrix_normal_logpdf_cov(value, mu, np.dot(rowchol, rowchol.T),\n np.dot(colchol, colchol.T))\n\n\ndef kron_normal_logpdf_cov(value, mu, covs, sigma):\n cov = kronecker(*covs).eval()\n if sigma is not None:\n cov += sigma**2 * np.eye(*cov.shape)\n return scipy.stats.multivariate_normal.logpdf(value, mu, cov).sum()\n\n\ndef kron_normal_logpdf_chol(value, mu, chols, sigma):\n covs = [np.dot(chol, chol.T) for chol in chols]\n return kron_normal_logpdf_cov(value, mu, covs, sigma=sigma)\n\n\ndef kron_normal_logpdf_evd(value, mu, evds, sigma):\n covs = []\n for eigs, Q in evds:\n try:\n eigs = eigs.eval()\n except AttributeError:\n pass\n try:\n Q = Q.eval()\n except AttributeError:\n pass\n covs.append(np.dot(Q, np.dot(np.diag(eigs), Q.T)))\n return kron_normal_logpdf_cov(value, mu, covs, sigma)\n\n\ndef betafn(a):\n return floatX(scipy.special.gammaln(a).sum(-1) - scipy.special.gammaln(a.sum(-1)))\n\n\ndef logpow(v, p):\n return np.choose(v == 0, [p * np.log(v), 0])\n\n\ndef discrete_weibull_logpmf(value, q, beta):\n return floatX(np.log(np.power(q, np.power(value, beta))\n - np.power(q, np.power(value + 1, beta))))\n\n\ndef dirichlet_logpdf(value, a):\n return floatX((-betafn(a) + logpow(value, a - 1).sum(-1)).sum())\n\n\ndef categorical_logpdf(value, p):\n if value >= 0 and value <= len(p):\n return floatX(np.log(p[value]))\n else:\n return -inf\n\ndef mvt_logpdf(value, nu, Sigma, mu=0):\n d = len(Sigma)\n dist = np.atleast_2d(value) - mu\n chol = np.linalg.cholesky(Sigma)\n trafo = np.linalg.solve(chol, dist.T).T\n logdet = np.log(np.diag(chol)).sum()\n\n lgamma = scipy.special.gammaln\n norm = lgamma((nu + d) / 2.) - 0.5 * d * np.log(nu * np.pi) - lgamma(nu / 2.)\n logp = norm - logdet - (nu + d) / 2. * np.log1p((trafo * trafo).sum(-1) / nu)\n return logp.sum()\n\ndef AR1_logpdf(value, k, tau_e):\n return (sp.norm(loc=0,scale=1/np.sqrt(tau_e)).logpdf(value[0]) +\n sp.norm(loc=k*value[:-1],scale=1/np.sqrt(tau_e)).logpdf(value[1:]).sum())\n\ndef invlogit(x, eps=sys.float_info.epsilon):\n return (1. - 2. * eps) / (1. 
+ np.exp(-x)) + eps\n\ndef orderedlogistic_logpdf(value, eta, cutpoints):\n c = np.concatenate(([-np.inf], cutpoints, [np.inf]))\n p = invlogit(eta - c[value]) - invlogit(eta - c[value + 1])\n return np.log(p)\n\nclass Simplex(object):\n def __init__(self, n):\n self.vals = list(simplex_values(n))\n self.shape = (n,)\n self.dtype = Unit.dtype\n\n\nclass MultiSimplex(object):\n def __init__(self, n_dependent, n_independent):\n self.vals = []\n for simplex_value in itertools.product(simplex_values(n_dependent), repeat=n_independent):\n self.vals.append(np.vstack(simplex_value))\n self.shape = (n_independent, n_dependent)\n self.dtype = Unit.dtype\n\n\ndef PdMatrix(n):\n if n == 1:\n return PdMatrix1\n elif n == 2:\n return PdMatrix2\n elif n == 3:\n return PdMatrix3\n else:\n raise ValueError(\"n out of bounds\")\n\nPdMatrix1 = Domain([np.eye(1), [[.5]]], edges=(None, None))\n\nPdMatrix2 = Domain([np.eye(2), [[.5, .05], [.05, 4.5]]], edges=(None, None))\n\nPdMatrix3 = Domain(\n [np.eye(3), [[.5, .1, 0], [.1, 1, 0], [0, 0, 2.5]]], edges=(None, None))\n\n\nPdMatrixChol1 = Domain([np.eye(1), [[0.001]]], edges=(None, None))\nPdMatrixChol2 = Domain([np.eye(2), [[0.1, 0], [10, 1]]], edges=(None, None))\nPdMatrixChol3 = Domain([np.eye(3), [[0.1, 0, 0], [10, 100, 0], [0, 1, 10]]],\n edges=(None, None))\n\n\ndef PdMatrixChol(n):\n if n == 1:\n return PdMatrixChol1\n elif n == 2:\n return PdMatrixChol2\n elif n == 3:\n return PdMatrixChol3\n else:\n raise ValueError(\"n out of bounds\")\n\n\nPdMatrixCholUpper1 = Domain([np.eye(1), [[0.001]]], edges=(None, None))\nPdMatrixCholUpper2 = Domain([np.eye(2), [[0.1, 10], [0, 1]]], edges=(None, None))\nPdMatrixCholUpper3 = Domain([np.eye(3), [[0.1, 10, 0], [0, 100, 1], [0, 0, 10]]],\n edges=(None, None))\n\n\ndef PdMatrixCholUpper(n):\n if n == 1:\n return PdMatrixCholUpper1\n elif n == 2:\n return PdMatrixCholUpper2\n elif n == 3:\n return PdMatrixCholUpper3\n else:\n raise ValueError(\"n out of bounds\")\n\n\ndef RandomPdMatrix(n):\n A = np.random.rand(n, n)\n return np.dot(A, A.T) + n * np.identity(n)\n\n\nclass TestMatchesScipy(SeededTest):\n def pymc3_matches_scipy(self, pymc3_dist, domain, paramdomains, scipy_dist,\n decimal=None, extra_args=None, scipy_args=None):\n if extra_args is None:\n extra_args = {}\n if scipy_args is None:\n scipy_args = {}\n model = build_model(pymc3_dist, domain, paramdomains, extra_args)\n value = model.named_vars['value']\n\n def logp(args):\n args.update(scipy_args)\n return scipy_dist(**args)\n self.check_logp(model, value, domain, paramdomains, logp, decimal=decimal)\n\n def check_logp(self, model, value, domain, paramdomains, logp_reference, decimal=None):\n domains = paramdomains.copy()\n domains['value'] = domain\n logp = model.fastlogp\n for pt in product(domains, n_samples=100):\n pt = Point(pt, model=model)\n if decimal is None:\n decimal = select_by_precision(float64=6, float32=3)\n assert_almost_equal(logp(pt), logp_reference(pt), decimal=decimal, err_msg=str(pt))\n\n def check_int_to_1(self, model, value, domain, paramdomains):\n pdf = model.fastfn(exp(model.logpt))\n for pt in product(paramdomains, n_samples=10):\n pt = Point(pt, value=value.tag.test_value, model=model)\n bij = DictToVarBijection(value, (), pt)\n pdfx = bij.mapf(pdf)\n area = integrate_nd(pdfx, domain, value.dshape, value.dtype)\n assert_almost_equal(area, 1, err_msg=str(pt))\n\n def check_dlogp(self, model, value, domain, paramdomains):\n try:\n from numdifftools import Gradient\n except ImportError:\n return\n if not model.cont_vars:\n 
return\n\n domains = paramdomains.copy()\n domains['value'] = domain\n bij = DictToArrayBijection(\n ArrayOrdering(model.cont_vars), model.test_point)\n dlogp = bij.mapf(model.fastdlogp(model.cont_vars))\n logp = bij.mapf(model.fastlogp)\n\n def wrapped_logp(x):\n try:\n return logp(x)\n except:\n return np.nan\n\n ndlogp = Gradient(wrapped_logp)\n for pt in product(domains, n_samples=100):\n pt = Point(pt, model=model)\n pt = bij.map(pt)\n decimals = select_by_precision(float64=6, float32=4)\n assert_almost_equal(dlogp(pt), ndlogp(pt), decimal=decimals, err_msg=str(pt))\n\n def checkd(self, distfam, valuedomain, vardomains, checks=None, extra_args=None):\n if checks is None:\n checks = (self.check_int_to_1, self.check_dlogp)\n\n if extra_args is None:\n extra_args = {}\n m = build_model(distfam, valuedomain, vardomains, extra_args=extra_args)\n for check in checks:\n check(m, m.named_vars['value'], valuedomain, vardomains)\n\n def test_uniform(self):\n self.pymc3_matches_scipy(\n Uniform, Runif, {'lower': -Rplusunif, 'upper': Rplusunif},\n lambda value, lower, upper: sp.uniform.logpdf(value, lower, upper - lower))\n\n def test_triangular(self):\n self.pymc3_matches_scipy(\n Triangular, Runif, {'lower': -Rplusunif, 'c': Runif, 'upper': Rplusunif},\n lambda value, c, lower, upper: sp.triang.logpdf(value, c-lower, lower, upper-lower))\n\n def test_bound_normal(self):\n PositiveNormal = Bound(Normal, lower=0.)\n self.pymc3_matches_scipy(PositiveNormal, Rplus, {'mu': Rplus, 'sd': Rplus},\n lambda value, mu, sd: sp.norm.logpdf(value, mu, sd),\n decimal=select_by_precision(float64=6, float32=-1))\n with Model(): x = PositiveNormal('x', mu=0, sd=1, transform=None)\n assert np.isinf(x.logp({'x':-1}))\n\n def test_discrete_unif(self):\n self.pymc3_matches_scipy(\n DiscreteUniform, Rdunif, {'lower': -Rplusdunif, 'upper': Rplusdunif},\n lambda value, lower, upper: sp.randint.logpmf(value, lower, upper + 1))\n\n def test_flat(self):\n self.pymc3_matches_scipy(Flat, Runif, {}, lambda value: 0)\n with Model():\n x = Flat('a')\n assert_allclose(x.tag.test_value, 0)\n\n def test_half_flat(self):\n self.pymc3_matches_scipy(HalfFlat, Rplus, {}, lambda value: 0)\n with Model():\n x = HalfFlat('a', shape=2)\n assert_allclose(x.tag.test_value, 1)\n assert x.tag.test_value.shape == (2,)\n\n def test_normal(self):\n self.pymc3_matches_scipy(Normal, R, {'mu': R, 'sd': Rplus},\n lambda value, mu, sd: sp.norm.logpdf(value, mu, sd),\n decimal=select_by_precision(float64=6, float32=1)\n )\n\n def test_truncated_normal(self):\n def scipy_logp(value, mu, sd, lower, upper):\n return sp.truncnorm.logpdf(\n value, (lower-mu)/sd, (upper-mu)/sd, loc=mu, scale=sd)\n\n args = {'mu': array(-2.1), 'lower': array(-100.), 'upper': array(0.01),\n 'sd': array(0.01)}\n val = TruncatedNormal.dist(**args).logp(0.)\n assert_allclose(val.eval(), scipy_logp(value=0, **args))\n\n self.pymc3_matches_scipy(\n TruncatedNormal, R,\n {'mu': R, 'sd': Rplusbig, 'lower': -Rplusbig, 'upper': Rplusbig},\n scipy_logp,\n decimal=select_by_precision(float64=6, float32=1)\n )\n\n def test_half_normal(self):\n self.pymc3_matches_scipy(HalfNormal, Rplus, {'sd': Rplus},\n lambda value, sd: sp.halfnorm.logpdf(value, scale=sd),\n decimal=select_by_precision(float64=6, float32=-1)\n )\n\n def test_chi_squared(self):\n self.pymc3_matches_scipy(ChiSquared, Rplus, {'nu': Rplusdunif},\n lambda value, nu: sp.chi2.logpdf(value, df=nu))\n\n def test_wald_scipy(self):\n self.pymc3_matches_scipy(Wald, Rplus, {'mu': Rplus},\n lambda value, mu: 
sp.invgauss.logpdf(value, mu),\n decimal=select_by_precision(float64=6, float32=1)\n )\n\n @pytest.mark.parametrize('value,mu,lam,phi,alpha,logp', [\n (.5, .001, .5, None, 0., -124500.7257914),\n (1., .5, .001, None, 0., -4.3733162),\n (2., 1., None, None, 0., -2.2086593),\n (5., 2., 2.5, None, 0., -3.4374500),\n (7.5, 5., None, 1., 0., -3.2199074),\n (15., 10., None, .75, 0., -4.0360623),\n (50., 15., None, .66666, 0., -6.1801249),\n (.5, .001, 0.5, None, 0., -124500.7257914),\n (1., .5, .001, None, .5, -3.3330954),\n (2., 1., None, None, 1., -0.9189385),\n (5., 2., 2.5, None, 2., -2.2128783),\n (7.5, 5., None, 1., 2.5, -2.5283764),\n (15., 10., None, .75, 5., -3.3653647),\n (50., 15., None, .666666, 10., -5.6481874)\n ])\n def test_wald(self, value, mu, lam, phi, alpha, logp):\n # Log probabilities calculated using the dIG function from the R package gamlss.\n # See e.g., doi: 10.1111/j.1467-9876.2005.00510.x, or\n # http://www.gamlss.org/.\n with Model() as model:\n Wald('wald', mu=mu, lam=lam, phi=phi, alpha=alpha, transform=None)\n pt = {'wald': value}\n decimals = select_by_precision(float64=6, float32=1)\n assert_almost_equal(model.fastlogp(pt), logp, decimal=decimals, err_msg=str(pt))\n\n def test_beta(self):\n self.pymc3_matches_scipy(Beta, Unit, {'alpha': Rplus, 'beta': Rplus},\n lambda value, alpha, beta: sp.beta.logpdf(value, alpha, beta))\n self.pymc3_matches_scipy(Beta, Unit, {'mu': Unit, 'sd': Rplus}, beta_mu_sd)\n\n def test_kumaraswamy(self):\n # Scipy does not have a built-in Kumaraswamy pdf\n def scipy_log_pdf(value, a, b):\n return np.log(a) + np.log(b) + (a - 1) * np.log(value) + (b - 1) * np.log(1 - value ** a)\n self.pymc3_matches_scipy(Kumaraswamy, Unit, {'a': Rplus, 'b': Rplus}, scipy_log_pdf)\n\n def test_exponential(self):\n self.pymc3_matches_scipy(Exponential, Rplus, {'lam': Rplus},\n lambda value, lam: sp.expon.logpdf(value, 0, 1 / lam))\n\n def test_geometric(self):\n self.pymc3_matches_scipy(Geometric, Nat, {'p': Unit},\n lambda value, p: np.log(sp.geom.pmf(value, p)))\n\n def test_negative_binomial(self):\n def test_fun(value, mu, alpha):\n return sp.nbinom.logpmf(value, alpha, 1 - mu / (mu + alpha))\n self.pymc3_matches_scipy(NegativeBinomial, Nat, {\n 'mu': Rplus, 'alpha': Rplus}, test_fun)\n\n def test_laplace(self):\n self.pymc3_matches_scipy(Laplace, R, {'mu': R, 'b': Rplus},\n lambda value, mu, b: sp.laplace.logpdf(value, mu, b))\n\n def test_lognormal(self):\n self.pymc3_matches_scipy(\n Lognormal, Rplus, {'mu': R, 'tau': Rplusbig},\n lambda value, mu, tau: floatX(sp.lognorm.logpdf(value, tau**-.5, 0, np.exp(mu))))\n\n def test_t(self):\n self.pymc3_matches_scipy(StudentT, R, {'nu': Rplus, 'mu': R, 'lam': Rplus},\n lambda value, nu, mu, lam: sp.t.logpdf(value, nu, mu, lam**-0.5))\n\n def test_cauchy(self):\n self.pymc3_matches_scipy(Cauchy, R, {'alpha': R, 'beta': Rplusbig},\n lambda value, alpha, beta: sp.cauchy.logpdf(value, alpha, beta))\n\n def test_half_cauchy(self):\n self.pymc3_matches_scipy(HalfCauchy, Rplus, {'beta': Rplusbig},\n lambda value, beta: sp.halfcauchy.logpdf(value, scale=beta))\n\n def test_gamma(self):\n self.pymc3_matches_scipy(\n Gamma, Rplus, {'alpha': Rplusbig, 'beta': Rplusbig},\n lambda value, alpha, beta: sp.gamma.logpdf(value, alpha, scale=1.0 / beta))\n\n def test_fun(value, mu, sd):\n return sp.gamma.logpdf(value, mu**2 / sd**2, scale=1.0 / (mu / sd**2))\n self.pymc3_matches_scipy(\n Gamma, Rplus, {'mu': Rplusbig, 'sd': Rplusbig}, test_fun)\n\n def test_inverse_gamma(self):\n self.pymc3_matches_scipy(\n InverseGamma, 
Rplus, {'alpha': Rplus, 'beta': Rplus},\n lambda value, alpha, beta: sp.invgamma.logpdf(value, alpha, scale=beta))\n\n def test_pareto(self):\n self.pymc3_matches_scipy(Pareto, Rplus, {'alpha': Rplusbig, 'm': Rplusbig},\n lambda value, alpha, m: sp.pareto.logpdf(value, alpha, scale=m))\n\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32 due to inf issues\")\n def test_weibull(self):\n self.pymc3_matches_scipy(Weibull, Rplus, {'alpha': Rplusbig, 'beta': Rplusbig},\n lambda value, alpha, beta: sp.exponweib.logpdf(value, 1, alpha, scale=beta),\n )\n\n def test_half_studentt(self):\n # this is only testing for nu=1 (halfcauchy)\n self.pymc3_matches_scipy(HalfStudentT, Rplus, {'sd': Rplus},\n lambda value, sd: sp.halfcauchy.logpdf(value, 0, sd))\n\n def test_skew_normal(self):\n self.pymc3_matches_scipy(SkewNormal, R, {'mu': R, 'sd': Rplusbig, 'alpha': R},\n lambda value, alpha, mu, sd: sp.skewnorm.logpdf(value, alpha, mu, sd))\n\n def test_binomial(self):\n self.pymc3_matches_scipy(Binomial, Nat, {'n': NatSmall, 'p': Unit},\n lambda value, n, p: sp.binom.logpmf(value, n, p))\n\n # Too lazy to propagate decimal parameter through the whole chain of deps\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_beta_binomial(self):\n self.checkd(BetaBinomial, Nat, {'alpha': Rplus, 'beta': Rplus, 'n': NatSmall})\n\n def test_bernoulli(self):\n self.pymc3_matches_scipy(\n Bernoulli, Bool, {'logit_p': R},\n lambda value, logit_p: sp.bernoulli.logpmf(value, scipy.special.expit(logit_p)))\n self.pymc3_matches_scipy(Bernoulli, Bool, {'p': Unit},\n lambda value, p: sp.bernoulli.logpmf(value, p))\n\n\n def test_discrete_weibull(self):\n self.pymc3_matches_scipy(DiscreteWeibull, Nat,\n {'q': Unit, 'beta': Rplusdunif}, discrete_weibull_logpmf)\n\n def test_poisson(self):\n self.pymc3_matches_scipy(Poisson, Nat, {'mu': Rplus},\n lambda value, mu: sp.poisson.logpmf(value, mu))\n\n def test_bound_poisson(self):\n NonZeroPoisson = Bound(Poisson, lower=1.)\n self.pymc3_matches_scipy(NonZeroPoisson, PosNat, {'mu': Rplus},\n lambda value, mu: sp.poisson.logpmf(value, mu))\n\n with Model(): x = NonZeroPoisson('x', mu=4)\n assert np.isinf(x.logp({'x':0}))\n\n def test_constantdist(self):\n self.pymc3_matches_scipy(Constant, I, {'c': I},\n lambda value, c: np.log(c == value))\n\n # Too lazy to propagate decimal parameter through the whole chain of deps\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_zeroinflatedpoisson(self):\n self.checkd(ZeroInflatedPoisson, Nat, {'theta': Rplus, 'psi': Unit})\n\n # Too lazy to propagate decimal parameter through the whole chain of deps\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_zeroinflatednegativebinomial(self):\n self.checkd(ZeroInflatedNegativeBinomial, Nat,\n {'mu': Rplusbig, 'alpha': Rplusbig, 'psi': Unit})\n\n # Too lazy to propagate decimal parameter through the whole chain of deps\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_zeroinflatedbinomial(self):\n self.checkd(ZeroInflatedBinomial, Nat,\n {'n': NatSmall, 'p': Unit, 'psi': Unit})\n\n @pytest.mark.parametrize('n', [1, 2, 3])\n def test_mvnormal(self, n):\n self.pymc3_matches_scipy(MvNormal, RealMatrix(5, n),\n {'mu': Vector(R, n), 'tau': PdMatrix(n)},\n normal_logpdf_tau)\n self.pymc3_matches_scipy(MvNormal, Vector(R, n),\n {'mu': 
Vector(R, n), 'tau': PdMatrix(n)},\n normal_logpdf_tau)\n self.pymc3_matches_scipy(MvNormal, RealMatrix(5, n),\n {'mu': Vector(R, n), 'cov': PdMatrix(n)},\n normal_logpdf_cov)\n self.pymc3_matches_scipy(MvNormal, Vector(R, n),\n {'mu': Vector(R, n), 'cov': PdMatrix(n)},\n normal_logpdf_cov)\n self.pymc3_matches_scipy(MvNormal, RealMatrix(5, n),\n {'mu': Vector(R, n), 'chol': PdMatrixChol(n)},\n normal_logpdf_chol,\n decimal=select_by_precision(float64=6, float32=-1))\n self.pymc3_matches_scipy(MvNormal, Vector(R, n),\n {'mu': Vector(R, n), 'chol': PdMatrixChol(n)},\n normal_logpdf_chol,\n decimal=select_by_precision(float64=6, float32=0))\n\n def MvNormalUpper(*args, **kwargs):\n return MvNormal(lower=False, *args, **kwargs)\n\n self.pymc3_matches_scipy(MvNormalUpper, Vector(R, n),\n {'mu': Vector(R, n), 'chol': PdMatrixCholUpper(n)},\n normal_logpdf_chol_upper,\n decimal=select_by_precision(float64=6, float32=0))\n\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32 due to inf issues\")\n def test_mvnormal_indef(self):\n cov_val = np.array([[1, 0.5], [0.5, -2]])\n cov = tt.matrix('cov')\n cov.tag.test_value = np.eye(2)\n mu = floatX(np.zeros(2))\n x = tt.vector('x')\n x.tag.test_value = np.zeros(2)\n logp = MvNormal.dist(mu=mu, cov=cov).logp(x)\n f_logp = theano.function([cov, x], logp)\n assert f_logp(cov_val, np.ones(2)) == -np.inf\n dlogp = tt.grad(logp, cov)\n f_dlogp = theano.function([cov, x], dlogp)\n assert not np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))\n\n logp = MvNormal.dist(mu=mu, tau=cov).logp(x)\n f_logp = theano.function([cov, x], logp)\n assert f_logp(cov_val, np.ones(2)) == -np.inf\n dlogp = tt.grad(logp, cov)\n f_dlogp = theano.function([cov, x], dlogp)\n assert not np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))\n\n def test_mvnormal_init_fail(self):\n with Model():\n with pytest.raises(ValueError):\n x = MvNormal('x', mu=np.zeros(3), shape=3)\n with pytest.raises(ValueError):\n x = MvNormal('x', mu=np.zeros(3), cov=np.eye(3), tau=np.eye(3), shape=3)\n\n @pytest.mark.parametrize('n', [1, 2, 3])\n def test_matrixnormal(self, n):\n mat_scale = 1e3 # To reduce logp magnitude\n mean_scale = .1\n self.pymc3_matches_scipy(MatrixNormal, RealMatrix(n, n),\n {'mu': RealMatrix(n, n)*mean_scale,\n 'rowcov': PdMatrix(n)*mat_scale,\n 'colcov': PdMatrix(n)*mat_scale},\n matrix_normal_logpdf_cov)\n self.pymc3_matches_scipy(MatrixNormal, RealMatrix(2, n),\n {'mu': RealMatrix(2, n)*mean_scale,\n 'rowcov': PdMatrix(2)*mat_scale,\n 'colcov': PdMatrix(n)*mat_scale},\n matrix_normal_logpdf_cov)\n self.pymc3_matches_scipy(MatrixNormal, RealMatrix(3, n),\n {'mu': RealMatrix(3, n)*mean_scale,\n 'rowchol': PdMatrixChol(3)*mat_scale,\n 'colchol': PdMatrixChol(n)*mat_scale},\n matrix_normal_logpdf_chol,\n decimal=select_by_precision(float64=6, float32=-1))\n self.pymc3_matches_scipy(MatrixNormal, RealMatrix(n, 3),\n {'mu': RealMatrix(n, 3)*mean_scale,\n 'rowchol': PdMatrixChol(n)*mat_scale,\n 'colchol': PdMatrixChol(3)*mat_scale},\n matrix_normal_logpdf_chol,\n decimal=select_by_precision(float64=6, float32=0))\n\n @pytest.mark.parametrize('n', [2, 3])\n @pytest.mark.parametrize('m', [3])\n @pytest.mark.parametrize('sigma', [None, 1.0])\n def test_kroneckernormal(self, n, m, sigma):\n np.random.seed(5)\n N = n*m\n covs = [RandomPdMatrix(n), RandomPdMatrix(m)]\n chols = list(map(np.linalg.cholesky, covs))\n evds = list(map(np.linalg.eigh, covs))\n dom = Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)\n mu = 
Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)\n\n std_args = {'mu': mu}\n cov_args = {'covs': covs}\n chol_args = {'chols': chols}\n evd_args = {'evds': evds}\n if sigma is not None and sigma != 0:\n std_args['sigma'] = Domain([sigma], edges=(None, None))\n else:\n for args in [cov_args, chol_args, evd_args]:\n args['sigma'] = sigma\n\n self.pymc3_matches_scipy(\n KroneckerNormal, dom, std_args, kron_normal_logpdf_cov,\n extra_args=cov_args, scipy_args=cov_args)\n self.pymc3_matches_scipy(\n KroneckerNormal, dom, std_args, kron_normal_logpdf_chol,\n extra_args=chol_args, scipy_args=chol_args)\n self.pymc3_matches_scipy(\n KroneckerNormal, dom, std_args, kron_normal_logpdf_evd,\n extra_args=evd_args, scipy_args=evd_args)\n\n dom = Domain([np.random.randn(2, N)*0.1], edges=(None, None), shape=(2, N))\n\n self.pymc3_matches_scipy(\n KroneckerNormal, dom, std_args, kron_normal_logpdf_cov,\n extra_args=cov_args, scipy_args=cov_args)\n self.pymc3_matches_scipy(\n KroneckerNormal, dom, std_args, kron_normal_logpdf_chol,\n extra_args=chol_args, scipy_args=chol_args)\n self.pymc3_matches_scipy(\n KroneckerNormal, dom, std_args, kron_normal_logpdf_evd,\n extra_args=evd_args, scipy_args=evd_args)\n\n @pytest.mark.parametrize('n', [1, 2])\n def test_mvt(self, n):\n self.pymc3_matches_scipy(MvStudentT, Vector(R, n),\n {'nu': Rplus, 'Sigma': PdMatrix(n), 'mu': Vector(R, n)},\n mvt_logpdf)\n self.pymc3_matches_scipy(MvStudentT, RealMatrix(2, n),\n {'nu': Rplus, 'Sigma': PdMatrix(n), 'mu': Vector(R, n)},\n mvt_logpdf)\n\n @pytest.mark.parametrize('n',[2,3,4])\n def test_AR1(self, n):\n self.pymc3_matches_scipy(AR1, Vector(R, n), {'k': Unit, 'tau_e': Rplus}, AR1_logpdf)\n\n\n @pytest.mark.parametrize('n', [2, 3])\n def test_wishart(self, n):\n # This check compares the autodiff gradient to the numdiff gradient.\n # However, due to the strict constraints of the wishart,\n # it is impossible to numerically determine the gradient as a small\n # pertubation breaks the symmetry. 
Thus disabling.\n #\n # self.checkd(Wishart, PdMatrix(n), {'n': Domain([2, 3, 4, 2000]), 'V': PdMatrix(n)},\n # checks=[self.check_dlogp])\n pass\n\n @pytest.mark.parametrize('x,eta,n,lp', LKJ_CASES)\n def test_lkj(self, x, eta, n, lp):\n with Model() as model:\n LKJCorr('lkj', eta=eta, n=n, transform=None)\n\n pt = {'lkj': x}\n decimals = select_by_precision(float64=6, float32=4)\n assert_almost_equal(model.fastlogp(pt), lp, decimal=decimals, err_msg=str(pt))\n\n @pytest.mark.parametrize('n', [2, 3])\n def test_dirichlet(self, n):\n self.pymc3_matches_scipy(Dirichlet, Simplex(\n n), {'a': Vector(Rplus, n)}, dirichlet_logpdf)\n\n def test_dirichlet_2D(self):\n self.pymc3_matches_scipy(Dirichlet, MultiSimplex(2, 2),\n {'a': Vector(Vector(Rplus, 2), 2)}, dirichlet_logpdf)\n\n @pytest.mark.parametrize('n', [2, 3])\n def test_multinomial(self, n):\n self.pymc3_matches_scipy(Multinomial, Vector(Nat, n), {'p': Simplex(n), 'n': Nat},\n multinomial_logpdf)\n\n @pytest.mark.parametrize('p,n', [\n [[.25, .25, .25, .25], 1],\n [[.3, .6, .05, .05], 2],\n [[.3, .6, .05, .05], 10],\n ])\n def test_multinomial_mode(self, p, n):\n _p = np.array(p)\n with Model() as model:\n m = Multinomial('m', n, _p, _p.shape)\n assert_allclose(m.distribution.mode.eval().sum(), n)\n _p = np.array([p, p])\n with Model() as model:\n m = Multinomial('m', n, _p, _p.shape)\n assert_allclose(m.distribution.mode.eval().sum(axis=-1), n)\n\n @pytest.mark.parametrize('p, shape, n', [\n [[.25, .25, .25, .25], 4, 2],\n [[.25, .25, .25, .25], (1, 4), 3],\n # 3: expect to fail\n # [[.25, .25, .25, .25], (10, 4)],\n [[.25, .25, .25, .25], (10, 1, 4), 5],\n # 5: expect to fail\n # [[[.25, .25, .25, .25]], (2, 4), [7, 11]],\n [[[.25, .25, .25, .25],\n [.25, .25, .25, .25]], (2, 4), 13],\n [[[.25, .25, .25, .25],\n [.25, .25, .25, .25]], (1, 2, 4), [23, 29]],\n [[[.25, .25, .25, .25],\n [.25, .25, .25, .25]], (10, 2, 4), [31, 37]],\n [[[.25, .25, .25, .25],\n [.25, .25, .25, .25]], (2, 4), [17, 19]],\n ])\n def test_multinomial_random(self, p, shape, n):\n p = np.asarray(p)\n with Model() as model:\n m = Multinomial('m', n=n, p=p, shape=shape)\n m.random()\n\n def test_multinomial_mode_with_shape(self):\n n = [1, 10]\n p = np.asarray([[.25,.25,.25,.25], [.26, .26, .26, .22]])\n with Model() as model:\n m = Multinomial('m', n=n, p=p, shape=(2, 4))\n assert_allclose(m.distribution.mode.eval().sum(axis=-1), n)\n\n def test_multinomial_vec(self):\n vals = np.array([[2,4,4], [3,3,4]])\n p = np.array([0.2, 0.3, 0.5])\n n = 10\n\n with Model() as model_single:\n Multinomial('m', n=n, p=p, shape=len(p))\n\n with Model() as model_many:\n Multinomial('m', n=n, p=p, shape=vals.shape)\n\n assert_almost_equal(scipy.stats.multinomial.logpmf(vals, n, p),\n np.asarray([model_single.fastlogp({'m': val}) for val in vals]),\n decimal=4)\n\n assert_almost_equal(scipy.stats.multinomial.logpmf(vals, n, p),\n model_many.free_RVs[0].logp_elemwise({'m': vals}).squeeze(),\n decimal=4)\n\n assert_almost_equal(sum([model_single.fastlogp({'m': val}) for val in vals]),\n model_many.fastlogp({'m': vals}),\n decimal=4)\n\n def test_multinomial_vec_1d_n(self):\n vals = np.array([[2,4,4], [4,3,4]])\n p = np.array([0.2, 0.3, 0.5])\n ns = np.array([10, 11])\n\n with Model() as model:\n Multinomial('m', n=ns, p=p, shape=vals.shape)\n\n assert_almost_equal(sum([multinomial_logpdf(val, n, p) for val, n in zip(vals, ns)]),\n model.fastlogp({'m': vals}),\n decimal=4)\n\n def test_multinomial_vec_1d_n_2d_p(self):\n vals = np.array([[2,4,4], [4,3,4]])\n ps = np.array([[0.2, 0.3, 
0.5],\n [0.9, 0.09, 0.01]])\n ns = np.array([10, 11])\n\n with Model() as model:\n Multinomial('m', n=ns, p=ps, shape=vals.shape)\n\n assert_almost_equal(sum([multinomial_logpdf(val, n, p) for val, n, p in zip(vals, ns, ps)]),\n model.fastlogp({'m': vals}),\n decimal=4)\n\n def test_multinomial_vec_2d_p(self):\n vals = np.array([[2,4,4], [3,3,4]])\n ps = np.array([[0.2, 0.3, 0.5],\n [0.3, 0.3, 0.4]])\n n = 10\n\n with Model() as model:\n Multinomial('m', n=n, p=ps, shape=vals.shape)\n\n assert_almost_equal(sum([multinomial_logpdf(val, n, p) for val, p in zip(vals, ps)]),\n model.fastlogp({'m': vals}),\n decimal=4)\n\n def test_categorical_bounds(self):\n with Model():\n x = Categorical('x', p=np.array([0.2, 0.3, 0.5]))\n assert np.isinf(x.logp({'x': -1}))\n assert np.isinf(x.logp({'x': 3}))\n\n @pytest.mark.parametrize('n', [2, 3, 4])\n def test_categorical(self, n):\n self.pymc3_matches_scipy(Categorical, Domain(range(n), 'int64'), {'p': Simplex(n)},\n lambda value, p: categorical_logpdf(value, p))\n\n @pytest.mark.parametrize('n', [2, 3, 4])\n def test_orderedlogistic(self, n):\n self.pymc3_matches_scipy(OrderedLogistic, Domain(range(n), 'int64'),\n {'eta': R, 'cutpoints': Vector(R, n-1)},\n lambda value, eta, cutpoints: orderedlogistic_logpdf(value, eta, cutpoints))\n\n def test_densitydist(self):\n def logp(x):\n return -log(2 * .5) - abs(x - .5) / .5\n self.checkd(DensityDist, R, {}, extra_args={'logp': logp})\n\n def test_addpotential(self):\n with Model() as model:\n value = Normal('value', 1, 1)\n Potential('value_squared', -value ** 2)\n self.check_dlogp(model, value, R, {})\n\n def test_get_tau_sd(self):\n sd = np.array([2])\n assert_almost_equal(continuous.get_tau_sd(sd=sd), [1. / sd**2, sd])\n\n @pytest.mark.parametrize('value,mu,sigma,nu,logp', [\n (0.5, -50.000, 0.500, 0.500, -99.8068528),\n (1.0, -1.000, 0.001, 0.001, -1992.5922447),\n (2.0, 0.001, 1.000, 1.000, -1.6720416),\n (5.0, 0.500, 2.500, 2.500, -2.4543644),\n (7.5, 2.000, 5.000, 5.000, -2.8259429),\n (15.0, 5.000, 7.500, 7.500, -3.3093854),\n (50.0, 50.000, 10.000, 10.000, -3.6436067),\n (1000.0, 500.000, 10.000, 20.000, -27.8707323)\n ])\n def test_ex_gaussian(self, value, mu, sigma, nu, logp):\n \"\"\"Log probabilities calculated using the dexGAUS function from the R package gamlss.\n See e.g., doi: 10.1111/j.1467-9876.2005.00510.x, or http://www.gamlss.org/.\"\"\"\n with Model() as model:\n ExGaussian('eg', mu=mu, sigma=sigma, nu=nu)\n pt = {'eg': value}\n assert_almost_equal(model.fastlogp(pt), logp, decimal=select_by_precision(float64=6, float32=2), err_msg=str(pt))\n\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_vonmises(self):\n self.pymc3_matches_scipy(\n VonMises, R, {'mu': Circ, 'kappa': Rplus},\n lambda value, mu, kappa: floatX(sp.vonmises.logpdf(value, kappa, loc=mu)))\n\n def test_gumbel(self):\n def gumbel(value, mu, beta):\n return floatX(sp.gumbel_r.logpdf(value, loc=mu, scale=beta))\n self.pymc3_matches_scipy(Gumbel, R, {'mu': R, 'beta': Rplusbig}, gumbel)\n\n def test_logistic(self):\n self.pymc3_matches_scipy(Logistic, R, {'mu': R, 's': Rplus},\n lambda value, mu, s: sp.logistic.logpdf(value, mu, s),\n decimal=select_by_precision(float64=6, float32=1))\n\n def test_logitnormal(self):\n self.pymc3_matches_scipy(LogitNormal, Unit, {'mu': R, 'sd': Rplus},\n lambda value, mu, sd: (sp.norm.logpdf(logit(value), mu, sd)\n - (np.log(value) + np.log1p(-value))),\n decimal=select_by_precision(float64=6, float32=1))\n\n def 
test_multidimensional_beta_construction(self):\n with Model():\n Beta('beta', alpha=1., beta=1., shape=(10, 20))\n\n def test_rice(self):\n self.pymc3_matches_scipy(Rice, Rplus, {'nu': Rplus, 'sd': Rplusbig},\n lambda value, nu, sd: sp.rice.logpdf(value, b=nu, loc=0, scale=sd))\n\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_interpolated(self):\n for mu in R.vals:\n for sd in Rplus.vals:\n #pylint: disable=cell-var-from-loop\n xmin = mu - 5 * sd\n xmax = mu + 5 * sd\n\n class TestedInterpolated (Interpolated):\n def __init__(self, **kwargs):\n x_points = np.linspace(xmin, xmax, 100000)\n pdf_points = sp.norm.pdf(x_points, loc=mu, scale=sd)\n super(TestedInterpolated, self).__init__(\n x_points=x_points,\n pdf_points=pdf_points,\n **kwargs\n )\n\n def ref_pdf(value):\n return np.where(\n np.logical_and(value >= xmin, value <= xmax),\n sp.norm.logpdf(value, mu, sd),\n -np.inf * np.ones(value.shape)\n )\n\n self.pymc3_matches_scipy(TestedInterpolated, R, {}, ref_pdf)\n\n\ndef test_bound():\n np.random.seed(42)\n UnboundNormal = Bound(Normal)\n dist = UnboundNormal.dist(mu=0, sd=1)\n assert dist.transform is None\n assert dist.default() == 0.\n assert isinstance(dist.random(), np.ndarray)\n\n LowerNormal = Bound(Normal, lower=1)\n dist = LowerNormal.dist(mu=0, sd=1)\n assert dist.logp(0).eval() == -np.inf\n assert dist.default() > 1\n assert dist.transform is not None\n assert np.all(dist.random() > 1)\n\n UpperNormal = Bound(Normal, upper=-1)\n dist = UpperNormal.dist(mu=0, sd=1)\n assert dist.logp(-0.5).eval() == -np.inf\n assert dist.default() < -1\n assert dist.transform is not None\n assert np.all(dist.random() < -1)\n\n ArrayNormal = Bound(Normal, lower=[1, 2], upper=[2, 3])\n dist = ArrayNormal.dist(mu=0, sd=1, shape=2)\n assert_equal(dist.logp([0.5, 3.5]).eval(), -np.array([np.inf, np.inf]))\n assert_equal(dist.default(), np.array([1.5, 2.5]))\n assert dist.transform is not None\n with pytest.raises(ValueError) as err:\n dist.random()\n err.match('Drawing samples from distributions with array-valued')\n\n with Model():\n a = ArrayNormal('c', shape=2)\n assert_equal(a.tag.test_value, np.array([1.5, 2.5]))\n\n lower = tt.vector('lower')\n lower.tag.test_value = np.array([1, 2]).astype(theano.config.floatX)\n upper = 3\n ArrayNormal = Bound(Normal, lower=lower, upper=upper)\n dist = ArrayNormal.dist(mu=0, sd=1, shape=2)\n logp = dist.logp([0.5, 3.5]).eval({lower: lower.tag.test_value})\n assert_equal(logp, -np.array([np.inf, np.inf]))\n assert_equal(dist.default(), np.array([2, 2.5]))\n assert dist.transform is not None\n\n with Model():\n a = ArrayNormal('c', shape=2)\n assert_equal(a.tag.test_value, np.array([2, 2.5]))\n\n rand = Bound(Binomial, lower=10).dist(n=20, p=0.3).random()\n assert rand.dtype in [np.int16, np.int32, np.int64]\n assert rand >= 10\n\n rand = Bound(Binomial, upper=10).dist(n=20, p=0.8).random()\n assert rand.dtype in [np.int16, np.int32, np.int64]\n assert rand <= 10\n\n rand = Bound(Binomial, lower=5, upper=8).dist(n=10, p=0.6).random()\n assert rand.dtype in [np.int16, np.int32, np.int64]\n assert rand >= 5 and rand <= 8\n\n\nclass TestLatex(object):\n\n def setup_class(self):\n # True parameter values\n alpha, sigma = 1, 1\n beta = [1, 2.5]\n\n # Size of dataset\n size = 100\n\n # Predictor variable\n X = np.random.normal(size=(size, 2)).dot(np.array([[1, 0], [0, 0.2]]))\n\n # Simulate outcome variable\n Y = alpha + X.dot(beta) + np.random.randn(size)*sigma\n with Model() as self.model:\n # Priors 
for unknown model parameters\n alpha = Normal('alpha', mu=0, sd=10)\n b = Normal('beta', mu=0, sd=10, shape=(2,), observed=beta)\n sigma = HalfNormal('sigma', sd=1)\n\n # Expected value of outcome\n mu = Deterministic('mu', alpha + tt.dot(X, b))\n\n # Likelihood (sampling distribution) of observations\n Y_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)\n self.distributions = [alpha, sigma, mu, b, Y_obs]\n self.expected = (\n r'$\\text{alpha} \\sim \\text{Normal}(\\mathit{mu}=0,~\\mathit{sd}=10.0)$',\n r'$\\text{sigma} \\sim \\text{HalfNormal}(\\mathit{sd}=1.0)$',\n r'$\\text{mu} \\sim \\text{Deterministic}(\\text{alpha},~\\text{Constant},~\\text{beta})$',\n r'$\\text{beta} \\sim \\text{Normal}(\\mathit{mu}=0,~\\mathit{sd}=10.0)$',\n r'$\\text{Y_obs} \\sim \\text{Normal}(\\mathit{mu}=\\text{mu},~\\mathit{sd}=f(\\text{sigma}))$'\n )\n\n def test__repr_latex_(self):\n for distribution, tex in zip(self.distributions, self.expected):\n assert distribution._repr_latex_() == tex\n\n model_tex = self.model._repr_latex_()\n\n for tex in self.expected: # make sure each variable is in the model\n for segment in tex.strip('$').split(r'\\sim'):\n assert segment in model_tex\n\n def test___latex__(self):\n for distribution, tex in zip(self.distributions, self.expected):\n assert distribution._repr_latex_() == distribution.__latex__()\n assert self.model._repr_latex_() == self.model.__latex__()\n\n\ndef test_discrete_trafo():\n with pytest.raises(ValueError) as err:\n Binomial.dist(n=5, p=0.5, transform='log')\n err.match('Transformations for discrete distributions')\n with Model():\n with pytest.raises(ValueError) as err:\n Binomial('a', n=5, p=0.5, transform='log')\n err.match('Transformations for discrete distributions')\n"
] | [
[
"scipy.stats.distributions.halfnorm.logpdf",
"numpy.ones",
"scipy.stats.distributions.invgauss.logpdf",
"numpy.diag",
"scipy.special.logit",
"numpy.random.seed",
"numpy.asarray",
"scipy.stats.distributions.binom.logpmf",
"scipy.stats.distributions.truncnorm.logpdf",
"numpy.log",
"scipy.stats.distributions.cauchy.logpdf",
"scipy.stats.distributions.chi2.logpdf",
"scipy.stats.distributions.geom.pmf",
"scipy.stats.distributions.t.logpdf",
"scipy.stats.distributions.expon.logpdf",
"scipy.stats.distributions.nbinom.logpmf",
"scipy.integrate.tplquad",
"numpy.vstack",
"numpy.log1p",
"scipy.stats.distributions.laplace.logpdf",
"scipy.stats.distributions.norm.pdf",
"scipy.stats.distributions.uniform.logpdf",
"numpy.concatenate",
"numpy.logical_and",
"scipy.stats.distributions.triang.logpdf",
"scipy.stats.distributions.gamma.logpdf",
"scipy.stats.distributions.skewnorm.logpdf",
"scipy.stats.distributions.halfcauchy.logpdf",
"numpy.random.rand",
"scipy.stats.distributions.rice.logpdf",
"scipy.stats.distributions.vonmises.logpdf",
"numpy.identity",
"scipy.stats.distributions.pareto.logpdf",
"scipy.stats.distributions.bernoulli.logpmf",
"numpy.linspace",
"numpy.sqrt",
"numpy.eye",
"scipy.stats.distributions.gumbel_r.logpdf",
"numpy.atleast_2d",
"numpy.zeros",
"scipy.stats.distributions.poisson.logpmf",
"numpy.random.normal",
"numpy.linalg.cholesky",
"scipy.stats.distributions.logistic.logpdf",
"numpy.power",
"scipy.stats.distributions.randint.logpmf",
"scipy.stats.distributions.beta.logpdf",
"scipy.stats.distributions.norm.logpdf",
"scipy.integrate.dblquad",
"scipy.integrate.quad",
"numpy.linalg.solve",
"numpy.linalg.inv",
"scipy.stats.distributions.exponweib.logpdf",
"numpy.random.randn",
"numpy.exp",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.dot",
"scipy.stats.distributions.invgamma.logpdf"
]
] |
riedelx/adaptic-pyproc | [ "cfc14fbaadbc90f01db90440e9bd0d20950cdc77" ] | [ "libraries/adaptic.py" ] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\n# =============================================================================\n# ADAPTIC classes\n# =============================================================================\nclass adap0: # base class with funtions to read num files\n def __init__(self,name):\n self.name = name\n\n def readFile(cls, title, cutoff = None,folderPath=\"\",numPath=\"\"): # numPath = str('/num/'), path='data/'\n # wordsNum=[]\n # for data in open(folderPath+numPath+title+str(\".num\"),'r'):\n # wordsNum.append(data.split())\n # if cutoff:\n # for i in range(len(wordsNum)):\n # try:\n # if wordsNum[i][0] == '#io1' and wordsNum[i+2][0] == str(cutoff):\n # break\n # except:\n # pass\n # wordsNum = wordsNum[0:i-2]\n # return wordsNum\n\n #update 2021/07/26, cutoff specific steps\n wordsNum=[]\n for data in open(folderPath+numPath+title+str(\".num\"),'r'):\n wordsNum.append(data.split())\n if cutoff:\n lineNo = []\n for i in range(len(wordsNum)):\n try:\n if wordsNum[i][0] == '#io1':\n lineNo.append([int(wordsNum[i+2][0]),i])\n except:\n pass\n cutoff.sort(reverse=True)\n for i in cutoff:\n if i==lineNo[-1][0]:\n wordsNum=wordsNum[:(lineNo[-1][1]-2)]\n elif i<lineNo[-1][0]:\n del wordsNum[(lineNo[i-1][1]):(lineNo[i][1])]\n return wordsNum\n\n def convertNum(self, wordsNum, phrase, startRow, column, convType):\n variable=[]\n iRow=-1\n for i in wordsNum:\n iRow += 1\n if i==[phrase]:\n tempVector=[]\n jRow = iRow + startRow\n while wordsNum[jRow] != []:\n if convType == \"int\":\n tempVal = int(wordsNum[jRow][column])\n elif convType == \"float\":\n try: tempVal = float(wordsNum[jRow][column])\n except: tempVal = float(0)\n elif convType == \"str\":\n tempVal = wordsNum[jRow][column]\n tempVector.append(tempVal)\n jRow += 1\n tempVector=np.array(tempVector)\n if len(variable)==0:\n variable = tempVector\n else:\n variable=np.column_stack((variable,tempVector))\n if convType == \"str\":\n break\n if convType != \"str\":\n variable=np.column_stack((np.zeros((variable.shape[0],1)),variable))\n return variable\n\nclass adap1(adap0): # base adaptic class processing the num\n def __init__(self,title, cutoff, folderPath, numPath):\n super(adap1, self).__init__(title)\n self.wordsNum = self.readFile(title, cutoff,folderPath,numPath)\n self.step = self.convertNum(self.wordsNum,\"#io1\",2,0,\"int\")\n self.nodeName = self.convertNum(self.wordsNum,\"#in2\",3,0,\"str\")\n\n def returnVec(self,attribute,idx=':'):\n self.hasAttribute(attribute)\n return eval('self.'+attribute+'['+str(idx)+'].T')\n\n # def pseudoStatic(self, LF, dispMax):\n # pseudoCrv = [0]\n # for i in range(1,LF.shape[1]):\n # pseudoTemp=np.trapz(LF[0,0:i+1],dispMax[0,0:i+1])/dispMax[0,i]\n # pseudoCrv.append(pseudoTemp)\n # return np.array(pseudoCrv)[None, :]\n\n def restrainedName_create(self):\n self.restrainedName = self.convertNum(self.wordsNum,\"#in1\",3,0,\"str\")\n\n def cbpName_create(self):\n self.cbpName = self.convertNum(self.wordsNum,\"#ie1\",3,0,\"str\")\n\n def jelName_create(self):\n self.jelName = self.convertNum(self.wordsNum,\"#ie11\",3,0,\"str\")\n\n def lnkName_create(self):\n self.lnkName = self.convertNum(self.wordsNum,\"#ie18\",3,0,\"str\")\n\n def nodeDispX_create(self):\n self.nodeDispX = self.convertNum(self.wordsNum,\"#in2\",3,1,\"float\")\n\n def nodeDispY_create(self):\n self.nodeDispY = self.convertNum(self.wordsNum,\"#in2\",3,2,\"float\")\n\n def nodeDispY_create(self):\n self.nodeDispY = self.convertNum(self.wordsNum,\"#in2\",3,2,\"float\")\n\n def 
restrainedX_create(self):\n self.hasAttribute(\"restrainedName\")\n self.restrainedX = self.convertNum(self.wordsNum,\"#in1\",3,1,\"float\")\n\n def restrainedY_create(self):\n self.hasAttribute(\"restrainedName\")\n self.restrainedY = self.convertNum(self.wordsNum,\"#in1\",3,2,\"float\")\n\n # def time_create(self):\n # self.time = self.convertNum(self.wordsNum,\"#io1\",2,2,\"float\")\n\n # def LF_create(self):\n # self.LF = self.convertNum(self.wordsNum,\"#io1\",2,2,\"float\")\n\n def hasAttribute(self, att):\n if hasattr(self, att):\n pass\n else:\n getattr( self, str(att+\"_create\"))()\n\nclass adaptic2D(adap1):\n def __init__(self,title, cutoff = None,folderPath=\"\",numPath=\"\"):\n super(adaptic2D, self).__init__(title, cutoff, folderPath, numPath)\n\n def gaussName_create(self):\n temp1 = self.convertNum(self.wordsNum,\"#ie1s\",3,0,\"str\")\n temp2 = self.convertNum(self.wordsNum,\"#ie1s\",3,1,\"str\")\n self.gaussName = np.array([temp1[i]+\"_\"+temp2[i] for i in range(len(temp2))])\n del temp1, temp2\n\n def nodeDispRZ_create(self):\n self.nodeDispRZ = self.convertNum(self.wordsNum,\"#in2\",3,3,\"float\")\n\n def cbpM1_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpM1 = self.convertNum(self.wordsNum,\"#ie1\",3,1,\"float\")\n\n def cbpTheta1_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpTheta1 = self.convertNum(self.wordsNum,\"#ie1d1\",3,1,\"float\")\n\n def restrainedRZ_create(self):\n self.hasAttribute(\"restrainedName\")\n self.restrainedRZ = self.convertNum(self.wordsNum,\"#in1\",3,3,\"float\")\n\n def cbpM2_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpM2 = self.convertNum(self.wordsNum,\"#ie1\",3,2,\"float\")\n\n def cbpTheta2_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpTheta2 = self.convertNum(self.wordsNum,\"#ie1d1\",3,2,\"float\")\n\n def cbpF_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpF = self.convertNum(self.wordsNum,\"#ie1\",3,3,\"float\")\n\n def cbpDelta_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpDelta = self.convertNum(self.wordsNum,\"#ie1d1\",3,3,\"float\")\n\n def jelF_create(self):\n self.hasAttribute(\"jelName\")\n self.jelF = self.convertNum(self.wordsNum,\"#ie11\",3,1,\"float\")\n\n def jelV_create(self):\n self.hasAttribute(\"jelName\")\n self.jelV = self.convertNum(self.wordsNum,\"#ie11\",3,2,\"float\")\n\n def jelM_create(self):\n self.hasAttribute(\"jelName\")\n self.jelM = self.convertNum(self.wordsNum,\"#ie11\",3,3,\"float\")\n\n def lnkM1_create(self):\n self.hasAttribute(\"lnkName\")\n self.lnkM1 = self.convertNum(self.wordsNum,\"#ie18\",3,1,\"float\")\n\n def lnkM2_create(self):\n self.hasAttribute(\"lnkName\")\n self.lnkM2 = self.convertNum(self.wordsNum,\"#ie18\",3,2,\"float\")\n\n def lnkF_create(self):\n self.hasAttribute(\"lnkName\")\n self.lnkF = self.convertNum(self.wordsNum,\"#ie18\",3,3,\"float\")\n\n def gauss1StrainB_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss1StrainB = self.convertNum(self.wordsNum,\"#ie1s\",3,2,\"float\")\n\n def gauss1StrainT_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss1StrainT = self.convertNum(self.wordsNum,\"#ie1s\",3,4,\"float\")\n\n def gauss1StrainAv_create(self):\n self.hasAttribute(\"gauss1StrainT\")\n self.hasAttribute(\"gauss1StrainB\")\n self.hasAttribute(\"gaussName\")\n self.gauss1StrainAv = (self.gauss1StrainT+self.gauss1StrainB)/2\n\n def gauss2StrainAv_create(self):\n self.hasAttribute(\"gauss2StrainT\")\n self.hasAttribute(\"gauss2StrainB\")\n self.hasAttribute(\"gaussName\")\n self.gauss2StrainAv 
= (self.gauss2StrainT+self.gauss2StrainB)/2\n\n def gauss1StressAv_create(self):\n self.hasAttribute(\"gauss1StressT\")\n self.hasAttribute(\"gauss1StressB\")\n self.hasAttribute(\"gaussName\")\n self.gauss1StressAv = (self.gauss1StressT+self.gauss1StressB)/2\n\n def gauss2StressAv_create(self):\n self.hasAttribute(\"gauss2StressT\")\n self.hasAttribute(\"gauss2StressB\")\n self.hasAttribute(\"gaussName\")\n self.gauss2StressAv = (self.gauss2StressT+self.gauss2StressB)/2\n\n def gauss1StressB_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss1StressB = self.convertNum(self.wordsNum,\"#ie1s\",3,3,\"float\")\n\n def gauss1StressT_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss1StressT = self.convertNum(self.wordsNum,\"#ie1s\",3,5,\"float\")\n\n def gauss2StrainB_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss2StrainB = self.convertNum(self.wordsNum,\"#ie1s\",3,6,\"float\")\n\n def gauss2StrainT_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss2StrainT = self.convertNum(self.wordsNum,\"#ie1s\",3,8,\"float\")\n\n def gauss2StressB_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss2StressB = self.convertNum(self.wordsNum,\"#ie1s\",3,7,\"float\")\n\n def gauss2StressT_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss2StressT = self.convertNum(self.wordsNum,\"#ie1s\",3,9,\"float\")\n\n def bndName_create(self):\n self.bndName = self.convertNum(self.wordsNum,\"#ie23\",3,0,\"str\")\n\n def bndM1_create(self):\n self.hasAttribute(\"bndName\")\n self.bndM1 = self.convertNum(self.wordsNum,\"#ie23\",3,1,\"float\")\n\n def bndM2_create(self):\n self.hasAttribute(\"bndName\")\n self.bndM2 = self.convertNum(self.wordsNum,\"#ie23\",3,2,\"float\")\n\n def bndF_create(self):\n self.hasAttribute(\"bndName\")\n self.bndF = self.convertNum(self.wordsNum,\"#ie23\",3,3,\"float\")\n\n def bndGaussName_create(self):\n temp1 = self.convertNum(self.wordsNum,\"#ie23s\",3,0,\"str\")\n temp2 = self.convertNum(self.wordsNum,\"#ie23s\",3,1,\"str\")\n self.bndGaussName = np.array([temp1[i]+\"_\"+temp2[i] for i in range(len(temp2))])\n del temp1, temp2\n\n def bndStrain_create(self):\n self.hasAttribute(\"bndGaussName\")\n self.bndStrain = self.convertNum(self.wordsNum,\"#ie23s\",3,2,\"float\")\n\n def bndStress_create(self):\n self.hasAttribute(\"bndGaussName\")\n self.bndStress = self.convertNum(self.wordsNum,\"#ie23s\",3,3,\"float\")\n\n def bndSlip_create(self):\n self.hasAttribute(\"bndGaussName\")\n self.bndSlip = self.convertNum(self.wordsNum,\"#ie23s\",3,4,\"float\")\n\n def bndBond_create(self):\n self.hasAttribute(\"bndGaussName\")\n self.bndBond = self.convertNum(self.wordsNum,\"#ie23s\",3,5,\"float\")\n\n def cncName_create(self):\n self.cncName = self.convertNum(self.wordsNum,\"#ie24\",3,0,\"str\")\n\n def cncM1_create(self):\n self.hasAttribute(\"cncName\")\n self.cncM1 = self.convertNum(self.wordsNum,\"#ie24\",3,1,\"float\")\n\n def cncM2_create(self):\n self.hasAttribute(\"cncName\")\n self.cncM2 = self.convertNum(self.wordsNum,\"#ie24\",3,2,\"float\")\n\n def cncF_create(self):\n self.hasAttribute(\"cncName\")\n self.cncF = self.convertNum(self.wordsNum,\"#ie24\",3,3,\"float\")\n\n def cncGaussName_create(self):\n temp1 = self.convertNum(self.wordsNum,\"#ie24s\",3,0,\"str\")\n temp2 = self.convertNum(self.wordsNum,\"#ie24s\",3,1,\"str\")\n self.cncGaussName = np.array([temp1[i]+\"_\"+temp2[i] for i in range(len(temp2))])\n del temp1, temp2\n\n def cncStrain_create(self):\n self.hasAttribute(\"cncGaussName\")\n self.cncStrain = 
self.convertNum(self.wordsNum,\"#ie24s\",3,2,\"float\")\n\n def cncStress_create(self):\n self.hasAttribute(\"cncGaussName\")\n self.cncStress = self.convertNum(self.wordsNum,\"#ie24s\",3,3,\"float\")\n\n def cncGamma_create(self):\n self.hasAttribute(\"cncGaussName\")\n self.cncGamma = self.convertNum(self.wordsNum,\"#ie24s\",3,4,\"float\")\n\n def cncTau_create(self):\n self.hasAttribute(\"cncGaussName\")\n self.cncTau = self.convertNum(self.wordsNum,\"#ie24s\",3,5,\"float\")\n\n def findIndice(self, att, ID):\n indice = \"error\"\n if \"restrained\" in att:\n att = \"restrainedName\"\n if \"cbp\" in att:\n att = \"cbpName\"\n if \"jel\" in att:\n att = \"jelName\"\n if \"lnk\" in att:\n att = \"lnkName\"\n if \"gauss\" in att:\n att = \"gaussName\"\n if att == \"bndM1\" or att == \"bndM2\" or att == \"bndF\":\n att = \"bndName\"\n if att == \"bndStrain\" or att == \"bndStress\" or att == \"bndSlip\" or att == \"bndBond\":\n att = \"bndGaussName\"\n if att == \"cncM1\" or att == \"cncM2\" or att == \"cncF\":\n att = \"cncName\"\n if att == \"cncStrain\" or att == \"cncStress\" or att == \"cncGamma\" or att == \"cncTau\":\n att = \"cncGaussName\"\n #print('att = {0}'.format(att))\n for i, j in enumerate(getattr(self, att)):\n if j == ID:\n indice = i\n break\n if indice == \"error\":\n print('indice {0} in attribute {1} does not exist'.format(ID, att))\n return indice\n\nclass adaptic3D(adap1):\n def __init__(self,title, cutoff = None,folderPath=\"\",numPath=\"\"):\n super(adaptic3D, self).__init__(title, cutoff, folderPath, numPath)\n\n def gaussName_create(self): # element - material - position\n temp1 = self.convertNum(self.wordsNum,\"#ie31s\",3,0,\"str\")\n temp2 = self.convertNum(self.wordsNum,\"#ie31s\",3,1,\"str\")\n temp2 = np.array([i.replace(\".\", \"_\") for i in temp2])\n self.gaussName = np.array([temp1[i]+\"_\"+temp2[i] for i in range(len(temp2))])\n del temp1, temp2\n\n def cvsName_create(self): # element - material - position\n temp1 = self.convertNum(self.wordsNum,\"#ie52s1\",3,0,\"str\")\n temp2 = self.convertNum(self.wordsNum,\"#ie52s1\",3,1,\"str\")\n temp2 = np.array([i.replace(\".\", \"_\") for i in temp2])\n self.cvsName = np.array([temp1[i]+\"_\"+temp2[i] for i in range(len(temp2))])\n del temp1, temp2\n\n def nodeDispZ_create(self):\n self.nodeDispRZ = self.convertNum(self.wordsNum,\"#in2\",3,3,\"float\")\n\n def nodeDispRX_create(self):\n self.nodeDispRZ = self.convertNum(self.wordsNum,\"#in2\",3,4,\"float\")\n\n def nodeDispRY_create(self):\n self.nodeDispRZ = self.convertNum(self.wordsNum,\"#in2\",3,5,\"float\")\n\n def nodeDispRZ_create(self):\n self.nodeDispRZ = self.convertNum(self.wordsNum,\"#in2\",3,6,\"float\")\n\n def cbpMy1_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpMy1 = self.convertNum(self.wordsNum,\"#ie31\",3,1,\"float\")\n\n def cbpMz1_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpMz1 = self.convertNum(self.wordsNum,\"#ie31\",3,2,\"float\")\n\n def cbpMy2_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpMy2 = self.convertNum(self.wordsNum,\"#ie31\",3,3,\"float\")\n\n def cbpMz2_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpMz2 = self.convertNum(self.wordsNum,\"#ie31\",3,4,\"float\")\n\n def cbpF_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpF = self.convertNum(self.wordsNum,\"#ie31\",3,5,\"float\")\n\n def cbpMT_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpMT = self.convertNum(self.wordsNum,\"#ie31\",3,6,\"float\")\n\n def cbpThy1_create(self):\n self.hasAttribute(\"cbpName\")\n 
self.cbpThy1 = self.convertNum(self.wordsNum,\"#ie31d1\",3,1,\"float\")\n\n def cbpThz1_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpThz1 = self.convertNum(self.wordsNum,\"#ie1d1\",3,2,\"float\")\n\n def cbpThy2_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpThy2 = self.convertNum(self.wordsNum,\"#ie31d1\",3,3,\"float\")\n\n def cbpThz2_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpThz2 = self.convertNum(self.wordsNum,\"#ie1d1\",3,4,\"float\")\n\n def cbpDelta_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpDelta = self.convertNum(self.wordsNum,\"#ie1d1\",3,5,\"float\")\n\n def cbpTheta_create(self):\n self.hasAttribute(\"cbpName\")\n self.cbpTheta = self.convertNum(self.wordsNum,\"#ie1d1\",3,6,\"float\")\n\n def restrainedRZ_create(self):\n self.hasAttribute(\"restrainedName\")\n self.restrainedRZ = self.convertNum(self.wordsNum,\"#in1\",3,3,\"float\")\n\n def jelFx_create(self):\n self.hasAttribute(\"jelName\")\n self.jelFx = self.convertNum(self.wordsNum,\"#ie41\",3,1,\"float\")\n\n def jelFy_create(self):\n self.hasAttribute(\"jelName\")\n self.jelFy = self.convertNum(self.wordsNum,\"#ie41\",3,2,\"float\")\n\n def jelFz_create(self):\n self.hasAttribute(\"jelName\")\n self.jelFz = self.convertNum(self.wordsNum,\"#ie41\",3,3,\"float\")\n\n def jelMx_create(self):\n self.hasAttribute(\"jelName\")\n self.jelMx = self.convertNum(self.wordsNum,\"#ie41\",3,4,\"float\")\n\n def jelMy_create(self):\n self.hasAttribute(\"jelName\")\n self.jelMy = self.convertNum(self.wordsNum,\"#ie41\",3,5,\"float\")\n\n def jelMz_create(self):\n self.hasAttribute(\"jelName\")\n self.jelMz = self.convertNum(self.wordsNum,\"#ie41\",3,6,\"float\")\n\n def lnkMy1_create(self):\n self.hasAttribute(\"lnkName\")\n self.lnkMy1 = self.convertNum(self.wordsNum,\"#ie18\",3,1,\"float\")\n\n def lnkMz1_create(self):\n self.hasAttribute(\"lnkName\")\n self.lnkMz1 = self.convertNum(self.wordsNum,\"#ie18\",3,2,\"float\")\n\n def lnkMy2_create(self):\n self.hasAttribute(\"lnkName\")\n self.lnkMy2 = self.convertNum(self.wordsNum,\"#ie18\",3,3,\"float\")\n\n def lnkMz2_create(self):\n self.hasAttribute(\"lnkName\")\n self.lnkMz2 = self.convertNum(self.wordsNum,\"#ie18\",3,4,\"float\")\n\n def lnkF_create(self):\n self.hasAttribute(\"lnkName\")\n self.lnkF = self.convertNum(self.wordsNum,\"#ie18\",3,5,\"float\")\n\n def lnkMT_create(self):\n self.hasAttribute(\"lnkName\")\n self.lnkMT = self.convertNum(self.wordsNum,\"#ie18\",3,6,\"float\")\n\n def gauss1Strain_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss1Strain = self.convertNum(self.wordsNum,\"#ie31s\",3,2,\"float\")\n\n def gauss2Strain_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss2Strain = self.convertNum(self.wordsNum,\"#ie31s\",3,4,\"float\")\n\n def gauss1Stress_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss1Stress = self.convertNum(self.wordsNum,\"#ie31s\",3,3,\"float\")\n\n def gauss2Stress_create(self):\n self.hasAttribute(\"gaussName\")\n self.gauss2Stress = self.convertNum(self.wordsNum,\"#ie31s\",3,5,\"float\")\n\n def cvsNx_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsNx = self.convertNum(self.wordsNum,\"#ie52s1\",3,2,\"float\")\n\n def cvsNy_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsNy = self.convertNum(self.wordsNum,\"#ie52s1\",3,3,\"float\")\n\n def cvsNxy_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsNxy = self.convertNum(self.wordsNum,\"#ie52s1\",3,4,\"float\")\n\n def cvsMx_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsMx = 
self.convertNum(self.wordsNum,\"#ie52s1\",3,5,\"float\")\n\n def cvsMy_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsMy = self.convertNum(self.wordsNum,\"#ie52s1\",3,6,\"float\")\n\n def cvsMxy_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsMxy = self.convertNum(self.wordsNum,\"#ie52s1\",3,7,\"float\")\n\n def cvsQxz_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsQxz = self.convertNum(self.wordsNum,\"#ie52s1\",3,8,\"float\")\n\n def cvsQyz_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsQyz = self.convertNum(self.wordsNum,\"#ie52s1\",3,9,\"float\")\n\n def cvsEpsx_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsEpsx = self.convertNum(self.wordsNum,\"#ie52s2\",3,2,\"float\")\n\n def cvsEpsy_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsEpsy = self.convertNum(self.wordsNum,\"#ie52s2\",3,3,\"float\")\n\n def cvsEpsxy_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsEpsxy = self.convertNum(self.wordsNum,\"#ie52s2\",3,4,\"float\")\n\n def cvsKapx_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsKapx = self.convertNum(self.wordsNum,\"#ie52s2\",3,5,\"float\")\n\n def cvsKapy_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsKapy = self.convertNum(self.wordsNum,\"#ie52s2\",3,6,\"float\")\n\n def cvsKapxy_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsKapxy = self.convertNum(self.wordsNum,\"#ie52s2\",3,7,\"float\")\n\n def cvsEpsxz_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsEpsxz = self.convertNum(self.wordsNum,\"#ie52s2\",3,8,\"float\")\n\n def cvsEpsyz_create(self):\n self.hasAttribute(\"cvsName\")\n self.cvsEpsyz = self.convertNum(self.wordsNum,\"#ie52s2\",3,9,\"float\")\n\n def findIndice(self, att, ID):\n indice = \"error\"\n if \"restrained\" in att:\n att = \"restrainedName\"\n if \"cbp\" in att:\n att = \"cbpName\"\n if \"jel\" in att:\n att = \"jelName\"\n if \"lnk\" in att:\n att = \"lnkName\"\n if \"gauss\" in att:\n att = \"gaussName\"\n #print('att = {0}'.format(att))\n for i, j in enumerate(getattr(self, att)):\n if j == ID:\n indice = i\n break\n if indice == \"error\":\n print('indice {0} in attribute {1} does not exist'.format(ID, att))\n return indice\n"
] | [
[
"numpy.array",
"numpy.column_stack",
"numpy.zeros"
]
] |
Dabble-of-DevOps-Bio/ella | [
"e38631d302611a143c9baaa684bcbd014d9734e4"
] | [
"src/vardb/util/vcfiterator.py"
] | [
"from typing import Any, Dict, IO, Mapping, Optional, Sequence, Tuple, Union\nimport cyvcf2\nimport logging\nimport numpy as np\n\nlog = logging.getLogger(__name__)\n\n# have to re-declare here since only exist in cyvcf2 stub and fails on execution\nText = Union[str, bytes]\nPrimitives = Union[int, float, bool, Text]\n\n\ndef _numpy_unknown_to_none(a: np.ndarray) -> list:\n \"\"\"\n Unknown values ('.') in integer arrays are assigned as '-inf' (e.g. for in32 the value is -2^31)\n Convert array to list, and replace these values with None\n \"\"\"\n b = a.tolist()\n n = max(a.shape)\n indices = zip(*np.where(a < np.iinfo(a.dtype).min + n))\n\n def set_value(x, i, value):\n \"Set value in nested lists\"\n if len(i) > 1:\n x = set_value(x[i[0]], i[1:], value)\n else:\n x[i[0]] = value\n\n for idx in indices:\n set_value(b, idx, None)\n\n return b\n\n\ndef numpy_to_list(a: Optional[np.ndarray]):\n if a is None:\n return None\n if np.issubdtype(a.dtype, np.integer):\n return _numpy_unknown_to_none(a)\n else:\n return a.tolist()\n\n\nclass Record(object):\n variant: cyvcf2.Variant\n samples: Sequence[str]\n meta: Mapping[str, Any]\n\n def __init__(self, variant: cyvcf2.Variant, samples: Sequence[str], meta: Mapping[str, Any]):\n self.variant = variant\n self.samples = samples\n self.meta = meta\n\n def _sample_index(self, sample_name: str):\n return self.samples.index(sample_name)\n\n def get_raw_filter(self):\n \"\"\"Need to implement this here, as cyvcf2 does not distinguish between 'PASS' and '.' (both return None).\n Therefore, we need to parse the VCF line to get the raw filter status.\"\"\"\n return str(self.variant).split(\"\\t\")[6]\n\n def sample_genotype(self, sample_name: str):\n return tuple(self.variant.genotypes[self._sample_index(sample_name)][:-1])\n\n def has_allele(self, sample_name: str):\n gt = self.sample_genotype(sample_name)\n return max(gt) == 1\n\n def get_format_sample(self, property: str, sample_name: str, scalar: bool = False):\n if property == \"GT\":\n return self.sample_genotype(sample_name)\n else:\n prop = self.variant.format(property)\n if prop is not None:\n ret = numpy_to_list(prop[self._sample_index(sample_name)])\n if scalar:\n assert len(ret) == 1\n return ret[0]\n else:\n return ret\n\n def get_format(self, property: str):\n if property == \"GT\":\n return self.variant.genotypes\n else:\n return numpy_to_list(self.variant.format(property))\n\n def get_block_id(self):\n return self.variant.INFO.get(\"OLD_MULTIALLELIC\")\n\n def is_multiallelic(self):\n return self.get_block_id() is not None\n\n def is_sample_multiallelic(self, sample_name: str):\n return self.is_multiallelic() and bool(set(self.sample_genotype(sample_name)) - set([0, 1]))\n\n def annotation(\n self,\n ) -> Dict[str, Union[Primitives, Tuple[Primitives, ...]]]:\n return dict(x for x in self.variant.INFO)\n\n def __str__(self):\n s = repr(self.variant)\n\n if self.samples:\n genotypes = []\n for i, x in enumerate(self.variant.gt_bases):\n genotypes.append(f\"{x} ({str(self.samples[i])})\")\n\n s += f\" - Genotypes: {', '.join(genotypes)}\"\n return s\n\n\nRESERVED_GT_HEADERS = {\n \"AD\": {\"Number\": \"R\", \"Type\": \"Integer\", \"Description\": \"Injected. Read depth for each allele\"},\n \"ADF\": {\n \"Number\": \"R\",\n \"Type\": \"Integer\",\n \"Description\": \"Injected. Read depth for each allele on the forward strand\",\n },\n \"ADR\": {\n \"Number\": \"R\",\n \"Type\": \"Integer\",\n \"Description\": \"Injected. 
Read depth for each allele on the reverse strand\",\n },\n \"DP\": {\"Number\": \"1\", \"Type\": \"Integer\", \"Description\": \"Injected. Read depth\"},\n \"EC\": {\n \"Number\": \"A\",\n \"Type\": \"Integer\",\n \"Description\": \"Injected. Expected alternate allele counts\",\n },\n \"FT\": {\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"Injected. Filter indicating if this genotype was “called”\",\n },\n \"GL\": {\"Number\": \"G\", \"Type\": \"Float\", \"Description\": \"Injected. Genotype likelihoods\"},\n \"GP\": {\n \"Number\": \"G\",\n \"Type\": \"Float\",\n \"Description\": \"Injected. Genotype posterior probabilities\",\n },\n \"GQ\": {\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Injected. Conditional genotype quality\",\n },\n \"GT\": {\"Number\": \"1\", \"Type\": \"String\", \"Description\": \"Injected. Genotype\"},\n \"HQ\": {\"Number\": \"2\", \"Type\": \"Integer\", \"Description\": \"Injected. Haplotype quality\"},\n \"MQ\": {\"Number\": \"1\", \"Type\": \"Integer\", \"Description\": \"Injected. RMS mapping quality\"},\n \"PL\": {\n \"Number\": \"G\",\n \"Type\": \"Integer\",\n \"Description\": \"Injected. Phred-scaled genotype likelihoods rounded to the closest integer\",\n },\n \"PP\": {\n \"Number\": \"G\",\n \"Type\": \"Integer\",\n \"Description\": \"Injected. Phred-scaled genotype posterior probabilities rounded to the closest integer\",\n },\n \"PQ\": {\"Number\": \"1\", \"Type\": \"Integer\", \"Description\": \"Injected. Phasing quality\"},\n \"PS\": {\"Number\": \"1\", \"Type\": \"Integer\", \"Description\": \"Injected. Phase\"},\n}\n\n\nclass VcfIterator(object):\n def __init__(self, path_or_fileobject: Union[str, IO], include_raw: bool = False):\n self.path_or_fileobject = path_or_fileobject\n self.reader = cyvcf2.Reader(self.path_or_fileobject, gts012=True)\n self.include_raw = include_raw\n self.samples = self.reader.samples\n self.add_format_headers()\n self.meta: Dict[str, list] = {}\n for h in self.reader.header_iter():\n if h.type not in self.meta:\n self.meta[h.type] = []\n self.meta[h.type].append(h.info())\n\n def add_format_headers(self):\n \"Add format headers if they do not exist. This is a subset of the reserved genotype keys from https://samtools.github.io/hts-specs/VCFv4.3.pdf (table 2)\"\n for key, fmt in RESERVED_GT_HEADERS.items():\n if key in self.reader and self.reader.get_header_type(key) == \"FORMAT\":\n existing_header_line = self.reader[key]\n if (\n existing_header_line[\"Number\"] != fmt[\"Number\"]\n or existing_header_line[\"Type\"] != fmt[\"Type\"]\n ):\n log.warning(\n f\"Header for format field {key} in VCF does not match VCF spec. Ignoring.\"\n )\n else:\n self.reader.add_format_to_header({**fmt, **{\"ID\": key}})\n\n def __iter__(self):\n variant: cyvcf2.Variant\n if self.include_raw:\n for variant in self.reader:\n yield str(variant), variant\n else:\n for variant in self.reader:\n r = Record(variant, self.samples, self.meta)\n yield r\n"
] | [
[
"numpy.iinfo",
"numpy.issubdtype"
]
] |
ayanc/learncfa | [
"e3584b51cace5f1feabe22d0ae8102b21df8d60e"
] | [
"run/sensor.py"
] | [
"# Copyright (C) 2016 Ayan Chakrabarti <[email protected]>\nimport numpy as np\n\ndef trunc(img):\n w = img.shape[0]; h = img.shape[1]\n w = (w//8)*8\n h = (h//8)*8\n return img[0:w,0:h,...].copy()\n \n\ndef _clip(img):\n return np.maximum(0.,np.minimum(1.,img))\n\ndef bayer(img,nstd):\n v = np.zeros((img.shape[0],img.shape[1]),dtype=np.float32)\n v[0::2,0::2] = img[0::2,0::2,1]\n v[1::2,1::2] = img[1::2,1::2,1]\n v[1::2,0::2] = img[1::2,0::2,0]\n v[0::2,1::2] = img[0::2,1::2,2]\n\n v = v/3.0 + np.float32(np.random.normal(0,1,v.shape))*nstd\n\n return _clip(v)\n\ndef cfz(img,nstd):\n v = np.sum(img,axis=2)\n v[0::4,0::4] = img[0::4,0::4,1]\n v[1::4,1::4] = img[1::4,1::4,1]\n v[1::4,0::4] = img[1::4,0::4,0]\n v[0::4,1::4] = img[0::4,1::4,2]\n\n v = v/3.0 + np.float32(np.random.normal(0,1,v.shape))*nstd\n\n return _clip(v)\n\ndef lcfa(img,nstd,code_str):\n code = np.asarray([int(s) for s in code_str],dtype=np.int);\n code.shape = (8,8);\n\n v = np.sum(img,axis=2)\n for i in range(8):\n for j in range(8):\n if code[i,j] < 3:\n v[i::8,j::8] = img[i::8,j::8,code[i,j]]\n\n v = v/3.0 + np.float32(np.random.normal(0,1,v.shape))*nstd\n return _clip(v)\n \n"
] | [
[
"numpy.random.normal",
"numpy.sum",
"numpy.zeros",
"numpy.minimum"
]
] |
mexxexx/ionsrcopt | [
"889db53f84ea5dd2199b882a5a1b36c256aa5128"
] | [
"visualization/svm.py"
] | [
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport warnings\n\nfrom sklearn import svm\nfrom sklearn import metrics\nfrom sklearn import model_selection\nfrom sklearn import preprocessing\n\n\ndef main():\n files = {\n \"./Results/JanNov2016.csv\": {\"label\": 0, \"color\": \"#880000\"},\n \"./Results/JanNov2016_unstable.csv\": {\"label\": 1, \"color\": \"#FFAAAA\"},\n \"./Results/JanNov2018.csv\": {\"label\": 0, \"color\": \"#000088\"},\n \"./Results/JanNov2018_unstable.csv\": {\"label\": 1, \"color\": \"#AAAAFF\"},\n }\n df = read_summaries(files)\n features = [\"bias disc\", \"gas\", \"RF\", \"solinj\", \"solcen\", \"solext\", \"HTI\"]\n features = [(f, \"50%\") for f in features]\n feature_ranges = np.array(\n [(df[features[i]].min(), df[features[i]].max()) for i in range(len(features))]\n )\n\n num_base_points = 80\n shifting_resolution = 2000\n max_deviation = 0.1\n\n eval_resolution = 10000\n\n X = df[features].values\n scaler = preprocessing.RobustScaler((10, 90)).fit(X)\n X = scaler.transform(X)\n\n y = df[\"label\"].values\n # weights = df[('DURATION', 'in_hours')]\n kf = model_selection.KFold(n_splits=5, shuffle=True)\n confusion_matrix = np.zeros((2, 2))\n for train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n model = train_model(X_train, y_train)\n confusion_matrix += test_model(model, X_test, y_test)\n\n print(create_classification_report(confusion_matrix))\n\n model = train_model(X, y, probability=True)\n\n X = df[features].values\n base_points = X[np.random.permutation(len(X))[:num_base_points]]\n shifted_points = create_shifted_points(\n base_points, shifting_resolution, max_deviation\n )\n shifted_points = scale_shifted_points(shifted_points, scaler.transform)\n\n sensitivities = estimate_feature_sensitivity(model, shifted_points)\n shifted_points = scale_shifted_points(shifted_points, scaler.inverse_transform)\n\n eval_grid = create_eval_grid(feature_ranges, eval_resolution)\n sensitivities = average_sensitivities(\n shifted_points, sensitivities, eval_grid, eval_resolution\n )\n plot_sensitivity(eval_grid, sensitivities, features)\n\n\ndef average_sensitivities(points, sensitivities, eval_grid, eval_resolution):\n dim = len(points)\n num_base_points = len(points[0])\n result = np.empty((dim, eval_resolution))\n result[:] = np.nan\n\n for d in range(dim):\n evaluations = np.array(\n [\n np.interp(\n eval_grid[d],\n xp=points[d, p, :, d],\n fp=sensitivities[d, p, :, 0],\n left=np.nan,\n right=np.nan,\n )\n for p in range(num_base_points)\n ]\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n result[d] = np.nanmean(evaluations, axis=0)\n\n return result\n\n\ndef create_eval_grid(feature_ranges, eval_resolution):\n return np.linspace(\n start=feature_ranges[:, 0], stop=feature_ranges[:, 1], num=eval_resolution\n ).T\n\n\ndef plot_sensitivity(eval_grid, sensitivities, features):\n dim = len(eval_grid)\n fig, ax = plt.subplots(nrows=dim, ncols=1)\n fig.suptitle(\"Probability for source to be unstable\")\n for d in range(dim):\n ax[d].set_title(features[d])\n ax[d].plot(eval_grid[d], sensitivities[d])\n\n figManager = plt.get_current_fig_manager()\n figManager.window.showMaximized()\n plt.subplots_adjust(\n left=0.05, bottom=0.05, right=0.95, top=0.93, wspace=None, hspace=0.4\n )\n plt.show()\n\n\ndef estimate_feature_sensitivity(model, shifted_points):\n sensitivities = [\n [model.predict_proba(points) 
for points in shifted_dim]\n for shifted_dim in shifted_points\n ]\n return np.array(sensitivities)\n\n\ndef scale_shifted_points(shifted_points, scaling_method):\n shifted_points = [\n [scaling_method(points) for points in shifted_dim]\n for shifted_dim in shifted_points\n ]\n return np.array(shifted_points)\n\n\ndef create_shifted_points(base_points, resolution, max_deviation):\n dimension = len(base_points[0])\n\n shifted_points = [[] for d in range(dimension)]\n\n for d in range(dimension):\n for base_point in base_points:\n points = np.tile(base_point, (resolution, 1))\n min_val = base_point[d] * (1 - max_deviation)\n max_val = base_point[d] * (1 + max_deviation)\n if max_val < min_val:\n swap = min_val\n min_val = max_val\n max_val = swap\n points[:, d] = np.linspace(min_val, max_val, resolution)\n shifted_points[d].append(points)\n\n return np.array(shifted_points)\n\n\ndef train_model(X, y, probability=False):\n svc = svm.SVC(C=10.0, kernel=\"rbf\", gamma=\"auto\", probability=probability)\n svc.fit(X, y)\n return svc\n\n\ndef test_model(model, X, y):\n y_pred = model.predict(X)\n return metrics.confusion_matrix(y, y_pred)\n\n\ndef create_classification_report(confusion_matrix):\n df = pd.DataFrame()\n df[\"precision\"] = [\n confusion_matrix[0, 0] / (confusion_matrix[0, 0] + confusion_matrix[1, 0]),\n confusion_matrix[1, 1] / (confusion_matrix[1, 1] + confusion_matrix[0, 1]),\n ]\n df[\"recall\"] = [\n confusion_matrix[0, 0] / (confusion_matrix[0, 0] + confusion_matrix[0, 1]),\n confusion_matrix[1, 1] / (confusion_matrix[1, 1] + confusion_matrix[1, 0]),\n ]\n df[\"f1-score\"] = (\n 2 * df[\"precision\"] * df[\"recall\"] / (df[\"precision\"] + df[\"recall\"])\n )\n\n return df\n\n\ndef read_summaries(files):\n df = None\n for filename, marker in files.items():\n df_new = pd.read_csv(filename, index_col=0, header=[0, 1])\n df_new[\"label\"] = marker[\"label\"]\n\n if not df is None:\n df = df.append(df_new, sort=False)\n else:\n df = df_new\n\n df[\"label\"] = df[\"label\"]\n return df[df.index >= 0]\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.tile",
"numpy.empty",
"numpy.zeros",
"sklearn.svm.SVC",
"pandas.read_csv",
"numpy.nanmean",
"numpy.interp",
"pandas.DataFrame",
"matplotlib.pyplot.get_current_fig_manager",
"matplotlib.pyplot.subplots",
"sklearn.metrics.confusion_matrix",
"sklearn.preprocessing.RobustScaler",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"sklearn.model_selection.KFold",
"numpy.array",
"numpy.linspace"
]
] |
victor-gil-sepulveda/PhD-ANMPythonHelpers | [
"c0e15684cce4aa4da90141b51f043a567a5f8655"
] | [
"anmichelpers/comparison/comparison.py"
] | [
"import numpy\nimport anmichelpers.tools.tools as tools\nimport math\nfrom anmichelpers.tools.tools import norm\nfrom math import exp\n\n# For all measures see http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2350220/\n\ndef overlap(mode_i, mode_j):\n \"\"\"\n Measure of the similarity between 2 modes.\n Overlap value is in the range [0,1] (1 means maximum overlap / similarity)\n Tama and Sanejouand 2001. Conformational change of proteins arising from normal mode calculations.\n \"\"\"\n return numpy.abs(numpy.dot(mode_i, mode_j)) / (tools.norm(mode_i) * tools.norm(mode_j))\n# return numpy.abs(numpy.dot(mode_i/tools.norm(mode_i), mode_j/tools.norm(mode_j))) / (tools.norm(mode_i) * tools.norm(mode_j))\n\ndef cumulative_overlap(mode, mode_range):\n \"\"\"\n Measure of similarity between one mode and a range of modes.\n \n A value of 1 means a perfect match.\n \"\"\"\n cum_overlap = 0\n for i in range(len(mode_range)):\n o = overlap(mode, mode_range[i])\n cum_overlap += o*o\n return math.sqrt(cum_overlap)\n\ndef rmsip(first_mode_range, second_mode_range):\n \"\"\"\n Root mean square inner product. \n Indicates how well the motion space spanned by the first range of modes is represented by the\n second range of modes.\n \n \"An Analysis of Core Deformations in Protein Superfamilies\" Alejandra Leo-Macias,\n Pedro Lopez-Romero, Dmitry Lupyan, Daniel Zerbino and Angel R. Ortiz\n Biophys J. 2005 Feb; 88(2): 1291-1299.\n \n Must vary between 0 and 1\n http://www.biomedcentral.com/1471-2105/15/399\n Also: http://www.biomedcentral.com/1471-2105/14/183\n \"\"\"\n D = len(first_mode_range)\n K = len(second_mode_range)\n rmsip_val = 0.\n for i in range(D):\n for j in range(K):\n ovp = overlap(first_mode_range[i], second_mode_range[j])\n rmsip_val += ovp*ovp\n return math.sqrt(rmsip_val / float(D))\n\ndef degree_of_collectivity(mode, normalize = False):\n \"\"\"\n http://peds.oxfordjournals.org/content/14/1/1.long\n Tama and Sanejouand 2001. Conformational change of proteins arising from normal mode calculations.\n\n From th article:\n A measure of how collective a protein motion is was proposed by Bruschweiler (1995). In the present study, it was used in order \n to estimate the degree of collectivity of each conformational change considered, reflecting the number of atoms which are \n significantly affected during the conformational change. This degree of collectivity, k, is defined as being proportional to \n the exponential of the 'information entropy' embedded in vector inc_R.\n It is confined to the interval between N^-1 and 1. If k = 1, the conformational change is maximally collective.\n \n \n Bruschweiler (1995): http://scitation.aip.org/content/aip/journal/jcp/102/8/10.1063/1.469213\n \n \n \\kappa = \\frac{1}{N} exp \\left( -\\sum^N_i \\alpha \\Delta R_i^2 log \\left( \\alpha \\Delta R_i^2 \\right) \\right)\n \"\"\"\n\n # Calculate \"displacements of the mode\"\n R = numpy.reshape(mode,(len(mode)/3,3))\n N = len(R)\n# print \"Degree of collectivity. Max. value: 1 Min. value: \",1./N\n inc_R = norm(R)\n # Calculate alpha\n sum_ri_sq = numpy.dot(inc_R, inc_R)\n alpha = 1./sum_ri_sq\n # Calculate the degree of collectivity\n alpha_ri2 = alpha*(inc_R*inc_R)\n log_alpha_ri2 = numpy.log(alpha_ri2)\n k = (1./N)*exp(-(alpha_ri2*log_alpha_ri2).sum())\n if not normalize:\n return k\n else:\n min_val = 1./N\n return (k-min_val) / (1-min_val)\n\n \n# a = [1,2,3,4,5,6] -> degree_of_collectivity = 0.76810859305 (range: 1, 0.5)\n# normed -> 0.5362171861\n"
] | [
[
"numpy.dot",
"numpy.log"
]
] |
1alexandra/collage | [
"671ca1713d8e9f74faae9e824552a03c253adad0"
] | [
"src/CornerCreator.py"
] | [
"import numpy as np\nfrom PIL import Image, ImageFilter\n\n\nclass CornerCreator:\n \"\"\"Create corners with a given curvature from ``0`` to ``1``.\n\n Corners size is defined by ``corner_width``.\n Type of corners are defined by ``corner_curvature``:\n\n - ``0``: no corners,\n - from ``0`` to ``0.5``: hyperbolic convex corners,\n - ``0.5``: linear corners,\n - from ``0.5`` to ``1``: hyperbolic concave corners,\n - ``1``: square corner.\n\n \"\"\"\n def __init__(self, corner_width, corner_curvature):\n self.Width = corner_width\n self.Curve = corner_curvature\n\n Width = property(doc=\"Defines corner size (``Width x Width``).\")\n Curve = property(doc=\"Corner curvature from ``0`` to ``1``.\")\n\n @Width.getter\n def Width(self):\n return self._width\n\n @Width.setter\n def Width(self, value):\n self._width = max(value, 0)\n\n @Curve.getter\n def Curve(self):\n return self._curve\n\n @Curve.setter\n def Curve(self, value):\n self._curve = max(min(value, 1), 0)\n\n def linear(self, x):\n \"\"\"Linear ``L(x)``.\n\n - ``L(0) = Width``,\n - ``L(Width * Curve) = Width * Curve``,\n - ``L(Width) = 0``.\n\n \"\"\"\n if self.Curve == 0:\n return 0\n if self.Curve == 0.5:\n return self.Width - x\n if self.Curve == 1:\n return self.Width\n\n def hyperbole(self, x):\n \"\"\"Hyperbolic ``h(x)``.\n\n - ``h(0) = Width``,\n - ``h(Width * Curve) = Width * Curve``,\n - ``h(Width) = 0``.\n \"\"\"\n c = self.Width\n z = self.Curve\n a = z * z * c / (1 - 2 * z)\n k = a * (a + c)\n b = - k / (a + c)\n return k / (x + a) + b\n\n def corner_function(self, x):\n \"\"\"Calculate lenght of corner line in ``x`` row.\"\"\"\n if not (0 <= x <= self.Width):\n return 0\n if self.Curve in [0, 0.5, 1]:\n return self.linear(x)\n return self.hyperbole(x)\n\n def get_corner(self):\n \"\"\"Return boolean array with (``Width x Widht``) corner.\"\"\"\n r = self.Width\n corner = np.ones((r, r))\n for i in range(r):\n cols = np.round(self.corner_function(i))\n corner[i, :int(cols)] = False\n return np.logical_or(corner, corner.T)\n\n def apply_corner(self, arr, corner):\n \"\"\"Apply ``corner`` mask to ``arr`` corners with a correct rotation.\"\"\"\n r = self.Width\n\n arr[:r, :r] = np.logical_and(arr[:r, :r], corner)\n corner = np.rot90(corner)\n\n arr[-r:, :r] = np.logical_and(arr[-r:, :r], corner)\n corner = np.rot90(corner)\n\n arr[-r:, -r:] = np.logical_and(arr[-r:, -r:], corner)\n corner = np.rot90(corner)\n\n arr[:r, -r:] = np.logical_and(arr[:r, -r:], corner)\n corner = np.rot90(corner)\n\n return arr\n\n def smooth_boundary(self, mask):\n \"\"\"Put zeros to the boundary points of ``mask``.\"\"\"\n mask[0] = 0\n mask[-1] = 0\n mask[:, 0] = 0\n mask[:, -1] = 0\n\n def get_alpha(self, size):\n \"\"\"Return ``PIL Image`` alpha channel with 0 in corners and boundary.\n\n If ``size < 2.1 Width``, the corners don't appear.\"\"\"\n h, w = size\n if w <= 0 or h <= 0:\n return np.array([])\n mask = np.ones((w, h), dtype=bool)\n self.smooth_boundary(mask)\n minsize = 2.1 * self.Width\n if self.Width > 0 and min(w, h) >= minsize:\n corner = self.get_corner()\n self.apply_corner(mask, corner)\n alpha = mask.astype(np.uint8) * 255\n alpha = Image.fromarray(alpha, mode=\"L\")\n return alpha.filter(ImageFilter.GaussianBlur(3))\n"
] | [
[
"numpy.ones",
"numpy.logical_or",
"numpy.logical_and",
"numpy.rot90",
"numpy.array"
]
] |
dkim319/NFL_Predictive_Model_v2 | [
"5884e10a681e2e34f54a2280c94d2f42fc442d17"
] | [
"2 - Machine Learning/4_New Model - XGBoost - Final Model.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 14 20:21:23 2017\n\n@author: DKIM\n\"\"\"\n\n#Run 3b- Supporting Functions \nimport pandas as pd\nimport numpy as np\nimport xgboost as xgb\nfrom sklearn import feature_selection\n\nseednumber = 319\n\n# load data\n\nstart_year = 2014\ntarget_year = 2018\n\ndata_train, data_test, features_train, target_train, features_test, target_test = load_data(start_year, target_year)\n\n# start timer to capture the amount of time taken to process this python file\nfrom datetime import datetime\ntimestart = datetime.now()\n\n# Optimized Parameters\nest = 200\nlr = 0.3\ndepth = 9\nsubsample = 0.9\ncolsample_bytree = 0.85\n\n# Optimized Feature Selection\n# feature select based on percentile\nfs_results = feature_select_per(features_train, target_train, features_test, target_test, est, lr, depth, subsample, colsample_bytree)\nfs_results = fs_results.sort_values(by='acc_test', ascending=False).reset_index()\n\n# select the best percentile\nper = fs_results['per'][0]\n\nfs = feature_selection.SelectPercentile(feature_selection.f_classif, percentile = per)\nfeature_model = fs.fit(features_train,target_train)\n\nfeatures_train_new = feature_model.transform(features_train)\nfeatures_test_new = feature_model.transform(features_test)\n\nxgb = xgboost.XGBClassifier(n_estimators=est, learning_rate=lr, gamma=0, subsample=subsample,\n colsample_bytree=colsample_bytree, max_depth=depth)\n\nxgb.fit(features_train_new, target_train)\npred_test = xgb.predict(features_test_new)\npred_train = xgb.predict(features_train_new)\n\npredictions_train = [round(value) for value in pred_train]\npredictions_test = [round(value) for value in pred_test]\n\ntrain_accuracy = accuracy_score(target_train, predictions_train)\ntest_accuracy = accuracy_score(target_test, predictions_test)\n\nprint (train_accuracy)\nprint (test_accuracy)\n\npred_df1 = pd.DataFrame(predictions_test) \npred_df1_raw = pd.DataFrame(pred_test)\n\ndata_test = data_test.reset_index()\npred_df1 = pred_df1.reset_index()\npred_df1_raw = pred_df1_raw.reset_index()\n\ndata_test['predictions - Percentile'] = pred_df1[0]\ndata_test['predictions - Raw'] = pred_df1_raw[0]\ndata_test.to_csv('predictions for - ' + str(per) + ' - ' + str(start_year) + ' - ' + str(target_year) +'.csv') \n \n# stop the timer and print out the duration\nprint (datetime.now() - timestart)"
] | [
[
"pandas.DataFrame",
"sklearn.feature_selection.SelectPercentile"
]
] |
axelbr/dreamer | [
"775cd521b12d7c8f753e790844285d933e460234"
] | [
"plotting/plot_laptime_evaluation.py"
] | [
"import argparse\nimport time\nimport pathlib\nimport warnings\nfrom datetime import datetime\n\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\n\nfrom plotting.aggregators import MeanStd, MeanMinMax\nfrom plotting.log_parsers import EvaluationParser\nfrom plotting.plot_test_evaluation import plot_error_bar\nfrom plotting.structs import LONG_TRACKS_DICT, ALL_METHODS_DICT, SHORT_TRACKS_DICT, PALETTE\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom plotting.utils import parse_file, check_track, check_method, get_tf_data, Run\n\n\ndef load_filtered_runs(args, file_parsers, tag, filter_tag, threshold):\n runs = []\n for dir in args.indir:\n print(f'Loading runs from {dir}', end='')\n for file in dir.glob('**/events*'):\n try:\n train_track, method, seed = parse_file(dir, file, file_parsers)\n if not check_track(train_track, args.tracks) or not check_method(method, args.methods):\n continue\n event_acc = EventAccumulator(str(file), size_guidance={'scalars': 100000,\n 'tensors': 100000}) # max number of items to keep\n event_acc.Reload()\n except Warning as w:\n warnings.warn(w)\n continue\n except Exception as err:\n print(f'Error {file}: {err}')\n continue\n for test_track in args.tracks:\n _, y = get_tf_data(event_acc, method, tag=f'{test_track}/{tag}')\n x, f = get_tf_data(event_acc, method, tag=f'{test_track}/{filter_tag}')\n if args.first_n_models is not None:\n eval_episodes = 10\n x = x[:eval_episodes * args.first_n_models] # to make uniform the eval, consider the same n of models\n y = y[:eval_episodes * args.first_n_models] # for each algorithm\n f = f[:eval_episodes * args.first_n_models] # for each algorithm\n x = x[np.nonzero(f > threshold)]\n y = y[np.nonzero(f > threshold)]\n if x.shape[0] > 0 and y.shape[0] > 0:\n runs.append(Run(file, train_track, test_track.replace(\"_\", \"\"), method, seed, x, y))\n print('.', end='')\n print()\n return runs\n\ndef main(args):\n tag = \"time\"\n args.ylabel = args.ylabel if args.ylabel != \"\" else tag\n runs = load_filtered_runs(args, [EvaluationParser()], tag='time', filter_tag='progress', threshold=0.95)\n train_tracks = sorted(set([r.train_track for r in runs if r.train_track!=\"\"]))\n args.outdir.mkdir(parents=True, exist_ok=True)\n timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n for aggregator, fn in zip(['mean_minmax'], [MeanMinMax()]):\n fig, axes = plt.subplots(1, len(train_tracks), figsize=(4 * len(train_tracks), 3))\n # todo move loop on train tracks in plot error bar\n for i, (train_track, ax) in enumerate(zip(train_tracks, axes)):\n filter_runs = [r for r in runs if r.train_track == train_track or r.train_track==\"\"]\n plot_error_bar(args, filter_runs, ax, aggregator=fn)\n if args.legend:\n if not type(axes) == np.ndarray: # in case of fig with a single axis\n axes = [axes]\n handles, labels = axes[-1].get_legend_handles_labels()\n fig.legend(handles, labels, loc='lower center', ncol=len(labels), framealpha=1.0, handletextpad=0.1)\n filename = f'time_' + '_'.join(train_tracks) + f'_{aggregator}_{timestamp}.png'\n fig.tight_layout(pad=2.5)\n fig.savefig(args.outdir / filename)\n print(f\"[Info] Written {args.outdir / filename}\")\n\n\ndef parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('--indir', nargs='+', type=pathlib.Path, required=True)\n parser.add_argument('--outdir', type=pathlib.Path, required=True)\n parser.add_argument('--xlabel', type=str, default=\"\")\n parser.add_argument('--ylabel', type=str, default=\"\")\n 
parser.add_argument('--legend', action='store_true')\n parser.add_argument('--show_labels', action='store_true')\n parser.add_argument('--tracks', nargs='+', type=str, default=LONG_TRACKS_DICT.keys())\n parser.add_argument('--vis_tracks', nargs='+', type=str, default=LONG_TRACKS_DICT.keys())\n parser.add_argument('--methods', nargs='+', type=str, default=ALL_METHODS_DICT.keys())\n parser.add_argument('--first_n_models', type=int, required=None, default=2)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n init = time.time()\n main(parse())\n print(f\"\\n[Info] Elapsed Time: {time.time() - init:.3f} seconds\")\n"
] | [
[
"numpy.nonzero"
]
] |
Data-to-Knowledge/ConsentsReporting | [
"d5152699a26860ea3ff63093ef11bd440a812e0f"
] | [
"process_data.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 11:41:44 2018\n\n@author: MichaelEK\n\"\"\"\nimport os\nimport argparse\nimport types\nimport pandas as pd\nimport numpy as np\nfrom pdsql import mssql\nfrom datetime import datetime\nimport yaml\nimport itertools\nimport lowflows as lf\nimport util\n\npd.options.display.max_columns = 10\nrun_time_start = datetime.today().strftime('%Y-%m-%d %H:%M:%S')\nprint(run_time_start)\n\ntry:\n\n #####################################\n ### Read parameters file\n\n base_dir = os.path.realpath(os.path.dirname(__file__))\n\n with open(os.path.join(base_dir, 'parameters-test.yml')) as param:\n param = yaml.safe_load(param)\n\n # parser = argparse.ArgumentParser()\n # parser.add_argument('yaml_path')\n # args = parser.parse_args()\n #\n # with open(args.yaml_path) as param:\n # param = yaml.safe_load(param)\n\n ## Integrety checks\n use_types_check = np.in1d(list(param['misc']['use_types_codes'].keys()), param['misc']['use_types_priorities']).all()\n\n if not use_types_check:\n raise ValueError('use_type_priorities parameter does not encompass all of the use type categories. Please fix the parameters file.')\n\n\n #####################################\n ### Read the hydro log\n\n# max_date_stmt = \"select max(RunTimeStart) from \" + param.log_table + \" where HydroTable='\" + param.process_name + \"' and RunResult='pass' and ExtSystem='\" + param.ext_system + \"'\"\n#\n# last_date1 = mssql.rd_sql(server=param.hydro_server, database=param.hydro_database, stmt=max_date_stmt).loc[0][0]\n#\n# if last_date1 is None:\n# last_date1 = '1900-01-01'\n# else:\n# last_date1 = str(last_date1.date())\n#\n# print('Last sucessful date is ' + last_date1)\n\n #######################################\n ### Read in source data and update accela tables in ConsentsReporting db\n print('--Reading in source data...')\n\n ## Make object to contain the source data\n db = types.SimpleNamespace()\n\n for i, p in param['source data'].items():\n setattr(db, i, mssql.rd_sql(p['server'], p['database'], p['table'], p['col_names'], rename_cols=p['rename_cols'], username=p['username'], password=p['password']))\n if (p['database'] == 'Accela') & (not (p['table'] in ['Ecan.vAct_Water_AssociatedPermits', 'Ecan.vQA_Relationship_Actuals'])):\n table1 = 'Accela.' 
+ p['table'].split('Ecan.')[1]\n print(table1)\n t1 = getattr(db, i).copy().dropna(subset=p['pk'])\n t1.drop_duplicates(p['pk'], inplace=True)\n print('update in db')\n new_ones, _ = mssql.update_from_difference(t1, param['output']['server'], param['output']['database'], table1, on=p['pk'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n\n ######################################\n ### Populate base tables\n print('--Update base tables')\n\n ## HydroGroup\n hf1 = pd.DataFrame(param['misc']['HydroGroup'])\n hf1['ModifiedDate'] = run_time_start\n\n hf0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])\n\n hf_diff1 = hf1[~hf1.HydroGroup.isin(hf0.HydroGroup)]\n\n if not hf_diff1.empty:\n mssql.to_mssql(hf_diff1, param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])\n hf0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'HydroGroup', username=param['output']['username'], password=param['output']['password'])\n\n ## Activity\n act1 = param['misc']['Activities']['ActivityType']\n act2 = pd.DataFrame(list(itertools.product(act1, hf0.HydroGroupID.tolist())), columns=['ActivityType', 'HydroGroupID'])\n\n act2['ModifiedDate'] = run_time_start\n\n act0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])\n\n act_diff1 = act2[~act2[['ActivityType', 'HydroGroupID']].isin(act0[['ActivityType', 'HydroGroupID']]).any(axis=1)]\n\n if not act_diff1.empty:\n mssql.to_mssql(act_diff1, param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])\n act0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Activity', username=param['output']['username'], password=param['output']['password'])\n\n # Combine activity and hydro features\n act_types1 = pd.merge(act0[['ActivityID', 'ActivityType', 'HydroGroupID']], hf0[['HydroGroupID', 'HydroGroup']], on='HydroGroupID')\n act_types1['ActivityName'] = act_types1['ActivityType'] + ' ' + act_types1['HydroGroup']\n\n ## AlloBlock\n ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])\n\n sw_blocks1 = pd.Series(db.wap_allo['sw_allo_block'].unique())\n gw_blocks1 = pd.Series(db.allocated_volume['allo_block'].unique())\n\n # Fixes\n wap_allo1 = db.wap_allo.copy()\n wap_allo1['sw_allo_block'] = wap_allo1['sw_allo_block'].str.strip()\n wap_allo1.loc[wap_allo1.sw_allo_block == 'Migration: Not Classified', 'sw_allo_block'] = 'A'\n\n allo_vol1 = db.allocated_volume.copy()\n allo_vol1['allo_block'] = allo_vol1['allo_block'].str.strip()\n allo_vol1.loc[allo_vol1.allo_block == 'Migration: Not Classified', 'allo_block'] = 'A'\n\n # Determine blocks and what needs to be added\n sw_blocks1 = set(wap_allo1['sw_allo_block'].unique())\n gw_blocks1 = set(allo_vol1['allo_block'].unique())\n\n blocks1 = sw_blocks1.union(gw_blocks1)\n\n ab1 = pd.DataFrame(list(itertools.product(blocks1, hf0.HydroGroupID.tolist())), columns=['AllocationBlock', 'HydroGroupID'])\n\n ab1['ModifiedDate'] = run_time_start\n\n ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', 
username=param['output']['username'], password=param['output']['password'])\n\n ab_diff1 = ab1[~ab1[['AllocationBlock', 'HydroGroupID']].isin(ab0[['AllocationBlock', 'HydroGroupID']]).any(axis=1)]\n\n if not ab_diff1.empty:\n mssql.to_mssql(ab_diff1, param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])\n ab0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'AlloBlock', username=param['output']['username'], password=param['output']['password'])\n\n # Combine alloblock and hydro features\n ab_types1 = pd.merge(ab0[['AlloBlockID', 'AllocationBlock', 'HydroGroupID']], hf0[['HydroGroupID', 'HydroGroup']], on='HydroGroupID').drop('HydroGroupID', axis=1)\n\n ## Attributes\n att1 = pd.DataFrame(param['misc']['Attributes'])\n att1['ModifiedDate'] = run_time_start\n\n att0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])\n\n att_diff1 = att1[~att1.Attribute.isin(att0.Attribute)]\n\n if not att_diff1.empty:\n mssql.to_mssql(att_diff1, param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])\n att0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', username=param['output']['username'], password=param['output']['password'])\n\n ##################################################\n ### Sites and streamdepletion\n print('--Update sites tables')\n\n ## takes\n wap_allo1['WAP'] = wap_allo1['WAP'].str.strip().str.upper()\n wap_allo1.loc[~wap_allo1.WAP.str.contains('[A-Z]+\\d\\d/\\d\\d\\d\\d'), 'WAP'] = np.nan\n wap1 = wap_allo1['WAP'].unique()\n wap1 = wap1[~pd.isnull(wap1)]\n\n ## Diverts\n div1 = db.divert.copy()\n div1['WAP'] = div1['WAP'].str.strip().str.upper()\n div1.loc[~div1.WAP.str.contains('[A-Z]+\\d\\d/\\d\\d\\d\\d'), 'WAP'] = np.nan\n wap2 = div1['WAP'].unique()\n wap2 = wap2[~pd.isnull(wap2)]\n\n ## Combo\n waps = np.concatenate((wap1, wap2), axis=None)\n\n ## Check that all WAPs exist in the USM sites table\n usm_waps1 = db.sites[db.sites.ExtSiteID.isin(waps)].copy()\n usm_waps1[['NZTMX', 'NZTMY']] = usm_waps1[['NZTMX', 'NZTMY']].astype(int)\n\n if len(wap1) != len(usm_waps1):\n miss_waps = set(wap1).difference(set(usm_waps1.ExtSiteID))\n print('Missing {} WAPs in USM'.format(len(miss_waps)))\n wap_allo1 = wap_allo1[~wap_allo1.WAP.isin(miss_waps)].copy()\n\n ## Update ConsentsSites table\n cs1 = usm_waps1[['ExtSiteID', 'SiteName']].copy()\n# cs1['SiteType'] = 'WAP'\n\n new_sites, _ = mssql.update_from_difference(cs1, param['output']['server'], param['output']['database'], 'ConsentsSites', on='ExtSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentsSites', 'pass', '{} sites updated'.format(len(new_sites)), username=param['output']['username'], password=param['output']['password'])\n\n cs0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'ConsentsSites', ['SiteID', 'ExtSiteID'], username=param['output']['username'], password=param['output']['password'])\n cs_waps2 = pd.merge(cs0, usm_waps1.drop('SiteName', axis=1), on='ExtSiteID')\n cs_waps3 = pd.merge(cs_waps2, db.wap_sd, on='ExtSiteID').drop('ExtSiteID', axis=1).round()\n\n new_waps, _ = 
mssql.update_from_difference(cs_waps3, param['output']['server'], param['output']['database'], 'SiteStreamDepletion', on='SiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'WAP', 'pass', '{} sites updated'.format(len(new_waps)), username=param['output']['username'], password=param['output']['password'])\n\n ## Read db table\n# wap0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'SiteStreamDepletion')\n\n ## Make linked WAP-SiteID table\n wap_site = cs0.rename(columns={'ExtSiteID': 'WAP'})\n\n ##################################################\n ### Permit table\n print('--Update Permit table')\n\n ## Clean data\n permits1 = db.permit.copy()\n permits1['RecordNumber'] = permits1['RecordNumber'].str.strip().str.upper()\n permits1['ConsentStatus'] = permits1['ConsentStatus'].str.strip()\n permits1['EcanID'] = permits1['EcanID'].str.strip().str.upper()\n\n permits1['FromDate'] = pd.to_datetime(permits1['FromDate'], infer_datetime_format=True, errors='coerce')\n permits1['ToDate'] = pd.to_datetime(permits1['ToDate'], infer_datetime_format=True, errors='coerce')\n\n permits1.loc[permits1['ConsentStatus'] == 'Issued - s124 Continuance', 'ToDate'] = permits1.loc[permits1['ConsentStatus'] == 'Issued - s124 Continuance', 'FromDate'] + pd.DateOffset(years=30)\n\n permits1[['NZTMX', 'NZTMY']] = permits1[['NZTMX', 'NZTMY']].round()\n\n permits1.loc[(permits1['FromDate'] < '1950-01-01'), 'FromDate'] = np.nan\n permits1.loc[(permits1['ToDate'] < '1950-01-01'), 'ToDate'] = np.nan\n\n ## Filter data\n permits2 = permits1.drop_duplicates('RecordNumber')\n permits2 = permits2[permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()\n# permits2 = permits2[(permits2['FromDate'] > '1950-01-01') & (permits2['ToDate'] > '1950-01-01') & (permits2['ToDate'] > permits2['FromDate']) & permits2.NZTMX.notnull() & permits2.NZTMY.notnull() & permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()\n\n ## Convert datetimes to date\n permits2['FromDate'] = permits2['FromDate'].dt.date\n permits2['ToDate'] = permits2['ToDate'].dt.date\n permits2.loc[permits2['FromDate'].isnull(), 'FromDate'] = '1900-01-01'\n permits2.loc[permits2['ToDate'].isnull(), 'ToDate'] = '1900-01-01'\n\n ## Save results\n new_permits, _ = mssql.update_from_difference(permits2, param['output']['server'], param['output']['database'], 'Permit', on='RecordNumber', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'Permit', 'pass', '{} rows updated'.format(len(new_permits)), username=param['output']['username'], password=param['output']['password'])\n\n ## Read db table\n permits0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Permit', username=param['output']['username'], password=param['output']['password'])\n\n ##################################################\n ### Parent-Child\n print('--Update Parent-child table')\n\n ## Clean data\n pc1 = db.parent_child.copy()\n pc1['ParentRecordNumber'] = pc1['ParentRecordNumber'].str.strip().str.upper()\n pc1['ChildRecordNumber'] = pc1['ChildRecordNumber'].str.strip().str.upper()\n pc1['ParentCategory'] = 
pc1['ParentCategory'].str.strip()\n pc1['ChildCategory'] = pc1['ChildCategory'].str.strip()\n\n ## Filter data\n pc1 = pc1.drop_duplicates()\n pc1 = pc1[pc1['ParentRecordNumber'].notnull() & pc1['ChildRecordNumber'].notnull()]\n\n ## Check foreign keys\n crc1 = permits0.RecordNumber.unique()\n pc2 = pc1[pc1.ParentRecordNumber.isin(crc1) & pc1.ChildRecordNumber.isin(crc1)].copy()\n\n ## Save results\n new_pc, _ = mssql.update_from_difference(pc2, param['output']['server'], param['output']['database'], 'ParentChild', on=['ParentRecordNumber', 'ChildRecordNumber'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ParentChild', 'pass', '{} rows updated'.format(len(new_pc)), username=param['output']['username'], password=param['output']['password'])\n\n ## Read db table\n pc0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'ParentChild', username=param['output']['username'], password=param['output']['password'])\n\n #################################################\n ### AllocatedRatesVolumes\n print('--Update Allocation tables')\n\n attr1 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'Attributes', ['AttributeID', 'Attribute'], username=param['output']['username'], password=param['output']['password'])\n\n ## Rates\n # Clean data\n wa1 = wap_allo1.copy()\n wa1['RecordNumber'] = wa1['RecordNumber'].str.strip().str.upper()\n wa1['take_type'] = wa1['take_type'].str.strip().str.title()\n wa1['FromMonth'] = wa1['FromMonth'].str.strip().str.title()\n wa1['ToMonth'] = wa1['ToMonth'].str.strip().str.title()\n wa1['IncludeInSwAllocation'] = wa1['IncludeInSwAllocation'].str.strip().str.title()\n\n wa1['AllocatedRate'] = pd.to_numeric(wa1['AllocatedRate'], errors='coerce').round(2)\n wa1['WapRate'] = pd.to_numeric(wa1['WapRate'], errors='coerce').round(2)\n wa1['VolumeDaily'] = pd.to_numeric(wa1['VolumeDaily'], errors='coerce').astype(int)\n wa1['VolumeWeekly'] = pd.to_numeric(wa1['VolumeWeekly'], errors='coerce').astype(int)\n wa1['Volume150Day'] = pd.to_numeric(wa1['Volume150Day'], errors='coerce').astype(int)\n\n wa1.loc[wa1['FromMonth'] == 'Migration: Not Classified', 'FromMonth'] = 'Jul'\n wa1.loc[wa1['ToMonth'] == 'Migration: Not Classified', 'ToMonth'] = 'Jun'\n mon_mapping = {'Jan': 7, 'Feb': 8, 'Mar': 9, 'Apr': 10, 'May': 11, 'Jun': 12, 'Jul': 1, 'Aug': 2, 'Sep': 3, 'Oct': 4, 'Nov': 5, 'Dec': 6}\n wa1.replace({'FromMonth': mon_mapping, 'ToMonth': mon_mapping}, inplace=True)\n\n wa1.loc[wa1['IncludeInSwAllocation'] == 'No', 'IncludeInSwAllocation'] = False\n wa1.loc[wa1['IncludeInSwAllocation'] == 'Yes', 'IncludeInSwAllocation'] = True\n\n wa1.replace({'sw_allo_block': {'In Waitaki': 'A'}}, inplace=True)\n\n # Check foreign keys\n wa4 = wa1[wa1.RecordNumber.isin(crc1)].copy()\n\n # Filters\n# wa4 = wa2[(wa2.AllocatedRate > 0)].copy()\n# wa3.loc[~wa3['IncludeInSwAllocation'], ['AllocatedRate', 'SD1', 'SD2']] = 0\n# wa4 = wa3.drop('IncludeInSwAllocation', axis=1).copy()\n\n # Find the missing WAPs per consent\n crc_wap_mis1 = wa4.loc[wa4.WAP.isnull(), 'RecordNumber'].unique()\n crc_wap4 = wa4[['RecordNumber', 'WAP']].drop_duplicates()\n\n for i in crc_wap_mis1:\n crc2 = pc0[np.in1d(pc0.ParentRecordNumber, i)].ChildRecordNumber.values\n wap1 = []\n while (len(crc2) > 0) & (len(wap1) == 0):\n wap1 = crc_wap4.loc[np.in1d(crc_wap4.RecordNumber, crc2), 'WAP'].values\n crc2 = 
pc0[np.in1d(pc0.ParentRecordNumber, crc2)].ChildRecordNumber.values\n if len(wap1) > 0:\n wa4.loc[wa4.RecordNumber == i, 'WAP'] = wap1[0]\n\n wa4 = wa4[wa4.WAP.notnull()].copy()\n wa4.rename(columns={'sw_allo_block': 'AllocationBlock'}, inplace=True)\n\n # Distribute the months\n cols1 = wa4.columns.tolist()\n from_mon_pos = cols1.index('FromMonth')\n to_mon_pos = cols1.index('ToMonth')\n\n allo_rates_list = []\n# c1 = 0\n for val in wa4.itertuples(False, None):\n from_month = int(val[from_mon_pos])\n to_month = int(val[to_mon_pos])\n if from_month > to_month:\n mons = list(range(1, to_month + 1))\n# c1 = c1 + 1\n else:\n mons = range(from_month, to_month + 1)\n d1 = [val + (i,) for i in mons]\n allo_rates_list.extend(d1)\n col_names1 = wa4.columns.tolist()\n col_names1.extend(['Month'])\n wa5 = pd.DataFrame(allo_rates_list, columns=col_names1).drop(['FromMonth', 'ToMonth'], axis=1)\n\n # Mean of all months\n grp1 = wa5.groupby(['RecordNumber', 'take_type', 'AllocationBlock', 'WAP'])\n mean1 = grp1[['WapRate', 'AllocatedRate', 'VolumeDaily', 'VolumeWeekly', 'Volume30Day', 'Volume150Day', 'SD1', 'SD2']].mean().round(2)\n include1 = grp1['IncludeInSwAllocation'].first()\n mon_min = grp1['Month'].min()\n mon_min.name = 'FromMonth'\n mon_max = grp1['Month'].max()\n mon_max.name = 'ToMonth'\n wa6 = pd.concat([mean1, mon_min, mon_max, include1], axis=1).reset_index()\n # wa6['HydroGroup'] = 'Surface Water'\n\n ## Allocated Volume\n av1 = allo_vol1.copy()\n\n # clean data\n av1['RecordNumber'] = av1['RecordNumber'].str.strip().str.upper()\n av1['take_type'] = av1['take_type'].str.strip().str.title()\n av1['IncludeInGwAllocation'] = av1['IncludeInGwAllocation'].str.strip().str.title()\n av1.loc[av1['IncludeInGwAllocation'] == 'No', 'IncludeInGwAllocation'] = False\n av1.loc[av1['IncludeInGwAllocation'] == 'Yes', 'IncludeInGwAllocation'] = True\n av1['IncludeInGwAllocation'] = av1['IncludeInGwAllocation'].astype(bool)\n# av1['AllocatedAnnualVolume'] = pd.to_numeric(av1['AllocatedAnnualVolume'], errors='coerce').astype(int)\n av1['FullAnnualVolume'] = pd.to_numeric(av1['FullAnnualVolume'], errors='coerce').astype(int)\n# av1.loc[av1['AllocatedAnnualVolume'] <= 0, 'AllocatedAnnualVolume'] = 0\n# av1 = av1.loc[av1['AllocatedAnnualVolume'] > 0]\n av1.rename(columns={'allo_block': 'AllocationBlock'}, inplace=True)\n av1.drop('AllocatedAnnualVolume', axis=1, inplace=True)\n av1.replace({'AllocationBlock': {'In Waitaki': 'A'}}, inplace=True)\n av1.drop_duplicates(subset=['RecordNumber', 'take_type', 'AllocationBlock'], inplace=True)\n\n ## Combine volumes with rates\n wa7 = pd.merge(av1, wa6, on=['RecordNumber', 'take_type', 'AllocationBlock'])\n\n ## Distribute the volumes by WapRate\n wa8 = wa7.copy()\n\n grp3 = wa8.groupby(['RecordNumber', 'take_type', 'AllocationBlock'])\n wa8['WapRateAgg'] = grp3['WapRate'].transform('sum')\n wa8['ratio'] = wa8['WapRate'] / wa8['WapRateAgg']\n wa8.loc[wa8['ratio'].isnull(), 'ratio'] = 1\n wa8['FullAnnualVolume'] = (wa8['FullAnnualVolume'] * wa8['ratio']).round()\n wa8.drop(['WapRateAgg', 'ratio', 'VolumeDaily', 'VolumeWeekly', 'Volume30Day', 'Volume150Day', 'SD2', 'WapRate'], axis=1, inplace=True)\n wa8 = wa8[wa8.FullAnnualVolume >= 0].copy()\n\n ## Add in stream depletion\n # wa9 = pd.merge(wa8, db.wap_sd.rename(columns={'ExtSiteID': 'WAP'}), on='WAP').drop(['SD1_NZTMX', 'SD1_NZTMY', 'SD1_30Day', 'SD2_NZTMX', 'SD2_NZTMY', 'SD2_7Day', 'SD2_30Day', 'SD2_150Day', 'SD1', 'SD2'], axis=1)\n #\n # wa9['SD1_7Day'] = pd.to_numeric(wa9['SD1_7Day'], errors='coerce').round(0)\n 
# wa9['SD1_150Day'] = pd.to_numeric(wa9['SD1_150Day'], errors='coerce').round(0)\n\n ## Combine with aquifer test storativity\n # aq1 = db.wap_aquifer_test.dropna(subset=['storativity']).copy()\n # aq1.rename(columns={'ExtSiteID': 'WAP'}, inplace=True)\n # aq2 = aq1.groupby('WAP')['storativity'].mean().dropna().reset_index()\n # aq2.storativity = True\n #\n # wa9 = pd.merge(wa9, aq2, on='WAP', how='left')\n # wa9.loc[wa9.storativity.isnull(), 'storativity'] = False\n\n ## Distribute the rates and volumes by allocation hydro group\n wa8['sw_rate'] = 0\n wa8['gw_rate'] = 0\n wa8['sw_vol'] = 0\n wa8['gw_vol'] = 0\n\n wa8.loc[wa8.take_type == 'Take Surface Water', 'sw_rate'] = wa8.loc[wa8.take_type == 'Take Surface Water', 'AllocatedRate']\n wa8.loc[wa8.take_type == 'Take Groundwater', 'sw_rate'] = wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']\n wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_rate'] = wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate'] - wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']\n wa8.loc[wa8.take_type == 'Take Surface Water', 'sw_vol'] = wa8.loc[wa8.take_type == 'Take Surface Water', 'FullAnnualVolume']\n wa8.loc[wa8.take_type == 'Take Groundwater', 'sw_vol'] = (wa8.loc[wa8.take_type == 'Take Groundwater', 'SD1']/wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate']) * wa8.loc[wa8.take_type == 'Take Groundwater', 'FullAnnualVolume']\n wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_vol'] = (wa8.loc[wa8.take_type == 'Take Groundwater', 'gw_rate']/wa8.loc[wa8.take_type == 'Take Groundwater', 'AllocatedRate']) * wa8.loc[wa8.take_type == 'Take Groundwater', 'FullAnnualVolume']\n\n allo_list = []\n for k, row in wa8.iterrows():\n # print(k)\n if row['IncludeInSwAllocation']:\n sw1 = row[['RecordNumber', 'AllocationBlock', 'WAP', 'FromMonth', 'ToMonth', 'sw_rate', 'sw_vol']].rename({'sw_rate': 'AllocatedRate', 'sw_vol': 'AllocatedAnnualVolume'})\n sw1['HydroGroup'] = 'Surface Water'\n allo_list.append(sw1.to_frame().T)\n if row['IncludeInGwAllocation']:\n gw1 = row[['RecordNumber', 'AllocationBlock', 'WAP', 'FromMonth', 'ToMonth', 'gw_rate', 'gw_vol']].rename({'gw_rate': 'AllocatedRate', 'gw_vol': 'AllocatedAnnualVolume'})\n gw1['HydroGroup'] = 'Groundwater'\n allo_list.append(gw1.to_frame().T)\n\n rv1 = pd.concat(allo_list)\n\n rv1['AllocatedAnnualVolume'] = pd.to_numeric(rv1['AllocatedAnnualVolume'])\n rv1['AllocatedRate'] = pd.to_numeric(rv1['AllocatedRate'])\n rv1['FromMonth'] = pd.to_numeric(rv1['FromMonth'], downcast='integer')\n rv1['ToMonth'] = pd.to_numeric(rv1['ToMonth'], downcast='integer')\n\n rv1.loc[rv1['AllocatedAnnualVolume'].isnull(), 'AllocatedAnnualVolume'] = 0\n rv1.loc[rv1['AllocatedAnnualVolume'] == np.inf, 'AllocatedAnnualVolume'] = 0\n rv1.loc[rv1['AllocatedRate'].isnull(), 'AllocatedRate'] = 0\n rv1.loc[rv1['AllocatedRate'] == np.inf, 'AllocatedRate'] = 0\n\n # Cut out the fat\n rv4 = rv1[(rv1['AllocatedAnnualVolume'] > 0) | (rv1['AllocatedRate'] > 0)].copy()\n\n ## Calculate missing volumes and rates\n ann_bool = rv4.AllocatedAnnualVolume == 0\n rv4.loc[ann_bool, 'AllocatedAnnualVolume'] = (rv4.loc[ann_bool, 'AllocatedRate'] * 0.001*60*60*24*30.42* (rv4.loc[ann_bool, 'ToMonth'] - rv4.loc[ann_bool, 'FromMonth'] + 1))\n\n rate_bool = rv4.AllocatedRate == 0\n rv4.loc[rate_bool, 'AllocatedRate'] = (rv4.loc[rate_bool, 'AllocatedAnnualVolume'] / 60/60/24/30.42/ (rv4.loc[rate_bool, 'ToMonth'] - rv4.loc[rate_bool, 'FromMonth'] + 1) * 1000)\n\n ## Convert the rates and volumes to integers\n rv4['AllocatedAnnualVolume'] = 
rv4['AllocatedAnnualVolume'].round().astype(int)\n rv4['AllocatedRate'] = rv4['AllocatedRate'].round().astype(int)\n\n ## Merge tables for IDs\n avr5 = pd.merge(rv4, ab_types1, on=['AllocationBlock', 'HydroGroup']).drop(['AllocationBlock', 'HydroGroup'], axis=1).copy()\n avr6 = pd.merge(avr5, wap_site, on='WAP').drop('WAP', axis=1)\n\n ## Update CrcAlloSite table\n crc_allo = avr6[['RecordNumber', 'AlloBlockID', 'SiteID']].copy()\n crc_allo['SiteAllo'] = True\n crc_allo['SiteType'] = 'WAP'\n\n ## Determine which rows should be updated\n# old_crc_allo = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', where_in={'SiteAllo': [1], 'SiteType': ['WAP']})\n#\n# diff_dict = mssql.compare_dfs(old_crc_allo.drop(['CrcAlloSiteID', 'ModifiedDate'], axis=1), crc_allo, on=['RecordNumber', 'AlloBlockID', 'SiteID'])\n#\n# both1 = pd.concat([diff_dict['new'], diff_dict['diff']])\n#\n# rem1 = diff_dict['remove']\n\n # Save results\n new_crc_allo, rem_crc_allo = mssql.update_from_difference(crc_allo, param['output']['server'], param['output']['database'], 'CrcAlloSite', on=['RecordNumber', 'AlloBlockID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcAlloSite', 'pass', '{} rows updated'.format(len(new_crc_allo)), username=param['output']['username'], password=param['output']['password'])\n\n # Read db table\n allo_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', ['CrcAlloSiteID', 'RecordNumber', 'AlloBlockID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])\n\n # Remove old data if needed\n if not rem_crc_allo.empty:\n rem_crc_allo1 = pd.merge(allo_site0, rem_crc_allo, on=['RecordNumber', 'AlloBlockID', 'SiteID']).drop(['RecordNumber', 'AlloBlockID', 'SiteID'], axis=1)\n mssql.del_table_rows(param['output']['server'], param['output']['database'], 'AllocatedRateVolume', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])\n# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'TSLowFlowRestr', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])\n# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'LowFlowConditions', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])\n# mssql.del_table_rows(param['output']['server'], param['output']['database'], 'CrcAlloSite', rem_crc_allo1, username=param['output']['username'], password=param['output']['password'])\n allo_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', ['CrcAlloSiteID', 'RecordNumber', 'AlloBlockID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])\n\n ## Update AllocatedRateVolume table\n avr7 = pd.merge(allo_site0, avr6, on=['RecordNumber', 'AlloBlockID', 'SiteID']).drop(['RecordNumber', 'AlloBlockID', 'SiteID'], axis=1).drop_duplicates('CrcAlloSiteID')\n\n # Save results\n new_avr, _ = mssql.update_from_difference(avr7, param['output']['server'], param['output']['database'], 'AllocatedRateVolume', on='CrcAlloSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = 
util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'AllocatedRateVolume', 'pass', '{} rows updated'.format(len(new_avr)), username=param['output']['username'], password=param['output']['password'])\n\n #################################################\n ### ConsentedRateVolume\n print('--Update Consent tables')\n\n ## Clean data\n crv1 = db.consented_takes.copy()\n crv1['RecordNumber'] = crv1['RecordNumber'].str.strip().str.upper()\n crv1['take_type'] = crv1['take_type'].str.strip().str.title()\n crv1['LowflowCondition'] = crv1['LowflowCondition'].str.strip().str.upper()\n crv1['ConsentedAnnualVolume'] = pd.to_numeric(crv1['ConsentedAnnualVolume'], errors='coerce').round()\n crv1['ConsentedMultiDayVolume'] = pd.to_numeric(crv1['ConsentedMultiDayVolume'], errors='coerce').round()\n crv1['ConsentedMultiDayPeriod'] = pd.to_numeric(crv1['ConsentedMultiDayPeriod'], errors='coerce').round()\n crv1['ConsentedRate'] = pd.to_numeric(crv1['ConsentedRate'], errors='coerce')\n\n crv1.loc[crv1['ConsentedMultiDayVolume'] <= 0, 'ConsentedMultiDayVolume'] = np.nan\n crv1.loc[crv1['ConsentedMultiDayPeriod'] <= 0, 'ConsentedMultiDayPeriod'] = np.nan\n crv1.loc[crv1['ConsentedRate'] <= 0, 'ConsentedRate'] = np.nan\n crv1.loc[crv1['ConsentedAnnualVolume'] <= 0, 'ConsentedAnnualVolume'] = np.nan\n\n crv1.loc[crv1['LowflowCondition'].isnull(), 'LowflowCondition'] = 'NO'\n crv1.loc[(crv1['LowflowCondition'] == 'COMPLEX'), 'LowflowCondition'] = 'YES'\n crv1.loc[crv1['LowflowCondition'] == 'NO', 'LowflowCondition'] = False\n crv1.loc[crv1['LowflowCondition'] == 'YES', 'LowflowCondition'] = True\n\n ## Filter data\n crv2 = crv1[crv1.ConsentedRate.notnull()]\n\n ## Check foreign keys\n crv2 = crv2[crv2.RecordNumber.isin(crc1)].copy()\n\n ## Aggregate take types for counts and min/max month\n grp4 = wa4.groupby(['RecordNumber', 'take_type', 'WAP'])\n mon_min = grp4['FromMonth'].min()\n mon_min.name = 'FromMonth'\n mon_max = grp4['ToMonth'].max()\n mon_max.name = 'ToMonth'\n mon_min_max = pd.concat([mon_min, mon_max], axis=1)\n mon_min_max1 = mon_min_max.reset_index()\n\n grp5 = mon_min_max1.groupby(['RecordNumber', 'take_type'])\n mon_min_max1['wap_count'] = grp5['WAP'].transform('count')\n\n ## Distribute WAPs to consents\n crv3 = pd.merge(crv2, mon_min_max1, on=['RecordNumber', 'take_type'])\n crv3[['ConsentedAnnualVolume', 'ConsentedMultiDayVolume']] = crv3[['ConsentedAnnualVolume', 'ConsentedMultiDayVolume']].divide(crv3['wap_count'], 0).round()\n crv3['ConsentedRate'] = crv3['ConsentedRate'].divide(crv3['wap_count'], 0).round(2)\n\n ## Convert take types to ActivityID\n take_types1 = act_types1[act_types1.ActivityType == 'Take'].copy()\n crv4 = pd.merge(crv3.drop('wap_count', axis=1), take_types1[['ActivityID', 'ActivityName']], left_on='take_type', right_on='ActivityName').drop(['take_type', 'ActivityName'], axis=1)\n\n ## Convert WAPs to SiteIDs\n crv5 = pd.merge(crv4, wap_site, on='WAP').drop('WAP', axis=1)\n\n ## Create CrcActSite table\n crc_act = crv5[['RecordNumber', 'ActivityID', 'SiteID']].copy()\n crc_act['SiteActivity'] = True\n crc_act['SiteType'] = 'WAP'\n\n # Save results\n new_crc_act, rem_crc_act = mssql.update_from_difference(crc_act, param['output']['server'], param['output']['database'], 'CrcActSite', on=['RecordNumber', 'ActivityID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['RecordNumber', 'ActivityID', 'SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = 
util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcActSite', 'pass', '{} rows updated'.format(len(new_crc_act)), username=param['output']['username'], password=param['output']['password'])\n\n # Read db table\n act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])\n\n # Remove old data if needed\n if not rem_crc_act.empty:\n rem_crc_act1 = pd.merge(act_site0, rem_crc_act, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID'], axis=1)\n del_stmt = \"delete from {table} where {col} in ({val})\"\n\n# del_stmt1 = del_stmt.format(table='ConsentedAttributes', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))\n# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt1, username=param['output']['username'], password=param['output']['password'])\n#\n# del_stmt2a = del_stmt.format(table='LinkedPermits', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))\n# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt2a, username=param['output']['username'], password=param['output']['password'])\n#\n# del_stmt2b = del_stmt.format(table='LinkedPermits', col='OtherCrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))\n# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt2b, username=param['output']['username'], password=param['output']['password'])\n\n del_stmt3 = del_stmt.format(table='ConsentedRateVolume', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))\n mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt3, username=param['output']['username'], password=param['output']['password'])\n\n# del_stmt4 = del_stmt.format(table='CrcActSite', col='CrcActSiteID', val=', '.join(rem_crc_act1.CrcActSiteID.astype(str).tolist()))\n# mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt4, username=param['output']['username'], password=param['output']['password'])\n\n act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])\n\n ## Create ConsentedRateVolume table\n crv6 = pd.merge(crv5, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID', 'LowflowCondition'], axis=1)\n\n # Save results\n new_crv, _ = mssql.update_from_difference(crv6, param['output']['server'], param['output']['database'], 'ConsentedRateVolume', on='CrcActSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedRateVolume', 'pass', '{} rows updated'.format(len(new_crv)), username=param['output']['username'], password=param['output']['password'])\n\n ###########################################\n ### Diverts\n\n ## Clean\n div1 = db.divert.copy()\n div1['RecordNumber'] = div1['RecordNumber'].str.strip().str.upper()\n div1['DivertType'] = div1['DivertType'].str.strip().str.title()\n 
div1['LowflowCondition'] = div1['LowflowCondition'].str.strip().str.upper()\n div1['ConsentedMultiDayVolume'] = pd.to_numeric(div1['ConsentedMultiDayVolume'], errors='coerce').round()\n div1['ConsentedMultiDayPeriod'] = pd.to_numeric(div1['ConsentedMultiDayPeriod'], errors='coerce').round()\n div1['ConsentedRate'] = pd.to_numeric(div1['ConsentedRate'], errors='coerce').round(2)\n\n div1.loc[div1['ConsentedMultiDayVolume'] <= 0, 'ConsentedMultiDayVolume'] = np.nan\n div1.loc[div1['ConsentedMultiDayPeriod'] <= 0, 'ConsentedMultiDayPeriod'] = np.nan\n div1.loc[div1['ConsentedRate'] <= 0, 'ConsentedRate'] = np.nan\n\n div1.loc[div1['LowflowCondition'].isnull(), 'LowflowCondition'] = 'NO'\n div1.loc[(~div1['LowflowCondition'].isin(['NO', 'YES'])), 'LowflowCondition'] = 'YES'\n div1.loc[div1['LowflowCondition'] == 'NO', 'LowflowCondition'] = False\n div1.loc[div1['LowflowCondition'] == 'YES', 'LowflowCondition'] = True\n\n div1['WAP'] = div1['WAP'].str.strip().str.upper()\n div1.loc[~div1.WAP.str.contains('[A-Z]+\\d\\d/\\d\\d\\d\\d'), 'WAP'] = np.nan\n\n ## Filter\n div2 = div1[div1.WAP.notnull()]\n\n ## Check foreign keys\n div2 = div2[div2.RecordNumber.isin(crc1)].copy()\n\n ## Check primary keys\n div2 = div2.drop_duplicates(['RecordNumber', 'WAP'])\n\n ## Join to get the IDs and filter WAPs\n div3 = pd.merge(div2, act_types1[['ActivityID', 'ActivityName']], left_on='DivertType', right_on='ActivityName').drop(['DivertType', 'ActivityName'], axis=1)\n div3 = pd.merge(div3, wap_site, on='WAP').drop('WAP', axis=1)\n\n ## CrcActSite\n crc_act_div = div3[['RecordNumber', 'ActivityID', 'SiteID']].copy()\n crc_act_div['SiteActivity'] = True\n crc_act_div['SiteType'] = 'WAP'\n\n # Save results\n new_crc_div, rem_crc_div = mssql.update_from_difference(crc_act_div, param['output']['server'], param['output']['database'], 'CrcActSite', on=['RecordNumber', 'ActivityID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['RecordNumber', 'ActivityID', 'SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcActSite', 'pass', '{} rows updated'.format(len(new_crc_div)), username=param['output']['username'], password=param['output']['password'])\n\n # Read db table\n act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])\n\n ## ConsentedRateVolume\n crc_div = pd.merge(div3, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID', 'LowflowCondition'], axis=1).dropna(subset=['ConsentedRate', 'ConsentedMultiDayVolume'], how='all')\n crc_div['FromMonth'] = 1\n crc_div['ToMonth'] = 12\n\n # Save results\n new_crc_div, _ = mssql.update_from_difference(crc_div, param['output']['server'], param['output']['database'], 'ConsentedRateVolume', on='CrcActSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedRateVolume', 'pass', '{} rows updated'.format(len(new_crc_div)), username=param['output']['username'], password=param['output']['password'])\n\n\n ###########################################\n ### Water use types\n\n wu1 = db.water_use.copy()\n\n ## 
Clean\n wu1['RecordNumber'] = wu1['RecordNumber'].str.strip().str.upper()\n wu1['UseType'] = wu1['UseType'].str.strip().str.title()\n wu1['ConsentedMultiDayVolume'] = pd.to_numeric(wu1['ConsentedMultiDayVolume'], errors='coerce').round()\n wu1['ConsentedMultiDayPeriod'] = pd.to_numeric(wu1['ConsentedMultiDayPeriod'], errors='coerce').round()\n wu1['ConsentedRate'] = pd.to_numeric(wu1['ConsentedRate'], errors='coerce').round(2)\n\n wu1.loc[wu1['ConsentedMultiDayVolume'] <= 0, 'ConsentedMultiDayVolume'] = np.nan\n wu1.loc[wu1['ConsentedMultiDayPeriod'] <= 0, 'ConsentedMultiDayPeriod'] = np.nan\n wu1.loc[wu1['ConsentedRate'] <= 0, 'ConsentedRate'] = np.nan\n\n spaces_bool = wu1['UseType'].str[3:5] == ' '\n wu1.loc[spaces_bool, 'UseType'] = wu1.loc[spaces_bool, 'UseType'].str[:3] + wu1.loc[spaces_bool, 'UseType'].str[4:]\n\n ## Check foreign keys\n wu2 = wu1[wu1.RecordNumber.isin(crc1)].copy()\n\n ## Split into WAPs by take type equivelant\n wu3 = wu2.copy()\n wu3['take_type'] = wu3['UseType'].str.replace('Use', 'Take')\n wu4 = pd.merge(wu3, mon_min_max1, on=['RecordNumber', 'take_type'])\n wu4['ConsentedMultiDayVolume'] = wu4['ConsentedMultiDayVolume'].divide(wu4['wap_count'], 0).round()\n wu4['ConsentedRate'] = wu4['ConsentedRate'].divide(wu4['wap_count'], 0).round(2)\n wu4.drop(['wap_count', 'take_type'], axis=1, inplace=True)\n\n ## Convert Use types to broader categories\n types_cat = {}\n for key, value in param['misc']['use_types_codes'].items():\n for string in value:\n types_cat[string] = key\n types_check = np.in1d(wu4.WaterUse.unique(), list(types_cat.keys())).all()\n if not types_check:\n raise ValueError('Some use types are missing in the parameters file. Check the use type table and the parameters file.')\n wu4.WaterUse.replace(types_cat, inplace=True)\n wu4['WaterUse'] = wu4['WaterUse'].astype('category')\n\n ## Join to get the IDs and filter WAPs\n wu5 = pd.merge(wu4, act_types1[['ActivityID', 'ActivityName']], left_on='UseType', right_on='ActivityName').drop(['UseType', 'ActivityName'], axis=1)\n wu5 = pd.merge(wu5, wap_site, on='WAP').drop('WAP', axis=1)\n\n ## Drop duplicate uses\n wu5.WaterUse.cat.set_categories(param['misc']['use_types_priorities'], True, inplace=True)\n wu5 = wu5.sort_values('WaterUse')\n wu6 = wu5.drop_duplicates(['RecordNumber', 'ActivityID', 'SiteID']).copy()\n\n ## CrcActSite\n crc_act_wu = wu6[['RecordNumber', 'ActivityID', 'SiteID']].copy()\n crc_act_wu['SiteActivity'] = True\n crc_act_wu['SiteType'] = 'WAP'\n\n # Save results\n new_crv_wu, _ = mssql.update_from_difference(crc_act_wu, param['output']['server'], param['output']['database'], 'CrcActSite', on=['RecordNumber', 'ActivityID', 'SiteID'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcActSite', 'pass', '{} rows updated'.format(len(new_crv_wu)), username=param['output']['username'], password=param['output']['password'])\n\n # Read db table\n act_site0 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcActSite', ['CrcActSiteID', 'RecordNumber', 'ActivityID', 'SiteID'], username=param['output']['username'], password=param['output']['password'])\n\n ## ConsentedRateVolume\n crv_wu = pd.merge(wu6, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID'])[['CrcActSiteID', 'ConsentedRate', 'ConsentedMultiDayVolume', 'ConsentedMultiDayPeriod']].dropna(subset=['ConsentedRate', 
'ConsentedMultiDayVolume'], how='all')\n crv_wu['FromMonth'] = 1\n crv_wu['ToMonth'] = 12\n\n # Save results\n new_crv_wu, _ = mssql.update_from_difference(crv_wu, param['output']['server'], param['output']['database'], 'ConsentedRateVolume', on='CrcActSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedRateVolume', 'pass', '{} rows updated'.format(len(new_crv_wu)), username=param['output']['username'], password=param['output']['password'])\n\n ## Attributes\n cols1 = ['RecordNumber', 'ActivityID', 'SiteID']\n attr_cols = attr1.Attribute[attr1.Attribute.isin(wu6.columns)].tolist()\n cols1.extend(attr_cols)\n wua1 = wu6.loc[:, wu6.columns.isin(cols1)].set_index(['RecordNumber', 'ActivityID', 'SiteID'])\n wua2 = wua1.stack()\n wua2.name = 'Value'\n wua2 = wua2.reset_index()\n wua2.rename(columns={'level_3': 'Attribute'}, inplace=True)\n wua3 = pd.merge(wua2, attr1, on='Attribute').drop('Attribute', axis=1)\n wua4 = pd.merge(wua3, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID'], axis=1)\n\n # Save results\n new_wua, _ = mssql.update_from_difference(wua4, param['output']['server'], param['output']['database'], 'ConsentedAttributes', on=['CrcActSiteID', 'AttributeID'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'ConsentedAttributes', 'pass', '{} rows updated'.format(len(new_wua)), username=param['output']['username'], password=param['output']['password'])\n\n #################################################\n ### Linked Consents\n print('--Update LinkConsent table')\n\n ## Clean data\n lc1 = db.linked_permits.copy()\n lc1['RecordNumber'] = lc1['RecordNumber'].str.strip().str.upper()\n lc1['OtherRecordNumber'] = lc1['OtherRecordNumber'].str.strip().str.upper()\n lc1['Relationship'] = lc1['Relationship'].str.strip()\n lc1['LinkedStatus'] = lc1['LinkedStatus'].str.strip()\n lc1['CombinedAnnualVolume'] = pd.to_numeric(lc1['CombinedAnnualVolume'], errors='coerce').round()\n\n ## Check foreign keys\n lc2 = lc1[lc1.RecordNumber.isin(crc1) & lc1.OtherRecordNumber.isin(crc1)].copy()\n\n ## Filter data\n lc2 = lc2.drop_duplicates(['RecordNumber', 'OtherRecordNumber'])\n lc2 = lc2[lc2['Relationship'].notnull()]\n# lc3 = lc2[lc2['CombinedAnnualVolume'] > 0].copy()\n\n ## Distribute to CrcActSiteIDs\n crc_count1 = mon_min_max1.drop(['FromMonth', 'ToMonth'], axis=1)\n crc_count1['wap_count'] = crc_count1.groupby(['RecordNumber']).WAP.transform('count')\n\n # Main one\n lc3 = pd.merge(lc2, crc_count1, on='RecordNumber')\n lc3['CombinedAnnualVolume'] = lc3['CombinedAnnualVolume'] / lc3['wap_count']\n lc4 = pd.merge(lc3.drop('wap_count', axis=1), take_types1[['ActivityID', 'ActivityName']], left_on='take_type', right_on='ActivityName').drop(['take_type', 'ActivityName'], axis=1)\n lc4 = pd.merge(lc4, wap_site, on='WAP').drop('WAP', axis=1)\n lc4 = pd.merge(lc4, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID'], axis=1)\n\n # Other one\n lc4.rename(columns={'OtherRecordNumber': 'RecordNumber'}, inplace=True)\n lc5 = pd.merge(lc4, crc_count1, on='RecordNumber').drop('wap_count', axis=1)\n lc5 = pd.merge(lc5, take_types1[['ActivityID', 
'ActivityName']], left_on='take_type', right_on='ActivityName').drop(['take_type', 'ActivityName'], axis=1)\n lc5 = pd.merge(lc5, wap_site, on='WAP').drop('WAP', axis=1)\n lc5 = pd.merge(lc5, act_site0, on=['RecordNumber', 'ActivityID', 'SiteID']).drop(['RecordNumber', 'ActivityID', 'SiteID'], axis=1)\n lc5.rename(columns={'CrcActSiteID_x': 'CrcActSiteID', 'CrcActSiteID_y': 'OtherCrcActSiteID'}, inplace=True)\n\n ## Save results\n new_lc, _ = mssql.update_from_difference(lc5, param['output']['server'], param['output']['database'], 'LinkedPermits', on=['CrcActSiteID', 'OtherCrcActSiteID'], mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'LinkedPermits', 'pass', '{} rows updated'.format(len(new_lc)), username=param['output']['username'], password=param['output']['password'])\n\n ###############################################\n ### Lowflows tables\n print('--Lowflows')\n\n ## Assign database parameters to the lowflows module\n lf.read_data.lf_server = param['misc']['lowflows']['server']\n lf.read_data.hydrotel_server = param['misc']['hydrotel']['server']\n lf.read_data.usm_server = param['source data']['sites']['server']\n\n ## ConsentsSites\n lf_sites1 = lf.sites(username=param['misc']['lowflows']['username'], password=param['misc']['lowflows']['password']).reset_index()\n\n new_sites, _ = mssql.update_from_difference(lf_sites1[['ExtSiteID', 'SiteName']], param['output']['server'], param['output']['database'], 'ConsentsSites', on='ExtSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n sites1 = mssql.rd_sql(param['output']['server'], param['output']['database'], 'ConsentsSites', ['SiteID', 'ExtSiteID'], username=param['output']['username'], password=param['output']['password'])\n\n ## LowFlowSite\n lf_sites2 = pd.merge(sites1, lf_sites1, on='ExtSiteID').drop(['ExtSiteID', 'SiteName'], axis=1)\n new_lf_sites = mssql.update_from_difference(lf_sites2, param['output']['server'], param['output']['database'], 'LowFlowSite', on='SiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n ## Make lowflow conditions tables\n trigs1 = lf.crc_trigs(username=param['misc']['lowflows']['username'], password=param['misc']['lowflows']['password']).reset_index()\n trigs2 = trigs1.sort_values(['IsActive', 'ExtSiteID', 'RecordNumber', 'MinAllocation', 'BandNumber'], ascending=[False, True, True, True, True]).drop_duplicates(['RecordNumber', 'ExtSiteID']).drop('IsActive', axis=1)\n\n trigs3 = pd.merge(sites1, trigs2, on=['ExtSiteID']).drop('ExtSiteID', axis=1)\n\n sw_blocks = ab_types1[(ab_types1.HydroGroup == 'Surface Water') & (ab_types1.AllocationBlock != 'In Waitaki')]\n allo_site1 = allo_site0[['RecordNumber', 'AlloBlockID']].drop_duplicates()\n allo_site2 = allo_site1[allo_site1.AlloBlockID.isin(sw_blocks.AlloBlockID)]\n\n trigs3a = pd.merge(allo_site2, trigs3, on=['RecordNumber'])\n\n # Missing SW Allo consents\n mis_trigs1 = trigs3[~trigs3['RecordNumber'].isin(allo_site2.RecordNumber.unique())].copy()\n mis_allo_site1 = allo_site1[allo_site1.RecordNumber.isin(mis_trigs1.RecordNumber.unique())].copy()\n mis_allo_site1['AlloBlockID'] = 9\n mis_allo_site1.drop_duplicates(inplace=True)\n\n extra_trigs = pd.merge(mis_allo_site1, trigs3, on=['RecordNumber'])\n\n # Combine trigs\n trigs4 = pd.concat([trigs3a, 
extra_trigs], sort=False)\n\n ## Update CrcAlloSite table\n trigs_allo = trigs4[['RecordNumber', 'AlloBlockID', 'SiteID', 'SiteType']].copy()\n trigs_allo['SiteAllo'] = False\n\n # Save results\n new_trigs_allo, rem_trigs_allo = mssql.update_from_difference(trigs_allo, param['output']['server'], param['output']['database'], 'CrcAlloSite', on=['RecordNumber', 'AlloBlockID', 'SiteID'], mod_date_col='ModifiedDate', where_cols=['RecordNumber', 'AlloBlockID', 'SiteID', 'SiteType'], username=param['output']['username'], password=param['output']['password'])\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'CrcAlloSite', 'pass', '{} rows updated'.format(len(new_trigs_allo)), username=param['output']['username'], password=param['output']['password'])\n\n # Read db table\n allo_site_trig = mssql.rd_sql(param['output']['server'], param['output']['database'], 'CrcAlloSite', ['CrcAlloSiteID', 'RecordNumber', 'AlloBlockID', 'SiteID'], where_in={'SiteType': ['LowFlow', 'Residual']}, username=param['output']['username'], password=param['output']['password'])\n\n ## Update LowFlowConditions\n trigs5 = pd.merge(allo_site_trig, trigs4, on=['RecordNumber', 'AlloBlockID', 'SiteID']).drop(['RecordNumber', 'AlloBlockID', 'SiteID', 'SiteType'], axis=1)\n\n # Save results\n new_trigs, rem_cond = mssql.update_from_difference(trigs5, param['output']['server'], param['output']['database'], 'LowFlowConditions', on='CrcAlloSiteID', mod_date_col='ModifiedDate', username=param['output']['username'], password=param['output']['password'])\n\n # Remove old data if needed - Cannot do this because of time series table dependencies\n # if not rem_trigs_allo.empty:\n # rem_trigs_allo1 = pd.merge(allo_site_trig, rem_trigs_allo, on=['RecordNumber', 'AlloBlockID', 'SiteID']).drop(['RecordNumber', 'AlloBlockID', 'SiteID'], axis=1)\n #\n # del_stmt = \"delete from {table} where {col} in ({val})\"\n #\n # del_stmt1 = del_stmt.format(table='TSLowFlowRestr', col='CrcAlloSiteID', val=', '.join(rem_trigs_allo1.CrcAlloSiteID.astype(str).tolist()))\n # mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt1)\n #\n # del_stmt2 = del_stmt.format(table='LowFlowConditions', col='CrcAlloSiteID', val=', '.join(rem_trigs_allo1.CrcAlloSiteID.astype(str).tolist()))\n # mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt2)\n #\n # if not rem_cond.empty:\n # del_stmt3 = del_stmt.format(table='LowFlowConditions', col='CrcAlloSiteID', val=', '.join(rem_cond.CrcAlloSiteID.astype(str).tolist()))\n # mssql.del_table_rows(param['output']['server'], param['output']['database'], stmt=del_stmt3)\n\n # Log\n log1 = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'LowFlowConditions', 'pass', '{} rows updated'.format(len(new_trigs)), username=param['output']['username'], password=param['output']['password'])\n\n## If failure\n\nexcept Exception as err:\n err1 = err\n print(err1)\n log_err = util.log(param['output']['server'], param['output']['database'], 'log', run_time_start, '1900-01-01', 'Some Table', 'fail', str(err1)[:299], username=param['output']['username'], password=param['output']['password'])\n"
] | [
[
"pandas.to_numeric",
"pandas.DataFrame",
"numpy.in1d",
"pandas.to_datetime",
"pandas.merge",
"pandas.concat",
"pandas.DateOffset",
"pandas.isnull",
"numpy.concatenate"
]
] |
phigre/cobi | [
"bb6cd9a49eb22862be6d87f0a2b0c8baf65cadb5"
] | [
"combine2d/sandbox/Borden/perturbed_surface.py"
] | [
"import torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport shutil\n\nfrom combine2d.core import gis, test_cases\nfrom combine2d.core.utils import NonRGIGlacierDirectory\nfrom combine2d.core.first_guess import compile_first_guess\nfrom combine2d.core.inversion import InversionDirectory\nfrom combine2d.core.dynamics import create_glacier\nfrom combine2d.core.cost_function import create_cost_func\nfrom combine2d.core.inversion import InversionDirectory\nfrom combine2d.core import data_logging\nfrom combine2d.core.table_creation import create_case_table, eval_identical_twin\nfrom combine2d.core.data_manipulation import create_perlin_noise, \\\n add_surface_noise\nfrom oggm import cfg\n\ncfg.initialize()\n\nbasedir = '/path/to/example'\nbasedir = '/data/philipp/thesis/perturbed_surface'\n\n# Choose a case\ncase = test_cases.Borden\ngdir = NonRGIGlacierDirectory(case, basedir)\n# only needed once:\n#gis.define_nonrgi_glacier_region(gdir)\n\n# create settings for inversion\nscaling = 1 #200\ndesired_rmse = 2\n#desired_rmse = 6\n#desired_rmse = 10\n\nlambdas = np.zeros(6)\nlambdas[0] = 0.2\nlambdas[1] = 0.25\nlambdas[2] = 100 * scaling\nlambdas[3] = 1e5 * scaling\n#lambdas[4] = 1e7\n\nminimize_options = {\n 'maxiter': 300,\n 'ftol': 0.5e-3,\n #'xtol': 1e-30,\n 'gtol': 1e-4,\n #'maxcor': 5,\n #'maxls': 10,\n 'disp': True\n}\ninv_subdir = 'fin {:02d} scaling {:02d} {:1g}e7'.format(scaling, desired_rmse,\n lambdas[4]/1e7)\ngdir.write_inversion_settings(mb_spinup=None,\n yrs_spinup=2000,\n yrs_forward_run=200,\n reg_parameters=lambdas,\n solver='L-BFGS-B',\n minimize_options=minimize_options,\n inversion_subdir=inv_subdir,\n fg_shape_factor=1.,\n fg_slope_cutoff_angle=2.5,\n fg_min_height=-30,\n fg_interp_boundary=True,\n bounds_min_max=(2, 1000)\n )\n\n\n# Optional, if not reset=True and already ran once\n# only needed once:\n#create_glacier(gdir)\n#compile_first_guess(gdir)\n\nif os.path.exists(gdir.get_filepath('dem_noise')):\n os.remove(gdir.get_filepath('dem_noise'))\nnoise = create_perlin_noise(gdir, desired_rmse, octaves=4, base=2, freq=3,\n glacier_only=True)\nadd_surface_noise(gdir, noise)\n\ncreate_case_table(gdir)\n\nidir = InversionDirectory(gdir)\n\n# copy this script to inversion directory for reproducibility\npath_to_file = '/home/philipp/COBBI/combine/sandbox/Borden' \\\n '/perturbed_surface.py'\nfname = os.path.split(path_to_file)[-1]\nif not os.path.exists(idir.get_current_basedir()):\n os.makedirs(idir.get_current_basedir(), exist_ok=True)\nshutil.copy(path_to_file, os.path.join(idir.get_current_basedir(), fname))\n\nres = idir.run_minimize()\neval_identical_twin(idir)\n#dl = data_logging.load_pickle(idir.get_current_basedir() + '/data_logger.pkl')\n\nprint('end')"
] | [
[
"numpy.zeros"
]
] |
jinghuix/dgl | [
"fae26dd15caac92458a08ad34889086e1e333ddd"
] | [
"examples/pytorch/graphsage/train_cv_multi_gpu.py"
] | [
"import dgl\nimport numpy as np\nimport torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.multiprocessing as mp\nimport dgl.function as fn\nimport dgl.nn.pytorch as dglnn\nimport time\nimport argparse\nimport tqdm\nimport traceback\nfrom _thread import start_new_thread\nfrom functools import wraps\nfrom dgl.data import RedditDataset\nfrom torch.utils.data import DataLoader\nfrom torch.nn.parallel import DistributedDataParallel\n\nclass SAGEConvWithCV(nn.Module):\n def __init__(self, in_feats, out_feats, activation):\n super().__init__()\n self.W = nn.Linear(in_feats * 2, out_feats)\n self.activation = activation\n self.reset_parameters()\n\n def reset_parameters(self):\n gain = nn.init.calculate_gain('relu')\n nn.init.xavier_uniform_(self.W.weight, gain=gain)\n nn.init.constant_(self.W.bias, 0)\n\n def forward(self, block, H, HBar=None):\n if self.training:\n with block.local_scope():\n H_src, H_dst = H\n HBar_src, agg_HBar_dst = HBar\n block.dstdata['agg_hbar'] = agg_HBar_dst\n block.srcdata['hdelta'] = H_src - HBar_src\n block.update_all(fn.copy_u('hdelta', 'm'), fn.mean('m', 'hdelta_new'))\n h_neigh = block.dstdata['agg_hbar'] + block.dstdata['hdelta_new']\n h = self.W(th.cat([H_dst, h_neigh], 1))\n if self.activation is not None:\n h = self.activation(h)\n return h\n else:\n with block.local_scope():\n H_src, H_dst = H\n block.srcdata['h'] = H_src\n block.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'h_new'))\n h_neigh = block.dstdata['h_new']\n h = self.W(th.cat([H_dst, h_neigh], 1))\n if self.activation is not None:\n h = self.activation(h)\n return h\n\nclass SAGE(nn.Module):\n def __init__(self,\n in_feats,\n n_hidden,\n n_classes,\n n_layers,\n activation):\n super().__init__()\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.n_classes = n_classes\n self.layers = nn.ModuleList()\n self.layers.append(SAGEConvWithCV(in_feats, n_hidden, activation))\n for i in range(1, n_layers - 1):\n self.layers.append(SAGEConvWithCV(n_hidden, n_hidden, activation))\n self.layers.append(SAGEConvWithCV(n_hidden, n_classes, None))\n\n def forward(self, blocks):\n h = blocks[0].srcdata['features']\n updates = []\n for layer, block in zip(self.layers, blocks):\n # We need to first copy the representation of nodes on the RHS from the\n # appropriate nodes on the LHS.\n # Note that the shape of h is (num_nodes_LHS, D) and the shape of h_dst\n # would be (num_nodes_RHS, D)\n h_dst = h[:block.number_of_dst_nodes()]\n hbar_src = block.srcdata['hist']\n agg_hbar_dst = block.dstdata['agg_hist']\n # Then we compute the updated representation on the RHS.\n # The shape of h now becomes (num_nodes_RHS, D)\n h = layer(block, (h, h_dst), (hbar_src, agg_hbar_dst))\n block.dstdata['h_new'] = h\n return h\n\n def inference(self, g, x, batch_size, device):\n \"\"\"\n Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).\n g : the entire graph.\n x : the input of entire node set.\n\n The inference code is written in a fashion that it could handle any number of nodes and\n layers.\n \"\"\"\n # During inference with sampling, multi-layer blocks are very inefficient because\n # lots of computations in the first few layers are repeated.\n # Therefore, we compute the representation of all nodes layer by layer. 
The nodes\n # on each layer are of course splitted in batches.\n # TODO: can we standardize this?\n nodes = th.arange(g.number_of_nodes())\n for l, layer in enumerate(self.layers):\n y = g.ndata['hist_%d' % (l + 1)]\n\n for start in tqdm.trange(0, len(nodes), batch_size):\n end = start + batch_size\n batch_nodes = nodes[start:end]\n block = dgl.to_block(dgl.in_subgraph(g, batch_nodes), batch_nodes)\n induced_nodes = block.srcdata[dgl.NID]\n\n h = x[induced_nodes].to(device)\n block = block.to(device)\n h_dst = h[:block.number_of_dst_nodes()]\n h = layer(block, (h, h_dst))\n\n y[start:end] = h.cpu()\n\n x = y\n return y\n\n\n\nclass NeighborSampler(object):\n def __init__(self, g, fanouts):\n self.g = g\n self.fanouts = fanouts\n\n def sample_blocks(self, seeds):\n seeds = th.LongTensor(seeds)\n blocks = []\n hist_blocks = []\n for fanout in self.fanouts:\n # For each seed node, sample ``fanout`` neighbors.\n frontier = dgl.sampling.sample_neighbors(self.g, seeds, fanout)\n # For history aggregation we sample all neighbors.\n hist_frontier = dgl.in_subgraph(self.g, seeds)\n # Then we compact the frontier into a bipartite graph for message passing.\n block = dgl.to_block(frontier, seeds)\n hist_block = dgl.to_block(hist_frontier, seeds)\n # Obtain the seed nodes for next layer.\n seeds = block.srcdata[dgl.NID]\n\n blocks.insert(0, block)\n hist_blocks.insert(0, hist_block)\n return blocks, hist_blocks\n\n# According to https://github.com/pytorch/pytorch/issues/17199, this decorator\n# is necessary to make fork() and openmp work together.\n#\n# TODO: confirm if this is necessary for MXNet and Tensorflow. If so, we need\n# to standardize worker process creation since our operators are implemented with\n# OpenMP.\ndef thread_wrapped_func(func):\n @wraps(func)\n def decorated_function(*args, **kwargs):\n queue = mp.Queue()\n def _queue_result():\n exception, trace, res = None, None, None\n try:\n res = func(*args, **kwargs)\n except Exception as e:\n exception = e\n trace = traceback.format_exc()\n queue.put((res, exception, trace))\n\n start_new_thread(_queue_result, ())\n result, exception, trace = queue.get()\n if exception is None:\n return result\n else:\n assert isinstance(exception, Exception)\n raise exception.__class__(trace)\n return decorated_function\n\ndef compute_acc(pred, labels):\n \"\"\"\n Compute the accuracy of prediction given the labels.\n \"\"\"\n return (th.argmax(pred, dim=1) == labels).float().sum() / len(pred)\n\ndef evaluate(model, g, labels, val_mask, batch_size, device):\n \"\"\"\n Evaluate the model on the validation set specified by ``val_mask``.\n g : The entire graph.\n inputs : The features of all the nodes.\n labels : The labels of all the nodes.\n val_mask : A 0-1 mask indicating which nodes do we actually compute the accuracy for.\n batch_size : Number of nodes to compute at the same time.\n device : The GPU device to evaluate on.\n \"\"\"\n model.eval()\n with th.no_grad():\n inputs = g.ndata['features']\n pred = model.inference(g, inputs, batch_size, device) # also recomputes history tensors\n model.train()\n return compute_acc(pred[val_mask], labels[val_mask])\n\ndef load_subtensor(g, labels, blocks, hist_blocks, dev_id, aggregation_on_device=False):\n \"\"\"\n Copys features and labels of a set of nodes onto GPU.\n \"\"\"\n blocks[0].srcdata['features'] = g.ndata['features'][blocks[0].srcdata[dgl.NID]]\n blocks[-1].dstdata['label'] = labels[blocks[-1].dstdata[dgl.NID]]\n ret_blocks = []\n ret_hist_blocks = []\n for i, (block, hist_block) in 
enumerate(zip(blocks, hist_blocks)):\n hist_col = 'features' if i == 0 else 'hist_%d' % i\n block.srcdata['hist'] = g.ndata[hist_col][block.srcdata[dgl.NID]]\n\n # Aggregate history\n hist_block.srcdata['hist'] = g.ndata[hist_col][hist_block.srcdata[dgl.NID]]\n if aggregation_on_device:\n hist_block = hist_block.to(dev_id)\n hist_block.srcdata['hist'] = hist_block.srcdata['hist']\n hist_block.update_all(fn.copy_u('hist', 'm'), fn.mean('m', 'agg_hist'))\n\n block = block.to(dev_id)\n if not aggregation_on_device:\n hist_block = hist_block.to(dev_id)\n block.dstdata['agg_hist'] = hist_block.dstdata['agg_hist']\n ret_blocks.append(block)\n ret_hist_blocks.append(hist_block)\n return ret_blocks, ret_hist_blocks\n\ndef create_history_storage(g, args, n_classes):\n # Initialize history storage\n for l in range(args.num_layers):\n dim = args.num_hidden if l != args.num_layers - 1 else n_classes\n g.ndata['hist_%d' % (l + 1)] = th.zeros(g.number_of_nodes(), dim).share_memory_()\n\ndef init_history(g, model, dev_id, batch_size):\n with th.no_grad():\n model.inference(g, g.ndata['features'], batch_size, dev_id) # replaces hist_i features in-place\n\ndef update_history(g, blocks):\n with th.no_grad():\n for i, block in enumerate(blocks):\n ids = block.dstdata[dgl.NID].cpu()\n hist_col = 'hist_%d' % (i + 1)\n\n h_new = block.dstdata['h_new'].cpu()\n g.ndata[hist_col][ids] = h_new\n\n@thread_wrapped_func\ndef run(proc_id, n_gpus, args, devices, data):\n dropout = 0.2\n\n dev_id = devices[proc_id]\n if n_gpus > 1:\n dist_init_method = 'tcp://{master_ip}:{master_port}'.format(\n master_ip='127.0.0.1', master_port='12345')\n world_size = n_gpus\n th.distributed.init_process_group(backend=\"nccl\",\n init_method=dist_init_method,\n world_size=world_size,\n rank=proc_id)\n th.cuda.set_device(dev_id)\n\n # Unpack data\n train_mask, val_mask, in_feats, labels, n_classes, g = data\n train_nid = th.LongTensor(np.nonzero(train_mask)[0])\n val_nid = th.LongTensor(np.nonzero(val_mask)[0])\n train_mask = th.BoolTensor(train_mask)\n val_mask = th.BoolTensor(val_mask)\n\n # Split train_nid\n train_nid = th.split(train_nid, len(train_nid) // n_gpus)[proc_id]\n\n # Create sampler\n sampler = NeighborSampler(g, [int(_) for _ in args.fan_out.split(',')])\n\n # Create PyTorch DataLoader for constructing blocks\n dataloader = DataLoader(\n dataset=train_nid.numpy(),\n batch_size=args.batch_size,\n collate_fn=sampler.sample_blocks,\n shuffle=True,\n drop_last=False,\n num_workers=args.num_workers_per_gpu)\n\n # Define model\n model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu)\n\n # Move the model to GPU and define optimizer\n model = model.to(dev_id)\n if n_gpus > 1:\n model = DistributedDataParallel(model, device_ids=[dev_id], output_device=dev_id)\n loss_fcn = nn.CrossEntropyLoss()\n loss_fcn = loss_fcn.to(dev_id)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n # Compute history tensor and their aggregation before training on CPU\n model.eval()\n if n_gpus > 1:\n if proc_id == 0:\n init_history(g, model.module, dev_id, args.val_batch_size)\n th.distributed.barrier()\n else:\n init_history(g, model, dev_id, args.val_batch_size)\n model.train()\n\n # Training loop\n avg = 0\n iter_tput = []\n for epoch in range(args.num_epochs):\n tic = time.time()\n model.train()\n for step, (blocks, hist_blocks) in enumerate(dataloader):\n if proc_id == 0:\n tic_step = time.time()\n\n # The nodes for input lies at the LHS side of the first block.\n # The nodes for output lies at the RHS side of the 
last block.\n seeds = blocks[-1].dstdata[dgl.NID]\n\n blocks, hist_blocks = load_subtensor(g, labels, blocks, hist_blocks, dev_id, True)\n\n # forward\n batch_pred = model(blocks)\n # update history\n update_history(g, blocks)\n # compute loss\n batch_labels = blocks[-1].dstdata['label']\n loss = loss_fcn(batch_pred, batch_labels)\n # backward\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if proc_id == 0:\n iter_tput.append(len(seeds) * n_gpus / (time.time() - tic_step))\n if step % args.log_every == 0 and proc_id == 0:\n acc = compute_acc(batch_pred, batch_labels)\n print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f}'.format(\n epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:])))\n\n if n_gpus > 1:\n th.distributed.barrier()\n\n toc = time.time()\n if proc_id == 0:\n print('Epoch Time(s): {:.4f}'.format(toc - tic))\n if epoch >= 5:\n avg += toc - tic\n if epoch % args.eval_every == 0 and epoch != 0:\n model.eval()\n eval_acc = evaluate(\n model if n_gpus == 1 else model.module, g, labels, val_nid, args.val_batch_size, dev_id)\n print('Eval Acc {:.4f}'.format(eval_acc))\n\n if n_gpus > 1:\n th.distributed.barrier()\n if proc_id == 0:\n print('Avg epoch time: {}'.format(avg / (epoch - 4)))\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser(\"multi-gpu training\")\n argparser.add_argument('--gpu', type=str, default='0')\n argparser.add_argument('--num-epochs', type=int, default=20)\n argparser.add_argument('--num-hidden', type=int, default=16)\n argparser.add_argument('--num-layers', type=int, default=2)\n argparser.add_argument('--fan-out', type=str, default='1,1')\n argparser.add_argument('--batch-size', type=int, default=1000)\n argparser.add_argument('--val-batch-size', type=int, default=1000)\n argparser.add_argument('--log-every', type=int, default=20)\n argparser.add_argument('--eval-every', type=int, default=5)\n argparser.add_argument('--lr', type=float, default=0.003)\n argparser.add_argument('--num-workers-per-gpu', type=int, default=0)\n args = argparser.parse_args()\n \n devices = list(map(int, args.gpu.split(',')))\n n_gpus = len(devices)\n\n # load reddit data\n data = RedditDataset(self_loop=True)\n n_classes = data.num_classes\n g = data[0]\n features = g.ndata['feat']\n in_feats = features.shape[1]\n labels = g.ndata['label']\n train_mask = g.ndata['train_mask']\n val_mask = g.ndata['val_mask']\n g.ndata['features'] = features.share_memory_()\n create_history_storage(g, args, n_classes)\n\n g.create_format_()\n # Pack data\n data = train_mask, val_mask, in_feats, labels, n_classes, g\n\n if n_gpus == 1:\n run(0, n_gpus, args, devices, data)\n else:\n procs = []\n for proc_id in range(n_gpus):\n p = mp.Process(target=run, args=(proc_id, n_gpus, args, devices, data))\n p.start()\n procs.append(p)\n for p in procs:\n p.join()\n"
] | [
[
"torch.nn.init.xavier_uniform_",
"torch.no_grad",
"torch.multiprocessing.Queue",
"torch.nn.ModuleList",
"torch.BoolTensor",
"torch.cat",
"torch.distributed.init_process_group",
"numpy.nonzero",
"numpy.mean",
"torch.cuda.set_device",
"torch.argmax",
"torch.distributed.barrier",
"torch.nn.parallel.DistributedDataParallel",
"torch.nn.init.calculate_gain",
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"torch.multiprocessing.Process"
]
] |
shunk031/allennlp-models | [
"1e89d5e51cb45f3e77a48d4983bf980088334fac"
] | [
"tests/vision/models/visual_entailment_test.py"
] | [
"from torch.testing import assert_allclose\nfrom transformers import AutoModel\n\nfrom allennlp.common.testing import ModelTestCase\nfrom allennlp.data import Vocabulary\n\nfrom allennlp_models import vision # noqa: F401\n\nfrom tests import FIXTURES_ROOT\n\n\nclass TestVEVilbert(ModelTestCase):\n def test_model_can_train_save_and_load_small_model(self):\n param_file = FIXTURES_ROOT / \"vision\" / \"vilbert_ve\" / \"experiment.jsonnet\"\n self.ensure_model_can_train_save_and_load(param_file)\n\n def test_model_can_train_save_and_load_with_cache(self):\n import tempfile\n\n with tempfile.TemporaryDirectory(prefix=self.__class__.__name__) as d:\n overrides = {\"dataset_reader.feature_cache_dir\": str(d)}\n import json\n\n overrides = json.dumps(overrides)\n param_file = FIXTURES_ROOT / \"vision\" / \"vilbert_ve\" / \"experiment.jsonnet\"\n self.ensure_model_can_train_save_and_load(param_file, overrides=overrides)\n\n def test_model_can_train_save_and_load_from_huggingface(self):\n param_file = FIXTURES_ROOT / \"vision\" / \"vilbert_ve\" / \"experiment_from_huggingface.jsonnet\"\n self.ensure_model_can_train_save_and_load(param_file)\n\n def test_model_loads_weights_correctly(self):\n from allennlp_models.vision.models.visual_entailment import VisualEntailmentModel\n\n vocab = Vocabulary()\n model_name = \"epwalsh/bert-xsmall-dummy\"\n model = VisualEntailmentModel.from_huggingface_model_name(\n vocab=vocab,\n model_name=model_name,\n image_feature_dim=2048,\n image_num_hidden_layers=1,\n image_hidden_size=3,\n image_num_attention_heads=1,\n combined_num_attention_heads=1,\n combined_hidden_size=5,\n pooled_output_dim=7,\n image_intermediate_size=11,\n image_attention_dropout=0.0,\n image_hidden_dropout=0.0,\n image_biattention_id=[0, 1],\n text_biattention_id=[0, 1],\n text_fixed_layer=0,\n image_fixed_layer=0,\n )\n\n transformer = AutoModel.from_pretrained(model_name)\n\n # compare embedding parameters\n assert_allclose(\n transformer.embeddings.word_embeddings.weight.data,\n model.backbone.text_embeddings.embeddings.word_embeddings.weight.data,\n )\n\n # compare encoder parameters\n assert_allclose(\n transformer.encoder.layer[0].intermediate.dense.weight.data,\n model.backbone.encoder.layers1[0].intermediate.dense.weight.data,\n )\n"
] | [
[
"torch.testing.assert_allclose"
]
] |
mhorn11/deepclr | [
"6ee21963a402776851950a51709eef849ff96b5f"
] | [
"deepclr/utils/metrics.py"
] | [
"from enum import auto\nfrom typing import Any, Callable, Dict, Optional\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ..config.config import Config, ConfigEnum\nfrom ..data.labels import LabelType\nfrom ..utils.tensor import prepare_tensor\nfrom .quaternion import qconjugate, qmult\n\n\nMetricFunction = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]\nGenericMetricFunction = Callable[[torch.Tensor, torch.Tensor, Optional[str]], torch.Tensor]\n\n\ndef _apply_reduction(x: torch.Tensor, reduction: Optional[str]) -> torch.Tensor:\n if reduction is None or reduction == 'none':\n return x\n if reduction == 'mean':\n return torch.mean(x)\n elif reduction == 'sum':\n return torch.sum(x)\n else:\n raise RuntimeError(f\"Unsupported reduction '{reduction}'\")\n\n\ndef _quat_norm(source: torch.Tensor, _target: torch.Tensor, label_type: LabelType,\n reduction: Optional[str] = 'mean') -> torch.Tensor:\n \"\"\"Quaternion norm of source tensor.\"\"\"\n if label_type == LabelType.POSE3D_QUAT:\n source_norm = torch.norm(source[:, 3:], p=2, dim=1, keepdim=True)\n elif label_type == LabelType.POSE3D_DUAL_QUAT:\n source_norm = torch.norm(source[:, :4], p=2, dim=1, keepdim=True)\n else:\n raise RuntimeError(\"Unsupported label type for this loss type\")\n\n return _apply_reduction(source_norm, reduction)\n\n\ndef _normalize(x: torch.Tensor, label_type: LabelType, eps: float = 1e-8) -> torch.Tensor:\n if label_type == LabelType.POSE3D_QUAT:\n x_norm = torch.norm(x[:, 3:], p=2, dim=1, keepdim=True) + eps\n x = torch.cat((x[:, :3], x[:, 3:] / x_norm), dim=1)\n return x\n elif label_type == LabelType.POSE3D_DUAL_QUAT:\n x_norm = torch.norm(x[:, :4], p=2, dim=1, keepdim=True) + eps\n x = x / x_norm\n return x\n else:\n raise RuntimeError(\"Unsupported label type for normalization\")\n\n\ndef trans_loss(source: torch.Tensor, target: torch.Tensor, label_type: LabelType, p: int = 2,\n reduction: Optional[str] = 'mean', eps: float = 1e-8) -> torch.Tensor:\n \"\"\"Translation (translation directly from label or dual quaternion vector) loss.\"\"\"\n if label_type == LabelType.POSE3D_EULER or label_type == LabelType.POSE3D_QUAT:\n source_trans = source[:, :3]\n target_trans = target[:, :3]\n\n elif label_type == LabelType.POSE3D_DUAL_QUAT:\n source = _normalize(source, label_type, eps)\n target = _normalize(target, label_type, eps)\n source_trans = source[:, 4:]\n target_trans = target[:, 4:]\n\n else:\n raise RuntimeError(\"Unsupported label type for this loss type.\")\n\n loss = torch.norm(source_trans - target_trans, dim=1, p=p, keepdim=True)\n return _apply_reduction(loss, reduction)\n\n\ndef trans_3d_loss(source: torch.Tensor, target: torch.Tensor, label_type: LabelType, p: int = 2,\n reduction: Optional[str] = 'mean', eps: float = 1e-8) -> torch.Tensor:\n \"\"\"Translation in 3D coordinates [x, y, z] loss.\"\"\"\n if label_type == LabelType.POSE3D_EULER or label_type == LabelType.POSE3D_QUAT:\n source_trans = source[:, :3]\n target_trans = target[:, :3]\n\n elif label_type == LabelType.POSE3D_DUAL_QUAT:\n # normalize dual quaternion\n source = _normalize(source, label_type, eps)\n target = _normalize(target, label_type, eps)\n\n # convert dual quaternion to translation vector\n source_trans_quat = 2.0 * qmult(source[:, 4:], qconjugate(source[:, :4]))\n target_trans_quat = 2.0 * qmult(target[:, 4:], qconjugate(target[:, :4]))\n source_trans = source_trans_quat[:, 1:]\n target_trans = target_trans_quat[:, 1:]\n\n else:\n raise RuntimeError(\"Unsupported label type for this loss type.\")\n\n loss = 
torch.norm(source_trans - target_trans, dim=1, p=p, keepdim=True)\n return _apply_reduction(loss, reduction)\n\n\ndef dual_loss(source: torch.Tensor, target: torch.Tensor, label_type: LabelType, p: int = 2,\n reduction: Optional[str] = 'mean', eps: float = 1e-8) -> torch.Tensor:\n \"\"\"Dual quaternion vector loss.\"\"\"\n if label_type == LabelType.POSE3D_QUAT:\n # translation quaternion\n source_trans_quat = source.new_zeros(source.shape[0], 4)\n source_trans_quat[:, 1:] = source[:, :3]\n target_trans_quat = target.new_zeros(target.shape[0], 4)\n target_trans_quat[:, 1:] = target[:, :3]\n\n # dual quaternions\n source_dual = 0.5 * qmult(source_trans_quat, source[:, 3:])\n target_dual = 0.5 * qmult(target_trans_quat, target[:, 3:])\n\n elif label_type == LabelType.POSE3D_DUAL_QUAT:\n source = _normalize(source, label_type, eps)\n target = _normalize(target, label_type, eps)\n source_dual = source[:, 4:]\n target_dual = target[:, 4:]\n\n else:\n raise RuntimeError(\"Unsupported label type for this loss type\")\n\n loss = torch.norm(source_dual - target_dual, dim=1, p=p, keepdim=True)\n return _apply_reduction(loss, reduction)\n\n\ndef rot_loss(source: torch.Tensor, target: torch.Tensor, label_type: LabelType, p: int = 2,\n reduction: Optional[str] = 'mean', eps: float = 1e-8) -> torch.Tensor:\n \"\"\"Rotation vector (either euler angles or quaternion vector) loss.\"\"\"\n if label_type == LabelType.POSE3D_EULER:\n source_rot = source[:, 3:]\n target_rot = target[:, 3:]\n\n elif label_type == LabelType.POSE3D_QUAT:\n source = _normalize(source, label_type, eps)\n target = _normalize(target, label_type, eps)\n source_rot = source[:, 3:]\n target_rot = target[:, 3:]\n\n elif label_type == LabelType.POSE3D_DUAL_QUAT:\n source = _normalize(source, label_type, eps)\n target = _normalize(target, label_type, eps)\n source_rot = source[:, :4]\n target_rot = target[:, :4]\n\n else:\n raise RuntimeError(\"Unsupported label type for this loss type\")\n\n loss = torch.norm(source_rot - target_rot, dim=1, p=p, keepdim=True)\n return _apply_reduction(loss, reduction)\n\n\ndef quat_norm_loss(source: torch.Tensor, target: torch.Tensor, label_type: LabelType,\n reduction: Optional[str] = 'mean') -> torch.Tensor:\n \"\"\"Quaternion norm loss.\"\"\"\n if label_type != LabelType.POSE3D_QUAT and label_type != LabelType.POSE3D_DUAL_QUAT:\n raise RuntimeError(\"Unsupported label type for this loss type.\")\n\n source_norm = _quat_norm(source, target, label_type, reduction=None)\n loss = torch.pow(1.0 - source_norm, 2)\n\n return _apply_reduction(loss, reduction)\n\n\ndef dual_constraint_loss(source: torch.Tensor, _target: torch.Tensor, label_type: LabelType,\n reduction: Optional[str] = 'mean', eps: float = 1e-8) -> torch.Tensor:\n \"\"\"Dual quaternion constraint loss.\"\"\"\n if label_type != LabelType.POSE3D_DUAL_QUAT:\n raise RuntimeError(\"Unsupported label type for this loss type.\")\n\n source = _normalize(source, label_type, eps)\n source_trans_quat = 2.0 * qmult(source[:, 4:], qconjugate(source[:, :4]))\n loss = torch.pow(source_trans_quat[:, 0], 2)\n\n return _apply_reduction(loss, reduction)\n\n\ndef _weighted_loss(metric_fn: GenericMetricFunction, source: torch.Tensor, target: torch.Tensor,\n weights: torch.Tensor) -> torch.Tensor:\n \"\"\"Weighted sum of loss function output.\"\"\"\n ret = metric_fn(source, target, 'none')\n return torch.sum(weights * torch.mean(ret, 0))\n\n\ndef _weighted_loss_fn(metric_fn: GenericMetricFunction, weights: Optional[torch.Tensor] = None) -> MetricFunction:\n 
\"\"\"Create weighted loss function.\"\"\"\n if weights is None:\n def func(source, target):\n return metric_fn(source, target, 'mean')\n return func\n else:\n def func(source, target):\n return _weighted_loss(metric_fn, source, target, weights) # type: ignore\n return func\n\n\nclass MetricType(ConfigEnum):\n \"\"\"Enum with all available loss types.\"\"\"\n MAE = auto()\n MSE = auto()\n TRANS = auto()\n TRANS_3D = auto()\n DUAL = auto()\n ROT = auto()\n QUAT_NORM = auto()\n DUAL_CONSTRAINT = auto()\n\n def fn(self, label_type: LabelType, weights: Optional[torch.Tensor] = None, **kwargs: Any) -> MetricFunction:\n func: Optional[GenericMetricFunction] = None\n\n if self == MetricType.MAE:\n def func(source, target, red): return F.l1_loss(source, target, reduction=red, **kwargs)\n elif self == MetricType.MSE:\n def func(source, target, red): return F.mse_loss(source, target, reduction=red, **kwargs)\n elif self == MetricType.TRANS:\n def func(source, target, red): return trans_loss(source, target, label_type, reduction=red, **kwargs)\n elif self == MetricType.TRANS_3D:\n def func(source, target, red): return trans_3d_loss(source, target, label_type, reduction=red, **kwargs)\n elif self == MetricType.DUAL:\n def func(source, target, red): return dual_loss(source, target, label_type, reduction=red, **kwargs)\n elif self == MetricType.ROT:\n def func(source, target, red): return rot_loss(source, target, label_type, reduction=red, **kwargs)\n elif self == MetricType.QUAT_NORM:\n def func(source, target, red): return quat_norm_loss(source, target, label_type, reduction=red)\n elif self == MetricType.DUAL_CONSTRAINT:\n def func(source, target, red): return dual_constraint_loss(source, target, label_type, reduction=red)\n\n if func is not None:\n return _weighted_loss_fn(func, weights)\n else:\n raise NotImplementedError(\"MetricType '{}' not implemented\".format(self))\n\n\ndef get_loss_fn(cfg: Config) -> MetricFunction:\n \"\"\"Create loss function from config.\"\"\"\n label_type = cfg.model.label_type\n\n # weights loss functions\n loss_functions = list()\n for metric_data in cfg.metrics.loss:\n # weights\n weights = metric_data['weights']\n if weights is not None:\n weights = prepare_tensor(torch.FloatTensor(weights), device=cfg.device, non_blocking=False)\n\n # function\n loss_functions.append(metric_data['type'].fn(label_type, weights=weights, **metric_data['params']))\n\n # sum weighted loss\n def func(source: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n loss = torch.stack([f(source, target) for f in loss_functions])\n return torch.sum(loss)\n\n return func\n\n\ndef get_metric_fns(cfg: Config) -> Dict[str, MetricFunction]:\n \"\"\"Create metric functions from config.\"\"\"\n metric_fns = dict()\n for metric_data in [*cfg.metrics.loss, *cfg.metrics.other]:\n params = metric_data['params'] if 'params' in metric_data else dict()\n metric_fns[metric_data['type'].name.lower()] = metric_data['type'].fn(cfg.model.label_type, **params)\n return metric_fns\n"
] | [
[
"torch.sum",
"torch.FloatTensor",
"torch.nn.functional.mse_loss",
"torch.pow",
"torch.nn.functional.l1_loss",
"torch.norm",
"torch.cat",
"torch.mean"
]
] |
Treestanx/pylivetrader | [
"7d3244e05996dae8a137ed257350783bdfbcf13d"
] | [
"examples/MACD/macd_example.py"
] | [
"from pylivetrader.api import order_target_percent, record, symbol\nimport pandas as pd\n\n\ndef initialize(context):\n # The initialize method is called at the very start of your script's\n # execution. You can set up anything you'll be needing later here. The\n # context argument will be received by all pylivetrader methods in\n # your script, and you can store information on it that you'd like to\n # share between methods.\n\n # This is the asset that we'll be trading.\n context.asset = symbol('AAPL')\n\n\ndef handle_data(context, data):\n # The handle_data method is called by pylivetrader every minute when\n # new data is received. This is where we'll execute our trading logic. For\n # an explanation of pylivetrader function scheduling, please see here:\n # https://github.com/alpacahq/pylivetrader#run.\n\n # Compute averages\n # data.history() will return a pandas dataframe with price information.\n # pandas' EWM method will give us our exponential moving averages.\n\n # Calculate short-term EMA (using data from the past 12 minutes.)\n short_periods = 12\n short_data = data.history(\n context.asset, 'price', bar_count=short_periods, frequency=\"1m\")\n short_ema = pd.Series.ewm(short_data, span=short_periods).mean().iloc[-1]\n # Calculate long-term EMA (using data from the past 26 minutes.)\n long_periods = 26\n long_data = data.history(\n context.asset, 'price', bar_count=long_periods, frequency=\"1m\")\n long_ema = pd.Series.ewm(long_data, span=long_periods).mean().iloc[-1]\n\n macd = short_ema - long_ema\n\n # Trading logic\n if macd > 0:\n # order_target_percent allocates a specified percentage of your\n # portfolio to a long position in a given asset. (A value of 1\n # means that 100% of your portfolio will be allocated.)\n order_target_percent(context.asset, 1)\n elif macd < 0:\n # You can supply a negative value to short an asset instead.\n order_target_percent(context.asset, -1)\n\n # Save values for later inspection\n record(AAPL=data.current(context.asset, 'price'),\n short_mavg=short_ema,\n long_mavg=long_ema)\n"
] | [
[
"pandas.Series.ewm"
]
] |
SZLSP/reid2020NAIC | [
"d0eaee768e0be606417a27ce5ea2b3071b5a9bc2"
] | [
"fastreid/layers/norm_layers/conditional_instance_norm2d.py"
] | [
"import torch\nimport torch.nn as nn\n\n\nclass ConditionalInstanceNorm2d(nn.Module):\n \"\"\"Conditional Instance Normalization\n Parameters\n num_features – C from an expected input of size (N, C, H, W)\n num_classes – Number of classes in the datset.\n bias – if set to True, adds a bias term to the embedding layer\n\n Shape:\n Input: (N, C, H, W) (N,)\n Output: (N, C, H, W) (same shape as input)\n\n Examples:\n >>> m = ConditionalInstanceNorm2d(100, 10)\n >>> input = torch.randn(20, 100, 35, 45)\n >>> label = torch.randint(10, (20,))\n >>> output = m(input, label)\n \"\"\"\n\n def __init__(self, num_features, num_classes, bias=True):\n super().__init__()\n self.num_features = num_features\n self.bias = bias\n self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)\n if bias:\n self.embed = nn.Embedding(num_classes, num_features * 2)\n self.embed.weight.data[:, :num_features].uniform_()\n self.embed.weight.data[:, num_features:].zero_()\n else:\n self.embed = nn.Embedding(num_classes, num_features)\n self.embed.weight.data.uniform_()\n\n def forward(self, x, y):\n h = self.instance_norm(x)\n if self.bias:\n gamma, beta = self.embed(y).chunk(2, dim=-1)\n out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)\n else:\n gamma = self.embed(y)\n out = gamma.view(-1, self.num_features, 1, 1) * h\n return out\n\n\nif __name__ == '__main__':\n m = ConditionalInstanceNorm2d(100, 10)\n input = torch.randn(20, 100, 35, 45)\n label = torch.randint(10, (20,))\n output = m(input, label)\n"
] | [
[
"torch.randn",
"torch.nn.Embedding",
"torch.randint",
"torch.nn.InstanceNorm2d"
]
] |
rahulbhadani/RNN | [
"47c288ba9f2ed17a593e6b61d0d867cb2d69c244"
] | [
"crypt_lstm.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Crypt_LSTM\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1guzTMibpzWywlckt9xu2gazsb-jifVDG\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nimport io\n# %matplotlib inline\n\n# Decode the files as uploaded file is a dictionary of keys and values\nbtc = pd.read_csv('/home/ivory/VersionControl/RNN/btc.csv')\n\n# select only the coloumn we need in the prediction process\ndata_to_use = btc['Close'].values\nbtc.head()\n\n# select bitcoin closing price as the target variable to predict\ndata_to_use=btc['Close'].values\ndata_to_use\n\n# data preprocessing using sklearn to scale our data and then fit into our model\nscaler = StandardScaler()\nscaled_data = scaler.fit_transform(data_to_use.reshape(-1,1))\n\n# Now we plot the data to see how the bitcoin close price tended over the given time period\nimport matplotlib.pyplot as plt\nfig, ax = plt.subplots()\nfig.set_size_inches(10.0, 6.0)\nax.grid(which='major', linestyle='-', linewidth='0.75', color='white')\nax.grid(which='minor', linestyle=':', linewidth='0.25', color='black')\nplt.title('Bitcoin prices from December 2014 to May 2018')\nax.set_facecolor((1.0, 0.17, 0.2, 0.35))\nplt.xlabel('Days')\n\nplt.ylabel('Scaled price of Bitcoin')\nplt.plot(scaled_data, label='Price')\n\nax.spines['bottom'].set_color('None')\nax.spines['top'].set_color('None') \nax.spines['right'].set_color('None')\nax.spines['left'].set_color('None')\nplt.legend()\nplt.show()\n\n# Features and label dataset\n# We define a function to create the features and labels for our dataset by windowing the data\n\ndef window_data(data, window_size):\n '''\n Input:\n data - this is the dataset we use\n window_size - how many datapoints we use to predict the next datapoint in the seqeunce. \n Output:\n X - features split into widnows of data points\n y - labels - this is the next number in the sequence that we are trying to predict\n '''\n X = []\n y = []\n i = 0\n while (i + window_size) <= len(data) - 1:\n X.append(data[i:i+window_size])\n y.append(data[i+window_size])\n \n i += 1\n assert len(X) == len(y)\n return X, y\n\nX, y = window_data(scaled_data, 7)\n\nX_train = np.array(X[:1018])\ny_train = np.array(y[:1018])\n\nX_test = np.array(X[1018:])\ny_test = np.array(y[1018:])\n\nprint('X_train size: {}'.format(X_train.shape))\n\nprint('y_train size: {}'.format(y_train.shape))\n\nprint('X_test size: {}'.format(X_test.shape))\n\nprint('y_test size: {}'.format(y_test.shape))\n\n# Define the network\nbatch_size = 7 #Number of windows of data we are passing at once\nwindow_size = 7 #Number of days we consider while predict the bitcoin price\nhidden_layer = 256 # The number of hidden layer /no. 
of units in our LSTM cell.\nclip_margin = 4 #This prevents exploding the gradient - use clipper to clip the gradient below this margin\nlearning_rate = 0.001 #Learning rate that aims to reduce the loss function\nepochs = 200 #The number of episodes our model needs to build itself\n\n#Placeholders for tensorflow\ninputs = tf.placeholder(tf.float32, [batch_size, window_size, 1])\ntargets = tf.placeholder(tf.float32, [batch_size, 1])\n\n# LSTM Weights\n# Since LSTM weights are determined by operation gates that includes forget, input and out gates,\n'''\nForget Gate\nf & = \\sigma(x_t U^f + s_{t-1}W^f)\nThis is a sigmoid layer that takes the outout at t-1 time and the current input at t time\nand combine them into a single tensor. Then, it applies a linear transformation followed\nby a sigmoid.\n\nOutput of the forget gate is between 0 to 1 s we have a sigmoid function. The output is\nmultiplied with the internal state and that is why the gate is called as the forget gate.\nIf f = 0, then the previous internal state is completely forgotten, if f = 1, then it will\nbe pass unaltered.\n'''\n\n'''\nInput Gate:\ni & = \\sigma(x_t U^i + s_{t-1}W^i)\nThis takes the previous output with a new input and passes them through a sigmoid layer.\nThe gate returns a value between 0 and 1. The value of the input gate is multiplied with\nthe output of the cadidate layer.\n'''\n\n'''\nCandidate hidden state:\ng & = \\tanh(x_tU^g + s_{t-1}W^g)\nApplies hyperbolic tangent to the mix of the input and previous output, returns the candidate\nvector. The candidate vector is then added to the internal state using the update rule:\nc_t & = c_{t-1} \\odot + g\\odot i\n\nThe previous state is multiplied by the forget gate and then added to the fraction of\nthe new candidate allowed by the output gate.\n'''\n\n'''\nOutput Gate:\no & = \\sigma(x_tU^o + s_{t-1}W^o)\ns_t & = \\tanh(c_t)\\odot o\n\nThis controls how much of the internal state is passed to the output and works in a similar\nfashion as other gates.\n\n'''\n# tf.truncated normal generates a normal distribution with specified mean and standard\n# deviation except that values whose magnitude is more than 2.0 standard deviation from\n# the mean are dropped and re-picked. 
Default mean is 0.0\n\n#Weights for the input gate\nweights_input_gate = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))\nweights_input_hidden = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))\nbias_input = tf.Variable(tf.zeros([hidden_layer]))\n\n#weights for the forgot gate\nweights_forget_gate = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))\nweights_forget_hidden = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))\nbias_forget = tf.Variable(tf.zeros([hidden_layer]))\n\n#weights for the output gate\nweights_output_gate = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))\nweights_output_hidden = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))\nbias_output = tf.Variable(tf.zeros([hidden_layer]))\n\n#weights for the memory cell\nweights_memory_cell = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))\nweights_memory_cell_hidden = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))\nbias_memory_cell = tf.Variable(tf.zeros([hidden_layer]))\n\n#Output layer weigts\nweights_output = tf.Variable(tf.truncated_normal([hidden_layer, 1], stddev=0.05))\nbias_output_layer = tf.Variable(tf.zeros([1]))\n\ndef LSTM_cell(input, output, state):\n input_gate = tf.sigmoid(tf.matmul(input, weights_input_gate) \n + tf.matmul(output, weights_input_hidden) \n + bias_input) #i & = \\sigma(x_t U^i + s_{t-1}W^i)\n forget_gate = tf.sigmoid(tf.matmul(input, weights_forget_gate) \n + tf.matmul(output, weights_forget_hidden) \n + bias_forget) #f & = \\sigma(x_t U^f + s_{t-1}W^f)\n output_gate = tf.sigmoid(tf.matmul(input, weights_output_gate) \n + tf.matmul(output, weights_output_hidden) \n + bias_output) #\\sigma(x_tU^o + s_{t-1}W^o)\n memory_cell = tf.tanh(tf.matmul(input, weights_memory_cell) \n + tf.matmul(output, weights_memory_cell_hidden) \n + bias_memory_cell) #\\tanh(x_tU^g + s_{t-1}W^g)\n \n state = state*forget_gate + input_gate*memory_cell #c_{t-1} \\odot + g\\odot i\n \n output = output_gate*tf.tanh(state)\n \n return state, output\n\n# Network loop \n# A loop for network is created which iterates through every window in the batch\n# creating the `batch_states` as all zeros. 
The output is used for predicting\n# the bitcoin price\n\noutputs = []\n\n#iterates through every window in the batch:\nfor i in range(batch_size):\n #for each batch, I am creating batch_state as all zeros and output for that\n # window which is all zeros at the beginning as well\n batch_state = np.zeros([1, hidden_layer], dtype=np.float32)\n batch_output = np.zeros([1, hidden_layer], dtype=np.float32)\n \n # for each point in the window we are feeding into LSTM to get next output\n for ii in range(window_size):\n batch_state, batch_output = LSTM_cell(tf.reshape(inputs[i][ii], (-1, 1)), \n batch_state, batch_output)\n \n #Last output is considered and used to get a prediction\n outputs.append(tf.matmul(batch_output, weights_output) + bias_output_layer)\n \noutputs\n\n# Define Loss\n# We use `mean_squared_error` function for the loss to minimize the errors.\n\nlosses = []\nfor i in range(len(outputs)):\n losses.append(tf.losses.mean_squared_error(\n tf.reshape(targets[i], (-1, 1)), outputs[i]))\n \n# Computes the mean of elements across dimensions of a tensor.\nloss = tf.reduce_mean(losses) \n\n# We define the optimizer with gradient clipping\ngradients = tf.gradients(loss, tf.trainable_variables())\n\n# Clips values of multiple tensors by the ratio of the sum of their norms.\nclipped, _ = tf.clip_by_global_norm(gradients, clip_margin)\n\noptimizer = tf.train.AdamOptimizer(learning_rate)\ntrained_optimizer = optimizer.apply_gradients(zip(gradients, tf.trainable_variables()))\n\n# Train the network\n# We now train the network with the number of epochs = 200 and then observe\n# change in our loss through the time.\n# The current loss decreases with the increase in the epochs as observed\n# increasing our model accuracy in predicting the bitcoin prices\nsession = tf.Session()\nsession.run(tf.global_variables_initializer())\n\nfor i in range(epochs):\n traind_scores = []\n ii = 0\n epoch_loss = []\n while(ii+batch_size) <= len(X_train):\n X_batch = X_train[ii:ii+batch_size]\n y_batch = y_train[ii:ii+batch_size]\n \n o, c, _ = session.run([outputs, loss, trained_optimizer], \n feed_dict = {inputs:X_batch, targets:y_batch})\n \n epoch_loss.append(c)\n traind_scores.append(o)\n ii += batch_size\n if (i%30) == 0:\n print('Epoch {}/{}'.format(i, epochs), \n 'Current loss: {}'.format(np.mean(epoch_loss)))\n\nsup = []\nfor i in range(len(traind_scores)):\n for j in range(len(traind_scores[i])):\n sup.append(traind_scores[i][j][0])\n \n\ntests = []\ni = 0\nwhile i+batch_size <= len(X_test):\n o = session.run([outputs], feed_dict={inputs:X_test[i:i+batch_size]})\n i += batch_size\n tests.append(o)\n \ntests_new = []\nfor i in range(len(tests)):\n for j in range(len(tests[i][0])):\n tests_new.append(tests[i][0][j])\n\ntest_results = []\nfor i in range(1264):\n if i >= 1019:\n test_results.append(tests_new[i-1019])\n else:\n test_results.append(None)\n\n# Plotting the predictions\nfig, ax = plt.subplots()\nfig.set_size_inches(10.0, 6.0)\nax.grid(which='major', linestyle='-', linewidth='0.75', color='white')\nax.grid(which='minor', linestyle=':', linewidth='0.25', color='black')\nplt.title('Bitcoin prices from December 2014 to May 2018 - Prediction Plot')\nax.set_facecolor((1.0, 0.17, 0.2, 0.35))\nplt.xlabel('Days')\n\nplt.ylabel('Scaled price of Bitcoin')\nplt.plot(scaled_data, label='Price')\nplt.plot(sup, label='Training data')\nplt.plot(test_results, label='Testing data')\nax.spines['bottom'].set_color('None')\nax.spines['top'].set_color('None') 
\nax.spines['right'].set_color('None')\nax.spines['left'].set_color('None')\nplt.legend()\nplt.show()\n\n"
] | [
[
"tensorflow.reshape",
"tensorflow.matmul",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"tensorflow.global_variables_initializer",
"tensorflow.clip_by_global_norm",
"matplotlib.pyplot.title",
"numpy.mean",
"numpy.zeros",
"pandas.read_csv",
"tensorflow.tanh",
"matplotlib.pyplot.subplots",
"tensorflow.Session",
"sklearn.preprocessing.StandardScaler",
"tensorflow.placeholder",
"matplotlib.pyplot.legend",
"tensorflow.zeros",
"tensorflow.truncated_normal",
"tensorflow.train.AdamOptimizer",
"tensorflow.reduce_mean",
"tensorflow.trainable_variables",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
ktruong14/web_scraping_challenge | [
"312714639ea4d3d6d5dcc265620aa0e6d4ea2d29"
] | [
"scrape_mars.py"
] | [
"from splinter import Browser\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport time\n\n\ndef init_browser():\n executable_path = {'executable_path': 'chromedriver.exe'}\n browser = Browser('chrome', **executable_path, headless=False)\n return browser\n\n\ndef marsNews():\n browser = init_browser()\n url = 'https://mars.nasa.gov/news/'\n browser.visit(url)\n time.sleep(10)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n news_title = soup.find_all('div', class_='content_title')\n paragraph_text = soup.find_all('div', class_='article_teaser_body')\n latest_news_title = news_title[1].text\n latest_paragraph_text = paragraph_text[0].text\n return latest_news_title, latest_paragraph_text\n\n\ndef marsImage():\n browser = init_browser()\n url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser.visit(url)\n time.sleep(10)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n image_link = soup.find('article')['style'].replace(\n 'background-image: url(', '').replace(');', '')[1:-1]\n featured_image_url = 'https://www.jpl.nasa.gov' + image_link\n return featured_image_url\n\n\ndef marsFacts():\n browser = init_browser()\n url = 'https://space-facts.com/mars/'\n browser.visit(url)\n time.sleep(10)\n mars_facts = pd.read_html(url)\n mars_facts = mars_facts[0].to_html()\n return mars_facts\n\n\ndef marsHemisphere():\n browser = init_browser()\n url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n base_url = 'https://astrogeology.usgs.gov'\n browser.visit(url)\n time.sleep(10)\n html = browser.html\n soup = BeautifulSoup(html,'html.parser')\n title_img_url = []\n \n items = soup.find_all('div',class_='item')\n\n for item in items:\n title = item.find('h3').text\n link = item.find('a', class_='itemLink product-item')['href']\n browser.visit(base_url+link)\n html = browser.html\n soup = BeautifulSoup(html,\"html.parser\")\n img_link = base_url + soup.find('img',class_='wide-image')['src']\n title_img_url.append({\"Title\":title,\"Img_url\":img_link})\n\n return title_img_url\n\ndef scrape_info():\n scraped_data = {}\n scraped_data['mars_title'], scraped_data['mars_paragraph'] = marsNews()\n scraped_data['mars_featured_image'] = marsImage()\n scraped_data['mars_facts'] = marsFacts()\n scraped_data['mars_hemisphere'] = marsHemisphere()\n\n return scraped_data\n"
] | [
[
"pandas.read_html"
]
] |
snumrl/skate | [
"a57ec2dc81dc2502da8886b92b870d2c8d65b838"
] | [
"angle2bvh.py"
] | [
"import numpy as np\nfrom PyCommon.modules.Math import mmMath as mm\nimport math\nimport os\nimport bvh\nfrom scipy.spatial.transform import Rotation\n\nclass MakeBvh(object):\n def __init__(self):\n self.skel = None\n self.joint_name = None\n\n def angle2bvh(self):\n file_dir = 'data/mocap/movingcam/'\n # fn = 'bc_small'\n # fn = 'backflip_a'\n fn = 'jump_result'\n\n f_name = file_dir + fn + '_angle.txt'\n\n # line_num = 0\n\n # read joint angles of each key pose frame from txt file\n angles = []\n\n with open(f_name) as f:\n for line in f:\n line = line.replace(' \\n', '')\n values = line.split(\" \")\n # print(values, type(values))\n angle = []\n for val in values:\n angle.append(float(val))\n # print(\"angle: \", type(angle))\n angles.append(np.asarray(angle))\n # line_num = line_num + 1\n\n # make bvh file\n\n # 0:6 #pelvis\n # 15, 16, 17 # right leg\n # 18, 19, 20\n # 21, 22, 23\n # zero for toes\n # 24, 25, 26 #spine\n # 27, 28, 29\n # 30, 31, 32\n # 33, 34, 35, # left arm\n # 36, 37, 38\n # 39, 40, 41\n # 42, 43, 44\n # 45, 46, 47 #right arm\n # 48, 49, 50\n # 51, 52, 53\n # 54, 55, 56\n # 6, 7, 8 #left leg\n # 9, 10, 11\n # 12, 13, 14\n # zero for toes\n with open('/home/yuri/PycharmProjects/skate/SkateUtils/skeleton.bvh', 'r') as f:\n skeleton_info = f.read()\n mybvh = bvh.Bvh(skeleton_info)\n self.joint_name = mybvh.get_joints_names()\n\n # print(self.joint_name)\n output_f_name = fn + \".bvh\"\n\n with open(output_f_name, 'w') as f:\n f.write(skeleton_info)\n f.write('MOTION\\r\\n')\n f.write('Frames: '+str(len(angles))+'\\r\\n')\n f.write('Frame Time: 0.0333333\\r\\n')\n t_pose_angles = [0. for _ in range(63)]\n t_pose_angles[1] = 104.721\n f.write(' '.join(map(str, t_pose_angles))+'\\r\\n')\n\n smpl_names = [\n 'Left_Hip', 'Right_Hip', 'Waist', 'Left_Knee', 'Right_Knee',\n 'Upper_Waist', 'Left_Ankle', 'Right_Ankle', 'Chest',\n 'Left_Toe', 'Right_Toe', 'Base_Neck', 'Left_Shoulder',\n 'Right_Shoulder', 'Upper_Neck', 'Left_Arm', 'Right_Arm',\n 'Left_Elbow', 'Right_Elbow', 'Left_Wrist', 'Right_Wrist',\n 'Left_Finger', 'Right_Finger'\n ]\n\n index_list = [0, 2, 5, 8, 11, 3, 12, 15, 13, 16, 18, 20, 14, 17, 19, 21, 1, 4, 7, 10]\n # index_list = [2, 5, 8, 11, 3, 12, 15, 13, 16, 18, 20, 14, 17, 19, 21, 1, 4, 7, 10, 0]\n # 23 joints + root orientation-> root translation + orientation +19 joints\n # 69 + 3 dofs -> 57 dofs + 3+3\n\n for q in angles:\n euler_middle_q = np.asarray(q)\n # for joit_i in range(int(len(q) / 3)):\n # if joit_i != 1:\n # temp_axis_angle = np.asarray([q[3*joit_i], q[3*joit_i+1], q[3*joit_i+2]])\n # r_offset = np.eye(3)\n # if 'scapular_left' in skel.dof(3*joit_i).name:\n # r_offset = mm.rotX(-0.9423)\n # elif 'scapular_right' in skel.dof(3*joit_i).name:\n # r_offset = mm.rotX(0.9423)\n # elif 'bicep_left' in skel.dof(3*joit_i).name:\n # r_offset = mm.rotX(-1.2423)\n # elif 'bicep_right' in skel.dof(3*joit_i).name:\n # r_offset = mm.rotX(1.2423)\n # euler_result = axis2Euler(temp_axis_angle, r_offset)\n # euler_middle_q[3*joit_i:3*joit_i+3] = euler_result\n\n # print(\"middle_q:\", euler_middle_q)\n # convert axis angle to euler angle\n euler_q = np.zeros(60)\n for i in range(len(index_list)):\n ci = 3*index_list[i] # corresponding_index\n # print(i, ci)\n axis_angle = np.asarray([euler_middle_q[ci], euler_middle_q[ci+1], euler_middle_q[ci+2]])\n # axis_angle = np.asarray([euler_middle_q[i], euler_middle_q[i + 1], euler_middle_q[i + 2]])\n euler_q[3*i:3*i+3] = 180./math.pi*Rotation.from_rotvec(axis_angle).as_euler('ZXY')\n\n # for i in 
range(len(euler_middle_q)/3):\n # temp = np.asarray([euler_middle_q[i], euler_middle_q[i+1], euler_middle_q[i+2]])\n # euler_q[i:i+3] = Rotation.from_rotvec(temp).as_euler('ZXY')\n # i = i + 3\n\n # euler_q[0:3] = euler_middle_q[0:3]\n # euler_q = np.zeros(len(q)+6) # add two toes dofs (6 = 3x2)\n # euler_q[0:3] = np.dot(mm.rotY(-math.pi / 2.), q[3:6] * 100.) + np.array([0., 104.721, 0.])\n # euler_q[3:6] = euler_middle_q[0:3]\n # euler_q[6:15] = euler_middle_q[15:24]\n # euler_q[15:18] = np.zeros(3)\n # euler_q[18:51] = euler_middle_q[24:]\n # euler_q[51:60] = euler_middle_q[6:15]\n # euler_q[60:63] = np.zeros(3)\n\n t_euler_q = np.zeros(63)\n t_euler_q[1] = 104.721\n t_euler_q[3:63] = euler_q\n # t_euler_q[3:4] = -1.*t_euler_q[3:4]\n f.write(' '.join(map(str, t_euler_q)))\n f.write('\\r\\n')\n\nif __name__ == '__main__':\n mybvh = MakeBvh()\n mybvh.angle2bvh()"
] | [
[
"scipy.spatial.transform.Rotation.from_rotvec",
"numpy.asarray",
"numpy.zeros"
]
] |
koralturkk/scikit-opt | [
"f62cd7f73d8b355f6d3d1865366794416268460b"
] | [
"examples/demo_sa.py"
] | [
"demo_func = lambda x: x[0] ** 2 + (x[1] - 0.05) ** 2 + x[2] ** 2\n\n# %% Do SA\nfrom sko.SA import SA\n\nsa = SA(func=demo_func, x0=[1, 1, 1], T_max=1, T_min=1e-9, L=300, max_stay_counter=150)\nbest_x, best_y = sa.run()\nprint('best_x:', best_x, 'best_y', best_y)\n\n# %% Plot the result\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nplt.plot(pd.DataFrame(sa.best_y_history).cummin(axis=0))\nplt.show()\n\n# %%\nfrom sko.SA import SAFast\n\nsa_fast = SAFast(func=demo_func, x0=[1, 1, 1], T_max=1, T_min=1e-9, q=0.99, L=300, max_stay_counter=150)\nsa_fast.run()\nprint('Fast Simulated Annealing: best_x is ', sa_fast.best_x, 'best_y is ', sa_fast.best_y)\n\n# %%\nfrom sko.SA import SABoltzmann\n\nsa_boltzmann = SABoltzmann(func=demo_func, x0=[1, 1, 1], T_max=1, T_min=1e-9, q=0.99, L=300, max_stay_counter=150)\nsa_boltzmann.run()\nprint('Boltzmann Simulated Annealing: best_x is ', sa_boltzmann.best_x, 'best_y is ', sa_fast.best_y)\n\n# %%\nfrom sko.SA import SACauchy\n\nsa_cauchy = SACauchy(func=demo_func, x0=[1, 1, 1], T_max=1, T_min=1e-9, q=0.99, L=300, max_stay_counter=150)\nsa_cauchy.run()\nprint('Cauchy Simulated Annealing: best_x is ', sa_cauchy.best_x, 'best_y is ', sa_cauchy.best_y)\n"
] | [
[
"matplotlib.pyplot.show",
"pandas.DataFrame"
]
] |
liuandrew/training-rl-algo | [
"ca56d65209de0bf88ac1e1db2269bb7daac4da47"
] | [
"a2c_ppo_acktr/model.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport gym\n\nfrom a2c_ppo_acktr.distributions import Bernoulli, Categorical, DiagGaussian\nfrom a2c_ppo_acktr.utils import init\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass Policy(nn.Module):\n def __init__(self, obs_shape, action_space, base=None, base_kwargs=None):\n super(Policy, self).__init__()\n if base_kwargs is None:\n base_kwargs = {}\n if base is None:\n if len(obs_shape) == 3:\n base = CNNBase\n elif len(obs_shape) == 1:\n base = MLPBase\n else:\n raise NotImplementedError\n elif type(base) == str:\n #Attempt to find a base class with the given string\n base = globals()[base]\n\n self.base = base(obs_shape[0], **base_kwargs)\n\n if action_space.__class__.__name__ == \"Discrete\":\n num_outputs = action_space.n\n self.dist = Categorical(self.base.output_size, num_outputs)\n elif action_space.__class__.__name__ == \"Box\":\n num_outputs = action_space.shape[0]\n self.dist = DiagGaussian(self.base.output_size, num_outputs)\n elif action_space.__class__.__name__ == \"MultiBinary\":\n num_outputs = action_space.shape[0]\n self.dist = Bernoulli(self.base.output_size, num_outputs)\n else:\n raise NotImplementedError\n\n @property\n def is_recurrent(self):\n return self.base.is_recurrent\n\n @property\n def recurrent_hidden_state_size(self):\n \"\"\"Size of rnn_hx.\"\"\"\n return self.base.recurrent_hidden_state_size\n\n #Andy: add auxiliary check\n @property\n def has_auxiliary(self):\n return self.base.has_auxiliary\n\n @property\n def auxiliary_output_size(self):\n if self.base.has_auxiliary:\n return self.base.auxiliary_output_size\n else:\n return 1\n \n \n\n def forward(self, inputs, rnn_hxs, masks):\n raise NotImplementedError\n\n def act(self, inputs, rnn_hxs, masks, deterministic=False):\n if self.base.has_auxiliary:\n value, actor_features, rnn_hxs, auxiliary = \\\n self.base(inputs, rnn_hxs, masks, deterministic)\n else:\n value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)\n # if no auxiliary output, storage will expect an output 0\n # with shape the same as value\n auxiliary = torch.zeros(value.shape)\n\n dist = self.dist(actor_features)\n\n if deterministic:\n action = dist.mode()\n else:\n action = dist.sample()\n\n action_log_probs = dist.log_probs(action)\n dist_entropy = dist.entropy().mean()\n\n return value, action, action_log_probs, rnn_hxs, auxiliary\n\n def get_value(self, inputs, rnn_hxs, masks):\n outputs = self.base(inputs, rnn_hxs, masks)\n value = outputs[0]\n return value\n\n def evaluate_actions(self, inputs, rnn_hxs, masks, action):\n if self.base.has_auxiliary:\n value, actor_features, rnn_hxs, auxiliary = \\\n self.base(inputs, rnn_hxs, masks)\n else:\n value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)\n auxiliary = torch.zeros(value.shape)\n\n dist = self.dist(actor_features)\n\n action_log_probs = dist.log_probs(action)\n dist_entropy = dist.entropy().mean()\n\n return value, action_log_probs, dist_entropy, rnn_hxs, auxiliary\n\n\nclass NNBase(nn.Module):\n def __init__(self, recurrent, recurrent_input_size, hidden_size):\n super(NNBase, self).__init__()\n\n self._hidden_size = hidden_size\n self._recurrent = recurrent\n #Andy: default the NNBase to having no auxiliary outputs\n self.has_auxiliary = False \n self.auxiliary_output_size = 0\n\n if recurrent:\n self.gru = nn.GRU(recurrent_input_size, hidden_size)\n for name, param in self.gru.named_parameters():\n if 'bias' in 
name:\n nn.init.constant_(param, 0)\n elif 'weight' in name:\n nn.init.orthogonal_(param)\n\n @property\n def is_recurrent(self):\n return self._recurrent\n\n @property\n def recurrent_hidden_state_size(self):\n if self._recurrent:\n return self._hidden_size\n return 1\n\n @property\n def output_size(self):\n return self._hidden_size\n\n def _forward_gru(self, x, hxs, masks):\n if x.size(0) == hxs.size(0):\n x, hxs = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))\n x = x.squeeze(0)\n hxs = hxs.squeeze(0)\n else:\n # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)\n N = hxs.size(0)\n T = int(x.size(0) / N)\n\n # unflatten\n x = x.view(T, N, x.size(1))\n\n # Same deal with masks\n masks = masks.view(T, N)\n\n # Let's figure out which steps in the sequence have a zero for any agent\n # We will always assume t=0 has a zero in it as that makes the logic cleaner\n has_zeros = ((masks[1:] == 0.0) \\\n .any(dim=-1)\n .nonzero()\n .squeeze()\n .cpu())\n\n # +1 to correct the masks[1:]\n if has_zeros.dim() == 0:\n # Deal with scalar\n has_zeros = [has_zeros.item() + 1]\n else:\n has_zeros = (has_zeros + 1).numpy().tolist()\n\n # add t=0 and t=T to the list\n has_zeros = [0] + has_zeros + [T]\n\n hxs = hxs.unsqueeze(0)\n outputs = []\n for i in range(len(has_zeros) - 1):\n # We can now process steps that don't have any zeros in masks together!\n # This is much faster\n start_idx = has_zeros[i]\n end_idx = has_zeros[i + 1]\n\n rnn_scores, hxs = self.gru(\n x[start_idx:end_idx],\n hxs * masks[start_idx].view(1, -1, 1))\n\n outputs.append(rnn_scores)\n\n # assert len(outputs) == T\n # x is a (T, N, -1) tensor\n x = torch.cat(outputs, dim=0)\n # flatten\n x = x.view(T * N, -1)\n hxs = hxs.squeeze(0)\n\n return x, hxs\n\n\nclass CNNBase(NNBase):\n def __init__(self, num_inputs, recurrent=False, hidden_size=512):\n super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), nn.init.calculate_gain('relu'))\n\n self.main = nn.Sequential(\n init_(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),\n init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),\n init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(),\n init_(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU())\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0))\n\n self.critic_linear = init_(nn.Linear(hidden_size, 1))\n\n self.train()\n\n def forward(self, inputs, rnn_hxs, masks):\n x = self.main(inputs / 255.0)\n\n if self.is_recurrent:\n x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)\n\n return self.critic_linear(x), x, rnn_hxs\n\n\nclass MLPBase(NNBase):\n def __init__(self, num_inputs, recurrent=False, hidden_size=64):\n super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)\n\n if recurrent:\n num_inputs = hidden_size\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), np.sqrt(2))\n\n self.actor = nn.Sequential(\n init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())\n\n self.critic = nn.Sequential(\n init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())\n\n self.critic_linear = init_(nn.Linear(hidden_size, 1))\n\n self.train()\n\n def forward(self, inputs, rnn_hxs, masks):\n x = inputs\n\n if self.is_recurrent:\n x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)\n\n hidden_critic = self.critic(x)\n hidden_actor = self.actor(x)\n\n 
return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs\n\n\n\n\n\n\n#Andy: Add FlexBase currently for allowing shared layers between\n# actor and critic\nclass FlexBaseOld(NNBase):\n '''\n NN module that allows for shared actor and critic layers as well\n as varying number of output heads\n \n num_layers: how many hidden MLP layers from input (or from GRU) to output heads\n num_shared_layers: how many of these MLP layers should be shared between actor and critic \n -1 means all layers should be shared\n '''\n def __init__(self, num_inputs, recurrent=True, hidden_size=64,\n num_layers=2, num_shared_layers=-1):\n super(FlexBaseOld, self).__init__(recurrent, num_inputs, hidden_size)\n \n print('Using FlexBase')\n print('num shared layers is ' + str(num_shared_layers))\n\n if recurrent:\n num_inputs = hidden_size\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), np.sqrt(2))\n \n shared_layers = []\n critic_layers = []\n actor_layers = []\n \n # generate all the shared layers\n cur_shared_layers = 0\n in_dim = num_inputs\n for i in range(num_layers):\n if num_shared_layers == -1 or cur_shared_layers < num_shared_layers:\n shared_layers.append(init_(nn.Linear(in_dim, hidden_size)))\n shared_layers.append(nn.Tanh())\n in_dim = hidden_size # only first layer with have input size num_inputs\n cur_shared_layers += 1\n \n # generate the non-shared layers\n if num_shared_layers != -1:\n remaining_layers = num_layers - num_shared_layers\n else:\n remaining_layers = 0\n \n for i in range(remaining_layers):\n critic_layers.append(init_(nn.Linear(in_dim, hidden_size)))\n critic_layers.append(nn.Tanh())\n actor_layers.append(init_(nn.Linear(in_dim, hidden_size)))\n actor_layers.append(nn.Tanh())\n in_dim = hidden_size # only first layer with have input size num_inputs\n \n # finally create the critic linear output\n critic_layers.append(init_(nn.Linear(in_dim, 1)))\n \n if len(shared_layers) > 0:\n self.shared_layers = nn.Sequential(*shared_layers)\n else:\n self.shared_layers = None\n \n if len(actor_layers) > 0:\n self.actor_layers = nn.Sequential(*actor_layers)\n else:\n self.actor_layers = None\n \n if len(critic_layers) > 0:\n self.critic_layers = nn.Sequential(*critic_layers)\n else:\n self.critic_layers = None\n \n self.train()\n \n def forward(self, inputs, rnn_hxs, masks):\n x = inputs\n \n if self.is_recurrent:\n x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)\n \n if self.shared_layers:\n x = self.shared_layers(x)\n \n if self.actor_layers:\n hidden_actor = self.actor_layers(x)\n else:\n # if all layers are shared between actor and critic,\n # the last output of shared layers will be x\n # which will be used by the dist function in Policy (model.py)\n hidden_actor = x\n \n if self.critic_layers:\n # this should always run since we will output the critic evaluation here\n critic_val = self.critic_layers(x)\n else:\n raise Exception('Something mysterious happened... 
there was no final critic head')\n \n return critic_val, hidden_actor, rnn_hxs\n\n\n\n\n\n# To totally customize where auxiliary tasks are attached, lets split up the shared layers\n# into individually activatable (self.shared_layers becomes a list of nn.Sequentials) ones\nclass FlexBase(NNBase):\n '''\n NN module that allows for shared actor and critic layers as well\n as varying number of output heads\n \n num_layers: how many hidden MLP layers from input (or from GRU) to output heads\n num_shared_layers: how many of these MLP layers should be shared between actor and critic \n -1 means all layers should be shared\n \n Auxiliary Tasks:\n To add auxiliary heads, we will choose which layers each auxiliary head will be attached to\n For each auxiliary task we need:\n * Depth of layer to attach\n * Whether to attach to actor or critic side\n * Type of output (here we can use Gym spaces and distributions as the Policy does, or linear output)\n Thus each entry to auxiliary_heads should be\n [(depth: -1 is last, 1 is after first layer (1. recurrent layer, 2. hidden, etc.),\n side: 0:actor or 1:critic, -1: if we expect to be on shared layers\n output: gym.spaces.Discrete, gym.spaces.Box, or int representing output dimension for linear output)]\n \n\n !IMPORTANT - to use Gym distributions and such, we will need to adjust code further\n specifically looking into allowing to pass the predicted outputs in \n evaluate_actions in Policy from PPO algorithm, and getting log_probs\n to get loss from. Linear loss is easier to code in so this is what we\n will focus on for now\n In other words, Distributions are not ready to be used as auxiliary\n tasks yet\n '''\n def __init__(self, num_inputs, recurrent=True, hidden_size=64,\n num_layers=2, num_shared_layers=-1, auxiliary_heads=[]):\n super(FlexBase, self).__init__(recurrent, num_inputs, hidden_size)\n \n self.num_layers = num_layers\n self.auxiliary_heads = auxiliary_heads\n self.has_auxiliary = True\n\n if recurrent:\n num_inputs = hidden_size\n self.num_layers += 1\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), np.sqrt(2))\n \n shared_layers = []\n critic_layers = []\n actor_layers = []\n \n self.shared_layers = []\n self.critic_layers = []\n self.actor_layers = []\n \n # generate all the shared layers\n cur_shared_layers = 0\n in_dim = num_inputs\n for i in range(num_layers):\n if num_shared_layers == -1 or cur_shared_layers < num_shared_layers:\n setattr(self, 'shared'+str(i), nn.Sequential(\n init_(nn.Linear(in_dim, hidden_size)),\n nn.Tanh()\n ))\n self.shared_layers.append(getattr(self, 'shared'+str(i)))\n in_dim = hidden_size # only first layer with have input size num_inputs\n cur_shared_layers += 1\n \n # generate the non-shared layers\n if num_shared_layers != -1:\n remaining_layers = num_layers - num_shared_layers\n else:\n remaining_layers = 0\n \n for i in range(remaining_layers): \n setattr(self, 'critic'+str(i), nn.Sequential(\n init_(nn.Linear(in_dim, hidden_size)),\n nn.Tanh()\n ))\n setattr(self, 'actor'+str(i), nn.Sequential(\n init_(nn.Linear(in_dim, hidden_size)),\n nn.Tanh()\n ))\n \n self.critic_layers.append(getattr(self, 'critic'+str(i)))\n self.actor_layers.append(getattr(self, 'actor'+str(i)))\n\n in_dim = hidden_size # only first layer with have input size num_inputs\n \n # finally create the critic linear output\n# critic_layers.append(init_(nn.Linear(in_dim, 1)))\n self.critic_head = init_(nn.Linear(in_dim, 1))\n self.critic_layers.append(self.critic_head)\n \n \n 
self.auxiliary_layers = []\n self.auxiliary_output_idxs = [] # indexes for generating auxiliary outputs\n self.auxiliary_layer_types = [] # 0 linear, 1 distribution\n self.auxiliary_output_size = 0\n # generate auxiliary outputs\n current_auxiliary_output_idx = 0\n for i, head in enumerate(auxiliary_heads):\n depth = head[0]\n if depth == -1:\n depth = self.num_layers\n side = head[1]\n output_type = head[2]\n self.auxiliary_output_idxs.append(current_auxiliary_output_idx)\n if depth == 0:\n raise Exception('Auxiliary task requesting depth of 0')\n if depth > self.num_layers:\n raise Exception('Auxiliary task requesting depth greater than exists in network (head[0])')\n if side > 1:\n raise Exception('Auxiliary task requesting side that is not 0 (actor) or 1 (critic)')\n total_shared_layers = num_shared_layers\n if recurrent: \n total_shared_layers += 1\n\n if side == -1:\n if depth > total_shared_layers:\n raise Exception('Auxiliary task expects to be on shared layers, but is assigned to layers past shared')\n else:\n if depth <= total_shared_layers:\n raise Exception('Auxiliary task expects to be on individual layers, but is assigned to shared depth')\n \n if type(output_type) == int:\n # linear output\n layer = init_(nn.Linear(hidden_size, output_type))\n self.auxiliary_output_size += output_type\n self.auxiliary_layer_types.append(0)\n current_auxiliary_output_idx += output_type\n elif hasattr(output_type, '__class__'):\n # output based on gym space\n # code taken from Policy to implement a dist function\n if output_type.__class__.__name__ == \"Discrete\":\n num_outputs = output_type.n\n layer = Categorical(hidden_size, num_outputs)\n num_outputs = 1\n elif output_type.__class__.__name__ == \"Box\":\n num_outputs = output_type.shape[0]\n layer = DiagGaussian(hidden_size, num_outputs)\n elif output_type.__class__.__name__ == \"MultiBinary\":\n num_outputs = output_type.shape[0]\n layer = Bernoulli(hidden_size, num_outputs)\n else:\n raise NotImplementedError\n self.auxiliary_output_size += num_outputs\n self.auxiliary_layer_types.append(1)\n current_auxiliary_output_idx += num_outputs\n \n setattr(self, 'auxiliary'+str(i), layer)\n self.auxiliary_layers.append(getattr(self, 'auxiliary'+str(i)))\n \n if self.auxiliary_output_size == 0:\n self.auxiliary_output_size = 1\n self.has_auxiliary = False\n self.train()\n \n def forward(self, inputs, rnn_hxs, masks, deterministic=False):\n current_layer = 0\n shared_layer_idx = 0\n individual_layer_idx = 0\n on_shared_layers = True\n auxiliary_preds = torch.zeros((inputs.shape[0], self.auxiliary_output_size))\n x = inputs\n \n if self.is_recurrent:\n x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)\n current_layer += 1\n \n actor_x = x\n critic_x = x\n \n for i in range(current_layer, self.num_layers+1):\n # iterate through the layers whether shared or individual actor/critic\n # print(i)\n # first check if any auxiliary tasks have the current depth\n for j, head in enumerate(self.auxiliary_heads):\n # depth = head[0]\n # side = head[1]\n depth = head[0]\n if depth == -1:\n depth = self.num_layers\n if depth == current_layer:\n # print('Calling auxiliary head at depth {}'.format(i))\n # figure out if we are on shared layer\n if on_shared_layers:\n auxiliary_input = x\n elif head[1] == 0:\n auxiliary_input = actor_x\n elif head[1] == 1:\n auxiliary_input = critic_x\n \n # convert to output of auxiliary head\n auxiliary_output = self.auxiliary_layers[j](auxiliary_input)\n if self.auxiliary_layer_types[j] == 1:\n if deterministic:\n 
auxiliary_output = auxiliary_output.mode()\n else:\n auxiliary_output = auxiliary_output.sample()\n size = auxiliary_output.shape[1]\n start_idx = self.auxiliary_output_idxs[j]\n auxiliary_preds[:, start_idx:start_idx+size] = auxiliary_output\n \n # continue proceding through layers\n # check if we still have shared layers to complete\n if len(self.shared_layers) > 0 and shared_layer_idx < len(self.shared_layers):\n x = self.shared_layers[shared_layer_idx](x)\n # print('Calling shared layer {}'.format(shared_layer_idx))\n shared_layer_idx += 1\n # if shared layers are done, this will set actor_x and critic_x\n actor_x = x\n critic_x = x\n elif len(self.actor_layers) > 0 and individual_layer_idx < len(self.actor_layers):\n # no more shared layers - move to actor critic layers\n on_shared_layers = False\n # print('Calling actor critic layer {}'.format(individual_layer_idx))\n actor_x = self.actor_layers[individual_layer_idx](actor_x)\n critic_x = self.critic_layers[individual_layer_idx](critic_x)\n individual_layer_idx += 1\n \n current_layer += 1\n \n \n # Finally get critic value estimation\n critic_val = self.critic_layers[-1](critic_x)\n \n if self.has_auxiliary:\n return critic_val, actor_x, rnn_hxs, auxiliary_preds\n else:\n return critic_val, actor_x, rnn_hxs\n \n \n def forward_with_activations(self, inputs, rnn_hxs, masks, deterministic=False):\n \"\"\"Same as forward function but this will pass back all intermediate values\n\n _type_: _description_\n \"\"\"\n current_layer = 0\n shared_layer_idx = 0\n individual_layer_idx = 0\n on_shared_layers = True\n auxiliary_preds = torch.zeros((inputs.shape[0], self.auxiliary_output_size))\n x = inputs\n\n shared_activations = []\n actor_activations = []\n critic_activations = []\n\n \n # 0. Compute activations for recurrent layer if we are recurrent\n if self.is_recurrent:\n x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)\n shared_activations.append(x)\n current_layer += 1\n \n actor_x = x\n critic_x = x\n \n for i in range(current_layer, self.num_layers+1):\n # iterate through the layers whether shared or individual actor/critic\n # print(i)\n # 1. Auxiliary output computation\n # Check if any auxiliary tasks have the current depth\n # and depending on if they are on the shared, critic, or actor branch\n # evaluate the auxiliary task\n for j, head in enumerate(self.auxiliary_heads):\n # depth = head[0]\n # side = head[1]\n depth = head[0]\n if depth == -1:\n depth = self.num_layers\n if depth == current_layer:\n # print('Calling auxiliary head at depth {}'.format(i))\n # figure out if we are on shared layer\n if on_shared_layers:\n auxiliary_input = x\n elif head[1] == 0:\n auxiliary_input = actor_x\n elif head[1] == 1:\n auxiliary_input = critic_x\n \n # convert to output of auxiliary head\n auxiliary_output = self.auxiliary_layers[j](auxiliary_input)\n if self.auxiliary_layer_types[j] == 1:\n if deterministic:\n auxiliary_output = auxiliary_output.mode()\n else:\n auxiliary_output = auxiliary_output.sample()\n size = auxiliary_output.shape[1]\n start_idx = self.auxiliary_output_idxs[j]\n auxiliary_preds[:, start_idx:start_idx+size] = auxiliary_output\n \n # 2. 
Forward pass through the next layer\n\n # If we still have remaining shared layers, forward pass through the shared layers\n if len(self.shared_layers) > 0 and shared_layer_idx < len(self.shared_layers):\n x = self.shared_layers[shared_layer_idx](x)\n # print('Calling shared layer {}'.format(shared_layer_idx))\n shared_layer_idx += 1\n shared_activations.append(x)\n # if shared layers are done, this will set actor_x and critic_x\n actor_x = x\n critic_x = x\n \n # Otherwise, forward pass through actor and critic layers\n elif len(self.actor_layers) > 0 and individual_layer_idx < len(self.actor_layers):\n on_shared_layers = False\n # print('Calling actor critic layer {}'.format(individual_layer_idx))\n actor_x = self.actor_layers[individual_layer_idx](actor_x)\n critic_x = self.critic_layers[individual_layer_idx](critic_x)\n \n actor_activations.append(actor_x)\n critic_activations.append(critic_x)\n individual_layer_idx += 1\n \n current_layer += 1\n \n \n # Finally get critic value estimation\n critic_val = self.critic_layers[-1](critic_x)\n\n results = {\n 'critic_val': critic_val,\n 'actor_x': actor_x,\n 'rnn_hxs': rnn_hxs,\n 'shared_activations': shared_activations,\n 'actor_activations': actor_activations,\n 'critic_activations': critic_activations\n }\n \n if self.has_auxiliary:\n results['auxiliary_preds'] = auxiliary_preds\n\n return results"
] | [
[
"numpy.sqrt",
"torch.nn.init.calculate_gain",
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.Tanh",
"torch.nn.GRU",
"torch.nn.Conv2d",
"torch.nn.init.orthogonal_",
"torch.nn.Sequential",
"torch.zeros",
"torch.nn.ReLU",
"torch.cat"
]
] |
KuNyaa/fastNLP | [
"945b30bb6174751130744231aa26119bf9bb2601"
] | [
"reproduction/Summarization/Baseline/tools/Encoder.py"
] | [
"from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import *\r\nimport torch.nn.init as init\r\n\r\nimport data\r\nfrom tools.logger import *\r\nfrom transformer.Models import get_sinusoid_encoding_table\r\n\r\nclass Encoder(nn.Module):\r\n def __init__(self, hps, vocab):\r\n super(Encoder, self).__init__()\r\n\r\n self._hps = hps\r\n self._vocab = vocab\r\n self.sent_max_len = hps.sent_max_len\r\n\r\n vocab_size = len(vocab)\r\n logger.info(\"[INFO] Vocabulary size is %d\", vocab_size)\r\n embed_size = hps.word_emb_dim\r\n sent_max_len = hps.sent_max_len\r\n\r\n input_channels = 1\r\n out_channels = hps.output_channel\r\n min_kernel_size = hps.min_kernel_size\r\n max_kernel_size = hps.max_kernel_size\r\n width = embed_size\r\n\r\n # word embedding\r\n self.embed = nn.Embedding(vocab_size, embed_size, padding_idx=vocab.word2id('[PAD]'))\r\n if hps.word_embedding:\r\n word2vec = data.Word_Embedding(hps.embedding_path, vocab)\r\n word_vecs = word2vec.load_my_vecs(embed_size)\r\n # pretrained_weight = word2vec.add_unknown_words_by_zero(word_vecs, embed_size)\r\n pretrained_weight = word2vec.add_unknown_words_by_avg(word_vecs, embed_size)\r\n pretrained_weight = np.array(pretrained_weight)\r\n self.embed.weight.data.copy_(torch.from_numpy(pretrained_weight))\r\n self.embed.weight.requires_grad = hps.embed_train\r\n\r\n # position embedding\r\n self.position_embedding = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(sent_max_len + 1, embed_size, padding_idx=0), freeze=True)\r\n\r\n # cnn\r\n self.convs = nn.ModuleList([nn.Conv2d(input_channels, out_channels, kernel_size = (height, width)) for height in range(min_kernel_size, max_kernel_size+1)])\r\n logger.info(\"[INFO] Initing W for CNN.......\")\r\n for conv in self.convs:\r\n init_weight_value = 6.0\r\n init.xavier_normal_(conv.weight.data, gain=np.sqrt(init_weight_value))\r\n fan_in, fan_out = Encoder.calculate_fan_in_and_fan_out(conv.weight.data)\r\n std = np.sqrt(init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out))\r\n\r\n def calculate_fan_in_and_fan_out(tensor):\r\n dimensions = tensor.ndimension()\r\n if dimensions < 2:\r\n logger.error(\"[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions\")\r\n raise ValueError(\"[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions\")\r\n\r\n if dimensions == 2: # Linear\r\n fan_in = tensor.size(1)\r\n fan_out = tensor.size(0)\r\n else:\r\n num_input_fmaps = tensor.size(1)\r\n num_output_fmaps = tensor.size(0)\r\n receptive_field_size = 1\r\n if tensor.dim() > 2:\r\n receptive_field_size = tensor[0][0].numel()\r\n fan_in = num_input_fmaps * receptive_field_size\r\n fan_out = num_output_fmaps * receptive_field_size\r\n\r\n return fan_in, fan_out\r\n\r\n def forward(self, input):\r\n # input: a batch of Example object [batch_size, N, seq_len]\r\n vocab = self._vocab\r\n\r\n batch_size, N, _ = input.size()\r\n input = input.view(-1, input.size(2)) # [batch_size*N, L]\r\n input_sent_len = ((input!=vocab.word2id('[PAD]')).sum(dim=1)).int() # [batch_size*N, 1]\r\n enc_embed_input = self.embed(input) # [batch_size*N, L, D]\r\n\r\n input_pos = torch.Tensor([np.hstack((np.arange(1, sentlen + 1), np.zeros(self.sent_max_len - sentlen))) for sentlen in input_sent_len])\r\n if self._hps.cuda:\r\n input_pos = input_pos.cuda()\r\n 
enc_pos_embed_input = self.position_embedding(input_pos.long()) # [batch_size*N, D]\r\n enc_conv_input = enc_embed_input + enc_pos_embed_input\r\n enc_conv_input = enc_conv_input.unsqueeze(1) # (batch * N,Ci,L,D)\r\n enc_conv_output = [F.relu(conv(enc_conv_input)).squeeze(3) for conv in self.convs] # kernel_sizes * (batch*N, Co, W)\r\n enc_maxpool_output = [F.max_pool1d(x, x.size(2)).squeeze(2) for x in enc_conv_output] # kernel_sizes * (batch*N, Co)\r\n sent_embedding = torch.cat(enc_maxpool_output, 1) # (batch*N, Co * kernel_sizes)\r\n sent_embedding = sent_embedding.view(batch_size, N, -1)\r\n return sent_embedding\r\n\r\nclass DomainEncoder(Encoder):\r\n def __init__(self, hps, vocab, domaindict):\r\n super(DomainEncoder, self).__init__(hps, vocab)\r\n\r\n # domain embedding\r\n self.domain_embedding = nn.Embedding(domaindict.size(), hps.domain_emb_dim)\r\n self.domain_embedding.weight.requires_grad = True\r\n\r\n def forward(self, input, domain):\r\n \"\"\"\r\n :param input: [batch_size, N, seq_len], N sentence number, seq_len token number\r\n :param domain: [batch_size]\r\n :return: sent_embedding: [batch_size, N, Co * kernel_sizes]\r\n \"\"\"\r\n\r\n batch_size, N, _ = input.size()\r\n\r\n sent_embedding = super().forward(input)\r\n enc_domain_input = self.domain_embedding(domain) # [batch, D]\r\n enc_domain_input = enc_domain_input.unsqueeze(1).expand(batch_size, N, -1) # [batch, N, D]\r\n sent_embedding = torch.cat((sent_embedding, enc_domain_input), dim=2)\r\n return sent_embedding\r\n\r\nclass MultiDomainEncoder(Encoder):\r\n def __init__(self, hps, vocab, domaindict):\r\n super(MultiDomainEncoder, self).__init__(hps, vocab)\r\n\r\n self.domain_size = domaindict.size()\r\n\r\n # domain embedding\r\n self.domain_embedding = nn.Embedding(self.domain_size, hps.domain_emb_dim)\r\n self.domain_embedding.weight.requires_grad = True\r\n\r\n def forward(self, input, domain):\r\n \"\"\"\r\n :param input: [batch_size, N, seq_len], N sentence number, seq_len token number\r\n :param domain: [batch_size, domain_size]\r\n :return: sent_embedding: [batch_size, N, Co * kernel_sizes]\r\n \"\"\"\r\n\r\n batch_size, N, _ = input.size()\r\n\r\n # logger.info(domain[:5, :])\r\n\r\n sent_embedding = super().forward(input)\r\n domain_padding = torch.arange(self.domain_size).unsqueeze(0).expand(batch_size, -1)\r\n domain_padding = domain_padding.cuda().view(-1) if self._hps.cuda else domain_padding.view(-1) # [batch * domain_size]\r\n\r\n enc_domain_input = self.domain_embedding(domain_padding) # [batch * domain_size, D]\r\n enc_domain_input = enc_domain_input.view(batch_size, self.domain_size, -1) * domain.unsqueeze(-1).float() # [batch, domain_size, D]\r\n\r\n # logger.info(enc_domain_input[:5,:]) # [batch, domain_size, D]\r\n\r\n enc_domain_input = enc_domain_input.sum(1) / domain.sum(1).float().unsqueeze(-1) # [batch, D]\r\n enc_domain_input = enc_domain_input.unsqueeze(1).expand(batch_size, N, -1) # [batch, N, D]\r\n sent_embedding = torch.cat((sent_embedding, enc_domain_input), dim=2)\r\n return sent_embedding\r\n\r\n\r\nclass BertEncoder(nn.Module):\r\n def __init__(self, hps):\r\n super(BertEncoder, self).__init__()\r\n\r\n from pytorch_pretrained_bert.modeling import BertModel\r\n\r\n self._hps = hps\r\n self.sent_max_len = hps.sent_max_len\r\n self._cuda = hps.cuda\r\n\r\n embed_size = hps.word_emb_dim\r\n sent_max_len = hps.sent_max_len\r\n\r\n input_channels = 1\r\n out_channels = hps.output_channel\r\n min_kernel_size = hps.min_kernel_size\r\n max_kernel_size = hps.max_kernel_size\r\n 
width = embed_size\r\n\r\n # word embedding\r\n self._bert = BertModel.from_pretrained(\"/remote-home/dqwang/BERT/pre-train/uncased_L-24_H-1024_A-16\")\r\n self._bert.eval()\r\n for p in self._bert.parameters():\r\n p.requires_grad = False\r\n\r\n self.word_embedding_proj = nn.Linear(4096, embed_size)\r\n\r\n # position embedding\r\n self.position_embedding = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(sent_max_len + 1, embed_size, padding_idx=0), freeze=True)\r\n\r\n # cnn\r\n self.convs = nn.ModuleList([nn.Conv2d(input_channels, out_channels, kernel_size = (height, width)) for height in range(min_kernel_size, max_kernel_size+1)])\r\n logger.info(\"[INFO] Initing W for CNN.......\")\r\n for conv in self.convs:\r\n init_weight_value = 6.0\r\n init.xavier_normal_(conv.weight.data, gain=np.sqrt(init_weight_value))\r\n fan_in, fan_out = Encoder.calculate_fan_in_and_fan_out(conv.weight.data)\r\n std = np.sqrt(init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out))\r\n\r\n def calculate_fan_in_and_fan_out(tensor):\r\n dimensions = tensor.ndimension()\r\n if dimensions < 2:\r\n logger.error(\"[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions\")\r\n raise ValueError(\"[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions\")\r\n\r\n if dimensions == 2: # Linear\r\n fan_in = tensor.size(1)\r\n fan_out = tensor.size(0)\r\n else:\r\n num_input_fmaps = tensor.size(1)\r\n num_output_fmaps = tensor.size(0)\r\n receptive_field_size = 1\r\n if tensor.dim() > 2:\r\n receptive_field_size = tensor[0][0].numel()\r\n fan_in = num_input_fmaps * receptive_field_size\r\n fan_out = num_output_fmaps * receptive_field_size\r\n\r\n return fan_in, fan_out\r\n\r\n def pad_encoder_input(self, input_list):\r\n \"\"\"\r\n :param input_list: N [seq_len, hidden_state]\r\n :return: enc_sent_input_pad: list, N [max_len, hidden_state]\r\n \"\"\"\r\n max_len = self.sent_max_len\r\n enc_sent_input_pad = []\r\n _, hidden_size = input_list[0].size()\r\n for i in range(len(input_list)):\r\n article_words = input_list[i] # [seq_len, hidden_size]\r\n seq_len = article_words.size(0)\r\n if seq_len > max_len:\r\n pad_words = article_words[:max_len, :]\r\n else:\r\n pad_tensor = torch.zeros(max_len - seq_len, hidden_size).cuda() if self._cuda else torch.zeros(max_len - seq_len, hidden_size)\r\n pad_words = torch.cat([article_words, pad_tensor], dim=0)\r\n enc_sent_input_pad.append(pad_words)\r\n return enc_sent_input_pad\r\n\r\n def forward(self, inputs, input_masks, enc_sent_len):\r\n \"\"\"\r\n \r\n :param inputs: a batch of Example object [batch_size, doc_len=512]\r\n :param input_masks: 0 or 1, [batch, doc_len=512]\r\n :param enc_sent_len: sentence original length [batch, N]\r\n :return: \r\n \"\"\"\r\n\r\n\r\n # Use Bert to get word embedding\r\n batch_size, N = enc_sent_len.size()\r\n input_pad_list = []\r\n for i in range(batch_size):\r\n tokens_id = inputs[i]\r\n input_mask = input_masks[i]\r\n sent_len = enc_sent_len[i]\r\n input_ids = tokens_id.unsqueeze(0)\r\n input_mask = input_mask.unsqueeze(0)\r\n\r\n out, _ = self._bert(input_ids, token_type_ids=None, attention_mask=input_mask)\r\n out = torch.cat(out[-4:], dim=-1).squeeze(0) # [doc_len=512, hidden_state=4096]\r\n\r\n _, hidden_size = out.size()\r\n\r\n # restore the sentence\r\n last_end = 1\r\n enc_sent_input = []\r\n for length in sent_len:\r\n if length != 0 and last_end < 511:\r\n enc_sent_input.append(out[last_end: min(511, last_end + length), :])\r\n last_end += length\r\n else:\r\n 
pad_tensor = torch.zeros(self.sent_max_len, hidden_size).cuda() if self._hps.cuda else torch.zeros(self.sent_max_len, hidden_size)\r\n enc_sent_input.append(pad_tensor)\r\n\r\n\r\n # pad the sentence\r\n enc_sent_input_pad = self.pad_encoder_input(enc_sent_input) # [N, seq_len, hidden_state=4096]\r\n input_pad_list.append(torch.stack(enc_sent_input_pad))\r\n\r\n input_pad = torch.stack(input_pad_list)\r\n\r\n input_pad = input_pad.view(batch_size*N, self.sent_max_len, -1)\r\n enc_sent_len = enc_sent_len.view(-1) # [batch_size*N]\r\n enc_embed_input = self.word_embedding_proj(input_pad) # [batch_size * N, L, D]\r\n\r\n sent_pos_list = []\r\n for sentlen in enc_sent_len:\r\n sent_pos = list(range(1, min(self.sent_max_len, sentlen) + 1))\r\n for k in range(self.sent_max_len - sentlen):\r\n sent_pos.append(0)\r\n sent_pos_list.append(sent_pos)\r\n input_pos = torch.Tensor(sent_pos_list).long()\r\n\r\n if self._hps.cuda:\r\n input_pos = input_pos.cuda()\r\n enc_pos_embed_input = self.position_embedding(input_pos.long()) # [batch_size*N, D]\r\n enc_conv_input = enc_embed_input + enc_pos_embed_input\r\n enc_conv_input = enc_conv_input.unsqueeze(1) # (batch * N,Ci,L,D)\r\n enc_conv_output = [F.relu(conv(enc_conv_input)).squeeze(3) for conv in self.convs] # kernel_sizes * (batch*N, Co, W)\r\n enc_maxpool_output = [F.max_pool1d(x, x.size(2)).squeeze(2) for x in enc_conv_output] # kernel_sizes * (batch*N, Co)\r\n sent_embedding = torch.cat(enc_maxpool_output, 1) # (batch*N, Co * kernel_sizes)\r\n sent_embedding = sent_embedding.view(batch_size, N, -1)\r\n return sent_embedding\r\n\r\n\r\nclass BertTagEncoder(BertEncoder):\r\n def __init__(self, hps, domaindict):\r\n super(BertTagEncoder, self).__init__(hps)\r\n\r\n # domain embedding\r\n self.domain_embedding = nn.Embedding(domaindict.size(), hps.domain_emb_dim)\r\n self.domain_embedding.weight.requires_grad = True\r\n\r\n def forward(self, inputs, input_masks, enc_sent_len, domain):\r\n sent_embedding = super().forward(inputs, input_masks, enc_sent_len)\r\n\r\n batch_size, N = enc_sent_len.size()\r\n\r\n enc_domain_input = self.domain_embedding(domain) # [batch, D]\r\n enc_domain_input = enc_domain_input.unsqueeze(1).expand(batch_size, N, -1) # [batch, N, D]\r\n sent_embedding = torch.cat((sent_embedding, enc_domain_input), dim=2)\r\n\r\n return sent_embedding\r\n\r\nclass ELMoEndoer(nn.Module):\r\n def __init__(self, hps):\r\n super(ELMoEndoer, self).__init__()\r\n\r\n self._hps = hps\r\n self.sent_max_len = hps.sent_max_len\r\n\r\n from allennlp.modules.elmo import Elmo\r\n\r\n elmo_dim = 1024\r\n options_file = \"/remote-home/dqwang/ELMo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json\"\r\n weight_file = \"/remote-home/dqwang/ELMo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5\"\r\n\r\n # elmo_dim = 512\r\n # options_file = \"/remote-home/dqwang/ELMo/elmo_2x2048_256_2048cnn_1xhighway_options.json\"\r\n # weight_file = \"/remote-home/dqwang/ELMo/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5\"\r\n\r\n embed_size = hps.word_emb_dim\r\n sent_max_len = hps.sent_max_len\r\n\r\n input_channels = 1\r\n out_channels = hps.output_channel\r\n min_kernel_size = hps.min_kernel_size\r\n max_kernel_size = hps.max_kernel_size\r\n width = embed_size\r\n\r\n # elmo embedding\r\n self.elmo = Elmo(options_file, weight_file, 1, dropout=0)\r\n self.embed_proj = nn.Linear(elmo_dim, embed_size)\r\n\r\n # position embedding\r\n self.position_embedding = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(sent_max_len + 1, embed_size, padding_idx=0), 
freeze=True)\r\n\r\n # cnn\r\n self.convs = nn.ModuleList([nn.Conv2d(input_channels, out_channels, kernel_size = (height, width)) for height in range(min_kernel_size, max_kernel_size+1)])\r\n logger.info(\"[INFO] Initing W for CNN.......\")\r\n for conv in self.convs:\r\n init_weight_value = 6.0\r\n init.xavier_normal_(conv.weight.data, gain=np.sqrt(init_weight_value))\r\n fan_in, fan_out = Encoder.calculate_fan_in_and_fan_out(conv.weight.data)\r\n std = np.sqrt(init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out))\r\n\r\n def calculate_fan_in_and_fan_out(tensor):\r\n dimensions = tensor.ndimension()\r\n if dimensions < 2:\r\n logger.error(\"[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions\")\r\n raise ValueError(\"[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions\")\r\n\r\n if dimensions == 2: # Linear\r\n fan_in = tensor.size(1)\r\n fan_out = tensor.size(0)\r\n else:\r\n num_input_fmaps = tensor.size(1)\r\n num_output_fmaps = tensor.size(0)\r\n receptive_field_size = 1\r\n if tensor.dim() > 2:\r\n receptive_field_size = tensor[0][0].numel()\r\n fan_in = num_input_fmaps * receptive_field_size\r\n fan_out = num_output_fmaps * receptive_field_size\r\n\r\n return fan_in, fan_out\r\n\r\n def forward(self, input):\r\n # input: a batch of Example object [batch_size, N, seq_len, character_len]\r\n\r\n batch_size, N, seq_len, _ = input.size()\r\n input = input.view(batch_size * N, seq_len, -1) # [batch_size*N, seq_len, character_len]\r\n input_sent_len = ((input.sum(-1)!=0).sum(dim=1)).int() # [batch_size*N, 1]\r\n logger.debug(input_sent_len.view(batch_size, -1))\r\n enc_embed_input = self.elmo(input)['elmo_representations'][0] # [batch_size*N, L, D]\r\n enc_embed_input = self.embed_proj(enc_embed_input)\r\n\r\n # input_pos = torch.Tensor([np.hstack((np.arange(1, sentlen + 1), np.zeros(self.sent_max_len - sentlen))) for sentlen in input_sent_len])\r\n\r\n sent_pos_list = []\r\n for sentlen in input_sent_len:\r\n sent_pos = list(range(1, min(self.sent_max_len, sentlen) + 1))\r\n for k in range(self.sent_max_len - sentlen):\r\n sent_pos.append(0)\r\n sent_pos_list.append(sent_pos)\r\n input_pos = torch.Tensor(sent_pos_list).long()\r\n\r\n if self._hps.cuda:\r\n input_pos = input_pos.cuda()\r\n enc_pos_embed_input = self.position_embedding(input_pos.long()) # [batch_size*N, D]\r\n enc_conv_input = enc_embed_input + enc_pos_embed_input\r\n enc_conv_input = enc_conv_input.unsqueeze(1) # (batch * N,Ci,L,D)\r\n enc_conv_output = [F.relu(conv(enc_conv_input)).squeeze(3) for conv in self.convs] # kernel_sizes * (batch*N, Co, W)\r\n enc_maxpool_output = [F.max_pool1d(x, x.size(2)).squeeze(2) for x in enc_conv_output] # kernel_sizes * (batch*N, Co)\r\n sent_embedding = torch.cat(enc_maxpool_output, 1) # (batch*N, Co * kernel_sizes)\r\n sent_embedding = sent_embedding.view(batch_size, N, -1)\r\n return sent_embedding\r\n\r\nclass ELMoEndoer2(nn.Module):\r\n def __init__(self, hps):\r\n super(ELMoEndoer2, self).__init__()\r\n\r\n self._hps = hps\r\n self._cuda = hps.cuda\r\n self.sent_max_len = hps.sent_max_len\r\n\r\n from allennlp.modules.elmo import Elmo\r\n\r\n elmo_dim = 1024\r\n options_file = \"/remote-home/dqwang/ELMo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json\"\r\n weight_file = \"/remote-home/dqwang/ELMo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5\"\r\n\r\n # elmo_dim = 512\r\n # options_file = \"/remote-home/dqwang/ELMo/elmo_2x2048_256_2048cnn_1xhighway_options.json\"\r\n # weight_file = 
\"/remote-home/dqwang/ELMo/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5\"\r\n\r\n embed_size = hps.word_emb_dim\r\n sent_max_len = hps.sent_max_len\r\n\r\n input_channels = 1\r\n out_channels = hps.output_channel\r\n min_kernel_size = hps.min_kernel_size\r\n max_kernel_size = hps.max_kernel_size\r\n width = embed_size\r\n\r\n # elmo embedding\r\n self.elmo = Elmo(options_file, weight_file, 1, dropout=0)\r\n self.embed_proj = nn.Linear(elmo_dim, embed_size)\r\n\r\n # position embedding\r\n self.position_embedding = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(sent_max_len + 1, embed_size, padding_idx=0), freeze=True)\r\n\r\n # cnn\r\n self.convs = nn.ModuleList([nn.Conv2d(input_channels, out_channels, kernel_size = (height, width)) for height in range(min_kernel_size, max_kernel_size+1)])\r\n logger.info(\"[INFO] Initing W for CNN.......\")\r\n for conv in self.convs:\r\n init_weight_value = 6.0\r\n init.xavier_normal_(conv.weight.data, gain=np.sqrt(init_weight_value))\r\n fan_in, fan_out = Encoder.calculate_fan_in_and_fan_out(conv.weight.data)\r\n std = np.sqrt(init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out))\r\n\r\n def calculate_fan_in_and_fan_out(tensor):\r\n dimensions = tensor.ndimension()\r\n if dimensions < 2:\r\n logger.error(\"[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions\")\r\n raise ValueError(\"[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions\")\r\n\r\n if dimensions == 2: # Linear\r\n fan_in = tensor.size(1)\r\n fan_out = tensor.size(0)\r\n else:\r\n num_input_fmaps = tensor.size(1)\r\n num_output_fmaps = tensor.size(0)\r\n receptive_field_size = 1\r\n if tensor.dim() > 2:\r\n receptive_field_size = tensor[0][0].numel()\r\n fan_in = num_input_fmaps * receptive_field_size\r\n fan_out = num_output_fmaps * receptive_field_size\r\n\r\n return fan_in, fan_out\r\n\r\n def pad_encoder_input(self, input_list):\r\n \"\"\"\r\n :param input_list: N [seq_len, hidden_state]\r\n :return: enc_sent_input_pad: list, N [max_len, hidden_state]\r\n \"\"\"\r\n max_len = self.sent_max_len\r\n enc_sent_input_pad = []\r\n _, hidden_size = input_list[0].size()\r\n for i in range(len(input_list)):\r\n article_words = input_list[i] # [seq_len, hidden_size]\r\n seq_len = article_words.size(0)\r\n if seq_len > max_len:\r\n pad_words = article_words[:max_len, :]\r\n else:\r\n pad_tensor = torch.zeros(max_len - seq_len, hidden_size).cuda() if self._cuda else torch.zeros(max_len - seq_len, hidden_size)\r\n pad_words = torch.cat([article_words, pad_tensor], dim=0)\r\n enc_sent_input_pad.append(pad_words)\r\n return enc_sent_input_pad\r\n\r\n def forward(self, inputs, input_masks, enc_sent_len):\r\n \"\"\"\r\n\r\n :param inputs: a batch of Example object [batch_size, doc_len=512, character_len=50]\r\n :param input_masks: 0 or 1, [batch, doc_len=512]\r\n :param enc_sent_len: sentence original length [batch, N]\r\n :return: \r\n sent_embedding: [batch, N, D]\r\n \"\"\"\r\n\r\n # Use Bert to get word embedding\r\n batch_size, N = enc_sent_len.size()\r\n input_pad_list = []\r\n\r\n elmo_output = self.elmo(inputs)['elmo_representations'][0] # [batch_size, 512, D]\r\n elmo_output = elmo_output * input_masks.unsqueeze(-1).float()\r\n # print(\"END elmo\")\r\n\r\n for i in range(batch_size):\r\n sent_len = enc_sent_len[i] # [1, N]\r\n out = elmo_output[i]\r\n\r\n _, hidden_size = out.size()\r\n\r\n # restore the sentence\r\n last_end = 0\r\n enc_sent_input = []\r\n for length in sent_len:\r\n if length != 0 and last_end < 
512:\r\n enc_sent_input.append(out[last_end : min(512, last_end + length), :])\r\n last_end += length\r\n else:\r\n pad_tensor = torch.zeros(self.sent_max_len, hidden_size).cuda() if self._hps.cuda else torch.zeros(self.sent_max_len, hidden_size)\r\n enc_sent_input.append(pad_tensor)\r\n\r\n # pad the sentence\r\n enc_sent_input_pad = self.pad_encoder_input(enc_sent_input) # [N, seq_len, hidden_state=4096]\r\n input_pad_list.append(torch.stack(enc_sent_input_pad)) # batch * [N, max_len, hidden_state]\r\n\r\n input_pad = torch.stack(input_pad_list)\r\n\r\n input_pad = input_pad.view(batch_size * N, self.sent_max_len, -1)\r\n enc_sent_len = enc_sent_len.view(-1) # [batch_size*N]\r\n enc_embed_input = self.embed_proj(input_pad) # [batch_size * N, L, D]\r\n\r\n # input_pos = torch.Tensor([np.hstack((np.arange(1, sentlen + 1), np.zeros(self.sent_max_len - sentlen))) for sentlen in input_sent_len])\r\n\r\n sent_pos_list = []\r\n for sentlen in enc_sent_len:\r\n sent_pos = list(range(1, min(self.sent_max_len, sentlen) + 1))\r\n for k in range(self.sent_max_len - sentlen):\r\n sent_pos.append(0)\r\n sent_pos_list.append(sent_pos)\r\n input_pos = torch.Tensor(sent_pos_list).long()\r\n\r\n if self._hps.cuda:\r\n input_pos = input_pos.cuda()\r\n enc_pos_embed_input = self.position_embedding(input_pos.long()) # [batch_size*N, D]\r\n enc_conv_input = enc_embed_input + enc_pos_embed_input\r\n enc_conv_input = enc_conv_input.unsqueeze(1) # (batch * N,Ci,L,D)\r\n enc_conv_output = [F.relu(conv(enc_conv_input)).squeeze(3) for conv in self.convs] # kernel_sizes * (batch*N, Co, W)\r\n enc_maxpool_output = [F.max_pool1d(x, x.size(2)).squeeze(2) for x in enc_conv_output] # kernel_sizes * (batch*N, Co)\r\n sent_embedding = torch.cat(enc_maxpool_output, 1) # (batch*N, Co * kernel_sizes)\r\n sent_embedding = sent_embedding.view(batch_size, N, -1)\r\n return sent_embedding"
] | [
[
"numpy.sqrt",
"torch.stack",
"torch.nn.Linear",
"numpy.zeros",
"torch.zeros",
"torch.nn.Embedding",
"numpy.arange",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.arange",
"numpy.array",
"torch.cat",
"torch.Tensor"
]
] |
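All of the encoder variants stored in the row above (the plain CNN encoder, the domain-conditioned ones, and the BERT/ELMo-based ones) end with the same Conv2d / ReLU / max-pool-over-time / concatenate step before reshaping to [batch, N, Co * kernel_sizes]. The snippet below is a minimal, self-contained PyTorch sketch of that shared step for illustration only; it is not code from the dataset entry, and vocab_size, embed_dim, out_channels, and the kernel-size range are placeholder values.

import torch
import torch.nn as nn
import torch.nn.functional as F

class CNNSentenceEncoderSketch(nn.Module):
    # Hypothetical sketch of the conv + max-pool sentence encoding used in the row above.
    def __init__(self, vocab_size=1000, embed_dim=128, out_channels=50,
                 min_kernel_size=2, max_kernel_size=5):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        # one Conv2d filter bank per kernel height, each spanning the full embedding width
        self.convs = nn.ModuleList([
            nn.Conv2d(1, out_channels, kernel_size=(h, embed_dim))
            for h in range(min_kernel_size, max_kernel_size + 1)
        ])

    def forward(self, tokens):
        # tokens: [batch * N, seq_len] of token ids
        x = self.embedding(tokens)                 # [batch*N, L, D]
        x = x.unsqueeze(1)                         # [batch*N, 1, L, D], single input channel
        feats = [F.relu(conv(x)).squeeze(3) for conv in self.convs]      # each [batch*N, Co, L-h+1]
        pooled = [F.max_pool1d(f, f.size(2)).squeeze(2) for f in feats]  # each [batch*N, Co]
        return torch.cat(pooled, dim=1)            # [batch*N, Co * n_kernel_sizes]

enc = CNNSentenceEncoderSketch()
print(enc(torch.randint(1, 1000, (8, 40))).shape)  # torch.Size([8, 200])

Each kernel height contributes out_channels pooled features, so the concatenated vector has out_channels times the number of kernel sizes dimensions, which is the "Co * kernel_sizes" referred to in the shape comments of the row's code.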
WenjayDu/PocketFlow | [
"19ed4858b2fc914541032f74239ca08c0074c237"
] | [
"learners/weight_sparsification/learner.py"
] | [
"# Tencent is pleased to support the open source community by making PocketFlow available.\n#\n# Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Weight sparsification learner.\"\"\"\n\nimport os\nfrom timeit import default_timer as timer\nimport numpy as np\nimport tensorflow as tf\n\nfrom learners.abstract_learner import AbstractLearner\nfrom learners.distillation_helper import DistillationHelper\nfrom learners.weight_sparsification.pr_optimizer import PROptimizer\nfrom learners.weight_sparsification.utils import get_maskable_vars\nfrom utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('ws_save_path', './models_ws/model.ckpt', 'WS: model\\'s save path')\ntf.app.flags.DEFINE_float('ws_prune_ratio', 0.75, 'WS: target pruning ratio')\ntf.app.flags.DEFINE_string('ws_prune_ratio_prtl', 'optimal',\n 'WS: pruning ratio protocol (\\'uniform\\' | \\'heurist\\' | \\'optimal\\')')\ntf.app.flags.DEFINE_integer('ws_nb_rlouts', 200, 'WS: # of roll-outs for the RL agent')\ntf.app.flags.DEFINE_integer('ws_nb_rlouts_min', 50,\n 'WS: minimal # of roll-outs for the RL agent to start training')\ntf.app.flags.DEFINE_string('ws_reward_type', 'single-obj',\n 'WS: reward type (\\'single-obj\\' OR \\'multi-obj\\')')\ntf.app.flags.DEFINE_float('ws_lrn_rate_rg', 3e-2, 'WS: learning rate for layerwise regression')\ntf.app.flags.DEFINE_integer('ws_nb_iters_rg', 20, 'WS: # of iterations for layerwise regression')\ntf.app.flags.DEFINE_float('ws_lrn_rate_ft', 3e-4, 'WS: learning rate for global fine-tuning')\ntf.app.flags.DEFINE_integer('ws_nb_iters_ft', 400, 'WS: # of iterations for global fine-tuning')\ntf.app.flags.DEFINE_integer('ws_nb_iters_feval', 25, 'WS: # of iterations for fast evaluation')\ntf.app.flags.DEFINE_float('ws_prune_ratio_exp', 3.0, 'WS: pruning ratio\\'s exponent term')\ntf.app.flags.DEFINE_float('ws_iter_ratio_beg', 0.1, 'WS: iteration ratio (at starting time)')\ntf.app.flags.DEFINE_float('ws_iter_ratio_end', 0.5, 'WS: iteration ratio (at ending time)')\ntf.app.flags.DEFINE_float('ws_mask_update_step', 500, 'WS: step size for updating the pruning mask')\n\ndef calc_prune_ratio(vars_list):\n \"\"\"Calculate the overall pruning ratio for the given list of variables.\n Args:\n * vars_list: list of variables\n Returns:\n * prune_ratio: overall pruning ratio of the given list of variables\n \"\"\"\n\n nb_params_nnz = tf.add_n([tf.count_nonzero(var) for var in vars_list])\n nb_params_all = tf.add_n([tf.size(var) for var in vars_list])\n prune_ratio = 1.0 - tf.cast(nb_params_nnz, tf.float32) / tf.cast(nb_params_all, tf.float32)\n\n return prune_ratio\n\nclass WeightSparseLearner(AbstractLearner): # pylint: disable=too-many-instance-attributes\n \"\"\"Weight sparsification learner.\"\"\"\n\n def __init__(self, sm_writer, model_helper):\n \"\"\"Constructor function.\n Args:\n * 
sm_writer: TensorFlow's summary writer\n * model_helper: model helper with definitions of model & dataset\n \"\"\"\n\n # class-independent initialization\n super(WeightSparseLearner, self).__init__(sm_writer, model_helper)\n\n # define the scope for masks\n self.mask_scope = 'mask'\n\n # compute the optimal pruning ratios (only when the execution mode is 'train')\n if FLAGS.exec_mode == 'train':\n pr_optimizer = PROptimizer(model_helper, self.mpi_comm)\n if FLAGS.ws_prune_ratio_prtl == 'optimal':\n if self.is_primary_worker('local'):\n self.download_model() # pre-trained model is required\n self.auto_barrier()\n tf.logging.info('model files: ' + ', '.join(os.listdir('./models')))\n self.var_names_n_prune_ratios = pr_optimizer.run()\n\n # class-dependent initialization\n if FLAGS.enbl_dst:\n self.helper_dst = DistillationHelper(sm_writer, model_helper, self.mpi_comm)\n if FLAGS.exec_mode == 'train':\n self.__build_train() # only when the execution mode is 'train'\n self.__build_eval() # needed whatever the execution mode is\n\n def train(self):\n \"\"\"Train a model and periodically produce checkpoint files.\"\"\"\n\n # initialization\n self.sess_train.run(self.init_op)\n if FLAGS.enbl_multi_gpu:\n self.sess_train.run(self.bcast_op)\n\n # train the model through iterations and periodically save & evaluate the model\n last_mask_applied = False\n time_prev = timer()\n for idx_iter in range(self.nb_iters_train):\n # train the model\n if (idx_iter + 1) % FLAGS.summ_step != 0:\n self.sess_train.run(self.train_op)\n else:\n __, summary, log_rslt = self.sess_train.run([self.train_op, self.summary_op, self.log_op])\n if self.is_primary_worker('global'):\n time_step = timer() - time_prev\n self.__monitor_progress(summary, log_rslt, idx_iter, time_step)\n time_prev = timer()\n\n # apply pruning\n if (idx_iter + 1) % FLAGS.ws_mask_update_step == 0:\n iter_ratio = float(idx_iter + 1) / self.nb_iters_train\n if iter_ratio >= FLAGS.ws_iter_ratio_beg:\n if iter_ratio <= FLAGS.ws_iter_ratio_end:\n self.sess_train.run([self.prune_op, self.init_opt_op])\n elif not last_mask_applied:\n last_mask_applied = True\n self.sess_train.run([self.prune_op, self.init_opt_op])\n\n # save the model at certain steps\n if self.is_primary_worker('global') and (idx_iter + 1) % FLAGS.save_step == 0:\n self.__save_model()\n self.evaluate()\n\n # save the final model\n if self.is_primary_worker('global'):\n self.__save_model()\n self.evaluate()\n\n def evaluate(self):\n \"\"\"Restore a model from the latest checkpoint files and then evaluate it.\"\"\"\n\n self.__restore_model(is_train=False)\n nb_iters = int(np.ceil(float(FLAGS.nb_smpls_eval) / FLAGS.batch_size_eval))\n eval_rslts = np.zeros((nb_iters, len(self.eval_op)))\n for idx_iter in range(nb_iters):\n eval_rslts[idx_iter] = self.sess_eval.run(self.eval_op)\n for idx, name in enumerate(self.eval_op_names):\n tf.logging.info('%s = %.4e' % (name, np.mean(eval_rslts[:, idx])))\n\n def __build_train(self): # pylint: disable=too-many-locals\n \"\"\"Build the training graph.\"\"\"\n\n with tf.Graph().as_default():\n # create a TF session for the current graph\n config = tf.ConfigProto()\n if FLAGS.enbl_multi_gpu:\n config.gpu_options.visible_device_list = str(mgw.local_rank()) # pylint: disable=no-member\n else:\n config.gpu_options.visible_device_list = '0' # pylint: disable=no-member\n sess = tf.Session(config=config)\n\n # data input pipeline\n with tf.variable_scope(self.data_scope):\n iterator = self.build_dataset_train()\n images, labels = iterator.get_next()\n\n # model 
definition - distilled model\n if FLAGS.enbl_dst:\n logits_dst = self.helper_dst.calc_logits(sess, images)\n\n # model definition - weight-sparsified model\n with tf.variable_scope(self.model_scope):\n # loss & extra evaluation metrics\n logits = self.forward_train(images)\n self.maskable_var_names = [var.name for var in self.maskable_vars]\n loss, metrics = self.calc_loss(labels, logits, self.trainable_vars)\n if FLAGS.enbl_dst:\n loss += self.helper_dst.calc_loss(logits, logits_dst)\n tf.summary.scalar('loss', loss)\n for key, value in metrics.items():\n tf.summary.scalar(key, value)\n\n # learning rate schedule\n self.global_step = tf.train.get_or_create_global_step()\n lrn_rate, self.nb_iters_train = self.setup_lrn_rate(self.global_step)\n\n # overall pruning ratios of trainable & maskable variables\n pr_trainable = calc_prune_ratio(self.trainable_vars)\n pr_maskable = calc_prune_ratio(self.maskable_vars)\n tf.summary.scalar('pr_trainable', pr_trainable)\n tf.summary.scalar('pr_maskable', pr_maskable)\n\n # build masks and corresponding operations for weight sparsification\n self.masks, self.prune_op = self.__build_masks()\n\n # optimizer & gradients\n optimizer_base = tf.train.MomentumOptimizer(lrn_rate, FLAGS.momentum)\n if not FLAGS.enbl_multi_gpu:\n optimizer = optimizer_base\n else:\n optimizer = mgw.DistributedOptimizer(optimizer_base)\n grads_origin = optimizer.compute_gradients(loss, self.trainable_vars)\n grads_pruned = self.__calc_grads_pruned(grads_origin)\n\n # TF operations & model saver\n self.sess_train = sess\n with tf.control_dependencies(self.update_ops):\n self.train_op = optimizer.apply_gradients(grads_pruned, global_step=self.global_step)\n self.summary_op = tf.summary.merge_all()\n self.log_op = [lrn_rate, loss, pr_trainable, pr_maskable] + list(metrics.values())\n self.log_op_names = ['lr', 'loss', 'pr_trn', 'pr_msk'] + list(metrics.keys())\n self.init_op = tf.variables_initializer(self.vars)\n self.init_opt_op = tf.variables_initializer(optimizer_base.variables())\n if FLAGS.enbl_multi_gpu:\n self.bcast_op = mgw.broadcast_global_variables(0)\n self.saver_train = tf.train.Saver(self.vars)\n\n def __build_eval(self):\n \"\"\"Build the evaluation graph.\"\"\"\n\n with tf.Graph().as_default():\n # create a TF session for the current graph\n config = tf.ConfigProto()\n if FLAGS.enbl_multi_gpu:\n config.gpu_options.visible_device_list = str(mgw.local_rank()) # pylint: disable=no-member\n else:\n config.gpu_options.visible_device_list = '0' # pylint: disable=no-member\n self.sess_eval = tf.Session(config=config)\n\n # data input pipeline\n with tf.variable_scope(self.data_scope):\n iterator = self.build_dataset_eval()\n images, labels = iterator.get_next()\n\n # model definition - distilled model\n if FLAGS.enbl_dst:\n logits_dst = self.helper_dst.calc_logits(self.sess_eval, images)\n\n # model definition - weight-sparsified model\n with tf.variable_scope(self.model_scope):\n # loss & extra evaluation metrics\n logits = self.forward_eval(images)\n loss, metrics = self.calc_loss(labels, logits, self.trainable_vars)\n if FLAGS.enbl_dst:\n loss += self.helper_dst.calc_loss(logits, logits_dst)\n\n # overall pruning ratios of trainable & maskable variables\n pr_trainable = calc_prune_ratio(self.trainable_vars)\n pr_maskable = calc_prune_ratio(self.maskable_vars)\n\n # TF operations for evaluation\n self.eval_op = [loss, pr_trainable, pr_maskable] + list(metrics.values())\n self.eval_op_names = ['loss', 'pr_trn', 'pr_msk'] + list(metrics.keys())\n self.saver_eval = 
tf.train.Saver(self.vars)\n\n def __build_masks(self):\n \"\"\"build masks and corresponding operations for weight sparsification.\n Returns:\n * masks: list of masks for weight sparsification\n * prune_op: pruning operation\n \"\"\"\n\n masks, prune_ops = [], []\n with tf.variable_scope(self.mask_scope):\n for var, var_name_n_prune_ratio in zip(self.maskable_vars, self.var_names_n_prune_ratios):\n # obtain the dynamic pruning ratio\n assert var.name == var_name_n_prune_ratio[0], \\\n 'unmatched variable names: %s vs. %s' % (var.name, var_name_n_prune_ratio[0])\n prune_ratio = self.__calc_prune_ratio_dyn(var_name_n_prune_ratio[1])\n\n # create a mask and non-masked backup for each variable\n name = var.name.replace(':0', '_mask')\n mask = tf.get_variable(name, initializer=tf.ones(var.shape), trainable=False)\n name = var.name.replace(':0', '_var_bkup')\n var_bkup = tf.get_variable(name, initializer=var.initialized_value(), trainable=False)\n\n # create update operations\n var_bkup_update_op = var_bkup.assign(tf.where(mask > 0.5, var, var_bkup))\n with tf.control_dependencies([var_bkup_update_op]):\n mask_thres = tf.contrib.distributions.percentile(tf.abs(var_bkup), prune_ratio * 100)\n mask_update_op = mask.assign(tf.cast(tf.abs(var_bkup) > mask_thres, tf.float32))\n with tf.control_dependencies([mask_update_op]):\n prune_op = var.assign(var_bkup * mask)\n\n # record pruning masks & operations\n masks += [mask]\n prune_ops += [prune_op]\n\n return masks, tf.group(prune_ops)\n\n def __calc_prune_ratio_dyn(self, prune_ratio_fnl):\n \"\"\"Calculate the dynamic pruning ratio.\n Args:\n * prune_ratio_fnl: final pruning ratio\n Returns:\n * prune_ratio_dyn: dynamic pruning ratio\n \"\"\"\n\n idx_iter_beg = int(self.nb_iters_train * FLAGS.ws_iter_ratio_beg)\n idx_iter_end = int(self.nb_iters_train * FLAGS.ws_iter_ratio_end)\n base = tf.cast(self.global_step - idx_iter_beg, tf.float32) / (idx_iter_end - idx_iter_beg)\n base = tf.minimum(1.0, tf.maximum(0.0, base))\n prune_ratio_dyn = prune_ratio_fnl * (1.0 - tf.pow(1.0 - base, FLAGS.ws_prune_ratio_exp))\n\n return prune_ratio_dyn\n\n def __calc_grads_pruned(self, grads_origin):\n \"\"\"Calculate the mask-pruned gradients.\n Args:\n * grads_origin: list of original gradients\n Returns:\n * grads_pruned: list of mask-pruned gradients\n \"\"\"\n\n grads_pruned = []\n for grad in grads_origin:\n if grad[1].name not in self.maskable_var_names:\n grads_pruned += [grad]\n else:\n idx_mask = self.maskable_var_names.index(grad[1].name)\n grads_pruned += [(grad[0] * self.masks[idx_mask], grad[1])]\n\n return grads_pruned\n\n def __save_model(self):\n \"\"\"Save the current model.\"\"\"\n\n save_path = self.saver_train.save(self.sess_train, FLAGS.ws_save_path, self.global_step)\n tf.logging.info('model saved to ' + save_path)\n\n def __restore_model(self, is_train):\n \"\"\"Restore a model from the latest checkpoint files.\n Args:\n * is_train: whether to restore a model for training\n \"\"\"\n\n save_path = tf.train.latest_checkpoint(os.path.dirname(FLAGS.ws_save_path))\n if is_train:\n self.saver_train.restore(self.sess_train, save_path)\n else:\n self.saver_eval.restore(self.sess_eval, save_path)\n tf.logging.info('model restored from ' + save_path)\n\n def __monitor_progress(self, summary, log_rslt, idx_iter, time_step):\n \"\"\"Monitor the training progress.\n Args:\n * summary: summary protocol buffer\n * log_rslt: logging operations' results\n * idx_iter: index of the training iteration\n * time_step: time step between two summary operations\n 
\"\"\"\n\n # write summaries for TensorBoard visualization\n self.sm_writer.add_summary(summary, idx_iter)\n\n # compute the training speed\n speed = FLAGS.batch_size * FLAGS.summ_step / time_step\n if FLAGS.enbl_multi_gpu:\n speed *= mgw.size()\n\n # display monitored statistics\n log_str = ' | '.join(['%s = %.4e' % (name, value)\n for name, value in zip(self.log_op_names, log_rslt)])\n tf.logging.info('iter #%d: %s | speed = %.2f pics / sec' % (idx_iter + 1, log_str, speed))\n\n @property\n def maskable_vars(self):\n \"\"\"List of all maskable variables.\"\"\"\n\n return get_maskable_vars(self.trainable_vars)"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.train.MomentumOptimizer",
"tensorflow.ones",
"tensorflow.variable_scope",
"tensorflow.abs",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.Graph",
"tensorflow.train.get_or_create_global_step",
"tensorflow.variables_initializer",
"tensorflow.count_nonzero",
"tensorflow.app.flags.DEFINE_integer",
"numpy.mean",
"tensorflow.pow",
"tensorflow.cast",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.group",
"tensorflow.ConfigProto",
"tensorflow.control_dependencies",
"tensorflow.size",
"tensorflow.logging.info",
"tensorflow.summary.merge_all",
"tensorflow.where",
"tensorflow.maximum"
]
] |
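The PocketFlow learner in the row above ramps its pruning target with __calc_prune_ratio_dyn, driven by the ws_prune_ratio, ws_iter_ratio_beg, ws_iter_ratio_end, and ws_prune_ratio_exp flags. Below is a plain NumPy sketch of that schedule for illustration only; it is not part of the dataset entry, and the default arguments simply mirror the row's flag defaults.

import numpy as np

def dynamic_prune_ratio(step, nb_iters, final_ratio=0.75,
                        ratio_beg=0.1, ratio_end=0.5, exponent=3.0):
    # linear progress between the begin/end iteration fractions, clipped to [0, 1]
    beg, end = int(nb_iters * ratio_beg), int(nb_iters * ratio_end)
    base = np.clip((step - beg) / float(end - beg), 0.0, 1.0)
    # target sparsity grows as 1 - (1 - base)**exponent, scaled by the final ratio
    return final_ratio * (1.0 - (1.0 - base) ** exponent)

print([round(dynamic_prune_ratio(s, 10000), 3) for s in (0, 1000, 3000, 5000, 8000)])
# -> [0.0, 0.0, 0.656, 0.75, 0.75]

With these defaults the sparsity target stays at zero for the first 10% of training and reaches the full 0.75 ratio at the halfway point, matching the ws_iter_ratio_* flag values defined in the row.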
ckoerber/gsum | [
"a9647d60eb5ef971caad88f97df8f1e9cc5286c2"
] | [
"gsum/models.py"
] | [
"from __future__ import division\nimport docrep\nfrom .helpers import coefficients, hpd, mahalanobis, geometric_sum\nimport numpy as np\nfrom numpy.linalg import solve, cholesky\nimport scipy as sp\nfrom scipy.linalg import cho_solve, solve_triangular, inv, eigh\nfrom scipy.special import loggamma\nimport scipy.stats as st\nfrom scipy.optimize import fmin_l_bfgs_b\nfrom sklearn.base import clone\nfrom sklearn.gaussian_process.kernels import RBF, ConstantKernel\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils.validation import check_X_y\nfrom sklearn.exceptions import ConvergenceWarning\n\nimport warnings\nfrom operator import itemgetter\n\n\n__all__ = [\n 'ConjugateGaussianProcess', 'ConjugateStudentProcess',\n 'TruncationGP', 'TruncationTP', 'TruncationPointwise'\n]\n\ndocstrings = docrep.DocstringProcessor()\n\n\[email protected]_sectionsf('BaseConjugateProcess')\[email protected]\nclass BaseConjugateProcess:\n \"\"\"\n The base class for the stochastic process estimator.\n\n Parameters\n ----------\n kernel : kernel object\n The kernel specifying the correlation function of the GP.\n The covariance matrix is the kernel multiplied by the squared scale.\n If None is passed, the kernel \"RBF(1.0)\" is used as default.\n Note that the kernel’s hyperparameters are optimized during fitting.\n center : float\n The prior central values for the parameters of the mean function.\n disp : float >= 0\n The dispersion parameter for the normal prior placed on the mean. This, multiplied by the squared scale\n parameter from the inverse chi squared prior, determines the variance of the mean.\n The smaller the dispersion, the better determined is the mean.\n Set this to zero for a mean that is known to be `mean`.\n df : float > 0\n The degrees of freedom parameter for the inverse chi squared prior placed on the marginal variance.\n This is a measure of how well the marginal standard deviation (or variance) is known, with\n larger degrees of freedom implying a better known standard deviation. Set this to infinity for a\n standard deviation that is known to be `scale`, or use the `sd` keyword argument.\n scale : float > 0\n The scale parameter of the scaled inverse chi squared prior placed on the marginal variance\n of the Gaussian process. Approximately the prior standard deviation for the Gaussian process.\n sd : float > 0, optional\n A convenience argument that sets the marginal standard deviation for the Gaussian process.\n This is equivalent to setting df0 to infinity and scale0 to sd\n (i.e., a delta function prior on the standard deviation).\n nugget : float, optional (default: 1e-10)\n Value added to the diagonal of the correlation matrix during fitting.\n Larger values correspond to increased noise level in the observations.\n This can also prevent a potential numerical issue during fitting, by\n ensuring that the calculated values form a positive definite matrix.\n optimizer : string or callable, optional (default: \"fmin_l_bfgs_b\")\n Can either be one of the internally supported optimizers for optimizing\n the kernel's parameters, specified by a string, or an externally\n defined optimizer passed as a callable. 
If a callable is passed, it\n must have the signature::\n def optimizer(obj_func, initial_theta, bounds):\n # * 'obj_func' is the objective function to be minimized, which\n # takes the hyperparameters theta as parameter and an\n # optional flag eval_gradient, which determines if the\n # gradient is returned additionally to the function value\n # * 'initial_theta': the initial value for theta, which can be\n # used by local optimizers\n # * 'bounds': the bounds on the values of theta\n ....\n # Returned are the best found hyperparameters theta and\n # the corresponding value of the target function.\n return theta_opt, func_min\n Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize\n is used. If None is passed, the kernel's parameters are kept fixed.\n Available internal optimizers are::\n 'fmin_l_bfgs_b'\n n_restarts_optimizer : int, optional (default: 0)\n The number of restarts of the optimizer for finding the kernel's\n parameters which maximize the log-marginal likelihood. The first run\n of the optimizer is performed from the kernel's initial parameters,\n the remaining ones (if any) from thetas sampled log-uniform randomly\n from the space of allowed theta-values. If greater than 0, all bounds\n must be finite. Note that n_restarts_optimizer == 0 implies that one\n run is performed.\n copy_X_train : bool, optional (default: True)\n If True, a persistent copy of the training data is stored in the\n object. Otherwise, just a reference to the training data is stored,\n which might cause predictions to change if the data is modified\n externally.\n random_state : int, RandomState instance or None, optional (default: None)\n The generator used to initialize the centers. If int, random_state is\n the seed used by the random number generator; If RandomState instance,\n random_state is the random number generator; If None, the random number\n generator is the RandomState instance used by `np.random`.\n \"\"\"\n \n def __init__(self, kernel=None, center=0, disp=0, df=1, scale=1, sd=None, basis=None, nugget=1e-10,\n optimizer='fmin_l_bfgs_b', n_restarts_optimizer=0, copy_X_train=True, random_state=None,\n decomposition='cholesky'):\n self.kernel = kernel\n\n # Setup hyperparameters\n self._center_0 = np.atleast_1d(center)\n self._disp_0 = np.atleast_2d(disp)\n if sd is not None:\n self._df_0 = np.inf\n self._scale_0 = sd\n else:\n self._df_0 = df\n self._scale_0 = scale\n\n # Break with scikit learn convention here. 
Define all attributes in __init__.\n # Use value of self._fit to determine whether the `fit` has been called.\n self._fit = False\n self.X_train_ = None\n self.y_train_ = None\n self.corr_L_ = self.corr_sqrt_ = None\n self.corr_ = None\n self.center_ = None\n self.disp_ = None\n self.df_ = None\n self.scale_ = None\n self.cov_factor_ = None\n self.cbar_sq_mean_ = None\n self.kernel_ = None\n self._rng = None\n self._eigh_tuple_ = None\n\n self.nugget = nugget\n self.copy_X_train = copy_X_train\n self.random_state = random_state\n self.n_restarts_optimizer = n_restarts_optimizer\n self.optimizer = optimizer\n self.decomposition = decomposition\n\n self._default_kernel = ConstantKernel(1.0, constant_value_bounds='fixed') * \\\n RBF(1.0, length_scale_bounds='fixed')\n\n if basis is None:\n self.basis = lambda X: np.ones((X.shape[0], 1))\n self.basis_train_ = None\n\n @property\n def center0(self):\n return self._center_0\n\n @property\n def disp0(self):\n return self._disp_0\n\n @property\n def df0(self):\n return self._df_0\n\n @property\n def scale0(self):\n return self._scale_0\n\n @classmethod\n def compute_center(cls, y, sqrt_R, basis, center0, disp0, decomposition, eval_gradient=False, dR=None):\n R\"\"\"Computes the regression coefficients' center hyperparameter :math:`\\eta` updated based on data\n\n Parameters\n ----------\n y : array, shape = (n_curves, n_samples)\n The data to condition upon\n sqrt_R : array, shape = (n_samples, n_samples)\n The decomposition of the correlation matrix. Its value depends on `decomposition`\n basis : array, shape = (n_samples, n_param)\n The basis matrix that multiplies the regression coefficients beta to create the GP mean.\n center0 : scalar or array, shape = (n_param)\n The prior regression coefficients for the mean\n disp0 : scalar or array, shape = (n_param, n_param)\n The prior dispersion for the regression coefficients\n decomposition : str\n The way that R has been decomposed into sqrt_R: either 'cholesky' or 'eig'.\n eval_gradient : bool, optional\n Whether to return the gradient with respect to the kernel hyperparameters. Defaults to False.\n dR : array, shape = (n_samples, n_samples, n_kernel_params), optional\n The gradient of the correlation matrix. This is required if eval_gradient is True.\n\n Returns\n -------\n center : scalar or array, shape = (n_param)\n The posterior regression coefficients for the mean\n grad_center : array, shape = (n_param, n_kernel_params), optional\n The gradient of the posterior regression coefficients for the mean with respect to kernel hyperparameters\n \"\"\"\n # Mean is not updated if its prior variance is zero (i.e. 
delta function prior)\n # Do by hand to prevent dividing by zero\n if np.all(disp0 == 0):\n if eval_gradient:\n if dR is None:\n raise ValueError('dR must be given if eval_gradient is True')\n return np.copy(center0), np.zeros((*center0.shape, dR.shape[-1]))\n return np.copy(center0)\n\n y_avg = cls.avg_y(y)\n ny = cls.num_y(y)\n\n # if decomposition == 'cholesky':\n # invR_y_avg = cho_solve((sqrt_R, True), y_avg)\n # elif decomposition == 'eig':\n # invR_y_avg = solve(sqrt_R, y_avg)\n # else:\n # raise ValueError('decomposition must be either \"cholesky\" or \"eig\"')\n invR_y_avg = cls.solve_sqrt(sqrt_R, y=y_avg, decomposition=decomposition)\n disp = cls.compute_disp(y=y, sqrt_R=sqrt_R, basis=basis, disp0=disp0, decomposition=decomposition)\n factor = solve(disp0, center0) + ny * basis.T @ invR_y_avg\n center = disp @ factor\n\n if eval_gradient:\n if dR is None:\n raise ValueError('dR must be given if eval_gradient is True')\n # invR_basis = cho_solve((chol, True), basis)\n # invR_diff = cho_solve((chol, True), basis @ center - y_avg)\n invR_basis = cls.solve_sqrt(sqrt_R, y=basis, decomposition=decomposition)\n invR_diff = cls.solve_sqrt(sqrt_R, y=basis @ center - y_avg, decomposition=decomposition)\n d_center = ny * disp @ np.einsum('ji,jkp,k->ip', invR_basis, dR, invR_diff)\n return center, d_center\n return center\n\n @classmethod\n def compute_disp(cls, y, sqrt_R, basis, disp0, decomposition, eval_gradient=False, dR=None):\n R\"\"\"The dispersion hyperparameter :math:`V` updated based on data.\n\n Parameters\n ----------\n y : array, shape = (n_curves, n_samples)\n The data to condition upon\n sqrt_R : (n_samples, n_samples)-shaped array\n The lower Cholesky decomposition of the correlation matrix\n basis : (n_samples, n_param)-shaped array\n The basis for the `p` regression coefficients `beta`\n disp0 : (n_param, n_param)-shaped array\n The prior dispersion\n eval_gradient : bool, optional\n Whether to return the gradient with respect to the kernel hyperparameters. Defaults to False.\n dR : array, shape = (n_samples, n_samples, n_kernel_params), optional\n The gradient of the correlation matrix. 
This is required if eval_gradient is True.\n\n Returns\n -------\n disp : (p, p)-shaped array\n The updated dispersion hyperparameter\n grad_disp : array, shape = (p,p), optional\n \"\"\"\n # If prior variance is zero, it stays zero\n # Do by hand to prevent dividing by zero\n if np.all(disp0 == 0):\n if eval_gradient:\n if dR is None:\n raise ValueError('dR must be given if eval_gradient is True')\n return np.zeros_like(disp0), np.zeros((*disp0.shape, dR.shape[-1]))\n return np.zeros_like(disp0)\n\n ny = cls.num_y(y)\n # quad = mahalanobis(basis.T, 0, chol) ** 2\n quad = basis.T @ cls.solve_sqrt(sqrt_R, y=basis, decomposition=decomposition)\n disp = inv(inv(disp0) + ny * quad)\n if eval_gradient:\n if dR is None:\n raise ValueError('dR must be given if eval_gradient is True')\n # invRBV = cho_solve((chol, True), basis) @ disp\n invRBV = cls.solve_sqrt(sqrt_R, y=basis, decomposition=decomposition) @ disp\n dV = ny * np.einsum('ji,jkp,kl->ilp', invRBV, dR, invRBV)\n return disp, dV\n return disp\n\n @classmethod\n def compute_df(cls, y, df0, eval_gradient=False, dR=None):\n R\"\"\"Computes the degrees of freedom hyperparameter :math:`\\nu` based on data\n\n Parameters\n ----------\n y : array, shape = (n_curves, n_samples)\n The data to condition upon\n df0 : scalar\n The prior degrees of freedom\n eval_gradient : bool, optional\n Whether to return the gradient with respect to the kernel hyperparameters. Defaults to False.\n dR : array, shape = (n_samples, n_samples, n_kernel_params), optional\n The gradient of the correlation matrix. This is required if eval_gradient is True.\n\n Returns\n -------\n df : scalar\n The updated degrees of freedom\n grad_df : array, size = (n_kernel_params,), optional\n The gradient of the updated degrees of freedom with respect to the kernel parameters\n \"\"\"\n df = df0 + y.size\n if eval_gradient:\n if dR is None:\n raise ValueError('dR must be given if eval_gradient is True')\n return df, np.zeros(dR.shape[-1])\n return df\n\n @classmethod\n def compute_scale_sq_v2(cls, y, sqrt_R, basis, center0, disp0, df0, scale0, decomposition,\n eval_gradient=False, dR=None):\n R\"\"\"The squared scale hyperparameter :math:`\\tau^2` updated based on data.\n\n Parameters\n ----------\n y : array, shape = (n_samples, [n_curves])\n The data to condition upon\n chol : array, shape = (n_samples, n_samples)\n The lower Cholesky decomposition of the correlation matrix\n basis : array, shape = (n_samples, n_param)\n The basis for the `p` regression coefficients `beta`\n center0 : scalar or array, shape = (n_param)\n The prior regression coefficients for the mean\n disp0 : array, shape = (n_param, n_param)\n The prior dispersion\n df0 : scalar\n The prior degrees of freedom hyperparameter\n scale0 : scalar\n The prior scale hyperparameter\n decomposition\n eval_gradient : bool, optional\n Whether to return to the gradient with respect to kernel hyperparameters. Defaults to False.\n dR : array, shape = (n_samples, n_samples, n_kernel_params), optional\n The gradient of the correlation matrix. This is required if eval_gradient is True.\n\n Returns\n -------\n scale_sq : scalar\n The updated scale hyperparameter squared\n grad_scale_sq : array, shape = (n_kernel_params,), optional\n The gradient of scale^2 with respect to the kernel hyperparameters. 
Only returned if eval_gradient is True.\n \"\"\"\n if df0 == np.inf:\n if eval_gradient:\n return scale0**2, np.zeros(dR.shape[-1])\n return scale0**2\n\n avg_y, ny = cls.avg_y(y), cls.num_y(y)\n\n # Compute contributions from a non-zero mean\n if np.all(disp0 == 0):\n # The disp -> 0 limit must be taken carefully to find these terms\n center = center0\n # invR_diff0 = cho_solve((chol, True), 2 * avg_y - basis @ center)\n invR_diff0 = cls.solve_sqrt(sqrt_R, 2 * avg_y - basis @ center, decomposition=decomposition)\n mean_terms = - ny * center0 @ basis.T @ invR_diff0\n else:\n center = cls.compute_center(\n y=y, sqrt_R=sqrt_R, basis=basis, center0=center0, disp0=disp0, decomposition=decomposition)\n disp = cls.compute_disp(y=y, sqrt_R=sqrt_R, basis=basis, disp0=disp0, decomposition=decomposition)\n mean_terms = center0 @ inv(disp0) @ center0 - center @ inv(disp) @ center\n\n # Combine the prior info, quadratic form, and mean contributions to find scale**2\n if y.ndim == 1:\n y = y[:, None]\n # invR_y = cho_solve((chol, True), y)\n invR_y = cls.solve_sqrt(sqrt_R, y=y, decomposition=decomposition)\n quad = np.trace(y.T @ invR_y)\n df = cls.compute_df(y=y, df0=df0)\n scale_sq = (df0 * scale0**2 + mean_terms + quad) / df\n\n if eval_gradient:\n if dR is None:\n raise ValueError('dR must be given if eval_gradient is true')\n # Both the disp -> 0 and non-zero forms have the same gradient formula\n d_scale_sq = - np.einsum('ij,jkp,ki->p', invR_y.T, dR, invR_y) # From the quadratic form\n # invR_diff = cho_solve((chol, True), 2 * avg_y - basis @ center)\n # invR_basis_center = cho_solve((chol, True), basis) @ center\n invR_diff = cls.solve_sqrt(sqrt_R, 2 * avg_y - basis @ center, decomposition=decomposition)\n invR_basis_center = cls.solve_sqrt(sqrt_R, basis, decomposition=decomposition) @ center\n d_scale_sq += ny * np.einsum('i,ijp,j->p', invR_basis_center, dR, invR_diff)\n d_scale_sq /= df\n return scale_sq, d_scale_sq\n return scale_sq\n\n @classmethod\n def compute_scale_sq(cls, y, sqrt_R, basis, center0, disp0, df0, scale0, decomposition,\n eval_gradient=False, dR=None):\n R\"\"\"The squared scale hyperparameter :math:`\\tau^2` updated based on data.\n\n Parameters\n ----------\n y : ndarray, shape = (n_samples, [n_curves])\n The data to condition upon\n sqrt_R : ndarray, shape = (n_samples, n_samples)\n The lower Cholesky decomposition of the correlation matrix\n basis : ndarray, shape = (n_samples, n_param)\n The basis for the `p` regression coefficients `beta`\n center0 : int or float or array, shape = (n_param)\n The prior regression coefficients for the mean\n disp0 : ndarray, shape = (n_param, n_param)\n The prior dispersion\n df0 : int or float\n The prior degrees of freedom hyperparameter\n scale0 : int or float\n The prior scale hyperparameter\n eval_gradient : bool, optional\n Whether to return to the gradient with respect to kernel hyperparameters. Defaults to False.\n dR : array, shape = (n_samples, n_samples, n_kernel_params)\n The gradient of the correlation matrix. This is required if eval_gradient is True.\n\n Returns\n -------\n scale_sq : scalar\n The updated scale hyperparameter squared\n grad_scale_sq : array, shape = (n_kernel_params,), optional\n The gradient of scale^2 with respect to the kernel hyperparameters. 
Only returned if eval_gradient is True.\n \"\"\"\n if df0 == np.inf:\n if eval_gradient:\n return scale0**2, np.zeros(dR.shape[-1])\n return scale0**2\n\n if y.ndim == 1:\n y = y[:, None]\n avg_y = cls.avg_y(y)\n N = len(avg_y)\n ny = cls.num_y(y)\n\n y_centered = y - avg_y[:, None]\n # invR_yc = cho_solve((chol, True), y_centered)\n invR_yc = cls.solve_sqrt(sqrt_R, y_centered, decomposition=decomposition)\n quad = np.trace(y_centered.T @ invR_yc)\n\n avg_y_centered = avg_y - basis @ center0\n disp = cls.compute_disp(\n y=y, sqrt_R=sqrt_R, basis=basis, disp0=disp0, decomposition=decomposition, eval_gradient=False)\n invR_basis = cls.solve_sqrt(sqrt_R, basis, decomposition=decomposition)\n invR_avg_yc = cls.solve_sqrt(sqrt_R, avg_y_centered, decomposition=decomposition)\n # Use the Woodbury matrix identity on Melendez et al Eq. (A31):\n mat = np.eye(N) - ny * invR_basis @ disp @ basis.T\n mat_invR_avg_yc = ny * mat @ invR_avg_yc\n # mat = np.eye(N) - ny * cho_solve((chol, True), basis) @ disp @ basis.T\n # mat_invR_avg_yc = ny * mat @ cho_solve((chol, True), avg_y_centered)\n quad2 = avg_y_centered @ mat_invR_avg_yc\n\n df = cls.compute_df(y=y, df0=df0)\n scale_sq = (df0 * scale0 ** 2 + quad + quad2) / df\n\n if eval_gradient:\n if dR is None:\n raise ValueError('dR must be given if eval_gradient is true')\n d_scale_sq = - np.einsum('ji,jkp,ki->p', invR_yc, dR, invR_yc)\n d_scale_sq -= np.einsum('i,ijp,j->p', mat_invR_avg_yc, dR, mat_invR_avg_yc) / ny\n d_scale_sq /= df\n return scale_sq, d_scale_sq\n return scale_sq\n\n @staticmethod\n def solve_sqrt(sqrt_mat, y, decomposition):\n R\"\"\"Solves a system Mx = y given sqrt_M and y.\n\n Parameters\n ----------\n sqrt_mat : array\n The square root of a matrix. If decomposition is 'eig', then this can be a tuple (eig, Q) such that\n M = Q @ np.diag(eig) @ Q.T. This can speed up the inversion due to the simple property that\n M^-1 = Q @ np.diag(1/eig) @ Q.T.\n y : array\n decomposition : str\n The way that the square root has been performed. Either 'cholesky' or 'eig'. If cholesky,\n then it is assumed that sqrt_mat is the lower triangular matrix `L` such that `M = L L.T`.\n\n Returns\n -------\n x\n \"\"\"\n if decomposition == 'cholesky':\n return cho_solve((sqrt_mat, True), y)\n elif decomposition == 'eig':\n if isinstance(sqrt_mat, tuple):\n eig, Q = sqrt_mat\n inv_mat = Q @ np.diag(1. / eig) @ Q.T\n return inv_mat @ y\n return solve(sqrt_mat.T, solve(sqrt_mat, y))\n else:\n raise ValueError('decomposition must be either \"cholesky\" or \"eig\"')\n\n @staticmethod\n def compute_cov_factor(scale_sq, df):\n R\"\"\"Converts the squared scale hyperparameter :math:`\\tau^2` to the correlation -> covariance conversion factor\n\n The conversion is given by :math:`\\sigma^2 = \\nu \\tau^2 / (\\nu - 2)` for :math:`\\nu > 2`\n\n Warnings\n --------\n If the correlation matrix does equal 1 on the diagonal, :math:`\\sigma^2` **will not**\n be the marginal variance. 
Instead, one must look at the diagonal of the covariance directly.\n \"\"\"\n var = scale_sq\n if df != np.inf:\n var = df * scale_sq / (df - 2)\n return var\n\n def center(self):\n \"\"\"The regression coefficient hyperparameters for the mean updated by the call to `fit`.\n \"\"\"\n if self.decomposition == 'cholesky':\n sqrt_R = self.corr_sqrt_\n elif self.decomposition == 'eig':\n sqrt_R = self._eigh_tuple_\n else:\n raise ValueError('decomposition must be either \"cholesky\" or \"eig\"')\n return self.compute_center(\n y=self.y_train_, sqrt_R=sqrt_R, basis=self.basis_train_,\n center0=self.center0, disp0=self.disp0, decomposition=self.decomposition)\n\n def disp(self):\n \"\"\"The dispersion hyperparameter updated by the call to `fit`.\n \"\"\"\n if self.decomposition == 'cholesky':\n sqrt_R = self.corr_sqrt_\n elif self.decomposition == 'eig':\n sqrt_R = self._eigh_tuple_\n else:\n raise ValueError('decomposition must be either \"cholesky\" or \"eig\"')\n return self.compute_disp(\n y=self.y_train_, sqrt_R=sqrt_R, basis=self.basis_train_, disp0=self.disp0,\n decomposition=self.decomposition)\n\n def df(self):\n \"\"\"The degrees of freedom hyperparameter for the standard deviation updated by the call to `fit`\n \"\"\"\n return self.compute_df(y=self.y_train_, df0=self.df0)\n\n def scale(self):\n \"\"\"The scale hyperparameter for the standard deviation updated by the call to `fit`\n \"\"\"\n if self.decomposition == 'cholesky':\n sqrt_R = self.corr_sqrt_\n elif self.decomposition == 'eig':\n sqrt_R = self._eigh_tuple_\n else:\n raise ValueError('decomposition must be either \"cholesky\" or \"eig\"')\n scale_sq = self.compute_scale_sq(\n y=self.y_train_, sqrt_R=sqrt_R, basis=self.basis_train_,\n center0=self.center0, disp0=self.disp0, df0=self.df0, scale0=self.scale0,\n decomposition=self.decomposition)\n return np.sqrt(scale_sq)\n\n def mean(self, X):\n \"\"\"The MAP value for the mean of the process at inputs X with hyperparameters updated by y.\n\n This does not interpolate the y values. For that functionality, use `predict`.\n \"\"\"\n if not self._fit: # Unfitted; predict based on GP prior\n center = self.center0\n else:\n center = self.center_\n return self.basis(X) @ center\n\n def cov(self, X, Xp=None):\n R\"\"\"Computes the covariance matrix.\n\n If `fit` has not been called, then this uses the prior values of `df` and `scale` and the default\n unoptimized kernel. Otherwise it uses the posterior values of `df` and `scale`, and the optimized kernel.\n This does not return the conditional covariance matrix. 
For that, use `predict`.\n\n Parameters\n ----------\n X : array, shape = (n_samples, n_features)\n Xp : array, optional, shape = (n_samples2, n_features)\n\n Returns\n -------\n array, shape = (n_samples, n_samples2)\n\n Raises\n ------\n ValueError if the degrees of freedom is less than 2, since the covariance does not exist in this case.\n This could happen if `fit` is not called and the provided `df` is less than 2.\n \"\"\"\n # Don't fill in Xp because WhiteKernel will not work correctly\n # if Xp is None:\n # Xp = X\n\n if not self._fit: # Unfitted; predict based on GP prior\n if self.df0 <= 2:\n raise ValueError('df must be greater than 2 for the covariance to exist')\n cov_factor = self.compute_cov_factor(scale_sq=self.scale0**2, df=self.df0)\n if self.kernel is None:\n kernel = self._default_kernel\n else:\n kernel = self.kernel\n else:\n cov_factor = self.cov_factor_\n kernel = self.kernel_\n\n return cov_factor * kernel(X, Xp)\n \n @staticmethod\n def num_y(y):\n \"\"\"Computes the number of curves in y\"\"\"\n ny = 1\n if y.ndim == 2:\n ny = y.shape[1]\n return ny\n\n @staticmethod\n def avg_y(y):\n \"\"\"Computes the average of y over the set of curves\n\n Parameters\n ----------\n y : array, shape = (n_samples, [n_curves])\n The data\n\n Returns\n -------\n avg_y : array, shape = (n_samples,)\n The average of y over the set of curves\n \"\"\"\n if y.ndim == 1:\n return np.copy(y)\n elif y.ndim == 2:\n return np.average(y, axis=1)\n else:\n raise ValueError('y must be two-dimensional, not shape={}'.format(y.shape))\n\n def _calibrate_kernel(self):\n if self.optimizer is not None and self.kernel_.n_dims > 0:\n # Choose hyperparameters based on maximizing the log-marginal\n # likelihood (potentially starting from several initial values)\n def obj_func(theta, eval_gradient=True):\n if eval_gradient:\n lml, grad = self.log_marginal_likelihood(\n theta, eval_gradient=True)\n return -lml, -grad\n else:\n return -self.log_marginal_likelihood(theta)\n\n # First optimize starting from theta specified in kernel\n optima = [(self._constrained_optimization(obj_func,\n self.kernel_.theta,\n self.kernel_.bounds))]\n\n # Additional runs are performed from log-uniform chosen initial\n # theta\n if self.n_restarts_optimizer > 0:\n if not np.isfinite(self.kernel_.bounds).all():\n raise ValueError(\n \"Multiple optimizer restarts (n_restarts_optimizer>0) \"\n \"requires that all bounds are finite.\")\n bounds = self.kernel_.bounds\n for iteration in range(self.n_restarts_optimizer):\n theta_initial = \\\n self._rng.uniform(bounds[:, 0], bounds[:, 1])\n optima.append(\n self._constrained_optimization(obj_func, theta_initial,\n bounds))\n # Select result from run with minimal (negative) log-marginal\n # likelihood\n lml_values = list(map(itemgetter(1), optima))\n optima = np.array(optima)\n self.kernel_.theta = optima[np.argmin(lml_values)][0]\n self.log_marginal_likelihood_value_ = -np.min(lml_values)\n else:\n self.log_marginal_likelihood_value_ = \\\n self.log_marginal_likelihood(self.kernel_.theta)\n \n def fit(self, X, y):\n R\"\"\"Fits the process to data (X, y) and updates all hyperparameters.\n\n Parameters\n ----------\n X : array, shape = (n_samples, n_features)\n The input variables where the response is observed\n y : array, shape = (n_samples, [n_curves])\n The response values\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n if self.kernel is None: # Use an RBF kernel as default\n self.kernel_ = clone(self._default_kernel)\n else:\n self.kernel_ = 
clone(self.kernel)\n self._rng = check_random_state(self.random_state)\n\n # X, y = check_X_y(X, y, multi_output=True, y_numeric=True)\n if self.copy_X_train:\n try:\n self.X_train_ = X.copy()\n except AttributeError:\n self.X_train_ = np.copy(X)\n\n try:\n self.y_train_ = y.copy()\n except AttributeError:\n self.y_train_ = np.copy(y)\n else:\n self.X_train_ = X\n self.y_train_ = y\n self.basis_train_ = self.basis(self.X_train_)\n\n self._calibrate_kernel()\n self.corr_ = self.kernel_(X)\n\n if self.decomposition == 'cholesky':\n self.corr_L_ = self.corr_sqrt_ = cholesky(self.corr_ + self.nugget * np.eye(len(X)))\n sqrt_R = self.corr_sqrt_\n elif self.decomposition == 'eig':\n eig, Q = eigh(self.corr_ + self.nugget * np.eye(len(X)))\n self._eigh_tuple_ = eig, Q\n sqrt_R = eig, Q # Passing tuple makes matrix inversion easier later on\n self.corr_L_ = self.corr_sqrt_ = Q @ np.diag(np.sqrt(eig))\n else:\n raise ValueError('decomposition must be \"cholesky\" or \"eig\"')\n\n self.center_ = self.compute_center(\n y=self.y_train_, sqrt_R=sqrt_R, basis=self.basis_train_,\n center0=self.center0, disp0=self.disp0, decomposition=self.decomposition\n )\n self.disp_ = self.compute_disp(\n y=self.y_train_, sqrt_R=sqrt_R, basis=self.basis_train_, disp0=self.disp0,\n decomposition=self.decomposition\n )\n self.df_ = self.compute_df(y=self.y_train_, df0=self.df0)\n scale_sq = self.compute_scale_sq(\n y=self.y_train_, sqrt_R=sqrt_R, basis=self.basis_train_,\n center0=self.center0, disp0=self.disp0, df0=self.df0, scale0=self.scale0,\n decomposition=self.decomposition\n )\n self.scale_ = np.sqrt(scale_sq)\n self.cov_factor_ = self.cbar_sq_mean_ = self.compute_cov_factor(scale_sq=scale_sq, df=self.df_)\n self._fit = True\n return self\n\n def underlying_properties(self, X, return_std=False, return_cov=False):\n y_mean = self.mean(X)\n if return_cov:\n y_cov = self.cov(X)\n return y_mean, y_cov\n elif return_std:\n y_std = np.sqrt(np.diag(self.cov(X)))\n return y_mean, y_std\n else:\n return y_mean\n\n @docstrings.get_sectionsf('BaseConjugateProcess_predict')\n @docstrings.dedent\n def predict(self, X, return_std=False, return_cov=False, Xc=None, y=None, pred_noise=False):\n \"\"\"\n Predict using the Gaussian process regression model at the points `X`\n\n Calling `predict` before calling `fit` will use the GP prior.\n In addition to the mean of the predictive distribution, its standard deviation (return_std=True)\n or covariance (return_cov=True) can be returned. Note that at most one of the two can be requested.\n\n Parameters\n ----------\n X : array, shape = (n_samples, n_features)\n Locations at which to predict the new y values\n return_std : bool, optional (default = False)\n Whether the marginal standard deviation of the predictive process is to be returned\n return_cov : bool, optional (default = False)\n Whether the covariance matrix of the predictive process is to be returned\n Xc : array, shape = (n_conditional_samples, n_features)\n Locations at which to condition. Defaults to `X` used in fit. This *does not*\n affect the `X` used to update hyperparameters.\n y : array, shape = (n_conditional_samples, [n_curves])\n Points upon which to condition. Defaults to the `y` used in `fit`. 
This *does not*\n affect the `y` used to update hyperparameters.\n pred_noise : bool, optional\n Adds `nugget` to the diagonal of the covariance matrix if `return_cov == True`.\n\n Returns\n -------\n y_mean : array, shape = (n_curves, n_samples)\n Mean of predictive distribution at query points\n y_std : array, shape = (n_samples,), optional\n Standard deviation of predictive distribution at query points.\n Only returned when return_std is True.\n y_cov : array, shape = (n_samples, n_samples), optional\n Covariance of joint predictive distribution at query points.\n Only returned when return_cov is True.\n \"\"\"\n if return_std and return_cov:\n raise RuntimeError('Only one of return_std or return_cov may be True')\n\n if not self._fit: # Unfitted; predict based on GP prior\n return self.underlying_properties(X=X, return_std=return_std, return_cov=return_cov)\n\n decomp = self.decomposition\n\n if Xc is None:\n Xc = self.X_train_\n if decomp == 'cholesky':\n sqrt_R = self.corr_sqrt_\n elif decomp == 'eig':\n sqrt_R = self._eigh_tuple_\n else:\n raise ValueError('decomposition must be \"cholesky\" or \"eig\"')\n else:\n # corr_chol = cholesky(self.kernel_(Xc) + self.nugget * np.eye(len(Xc)))\n kk = self.kernel_(Xc) + self.nugget * np.eye(len(Xc))\n if decomp == 'cholesky':\n sqrt_R = cholesky(kk)\n elif decomp == 'eig':\n sqrt_R = eigh(kk) # eig, Q\n else:\n raise ValueError('decomposition must be \"cholesky\" or \"eig\"')\n if y is None:\n y = self.y_train_\n\n # Use X and y from fit for hyperparameters\n m_old = self.mean(Xc)\n m_new = self.mean(X)\n\n # Now use X and y from arguments for conditioning/predictions\n R_on = self.kernel_(Xc, X)\n R_no = R_on.T\n R_nn = self.kernel_(X) # Only use one argument, otherwise, e.g., WhiteKernel won't work right\n\n if y.ndim == 1:\n y = y[:, None]\n\n # Use given y for prediction\n # alpha = cho_solve((corr_chol, True), (y - m_old[:, None]))\n alpha = self.solve_sqrt(sqrt_R, (y - m_old[:, None]), decomposition=decomp)\n m_pred = np.squeeze(m_new[:, None] + R_no @ alpha)\n if return_std or return_cov:\n # half_quad = solve_triangular(corr_chol, R_on, lower=True)\n # R_pred = R_nn - half_quad.T @ half_quad\n R_pred = R_nn - R_no @ self.solve_sqrt(sqrt_R, R_on, decomposition=decomp)\n if pred_noise:\n R_pred += self.nugget * np.eye(len(X))\n # Use y from fit for hyperparameters\n var = self.compute_cov_factor(scale_sq=self.scale_**2, df=self.df_)\n K_pred = np.squeeze(var * R_pred)\n if return_std:\n return m_pred, np.sqrt(np.diag(K_pred))\n return m_pred, K_pred\n return m_pred\n\n def sample_y(self, X, n_samples=1, random_state=0, underlying=False):\n \"\"\"Draw samples from Gaussian process and evaluate at X. 
(Taken from scikit-learn's gp module)\n\n Parameters\n ----------\n X : array, shape = (n_samples, n_features)\n n_samples : int, optional (default = 1)\n random_state : int, RandomState instance or None, optional (default=0)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used by np.random.\n\n Returns\n -------\n y_samples : array, shape = (n_samples, [n_curves])\n Output samples from the GP at input points X.\n \"\"\"\n rng = check_random_state(random_state)\n\n if underlying:\n y_mean, y_cov = self.underlying_properties(X=X, return_cov=True)\n else:\n y_mean, y_cov = self.predict(X, return_cov=True)\n\n if y_mean.ndim == 1:\n y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T\n else:\n y_samples = \\\n [rng.multivariate_normal(y_mean[:, i], y_cov,\n n_samples).T[:, np.newaxis]\n for i in range(y_mean.shape[1])]\n y_samples = np.hstack(y_samples)\n return y_samples\n\n def log_marginal_likelihood(self, theta=None, eval_gradient=False, X=None, y=None):\n raise NotImplementedError\n\n def _constrained_optimization(self, obj_func, initial_theta, bounds):\n R\"\"\"A method to find the best kernel hyperparameters. Taken from scikit-learn.\n \"\"\"\n if self.optimizer == \"fmin_l_bfgs_b\":\n theta_opt, func_min, convergence_dict = \\\n fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)\n if convergence_dict[\"warnflag\"] != 0:\n warnings.warn(\"fmin_l_bfgs_b terminated abnormally with the \"\n \" state: %s\" % convergence_dict,\n ConvergenceWarning)\n elif callable(self.optimizer):\n theta_opt, func_min = \\\n self.optimizer(obj_func, initial_theta, bounds=bounds)\n else:\n raise ValueError(\"Unknown optimizer %s.\" % self.optimizer)\n\n return theta_opt, func_min\n\n\[email protected]\nclass ConjugateGaussianProcess(BaseConjugateProcess):\n R\"\"\"A conjugacy-based Gaussian Process class.\n\n Parameters\n ----------\n %(BaseConjugateProcess.parameters)s\n \"\"\"\n\n def log_marginal_likelihood(self, theta=None, eval_gradient=False, X=None, y=None):\n \"\"\"Returns log-marginal likelihood of theta for training data.\n\n Parameters\n ----------\n theta : array-like, shape = (n_kernel_params,) or None\n Kernel hyperparameters for which the log-marginal likelihood is\n evaluated. If `None`, and fit() has been called, the precomputed\n log_marginal_likelihood of ``self.kernel_.theta`` is returned.\n eval_gradient : bool, default: False\n If True, the gradient of the log-marginal likelihood with respect\n to the kernel hyperparameters at position theta is returned\n additionally. If True, theta must not be None.\n X : array, shape = (n_samples, n_features), optional\n The input data to use for the kernel. Defaults to `X` passed in `fit`.\n y : array, shape = (n_samples, [n_curves]), optional\n The observed data to use. 
Defaults to `y` passed in `fit`.\n\n Returns\n -------\n log_likelihood : float\n Log-marginal likelihood of theta for training data.\n log_likelihood_gradient : array, shape = (n_kernel_params,), optional\n Gradient of the log-marginal likelihood with respect to the kernel\n hyperparameters at position theta.\n Only returned when eval_gradient is True.\n \"\"\"\n if theta is None and self._fit:\n if eval_gradient:\n raise ValueError(\n \"Gradient can only be evaluated for theta!=None\"\n )\n return self.log_marginal_likelihood_value_\n\n if not hasattr(self, 'kernel_') or self.kernel_ is None:\n if self.kernel is None:\n kernel = self._default_kernel\n else:\n kernel = self.kernel\n else:\n kernel = self.kernel_\n kernel = kernel.clone_with_theta(theta)\n X = self.X_train_ if X is None else X\n y = self.y_train_ if y is None else y\n\n if eval_gradient:\n R, R_gradient = kernel(X, eval_gradient=True)\n else:\n R = kernel(X)\n R_gradient = None\n\n R[np.diag_indices_from(R)] += self.nugget\n\n decomp = self.decomposition\n\n if decomp == 'cholesky':\n try:\n sqrt_R = cholesky(R) # Line 2\n except np.linalg.LinAlgError:\n return (-np.inf, np.zeros_like(theta)) \\\n if eval_gradient else -np.inf\n elif decomp == 'eig':\n sqrt_R = eigh(R) # eig, Q\n else:\n raise ValueError('decomposition must be \"cholesky\" or \"eig\"')\n\n\n # Support multi-dimensional output of self.y_train_\n if y.ndim == 1:\n y = y[:, np.newaxis]\n\n # ---------------------------------\n # Conjugacy-specific code.\n center0, disp0, df0, scale0 = self.center0, self.disp0, self.df0, self.scale0\n df = self.compute_df(y=y, df0=df0, eval_gradient=False)\n basis = self.basis(X)\n if eval_gradient:\n center, grad_center = self.compute_center(\n y, sqrt_R, basis, center0=center0, disp0=disp0,\n eval_gradient=eval_gradient, dR=R_gradient, decomposition=decomp\n )\n scale2, dscale2 = self.compute_scale_sq(\n y=y, sqrt_R=sqrt_R, basis=basis, center0=center0, disp0=disp0,\n df0=df0, scale0=scale0, eval_gradient=eval_gradient, dR=R_gradient,\n decomposition=decomp\n )\n grad_var = self.compute_cov_factor(scale_sq=dscale2, df=df)\n grad_mean = basis @ grad_center\n else:\n center = self.compute_center(y, sqrt_R, basis, center0=center0, disp0=disp0, decomposition=decomp)\n scale2 = self.compute_scale_sq(\n y=y, sqrt_R=sqrt_R, basis=basis, center0=center0, disp0=disp0,\n df0=df0, scale0=scale0, decomposition=decomp\n )\n grad_center, grad_var, grad_mean = None, None, None\n mean = basis @ center\n var = self.compute_cov_factor(scale_sq=scale2, df=df)\n\n # Convert from correlation matrix to covariance and subtract mean\n # to make all calculations below identical to scikit learn implementation\n # L = np.sqrt(var) * corr_L\n if decomp == 'cholesky':\n L = np.sqrt(var) * sqrt_R\n logdet_K = 2 * np.log(np.diag(L)).sum()\n elif decomp == 'eig':\n eig, Q = sqrt_R\n L = var * eig, Q # Technically not lower triangular, but use L anyways\n logdet_K = np.log(var * eig).sum()\n else:\n raise ValueError('decomposition must be \"cholesky\" or \"eig\"')\n\n K, K_gradient = var * R, None\n if eval_gradient:\n K_gradient = var * R_gradient + grad_var * R[:, :, None]\n y_train = y - mean[:, None]\n N = K.shape[0]\n # ---------------------------------\n # Resume likelihood calculation\n\n # alpha = cho_solve((L, True), y_train) # Line 3\n alpha = self.solve_sqrt(L, y_train, decomposition=decomp)\n\n # Compute log-likelihood (compare line 7)\n log_likelihood_dims = -0.5 * np.einsum(\"ik,ik->k\", y_train, alpha)\n # log_likelihood_dims -= 
np.log(np.diag(L)).sum()\n log_likelihood_dims -= 0.5 * logdet_K\n log_likelihood_dims -= N / 2 * np.log(2 * np.pi)\n log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions\n\n if eval_gradient: # compare Equation 5.9 from GP for ML\n tmp = np.einsum(\"ik,jk->ijk\", alpha, alpha) # k: output-dimension\n # tmp -= cho_solve((L, True), np.eye(N))[:, :, np.newaxis]\n tmp -= self.solve_sqrt(L, np.eye(N), decomposition=decomp)[:, :, np.newaxis]\n # Compute \"0.5 * trace(tmp.dot(K_gradient))\" without\n # constructing the full matrix tmp.dot(K_gradient) since only\n # its diagonal is required\n log_likelihood_gradient_dims = \\\n 0.5 * np.einsum(\"ijl,ijk->kl\", tmp, K_gradient)\n\n # Beyond scikit-learn: Add gradient wrt mean\n log_likelihood_gradient_dims -= grad_mean.T @ alpha\n\n # Sum over output dimension\n log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)\n return log_likelihood, log_likelihood_gradient\n return log_likelihood\n\n def likelihood(self, log=True, X=None, y=None, theta=None):\n # Multiple corr can be passed to quickly get likelihoods for many correlation parameters\n if X is None:\n X = self.X_train_\n if y is None:\n y = self.y_train_\n\n # corr = self.kernel(X, **self.kernel_kws)\n kernel = self.kernel_.clone_with_theta(theta)\n corr = kernel(X)\n corr = corr + self.nugget * np.eye(corr.shape[-1])\n corr_chol = cholesky(corr)\n \n # Setup best guesses for mean and cov\n center0, disp0, df0, scale0 = self.center0, self.disp0, self.df0, self.scale0\n df = self.compute_df(y=y, df0=df0)\n basis = self.basis(X)\n mean = basis @ self.compute_center(y, corr_chol, basis, center0=center0, disp0=disp0)\n # sd = self.compute_std(y=y, chol=corr_chol, basis=basis, beta0=beta0, disp0=disp0, df0=df0, scale0=scale0)\n scale2 = self.compute_scale_sq(\n y=y, chol=corr_chol, basis=basis, center0=center0, disp0=disp0,\n df0=df0, scale0=scale0)\n var = self.compute_cov_factor(scale_sq=scale2, df=df)\n cov = var * corr\n dist = st.multivariate_normal(mean=mean, cov=cov)\n log_like = np.sum(dist.logpdf(y))\n if log:\n return log_like\n return np.exp(log_like)\n\n\[email protected]\nclass ConjugateStudentProcess(BaseConjugateProcess):\n R\"\"\"A conjugacy-based Student-t Process class.\n\n Parameters\n ----------\n %(BaseConjugateProcess.parameters)s\n \"\"\"\n\n def cov(self, X, Xp=None):\n\n if not self._fit: # Unfitted; predict based on GP prior\n df = self.df0\n scale = self.scale0\n disp = self.disp0\n if self.kernel is None:\n kernel = self._default_kernel\n else:\n kernel = self.kernel\n else:\n df = self.df_\n scale = self.scale_\n disp = self.disp_\n kernel = self.kernel_\n\n if df <= 2:\n raise ValueError('df must be greater than 2 for the covariance to exist')\n\n # Call kernel before potentially reassigning Xp, else, e.g., WhiteKernel will not work properly\n corr = kernel(X, Xp)\n\n if Xp is None:\n Xp = X\n\n var = self.compute_cov_factor(scale_sq=scale**2, df=df)\n return var * (corr + self.basis(X) @ disp @ self.basis(Xp).T)\n\n @docstrings.dedent\n def predict(self, X, return_std=False, return_cov=False, Xc=None, y=None, pred_noise=False):\n R\"\"\"\n\n Parameters\n ----------\n %(BaseConjugateProcess_predict.parameters)s\n \"\"\"\n\n pred = super(ConjugateStudentProcess, self).predict(\n X=X, return_std=return_std, return_cov=return_cov, Xc=Xc, y=y, pred_noise=pred_noise)\n\n decomp = self.decomposition\n if not self._fit: # Unfitted; predict based on GP prior\n disp = self.disp0\n var = self.compute_cov_factor(scale_sq=self.scale0 ** 2, df=self.df0)\n 
basis = self.basis(X)\n else:\n disp = self.disp_\n var = self.cov_factor_\n basis_new = self.basis(X)\n\n if Xc is None:\n basis_old = self.basis_train_\n if decomp == 'cholesky':\n sqrt_R = self.corr_sqrt_\n elif decomp == 'eig':\n sqrt_R = self._eigh_tuple_\n else:\n raise ValueError('decomposition must be \"cholesky\" or \"eig\"')\n R_no = self.kernel_(X, self.X_train_)\n else:\n basis_old = self.basis(Xc)\n R_no = self.kernel_(X, Xc)\n kk = self.kernel_(Xc) + self.nugget * np.eye(len(Xc))\n # corr_chol = cholesky(kk)\n if decomp == 'cholesky':\n sqrt_R = cholesky(kk)\n elif decomp == 'eig':\n sqrt_R = eigh(kk) # eig, Q\n else:\n raise ValueError('decomposition must be \"cholesky\" or \"eig\"')\n # The conditional basis\n # basis = basis_new - R_no @ cho_solve((corr_chol, True), basis_old)\n basis = basis_new - R_no @ self.solve_sqrt(sqrt_R, basis_old, decomposition=decomp)\n\n mean_cov = var * (basis @ disp @ basis.T) # From integrating out the mean\n if return_std:\n mean, std = pred\n std += np.sqrt(np.diag(mean_cov))\n return mean, std\n if return_cov:\n mean, cov = pred\n cov += mean_cov\n return mean, cov\n return pred\n\n def log_marginal_likelihood(self, theta=None, eval_gradient=False, X=None, y=None):\n if y is None:\n y = self.y_train_\n if X is None:\n X = self.X_train_\n\n ny = self.num_y(y)\n if not hasattr(self, 'kernel_') or self.kernel_ is None:\n if self.kernel is None:\n kernel = self._default_kernel\n else:\n kernel = self.kernel\n else:\n kernel = self.kernel_\n kernel = kernel.clone_with_theta(theta)\n if eval_gradient:\n R, dR = kernel(X, eval_gradient)\n else:\n R, dR = kernel(X), None\n\n R[np.diag_indices_from(R)] += self.nugget\n N = R.shape[0]\n\n decomp = self.decomposition\n\n if decomp == 'cholesky':\n try:\n sqrt_R = cholesky(R) # Line 2\n except np.linalg.LinAlgError:\n return (-np.inf, np.zeros_like(theta)) \\\n if eval_gradient else -np.inf\n elif decomp == 'eig':\n sqrt_R = eigh(R) # eig, Q\n else:\n raise ValueError('decomposition must be \"cholesky\" or \"eig\"')\n\n center0, disp0, df0, scale0 = self.center0, self.disp0, self.df0, self.scale0\n df = self.compute_df(y=y, df0=df0)\n basis = self.basis(X)\n if eval_gradient:\n disp, grad_disp = self.compute_disp(\n y=y, sqrt_R=sqrt_R, basis=basis, disp0=disp0,\n eval_gradient=eval_gradient, dR=dR, decomposition=decomp\n )\n scale_sq, grad_scale_sq = self.compute_scale_sq(\n y=y, sqrt_R=sqrt_R, basis=basis, center0=center0, disp0=disp0,\n df0=df0, scale0=scale0, eval_gradient=eval_gradient, dR=dR, decomposition=decomp\n )\n else:\n disp = self.compute_disp(y=y, sqrt_R=sqrt_R, basis=basis, disp0=disp0, decomposition=decomp)\n scale_sq = self.compute_scale_sq(\n y=y, sqrt_R=sqrt_R, basis=basis, center0=center0, disp0=disp0, df0=df0,\n scale0=scale0, decomposition=decomp\n )\n grad_disp, grad_scale_sq = None, None\n scale = np.sqrt(scale_sq)\n\n def log_norm(df_, scale_, disp_):\n \"\"\"Normalization constant of the normal scaled inverse chi squared distribution\"\"\"\n norm = loggamma(df_ / 2.) - df_ / 2. * np.log(df_ * scale_ / 2.)\n log_det = np.linalg.slogdet(2 * np.pi * disp_)[1]\n if log_det != -np.inf:\n norm += 0.5 * log_det\n return norm\n\n if decomp == 'cholesky':\n logdet_R = 2 * np.log(np.diag(sqrt_R)).sum()\n elif decomp == 'eig':\n eig, Q = sqrt_R\n logdet_R = np.log(eig).sum()\n else:\n raise ValueError('decomposition must be \"cholesky\" or \"eig\"')\n\n log_like = log_norm(df, scale, disp) - log_norm(df0, scale0, disp0) \\\n - ny / 2. 
* (N * np.log(2*np.pi) + logdet_R)\n\n if eval_gradient:\n # cho_solve only cares about first dimension of dR. Gradient parameters are in the last dimension.\n # log_like_gradient = - (ny / 2.) * np.trace(cho_solve((corr_L, True), dR), axis1=0, axis2=1)\n log_like_gradient = - (ny / 2.) * np.trace(\n self.solve_sqrt(sqrt_R, dR, decomposition=decomp), axis1=0, axis2=1\n )\n log_like_gradient -= (df / 2.) * grad_scale_sq / scale_sq\n\n if not np.all(disp == 0):\n log_like_gradient += 0.5 * np.einsum('ij,ijp->p', inv(disp), grad_disp)\n\n return log_like, log_like_gradient\n\n return log_like\n\n\ndef _default_ref(X, ref):\n return ref * np.ones(X.shape[0])\n\n\ndef _default_ratio(X, ratio):\n return ratio * np.ones(X.shape[0])\n\n\[email protected]\nclass TruncationProcess:\n R\"\"\"\n\n Parameters\n ----------\n kernel : sklearn.Kernel\n\n ratio : scalar or callable\n ref : scalar or callable\n excluded : 1d array, optional\n The set of orders to ignore when constructing process for y_order and dy_order, i.e., the geometric sum\n will not include these values\n ratio_kws : dict, optional\n kernel_kws : dict, optional\n nugget : float, optional\n verbose : bool, optional\n\n Other Parameters\n ----------------\n %(BaseConjugateProcess.parameters)s\n \"\"\"\n\n def __init__(self, kernel=None, ratio=0.5, ref=1, excluded=None, ratio_kws=None, **kwargs):\n\n if not callable(ref):\n self.ref = lambda X, ref=ref: ref * np.ones(X.shape[0])\n else:\n self.ref = ref\n\n if not callable(ratio):\n self.ratio = lambda X, ratio=ratio: ratio * np.ones(X.shape[0])\n else:\n self.ratio = ratio\n\n # self.coeffs_process_class = BaseConjugateProcess\n # self.coeffs_process = self.coeffs_process_class(kernel=kernel, **kwargs)\n self.coeffs_process = BaseConjugateProcess(kernel=kernel, **kwargs)\n self.kernel = kernel\n self._log_like = None\n\n self.excluded = excluded\n self.ratio_kws = {} if ratio_kws is None else ratio_kws\n\n self._fit = False\n self.X_train_ = None\n self.y_train_ = None\n self.orders_ = None\n self.dX_ = None\n self.dy_ = None\n self.coeffs_ = None\n # self.coeffs_process_ = None\n\n def mean(self, X, start=0, end=np.inf):\n coeff_mean = self.coeffs_process.mean(X=X)\n ratio_sum = geometric_sum(x=self.ratio(X, **self.ratio_kws), start=start, end=end, excluded=self.excluded)\n return self.ref(X) * ratio_sum * coeff_mean\n\n def cov(self, X, Xp=None, start=0, end=np.inf):\n coeff_cov = self.coeffs_process.cov(X=X, Xp=Xp)\n Xp = X if Xp is None else Xp # Must reassign *after* calling cov\n ratio_mat = self.ratio(X, **self.ratio_kws)[:, None] * self.ratio(Xp, **self.ratio_kws)\n ratio_sum = geometric_sum(x=ratio_mat, start=start, end=end, excluded=self.excluded)\n ref_mat = self.ref(X)[:, None] * self.ref(Xp)\n return ref_mat * ratio_sum * coeff_cov\n\n def basis(self, X, start=0, end=np.inf):\n cn_basis = self.coeffs_process.basis(X=X)\n ratio = self.ratio(X, **self.ratio_kws)[:, None]\n ratio_sum = geometric_sum(x=ratio, start=start, end=end, excluded=self.excluded)\n return self.ref(X)[:, None] * ratio_sum * cn_basis\n\n def underlying_properties(self, X, order, return_std=False, return_cov=False):\n y_mean = self.mean(X, start=order+1)\n if return_cov:\n y_cov = self.cov(X, start=order+1)\n return y_mean, y_cov\n elif return_std:\n y_std = np.sqrt(np.diag(self.cov(X, start=order+1)))\n return y_mean, y_std\n else:\n return y_mean\n\n def fit(self, X, y, orders, dX=None, dy=None):\n self.X_train_ = X\n self.y_train_ = y\n self.orders_ = orders\n orders_mask = ~ np.isin(orders, 
self.excluded)\n\n self.dX_ = dX\n self.dy_ = dy\n\n # Extract the coefficients based on best ratio value and setup/fit the iid coefficient process\n ratio = self.ratio(X, **self.ratio_kws)\n ref = self.ref(X)\n if np.atleast_1d(ratio).ndim > 1:\n raise ValueError('ratio must return a 1d array or a scalar')\n if np.atleast_1d(ref).ndim > 1:\n raise ValueError('ref must return a 1d array or a scalar')\n self.coeffs_ = coefficients(y=y, ratio=ratio, ref=ref, orders=orders)[:, orders_mask]\n # self.coeffs_process_ = self.coeffs_process_class(kernel=self.kernel, **self.coeffs_process_kwargs)\n self.coeffs_process.fit(X=X, y=self.coeffs_)\n self._fit = True\n return self\n\n def predict(self, X, order, return_std=False, return_cov=False, Xc=None, y=None, pred_noise=False, kind='both'):\n \"\"\"Returns the predictive GP at the points X\n\n Parameters\n ----------\n X : (M, d) array\n Locations at which to predict the new y values\n order : int\n The order of the GP to predict\n return_std : bool\n Whether the marginal standard deviation of the predictive process is to be returned\n return_cov : bool\n Whether the covariance matrix of the predictive process is to be returned\n Xc : (N, d) array\n Locations at which to condition. Defaults to `X` used in fit. This *does not*\n affect the `X` used to update hyperparameters.\n y : (N, n) array\n Points upon which to condition. Defaults to the `y` used in `fit`. This *does not*\n affect the `y` used to update hyperparameters.\n pred_noise : bool\n Adds `noise_sd` to the diagonal of the covariance matrix if `return_cov == True`.\n kind : str\n\n Returns\n -------\n mean, (mean, std), or (mean, cov), depending on `return_std` and `return_cov`\n \"\"\"\n\n if not self._fit:\n return self.underlying_properties(X, order, return_cov=return_cov, return_std=return_std)\n\n if Xc is None:\n Xc = self.X_train_\n if y is None:\n if order not in self.orders_:\n raise ValueError('order must be in orders passed to `fit`')\n if self.y_train_.ndim == 1:\n y = self.y_train_\n else:\n y = np.squeeze(self.y_train_[:, self.orders_ == order])\n\n if kind not in ['both', 'interp', 'trunc']:\n raise ValueError('kind must be one of \"both\", \"interp\" or \"trunc\"')\n\n m_pred, K_pred = 0, 0\n if kind == 'both' or kind == 'interp':\n # ----------------------------------------------------\n # Get mean & cov for (interpolating) prediction y_order\n #\n # Use X and y from fit for hyperparameters\n m_old = self.mean(X=Xc, start=0, end=order)\n m_new = self.mean(X=X, start=0, end=order)\n\n # Use X and y from arguments for conditioning/predictions\n K_oo = self.cov(start=0, end=order, X=Xc, Xp=Xc)\n K_on = self.cov(start=0, end=order, X=Xc, Xp=X)\n K_no = K_on.T\n K_nn = self.cov(start=0, end=order, X=X, Xp=X)\n\n # Use given y for prediction\n alpha = solve(K_oo, y - m_old)\n m_pred += m_new + K_no @ alpha\n if return_std or return_cov:\n K_pred += K_nn - K_no @ solve(K_oo, K_on)\n #\n # ----------------------------------------------------\n\n if kind == 'both' or kind == 'trunc':\n # ----------------------------------------------------\n # Get the mean & cov for truncation error\n #\n m_new_trunc = self.mean(X=X, start=order + 1, end=np.inf)\n K_nn_trunc = self.cov(X=X, Xp=X, start=order + 1, end=np.inf)\n\n X_trunc = self.dX_\n if X_trunc is not None: # truncation error is constrained\n m_old_trunc = self.mean(X=X_trunc, start=order+1, end=np.inf)\n K_oo_trunc = self.cov(X=X_trunc, Xp=X_trunc, start=order+1, end=np.inf)\n K_on_trunc = self.cov(X=X_trunc, Xp=X, start=order+1, 
end=np.inf)\n K_no_trunc = K_on_trunc.T\n\n alpha_trunc = solve(K_oo_trunc, (self.dy_ - m_old_trunc))\n m_pred += m_new_trunc + K_no_trunc @ alpha_trunc\n if return_std or return_cov:\n K_pred += K_nn_trunc - K_no_trunc @ solve(K_oo_trunc, K_on_trunc)\n else: # truncation is not constrained\n m_pred += m_new_trunc\n if return_std or return_cov:\n K_pred += K_nn_trunc\n\n if return_cov:\n return m_pred, K_pred\n if return_std:\n return m_pred, np.sqrt(np.diag(K_pred))\n return m_pred\n\n def log_marginal_likelihood(self, theta, eval_gradient=False, X=None, y=None, orders=None, **ratio_kws):\n if X is None:\n X = self.X_train_\n if y is None:\n y = self.y_train_\n if orders is None:\n orders = self.orders_\n ref = self.ref(X)\n ratio = self.ratio(X, **ratio_kws)\n\n orders_mask = ~ np.isin(orders, self.excluded)\n coeffs = coefficients(y=y, ratio=ratio, ref=ref, orders=orders)[:, orders_mask]\n result = self.coeffs_process.log_marginal_likelihood(theta, eval_gradient=eval_gradient, X=X, y=coeffs)\n if eval_gradient:\n coeff_log_like, coeff_log_like_gradient = result\n else:\n coeff_log_like = result\n\n orders_in = orders[orders_mask]\n n = len(orders_in)\n det_factor = np.sum(n * np.log(np.abs(ref)) + np.sum(orders_in) * np.log(np.abs(ratio)))\n y_log_like = coeff_log_like - det_factor\n return y_log_like\n\n\nclass TruncationGP(TruncationProcess):\n R\"\"\"A Gaussian Process Truncation class\"\"\"\n\n def __init__(self, kernel=None, ratio=0.5, ref=1, excluded=None, ratio_kws=None, **kwargs):\n super().__init__(\n kernel=kernel, ref=ref, ratio=ratio, excluded=excluded, ratio_kws=ratio_kws, **kwargs)\n self.coeffs_process = ConjugateGaussianProcess(kernel=kernel, **kwargs)\n\n\nclass TruncationTP(TruncationProcess):\n R\"\"\"A Student-t Process Truncation class\"\"\"\n\n def __init__(self, kernel=None, ratio=0.5, ref=1, excluded=None, ratio_kws=None, **kwargs):\n super().__init__(\n kernel=kernel, ratio=ratio, ref=ref, excluded=excluded, ratio_kws=ratio_kws, **kwargs)\n self.coeffs_process = ConjugateStudentProcess(kernel=kernel, **kwargs)\n\n def predict(self, X, order, return_std=False, return_cov=False, Xc=None, y=None, pred_noise=False, kind='both'):\n pred = super(TruncationTP, self).predict(\n X=X, order=order, return_std=return_std, return_cov=return_cov,\n Xc=Xc, y=y, pred_noise=pred_noise\n )\n\n if not return_std and not return_cov:\n return pred\n\n if Xc is None:\n Xc = self.X_train_\n\n var, disp = self.coeffs_process.cov_factor_, self.coeffs_process.disp_\n basis_lower, basis_trunc = np.zeros((X.shape[0], disp.shape[0])), np.zeros((X.shape[0], disp.shape[0]))\n\n if kind == 'both' or kind == 'interp':\n # Use Xc from argument to define old points\n K_oo = self.cov(X=Xc, Xp=Xc, start=0, end=order)\n K_no = self.cov(X=X, Xp=Xc, start=0, end=order)\n\n basis_lower_old = self.basis(X=Xc, start=0, end=order)\n basis_lower_new = self.basis(X=X, start=0, end=order)\n basis_lower = basis_lower_new - K_no @ solve(K_oo, basis_lower_old)\n\n if kind == 'both' or kind == 'trunc':\n X_trunc = self.dX_\n if X_trunc is not None: # truncation error is constrained\n K_oo_trunc = self.cov(X=X_trunc, Xp=X_trunc, start=order+1, end=np.inf)\n K_no_trunc = self.cov(X=X, Xp=X_trunc, start=order+1, end=np.inf)\n\n basis_trunc_old = self.basis(X=X_trunc, start=order+1, end=np.inf)\n basis_trunc_new = self.basis(X=X, start=order+1, end=np.inf)\n basis_trunc = basis_trunc_new - K_no_trunc @ solve(K_oo_trunc, basis_trunc_old)\n else: # not constrained\n basis_trunc = self.basis(start=order + 1, 
end=np.inf, X=X)\n\n mean_cov = var * (basis_lower + basis_trunc) @ disp @ (basis_lower + basis_trunc).T\n\n if return_std:\n mean, std = pred\n return mean, std + np.sqrt(np.diag(mean_cov))\n if return_cov:\n mean, cov = pred\n return mean, cov + mean_cov\n\n\nclass TruncationPointwise:\n R\"\"\"A conjugacy-based implementation of the pointwise convergence model from Furnstahl et al. (2015)\n\n Implements the following model\n\n .. math::\n\n y_k = y_{\\mathrm{ref}} \\sum_{n=0}^k c_n Q^n\n\n where the :math:`c_n` are iid Gaussian random variables and :math:`\\bar c^2` has a scaled inverse chi squared\n conjugate prior\n\n .. math::\n\n c_n \\,|\\, \\bar c^2 & \\sim N(0, \\bar c^2) \\\\\n \\bar c^2 & \\sim \\chi^{-2}(\\nu_0, \\tau_0^2)\n\n Conditioning on the partial sums :math:`y_0`, :math:`\\dots,` :math:`y_k`, allow\n one to estimate :math:`\\bar c`, and thus the full summation :math:`y_\\infty`.\n\n Parameters\n ----------\n df : float >= 0\n The degrees of freedom hyperparameter :math:`\\nu_0` for the scaled inverse chi squared prior on :math:`\\bar c`\n scale : float > 0\n The scale hyperparameter :math:`\\tau_0` for the scaled inverse chi squared prior on :math:`\\bar c`\n excluded : int or array, optional\n The orders to be excluded from both the hyperparameter updating and from the truncation error distribution.\n Defaults to `None`.\n \"\"\"\n\n def __init__(self, df=1, scale=1, excluded=None):\n self.df0 = df\n self.scale0 = scale\n self.excluded = excluded\n\n self._fit = False\n self.y_ = None\n self.ratio_ = None\n self.ref_ = None\n self.orders_ = None\n self.orders_mask_ = None\n self._orders_masked = None\n self.coeffs_ = None\n self.coeffs_dist_ = None\n self.df_ = None\n self.scale_ = None\n self.y_masked_ = None\n self.dist_ = None\n\n @classmethod\n def _compute_df(cls, c, df0):\n return df0 + c.shape[-1]\n\n @classmethod\n def _compute_scale(cls, c, df0, scale0):\n c_sq = (c ** 2).sum(-1)\n df = cls._compute_df(c, df0)\n return np.sqrt((df0 * scale0**2 + c_sq) / df)\n\n @staticmethod\n def _num_orders(y):\n if y.ndim == 1:\n return 1\n elif y.ndim == 2:\n return y.shape[-1]\n\n def _compute_order_indices(self, orders):\n if orders is None:\n return slice(None)\n orders = np.atleast_1d(orders)\n return np.squeeze([np.nonzero(self._orders_masked == order) for order in orders])\n\n def fit(self, y, ratio, ref=1, orders=None):\n R\"\"\"\n\n Parameters\n ----------\n y\n ratio\n ref\n orders\n\n Returns\n -------\n\n \"\"\"\n if y.ndim == 1:\n y = y[:, None]\n\n ratio, ref = np.atleast_1d(ratio, ref)\n\n self.y_ = y\n self.ratio_ = ratio\n self.ref_ = ref\n\n if orders is None:\n orders = np.arange(y.shape[-1])\n\n if y.shape[-1] != orders.size:\n raise ValueError('The last dimension of `y` must have the same size as `orders`')\n\n self.orders_ = orders\n self.orders_mask_ = orders_mask = ~ np.isin(orders, self.excluded)\n self.coeffs_ = coefficients(y=y, ratio=ratio, ref=ref, orders=orders)[:, orders_mask]\n self.df_ = self._compute_df(c=self.coeffs_, df0=self.df0)\n self.scale_ = self._compute_scale(c=self.coeffs_, df0=self.df0, scale0=self.scale0)\n\n self.y_masked_ = y[:, orders_mask]\n self._orders_masked = orders_masked = orders[orders_mask]\n ratio_sums = np.array([geometric_sum(ratio**2, k+1, np.inf, excluded=self.excluded)\n for k in orders_masked]).T\n trunc_scale = ref[:, None] * np.sqrt(ratio_sums) * self.scale_[:, None]\n self.coeffs_dist_ = st.t(loc=0, scale=self.scale_, df=self.df_)\n self.dist_ = st.t(loc=self.y_masked_, scale=trunc_scale, df=self.df_)\n 
self._fit = True\n return self\n\n def interval(self, alpha, orders=None):\n R\"\"\"A convenience method to call `interval` on the truncation error distribution object.\n\n Parameters\n ----------\n alpha\n orders\n\n Returns\n -------\n\n \"\"\"\n alpha = np.array(alpha)\n if alpha.ndim == 1:\n alpha = alpha[:, None, None]\n interval = np.array(self.dist_.interval(alpha))\n idx = self._compute_order_indices(orders)\n return interval[..., idx]\n\n def pdf(self, y, orders=None):\n R\"\"\"A convenience method to call `pdf` on the truncation error distribution object.\n\n Parameters\n ----------\n y\n orders\n\n Returns\n -------\n\n \"\"\"\n y = np.atleast_1d(y)\n if y.ndim == 1:\n y = y[:, None, None]\n idx = self._compute_order_indices(orders)\n return self.dist_.pdf(y)[..., idx]\n\n def logpdf(self, y, orders=None):\n R\"\"\"A convenience method to call `logpdf` on the truncation error distribution object.\n\n Parameters\n ----------\n y\n orders\n\n Returns\n -------\n\n \"\"\"\n y = np.atleast_1d(y)\n if y.ndim == 1:\n y = y[:, None, None]\n idx = self._compute_order_indices(orders)\n return self.dist_.logpdf(y)[..., idx]\n\n def std(self):\n R\"\"\"A convenience method to call `std` on the truncation error distribution object.\n\n Returns\n -------\n\n \"\"\"\n return self.dist_.std()\n\n def log_likelihood(self, ratio=None, ref=None):\n R\"\"\"Computes the log likelihood for the ratio and ref parameters given the data passed to `fit`.\n\n That is\n\n .. math::\n pr(\\vec{y}_k \\, | \\, Q, y_{ref}) & = \\frac{pr(\\vec{c}_k)}{\\prod_n y_{ref} Q^n} \\\\\n pr(\\vec{c}_k) & = \\frac{\\Gamma(\\nu/2)}{\\Gamma(\\nu_0/2)}\n \\sqrt{\\frac{1}{(2\\pi)^n} \\frac{(\\nu_0 \\tau_0^2 / 2)^{\\nu_0}}{(\\nu \\tau^2 / 2)^{\\nu}}}\n\n Parameters\n ----------\n ratio : scalar or array, shape = (n_points,)\n The ratio, or EFT expansion parameter, in the geometric sum, used to extract the coefficients.\n ref : scalar or array, shape = (n_points,)\n The multiplicative reference scale used to extract the coefficients.\n\n Returns\n -------\n float\n The log likelihood\n \"\"\"\n if not self._fit:\n raise ValueError('Must call fit before calling log_likelihood')\n\n if ratio is None:\n ratio = self.ratio_\n if ref is None:\n ref = self.ref_\n\n y, orders, mask = self.y_, self.orders_, self.orders_mask_\n coeffs = coefficients(y=y, ratio=ratio, ref=ref, orders=orders)[:, mask]\n df0, scale0 = self.df0, self.scale0\n df = self._compute_df(c=coeffs, df0=df0)\n scale = self._compute_scale(c=coeffs, df0=df0, scale0=scale0)\n\n n = self._num_orders(coeffs)\n log_like = loggamma(df / 2.) 
- 0.5 * n * np.log(2 * np.pi)\n if df0 > 0: # Ignore this infinite constant for scale invariant prior, df0 == 0\n log_like += 0.5 * np.sum(df0 * np.log(df0 * scale0 ** 2 / 2.)) - loggamma(df0 / 2.)\n log_like -= 0.5 * np.sum(df * np.log(df * scale**2 / 2.))\n log_like -= np.sum(np.log(np.abs(ref)) + np.sum(orders[mask]) * np.log(ratio)) # From change of variables\n return log_like\n\n def credible_diagnostic(self, data, dobs, band_intervals=None, band_dobs=None, beta=True):\n dist = self.dist_\n dobs = np.atleast_1d(dobs)\n if data.ndim == 1:\n data = data[:, None]\n lower, upper = dist.interval(dobs[:, None, None])\n\n def diagnostic(data_, lower_, upper_):\n indicator = (lower_ < data_) & (data_ < upper_) # 1 if in, 0 if out\n return np.average(indicator, axis=1) # The diagnostic\n\n # D_CI = np.apply_along_axis(\n # diagnostic, axis=0, arr=data, lower_=lower,\n # upper_=upper)\n D_CI = diagnostic(data, lower, upper)\n\n if band_intervals is not None:\n if band_dobs is None:\n band_dobs = dobs\n band_dobs = np.atleast_1d(band_dobs)\n\n N = self.y_.shape[0]\n if beta:\n band_intervals = np.atleast_1d(band_intervals)\n # Band shape: (len(dobs), 2, len(X))\n bands = np.zeros((len(band_intervals), 2, len(band_dobs)))\n for i, p in enumerate(band_intervals):\n bands[i] = np.array(\n [hpd(sp.stats.beta, p, N*s+1, N-N*s+1)\n for s in band_dobs]).T\n # bands = np.transpose(bands, [0, 1, 2])\n else:\n band_dist = st.binom(n=N, p=band_dobs)\n band_intervals = np.atleast_2d(band_intervals)\n bands = np.asarray(band_dist.interval(band_intervals.T)) / N\n bands = np.transpose(bands, [1, 0, 2])\n return D_CI, bands\n return D_CI\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.diag",
"numpy.copy",
"numpy.trace",
"numpy.log",
"scipy.linalg.cho_solve",
"numpy.isfinite",
"sklearn.base.clone",
"numpy.transpose",
"scipy.stats.binom",
"numpy.argmin",
"numpy.abs",
"scipy.linalg.eigh",
"scipy.optimize.fmin_l_bfgs_b",
"numpy.isin",
"numpy.linalg.slogdet",
"scipy.special.loggamma",
"numpy.average",
"numpy.nonzero",
"scipy.stats.t",
"numpy.eye",
"sklearn.utils.check_random_state",
"numpy.atleast_2d",
"numpy.zeros",
"scipy.linalg.inv",
"numpy.linalg.cholesky",
"numpy.arange",
"numpy.hstack",
"numpy.all",
"numpy.diag_indices_from",
"numpy.min",
"numpy.einsum",
"numpy.array",
"scipy.stats.multivariate_normal",
"numpy.zeros_like",
"numpy.linalg.solve",
"numpy.squeeze",
"sklearn.gaussian_process.kernels.ConstantKernel",
"numpy.exp",
"numpy.atleast_1d",
"numpy.sqrt",
"sklearn.gaussian_process.kernels.RBF"
]
] |
TylerYep/edutorch | [
"6a4a425cbfd7fcdcd851b010816d29c3b5bae8bd"
] | [
"tests/nn/rnn_cell_test.py"
] | [
"import numpy as np\n\nfrom edutorch.nn import RNNCell\nfrom tests.gradient_check import estimate_gradients\n\n\ndef test_rnn_cell_forward() -> None:\n N, D, H = 3, 10, 4\n x = np.linspace(-0.4, 0.7, num=N * D).reshape(N, D)\n\n model = RNNCell(\n prev_h=np.linspace(-0.2, 0.5, num=N * H).reshape(N, H),\n Wx=np.linspace(-0.1, 0.9, num=D * H).reshape(D, H),\n Wh=np.linspace(-0.3, 0.7, num=H * H).reshape(H, H),\n b=np.linspace(-0.2, 0.4, num=H),\n )\n\n next_h = model(x)\n expected_next_h = np.asarray(\n [\n [-0.58172089, -0.50182032, -0.41232771, -0.31410098],\n [0.66854692, 0.79562378, 0.87755553, 0.92795967],\n [0.97934501, 0.99144213, 0.99646691, 0.99854353],\n ]\n )\n\n assert np.allclose(expected_next_h, next_h)\n\n\ndef test_rnn_cell_backward() -> None:\n N, D, H = 4, 5, 6\n x = np.random.randn(N, D)\n prev_h = np.random.randn(N, H)\n Wx = np.random.randn(D, H)\n Wh = np.random.randn(H, H)\n b = np.random.randn(H)\n dnext_h = np.random.randn(*prev_h.shape)\n\n model = RNNCell(prev_h=prev_h, Wx=Wx, Wh=Wh, b=b)\n\n params = {\"prev_h\": prev_h, \"Wx\": Wx, \"Wh\": Wh, \"b\": b}\n dx_num, dprev_h_num, dWx_num, dWh_num, db_num = estimate_gradients(\n model, dnext_h, x, params\n )\n\n _ = model(x)\n dx, dprev_h, dWx, dWh, db = model.backward(dnext_h)\n\n assert np.allclose(dx_num, dx)\n assert np.allclose(dprev_h_num, dprev_h)\n assert np.allclose(dWx_num, dWx)\n assert np.allclose(dWh_num, dWh)\n assert np.allclose(db_num, db)\n"
] | [
[
"numpy.allclose",
"numpy.linspace",
"numpy.random.randn",
"numpy.asarray"
]
] |
arielrossanigo/ibis | [
"18e967cac961285b05d8df560f40148bac1a2571"
] | [
"ibis/backends/impala/tests/test_pandas_interop.py"
] | [
"import numpy as np\nimport pandas as pd\nimport pandas.testing as tm\nimport pytest\n\nimport ibis\nimport ibis.expr.datatypes as dt\nimport ibis.expr.schema as sch\nfrom ibis.backends.impala.pandas_interop import DataFrameWriter # noqa: E402\n\npytestmark = pytest.mark.impala\n\n\[email protected]\ndef exhaustive_df():\n return pd.DataFrame(\n {\n 'bigint_col': np.array(\n [0, 10, 20, 30, 40, 50, 60, 70, 80, 90], dtype='i8'\n ),\n 'bool_col': np.array(\n [\n True,\n False,\n True,\n False,\n True,\n None,\n True,\n False,\n True,\n False,\n ],\n dtype=np.bool_,\n ),\n 'date_string_col': [\n '11/01/10',\n None,\n '11/01/10',\n '11/01/10',\n '11/01/10',\n '11/01/10',\n '11/01/10',\n '11/01/10',\n '11/01/10',\n '11/01/10',\n ],\n 'double_col': np.array(\n [\n 0.0,\n 10.1,\n np.nan,\n 30.299999999999997,\n 40.399999999999999,\n 50.5,\n 60.599999999999994,\n 70.700000000000003,\n 80.799999999999997,\n 90.899999999999991,\n ],\n dtype=np.float64,\n ),\n 'floatcol': np.array(\n [\n np.nan,\n 1.1000000238418579,\n 2.2000000476837158,\n 3.2999999523162842,\n 4.4000000953674316,\n 5.5,\n 6.5999999046325684,\n 7.6999998092651367,\n 8.8000001907348633,\n 9.8999996185302734,\n ],\n dtype='f4',\n ),\n 'int_col': np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i4'),\n 'month': [11, 11, 11, 11, 2, 11, 11, 11, 11, 11],\n 'smallint_col': np.array(\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i2'\n ),\n 'string_col': [\n '0',\n '1',\n None,\n 'double , whammy',\n '4',\n '5',\n '6',\n '7',\n '8',\n '9',\n ],\n 'timestamp_col': [\n pd.Timestamp('2010-11-01 00:00:00'),\n None,\n pd.Timestamp('2010-11-01 00:02:00.100000'),\n pd.Timestamp('2010-11-01 00:03:00.300000'),\n pd.Timestamp('2010-11-01 00:04:00.600000'),\n pd.Timestamp('2010-11-01 00:05:00.100000'),\n pd.Timestamp('2010-11-01 00:06:00.150000'),\n pd.Timestamp('2010-11-01 00:07:00.210000'),\n pd.Timestamp('2010-11-01 00:08:00.280000'),\n pd.Timestamp('2010-11-01 00:09:00.360000'),\n ],\n 'tinyint_col': np.array(\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i1'\n ),\n 'year': [\n 2010,\n 2010,\n 2010,\n 2010,\n 2010,\n 2009,\n 2009,\n 2009,\n 2009,\n 2009,\n ],\n }\n )\n\n\ndef test_alltypes_roundtrip(con, alltypes_df):\n _check_roundtrip(con, alltypes_df)\n\n\ndef test_writer_cleanup_deletes_hdfs_dir(con, hdfs, alltypes_df):\n writer = DataFrameWriter(con, alltypes_df)\n\n path = writer.write_temp_csv()\n assert hdfs.exists(path)\n\n writer.cleanup()\n assert not hdfs.exists(path)\n\n # noop\n writer.cleanup()\n assert not hdfs.exists(path)\n\n\ndef test_create_table_from_dataframe(con, alltypes_df, temp_table_db):\n tmp_db, tname = temp_table_db\n con.create_table(tname, alltypes_df, database=tmp_db)\n\n table = con.table(tname, database=tmp_db)\n df = table.execute()\n tm.assert_frame_equal(df, alltypes_df)\n\n\ndef test_insert(con, temp_table_db, exhaustive_df):\n tmp_db, table_name = temp_table_db\n schema = sch.infer(exhaustive_df)\n\n con.create_table(table_name, database=tmp_db, schema=schema)\n\n con.insert(table_name, exhaustive_df.iloc[:4], database=tmp_db)\n con.insert(table_name, exhaustive_df.iloc[4:], database=tmp_db)\n\n table = con.table(table_name, database=tmp_db)\n\n result = (\n table.execute().sort_values(by='tinyint_col').reset_index(drop=True)\n )\n tm.assert_frame_equal(result, exhaustive_df)\n\n\[email protected](raises=AssertionError, reason='NYT')\ndef test_insert_partition():\n assert False\n\n\ndef test_round_trip_exhaustive(con, exhaustive_df):\n _check_roundtrip(con, exhaustive_df)\n\n\ndef _check_roundtrip(con, df):\n writer = 
DataFrameWriter(con, df)\n path = writer.write_temp_csv()\n\n table = writer.delimited_table(path)\n df2 = table.execute()\n tm.assert_frame_equal(df2, df)\n\n\ndef test_timestamp_with_timezone():\n df = pd.DataFrame(\n {'A': pd.date_range('20130101', periods=3, tz='US/Eastern')}\n )\n schema = sch.infer(df)\n expected = ibis.schema([('A', \"timestamp('US/Eastern')\")])\n assert schema.equals(expected)\n assert schema.types[0].equals(dt.Timestamp('US/Eastern'))\n"
] | [
[
"numpy.array",
"pandas.testing.assert_frame_equal",
"pandas.Timestamp",
"pandas.date_range"
]
] |
abhiram-krishnan/tensorflow | [
"76069c136cd5042ee8021a7dc15ef591244e5a73"
] | [
"tensorflow/python/eager/backprop.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Code for backpropagation using the tape utilities.\"\"\"\n\n# TODO(b/159343581): Properly support CompositeTensor in all functions in this\n# file.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport operator\nimport sys\n\nimport six\n\nfrom tensorflow.python import pywrap_tfe\nfrom tensorflow.python import _pywrap_utils\nfrom tensorflow.python.eager import backprop_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import imperative_grad\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import default_gradient\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops.unconnected_gradients import UnconnectedGradients\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.lazy_loader import LazyLoader\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# Note that we need to lazy load the following two modules to avoid creating\n# circular dependencies.\n# TODO(b/119775953): fix the circular dependencies.\npfor_ops = LazyLoader(\n \"pfor_ops\", globals(),\n \"tensorflow.python.ops.parallel_for.control_flow_ops\")\nnp_arrays = LazyLoader(\n \"np_arrays\", globals(),\n \"tensorflow.python.ops.numpy_ops.np_arrays\")\n\nfunction = LazyLoader(\"function\", globals(),\n \"tensorflow.python.eager.function\")\n\n_op_attr_type_cache = {}\n\n\ndef op_attr_type(op_type, attr_name):\n try:\n return _op_attr_type_cache[(op_type, attr_name)]\n except KeyError:\n context.ensure_initialized()\n h = context.context()._handle # pylint: disable=protected-access\n attr_type = pywrap_tfe.TFE_OpNameGetAttrType(h, op_type, attr_name)\n _op_attr_type_cache[(op_type, attr_name)] = attr_type\n return attr_type\n\n\ndef make_attr(attr_type, value):\n # pybind11 enums do not return the raw value like SWIG enums do. 
They are\n # useful when comparing amongst each other but not direct integers as we are\n # doing in most tests.\n # https://pybind11.readthedocs.io/en/stable/classes.html#enumerations-and-internal-types\n # TODO(amitpatankar): After all SWIG transitions, convert the enum comparisons\n # from integer value to class.\n if attr_type == int(pywrap_tfe.TF_ATTR_TYPE):\n return dtypes.as_dtype(value)\n if attr_type == [int(pywrap_tfe.TF_ATTR_TYPE)]:\n return [dtypes.as_dtype(v) for v in value]\n if attr_type == int(pywrap_tfe.TF_ATTR_SHAPE):\n return tensor_shape.as_shape(value).as_proto()\n if attr_type == [int(pywrap_tfe.TF_ATTR_SHAPE)]:\n return [tensor_shape.as_shape(v).as_proto() for v in value]\n if isinstance(value, str):\n return value.encode()\n return value\n\n\nclass _MockOp(object):\n \"\"\"Pretends to be a tf.Operation for the gradient functions.\"\"\"\n\n def __init__(self, attrs, inputs, outputs, typ, skip_input_indices):\n self.attrs = attrs\n self.inputs = inputs\n self.outputs = outputs\n self.type = typ\n self.skip_input_indices = skip_input_indices\n\n def get_attr(self, attr):\n typ = op_attr_type(self.type, attr)\n for i in range(0, len(self.attrs), 2):\n if self.attrs[i] == attr:\n return make_attr(typ, self.attrs[i + 1])\n raise KeyError(attr)\n\n def _get_control_flow_context(self):\n raise NotImplementedError(\n \"tf.GradientTape.gradients() does not support graph control flow \"\n \"operations like tf.cond or tf.while at this time. Use tf.gradients() \"\n \"instead. If you need this feature, please file a feature request at \"\n \"https://github.com/tensorflow/tensorflow/issues/new\"\n )\n\n\ndef _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs,\n out_grads, skip_input_indices, forward_pass_name_scope):\n \"\"\"Calls the gradient function of the op.\n\n Args:\n op_name: the name of the op to be differentiated.\n attr_tuple: the attrs, as a tuple.\n num_inputs: the number of inputs to the op.\n inputs: inputs to the original operation.\n outputs: outputs to the original operation.\n out_grads: gradients of the operation wrt its outputs.\n skip_input_indices: a tuple that is passed to the gradient function,\n indicating which inputs to skip calculating the gradient for\n forward_pass_name_scope: the namescope of the op in the forward pass.\n\n Returns:\n The gradients with respect to the inputs of the function, as a list.\n \"\"\"\n mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices)\n grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access\n if grad_fn is None:\n return [None] * num_inputs\n\n # This does not work with v1 TensorArrays.\n if ops.executing_eagerly_outside_functions(\n ) or control_flow_util.EnableControlFlowV2(ops.get_default_graph()):\n gradient_name_scope = \"gradient_tape/\"\n if forward_pass_name_scope:\n gradient_name_scope += forward_pass_name_scope + \"/\"\n with ops.name_scope(gradient_name_scope):\n return grad_fn(mock_op, *out_grads)\n else:\n return grad_fn(mock_op, *out_grads)\n\n\npywrap_tfe.TFE_Py_RegisterGradientFunction(_gradient_function)\n\n\ndef _must_record_gradient():\n return not pywrap_tfe.TFE_Py_TapeSetIsEmpty()\n\n\ndef _record_gradient(op_name, inputs, attrs, results):\n return pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, results,\n ops.get_name_scope())\n\n\nexecute.must_record_gradient = _must_record_gradient\nexecute.record_gradient = _record_gradient\n\n\ndef implicit_val_and_grad(f):\n \"\"\"Returns a function which differentiates f with 
respect to variables.\n\n The wrapped function returns the value and the gradient of f when called with\n the same arguments. The gradient is with respect to all trainable TFE\n variables accessed by `f`.\n\n This function is useful when the exact set of variables to differentiate with\n is not known ahead of time.\n\n Example:\n\n ```python\n dense_layer = tf.compat.v1.layers.Dense(1)\n def loss(x, y):\n return tf.reduce_sum(tf.square(dense_layer(x) - y))\n\n # Obtain the gradient function.\n val_grad_fn = tfe.implicit_value_and_gradients(loss)\n\n # Invoke the gradient function with concrete values of x and y.\n x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n y = tf.constant([[10.0], [20.0]])\n value, grads_and_vars = val_grad_fn(x, y)\n print('Value of loss: %s' % value)\n\n # Apply the gradients to Variables.\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)\n optimizer.apply_gradients(grads_and_vars)\n ```\n\n Args:\n f: function to be differentiated. If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar.\n\n Returns:\n A function which, when called, returns a tuple pair.\n Its first element is the value to which the function evaluates.\n Its second element is list of (gradient, variable) pairs.\n\n Raises:\n ValueError: if `f` returns None.\n \"\"\"\n # TODO(cais): Remove calls to tf.constant() once the gradients functions\n # accept lists and np.ndarrays.\n\n def grad_fn(*args, **kwds):\n \"\"\"Computes the gradient of the wrapped function.\"\"\"\n this_tape = tape.push_new_tape()\n try:\n end_node = f(*args, **kwds)\n if end_node is None:\n raise ValueError(\"Cannot differentiate a function that returns None; \"\n \"did you forget to return a value from {}?\".format(\n f.__name__))\n finally:\n tape.pop_tape(this_tape)\n # Note: variables are returned in construction order. This ensures unique\n # order across executions.\n variables = this_tape.watched_variables()\n if not variables:\n raise ValueError(\"No trainable variables were accessed while the \"\n \"function was being computed.\")\n\n sources = [v.handle for v in variables]\n for s in sources:\n if getattr(s, \"is_packed\", False):\n raise ValueError(\n \"GradientTape.gradient is not supported on packed EagerTensors yet.\"\n )\n grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node),\n sources)\n return end_node, list(zip(grad, variables))\n\n return grad_fn\n\n\ndef implicit_grad(f):\n \"\"\"Returns a function which differentiates f with respect to variables.\n\n The wrapped function returns the gradient of f when called with the same\n arguments. The gradient is with respect to all trainable TFE variables\n accessed by `f`.\n\n This function is useful when the exact set of variables to differentiate with\n is not known ahead of time.\n\n Example:\n\n ```python\n dense_layer = tf.compat.v1.layers.Dense(1)\n def loss(x, y):\n return tf.reduce_sum(tf.square(dense_layer(x) - y))\n\n # Obtain the gradient function.\n grad_fn = tfe.implicit_gradients(loss)\n\n # Invoke the gradient function with concrete values of x and y.\n x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n y = tf.constant([[10.0], [20.0]])\n grads_and_vars = grad_fn(x, y)\n\n # Apply the gradients to Variables.\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)\n optimizer.apply_gradients(grads_and_vars)\n ```\n\n Args:\n f: function to be differentiated. 
If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar.\n\n Returns:\n A function which, when called, returns a list of (gradient, variable) pairs.\n \"\"\"\n # TODO(cais): Remove calls to tf.constant() once the gradients functions\n # accept lists and np.ndarrays.\n\n def grad_fn(*args, **kwds):\n \"\"\"Computes the gradient of the wrapped function.\"\"\"\n return implicit_val_and_grad(f)(*args, **kwds)[1]\n\n return grad_fn\n\n\ndef _get_arg_spec(f, params, param_args):\n \"\"\"The positions of the parameters of f to be differentiated in param_args.\"\"\"\n try:\n args = tf_inspect.getfullargspec(f).args\n except TypeError as e:\n # TypeError can happen when f is a callable object.\n if params is None:\n return range(len(param_args))\n elif all(isinstance(x, int) for x in params):\n return params\n raise ValueError(\"Either callable provided is not a function or could not \"\n \"inspect its arguments by name: %s. Original error: %s\"\n % (f, e))\n if params is None:\n if not args:\n return range(len(param_args))\n if args[0] == \"self\":\n return range(len(args) - 1)\n else:\n return range(len(args))\n elif all(isinstance(x, six.string_types) for x in params):\n return [args.index(n) for n in params]\n elif all(isinstance(x, int) for x in params):\n return params\n else:\n raise ValueError(\n \"params must be all strings or all integers; got %s.\" % params)\n\n\ndef gradients_function(f, params=None):\n \"\"\"Returns a function which differentiates f with respect to params.\n\n Example:\n ```python\n # f(x, y) = (x ^ 3) * y - x * (y ^ 2)\n # Therefore, the 1st order derivatives are:\n # df / dx = 3 * (x ^ 2) * y - y ^ 2\n # df / dy = x ^ 3 - 2 * x * y\n # The 2nd order derivatives with respect to x is:\n # d^2 f / (dx)^2 = 6 * x * y\n def f(x, y):\n return x * x * x * y - x * y * y\n\n # Obtain a function that returns 1st order gradients.\n grad_fn = tfe.gradients_function(f)\n\n x = 2.0\n y = 3.0\n\n # Invoke the 1st order gradient function.\n x_grad, y_grad = grad_fn(x, y)\n assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n\n # Obtain a function that returns the 2nd order gradient with respect to x.\n gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])\n\n # Invoke the 2nd order gradient function.\n x_gradgrad = gradgrad_fn(x, y)[0]\n assert x_gradgrad.numpy() == 6 * 2 * 3\n\n # To obtain a callable that returns the gradient(s) of `f` with respect to a\n # subset of its inputs, use the `params` keyword argument with\n # `gradients_function()`.\n ygrad_fn = tfe.gradients_function(f, params=[1])\n\n (y_grad,) = ygrad_fn(x, y)\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n ```\n\n Note that only tensors with real or complex dtypes are differentiable.\n\n Args:\n f: function to be differentiated. If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar. If desired, the tensors can be elementwise multiplied by the\n tensors passed as the `dy` keyword argument to the returned gradient\n function.\n params: list of parameter names of f or list of integers indexing the\n parameters with respect to which we'll differentiate. 
Passing None\n differentiates with respect to all parameters.\n\n Returns:\n function which, when called, returns the value of f and the gradient\n of `f` with respect to all of `params`. The function takes an extra optional\n keyword argument `dy`. Setting it allows computation of vector jacobian\n products for vectors other than the vector of ones.\n\n Raises:\n ValueError: if the params are not all strings or all integers.\n \"\"\"\n\n def decorated(*args, **kwds):\n \"\"\"Computes the gradient of the decorated function.\"\"\"\n\n _, grad = val_and_grad_function(f, params=params)(*args, **kwds)\n return grad\n\n return decorated\n\n\ndef _ensure_unique_tensor_objects(parameter_positions, args):\n \"\"\"Make each of the parameter_positions in args a unique ops.Tensor object.\n\n Ensure that each parameter is treated independently.\n For example:\n\n def f(x, y): return x * y\n g = gradients_function(f)\n one = tf.constant(1.)\n\n g(one, one) should return [1., 1.]\n (even though the two arguments are the same Tensor object).\n\n Args:\n parameter_positions: List of indices into args defining the arguments to\n differentiate against.\n args: A list of arguments to the function to be differentiated.\n\n Returns:\n args, possibly edited in-place.\n \"\"\"\n s = set()\n for (i, t) in enumerate(args):\n if i in parameter_positions:\n tid = ops.tensor_id(t)\n if tid in s:\n args[i] = gen_array_ops.identity(args[i])\n else:\n s.add(tid)\n return args\n\n\ndef val_and_grad_function(f, params=None):\n \"\"\"Returns a function that computes f and its derivative w.r.t. params.\n\n Example:\n ```python\n # f(x, y) = (x ^ 3) * y - x * (y ^ 2)\n # Therefore, the 1st order derivatives are:\n # df / dx = 3 * (x ^ 2) * y - y ^ 2\n # df / dy = x ^ 3 - 2 * x * y\n def f(x, y):\n return x * x * x * y - x * y * y\n\n # Obtain a function that returns the function value and the 1st order\n # gradients.\n val_grads_fn = tfe.value_and_gradients_function(f)\n\n x = 2.0\n y = 3.0\n\n # Invoke the value-and-gradients function.\n f_val, (x_grad, y_grad) = val_grads_fn(x, y)\n assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)\n assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n\n # To obtain a callable that returns the value of `f` and the gradient(s) of\n # `f` with respect to a subset of its inputs, use the `params` keyword\n # argument with `value_and_gradients_function()`.\n val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])\n\n f_val, (y_grad,) = val_ygrad_fn(x, y)\n assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n ```\n\n Args:\n f: function to be differentiated. If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar. If desired, the tensors can be elementwise multiplied by the\n tensors passed as the `dy` keyword argument to the returned gradient\n function.\n params: list of parameter names of f or list of integers indexing the\n parameters with respect to which we'll differentiate. Passing `None`\n differentiates with respect to all parameters.\n\n Returns:\n function which, when called, returns the value of f and the gradient\n of f with respect to all of `params`. The function takes an extra optional\n keyword argument \"dy\". 
Setting it allows computation of vector jacobian\n products for vectors other than the vector of ones.\n\n Raises:\n ValueError: if the params are not all strings or all integers.\n \"\"\"\n\n def decorated(*args, **kwds):\n \"\"\"Computes the value and gradient of the decorated function.\"\"\"\n dy = kwds.pop(\"dy\", None)\n if kwds:\n raise ValueError(\"Functions to be differentiated cannot \"\n \"receive keyword arguments.\")\n val, vjp = make_vjp(f, params)(*args, **kwds)\n return val, vjp(dy=dy)\n\n return decorated\n\n\ndef make_vjp(f, params=None, persistent=True):\n \"\"\"Returns a function that computes f and its vjp w.r.t.\n\n params.\n\n The term \"vjp\" here is an abbreviation for vector-jacobian product.\n\n Args:\n f: the function to be differentiated.\n params: the parameters (numbers or names) to differentiate with respect to.\n A value of None will differentiate with respect to all parameters.\n persistent: Boolean controlling whether the VJP function can be re-used.\n Must be True or False.\n\n Returns:\n A function, which when called, returns a tuple (value, vjp), where:\n - value is the result of calling f.\n - vjp is a function, which takes a vector as an argument and\n returns the product of that vector with the Jacobian of f.\n Providing no argument to vjp is equivalent to providing a\n vector of ones.\n\n For example,\n ```python\n def f(x):\n return x * x\n\n wrapped_fn = tfe.make_vjp(f)\n result, vjp = wrapped_fn(tf.constant(3.0))\n # result is 9.0\n vjp() # the vjp function returns 6.0\n\n Raises:\n ValueError: if `f` returns None.\n \"\"\"\n\n def decorated(*args, **kwds):\n \"\"\"Computes the value and gradient of the decorated function.\"\"\"\n parameter_positions = _get_arg_spec(f, params, args)\n assert not kwds, \"The gradient function can't take keyword arguments.\"\n this_tape = tape.push_new_tape(persistent=persistent)\n try:\n sources = []\n args = [\n ops.convert_to_tensor(arg) if i in parameter_positions else arg\n for i, arg in enumerate(args)\n ]\n args = _ensure_unique_tensor_objects(parameter_positions, args)\n for i in parameter_positions:\n if getattr(args[i], \"is_packed\", False):\n raise ValueError(\n \"GradientTape.gradient is not supported on packed EagerTensors\"\n \"yet.\")\n sources.append(args[i])\n tape.watch(this_tape, args[i])\n result = f(*args)\n if result is None:\n raise ValueError(\"Cannot differentiate a function that returns None; \"\n \"did you forget to return a value from {}?\".format(\n f.__name__))\n flat_result = nest.flatten(result)\n flat_result = [gen_array_ops.identity(x) for x in flat_result]\n result = nest.pack_sequence_as(result, flat_result)\n finally:\n tape.pop_tape(this_tape)\n def vjp(dy=None):\n if dy is not None:\n dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]\n return imperative_grad.imperative_grad(\n this_tape, nest.flatten(result), sources, output_gradients=dy)\n\n return result, vjp\n\n return decorated\n\n\ndef flatten_nested_indexed_slices(grad):\n assert isinstance(grad, ops.IndexedSlices)\n if isinstance(grad.values, ops.Tensor):\n return grad\n else:\n assert isinstance(grad.values, ops.IndexedSlices)\n g = flatten_nested_indexed_slices(grad.values)\n return ops.IndexedSlices(g.values, array_ops.gather(grad.indices,\n g.indices),\n g.dense_shape)\n\n\ndef aggregate_indexed_slices_gradients(grads):\n \"\"\"Aggregates gradients containing `IndexedSlices`s.\"\"\"\n if len(grads) < 1:\n return None\n if len(grads) == 1:\n return grads[0]\n grads = [g for g in grads if g is not None]\n # 
If any gradient is a `Tensor`, sum them up and return a dense tensor\n # object.\n if any(isinstance(g, ops.Tensor) for g in grads):\n return math_ops.add_n(grads)\n\n # The following `_as_indexed_slices_list` casts ids of IndexedSlices into\n # int64. It is to make sure the inputs of `concat` all have same the data\n # type.\n grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access\n\n grads = [flatten_nested_indexed_slices(x) for x in grads]\n # Form IndexedSlices out of the concatenated values and indices.\n concat_grad = ops.IndexedSlices(\n array_ops.concat([x.values for x in grads], axis=0),\n array_ops.concat([x.indices for x in grads], axis=0),\n grads[0].dense_shape)\n\n return concat_grad\n\n\ndef _aggregate_grads(gradients):\n \"\"\"Aggregate gradients from multiple sources.\n\n Args:\n gradients: A list of 'Tensor' or 'IndexedSlices' gradients.\n\n Returns:\n If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.\n Otherwise returns an aggregated 'IndexedSlices'.\n \"\"\"\n assert gradients, \"No gradients to aggregate\"\n\n if len(gradients) == 1:\n return gradients[0]\n if all(isinstance(g, ops.Tensor) for g in gradients):\n return gen_math_ops.add_n(gradients)\n else:\n assert all(isinstance(g, (ops.Tensor, ops.IndexedSlices))\n for g in gradients)\n return aggregate_indexed_slices_gradients(gradients)\n\n\ndef _num_elements(grad):\n \"\"\"The number of elements in the `grad` tensor.\"\"\"\n if isinstance(grad, ops.Tensor):\n shape_tuple = grad._shape_tuple() # pylint: disable=protected-access\n elif isinstance(grad, ops.IndexedSlices):\n shape_tuple = grad.values._shape_tuple() # pylint: disable=protected-access\n else:\n raise ValueError(\"`grad` not a Tensor or IndexedSlices.\")\n if shape_tuple is None or None in shape_tuple:\n return 0\n return functools.reduce(operator.mul, shape_tuple, 1)\n\n\ndef _fast_fill(value, shape, dtype):\n return array_ops.fill(\n constant_op.constant(shape, dtype=dtypes.int32),\n constant_op.constant(value, dtype=dtype))\n\n\ndef _zeros(shape, dtype):\n \"\"\"Helper to return (possibly cached) zero tensors in eager mode.\"\"\"\n # Note: variants will use _zeros_like\n if dtype == dtypes.string or dtype == dtypes.resource:\n return None\n\n ctx = context.context()\n if not ctx.executing_eagerly():\n return array_ops.zeros(shape, dtype)\n\n device = ctx.device_name\n\n if tensor_util.is_tensor(shape):\n shape_key = shape.ref()\n else:\n shape_key = shape\n cache_key = shape_key, dtype, device\n cached = ctx.zeros_cache().get(cache_key)\n if cached is None:\n if dtypes.as_dtype(dtype).is_bool:\n value = False\n else:\n value = 0\n cached = _fast_fill(value, shape, dtype)\n ctx.zeros_cache().put(cache_key, cached)\n return cached\n\n\ndef _ones(shape, dtype):\n as_dtype = dtypes.as_dtype(dtype)\n if as_dtype == dtypes.string:\n return None\n\n if not context.executing_eagerly():\n return array_ops.ones(shape, dtype)\n\n if as_dtype.is_bool:\n value = True\n else:\n value = 1\n\n if shape == (): # pylint: disable=g-explicit-bool-comparison\n return constant_op.constant(value, dtype=dtype)\n return _fast_fill(value, shape, dtype)\n\n\n_default_vspace = imperative_grad.VSpace(\n num_elements_fn=_num_elements,\n aggregate_fn=_aggregate_grads,\n zeros_fn=_zeros,\n ones_fn=_ones,\n zeros_like_fn=default_gradient.zeros_like,\n ones_like_fn=default_gradient.ones_like,\n graph_shape_fn=gen_array_ops.shape)\npywrap_tfe.TFE_Py_RegisterVSpace(_default_vspace)\n\n\ndef _handle_or_self(x):\n \"\"\"Unwrap resource 
variable/ndarray to return tensors.\"\"\"\n if resource_variable_ops.is_resource_variable(x):\n return x.handle\n if isinstance(x, np_arrays.ndarray):\n return x.data\n return x\n\n\n@tf_export(\"GradientTape\", \"autodiff.GradientTape\", v1=[\"GradientTape\"])\nclass GradientTape(object):\n \"\"\"Record operations for automatic differentiation.\n\n Operations are recorded if they are executed within this context manager and\n at least one of their inputs is being \"watched\".\n\n Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`,\n where `trainable=True` is default in both cases) are automatically watched.\n Tensors can be manually watched by invoking the `watch` method on this context\n manager.\n\n For example, consider the function `y = x * x`. The gradient at `x = 3.0` can\n be computed as:\n\n ```python\n x = tf.constant(3.0)\n with tf.GradientTape() as g:\n g.watch(x)\n y = x * x\n dy_dx = g.gradient(y, x) # Will compute to 6.0\n ```\n\n GradientTapes can be nested to compute higher-order derivatives. For example,\n\n ```python\n x = tf.constant(3.0)\n with tf.GradientTape() as g:\n g.watch(x)\n with tf.GradientTape() as gg:\n gg.watch(x)\n y = x * x\n dy_dx = gg.gradient(y, x) # Will compute to 6.0\n d2y_dx2 = g.gradient(dy_dx, x) # Will compute to 2.0\n ```\n\n By default, the resources held by a GradientTape are released as soon as\n GradientTape.gradient() method is called. To compute multiple gradients over\n the same computation, create a persistent gradient tape. This allows multiple\n calls to the gradient() method as resources are released when the tape object\n is garbage collected. For example:\n\n ```python\n x = tf.constant(3.0)\n with tf.GradientTape(persistent=True) as g:\n g.watch(x)\n y = x * x\n z = y * y\n dz_dx = g.gradient(z, x) # 108.0 (4*x^3 at x = 3)\n dy_dx = g.gradient(y, x) # 6.0\n del g # Drop the reference to the tape\n ```\n\n By default GradientTape will automatically watch any trainable variables that\n are accessed inside the context. If you want fine grained control over which\n variables are watched you can disable automatic tracking by passing\n `watch_accessed_variables=False` to the tape constructor:\n\n ```python\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(variable_a)\n y = variable_a ** 2 # Gradients will be available for `variable_a`.\n z = variable_b ** 3 # No gradients will be available since `variable_b` is\n # not being watched.\n ```\n\n Note that when using models you should ensure that your variables exist when\n using `watch_accessed_variables=False`. Otherwise it's quite easy to make your\n first iteration not have any gradients:\n\n ```python\n a = tf.keras.layers.Dense(32)\n b = tf.keras.layers.Dense(32)\n\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(a.variables) # Since `a.build` has not been called at this point\n # `a.variables` will return an empty list and the\n # tape will not be watching anything.\n result = b(a(inputs))\n tape.gradient(result, a.variables) # The result of this computation will be\n # a list of `None`s since a's variables\n # are not being watched.\n ```\n\n Note that only tensors with real or complex dtypes are differentiable.\n \"\"\"\n\n def __init__(self, persistent=False, watch_accessed_variables=True):\n \"\"\"Creates a new GradientTape.\n\n Args:\n persistent: Boolean controlling whether a persistent gradient tape\n is created. 
False by default, which means at most one call can\n be made to the gradient() method on this object.\n watch_accessed_variables: Boolean controlling whether the tape will\n automatically `watch` any (trainable) variables accessed while the tape\n is active. Defaults to True meaning gradients can be requested from any\n result computed in the tape derived from reading a trainable `Variable`.\n If False users must explicitly `watch` any `Variable`s they want to\n request gradients from.\n \"\"\"\n self._tape = None\n self._persistent = persistent\n self._watch_accessed_variables = watch_accessed_variables\n self._watched_variables = ()\n self._recording = False\n self._created_eagerly = context.executing_eagerly()\n if self._created_eagerly:\n context.ensure_initialized()\n context.context().start_step()\n\n def __enter__(self):\n \"\"\"Enters a context inside which operations are recorded on this tape.\"\"\"\n self._push_tape()\n return self\n\n def __exit__(self, typ, value, traceback):\n \"\"\"Exits the recording context, no further operations are traced.\"\"\"\n if self._recording:\n self._pop_tape()\n\n def _push_tape(self):\n \"\"\"Pushes a new tape onto the tape stack.\"\"\"\n if self._recording:\n raise ValueError(\"Tape is still recording, This can happen if you try to \"\n \"re-enter an already-active tape.\")\n if self._tape is None:\n self._tape = tape.push_new_tape(\n persistent=self._persistent,\n watch_accessed_variables=self._watch_accessed_variables)\n else:\n tape.push_tape(self._tape)\n self._recording = True\n\n def _pop_tape(self):\n if not self._recording:\n raise ValueError(\"Tape is not recording.\")\n tape.pop_tape(self._tape)\n self._recording = False\n\n def __del__(self):\n if self._created_eagerly:\n try:\n context.context().end_step()\n except AttributeError:\n pass\n except TypeError:\n pass\n\n def watch(self, tensor):\n \"\"\"Ensures that `tensor` is being traced by this tape.\n\n Args:\n tensor: a Tensor or list of Tensors.\n\n Raises:\n ValueError: if it encounters something that is not a tensor.\n \"\"\"\n for t in nest.flatten(tensor, expand_composites=True):\n if not (_pywrap_utils.IsTensor(t) or _pywrap_utils.IsVariable(t)):\n raise ValueError(\"Passed in object of type {}, not tf.Tensor\".format(\n type(t)))\n if not backprop_util.IsTrainable(t):\n logging.log_first_n(\n logging.WARN, \"The dtype of the watched tensor must be \"\n \"floating (e.g. tf.float32), got %r\", 5, t.dtype)\n if hasattr(t, \"handle\"):\n # There are many variable-like objects, all of them currently have\n # `handle` attribute that points to a tensor. If this changes, internals\n # of watch_variable need to change as well.\n tape.watch_variable(self._tape, t)\n else:\n tape.watch(self._tape, t)\n\n @tf_contextlib.contextmanager\n def stop_recording(self):\n \"\"\"Temporarily stops recording operations on this tape.\n\n Operations executed while this context manager is active will not be\n recorded on the tape. 
This is useful for reducing the memory used by tracing\n all computations.\n\n For example:\n\n ```\n with tf.GradientTape(persistent=True) as t:\n loss = compute_loss(model)\n with t.stop_recording():\n # The gradient computation below is not traced, saving memory.\n grads = t.gradient(loss, model.variables)\n ```\n\n Yields:\n None\n Raises:\n RuntimeError: if the tape is not currently recording.\n \"\"\"\n if self._tape is None:\n raise RuntimeError(\n \"Trying to stop recording a tape which is not recording.\")\n self._pop_tape()\n try:\n yield\n finally:\n self._push_tape()\n\n def reset(self):\n \"\"\"Clears all information stored in this tape.\n\n Equivalent to exiting and reentering the tape context manager with a new\n tape. For example, the two following code blocks are equivalent:\n\n ```\n with tf.GradientTape() as t:\n loss = loss_fn()\n with tf.GradientTape() as t:\n loss += other_loss_fn()\n t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn\n\n\n # The following is equivalent to the above\n with tf.GradientTape() as t:\n loss = loss_fn()\n t.reset()\n loss += other_loss_fn()\n t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn\n ```\n\n This is useful if you don't want to exit the context manager for the tape,\n or can't because the desired reset point is inside a control flow construct:\n\n ```\n with tf.GradientTape() as t:\n loss = ...\n if loss > k:\n t.reset()\n ```\n \"\"\"\n self._pop_tape()\n self._tape = None\n self._push_tape()\n\n def watched_variables(self):\n \"\"\"Returns variables watched by this tape in order of construction.\"\"\"\n if self._tape is not None:\n self._watched_variables = self._tape.watched_variables()\n return self._watched_variables\n\n def gradient(self,\n target,\n sources,\n output_gradients=None,\n unconnected_gradients=UnconnectedGradients.NONE):\n \"\"\"Computes the gradient using operations recorded in context of this tape.\n\n Args:\n target: a list or nested structure of Tensors or Variables to be\n differentiated.\n sources: a list or nested structure of Tensors or Variables. `target`\n will be differentiated against elements in `sources`.\n output_gradients: a list of gradients, one for each element of\n target. Defaults to None.\n unconnected_gradients: a value which can either hold 'none' or 'zero' and\n alters the value which will be returned if the target and sources are\n unconnected. The possible values and effects are detailed in\n 'UnconnectedGradients' and it defaults to 'none'.\n\n Returns:\n a list or nested structure of Tensors (or IndexedSlices, or None),\n one for each element in `sources`. Returned structure is the same as\n the structure of `sources`.\n\n Raises:\n RuntimeError: if called inside the context of the tape, or if called more\n than once on a non-persistent tape.\n ValueError: if the target is a variable or if unconnected gradients is\n called with an unknown value.\n \"\"\"\n if self._tape is None:\n raise RuntimeError(\"GradientTape.gradient can only be called once on \"\n \"non-persistent tapes.\")\n if self._recording:\n if not self._persistent:\n self._pop_tape()\n else:\n logging.log_first_n(\n logging.WARN, \"Calling GradientTape.gradient on a persistent \"\n \"tape inside its context is significantly less \"\n \"efficient than calling it outside the context (it \"\n \"causes the gradient ops to be recorded on the \"\n \"tape, leading to increased CPU and memory usage). 
\"\n \"Only call GradientTape.gradient inside the \"\n \"context if you actually want to trace the \"\n \"gradient in order to compute higher order \"\n \"derivatives.\", 1)\n\n num_ndarrays = 0\n flat_targets = []\n for t in nest.flatten(target):\n if not backprop_util.IsTrainable(t):\n logging.vlog(\n logging.WARN, \"The dtype of the target tensor must be \"\n \"floating (e.g. tf.float32) when calling GradientTape.gradient, \"\n \"got %r\", t.dtype)\n if resource_variable_ops.is_resource_variable(t):\n with self:\n t = ops.convert_to_tensor(t)\n elif isinstance(t, np_arrays.ndarray):\n t = t.data\n num_ndarrays += 1\n flat_targets.append(t)\n # Only rewrap if all targets are ndarray. If not, prefer tensors.\n rewrap_as_ndarray = num_ndarrays == len(flat_targets)\n\n flat_sources = nest.flatten(sources)\n flat_sources_raw = flat_sources\n flat_sources = [_handle_or_self(x) for x in flat_sources]\n for t in flat_sources_raw:\n if not backprop_util.IsTrainable(t):\n logging.vlog(\n logging.WARN, \"The dtype of the source tensor must be \"\n \"floating (e.g. tf.float32) when calling GradientTape.gradient, \"\n \"got %r\", t.dtype)\n if getattr(t, \"is_packed\", False):\n raise ValueError(\n \"GradientTape.gradient is not supported on packed EagerTensors yet.\"\n )\n\n if output_gradients is not None:\n output_gradients = [None if x is None else ops.convert_to_tensor(x)\n for x in nest.flatten(output_gradients)]\n\n flat_grad = imperative_grad.imperative_grad(\n self._tape,\n flat_targets,\n flat_sources,\n output_gradients=output_gradients,\n sources_raw=flat_sources_raw,\n unconnected_gradients=unconnected_gradients)\n\n if not self._persistent:\n # Keep track of watched variables before setting tape to None\n self._watched_variables = self._tape.watched_variables()\n self._tape = None\n\n if rewrap_as_ndarray:\n flat_grad = nest.map_structure(np_arrays.tensor_to_ndarray, flat_grad)\n\n grad = nest.pack_sequence_as(sources, flat_grad)\n return grad\n\n def jacobian(self,\n target,\n sources,\n unconnected_gradients=UnconnectedGradients.NONE,\n parallel_iterations=None,\n experimental_use_pfor=True):\n \"\"\"Computes the jacobian using operations recorded in context of this tape.\n\n See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant) for the\n definition of a Jacobian.\n\n Example usage:\n\n ```python\n with tf.GradientTape() as g:\n x = tf.constant([1.0, 2.0])\n g.watch(x)\n y = x * x\n jacobian = g.jacobian(y, x)\n # jacobian value is [[2., 0.], [0., 4.]]\n ```\n\n Args:\n target: Tensor to be differentiated.\n sources: a list or nested structure of Tensors or Variables. `target`\n will be differentiated against elements in `sources`.\n unconnected_gradients: a value which can either hold 'none' or 'zero' and\n alters the value which will be returned if the target and sources are\n unconnected. The possible values and effects are detailed in\n 'UnconnectedGradients' and it defaults to 'none'.\n parallel_iterations: A knob to control how many iterations are dispatched\n in parallel. This knob can be used to control the total memory usage.\n experimental_use_pfor: If true, vectorizes the jacobian computation. Else\n falls back to a sequential while_loop. Vectorization can sometimes fail\n or lead to excessive memory usage. This option can be used to disable\n vectorization in such cases.\n\n Returns:\n A list or nested structure of Tensors (or None), one for each element in\n `sources`. 
Returned structure is the same as the structure of `sources`.\n Note if any gradient is sparse (IndexedSlices), jacobian function\n currently makes it dense and returns a Tensor instead. This may change in\n the future.\n\n\n Raises:\n RuntimeError: If called on a non-persistent tape with eager execution\n enabled and without enabling experimental_use_pfor.\n ValueError: If vectorization of jacobian computation fails.\n \"\"\"\n flat_sources = nest.flatten(sources)\n rewrap_as_ndarray = False\n if isinstance(target, np_arrays.ndarray):\n target = target.data\n rewrap_as_ndarray = True\n target_static_shape = target.shape\n target_shape = array_ops.shape(target)\n # Note that we push and pop the tape here and below. This is needed since we\n # need gradients through the enclosed operations.\n self._push_tape()\n target = array_ops.reshape(target, [-1])\n self._pop_tape()\n\n def loop_fn(i):\n self._push_tape()\n y = array_ops.gather(target, i)\n self._pop_tape()\n return self.gradient(y, flat_sources,\n unconnected_gradients=unconnected_gradients)\n\n try:\n target_size = int(target.shape[0])\n except TypeError:\n target_size = array_ops.shape(target)[0]\n\n if experimental_use_pfor:\n try:\n output = pfor_ops.pfor(loop_fn, target_size,\n parallel_iterations=parallel_iterations)\n except ValueError as err:\n six.reraise(\n ValueError,\n ValueError(\n str(err) + \"\\nEncountered an exception while vectorizing the \"\n \"jacobian computation. Vectorization can be disabled by setting\"\n \" experimental_use_pfor to False.\"),\n sys.exc_info()[2])\n else:\n if context.executing_eagerly() and not self._persistent:\n raise RuntimeError(\n \"GradientTape must be created with persistent=True\"\n \" to compute the jacobian with eager execution enabled and with \"\n \" experimental_use_pfor set to False.\")\n output = pfor_ops.for_loop(\n loop_fn, [target.dtype] * len(flat_sources), target_size,\n parallel_iterations=parallel_iterations)\n\n for i, out in enumerate(output):\n if out is not None:\n new_shape = array_ops.concat(\n [target_shape, array_ops.shape(out)[1:]], axis=0)\n out = array_ops.reshape(out, new_shape)\n if context.executing_eagerly():\n out.set_shape(target_static_shape.concatenate(flat_sources[i].shape))\n if rewrap_as_ndarray:\n out = np_arrays.tensor_to_ndarray(out)\n output[i] = out\n\n return nest.pack_sequence_as(sources, output)\n\n def batch_jacobian(self,\n target,\n source,\n unconnected_gradients=UnconnectedGradients.NONE,\n parallel_iterations=None,\n experimental_use_pfor=True):\n \"\"\"Computes and stacks per-example jacobians.\n\n See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant) for the\n definition of a Jacobian. This function is essentially an efficient\n implementation of the following:\n\n `tf.stack([self.jacobian(y[i], x[i]) for i in range(x.shape[0])])`.\n\n Note that compared to `GradientTape.jacobian` which computes gradient of\n each output value w.r.t each input value, this function is useful when\n `target[i,...]` is independent of `source[j,...]` for `j != i`. This\n assumption allows more efficient computation as compared to\n `GradientTape.jacobian`. 
The output, as well as intermediate activations,\n are lower dimensional and avoid a bunch of redundant zeros which would\n result in the jacobian computation given the independence assumption.\n\n Example usage:\n\n ```python\n with tf.GradientTape() as g:\n x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32)\n g.watch(x)\n y = x * x\n batch_jacobian = g.batch_jacobian(y, x)\n # batch_jacobian is [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]\n ```\n\n Args:\n target: A tensor with rank 2 or higher and with shape [b, y1, ..., y_n].\n `target[i,...]` should only depend on `source[i,...]`.\n source: A tensor with rank 2 or higher and with shape [b, x1, ..., x_m].\n unconnected_gradients: a value which can either hold 'none' or 'zero' and\n alters the value which will be returned if the target and sources are\n unconnected. The possible values and effects are detailed in\n 'UnconnectedGradients' and it defaults to 'none'.\n parallel_iterations: A knob to control how many iterations are dispatched\n in parallel. This knob can be used to control the total memory usage.\n experimental_use_pfor: If true, uses pfor for computing the Jacobian. Else\n uses a tf.while_loop.\n\n Returns:\n A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`\n is the jacobian of `target[i, ...]` w.r.t. `source[i, ...]`, i.e. stacked\n per-example jacobians.\n\n Raises:\n RuntimeError: If called on a non-persistent tape with eager execution\n enabled and without enabling experimental_use_pfor.\n ValueError: If vectorization of jacobian computation fails or if first\n dimension of `target` and `source` do not match.\n \"\"\"\n target_shape = target.shape\n if target_shape.rank is None:\n dim = tensor_shape.Dimension(None)\n else:\n dim = target_shape.dims[0]\n if not (target_shape.with_rank_at_least(2) and\n source.shape.with_rank_at_least(2) and\n dim.is_compatible_with(source.shape[0])):\n raise ValueError(\n \"Need first dimension of target shape (%s) and \"\n \"source shape (%s) to match.\" % (target.shape, source.shape))\n if target_shape.is_fully_defined():\n batch_size = int(target_shape[0])\n target_row_size = target_shape.num_elements() // batch_size\n else:\n target_shape = array_ops.shape(target)\n batch_size = target_shape[0]\n target_row_size = array_ops.size(target) // batch_size\n source_shape = array_ops.shape(source)\n # Flatten target to 2-D.\n # Note that we push and pop the tape here and below. This is needed since we\n # need gradients through the enclosed operations.\n self._push_tape()\n with ops.control_dependencies(\n [check_ops.assert_equal(batch_size, source_shape[0])]):\n target = array_ops.reshape(target, [batch_size, target_row_size])\n self._pop_tape()\n\n def loop_fn(i):\n self._push_tape()\n y = array_ops.gather(target, i, axis=1)\n self._pop_tape()\n return self.gradient(y, source,\n unconnected_gradients=unconnected_gradients)\n\n if experimental_use_pfor:\n try:\n output = pfor_ops.pfor(loop_fn, target_row_size,\n parallel_iterations=parallel_iterations)\n except ValueError as err:\n six.reraise(\n ValueError,\n ValueError(\n str(err) + \"\\nEncountered an exception while vectorizing the \"\n \"batch_jacobian computation. 
Vectorization can be disabled by \"\n \"setting experimental_use_pfor to False.\"),\n sys.exc_info()[2])\n else:\n if context.executing_eagerly() and not self._persistent:\n raise RuntimeError(\n \"GradientTape must be created with persistent=True\"\n \" to compute the batch_jacobian with eager execution enabled and \"\n \" with experimental_use_pfor set to False.\")\n output = pfor_ops.for_loop(loop_fn, target.dtype, target_row_size,\n parallel_iterations=parallel_iterations)\n new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0)\n if output is None:\n return array_ops.zeros(new_shape)\n else:\n output = array_ops.reshape(output,\n [target_row_size, batch_size, -1])\n output = array_ops.transpose(output, [1, 0, 2])\n return array_ops.reshape(output, new_shape)\n"
] | [
[
"tensorflow.python.eager.tape.push_new_tape",
"tensorflow.python.eager.imperative_grad.VSpace",
"tensorflow.python.eager.tape.pop_tape",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.eager.tape.watch",
"tensorflow.python.ops.math_ops._as_indexed_slices_list",
"tensorflow.python._pywrap_utils.IsVariable",
"tensorflow.python.platform.tf_logging.vlog",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.gen_array_ops.identity",
"tensorflow.python.platform.tf_logging.log_first_n",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.framework.ops.get_name_scope",
"tensorflow.python.eager.tape.watch_variable",
"tensorflow.python.eager.backprop_util.IsTrainable",
"tensorflow.python.eager.tape.push_tape",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.eager.context.ensure_initialized",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.pywrap_tfe.TFE_Py_RegisterGradientFunction",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.pywrap_tfe.TFE_OpNameGetAttrType",
"tensorflow.python.eager.context.context",
"tensorflow.python._pywrap_utils.IsTensor",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.pywrap_tfe.TFE_Py_RegisterVSpace",
"tensorflow.python.ops.gen_math_ops.add_n",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.pywrap_tfe.TFE_Py_TapeSetIsEmpty",
"tensorflow.python.ops.resource_variable_ops.is_resource_variable",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.framework.ops._gradient_registry.lookup",
"tensorflow.python.eager.imperative_grad.imperative_grad",
"tensorflow.python.framework.ops.tensor_id",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions"
]
] |
VisExcell/riskmodels | [
"012bbfd563482ba09585cd042b1f9465253ab1f4"
] | [
"RiskAssessment.py"
] | [
"import datetime\nimport numpy as np\n\"\"\"\nClass to define a 'RiskAssessment' from FHIR.\nCurrently only produces JSON.\n{\n \"date\": date assesment was made in ISO format yyyy-mm-dd,\n \"results\": {\n \"five_year_abs\": Five year Absolute Risk for this patient as decimal\n \"five_year_ave\": Five year Risk for an average patient\n \"lifetime_abs\": Lifetime Absolute Risk for this patient as decimal\n \"lifetime_ave\": Lifetime Risk for an average patient\n }\n}\n\"\"\"\n\n\nclass BasicRiskAssessment:\n def __init__(self):\n self.resourceType = \"RiskAssessment\"\n #self.date = datetime.datetime.now().isoformat()\n self.date = datetime.date.today().isoformat()\n self.fiveyearABS = np.float64(-1)\n self.fiveyearAVE = np.float64(-1)\n self.lifetimeABS = np.float64(-1)\n self.lifetimeAVE = np.float64(-1)\n\n def setRiskScores(self, fiveABS, fiveAVE, lifeABS, lifeAVE):\n self.fiveyearABS = fiveABS\n self.fiveyearAVE = fiveAVE\n self.lifetimeABS = lifeABS\n self.lifetimeAVE = lifeAVE\n\n def getJson(self):\n return {\"date\":self.date,\n \"results\": {\n \"five_year_abs\": self.fiveyearABS,\n \"five_year_ave\": self.fiveyearAVE,\n \"lifetime_abs\": self.lifetimeABS,\n \"lifetime_ave\": self.lifetimeAVE\n }}"
] | [
[
"numpy.float64"
]
] |
alexgessner/emukit | [
"a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a"
] | [
"emukit/quadrature/acquisitions/squared_correlation.py"
] | [
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\n\nimport numpy as np\nfrom scipy.linalg import lapack\nfrom typing import Tuple\n\nfrom ...core.acquisition import Acquisition\nfrom ...quadrature.methods import VanillaBayesianQuadrature\n\n\nclass SquaredCorrelation(Acquisition):\n \"\"\"\n This acquisition function is the correlation between the integral and the new point(s) under a GP-model.\n\n SquaredCorrelation is identical to the integral-variance-reduction acquisition up to a global normalizing constant!\n\n .. math::\n \\rho^2(x) = \\frac{(\\int k_N(x_1, x)\\mathrm{d}x_1)^2}{\\mathfrac{v}_N v_N(x)}\\in [0, 1]\n\n where :math:`\\mathfrac{v}_N` is the current integral variance given N observations X, :math:`v_N(x)` is the\n predictive integral variance if point x was added newly, and :math:`k_N(x_1, x)` is the posterior kernel function.\n \"\"\"\n\n def __init__(self, model: VanillaBayesianQuadrature):\n \"\"\"\n :param model: The vanilla Bayesian quadrature model\n \"\"\"\n self.model = model\n\n def has_gradients(self) -> bool:\n return True\n\n def evaluate(self, x: np.ndarray) -> np.ndarray:\n \"\"\"\n Evaluates the acquisition function at x.\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: (n_points x 1) the acquisition function value at x\n \"\"\"\n return self._evaluate(x)[0]\n\n def _evaluate(self, x: np.ndarray) -> Tuple[np.ndarray, np.float, np.ndarray, np.ndarray]:\n \"\"\"\n Evaluates the acquisition function at x.\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: the acquisition function value at x, shape (n_points x 1), current integral variance,\n predictive variance + noise, predictive covariance between integral and x, shapes of the latter\n two (n_points, 1).\n \"\"\"\n integral_current_var, y_predictive_var, predictive_cov = self._value_terms(x)\n squared_correlation = predictive_cov**2 / (integral_current_var * y_predictive_var)\n return squared_correlation, integral_current_var, y_predictive_var, predictive_cov\n\n def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Evaluate the acquisition function with gradient\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: acquisition value and corresponding gradient at x, shapes (n_points, 1) and (n_points, input_dim)\n \"\"\"\n # value\n squared_correlation, integral_current_var, y_predictive_var, predictive_cov = self._evaluate(x)\n\n # gradient\n d_y_predictive_var_dx, d_predictive_cov_dx = self._gradient_terms(x)\n first_term = 2. 
* predictive_cov * d_predictive_cov_dx\n second_term = (predictive_cov**2 / y_predictive_var) * d_y_predictive_var_dx\n normalization = integral_current_var * y_predictive_var\n squared_correlation_gradient = (first_term - second_term) / normalization\n\n return squared_correlation, squared_correlation_gradient\n\n def _value_terms(self, x: np.ndarray) -> Tuple[np.float, np.ndarray, np.ndarray]:\n \"\"\"\n computes the terms needed for the squared correlation\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: current integral variance, predictive variance + noise, predictive covariance between integral and x,\n shapes of the latter two arrays are (n_points, 1).\n \"\"\"\n integral_current_var = self.model.integrate()[1]\n y_predictive_var = self.model.predict(x)[1] + self.model.base_gp.observation_noise_variance\n\n qKx = self.model.base_gp.kern.qK(x)\n qKX = self.model.base_gp.kern.qK(self.model.base_gp.X)\n\n predictive_cov = np.transpose(qKx - np.dot(qKX, self._graminv_Kx(x)))\n return integral_current_var, y_predictive_var, predictive_cov\n\n def _gradient_terms(self, x):\n \"\"\"\n Computes the terms needed for the gradient of the squared correlation\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: the gradient of (y_predictive_var, predictive_cov) wrt. x at param x, shapes (n_points, input_dim)\n \"\"\"\n # gradient of predictive variance of y\n dvar_dx = self.model.base_gp.kern.dKdiag_dx(x)\n dKxX_dx1 = self.model.base_gp.kern.dK_dx1(x, self.model.X)\n graminv_KXx = self._graminv_Kx(x)\n\n d_y_predictive_var_dx = dvar_dx - 2. * (dKxX_dx1 * np.transpose(graminv_KXx)).sum(axis=2, keepdims=False)\n\n # gradient of predictive covariance between integral and (x, y)-pair\n dqKx_dx = np.transpose(self.model.base_gp.kern.dqK_dx(x))\n qKX_graminv = self._qK_graminv() # (1, N)\n dKXx_dx2 = self.model.base_gp.kern.dK_dx2(self.model.X, x)\n d_predictive_cov_dx = dqKx_dx - np.dot(qKX_graminv, np.transpose(dKXx_dx2))[0, :, :]\n\n return np.transpose(d_y_predictive_var_dx), d_predictive_cov_dx\n\n # helpers\n def _graminv_Kx(self, x):\n \"\"\"\n Inverse kernel Gram matrix multiplied with kernel function k(x, x') evaluated at existing training datapoints\n and location x.\n\n .. math::\n [K(X, X) + \\sigma^2 I]^{-1} K (X, x)\n\n :param x: (n_points x input_dim) locations where to evaluate\n :return: (n_train_points, n_points)\n \"\"\"\n lower_chol = self.model.base_gp.gram_chol()\n KXx = self.model.base_gp.kern.K(self.model.base_gp.X, x)\n return lapack.dtrtrs(lower_chol.T, (lapack.dtrtrs(lower_chol, KXx, lower=1)[0]), lower=0)[0]\n\n def _qK_graminv(self):\n \"\"\"\n Inverse kernel mean multiplied with inverse kernel Gram matrix, all evaluated at training locations.\n\n .. math::\n \\int k(x, X)\\mathrm{d}x [k(X, X) + \\sigma^2 I]^{-1}\n\n :return: weights of shape (1, n_train_points)\n \"\"\"\n lower_chol = self.model.base_gp.gram_chol()\n qK = self.model.base_gp.kern.qK(self.model.base_gp.X)\n graminv_qK_trans = lapack.dtrtrs(lower_chol.T, (lapack.dtrtrs(lower_chol, qK.T, lower=1)[0]), lower=0)[0]\n return np.transpose(graminv_qK_trans)\n"
] | [
[
"scipy.linalg.lapack.dtrtrs",
"numpy.transpose"
]
] |
BelleJohn/neuropsychology-NeuroKit | [
"d01111b9b82364d28da01c002e6cbfc45d9493d9",
"d01111b9b82364d28da01c002e6cbfc45d9493d9"
] | [
"neurokit2/complexity/optim_complexity_k.py",
"neurokit2/rsp/rsp_findpeaks.py"
] | [
"from warnings import warn\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom ..misc import NeuroKitWarning, find_plateau\n\n\ndef complexity_k(signal, k_max=\"max\", show=False):\n \"\"\"Automated selection of the optimal k_max parameter for Higuchi Fractal Dimension (HFD).\n\n The optimal kmax is computed based on the point at which HFD values plateau for a range of kmax values (see Vega, 2015).\n\n Parameters\n ----------\n signal : Union[list, np.array, pd.Series]\n The signal (i.e., a time series) in the form of a vector of values.\n k_max : Union[int, str, list], optional\n Maximum number of interval times (should be greater than or equal to 3) to be tested. If 'max',\n it selects the maximum possible value corresponding to half the length of the signal.\n show : bool\n Visualise the slope of the curve for the selected kmax value.\n\n Returns\n --------\n k : float\n The optimal kmax of the time series.\n info : dict\n A dictionary containing additional information regarding the parameters used\n to compute optimal kmax.\n\n See Also\n --------\n fractal_higuchi\n\n Examples\n ----------\n >>> import neurokit2 as nk\n >>>\n >>> signal = nk.signal_simulate(duration=2, sampling_rate=100, frequency=[5, 6], noise=0.5)\n >>> k_max, info = nk.complexity_k(signal, k_max='default', show=True)\n >>> k_max #doctest: +SKIP\n\n Reference\n ----------\n - Higuchi, T. (1988). Approach to an irregular time series on the basis of the fractal theory.\n Physica D: Nonlinear Phenomena, 31(2), 277-283.\n\n - Vega, C. F., & Noel, J. (2015, June). Parameters analyzed of Higuchi's fractal dimension for EEG brain signals.\n In 2015 Signal Processing Symposium (SPSympo) (pp. 1-5). IEEE. https://ieeexplore.ieee.org/document/7168285\n \"\"\"\n # Get the range of k-max values to be tested\n # ------------------------------------------\n if isinstance(k_max, str): # e.g., \"default\"\n # upper limit for k value (max possible value)\n k_max = int(np.floor(len(signal) / 2)) # so that normalizing factor is positive\n\n if isinstance(k_max, int):\n kmax_range = np.arange(2, k_max + 1)\n elif isinstance(k_max, (list, np.ndarray, pd.Series)):\n kmax_range = np.array(k_max)\n else:\n warn(\n \"k_max should be an int or a list of values of kmax to be tested.\",\n category=NeuroKitWarning,\n )\n\n # Compute the slope for each kmax value\n # --------------------------------------\n vectorized_k_slope = np.vectorize(_complexity_k_slope, excluded=[1])\n slopes, intercepts, info = vectorized_k_slope(kmax_range, signal)\n # k_values = [d[\"k_values\"] for d in info]\n average_values = [d[\"average_values\"] for d in info]\n\n # Find plateau (the saturation point of slope)\n # --------------------------------------------\n optimal_point = find_plateau(slopes, show=False)\n if optimal_point is not None:\n kmax_optimal = kmax_range[optimal_point]\n else:\n kmax_optimal = np.max(kmax_range)\n warn(\n \"The optimal kmax value detected is 2 or less. There may be no plateau in this case. \"\n + f\"You can inspect the plot by set `show=True`. 
We will return optimal k_max = {kmax_optimal} (the max).\",\n category=NeuroKitWarning,\n )\n\n # Plot\n if show:\n _complexity_k_plot(kmax_range, slopes, kmax_optimal, ax=None)\n\n # Return optimal tau and info dict\n return kmax_optimal, {\n \"Values\": kmax_range,\n \"Scores\": slopes,\n \"Intercepts\": intercepts,\n \"Average_Values\": average_values,\n }\n\n\n# =============================================================================\n# Utilities\n# =============================================================================\n\n\ndef _complexity_k_Lk(k, signal):\n n = len(signal)\n\n # Step 1: construct k number of new time series for range of k_values from 1 to kmax\n k_subrange = np.arange(1, k + 1) # where m = 1, 2... k\n\n idx = np.tile(np.arange(0, len(signal), k), (k, 1)).astype(float)\n idx += np.tile(np.arange(0, k), (idx.shape[1], 1)).T\n mask = idx >= len(signal)\n idx[mask] = 0\n\n sig_values = signal[idx.astype(int)].astype(float)\n sig_values[mask] = np.nan\n\n # Step 2: Calculate length Lm(k) of each curve\n normalization = (n - 1) / (np.floor((n - k_subrange) / k).astype(int) * k)\n sets = (np.nansum(np.abs(np.diff(sig_values)), axis=1) * normalization) / k\n\n # Step 3: Compute average value over k sets of Lm(k)\n return np.sum(sets) / k\n\n\ndef _complexity_k_slope(kmax, signal, k_number=\"max\"):\n if k_number == \"max\":\n k_values = np.arange(1, kmax + 1)\n else:\n k_values = np.unique(np.linspace(1, kmax + 1, k_number).astype(int))\n\n \"\"\"Step 3 of Vega & Noel (2015)\"\"\"\n vectorized_Lk = np.vectorize(_complexity_k_Lk, excluded=[1])\n\n # Compute length of the curve, Lm(k)\n average_values = vectorized_Lk(k_values, signal)\n\n # Slope of best-fit line through points (slope equal to FD)\n slope, intercept = -np.polyfit(np.log(k_values), np.log(average_values), 1)\n return slope, intercept, {\"k_values\": k_values, \"average_values\": average_values}\n\n\n# =============================================================================\n# Plotting\n# =============================================================================\n\n\ndef _complexity_k_plot(k_range, slope_values, k_optimal, ax=None):\n\n # Prepare plot\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = None\n\n ax.set_title(\"Optimization of $k_{max}$ parameter\")\n ax.set_xlabel(\"$k_{max}$ values\")\n ax.set_ylabel(\"Higuchi Fractal Dimension (HFD) values\")\n colors = plt.cm.PuBu(np.linspace(0, 1, len(k_range)))\n\n # if single time series\n ax.plot(k_range, slope_values, color=\"#2196F3\", zorder=1)\n for i, j in enumerate(k_range):\n ax.scatter(k_range[i], slope_values[i], color=colors[i], marker=\"o\", zorder=2)\n ax.axvline(x=k_optimal, color=\"#E91E63\", label=\"Optimal $k_{max}$: \" + str(k_optimal))\n ax.legend(loc=\"upper right\")\n\n return fig\n",
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\n\n\ndef rsp_findpeaks(rsp_cleaned, sampling_rate=1000, method=\"khodadad2018\", amplitude_min=0.3):\n \"\"\"Extract extrema in a respiration (RSP) signal.\n\n Low-level function used by `rsp_peaks()` to identify inhalation and exhalation onsets (troughs and peaks\n respectively) in a preprocessed respiration signal using different sets of parameters.\n See `rsp_peaks()` for details.\n\n Parameters\n ----------\n rsp_cleaned : Union[list, np.array, pd.Series]\n The cleaned respiration channel as returned by `rsp_clean()`.\n sampling_rate : int\n The sampling frequency of 'rsp_cleaned' (in Hz, i.e., samples/second).\n method : str\n The processing pipeline to apply. Can be one of \"khodadad2018\" (default) or \"biosppy\".\n amplitude_min : float\n Only applies if method is \"khodadad2018\". Extrema that have a vertical distance smaller than\n (outlier_threshold * average vertical distance) to any direct neighbour are removed as false\n positive outliers. I.e., outlier_threshold should be a float with positive sign (the default is\n 0.3). Larger values of outlier_threshold correspond to more conservative thresholds (i.e.,\n more extrema removed as outliers).\n\n Returns\n -------\n info : dict\n A dictionary containing additional information, in this case the samples at which inhalation\n onsets and exhalation onsets occur, accessible with the keys \"RSP_Troughs\" and \"RSP_Peaks\", respectively.\n\n See Also\n --------\n rsp_clean, rsp_fixpeaks, rsp_peaks, signal_rate, rsp_amplitude,\n rsp_process, rsp_plot\n\n Examples\n --------\n >>> import neurokit2 as nk\n >>>\n >>> rsp = nk.rsp_simulate(duration=30, respiratory_rate=15)\n >>> cleaned = nk.rsp_clean(rsp, sampling_rate=1000)\n >>> info = nk.rsp_findpeaks(cleaned)\n >>> fig = nk.events_plot([info[\"RSP_Peaks\"], info[\"RSP_Troughs\"]], cleaned)\n >>> fig #doctest: +SKIP\n\n \"\"\"\n # Try retrieving correct column\n if isinstance(rsp_cleaned, pd.DataFrame):\n try:\n rsp_cleaned = rsp_cleaned[\"RSP_Clean\"]\n except NameError:\n try:\n rsp_cleaned = rsp_cleaned[\"RSP_Raw\"]\n except NameError:\n rsp_cleaned = rsp_cleaned[\"RSP\"]\n\n cleaned = np.array(rsp_cleaned)\n\n # Find peaks\n method = method.lower() # remove capitalised letters\n if method in [\"khodadad\", \"khodadad2018\"]:\n info = _rsp_findpeaks_khodadad(cleaned, amplitude_min)\n elif method == \"biosppy\":\n info = _rsp_findpeaks_biosppy(cleaned, sampling_rate=sampling_rate)\n else:\n raise ValueError(\"NeuroKit error: rsp_findpeaks(): 'method' should be one of 'khodadad2018' or 'biosppy'.\")\n\n return info\n\n\n# =============================================================================\n# Methods\n# =============================================================================\ndef _rsp_findpeaks_biosppy(rsp_cleaned, sampling_rate):\n\n extrema = _rsp_findpeaks_extrema(rsp_cleaned)\n extrema, amplitudes = _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0)\n\n peaks, troughs = _rsp_findpeaks_sanitize(extrema, amplitudes)\n\n # Apply minimum period outlier-criterion (exclude inter-breath-intervals\n # that produce breathing rate larger than 35 breaths per minute.\n outlier_idcs = np.where((np.diff(peaks) / sampling_rate) < 1.7)[0]\n\n peaks = np.delete(peaks, outlier_idcs)\n troughs = np.delete(troughs, outlier_idcs)\n\n info = {\"RSP_Peaks\": peaks, \"RSP_Troughs\": troughs}\n return info\n\n\ndef _rsp_findpeaks_khodadad(rsp_cleaned, amplitude_min=0.3):\n\n extrema = 
_rsp_findpeaks_extrema(rsp_cleaned)\n extrema, amplitudes = _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=amplitude_min)\n peaks, troughs = _rsp_findpeaks_sanitize(extrema, amplitudes)\n\n info = {\"RSP_Peaks\": peaks, \"RSP_Troughs\": troughs}\n return info\n\n\n# =============================================================================\n# Internals\n# =============================================================================\ndef _rsp_findpeaks_extrema(rsp_cleaned):\n # Detect zero crossings (note that these are zero crossings in the raw\n # signal, not in its gradient).\n greater = rsp_cleaned > 0\n smaller = rsp_cleaned < 0\n risex = np.where(np.bitwise_and(smaller[:-1], greater[1:]))[0]\n fallx = np.where(np.bitwise_and(greater[:-1], smaller[1:]))[0]\n\n if risex[0] < fallx[0]:\n startx = \"rise\"\n elif fallx[0] < risex[0]:\n startx = \"fall\"\n\n allx = np.concatenate((risex, fallx))\n allx.sort(kind=\"mergesort\")\n\n # Find extrema by searching minima between falling zero crossing and\n # rising zero crossing, and searching maxima between rising zero\n # crossing and falling zero crossing.\n extrema = []\n for i in range(len(allx) - 1):\n\n # Determine whether to search for minimum or maximum.\n if startx == \"rise\":\n if (i + 1) % 2 != 0:\n argextreme = np.argmax\n else:\n argextreme = np.argmin\n elif startx == \"fall\":\n if (i + 1) % 2 != 0:\n argextreme = np.argmin\n else:\n argextreme = np.argmax\n\n # Get the two zero crossings between which the extreme will be\n # searched.\n beg = allx[i]\n end = allx[i + 1]\n\n extreme = argextreme(rsp_cleaned[beg:end])\n extrema.append(beg + extreme)\n\n extrema = np.asarray(extrema)\n return extrema\n\n\ndef _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0.3):\n\n # Only consider those extrema that have a minimum vertical distance to\n # their direct neighbor, i.e., define outliers in absolute amplitude\n # difference between neighboring extrema.\n vertical_diff = np.abs(np.diff(rsp_cleaned[extrema]))\n median_diff = np.median(vertical_diff)\n min_diff = np.where(vertical_diff > (median_diff * amplitude_min))[0]\n extrema = extrema[min_diff]\n\n # Make sure that the alternation of peaks and troughs is unbroken. If\n # alternation of sign in extdiffs is broken, remove the extrema that\n # cause the breaks.\n amplitudes = rsp_cleaned[extrema]\n extdiffs = np.sign(np.diff(amplitudes))\n extdiffs = np.add(extdiffs[0:-1], extdiffs[1:])\n removeext = np.where(extdiffs != 0)[0] + 1\n extrema = np.delete(extrema, removeext)\n amplitudes = np.delete(amplitudes, removeext)\n\n return extrema, amplitudes\n\n\ndef _rsp_findpeaks_sanitize(extrema, amplitudes):\n # To be able to consistently calculate breathing amplitude, make sure that\n # the extrema always start with a trough and end with a peak, since\n # breathing amplitude will be defined as vertical distance between each\n # peak and the preceding trough. Note that this also ensures that the\n # number of peaks and troughs is equal.\n if amplitudes[0] > amplitudes[1]:\n extrema = np.delete(extrema, 0)\n if amplitudes[-1] < amplitudes[-2]:\n extrema = np.delete(extrema, -1)\n peaks = extrema[1::2]\n troughs = extrema[0:-1:2]\n\n return peaks, troughs\n"
] | [
[
"numpy.sum",
"numpy.vectorize",
"numpy.diff",
"numpy.floor",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.max",
"numpy.log",
"numpy.array",
"numpy.linspace"
],
[
"numpy.diff",
"numpy.asarray",
"numpy.median",
"numpy.where",
"numpy.add",
"numpy.delete",
"numpy.array",
"numpy.concatenate",
"numpy.bitwise_and"
]
] |
c3sr/go-pytorch | [
"0d1f6edbc6e48f0f68055274af0997e2ff9b0ce1"
] | [
"scripts/convert_model.py"
] | [
"import pretrainedmodels as pm\nimport torch\n\n# models giving an error\nerrored_model_name = ['fbresnet152', 'bninception', 'inceptionv4', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']\n\n# collect all (model, pretrained) tuples\npm_args = []\nfor model_name in pm.model_names:\n\tfor pretrained in pm.pretrained_settings[model_name]:\n\t\tif pretrained in ['imagenet', 'imagenet+5k']:\n\t\t\tpm_args.append([model_name, pretrained])\n\nfor i in range(len(pm_args)):\n\t# download model\n\tmodel_name = pm_args[i][0]\n\tpretrained_on = pm_args[i][1]\n\tmodel = pm.__dict__[model_name](num_classes=1000, pretrained=pretrained_on)\n\tmodel.eval()\n\tif model_name not in errored_model_name:\n\t\t# fetch input_size\n\t\tprint(\"REFERENCE model - \", model_name)\n\t\tmodel_settings = pm.pretrained_settings[model_name]\n\t\tinput_size = model_settings[pretrained_on]['input_size'][1]\n\t\tno_of_channels = model_settings[pretrained_on]['input_size'][0]\n\t\texample = torch.rand(1, no_of_channels, input_size, input_size)\n\t\ttraced_script_module = torch.jit.trace(model, example, check_trace=False)\n\t\ttraced_script_module.save(model_name + \"-\" + pretrained_on + \".pt\")\n\t\tprint(\"SUCCESS: Converted model - \", model_name, \"-\", pretrained_on)\n\telse:\n\t\tprint(\"ERROR: Could not convert model - \", model_name, \"-\", pretrained_on)\n"
] | [
[
"torch.rand",
"torch.jit.trace"
]
] |
pbarton666/virtual_classroom | [
"a9d0dc2eb16ebc4d2fd451c3a3e6f96e37c87675"
] | [
"dkr-py310/docker-student-portal-310/course_files/python_for_excel_users/basic_operations/spreadsheet_functions.py"
] | [
"import os\nimport pandas as pd\nfrom openpyxl import load_workbook\n\ndata_dir = \".\"\ntemplate = 'chartme_template.xlsx'\nnew_wkbk = 'chartme_data_added.xlsx'\ntab_name = 'data'\n\nROWS_AXIS = 0\nCOLS_AXIS = 1\n\ndef normalize(series):\n \"\"\"Accepts a column (a pandas.Series object) and returns a normalized\n version. Operations are applied to all elements of the column.\n 'Normalized' means centering its distribution.\"\"\"\n mean = series.mean()\n sdev = series.std(ddof = 0) #population standard dev\n normalized = (series - mean) / sdev\n return normalized\n\ndef element_wise_operations(df, col0, col1, axis = ROWS_AXIS):\n \"\"\"Accepts a DataFrame and applies element-wise operations. Operations \"\"\"\n\ndef create_data():\n \"\"\"Creates and returns a DataFrame\"\"\"\n data = \\\n [\n ['baseball', 180, 1100],\n ['wrestling', 30, 300],\n ['gymnastics', 1, 120], \n ]\n cols = ['sport', 'duration', 'fans' ]\n \n sports_df = pd.DataFrame(data=data, columns=cols)\n \n return sports_df\n\ndf = create_data()\ndf['z_duration'] = normalize(df['duration'])\nprint(df)\nz=1\n\n#Add a new column\n\n#Add a column based on others\n\n#Apply a custom spreadsheet function\na=1\n\n\n\n\n\n"
] | [
[
"pandas.DataFrame"
]
] |
galenvincent/encapZulate-1 | [
"ed2d749befa64cb9e04aefb0cedee322a2614da2"
] | [
"src/encapzulate/scripts/make_batches_dummy_data.py"
] | [
"from pathlib import Path\n\nimport numpy as np\nimport pandas as pd\n\nif \"ihome\" in str(Path.home()):\n path_photoz = Path.home() / \"photoz\"\n # path_photoz = Path(\"/bgfs\") / \"jnewman\" / \"bid13\" / \"photoZ\"\nelif \"/Users/andrews\" in str(Path.home()):\n path_photoz = Path.home() / \"projects\" / \"photoz\"\n\npath_pasquet2019 = path_photoz / \"data\" / \"pasquet2019\"\npath_out = path_pasquet2019 / \"batches\" / \"dummy_data\"\npath_out.mkdir(exist_ok=True)\n\nbatch_size = 32\ninput_shape = (64, 64, 5)\n\nlabels = pd.read_hdf(path_pasquet2019 / \"dummy_data.h5\")\npaths = labels[\"filePath\"].iloc[:batch_size]\n\nbatches_per_epoch = int(np.floor(len(labels) / batch_size))\n\npath_cubes = pd.Series([str(path_pasquet2019 / \"cubes\") for _ in range(len(paths))])\ncube_ids = paths.str.split(\"cubes\").str[1]\npaths = path_cubes.str.cat(cube_ids)\n\nfor ii in range(batches_per_epoch):\n\n out = np.empty((batch_size, *input_shape))\n ind_start = batches_per_epoch * batch_size\n ind_end = ind_start + batch_size\n for jj, path in enumerate(paths[ind_start:ind_end]):\n out[jj] = np.load(path)\n\n np.save(path_out / f\"batch-{ii:05}.npy\", out)\n"
] | [
[
"numpy.save",
"pandas.read_hdf",
"numpy.load",
"numpy.empty"
]
] |
Wondersui/gluon-cv | [
"a990c2f148efccd3f7dc0cc0ccd81c03a0f91dd5"
] | [
"docs/tutorials/pose/demo_simple_pose.py"
] | [
"\"\"\"1. Predict with pre-trained Simple Pose Estimation models\n==========================================\n\nThis article shows how to play with pre-trained Simple Pose models with only a few\nlines of code.\n\nFirst let's import some necessary libraries:\n\"\"\"\n\nfrom matplotlib import pyplot as plt\nfrom gluoncv import model_zoo, data, utils\nfrom gluoncv.data.transforms.pose import detector_to_simple_pose, heatmap_to_coord\n\n######################################################################\n# Load a pretrained model\n# -------------------------\n#\n# Let's get a Simple Pose model trained with input images of size 256x192 on MS COCO\n# dataset. We pick the one using ResNet-18 V1b as the base model. By specifying\n# ``pretrained=True``, it will automatically download the model from the model\n# zoo if necessary. For more pretrained models, please refer to\n# :doc:`../../model_zoo/index`.\n#\n# Note that a Simple Pose model takes a top-down strategy to estimate\n# human pose in detected bounding boxes from an object detection model.\n\ndetector = model_zoo.get_model('yolo3_mobilenet1.0_coco', pretrained=True)\npose_net = model_zoo.get_model('simple_pose_resnet18_v1b', pretrained=True)\n\n# Note that we can reset the classes of the detector to only include\n# human, so that the NMS process is faster.\n\ndetector.reset_class([\"person\"], reuse_weights=['person'])\n\n######################################################################\n# Pre-process an image for detector, and make inference\n# --------------------\n#\n# Next we download an image, and pre-process with preset data transforms. Here we\n# specify that we resize the short edge of the image to 512 px. But you can\n# feed an arbitrarily sized image.\n#\n# This function returns two results. The first is a NDArray with shape\n# ``(batch_size, RGB_channels, height, width)``. It can be fed into the\n# model directly. The second one contains the images in numpy format to\n# easy to be plotted. Since we only loaded a single image, the first dimension\n# of `x` is 1.\n\nim_fname = utils.download('https://github.com/dmlc/web-data/blob/master/' +\n 'gluoncv/pose/soccer.png?raw=true',\n path='soccer.png')\nx, img = data.transforms.presets.ssd.load_test(im_fname, short=512)\nprint('Shape of pre-processed image:', x.shape)\n\nclass_IDs, scores, bounding_boxs = detector(x)\n\n######################################################################\n# Process tensor from detector to keypoiny network\n# --------------------\n#\n# Next we process the output from the detector.\n#\n# For a Simple Pose network, it expects the input has the size 256x192,\n# and the human is centered. We crop the bounding boxed area\n# for each human, and resize it to 256x192, then finally normalize it.\n#\n# In order to make sure the bounding box has included the entire person,\n# we usually slightly upscale the box size.\n\npose_input, upscale_bbox = detector_to_simple_pose(img, class_IDs, scores, bounding_boxs)\n\n######################################################################\n# Predict with a Simple Pose network\n# --------------------\n#\n# Now we can make prediction.\n#\n# A Simple Pose network predicts the heatmap for each joint (i.e. 
keypoint).\n# After the inference we search for the highest value in the heatmap and map it to the\n# coordinates on the original image.\n\npredicted_heatmap = pose_net(pose_input)\npred_coords, confidence = heatmap_to_coord(predicted_heatmap, upscale_bbox)\n\n######################################################################\n# Display the pose estimation results\n# ---------------------\n#\n# We can use :py:func:`gluoncv.utils.viz.plot_keypoints` to visualize the\n# results.\n\nax = utils.viz.plot_keypoints(img, pred_coords, confidence,\n class_IDs, bounding_boxs, scores,\n box_thresh=0.5, keypoint_thresh=0.2)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show"
]
] |
limingwu8/PdM | [
"739b4b118f1c81fae704b15a9aa84d1f8c7b0196"
] | [
"Sensor.py"
] | [
"import matplotlib\n# matplotlib.use('Agg')\nfrom pandas import DataFrame\nfrom pandas import Series\nfrom pandas import concat\nfrom pandas import read_csv\nfrom pandas import datetime\nimport pandas as pd\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential,load_model\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom math import sqrt\nfrom matplotlib import pyplot\nfrom numpy import array\nimport datetime\nfrom matplotlib.dates import DateFormatter\nfrom random import shuffle\nimport numpy as np\nfrom scipy import stats\nimport os\nimport pickle\n\nclass Sensors:\n units = {'MAIN_FILTER_IN_PRESSURE':'PSI','MAIN_FILTER_OIL_TEMP':'Celsius',\n 'MAIN_FILTER_OUT_PRESSURE':'PSI','OIL_RETURN_TEMPERATURE':'Celsius',\n 'TANK_FILTER_IN_PRESSURE':'PSI','TANK_FILTER_OUT_PRESSURE':'PSI',\n 'TANK_LEVEL':'Centimeter','TANK_TEMPERATURE':'Celsius','FT-202B':'Micrometer',\n 'FT-204B':'Micrometer','PT-203':'Micrometer','PT-204':'Micrometer'}\n sensor_name_acronym = {'MAIN_FILTER_IN_PRESSURE':'P1','MAIN_FILTER_OIL_TEMP':'T1',\n 'MAIN_FILTER_OUT_PRESSURE':'PSI','OIL_RETURN_TEMPERATURE':'T2',\n 'TANK_FILTER_IN_PRESSURE':'PSI','TANK_FILTER_OUT_PRESSURE':'PSI',\n 'TANK_LEVEL':'L1','TANK_TEMPERATURE':'T3','FT-202B':'V1',\n 'FT-204B':'V2','PT-203':'V3','PT-204':'V4'}\n threshold = {'MAIN_FILTER_IN_PRESSURE': (40, 65, 80), 'MAIN_FILTER_OIL_TEMP': (40, 55, 60),\n 'MAIN_FILTER_OUT_PRESSURE': 'PSI', 'OIL_RETURN_TEMPERATURE': (40, 55, 60),\n 'TANK_FILTER_IN_PRESSURE': 'PSI', 'TANK_FILTER_OUT_PRESSURE': 'PSI',\n 'TANK_LEVEL': (40, 48, 50), 'TANK_TEMPERATURE': (40, 55, 60), 'FT-202B': (0, 20, 50),\n 'FT-204B': (0, 10, 20), 'PT-203': (0, 20, 50), 'PT-204': (0, 10, 20)}\n\n def __init__(self, dataset_path, sensor_name,sample_rate, root_path, n_epochs = 1, n_batch = 1,\n save_info = 0, n_neurons = 1, run_on_local = 1, train = 1, n_lag = 1, n_seq = 1):\n self.n_lag = n_lag\n self.n_seq = n_seq\n self.n_epochs = n_epochs\n self.n_batch = n_batch\n self.n_neurons = n_neurons\n self.dataset_path = dataset_path\n self.sensor_name = sensor_name\n self.sample_rate = sample_rate\n self.root_path = root_path\n self.save_info = save_info\n self.run_on_local = run_on_local\n self.train = train\n self.init_file_name()\n # self.normality_test()\n\n def get_units(self):\n return self.units\n\n def init_file_name(self):\n # self.dataset_path = self.dataset_path + self.sample_rate + '/' + self.sensor_name + '.csv'\n self.dataset_path = os.path.join(self.dataset_path, self.sample_rate, self.sensor_name + '.csv')\n self.file_name = self.sensor_name + '-' + self.sample_rate\n self.file_path = os.path.join(self.root_path, self.sensor_name, self.sample_rate, str(self.n_seq) + '_step')\n\n def get_files(self, file_dir):\n '''\n Args:\n file_dir: file directory\n Returns:\n list of file path\n '''\n dataset_path = []\n for root, dirs, files in os.walk(file_dir):\n for file in files:\n dataset_path.append(os.path.join(root, file))\n return dataset_path\n\n # date-time parsing function for loading the dataset\n def parser(self, x):\n return datetime.strptime('190' + x, '%Y-%m')\n\n # convert time series into supervised learning problem\n def series_to_supervised(self, data, n_in=1, n_out=1, dropnan=True):\n n_vars = 1 if type(data) is list else data.shape[1]\n df = DataFrame(data)\n cols, names = list(), list()\n # input sequence (t-n, ... 
t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]\n # forecast sequence (t, t+1, ... t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]\n # put it all together\n agg = concat(cols, axis=1)\n agg.columns = names\n # drop rows with NaN values\n if dropnan:\n agg.dropna(inplace=True)\n return agg\n\n # create a differenced series\n def difference(self, dataset, interval=1):\n diff = list()\n for i in range(interval, len(dataset)):\n value = dataset[i] - dataset[i - interval]\n diff.append(value)\n return Series(diff)\n\n # transform series into train and test sets for supervised learning\n def prepare_data(self, series, n_test, n_lag, n_seq):\n # extract raw values\n raw_values = series.values\n # transform data to be stationary\n diff_series = self.difference(raw_values, 1)\n diff_values = diff_series.values\n diff_values = diff_values.reshape(len(diff_values), 1)\n # rescale values to -1, 1\n scaler = MinMaxScaler(feature_range=(-1, 1))\n scaled_values = scaler.fit_transform(diff_values)\n scaled_values = scaled_values.reshape(len(scaled_values), 1)\n # transform into supervised learning problem X, y\n supervised = self.series_to_supervised(scaled_values, n_lag, n_seq)\n supervised_values = supervised.values\n # split into train and test sets\n train, test = supervised_values[0:-n_test], supervised_values[-n_test:]\n return scaler, train, test\n\n # fit an LSTM network to training data\n def fit_lstm(self, train, n_lag, n_seq, n_batch, nb_epoch, n_neurons):\n # reshape training into [samples, timesteps, features]\n X, y = train[:, 0:n_lag], train[:, n_lag:]\n X = X.reshape(X.shape[0], 1, X.shape[1])\n # design network\n model = Sequential()\n model.add(LSTM(n_neurons, batch_input_shape=(n_batch, X.shape[1], X.shape[2]), stateful=True))\n model.add(Dense(y.shape[1]))\n model.compile(loss='mean_squared_error', optimizer='adam')\n # fit network\n for i in range(nb_epoch):\n model.fit(X, y, epochs=1, batch_size=n_batch, verbose=0, shuffle=False)\n model.reset_states()\n return model\n\n # make one forecast with an LSTM,\n def forecast_lstm(self, model, X, n_batch):\n # reshape input pattern to [samples, timesteps, features]\n X = X.reshape(1, 1, len(X))\n # make forecast\n forecast = model.predict(X, batch_size=n_batch)\n # convert to array\n return [x for x in forecast[0, :]]\n\n # evaluate the persistence model\n def make_forecasts(self, model, n_batch, test, n_lag, n_seq):\n forecasts = list()\n for i in range(len(test)):\n X, y = test[i, 0:n_lag], test[i, n_lag:]\n # make forecast\n forecast = self.forecast_lstm(model, X, n_batch)\n # store the forecast\n forecasts.append(forecast)\n return forecasts\n\n # invert differenced forecast\n def inverse_difference(self, last_ob, forecast):\n # invert first forecast\n inverted = list()\n inverted.append(forecast[0] + last_ob)\n # propagate difference forecast using inverted first value\n for i in range(1, len(forecast)):\n inverted.append(forecast[i] + inverted[i - 1])\n return inverted\n\n # inverse data transform on forecasts\n def inverse_transform(self, series, forecasts, scaler, n_test):\n inverted = list()\n for i in range(len(forecasts)):\n # create array from forecast\n forecast = array(forecasts[i])\n forecast = forecast.reshape(1, len(forecast))\n # invert scaling\n inv_scale = 
scaler.inverse_transform(forecast)\n inv_scale = inv_scale[0, :]\n # invert differencing\n index = len(series) - n_test + i - 1\n last_ob = series.values[index]\n inv_diff = self.inverse_difference(last_ob, inv_scale)\n # store\n inverted.append(inv_diff)\n return inverted\n\n # evaluate the RMSE for each forecast time step\n def evaluate_forecasts(self, test, forecasts, n_lag, n_seq, sensor_name):\n for i in range(n_seq):\n actual = [row[i] for row in test]\n predicted = [forecast[i] for forecast in forecasts]\n rmse = sqrt(mean_squared_error(actual, predicted))\n rmse_percent = rmse / np.mean(actual)\n if self.save_info & self.train:\n # save data to pickle\n pickle.dump(actual, self.pkl)\n pickle.dump(predicted, self.pkl)\n print('t+%d RMSE: %f, error percent: %f%%' % ((i + 1), rmse, rmse_percent * 100))\n\n if self.save_info & self.train:\n self.logs.write('t+%d RMSE: %f, error percent: %f%%\\n' % ((i + 1), rmse, rmse_percent * 100))\n\n # plot the forecasts in the context of the original dataset\n def plot_forecasts(self, series, forecasts, n_test, file_name, sensor_name, time, n_seq):\n\n plot_one_line = 1\n label_fontsize = 35\n axis_fontsize = 30\n linewidth = 5\n\n # plot the entire dataset in blue\n fig = pyplot.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n # make x label in a specific format\n ax1.xaxis_date()\n ax1.xaxis.set_major_formatter(DateFormatter('%m-%d'))\n forecasts = np.array(forecasts)\n pyplot.plot(time, series.values, label='Actual data', linewidth=linewidth)\n ####################### plot the forecast value #########################\n X = []\n for i in range(1, forecasts.shape[1] + 1):\n off_s = len(series) - n_test + i - n_seq\n off_e = off_s + n_test - 1\n X.append(range(off_s, off_e + 1))\n X = np.array(X)\n Y = np.array(forecasts)\n for i in range(0, Y.shape[1]):\n index = X[i]\n pyplot.plot(time[index[0]:index[len(index) - 1] + 1], Y[:, i], label='Prediction: t+' + str(i + 1), linewidth=linewidth)\n if plot_one_line == 1:\n break\n pyplot.hlines(self.threshold[self.sensor_name][0], time[0], time[-1], colors='r', label='high', linewidth=linewidth)\n pyplot.hlines(self.threshold[self.sensor_name][1], time[0], time[-1], colors='g', label='normal', linewidth=linewidth)\n pyplot.hlines(self.threshold[self.sensor_name][2], time[0], time[-1], colors='r', label='low', linewidth=linewidth)\n\n pyplot.title(self.sensor_name_acronym[self.sensor_name], fontsize=label_fontsize)\n pyplot.legend(fontsize=label_fontsize, loc='upper right')\n pyplot.xlabel('Date', fontsize=label_fontsize)\n pyplot.ylabel(self.units[sensor_name], fontsize=label_fontsize)\n pyplot.xticks(fontsize=axis_fontsize)\n pyplot.yticks(fontsize=axis_fontsize)\n # replace date to sequential days\n\n ######################### plot zoomed in figure ########################\n fig_zoomed = pyplot.figure()\n ax2 = fig_zoomed.add_subplot(1, 1, 1)\n ax2.xaxis_date()\n ax2.xaxis.set_major_formatter(DateFormatter('%m-%d'))\n # plot original data\n start = X[0][0] - 1\n end = len(series)\n pyplot.plot(time[start:end], series[start:end], label='Actual data', linewidth=linewidth)\n for i in range(0, Y.shape[1]):\n index = X[i]\n pyplot.plot(time[index[0]:index[len(index) - 1] + 1], Y[:, i], label='Prediction: t+' + str(i + 1), linewidth=linewidth)\n if plot_one_line == 1:\n break\n\n pyplot.title(self.sensor_name_acronym[self.sensor_name], fontsize=label_fontsize)\n pyplot.legend(fontsize=label_fontsize, loc='upper right')\n pyplot.xlabel('Date', fontsize=label_fontsize)\n pyplot.ylabel(self.units[sensor_name], 
fontsize=label_fontsize)\n pyplot.xticks(fontsize=axis_fontsize)\n pyplot.yticks(fontsize=axis_fontsize)\n # show the plot\n fig.show()\n fig_zoomed.show()\n\n if self.save_info:\n fig.set_size_inches(18.5, 10.5)\n fig_zoomed.set_size_inches(18.5, 10.5)\n fig.savefig(os.path.join(self.file_path, file_name + '.png'), bbox_inches='tight', dpi=150)\n fig_zoomed.savefig(os.path.join(self.file_path, file_name + '-zoomed.png'), bbox_inches='tight', dpi=150)\n\n pyplot.close(fig)\n pyplot.close(fig_zoomed)\n\n def _plot(self, series, forecasts, n_test, file_name, sensor_name, time, n_seq):\n \"\"\"\n Same as function 'plot_forecasts', replace the datetime in x-axis with days.\n \"\"\"\n plot_one_line = 1\n label_fontsize = 35\n axis_fontsize = 30\n linewidth = 5\n\n # plot the entire dataset in blue\n fig = pyplot.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n # make x label in a specific format\n # ax1.xaxis_date()\n # ax1.xaxis.set_major_formatter(DateFormatter('%m-%d'))\n forecasts = np.array(forecasts)\n pyplot.plot(series.index, series.values, label='Actual data', linewidth=linewidth)\n ####################### plot the forecast value #########################\n X = []\n for i in range(1, forecasts.shape[1] + 1):\n off_s = len(series) - n_test + i - n_seq\n off_e = off_s + n_test - 1\n X.append(range(off_s, off_e + 1))\n X = np.array(X)\n Y = np.array(forecasts)\n for i in range(0, Y.shape[1]):\n index = X[i]\n pyplot.plot(np.arange(index[0], index[-1] + 1), Y[:, i], label='Prediction: t+' + str(i + 1),\n linewidth=linewidth)\n if plot_one_line == 1:\n break\n pyplot.hlines(self.threshold[self.sensor_name][0], series.index[0], series.index[-1], colors='r',\n linewidth=linewidth)\n pyplot.hlines(self.threshold[self.sensor_name][1], series.index[0], series.index[-1], colors='g', label='normal',\n linewidth=linewidth)\n pyplot.hlines(self.threshold[self.sensor_name][2], series.index[0], series.index[-1], colors='r',\n linewidth=linewidth)\n\n pyplot.title(self.sensor_name_acronym[self.sensor_name], fontsize=label_fontsize)\n pyplot.legend(fontsize=label_fontsize, loc='upper left')\n pyplot.xlabel('Days', fontsize=label_fontsize)\n pyplot.ylabel(self.units[sensor_name], fontsize=label_fontsize)\n pyplot.xticks(fontsize=axis_fontsize)\n pyplot.yticks(fontsize=axis_fontsize)\n # replace date to sequential days\n\n # show the plot\n fig.show()\n\n if self.save_info:\n fig.set_size_inches(18.5, 10.5)\n fig.savefig(os.path.join(self.file_path, file_name + '.png'), bbox_inches='tight', dpi=150)\n\n pyplot.close(fig)\n\n def load_dataset(self):\n series = read_csv(self.dataset_path, sep=',')\n header = list(series.columns.values)\n\n raw_time = series[header[0]]\n raw_values = series[header[1]]\n\n raw_time = raw_time.values\n raw_datetime = [datetime.datetime.strptime(\n i, \"%Y-%m-%d %H:%M:%S\") for i in raw_time]\n raw_values = raw_values.values\n\n series_time = Series(raw_time)\n series_values = Series(raw_values)\n return series, series_values, raw_datetime\n\n def open_file(self):\n\n if not os.path.exists(self.file_path):\n try:\n os.makedirs(self.file_path)\n except:\n print('create folder error!')\n try:\n self.logs = open(os.path.join(self.file_path, 'logs.txt'), 'w')\n self.pkl = open(os.path.join(self.file_path, 'data.pkl'),'wb')\n except:\n print('open file error!')\n def close_file(self):\n try:\n self.logs.close()\n self.pkl.close()\n except:\n print('close file error!')\n\n def run_train(self):\n # create logs files\n self.open_file()\n\n print('processing the dataset of ', 
self.file_name)\n if self.save_info:\n self.logs.write(self.file_name + '\\n')\n\n # load dataset\n # series = read_csv(self.dataset_path, sep=',')\n # header = list(series.columns.values)\n #\n # raw_time = series[header[0]]\n # raw_values = series[header[1]]\n #\n # raw_time = raw_time.values\n # raw_datetime = [datetime.datetime.strptime(\n # i, \"%d-%b-%Y %H:%M:%S\") for i in raw_time]\n # raw_values = raw_values.values\n #\n # series_time = Series(raw_time)\n # series_values = Series(raw_values)\n series, series_values, raw_datetime = self.load_dataset()\n # configure\n n_test = int(0.2 * series.shape[0])\n\n # prepare data\n scaler, train, test = self.prepare_data(series_values, n_test, self.n_lag, self.n_seq)\n # fit model\n model = self.fit_lstm(train, self.n_lag, self.n_seq, self.n_batch, self.n_epochs, self.n_neurons)\n if self.save_info == 1:\n # save model\n model_name = 'model_' + self.file_name + '-' + 'seq_' + str(self.n_seq) + '.h5'\n model.save(os.path.join(self.file_path, model_name))\n\n # make prediction\n forecasts = self.make_forecasts(model, self.n_batch, test, self.n_lag, self.n_seq)\n # inverse transform forecasts and test\n forecasts = self.inverse_transform(series_values, forecasts, scaler, n_test + self.n_seq - 1)\n actual = [row[self.n_lag:] for row in test]\n actual = self.inverse_transform(series_values, actual, scaler, n_test + self.n_seq - 1)\n # evaluate forecasts\n self.evaluate_forecasts(actual, forecasts, self.n_lag, self.n_seq, self.file_name)\n # plot forecasts\n # self.plot_forecasts(series_values, forecasts, n_test, self.file_name, self.sensor_name, raw_datetime, self.n_seq)\n self._plot(series_values, forecasts, n_test, self.file_name, self.sensor_name, raw_datetime, self.n_seq)\n\n # close file\n self.close_file()\n\n def run_update(self):\n pass\n\n\n def _random_shuffle(self, series):\n # series['value'] = series['value'].sample(frac=1).reset_index(drop=True)\n value = list(series['value'])\n chunks = [value[i:i+70] for i in range(0, len(value), 70)]\n shuffle(chunks)\n flat_list = [item for sublist in chunks for item in sublist]\n series['value'] = pd.Series(flat_list)\n # series.to_csv(self.dataset_path, sep=',', encoding='utf-8', index=False)\n return series, series['value']\n\n # if the prediction values are minus, set them zero\n def constrain(self, forecasts):\n for i in range(0, len(forecasts)):\n item = forecasts[i]\n for j in range(0, len(item)):\n if forecasts[i][j] < 0:\n forecasts[i][j] = 0\n return forecasts\n\n def _normalize(self):\n \"\"\"\n Normalize the dataset to make them not original\n :return:\n \"\"\"\n # load dataset\n series, series_values, raw_datetime = self.load_dataset()\n values = series_values\n if self.sensor_name in ['MAIN_FILTER_OIL_TEMP', 'OIL_RETURN_TEMPERATURE', 'TANK_TEMPERATURE']:\n # Convert Fahrenheit to Degree\n values = (values-32)/1.8\n # Normalize to 35 degree to 65 degree\n range = max(values) - min(values)\n a = (values - min(values)) / range\n range2 = 65 - 35\n a = (a * range2) + 35\n\n elif self.sensor_name in ['FT-202B', 'FT-204B', 'PT-203', 'PT-204']:\n # Convert Mils to Micrometre(um)\n values = 25.4*values\n # Normalize to 0-50 Micrometre\n range = max(values) - min(values)\n a = (values - min(values)) / range\n range2 = 50 - 0\n a = (a * range2) + 0\n elif self.sensor_name in ['MAIN_FILTER_IN_PRESSURE']:\n # Normalize to 10-45 PSI\n range = max(values) - min(values)\n a = (values - min(values)) / range\n range2 = 45 - 10\n a = (a * range2) + 10\n elif self.sensor_name in ['TANK_LEVEL']:\n # 
Convert Inch to Centimeter(CM)\n values = values*2.54\n # Normalize to 40-60 CM\n range = max(values) - min(values)\n a = (values - min(values)) / range\n range2 = 60 - 40\n a = (a * range2) + 40\n series.iloc[:, 1] = values\n print('Starting normalize ' + self.sensor_name)\n # Save normalized results\n series.to_csv('./dataset/csv/sampled/sample_1_day_normalized/' + self.sensor_name + '.csv', sep=',', encoding='utf-8', index=False)\n print('Normalize ' + self.sensor_name + ' data done!')\n\n def normality_test(self):\n _, series_values, _ = self.load_dataset()\n results = stats.shapiro(series_values)\n if results[1] > 0.05:\n self.normality = 1\n else:\n self.normality = 0\n # write results to a file\n # with open(os.path.join(self.root_path, 'normality.txt'), 'a') as f:\n # f.write('sensor name: ' + str(self.sensor_name + '-' + self.sample_rate) + ' ,normality: ' + str(self.normality) + '\\n')\n # save histogram image\n # fig = pyplot.figure()\n # pyplot.hist(series_values)\n # pyplot.title(self.file_name, fontsize=20)\n # pyplot.xlabel('Value', fontsize=16)\n # pyplot.ylabel('Frequency', fontsize=16)\n # fig.savefig(os.path.join(self.root_path, 'distribution_test', self.file_name + '.png'), bbox_inches='tight', dpi=150)\n\n def get_health_score(self,raw_datetime, prediction_value, n_test):\n _, series_values, _ = self.load_dataset()\n # calculate the distribution of the training data\n window = series_values[:len(series_values)-n_test]\n mu = np.mean(window)\n sigma = np.std(window)\n cdf = stats.norm.cdf(prediction_value, loc=mu, scale = sigma)\n health_index = 1 - abs(cdf - 0.5)*2\n df = pd.DataFrame({'time':np.array(raw_datetime)[-len(prediction_value):], 'prediction_value':np.squeeze(prediction_value), 'health_index':np.squeeze(health_index)})\n if self.save_info:\n # save health index to file\n print('save health index to csv starts...')\n df.to_csv(os.path.join(self.file_path, 'health_index.csv'), sep=',', encoding='utf-8',index=False)\n df.to_csv(os.path.join('./health_index/health_index_pred/',self.sensor_name + '.csv'), sep=',', encoding='utf-8', index=False)\n print('save health index to csv done...')\n\n return health_index\n\n def load_model_and_predict(self):\n # load model\n print('loading model ' + self.file_name + '.h5...')\n model = load_model(os.path.join(self.file_path, 'model_' + self.file_name + '-' + 'seq_' + str(self.n_seq) + '.h5'))\n # load dataset\n series, series_values, raw_datetime = self.load_dataset()\n # In order to make fake data, we need to random shuffle the values\n # series, series_values = self._random_shuffle(series)\n # n_test = int(0.2 * series.shape[0])\n n_test = 30\n scaler, train, test = self.prepare_data(series_values, n_test, self.n_lag, self.n_seq)\n # make a prediction\n forecasts = self.make_forecasts(model, self.n_batch, test, self.n_lag, self.n_seq)\n # inverse transform forecasts and test pyplot.show()\n\n forecasts = self.inverse_transform(series_values, forecasts, scaler, n_test + self.n_seq - 1)\n # map forecasts to a health score\n # self.get_health_score(raw_datetime, forecasts, n_test)\n\n actual = [row[self.n_lag:] for row in test]\n actual = self.inverse_transform(series_values, actual, scaler, n_test + self.n_seq - 1)\n # evaluate forecasts\n self.evaluate_forecasts(actual, forecasts, self.n_lag, self.n_seq, self.file_name)\n # plot forecasts\n # self.plot_forecasts(series_values, forecasts, n_test, self.file_name, self.sensor_name, raw_datetime, self.n_seq)\n self._plot(series_values, forecasts, n_test, self.file_name, 
self.sensor_name, raw_datetime, self.n_seq)\n\n\n def get_pred_health_score(self):\n print('loading model ' + self.file_name + '.h5...')\n model = load_model(\n os.path.join(self.file_path, 'model_' + self.file_name + '-' + 'seq_' + str(self.n_seq) + '.h5'))\n # load dataset\n series, series_values, raw_datetime = self.load_dataset()\n # In order to make fake data, we need to random shuffle the values\n # series, series_values = self._random_shuffle(series)\n\n # number of testing data, here use Novermber's data as testing\n a = [raw_datetime[i].month == 11 for i in range(0, len(raw_datetime))]\n n_test = len(np.where(a)[0])\n scaler, train, test = self.prepare_data(series_values, n_test, self.n_lag, self.n_seq)\n # make a prediction\n forecasts = self.make_forecasts(model, self.n_batch, test, self.n_lag, self.n_seq)\n # inverse transform forecasts and test pyplot.show()\n\n forecasts = self.inverse_transform(series_values, forecasts, scaler, n_test + self.n_seq - 1)\n forecasts = self.constrain(forecasts)\n # for sensor 'FT-202B' and 'PT-203', we should use log transfer to make them looks like Gaussian\n if self.sensor_name in ['FT-202B', 'PT-203', 'FT-204B','PT-204']:\n # use log transform\n # normal, low, high = self.operating_range\n # normal = np.log(normal + 10)\n # low = np.log(low + 10)\n # high = np.log(high + 10)\n # three_sigma = abs(normal-low) if abs(normal-low)>abs(normal-high) else abs(normal-high)\n # mu = normal\n # sigma = three_sigma / 3\n # cdf = stats.norm.cdf(np.log(np.array(forecasts) + 10), loc=mu, scale=sigma)\n # health_index_pred = 1 - abs(cdf - 0.5) * 2\n # time = raw_datetime[-n_test:]\n\n # use rayleigh distribution\n # if the prediction value is less than the mean of the rayleigh distribution, set health index as 1\n # otherwise the far from the mean, the less the health index is\n ####################\n # health_index_pred = np.zeros((len(forecasts),1))\n # mean, var, skew, kurt = rayleigh.stats(moments='mvsk')\n # index = forecasts <= mean\n # health_index_pred[index] = 1\n # index = forecasts > mean\n # cdf = rayleigh.cdf(forecasts)\n # health_index_pred[index] = (1 - cdf[index])*2\n # time = raw_datetime[-n_test:]\n #####################\n forecasts = np.asarray(forecasts)\n health_index = np.zeros((len(forecasts), 1))\n low, normal, high = self.threshold[self.sensor_name]\n three_sigma = abs(normal-high)\n mu = normal\n sigma = three_sigma/3\n index = forecasts <= normal\n health_index[index] = 1\n index = forecasts > normal\n cdf = stats.norm.cdf(forecasts[index], loc=mu, scale=sigma)\n health_index[index] = 1 - abs(cdf - 0.5) * 2\n time = raw_datetime[-n_test:]\n else:\n low, normal, high = self.threshold[self.sensor_name]\n three_sigma = abs(normal-low) if abs(normal-low)>abs(normal-high) else abs(normal-high)\n mu = normal\n sigma = three_sigma/3\n cdf = stats.norm.cdf(forecasts, loc=mu, scale=sigma)\n health_index = 1 - abs(cdf - 0.5) * 2\n time = raw_datetime[-n_test:]\n if self.save_info:\n # save health index to file\n print('save health index to csv starts...')\n df = pd.DataFrame({'time':time, 'prediction_value':np.squeeze(forecasts), 'health_index':np.squeeze(health_index)}, columns=['time','prediction_value','health_index'])\n df.to_csv(os.path.join(os.curdir,'health_index','health_index_pred',self.sensor_name+'.csv'), sep=',', encoding='utf-8',index = False)\n print('save health index to csv done...')\n\n def get_all_health_score(self):\n \"\"\"\n Calculate the health score for all data set (from May to November)\n :return:\n \"\"\"\n # load 
dataset\n series, series_values, raw_datetime = self.load_dataset()\n if self.sensor_name in ['FT-202B', 'PT-203', 'FT-204B', 'PT-204']:\n # health_index_pred = np.zeros(len(series_values))\n # mean, var, skew, kurt = rayleigh.stats(moments='mvsk')\n # index = series_values <= mean\n # health_index_pred[index] = 1\n # index = series_values > mean\n # cdf = rayleigh.cdf(series_values)\n # health_index_pred[index] = (1 - cdf[index]) * 2\n # time = raw_datetime\n health_index = np.zeros(len(series_values))\n normal, low, high = self.threshold\n three_sigma = abs(normal - high)\n mu = normal\n sigma = three_sigma / 3\n index = series_values <= normal\n health_index[index] = 1\n index = series_values > normal\n cdf = stats.norm.cdf(series_values[index], loc=mu, scale=sigma)\n health_index[index] = 1 - abs(cdf - 0.5) * 2\n time = raw_datetime\n else:\n normal, low, high = self.threshold\n three_sigma = abs(normal-low) if abs(normal-low)>abs(normal-high) else abs(normal-high)\n mu = normal\n sigma = three_sigma/3\n cdf = stats.norm.cdf(series_values, loc=mu, scale=sigma)\n health_index = 1 - abs(cdf - 0.5) * 2\n time = raw_datetime\n if self.save_info:\n # save health index to file\n print('save health index to csv starts...')\n df = pd.DataFrame({'time':time, 'value':np.squeeze(series_values), 'health_index':np.squeeze(health_index)}, columns=['time','value','health_index'])\n df.to_csv(os.path.join(os.curdir,'health_index_all',self.sensor_name+'.csv'), sep=',', encoding='utf-8',index = False)\n print('save health index to csv done...')\n"
] | [
[
"matplotlib.pyplot.hlines",
"scipy.stats.shapiro",
"pandas.Series",
"numpy.asarray",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"scipy.stats.norm.cdf",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.where",
"numpy.mean",
"pandas.read_csv",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.dates.DateFormatter",
"numpy.arange",
"pandas.concat",
"matplotlib.pyplot.close",
"numpy.std",
"matplotlib.pyplot.legend",
"sklearn.metrics.mean_squared_error",
"numpy.squeeze",
"pandas.DataFrame",
"numpy.array",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xlabel"
]
] |
SKewLinez/tensorflow | [
"77d8c333405a080c57850c45531dbbf077b2bd0e"
] | [
"tensorflow/python/data/experimental/kernel_tests/map_and_batch_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.experimental.map_and_batch()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.data.experimental.ops import batching\nfrom tensorflow.python.data.kernel_tests import checkpoint_test_base\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training.tracking import util as trackable_utils\n\n\nclass MapAndBatchTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(\n num_parallel_calls=[None, 1, 2], num_parallel_batches=None) +\n combinations.combine(\n num_parallel_calls=None, num_parallel_batches=10)))\n def testMapAndBatch(self, num_parallel_calls, num_parallel_batches):\n \"\"\"Test a dataset that maps a TF function across its input elements.\"\"\"\n # The pipeline is TensorSliceDataset ->\n # RepeatDataset(count) -> MapAndBatchDataset(square_3, batch_size).\n components = (np.arange(7),\n np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],\n np.array(37.0) * np.arange(7))\n\n def _map_fn(x, y, z):\n return math_ops.square(x), math_ops.square(y), math_ops.square(z)\n\n def dataset_fn(batch_size, count):\n dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(\n count).apply(\n batching.map_and_batch(\n map_func=_map_fn,\n batch_size=batch_size,\n num_parallel_calls=num_parallel_calls,\n num_parallel_batches=num_parallel_batches))\n return dataset\n\n # Batch of a finite input, where the batch_size divides the\n # total number of elements.\n dataset = dataset_fn(14, 28)\n get_next = self.getNext(dataset)\n self.assertEqual(\n [[None] + list(c.shape[1:]) for c in components],\n [shape.as_list()\n for shape in dataset_ops.get_legacy_output_shapes(dataset)])\n num_batches = (28 * 7) // 14\n for i in range(num_batches):\n result = self.evaluate(get_next())\n for component, result_component in zip(components, 
result):\n for j in range(14):\n self.assertAllEqual(component[(i * 14 + j) % 7]**2,\n result_component[j])\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n # Batch of a finite input, where the batch_size does not\n # divide the total number of elements.\n get_next = self.getNext(dataset_fn(8, 14))\n\n # We expect (num_batches - 1) full-sized batches.\n num_batches = int(math.ceil((14 * 7) / 8))\n for i in range(num_batches - 1):\n result = self.evaluate(get_next())\n for component, result_component in zip(components, result):\n for j in range(8):\n self.assertAllEqual(component[(i * 8 + j) % 7]**2,\n result_component[j])\n\n result = self.evaluate(get_next())\n for component, result_component in zip(components, result):\n for j in range((14 * 7) % 8):\n self.assertAllEqual(component[((num_batches - 1) * 8 + j) % 7]**2,\n result_component[j])\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n # Batch of an empty input should fail straight away.\n self.assertDatasetProduces(dataset_fn(8, 0), expected_output=[])\n\n # Empty batch should be an initialization time error.\n with self.assertRaises(errors.InvalidArgumentError):\n self.assertDatasetProduces(dataset_fn(0, 14), expected_output=[])\n\n @combinations.generate(\n combinations.times(test_base.default_test_combinations(),\n combinations.combine(drop_remainder=[True, False])))\n def testMapAndBatchPartialBatch(self, drop_remainder):\n dataset = (\n dataset_ops.Dataset.range(10).apply(\n batching.map_and_batch(\n lambda x: array_ops.reshape(x * x, [1]),\n batch_size=4,\n drop_remainder=drop_remainder)))\n\n if drop_remainder:\n self.assertEqual(\n [4, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list())\n else:\n self.assertEqual(\n [None, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list())\n expected_output = [[[0], [1], [4], [9]], [[16], [25], [36], [49]]]\n if not drop_remainder:\n expected_output.append([[64], [81]])\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n @combinations.generate(test_base.default_test_combinations())\n def testMapAndBatchYieldsPartialBatch(self):\n dataset = (\n dataset_ops.Dataset.range(10).apply(\n batching.map_and_batch(lambda x: array_ops.reshape(x * x, [1]), 4)))\n\n self.assertEqual(\n [None, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list())\n expected_output = [[[0], [1], [4], [9]], [[16], [25], [36], [49]],\n [[64], [81]]]\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n @combinations.generate(test_base.default_test_combinations())\n def testMapAndBatchParallelGetNext(self):\n dataset = dataset_ops.Dataset.range(50000).apply(\n batching.map_and_batch(lambda x: x, batch_size=100))\n\n if context.executing_eagerly():\n iterator = iter(dataset)\n get_next = iterator._next_internal # pylint: disable=protected-access\n else:\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n get_next = iterator.get_next\n\n elements = []\n for _ in range(100):\n elements.append(get_next)\n\n for i in range(5):\n got = self.evaluate([element() for element in elements])\n got.sort(key=lambda x: x[0])\n expected = []\n for j in range(100):\n expected.append(range(i * 10000 + j * 100, i * 10000 + (j + 1) * 100))\n self.assertAllEqual(got, expected)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate([element() for element in elements])\n\n @combinations.generate(test_base.default_test_combinations())\n def testMapAndBatchParallelGetNextDropRemainder(self):\n 
dataset = dataset_ops.Dataset.range(49999).apply(\n batching.map_and_batch(\n lambda x: x, batch_size=100, drop_remainder=True))\n\n if context.executing_eagerly():\n iterator = iter(dataset)\n get_next = iterator._next_internal # pylint: disable=protected-access\n else:\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n get_next = iterator.get_next\n\n elements = []\n for _ in range(100):\n elements.append(get_next)\n\n for i in range(4):\n got = self.evaluate([element() for element in elements])\n got.sort(key=lambda x: x[0])\n expected = []\n for j in range(100):\n expected.append(range(i * 10000 + j * 100, i * 10000 + (j + 1) * 100))\n self.assertAllEqual(got, expected)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate([element() for element in elements])\n\n @combinations.generate(test_base.default_test_combinations())\n def testMapAndBatchSparse(self):\n\n def _sparse(i):\n return sparse_tensor.SparseTensorValue(\n indices=[[0]], values=(i * [1]), dense_shape=[1])\n\n dataset = dataset_ops.Dataset.range(10).apply(\n batching.map_and_batch(_sparse, 5))\n\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n sparse_tensor.SparseTensorValue(\n indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],\n values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],\n dense_shape=[5, 1]) for i in range(2)\n ])\n\n @combinations.generate(test_base.default_test_combinations())\n def testMapAndBatchFails(self):\n \"\"\"Test a dataset that maps a TF function across its input elements.\"\"\"\n\n with self.assertRaisesRegex(errors.InvalidArgumentError, \"oops\"):\n dataset = dataset_ops.Dataset.from_tensors(\n array_ops.check_numerics(\n constant_op.constant(1.0) / constant_op.constant(0.0), \"oops\"))\n dataset = dataset.apply(batching.map_and_batch(lambda x: x, 14))\n get_next = self.getNext(dataset, requires_initialization=True)\n self.evaluate(get_next())\n\n @combinations.generate(test_base.default_test_combinations())\n def testMapAndBatchShapeMismatch(self):\n \"\"\"Test a dataset that maps a TF function across its input elements.\"\"\"\n\n def generator():\n yield [1]\n yield [2]\n yield [3]\n yield [[4, 5, 6]]\n\n dataset = dataset_ops.Dataset.from_generator(\n generator, output_types=dtypes.int32)\n batch_size = 4\n dataset = dataset.apply(batching.map_and_batch(lambda x: x, batch_size))\n self.assertDatasetProduces(\n dataset,\n expected_error=(errors.InvalidArgumentError,\n \"number of elements does not match\"))\n\n @combinations.generate(test_base.default_test_combinations())\n def testMapAndBatchImplicitDispose(self):\n # Tests whether a map and batch dataset will be cleaned up correctly when\n # the pipeline does not run it until exhaustion.\n # The pipeline is TensorSliceDataset -> RepeatDataset(1000) ->\n # MapAndBatchDataset(f=square_3, batch_size=100).\n components = (np.arange(1000),\n np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],\n np.array(37.0) * np.arange(1000))\n\n def _map_fn(x, y, z):\n return math_ops.square(x), math_ops.square(y), math_ops.square(z)\n\n dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(\n 1000).apply(batching.map_and_batch(_map_fn, batch_size=100))\n dataset = dataset.prefetch(5)\n get_next = self.getNext(dataset)\n for _ in range(3):\n self.evaluate(get_next())\n\n @combinations.generate(\n combinations.times(test_base.default_test_combinations(),\n combinations.combine(threshold=[0, 5, 10, 90, 95, 99]))\n )\n def testMapAndBatchMapError(self, threshold):\n\n def raising_py_fn(i):\n if i >= 
threshold:\n raise StopIteration()\n else:\n return i\n\n dataset = dataset_ops.Dataset.range(100).apply(\n batching.map_and_batch(\n lambda x: script_ops.py_func(raising_py_fn, [x], dtypes.int64),\n batch_size=10))\n\n get_next = self.getNext(dataset)\n for i in range(threshold // 10):\n self.assertAllEqual([i * 10 + j for j in range(10)],\n self.evaluate(get_next()))\n for i in range(threshold // 10, 10):\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(get_next())\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(element=False, dtype=dtypes.bool) +\n combinations.combine(\n element=-42,\n dtype=[dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64]) +\n combinations.combine(element=42, dtype=[dtypes.uint8, dtypes.uint16])\n + combinations.combine(\n element=42.0,\n dtype=[dtypes.float16, dtypes.float32, dtypes.float64]) +\n combinations.combine(element=b\"hello\", dtype=[dtypes.string])))\n def testMapAndBatchTypes(self, element, dtype):\n\n def gen():\n yield element\n\n dataset = dataset_ops.Dataset.from_generator(gen, dtype).repeat(100).apply(\n batching.map_and_batch(lambda x: x, batch_size=10))\n\n get_next = self.getNext(dataset)\n for _ in range(10):\n self.assertAllEqual([element for _ in range(10)],\n self.evaluate(get_next()))\n\n @combinations.generate(test_base.default_test_combinations())\n def testShortCircuitIdentity(self):\n map_fn = lambda x: x\n dataset = self.structuredDataset(None).repeat().apply(\n batching.map_and_batch(map_fn, batch_size=10))\n get_next = self.getNext(dataset)\n expected = map_fn(self.evaluate(self.structuredElement(None, shape=[10])))\n self.assertAllEqual(expected, self.evaluate(get_next()))\n\n @combinations.generate(test_base.default_test_combinations())\n def testShortCircuitReplicate(self):\n map_fn = lambda x: (x, x)\n dataset = self.structuredDataset(None).repeat().apply(\n batching.map_and_batch(map_fn, batch_size=10))\n get_next = self.getNext(dataset)\n expected = map_fn(self.evaluate(self.structuredElement(None, shape=[10])))\n self.assertAllEqual(expected, self.evaluate(get_next()))\n\n @combinations.generate(test_base.default_test_combinations())\n def testShortCircuitSwap(self):\n map_fn = lambda x, y: (y, x)\n dataset = self.structuredDataset(\n (None,\n None)).repeat().apply(batching.map_and_batch(map_fn, batch_size=10))\n get_next = self.getNext(dataset)\n expected = map_fn(\n *self.evaluate(self.structuredElement((None, None), shape=[10])))\n self.assertAllEqual(expected, self.evaluate(get_next()))\n\n @combinations.generate(test_base.default_test_combinations())\n def testShortCircuitProject(self):\n map_fn = lambda x, y: x\n dataset = self.structuredDataset(\n (None,\n None)).repeat().apply(batching.map_and_batch(map_fn, batch_size=10))\n get_next = self.getNext(dataset)\n expected = map_fn(\n *self.evaluate(self.structuredElement((None, None), shape=[10])))\n self.assertAllEqual(expected, self.evaluate(get_next()))\n\n @combinations.generate(test_base.default_test_combinations())\n def testShortCircuitCapturedInput(self):\n captured_t = variables.Variable(42)\n dataset = self.structuredDataset(None).repeat().apply(\n batching.map_and_batch(lambda x: captured_t, batch_size=10))\n self.evaluate(variables.global_variables_initializer())\n get_next = self.getNext(dataset, requires_initialization=True)\n self.assertAllEqual([42] * 10, self.evaluate(get_next()))\n\n 
@combinations.generate(test_base.default_test_combinations())\n def testMapAndBatchControlFlow(self):\n\n def map_fn(x):\n previous_control_flow_v2_value = control_flow_util.ENABLE_CONTROL_FLOW_V2\n control_flow_util.ENABLE_CONTROL_FLOW_V2 = True\n return_value = control_flow_ops.cond(x < 50, lambda: x + 1, lambda: x * x)\n control_flow_util.ENABLE_CONTROL_FLOW_V2 = previous_control_flow_v2_value\n return return_value\n\n dataset = dataset_ops.Dataset.range(100).apply(\n batching.map_and_batch(map_fn, batch_size=10))\n get_next = self.getNext(dataset)\n for i in range(10):\n if i < 5:\n self.assertAllEqual([i * 10 + j + 1 for j in range(10)],\n self.evaluate(get_next()))\n else:\n self.assertAllEqual(\n [((i * 10) + j) * ((i * 10) + j) for j in range(10)],\n self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n @combinations.generate(test_base.eager_only_combinations())\n def testCheckpointLargeBatches(self):\n # Batches of size 512M\n dataset = dataset_ops.Dataset.from_tensors(\n array_ops.ones((64, 1024, 1024), dtype=dtypes.float32)).repeat()\n dataset = dataset.map(lambda x: x+1, num_parallel_calls=5)\n dataset = dataset.batch(2)\n iterator = iter(dataset)\n next(iterator) # request an element to fill the buffer\n ckpt = trackable_utils.Checkpoint(iterator=iterator)\n manager = checkpoint_management.CheckpointManager(\n ckpt, self.get_temp_dir(), max_to_keep=1)\n manager.save()\n\n\nclass MapAndBatchCheckpointTest(checkpoint_test_base.CheckpointTestBase,\n parameterized.TestCase):\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n checkpoint_test_base.default_test_combinations(),\n combinations.combine(drop_remainder=[True, False])))\n def testNumParallelBatches(self, verify_fn, drop_remainder):\n range_size = 11\n num_shards = 3\n num_repeats = 2\n batch_size = 5\n num_parallel_batches = 2\n total_outputs = (range_size // num_shards) * num_repeats\n if drop_remainder:\n num_outputs = total_outputs // batch_size\n else:\n num_outputs = int(math.ceil(total_outputs / batch_size))\n\n def build_ds(range_start, drop_remainder):\n\n def _map_fn(x):\n return math_ops.square(x)\n\n return dataset_ops.Dataset.range(\n range_start, range_start + range_size).shard(\n num_shards=num_shards, index=0).repeat(num_repeats).apply(\n batching.map_and_batch(\n map_func=_map_fn,\n batch_size=batch_size,\n num_parallel_batches=num_parallel_batches,\n drop_remainder=drop_remainder))\n\n verify_fn(self, lambda: build_ds(10, drop_remainder=drop_remainder),\n num_outputs)\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n checkpoint_test_base.default_test_combinations(),\n combinations.combine(drop_remainder=[True, False])))\n def testNumParallelCalls(self, verify_fn, drop_remainder):\n range_size = 11\n num_shards = 3\n num_repeats = 2\n batch_size = 5\n num_parallel_calls = 7\n total_outputs = (range_size // num_shards) * num_repeats\n if drop_remainder:\n num_outputs = total_outputs // batch_size\n else:\n num_outputs = int(math.ceil(total_outputs / batch_size))\n\n def build_ds(range_start, drop_remainder=False):\n\n def _map_fn(x):\n return math_ops.square(x)\n\n return dataset_ops.Dataset.range(\n range_start, range_start + range_size).shard(\n num_shards=num_shards, index=0).repeat(num_repeats).apply(\n batching.map_and_batch(\n map_func=_map_fn,\n batch_size=batch_size,\n num_parallel_calls=num_parallel_calls,\n drop_remainder=drop_remainder))\n\n verify_fn(self, 
lambda: build_ds(10, drop_remainder=drop_remainder),\n num_outputs)\n\n @combinations.generate(\n combinations.times(test_base.default_test_combinations(),\n checkpoint_test_base.default_test_combinations()))\n def testSparse(self, verify_fn):\n\n def build_dataset():\n\n def map_fn(i):\n return sparse_tensor.SparseTensorValue(\n indices=[[0]], values=(i * [1]), dense_shape=[1])\n\n return dataset_ops.Dataset.range(10).apply(\n batching.map_and_batch(map_fn, 5))\n\n verify_fn(self, build_dataset, num_outputs=2)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.data.kernel_tests.checkpoint_test_base.default_test_combinations",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.data.kernel_tests.test_base.default_test_combinations",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.arange",
"tensorflow.python.ops.script_ops.py_func",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"tensorflow.python.data.ops.dataset_ops.get_legacy_output_shapes",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_generator",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.training.tracking.util.Checkpoint",
"tensorflow.python.framework.combinations.combine",
"tensorflow.python.platform.test.main",
"tensorflow.python.data.kernel_tests.test_base.eager_only_combinations",
"tensorflow.python.data.experimental.ops.batching.map_and_batch",
"tensorflow.python.ops.array_ops.reshape",
"numpy.array",
"tensorflow.python.eager.context.executing_eagerly"
]
] |
anapaulamendes/chefboost | [
"4628154f054cb6c79ab3f69a642d597c1265b202"
] | [
"chefboost/Chefboost.py"
] | [
"import pandas as pd\r\nimport math\r\nimport numpy as np\r\nimport time\r\nimport imp\r\nimport pickle\r\nimport os\r\nfrom os import path\r\nimport json\r\n\r\nfrom chefboost.commons import functions, evaluate as eval\r\nfrom chefboost.training import Preprocess, Training\r\nfrom chefboost.tuning import gbm, adaboost, randomforest\r\n\r\n#------------------------\r\n\r\ndef fit(df, config = {}, target_label = 'Decision', validation_df = None):\r\n\r\n\t\"\"\"\r\n\tParameters:\r\n\t\tdf (pandas data frame): Training data frame. The target column must be named as 'Decision' and it has to be in the last column\r\n\r\n\t\tconfig (dictionary):\r\n\r\n\t\t\tconfig = {\r\n\t\t\t\t'algorithm' (string): ID3, 'C4.5, CART, CHAID or Regression\r\n\t\t\t\t'enableParallelism' (boolean): False\r\n\r\n\t\t\t\t'enableGBM' (boolean): True,\r\n\t\t\t\t'epochs' (int): 7,\r\n\t\t\t\t'learning_rate' (int): 1,\r\n\r\n\t\t\t\t'enableRandomForest' (boolean): True,\r\n\t\t\t\t'num_of_trees' (int): 5,\r\n\r\n\t\t\t\t'enableAdaboost' (boolean): True,\r\n\t\t\t\t'num_of_weak_classifier' (int): 4\r\n\t\t\t}\r\n\r\n\t\tvalidation_df (pandas data frame): if nothing is passed to validation data frame, then the function validates built trees for training data frame\r\n\r\n\tReturns:\r\n\t\tchefboost model\r\n\r\n\t\"\"\"\r\n\r\n\t#------------------------\r\n\r\n\tprocess_id = os.getpid()\r\n\r\n\t#------------------------\r\n\t#rename target column name\r\n\tif target_label != 'Decision':\r\n\t\tdf = df.rename(columns = {target_label: 'Decision'})\r\n\r\n\t#if target is not the last column\r\n\tif df.columns[-1] != 'Decision':\r\n\t\tif 'Decision' in df.columns:\r\n\t\t\tnew_column_order = df.columns.drop('Decision').tolist() + ['Decision']\r\n\t\t\t#print(new_column_order)\r\n\t\t\tdf = df[new_column_order]\r\n\t\telse:\r\n\t\t\traise ValueError('Please set the target_label')\r\n\r\n\t#------------------------\r\n\r\n\tbase_df = df.copy()\r\n\r\n\t#------------------------\r\n\r\n\ttarget_label = df.columns[len(df.columns)-1]\r\n\tif target_label != 'Decision':\r\n\t\tprint(\"Expected: Decision, Existing: \",target_label)\r\n\t\traise ValueError('Please confirm that name of the target column is \"Decision\" and it is put to the right in pandas data frame')\r\n\r\n\t#------------------------\r\n\t#handle NaN values\r\n\r\n\tnan_values = []\r\n\r\n\tfor column in df.columns:\r\n\t\tif df[column].dtypes != 'object':\r\n\t\t\tmin_value = df[column].min()\r\n\t\t\tidx = df[df[column].isna()].index\r\n\r\n\t\t\tnan_value = []\r\n\t\t\tnan_value.append(column)\r\n\r\n\t\t\tif idx.shape[0] > 0:\r\n\t\t\t\tdf.loc[idx, column] = min_value - 1\r\n\t\t\t\tnan_value.append(min_value - 1)\r\n\t\t\t\tmin_value - 1\r\n\t\t\t\t#print(\"NaN values are replaced to \", min_value - 1, \" in column \", column)\r\n\t\t\telse:\r\n\t\t\t\tnan_value.append(None)\r\n\r\n\t\t\tnan_values.append(nan_value)\r\n\r\n\t#------------------------\r\n\r\n\t#initialize params and folders\r\n\tconfig = functions.initializeParams(config)\r\n\tfunctions.initializeFolders()\r\n\r\n\t#------------------------\r\n\r\n\talgorithm = config['algorithm']\r\n\r\n\tvalid_algorithms = ['ID3', 'C4.5', 'CART', 'CHAID', 'Regression']\r\n\r\n\tif algorithm not in valid_algorithms:\r\n\t\traise ValueError('Invalid algorithm passed. 
You passed ', algorithm,\" but valid algorithms are \",valid_algorithms)\r\n\r\n\t#------------------------\r\n\r\n\tenableRandomForest = config['enableRandomForest']\r\n\tnum_of_trees = config['num_of_trees']\r\n\tenableMultitasking = config['enableMultitasking'] #no longer used. check to remove this variable.\r\n\r\n\tenableGBM = config['enableGBM']\r\n\tepochs = config['epochs']\r\n\tlearning_rate = config['learning_rate']\r\n\r\n\tenableAdaboost = config['enableAdaboost']\r\n\tenableParallelism = config['enableParallelism']\r\n\r\n\t#------------------------\r\n\r\n\tif enableParallelism == True:\r\n\t\tprint(\"[INFO]: \",config[\"num_cores\"],\"CPU cores will be allocated in parallel running\")\r\n\r\n\t\tfrom multiprocessing import set_start_method, freeze_support\r\n\t\tset_start_method(\"spawn\", force=True)\r\n\t\tfreeze_support()\r\n\t#------------------------\r\n\traw_df = df.copy()\r\n\tnum_of_rows = df.shape[0]; num_of_columns = df.shape[1]\r\n\r\n\tif algorithm == 'Regression':\r\n\t\tif df['Decision'].dtypes == 'object':\r\n\t\t\traise ValueError('Regression trees cannot be applied for nominal target values! You can either change the algorithm or data set.')\r\n\r\n\tif df['Decision'].dtypes != 'object': #this must be regression tree even if it is not mentioned in algorithm\r\n\r\n\t\tif algorithm != 'Regression':\r\n\t\t\tprint(\"WARNING: You set the algorithm to \", algorithm,\" but the Decision column of your data set has non-object type.\")\r\n\t\t\tprint(\"That's why, the algorithm is set to Regression to handle the data set.\")\r\n\r\n\t\talgorithm = 'Regression'\r\n\t\tconfig['algorithm'] = 'Regression'\r\n\t\tglobal_stdev = df['Decision'].std(ddof=0)\r\n\r\n\tif enableGBM == True:\r\n\t\tprint(\"Gradient Boosting Machines...\")\r\n\t\talgorithm = 'Regression'\r\n\t\tconfig['algorithm'] = 'Regression'\r\n\r\n\tif enableAdaboost == True:\r\n\t\t#enableParallelism = False\r\n\t\tfor j in range(0, num_of_columns):\r\n\t\t\tcolumn_name = df.columns[j]\r\n\t\t\tif df[column_name].dtypes == 'object':\r\n\t\t\t\traise ValueError('Adaboost must be run on numeric data set for both features and target')\r\n\r\n\t#-------------------------\r\n\r\n\tprint(algorithm,\" tree is going to be built...\")\r\n\r\n\tdataset_features = dict() #initialize a dictionary. this is going to be used to check features numeric or nominal. 
numeric features should be transformed to nominal values based on scales.\r\n\r\n\theader = \"def findDecision(obj): #\"\r\n\r\n\tnum_of_columns = df.shape[1]-1\r\n\tfor i in range(0, num_of_columns):\r\n\t\tcolumn_name = df.columns[i]\r\n\t\tdataset_features[column_name] = df[column_name].dtypes\r\n\t\theader = header + \"obj[\" + str(i) +\"]: \"+column_name\r\n\t\tif i != num_of_columns - 1:\r\n\t\t\theader = header + \", \"\r\n\r\n\theader = header + \"\\n\"\r\n\r\n\t#------------------------\r\n\r\n\tbegin = time.time()\r\n\r\n\ttrees = []; alphas = []\r\n\r\n\tif enableAdaboost == True:\r\n\t\ttrees, alphas = adaboost.apply(df, config, header, dataset_features, validation_df = validation_df, process_id = process_id)\r\n\r\n\telif enableGBM == True:\r\n\r\n\t\tif df['Decision'].dtypes == 'object': #transform classification problem to regression\r\n\t\t\ttrees, alphas = gbm.classifier(df, config, header, dataset_features, validation_df = validation_df, process_id = process_id)\r\n\t\t\tclassification = True\r\n\r\n\t\telse: #regression\r\n\t\t\ttrees = gbm.regressor(df, config, header, dataset_features, validation_df = validation_df, process_id = process_id)\r\n\t\t\tclassification = False\r\n\r\n\telif enableRandomForest == True:\r\n\t\ttrees = randomforest.apply(df, config, header, dataset_features, validation_df = validation_df, process_id = process_id)\r\n\telse: #regular decision tree building\r\n\r\n\t\troot = 1; file = \"outputs/rules/rules.py\"\r\n\t\tfunctions.createFile(file, header)\r\n\r\n\t\tif enableParallelism == True:\r\n\t\t\tjson_file = \"outputs/rules/rules.json\"\r\n\t\t\tfunctions.createFile(json_file, \"[\\n\")\r\n\r\n\t\ttrees = Training.buildDecisionTree(df, root = root, file = file, config = config\r\n\t\t\t\t, dataset_features = dataset_features\r\n\t\t\t\t, parent_level = 0, leaf_id = 0, parents = 'root', validation_df = validation_df, main_process_id = process_id)\r\n\r\n\tprint(\"-------------------------\")\r\n\tprint(\"finished in \",time.time() - begin,\" seconds\")\r\n\r\n\tobj = {\r\n\t\t\"trees\": trees,\r\n\t\t\"alphas\": alphas,\r\n\t\t\"config\": config,\r\n\t\t\"nan_values\": nan_values\r\n\t}\r\n\r\n\t#-----------------------------------------\r\n\r\n\t#train set accuracy\r\n\tdf = base_df.copy()\r\n\tevaluate(obj, df, task = 'train')\r\n\r\n\t#validation set accuracy\r\n\tif isinstance(validation_df, pd.DataFrame):\r\n\t\tevaluate(obj, validation_df, task = 'validation')\r\n\r\n\t#-----------------------------------------\r\n\r\n\treturn obj\r\n\r\n\t#-----------------------------------------\r\n\r\ndef predict(model, param):\r\n\r\n\t\"\"\"\r\n\tParameters:\r\n\t\tmodel (built chefboost model): you should pass model argument to the return of fit function\r\n\t\tparam (list): pass input features as python list\r\n\r\n\t\te.g. 
chef.predict(model, param = ['Sunny', 'Hot', 'High', 'Weak'])\r\n\tReturns:\r\n\t\tprediction\r\n\t\"\"\"\r\n\r\n\ttrees = model[\"trees\"]\r\n\tconfig = model[\"config\"]\r\n\r\n\talphas = []\r\n\tif \"alphas\" in model:\r\n\t\talphas = model[\"alphas\"]\r\n\r\n\tnan_values = []\r\n\tif \"nan_values\" in model:\r\n\t\tnan_values = model[\"nan_values\"]\r\n\r\n\t#-----------------------\r\n\t#handle missing values\r\n\r\n\tcolumn_index = 0\r\n\tfor column in nan_values:\r\n\t\tcolumn_name = column[0]\r\n\t\tmissing_value = column[1]\r\n\r\n\t\tif pd.isna(missing_value) != True:\r\n\t\t\t#print(\"missing values will be replaced with \",missing_value,\" in \",column_name,\" column\")\r\n\r\n\t\t\tif pd.isna(param[column_index]):\r\n\t\t\t\tparam[column_index] = missing_value\r\n\r\n\t\tcolumn_index = column_index + 1\r\n\r\n\t#print(\"instance: \", param)\r\n\t#-----------------------\r\n\r\n\tenableGBM = config['enableGBM']\r\n\tadaboost = config['enableAdaboost']\r\n\tenableRandomForest = config['enableRandomForest']\r\n\r\n\t#-----------------------\r\n\r\n\tclassification = False\r\n\tprediction = 0\r\n\tprediction_classes = []\r\n\r\n\t#-----------------------\r\n\r\n\tif enableGBM == True:\r\n\r\n\t\tif len(trees) == config['epochs']:\r\n\t\t\tclassification = False\r\n\t\telse:\r\n\t\t\tclassification = True\r\n\t\t\tprediction_classes = [0 for i in alphas]\r\n\r\n\t#-----------------------\r\n\r\n\tif len(trees) > 1: #bagging or boosting\r\n\t\tindex = 0\r\n\t\tfor tree in trees:\r\n\t\t\tif adaboost != True:\r\n\r\n\t\t\t\tcustom_prediction = tree.findDecision(param)\r\n\r\n\t\t\t\tif custom_prediction != None:\r\n\t\t\t\t\tif type(custom_prediction) != str: #regression\r\n\r\n\t\t\t\t\t\tif enableGBM == True and classification == True:\r\n\t\t\t\t\t\t\tprediction_classes[index % len(alphas)] += custom_prediction\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tprediction += custom_prediction\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tclassification = True\r\n\t\t\t\t\t\tprediction_classes.append(custom_prediction)\r\n\t\t\telse: #adaboost\r\n\t\t\t\tprediction += alphas[index] * tree.findDecision(param)\r\n\t\t\tindex = index + 1\r\n\r\n\t\tif enableRandomForest == True:\r\n\t\t\t#notice that gbm requires cumilative sum but random forest requires mean of each tree\r\n\t\t\tprediction = prediction / len(trees)\r\n\r\n\t\tif adaboost == True:\r\n\t\t\tprediction = functions.sign(prediction)\r\n\telse: #regular decision tree\r\n\t\ttree = trees[0]\r\n\t\tprediction = tree.findDecision(param)\r\n\r\n\tif classification == False:\r\n\t\treturn prediction\r\n\telse:\r\n\t\tif enableGBM == True and classification == True:\r\n\t\t\treturn alphas[np.argmax(prediction_classes)]\r\n\t\telse: #classification\r\n\t\t\t#e.g. random forest\r\n\t\t\t#get predictions made by different trees\r\n\t\t\tpredictions = np.array(prediction_classes)\r\n\r\n\t\t\t#find the most frequent prediction\r\n\t\t\t(values, counts) = np.unique(predictions, return_counts=True)\r\n\t\t\tidx = np.argmax(counts)\r\n\t\t\tprediction = values[idx]\r\n\r\n\t\t\treturn prediction\r\n\r\ndef save_model(base_model, file_name=\"model.pkl\"):\r\n\r\n\t\"\"\"\r\n\tParameters:\r\n\t\tbase_model (built chefboost model): you should pass this to the return of fit function\r\n\t\tfile_name (string): you should pass target file name as exact path.\r\n\t\"\"\"\r\n\r\n\tmodel = base_model.copy()\r\n\r\n\t#modules cannot be saved. 
Save its reference instead.\r\n\tmodule_names = []\r\n\tfor tree in model[\"trees\"]:\r\n\t\tmodule_names.append(tree.__name__)\r\n\r\n\tmodel[\"trees\"] = module_names\r\n\r\n\tf = open(\"outputs/rules/\"+file_name, \"wb\")\r\n\tpickle.dump(model,f)\r\n\tf.close()\r\n\r\ndef load_model(file_name=\"model.pkl\"):\r\n\r\n\t\"\"\"\r\n\tParameters:\r\n\t\tfile_name (string): exact path of the target saved model\r\n\tReturns:\r\n\t\tbuilt chefboost model\r\n\t\"\"\"\r\n\r\n\tf = open('outputs/rules/'+file_name, 'rb')\r\n\tmodel = pickle.load(f)\r\n\r\n\t#restore modules from its references\r\n\tmodules = []\r\n\tfor model_name in model[\"trees\"]:\r\n\t\tmodule = functions.restoreTree(model_name)\r\n\t\tmodules.append(module)\r\n\r\n\tmodel[\"trees\"] = modules\r\n\r\n\treturn model\r\n\r\ndef restoreTree(moduleName):\r\n\r\n\t\"\"\"\r\n\tIf you have decision rules, then this function enables you to load a built chefboost model. You can then call prediction.\r\n\tParameters:\r\n\t\tmoduleName (string): you should pass outputs/rules/rules if you want to restore outputs/rules/rules.py\r\n\r\n\tReturns:\r\n\t\tbuilt chefboost model\r\n\t\"\"\"\r\n\r\n\treturn functions.restoreTree(moduleName)\r\n\r\ndef feature_importance(rules):\r\n\r\n\t\"\"\"\r\n\tParameters:\r\n\t\trules (string or list):\r\n\r\n\t\te.g. decision_rules = \"outputs/rules/rules.py\"\r\n\t\tor this could be retrieved from built model as shown below.\r\n\r\n\t\t\tdecision_rules = []\r\n\t\t\tfor tree in model[\"trees\"]:\r\n\t\t\t rule = .__dict__[\"__spec__\"].origin\r\n\t\t\t decision_rules.append(rule)\r\n\r\n\tReturns:\r\n\t\tpandas data frame\r\n\t\"\"\"\r\n\r\n\tif type(rules) != list:\r\n\t\trules = [rules]\r\n\telse:\r\n\t\tprint(\"rules: \",rules)\r\n\r\n\t#-----------------------------\r\n\r\n\tdfs = []\r\n\r\n\tfor rule in rules:\r\n\t\tprint(\"Decision rule: \",rule)\r\n\r\n\t\tfile = open(rule, 'r')\r\n\t\tlines = file.readlines()\r\n\r\n\t\tpivot = {}\r\n\t\trules = []\r\n\r\n\t\t#initialize feature importances\r\n\t\tline_idx = 0\r\n\t\tfor line in lines:\r\n\t\t\tif line_idx == 0:\r\n\t\t\t\tfeature_explainer_list = line.split(\"#\")[1].split(\", \")\r\n\t\t\t\tfor feature_explainer in feature_explainer_list:\r\n\t\t\t\t\tfeature = feature_explainer.split(\": \")[1].replace(\"\\n\", \"\")\r\n\t\t\t\t\tpivot[feature] = 0\r\n\t\t\telse:\r\n\t\t\t\tif \"# \" in line:\r\n\t\t\t\t\trule = line.strip().split(\"# \")[1]\r\n\t\t\t\t\trules.append(json.loads(rule))\r\n\r\n\t\t\tline_idx = line_idx + 1\r\n\r\n\t\tfeature_names = list(pivot.keys())\r\n\r\n\t\tfor feature in feature_names:\r\n\t\t\tfor rule in rules:\r\n\t\t\t\tif rule[\"feature\"] == feature:\r\n\r\n\r\n\t\t\t\t\tscore = rule[\"metric_value\"] * rule[\"instances\"]\r\n\t\t\t\t\tcurrent_depth = rule[\"depth\"]\r\n\r\n\t\t\t\t\tchild_scores = 0\r\n\t\t\t\t\t#find child node importances\r\n\t\t\t\t\tfor child_rule in rules:\r\n\t\t\t\t\t\tif child_rule[\"depth\"] == current_depth + 1:\r\n\r\n\t\t\t\t\t\t\tchild_score = child_rule[\"metric_value\"] * child_rule[\"instances\"]\r\n\r\n\t\t\t\t\t\t\tchild_scores = child_scores + child_score\r\n\r\n\t\t\t\t\tscore = score - child_scores\r\n\r\n\t\t\t\t\tpivot[feature] = pivot[feature] + score\r\n\r\n\t\t#normalize feature importance\r\n\r\n\t\ttotal_score = 0\r\n\t\tfor feature, score in pivot.items():\r\n\t\t\ttotal_score = total_score + score\r\n\r\n\t\tfor feature, score in pivot.items():\r\n\t\t\tpivot[feature] = round(pivot[feature] / total_score, 4)\r\n\r\n\t\tinstances = []\r\n\t\tfor feature, score in 
pivot.items():\r\n\t\t\tinstance = []\r\n\t\t\tinstance.append(feature)\r\n\t\t\tinstance.append(score)\r\n\t\t\tinstances.append(instance)\r\n\r\n\t\tdf = pd.DataFrame(instances, columns = [\"feature\", \"final_importance\"])\r\n\t\tdf = df.sort_values(by = [\"final_importance\"], ascending = False)\r\n\r\n\t\tif len(rules) == 1:\r\n\t\t\treturn df\r\n\t\telse:\r\n\t\t\tdfs.append(df)\r\n\r\n\tif len(rules) > 1:\r\n\r\n\t\thf = pd.DataFrame(feature_names, columns = [\"feature\"])\r\n\t\thf[\"importance\"] = 0\r\n\r\n\t\tfor df in dfs:\r\n\t\t\thf = hf.merge(df, on = [\"feature\"], how = \"left\")\r\n\t\t\thf[\"importance\"] = hf[\"importance\"] + hf[\"final_importance\"]\r\n\t\t\thf = hf.drop(columns = [\"final_importance\"])\r\n\r\n\t\t#------------------------\r\n\t\t#normalize\r\n\t\thf[\"importance\"] = hf[\"importance\"] / hf[\"importance\"].sum()\r\n\t\thf = hf.sort_values(by = [\"importance\"], ascending = False)\r\n\r\n\t\treturn hf\r\n\r\ndef evaluate(model, df, target_label = 'Decision', task = 'test'):\r\n\r\n\t\"\"\"\r\n\tParameters:\r\n\t\tmodel (built chefboost model): you should pass the return of fit function\r\n\t\tdf (pandas data frame): data frame you would like to evaluate\r\n\t\ttask (string): optionally you can pass this train, validation or test\r\n\t\"\"\"\r\n\r\n\t#--------------------------\r\n\r\n\tif target_label != 'Decision':\r\n\t\tdf = df.rename(columns = {target_label: 'Decision'})\r\n\r\n\t#if target is not the last column\r\n\tif df.columns[-1] != 'Decision':\r\n\t\tnew_column_order = df.columns.drop('Decision').tolist() + ['Decision']\r\n\t\tprint(new_column_order)\r\n\t\tdf = df[new_column_order]\r\n\r\n\t#--------------------------\r\n\r\n\tfunctions.bulk_prediction(df, model)\r\n\r\n\tenableAdaboost = model[\"config\"][\"enableAdaboost\"]\r\n\r\n\tif enableAdaboost == True:\r\n\t\tdf['Decision'] = df['Decision'].astype(str)\r\n\t\tdf['Prediction'] = df['Prediction'].astype(str)\r\n\r\n\teval.evaluate(df, task = task)\r\n"
] | [
[
"pandas.DataFrame",
"pandas.isna",
"numpy.argmax",
"numpy.array",
"numpy.unique"
]
] |
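The chefboost record above documents its public functions only through docstrings: a built model is a dict of trees/alphas/config, predict takes the raw feature values as a plain list, save_model/load_model pickle tree-module names rather than module objects, and feature_importance parses the generated rules file. The following is a minimal usage sketch, not part of the archived source: the `from chefboost import Chefboost as chef` import path, the toy dataframe, and the `algorithm` config key are assumptions added for illustration; only the function names and parameters come from the docstrings in the record itself.

# Minimal sketch of the interface documented in the docstrings above.
# Assumptions: chefboost is installed and importable as chefboost.Chefboost,
# and the last dataframe column is the 'Decision' target expected by fit/evaluate.
import pandas as pd
from chefboost import Chefboost as chef

df = pd.DataFrame(
    [
        ["Sunny", "Hot", "High", "Weak", "No"],
        ["Sunny", "Hot", "High", "Strong", "No"],
        ["Overcast", "Hot", "High", "Weak", "Yes"],
        ["Rain", "Mild", "High", "Weak", "Yes"],
    ],
    columns=["Outlook", "Temp", "Humidity", "Wind", "Decision"],
)

# Hypothetical config; other keys referenced in the source above include
# enableAdaboost, enableGBM, enableRandomForest, enableParallelism, epochs.
config = {"algorithm": "C4.5"}

model = chef.fit(df, config=config)  # writes decision rules under outputs/rules/

# predict expects the raw feature values as a plain Python list,
# in the same column order used for training.
print(chef.predict(model, param=["Sunny", "Hot", "High", "Weak"]))

chef.save_model(model, "model.pkl")      # stores tree module names, not module objects
restored = chef.load_model("model.pkl")  # re-imports the rule modules by name

# feature importances are parsed back out of the generated rules file
print(chef.feature_importance("outputs/rules/rules.py"))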
samize/pandas | [
"700be617eb567fb4ab82aa8151d5c4ee02c22b95"
] | [
"pandas/core/window/rolling.py"
] | [
"\"\"\"\nProvide a generic structure to support window functions,\nsimilar to how we have a Groupby object.\n\"\"\"\nfrom __future__ import annotations\n\nimport copy\nfrom datetime import timedelta\nfrom functools import partial\nimport inspect\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Hashable,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import (\n BaseOffset,\n to_offset,\n)\nimport pandas._libs.window.aggregations as window_aggregations\nfrom pandas._typing import (\n ArrayLike,\n Axis,\n NDFrameT,\n WindowingRankType,\n)\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import doc\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n ensure_float64,\n is_bool,\n is_integer,\n is_list_like,\n is_scalar,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import notna\n\nfrom pandas.core._numba import executor\nfrom pandas.core.algorithms import factorize\nfrom pandas.core.apply import ResamplerWindowApply\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.base import (\n DataError,\n SelectionMixin,\n)\nimport pandas.core.common as com\nfrom pandas.core.indexers.objects import (\n BaseIndexer,\n FixedWindowIndexer,\n GroupbyIndexer,\n VariableWindowIndexer,\n)\nfrom pandas.core.indexes.api import (\n DatetimeIndex,\n Index,\n MultiIndex,\n PeriodIndex,\n TimedeltaIndex,\n)\nfrom pandas.core.reshape.concat import concat\nfrom pandas.core.util.numba_ import (\n NUMBA_FUNC_CACHE,\n maybe_use_numba,\n)\nfrom pandas.core.window.common import (\n flex_binary_moment,\n zsqrt,\n)\nfrom pandas.core.window.doc import (\n _shared_docs,\n args_compat,\n create_section_header,\n kwargs_compat,\n kwargs_scipy,\n numba_notes,\n template_header,\n template_returns,\n template_see_also,\n window_agg_numba_parameters,\n window_apply_parameters,\n)\nfrom pandas.core.window.numba_ import (\n generate_manual_numpy_nan_agg_with_axis,\n generate_numba_apply_func,\n generate_numba_table_func,\n)\n\nif TYPE_CHECKING:\n from pandas import (\n DataFrame,\n Series,\n )\n from pandas.core.generic import NDFrame\n from pandas.core.groupby.ops import BaseGrouper\n from pandas.core.internals import Block # noqa:F401\n\n\nclass BaseWindow(SelectionMixin):\n \"\"\"Provides utilities for performing windowing operations.\"\"\"\n\n _attributes: list[str] = []\n exclusions: frozenset[Hashable] = frozenset()\n _on: Index\n\n def __init__(\n self,\n obj: NDFrame,\n window=None,\n min_periods: int | None = None,\n center: bool = False,\n win_type: str | None = None,\n axis: Axis = 0,\n on: str | Index | None = None,\n closed: str | None = None,\n method: str = \"single\",\n *,\n selection=None,\n ):\n self.obj = obj\n self.on = on\n self.closed = closed\n self.window = window\n self.min_periods = min_periods\n self.center = center\n # TODO(2.0): Change this back to self.win_type once deprecation is enforced\n self._win_type = win_type\n self.axis = obj._get_axis_number(axis) if axis is not None else None\n self.method = method\n self._win_freq_i8 = None\n if self.on is None:\n if self.axis == 0:\n self._on = self.obj.index\n else:\n # i.e. 
self.axis == 1\n self._on = self.obj.columns\n elif isinstance(self.on, Index):\n self._on = self.on\n elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:\n self._on = Index(self.obj[self.on])\n else:\n raise ValueError(\n f\"invalid on specified as {self.on}, \"\n \"must be a column (of DataFrame), an Index or None\"\n )\n\n self._selection = selection\n self._validate()\n\n @property\n def win_type(self):\n if self._win_freq_i8 is not None:\n warnings.warn(\n \"win_type will no longer return 'freq' in a future version. \"\n \"Check the type of self.window instead.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return \"freq\"\n return self._win_type\n\n @property\n def is_datetimelike(self) -> bool:\n warnings.warn(\n \"is_datetimelike is deprecated and will be removed in a future version.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return self._win_freq_i8 is not None\n\n def validate(self) -> None:\n warnings.warn(\n \"validate is deprecated and will be removed in a future version.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return self._validate()\n\n def _validate(self) -> None:\n if self.center is not None and not is_bool(self.center):\n raise ValueError(\"center must be a boolean\")\n if self.min_periods is not None:\n if not is_integer(self.min_periods):\n raise ValueError(\"min_periods must be an integer\")\n elif self.min_periods < 0:\n raise ValueError(\"min_periods must be >= 0\")\n elif is_integer(self.window) and self.min_periods > self.window:\n raise ValueError(\n f\"min_periods {self.min_periods} must be <= window {self.window}\"\n )\n if self.closed is not None and self.closed not in [\n \"right\",\n \"both\",\n \"left\",\n \"neither\",\n ]:\n raise ValueError(\"closed must be 'right', 'left', 'both' or 'neither'\")\n if not isinstance(self.obj, (ABCSeries, ABCDataFrame)):\n raise TypeError(f\"invalid type: {type(self)}\")\n if isinstance(self.window, BaseIndexer):\n # Validate that the passed BaseIndexer subclass has\n # a get_window_bounds with the correct signature.\n get_window_bounds_signature = inspect.signature(\n self.window.get_window_bounds\n ).parameters.keys()\n expected_signature = inspect.signature(\n BaseIndexer().get_window_bounds\n ).parameters.keys()\n if get_window_bounds_signature != expected_signature:\n raise ValueError(\n f\"{type(self.window).__name__} does not implement \"\n f\"the correct signature for get_window_bounds\"\n )\n if self.method not in [\"table\", \"single\"]:\n raise ValueError(\"method must be 'table' or 'single\")\n\n def _create_data(self, obj: NDFrameT) -> NDFrameT:\n \"\"\"\n Split data into blocks & return conformed data.\n \"\"\"\n # filter out the on from the object\n if self.on is not None and not isinstance(self.on, Index) and obj.ndim == 2:\n obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)\n if self.axis == 1:\n # GH: 20649 in case of mixed dtype and axis=1 we have to convert everything\n # to float to calculate the complete row at once. We exclude all non-numeric\n # dtypes.\n obj = obj.select_dtypes(include=[\"number\"], exclude=[\"timedelta\"])\n obj = obj.astype(\"float64\", copy=False)\n obj._mgr = obj._mgr.consolidate()\n return obj\n\n def _gotitem(self, key, ndim, subset=None):\n \"\"\"\n Sub-classes to define. 
Return a sliced object.\n\n Parameters\n ----------\n key : str / list of selections\n ndim : {1, 2}\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n # create a new object to prevent aliasing\n if subset is None:\n subset = self.obj\n\n # we need to make a shallow copy of ourselves\n # with the same groupby\n with warnings.catch_warnings():\n # TODO(2.0): Remove once win_type deprecation is enforced\n warnings.filterwarnings(\"ignore\", \"win_type\", FutureWarning)\n kwargs = {attr: getattr(self, attr) for attr in self._attributes}\n\n selection = None\n if subset.ndim == 2 and (\n (is_scalar(key) and key in subset) or is_list_like(key)\n ):\n selection = key\n\n new_win = type(self)(subset, selection=selection, **kwargs)\n return new_win\n\n def __getattr__(self, attr: str):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self.obj:\n return self[attr]\n\n raise AttributeError(\n f\"'{type(self).__name__}' object has no attribute '{attr}'\"\n )\n\n def _dir_additions(self):\n return self.obj._dir_additions()\n\n def __repr__(self) -> str:\n \"\"\"\n Provide a nice str repr of our rolling object.\n \"\"\"\n attrs_list = (\n f\"{attr_name}={getattr(self, attr_name)}\"\n for attr_name in self._attributes\n if getattr(self, attr_name, None) is not None and attr_name[0] != \"_\"\n )\n attrs = \",\".join(attrs_list)\n return f\"{type(self).__name__} [{attrs}]\"\n\n def __iter__(self):\n obj = self._selected_obj.set_axis(self._on)\n obj = self._create_data(obj)\n indexer = self._get_window_indexer()\n\n start, end = indexer.get_window_bounds(\n num_values=len(obj),\n min_periods=self.min_periods,\n center=self.center,\n closed=self.closed,\n )\n\n assert len(start) == len(\n end\n ), \"these should be equal in length from get_window_bounds\"\n\n for s, e in zip(start, end):\n result = obj.iloc[slice(s, e)]\n yield result\n\n def _prep_values(self, values: ArrayLike) -> np.ndarray:\n \"\"\"Convert input to numpy arrays for Cython routines\"\"\"\n if needs_i8_conversion(values.dtype):\n raise NotImplementedError(\n f\"ops for {type(self).__name__} for this \"\n f\"dtype {values.dtype} are not implemented\"\n )\n else:\n # GH #12373 : rolling functions error on float32 data\n # make sure the data is coerced to float64\n try:\n if isinstance(values, ExtensionArray):\n values = values.to_numpy(np.float64, na_value=np.nan)\n else:\n values = ensure_float64(values)\n except (ValueError, TypeError) as err:\n raise TypeError(f\"cannot handle this type -> {values.dtype}\") from err\n\n # Convert inf to nan for C funcs\n inf = np.isinf(values)\n if inf.any():\n values = np.where(inf, np.nan, values)\n\n # error: Incompatible return value type (got \"Optional[ndarray]\",\n # expected \"ndarray\")\n return values # type: ignore[return-value]\n\n def _insert_on_column(self, result: DataFrame, obj: DataFrame) -> None:\n # if we have an 'on' column we want to put it back into\n # the results in the same location\n from pandas import Series\n\n if self.on is not None and not self._on.equals(obj.index):\n name = self._on.name\n extra_col = Series(self._on, index=self.obj.index, name=name)\n if name in result.columns:\n # TODO: sure we want to overwrite results?\n result[name] = extra_col\n elif name in result.index.names:\n pass\n elif name in self._selected_obj.columns:\n # insert in the same location as we had in _selected_obj\n old_cols = self._selected_obj.columns\n new_cols = result.columns\n old_loc = 
old_cols.get_loc(name)\n overlap = new_cols.intersection(old_cols[:old_loc])\n new_loc = len(overlap)\n result.insert(new_loc, name, extra_col)\n else:\n # insert at the end\n result[name] = extra_col\n\n @property\n def _index_array(self):\n # TODO: why do we get here with e.g. MultiIndex?\n if needs_i8_conversion(self._on.dtype):\n return self._on.asi8\n return None\n\n def _resolve_output(self, out: DataFrame, obj: DataFrame) -> DataFrame:\n \"\"\"Validate and finalize result.\"\"\"\n if out.shape[1] == 0 and obj.shape[1] > 0:\n raise DataError(\"No numeric types to aggregate\")\n elif out.shape[1] == 0:\n return obj.astype(\"float64\")\n\n self._insert_on_column(out, obj)\n return out\n\n def _get_window_indexer(self) -> BaseIndexer:\n \"\"\"\n Return an indexer class that will compute the window start and end bounds\n \"\"\"\n if isinstance(self.window, BaseIndexer):\n return self.window\n if self._win_freq_i8 is not None:\n return VariableWindowIndexer(\n index_array=self._index_array,\n window_size=self._win_freq_i8,\n center=self.center,\n )\n return FixedWindowIndexer(window_size=self.window)\n\n def _apply_series(\n self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None\n ) -> Series:\n \"\"\"\n Series version of _apply_blockwise\n \"\"\"\n obj = self._create_data(self._selected_obj)\n\n if name == \"count\":\n # GH 12541: Special case for count where we support date-like types\n obj = notna(obj).astype(int)\n try:\n values = self._prep_values(obj._values)\n except (TypeError, NotImplementedError) as err:\n raise DataError(\"No numeric types to aggregate\") from err\n\n result = homogeneous_func(values)\n return obj._constructor(result, index=obj.index, name=obj.name)\n\n def _apply_blockwise(\n self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None\n ) -> DataFrame | Series:\n \"\"\"\n Apply the given function to the DataFrame broken down into homogeneous\n sub-frames.\n \"\"\"\n if self._selected_obj.ndim == 1:\n return self._apply_series(homogeneous_func, name)\n\n obj = self._create_data(self._selected_obj)\n if name == \"count\":\n # GH 12541: Special case for count where we support date-like types\n obj = notna(obj).astype(int)\n obj._mgr = obj._mgr.consolidate()\n\n def hfunc(values: ArrayLike) -> ArrayLike:\n values = self._prep_values(values)\n return homogeneous_func(values)\n\n if self.axis == 1:\n obj = obj.T\n\n taker = []\n res_values = []\n for i, arr in enumerate(obj._iter_column_arrays()):\n # GH#42736 operate column-wise instead of block-wise\n try:\n res = hfunc(arr)\n except (TypeError, NotImplementedError):\n pass\n else:\n res_values.append(res)\n taker.append(i)\n\n df = type(obj)._from_arrays(\n res_values,\n index=obj.index,\n columns=obj.columns.take(taker),\n verify_integrity=False,\n )\n\n if self.axis == 1:\n df = df.T\n\n if 0 != len(res_values) != len(obj.columns):\n # GH#42738 ignore_failures dropped nuisance columns\n dropped = obj.columns.difference(obj.columns.take(taker))\n warnings.warn(\n \"Dropping of nuisance columns in rolling operations \"\n \"is deprecated; in a future version this will raise TypeError. \"\n \"Select only valid columns before calling the operation. 
\"\n f\"Dropped columns were {dropped}\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n return self._resolve_output(df, obj)\n\n def _apply_tablewise(\n self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None\n ) -> DataFrame | Series:\n \"\"\"\n Apply the given function to the DataFrame across the entire object\n \"\"\"\n if self._selected_obj.ndim == 1:\n raise ValueError(\"method='table' not applicable for Series objects.\")\n obj = self._create_data(self._selected_obj)\n values = self._prep_values(obj.to_numpy())\n values = values.T if self.axis == 1 else values\n result = homogeneous_func(values)\n result = result.T if self.axis == 1 else result\n out = obj._constructor(result, index=obj.index, columns=obj.columns)\n\n return self._resolve_output(out, obj)\n\n def _apply_pairwise(\n self,\n target: DataFrame | Series,\n other: DataFrame | Series | None,\n pairwise: bool | None,\n func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series],\n ) -> DataFrame | Series:\n \"\"\"\n Apply the given pairwise function given 2 pandas objects (DataFrame/Series)\n \"\"\"\n if other is None:\n other = target\n # only default unset\n pairwise = True if pairwise is None else pairwise\n elif not isinstance(other, (ABCDataFrame, ABCSeries)):\n raise ValueError(\"other must be a DataFrame or Series\")\n\n return flex_binary_moment(target, other, func, pairwise=bool(pairwise))\n\n def _apply(\n self,\n func: Callable[..., Any],\n name: str | None = None,\n numba_cache_key: tuple[Callable, str] | None = None,\n numba_args: tuple[Any, ...] = (),\n **kwargs,\n ):\n \"\"\"\n Rolling statistical measure using supplied function.\n\n Designed to be used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : callable function to apply\n name : str,\n numba_cache_key : tuple\n caching key to be used to store a compiled numba func\n numba_args : tuple\n args to be passed when func is a numba func\n **kwargs\n additional arguments for rolling function and window function\n\n Returns\n -------\n y : type of input\n \"\"\"\n window_indexer = self._get_window_indexer()\n min_periods = (\n self.min_periods\n if self.min_periods is not None\n else window_indexer.window_size\n )\n\n def homogeneous_func(values: np.ndarray):\n # calculation function\n\n if values.size == 0:\n return values.copy()\n\n def calc(x):\n start, end = window_indexer.get_window_bounds(\n num_values=len(x),\n min_periods=min_periods,\n center=self.center,\n closed=self.closed,\n )\n assert len(start) == len(\n end\n ), \"these should be equal in length from get_window_bounds\"\n\n return func(x, start, end, min_periods, *numba_args)\n\n with np.errstate(all=\"ignore\"):\n result = calc(values)\n\n if numba_cache_key is not None:\n NUMBA_FUNC_CACHE[numba_cache_key] = func\n\n return result\n\n if self.method == \"single\":\n return self._apply_blockwise(homogeneous_func, name)\n else:\n return self._apply_tablewise(homogeneous_func, name)\n\n def _numba_apply(\n self,\n func: Callable[..., Any],\n numba_cache_key_str: str,\n engine_kwargs: dict[str, bool] | None = None,\n ):\n window_indexer = self._get_window_indexer()\n min_periods = (\n self.min_periods\n if self.min_periods is not None\n else window_indexer.window_size\n )\n obj = self._create_data(self._selected_obj)\n if self.axis == 1:\n obj = obj.T\n values = self._prep_values(obj.to_numpy())\n if values.ndim == 1:\n values = values.reshape(-1, 1)\n start, end = window_indexer.get_window_bounds(\n 
num_values=len(values),\n min_periods=min_periods,\n center=self.center,\n closed=self.closed,\n )\n aggregator = executor.generate_shared_aggregator(\n func, engine_kwargs, numba_cache_key_str\n )\n result = aggregator(values, start, end, min_periods)\n NUMBA_FUNC_CACHE[(func, numba_cache_key_str)] = aggregator\n result = result.T if self.axis == 1 else result\n if obj.ndim == 1:\n result = result.squeeze()\n out = obj._constructor(result, index=obj.index, name=obj.name)\n return out\n else:\n out = obj._constructor(result, index=obj.index, columns=obj.columns)\n return self._resolve_output(out, obj)\n\n def aggregate(self, func, *args, **kwargs):\n result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()\n if result is None:\n return self.apply(func, raw=False, args=args, kwargs=kwargs)\n return result\n\n agg = aggregate\n\n\nclass BaseWindowGroupby(BaseWindow):\n \"\"\"\n Provide the groupby windowing facilities.\n \"\"\"\n\n _grouper: BaseGrouper\n _as_index: bool\n _attributes: list[str] = [\"_grouper\"]\n\n def __init__(\n self,\n obj: DataFrame | Series,\n *args,\n _grouper: BaseGrouper,\n _as_index: bool = True,\n **kwargs,\n ):\n from pandas.core.groupby.ops import BaseGrouper\n\n if not isinstance(_grouper, BaseGrouper):\n raise ValueError(\"Must pass a BaseGrouper object.\")\n self._grouper = _grouper\n self._as_index = _as_index\n # GH 32262: It's convention to keep the grouping column in\n # groupby.<agg_func>, but unexpected to users in\n # groupby.rolling.<agg_func>\n obj = obj.drop(columns=self._grouper.names, errors=\"ignore\")\n super().__init__(obj, *args, **kwargs)\n\n def _apply(\n self,\n func: Callable[..., Any],\n name: str | None = None,\n numba_cache_key: tuple[Callable, str] | None = None,\n numba_args: tuple[Any, ...] 
= (),\n **kwargs,\n ) -> DataFrame | Series:\n result = super()._apply(\n func,\n name,\n numba_cache_key,\n numba_args,\n **kwargs,\n )\n # Reconstruct the resulting MultiIndex\n # 1st set of levels = group by labels\n # 2nd set of levels = original DataFrame/Series index\n grouped_object_index = self.obj.index\n grouped_index_name = [*grouped_object_index.names]\n groupby_keys = copy.copy(self._grouper.names)\n result_index_names = groupby_keys + grouped_index_name\n\n drop_columns = [\n key\n for key in self._grouper.names\n if key not in self.obj.index.names or key is None\n ]\n\n if len(drop_columns) != len(groupby_keys):\n # Our result will have still kept the column in the result\n result = result.drop(columns=drop_columns, errors=\"ignore\")\n\n codes = self._grouper.codes\n levels = copy.copy(self._grouper.levels)\n\n group_indices = self._grouper.indices.values()\n if group_indices:\n indexer = np.concatenate(list(group_indices))\n else:\n indexer = np.array([], dtype=np.intp)\n codes = [c.take(indexer) for c in codes]\n\n # if the index of the original dataframe needs to be preserved, append\n # this index (but reordered) to the codes/levels from the groupby\n if grouped_object_index is not None:\n idx = grouped_object_index.take(indexer)\n if not isinstance(idx, MultiIndex):\n idx = MultiIndex.from_arrays([idx])\n codes.extend(list(idx.codes))\n levels.extend(list(idx.levels))\n\n result_index = MultiIndex(\n levels, codes, names=result_index_names, verify_integrity=False\n )\n\n result.index = result_index\n if not self._as_index:\n result = result.reset_index(level=list(range(len(groupby_keys))))\n return result\n\n def _apply_pairwise(\n self,\n target: DataFrame | Series,\n other: DataFrame | Series | None,\n pairwise: bool | None,\n func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series],\n ) -> DataFrame | Series:\n \"\"\"\n Apply the given pairwise function given 2 pandas objects (DataFrame/Series)\n \"\"\"\n # Manually drop the grouping column first\n target = target.drop(columns=self._grouper.names, errors=\"ignore\")\n target = self._create_data(target)\n result = super()._apply_pairwise(target, other, pairwise, func)\n # 1) Determine the levels + codes of the groupby levels\n if other is not None:\n # When we have other, we must reindex (expand) the result\n # from flex_binary_moment to a \"transform\"-like result\n # per groupby combination\n old_result_len = len(result)\n result = concat(\n [\n result.take(gb_indices).reindex(result.index)\n for gb_indices in self._grouper.indices.values()\n ]\n )\n\n gb_pairs = (\n com.maybe_make_list(pair) for pair in self._grouper.indices.keys()\n )\n groupby_codes = []\n groupby_levels = []\n # e.g. 
[[1, 2], [4, 5]] as [[1, 4], [2, 5]]\n for gb_level_pair in map(list, zip(*gb_pairs)):\n labels = np.repeat(np.array(gb_level_pair), old_result_len)\n codes, levels = factorize(labels)\n groupby_codes.append(codes)\n groupby_levels.append(levels)\n\n else:\n # When we evaluate the pairwise=True result, repeat the groupby\n # labels by the number of columns in the original object\n groupby_codes = self._grouper.codes\n # error: Incompatible types in assignment (expression has type\n # \"List[Index]\", variable has type \"List[Union[ndarray, Index]]\")\n groupby_levels = self._grouper.levels # type: ignore[assignment]\n\n group_indices = self._grouper.indices.values()\n if group_indices:\n indexer = np.concatenate(list(group_indices))\n else:\n indexer = np.array([], dtype=np.intp)\n\n if target.ndim == 1:\n repeat_by = 1\n else:\n repeat_by = len(target.columns)\n groupby_codes = [\n np.repeat(c.take(indexer), repeat_by) for c in groupby_codes\n ]\n # 2) Determine the levels + codes of the result from super()._apply_pairwise\n if isinstance(result.index, MultiIndex):\n result_codes = list(result.index.codes)\n result_levels = list(result.index.levels)\n result_names = list(result.index.names)\n else:\n idx_codes, idx_levels = factorize(result.index)\n result_codes = [idx_codes]\n result_levels = [idx_levels]\n result_names = [result.index.name]\n\n # 3) Create the resulting index by combining 1) + 2)\n result_codes = groupby_codes + result_codes\n result_levels = groupby_levels + result_levels\n result_names = self._grouper.names + result_names\n\n result_index = MultiIndex(\n result_levels, result_codes, names=result_names, verify_integrity=False\n )\n result.index = result_index\n return result\n\n def _create_data(self, obj: NDFrameT) -> NDFrameT:\n \"\"\"\n Split data into blocks & return conformed data.\n \"\"\"\n # Ensure the object we're rolling over is monotonically sorted relative\n # to the groups\n # GH 36197\n if not obj.empty:\n groupby_order = np.concatenate(list(self._grouper.indices.values())).astype(\n np.int64\n )\n obj = obj.take(groupby_order)\n return super()._create_data(obj)\n\n def _gotitem(self, key, ndim, subset=None):\n # we are setting the index on the actual object\n # here so our index is carried through to the selected obj\n # when we do the splitting for the groupby\n if self.on is not None:\n # GH 43355\n subset = self.obj.set_index(self._on)\n return super()._gotitem(key, ndim, subset=subset)\n\n def _validate_monotonic(self):\n \"\"\"\n Validate that \"on\" is monotonic; already validated at a higher level.\n \"\"\"\n pass\n\n\nclass Window(BaseWindow):\n \"\"\"\n Provide rolling window calculations.\n\n Parameters\n ----------\n window : int, offset, or BaseIndexer subclass\n Size of the moving window.\n\n If an integer, the fixed number of observations used for\n each window.\n\n If an offset, the time period of each window. Each\n window will be a variable sized based on the observations included in\n the time-period. This is only valid for datetimelike indexes.\n To learn more about the offsets & frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n If a BaseIndexer subclass, the window boundaries\n based on the defined ``get_window_bounds`` method. 
Additional rolling\n keyword arguments, namely ``min_periods``, ``center``, and\n ``closed`` will be passed to ``get_window_bounds``.\n\n min_periods : int, default None\n Minimum number of observations in window required to have a value;\n otherwise, result is ``np.nan``.\n\n For a window that is specified by an offset, ``min_periods`` will default to 1.\n\n For a window that is specified by an integer, ``min_periods`` will default\n to the size of the window.\n\n center : bool, default False\n If False, set the window labels as the right edge of the window index.\n\n If True, set the window labels as the center of the window index.\n\n win_type : str, default None\n If ``None``, all points are evenly weighted.\n\n If a string, it must be a valid `scipy.signal window function\n <https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__.\n\n Certain Scipy window types require additional parameters to be passed\n in the aggregation function. The additional parameters must match\n the keywords specified in the Scipy window type method signature.\n\n on : str, optional\n For a DataFrame, a column label or Index level on which\n to calculate the rolling window, rather than the DataFrame's index.\n\n Provided integer column is ignored and excluded from result since\n an integer index is not used to calculate the rolling window.\n\n axis : int or str, default 0\n If ``0`` or ``'index'``, roll across the rows.\n\n If ``1`` or ``'columns'``, roll across the columns.\n\n closed : str, default None\n If ``'right'``, the first point in the window is excluded from calculations.\n\n If ``'left'``, the last point in the window is excluded from calculations.\n\n If ``'both'``, the no points in the window are excluded from calculations.\n\n If ``'neither'``, the first and last points in the window are excluded\n from calculations.\n\n Default ``None`` (``'right'``)\n\n .. versionchanged:: 1.2.0\n\n The closed parameter with fixed windows is now supported.\n\n method : str {'single', 'table'}, default 'single'\n\n .. versionadded:: 1.3.0\n\n Execute the rolling operation per single column or row (``'single'``)\n or over the entire object (``'table'``).\n\n This argument is only implemented when specifying ``engine='numba'``\n in the method call.\n\n Returns\n -------\n ``Window`` subclass if a ``win_type`` is passed\n\n ``Rolling`` subclass if ``win_type`` is not passed\n\n See Also\n --------\n expanding : Provides expanding transformations.\n ewm : Provides exponential weighted functions.\n\n Notes\n -----\n See :ref:`Windowing Operations <window.generic>` for further usage details\n and examples.\n\n Examples\n --------\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n >>> df\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n **window**\n\n Rolling sum with a window length of 2 observations.\n\n >>> df.rolling(2).sum()\n B\n 0 NaN\n 1 1.0\n 2 3.0\n 3 NaN\n 4 NaN\n\n Rolling sum with a window span of 2 seconds.\n\n >>> df_time = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},\n ... index = [pd.Timestamp('20130101 09:00:00'),\n ... pd.Timestamp('20130101 09:00:02'),\n ... pd.Timestamp('20130101 09:00:03'),\n ... pd.Timestamp('20130101 09:00:05'),\n ... 
pd.Timestamp('20130101 09:00:06')])\n\n >>> df_time\n B\n 2013-01-01 09:00:00 0.0\n 2013-01-01 09:00:02 1.0\n 2013-01-01 09:00:03 2.0\n 2013-01-01 09:00:05 NaN\n 2013-01-01 09:00:06 4.0\n\n >>> df_time.rolling('2s').sum()\n B\n 2013-01-01 09:00:00 0.0\n 2013-01-01 09:00:02 1.0\n 2013-01-01 09:00:03 3.0\n 2013-01-01 09:00:05 NaN\n 2013-01-01 09:00:06 4.0\n\n Rolling sum with forward looking windows with 2 observations.\n\n >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)\n >>> df.rolling(window=indexer, min_periods=1).sum()\n B\n 0 1.0\n 1 3.0\n 2 2.0\n 3 4.0\n 4 4.0\n\n **min_periods**\n\n Rolling sum with a window length of 2 observations, but only needs a minimum of 1\n observation to calculate a value.\n\n >>> df.rolling(2, min_periods=1).sum()\n B\n 0 0.0\n 1 1.0\n 2 3.0\n 3 2.0\n 4 4.0\n\n **center**\n\n Rolling sum with the result assigned to the center of the window index.\n\n >>> df.rolling(3, min_periods=1, center=True).sum()\n B\n 0 1.0\n 1 3.0\n 2 3.0\n 3 6.0\n 4 4.0\n\n >>> df.rolling(3, min_periods=1, center=False).sum()\n B\n 0 0.0\n 1 1.0\n 2 3.0\n 3 3.0\n 4 6.0\n\n **win_type**\n\n Rolling sum with a window length of 2, using the Scipy ``'gaussian'``\n window type. ``std`` is required in the aggregation function.\n\n >>> df.rolling(2, win_type='gaussian').sum(std=3)\n B\n 0 NaN\n 1 0.986207\n 2 2.958621\n 3 NaN\n 4 NaN\n \"\"\"\n\n _attributes = [\n \"window\",\n \"min_periods\",\n \"center\",\n \"win_type\",\n \"axis\",\n \"on\",\n \"closed\",\n \"method\",\n ]\n\n def _validate(self):\n super()._validate()\n\n if not isinstance(self.win_type, str):\n raise ValueError(f\"Invalid win_type {self.win_type}\")\n signal = import_optional_dependency(\n \"scipy.signal\", extra=\"Scipy is required to generate window weight.\"\n )\n self._scipy_weight_generator = getattr(signal, self.win_type, None)\n if self._scipy_weight_generator is None:\n raise ValueError(f\"Invalid win_type {self.win_type}\")\n\n if isinstance(self.window, BaseIndexer):\n raise NotImplementedError(\n \"BaseIndexer subclasses not implemented with win_types.\"\n )\n elif not is_integer(self.window) or self.window < 0:\n raise ValueError(\"window must be an integer 0 or greater\")\n\n if self.method != \"single\":\n raise NotImplementedError(\"'single' is the only supported method type.\")\n\n def _center_window(self, result: np.ndarray, offset: int) -> np.ndarray:\n \"\"\"\n Center the result in the window for weighted rolling aggregations.\n \"\"\"\n if self.axis > result.ndim - 1:\n raise ValueError(\"Requested axis is larger then no. of argument dimensions\")\n\n if offset > 0:\n lead_indexer = [slice(None)] * result.ndim\n lead_indexer[self.axis] = slice(offset, None)\n result = np.copy(result[tuple(lead_indexer)])\n return result\n\n def _apply(\n self,\n func: Callable[[np.ndarray, int, int], np.ndarray],\n name: str | None = None,\n numba_cache_key: tuple[Callable, str] | None = None,\n numba_args: tuple[Any, ...] 
= (),\n **kwargs,\n ):\n \"\"\"\n Rolling with weights statistical measure using supplied function.\n\n Designed to be used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : callable function to apply\n name : str,\n use_numba_cache : tuple\n unused\n numba_args : tuple\n unused\n **kwargs\n additional arguments for scipy windows if necessary\n\n Returns\n -------\n y : type of input\n \"\"\"\n window = self._scipy_weight_generator(self.window, **kwargs)\n offset = (len(window) - 1) // 2 if self.center else 0\n\n def homogeneous_func(values: np.ndarray):\n # calculation function\n\n if values.size == 0:\n return values.copy()\n\n def calc(x):\n additional_nans = np.array([np.nan] * offset)\n x = np.concatenate((x, additional_nans))\n return func(x, window, self.min_periods or len(window))\n\n with np.errstate(all=\"ignore\"):\n # Our weighted aggregations return memoryviews\n result = np.asarray(calc(values))\n\n if self.center:\n result = self._center_window(result, offset)\n\n return result\n\n return self._apply_blockwise(homogeneous_func, name)\n\n @doc(\n _shared_docs[\"aggregate\"],\n see_also=dedent(\n \"\"\"\n See Also\n --------\n pandas.DataFrame.aggregate : Similar DataFrame method.\n pandas.Series.aggregate : Similar Series method.\n \"\"\"\n ),\n examples=dedent(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6], \"C\": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.rolling(2, win_type=\"boxcar\").agg(\"mean\")\n A B C\n 0 NaN NaN NaN\n 1 1.5 4.5 7.5\n 2 2.5 5.5 8.5\n \"\"\"\n ),\n klass=\"Series/DataFrame\",\n axis=\"\",\n )\n def aggregate(self, func, *args, **kwargs):\n result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg()\n if result is None:\n\n # these must apply directly\n result = func(self)\n\n return result\n\n agg = aggregate\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n kwargs_scipy,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also[:-1],\n window_method=\"rolling\",\n aggregation_description=\"weighted window sum\",\n agg_method=\"sum\",\n )\n def sum(self, *args, **kwargs):\n nv.validate_window_func(\"sum\", args, kwargs)\n window_func = window_aggregations.roll_weighted_sum\n # error: Argument 1 to \"_apply\" of \"Window\" has incompatible type\n # \"Callable[[ndarray, ndarray, int], ndarray]\"; expected\n # \"Callable[[ndarray, int, int], ndarray]\"\n return self._apply(window_func, name=\"sum\", **kwargs) # type: ignore[arg-type]\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n kwargs_scipy,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also[:-1],\n window_method=\"rolling\",\n aggregation_description=\"weighted window mean\",\n agg_method=\"mean\",\n )\n def mean(self, *args, **kwargs):\n nv.validate_window_func(\"mean\", args, kwargs)\n window_func = window_aggregations.roll_weighted_mean\n # error: Argument 1 to \"_apply\" of \"Window\" has incompatible type\n # \"Callable[[ndarray, ndarray, int], ndarray]\"; expected\n # \"Callable[[ndarray, int, int], ndarray]\"\n return self._apply(window_func, name=\"mean\", **kwargs) # type: ignore[arg-type]\n\n @doc(\n template_header,\n \".. 
versionadded:: 1.0.0 \\n\\n\",\n create_section_header(\"Parameters\"),\n kwargs_scipy,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also[:-1],\n window_method=\"rolling\",\n aggregation_description=\"weighted window variance\",\n agg_method=\"var\",\n )\n def var(self, ddof: int = 1, *args, **kwargs):\n nv.validate_window_func(\"var\", args, kwargs)\n window_func = partial(window_aggregations.roll_weighted_var, ddof=ddof)\n kwargs.pop(\"name\", None)\n return self._apply(window_func, name=\"var\", **kwargs)\n\n @doc(\n template_header,\n \".. versionadded:: 1.0.0 \\n\\n\",\n create_section_header(\"Parameters\"),\n kwargs_scipy,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also[:-1],\n window_method=\"rolling\",\n aggregation_description=\"weighted window standard deviation\",\n agg_method=\"std\",\n )\n def std(self, ddof: int = 1, *args, **kwargs):\n nv.validate_window_func(\"std\", args, kwargs)\n return zsqrt(self.var(ddof=ddof, name=\"std\", **kwargs))\n\n\nclass RollingAndExpandingMixin(BaseWindow):\n def count(self):\n window_func = window_aggregations.roll_sum\n return self._apply(window_func, name=\"count\")\n\n def apply(\n self,\n func: Callable[..., Any],\n raw: bool = False,\n engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n args: tuple[Any, ...] | None = None,\n kwargs: dict[str, Any] | None = None,\n ):\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n\n if not is_bool(raw):\n raise ValueError(\"raw parameter must be `True` or `False`\")\n\n numba_cache_key = None\n numba_args: tuple[Any, ...] = ()\n if maybe_use_numba(engine):\n if raw is False:\n raise ValueError(\"raw must be `True` when using the numba engine\")\n caller_name = type(self).__name__\n numba_args = args\n if self.method == \"single\":\n apply_func = generate_numba_apply_func(\n kwargs, func, engine_kwargs, caller_name\n )\n numba_cache_key = (func, f\"{caller_name}_apply_single\")\n else:\n apply_func = generate_numba_table_func(\n kwargs, func, engine_kwargs, f\"{caller_name}_apply\"\n )\n numba_cache_key = (func, f\"{caller_name}_apply_table\")\n elif engine in (\"cython\", None):\n if engine_kwargs is not None:\n raise ValueError(\"cython engine does not accept engine_kwargs\")\n apply_func = self._generate_cython_apply_func(args, kwargs, raw, func)\n else:\n raise ValueError(\"engine must be either 'numba' or 'cython'\")\n\n return self._apply(\n apply_func,\n numba_cache_key=numba_cache_key,\n numba_args=numba_args,\n )\n\n def _generate_cython_apply_func(\n self,\n args: tuple[Any, ...],\n kwargs: dict[str, Any],\n raw: bool,\n function: Callable[..., Any],\n ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, int], np.ndarray]:\n from pandas import Series\n\n window_func = partial(\n window_aggregations.roll_apply,\n args=args,\n kwargs=kwargs,\n raw=raw,\n function=function,\n )\n\n def apply_func(values, begin, end, min_periods, raw=raw):\n if not raw:\n values = Series(values, index=self.obj.index)\n return window_func(values, begin, end, min_periods)\n\n return apply_func\n\n def sum(\n self,\n *args,\n engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n **kwargs,\n ):\n nv.validate_window_func(\"sum\", args, kwargs)\n if maybe_use_numba(engine):\n if self.method == \"table\":\n func = generate_manual_numpy_nan_agg_with_axis(np.nansum)\n return self.apply(\n func,\n raw=True,\n engine=engine,\n 
engine_kwargs=engine_kwargs,\n )\n else:\n from pandas.core._numba.kernels import sliding_sum\n\n return self._numba_apply(sliding_sum, \"rolling_sum\", engine_kwargs)\n window_func = window_aggregations.roll_sum\n return self._apply(window_func, name=\"sum\", **kwargs)\n\n def max(\n self,\n *args,\n engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n **kwargs,\n ):\n nv.validate_window_func(\"max\", args, kwargs)\n if maybe_use_numba(engine):\n if self.method == \"table\":\n func = generate_manual_numpy_nan_agg_with_axis(np.nanmax)\n else:\n func = np.nanmax\n\n return self.apply(\n func,\n raw=True,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n window_func = window_aggregations.roll_max\n return self._apply(window_func, name=\"max\", **kwargs)\n\n def min(\n self,\n *args,\n engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n **kwargs,\n ):\n nv.validate_window_func(\"min\", args, kwargs)\n if maybe_use_numba(engine):\n if self.method == \"table\":\n func = generate_manual_numpy_nan_agg_with_axis(np.nanmin)\n else:\n func = np.nanmin\n\n return self.apply(\n func,\n raw=True,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n window_func = window_aggregations.roll_min\n return self._apply(window_func, name=\"min\", **kwargs)\n\n def mean(\n self,\n *args,\n engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n **kwargs,\n ):\n nv.validate_window_func(\"mean\", args, kwargs)\n if maybe_use_numba(engine):\n if self.method == \"table\":\n func = generate_manual_numpy_nan_agg_with_axis(np.nanmean)\n return self.apply(\n func,\n raw=True,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n else:\n from pandas.core._numba.kernels import sliding_mean\n\n return self._numba_apply(sliding_mean, \"rolling_mean\", engine_kwargs)\n window_func = window_aggregations.roll_mean\n return self._apply(window_func, name=\"mean\", **kwargs)\n\n def median(\n self,\n engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n **kwargs,\n ):\n if maybe_use_numba(engine):\n if self.method == \"table\":\n func = generate_manual_numpy_nan_agg_with_axis(np.nanmedian)\n else:\n func = np.nanmedian\n\n return self.apply(\n func,\n raw=True,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n window_func = window_aggregations.roll_median_c\n return self._apply(window_func, name=\"median\", **kwargs)\n\n def std(self, ddof: int = 1, *args, **kwargs):\n nv.validate_window_func(\"std\", args, kwargs)\n window_func = window_aggregations.roll_var\n\n def zsqrt_func(values, begin, end, min_periods):\n return zsqrt(window_func(values, begin, end, min_periods, ddof=ddof))\n\n return self._apply(\n zsqrt_func,\n name=\"std\",\n **kwargs,\n )\n\n def var(self, ddof: int = 1, *args, **kwargs):\n nv.validate_window_func(\"var\", args, kwargs)\n window_func = partial(window_aggregations.roll_var, ddof=ddof)\n return self._apply(\n window_func,\n name=\"var\",\n **kwargs,\n )\n\n def skew(self, **kwargs):\n window_func = window_aggregations.roll_skew\n return self._apply(\n window_func,\n name=\"skew\",\n **kwargs,\n )\n\n def sem(self, ddof: int = 1, *args, **kwargs):\n return self.std(*args, **kwargs) / (self.count() - ddof).pow(0.5)\n\n def kurt(self, **kwargs):\n window_func = window_aggregations.roll_kurt\n return self._apply(\n window_func,\n name=\"kurt\",\n **kwargs,\n )\n\n def quantile(self, quantile: float, interpolation: str = \"linear\", **kwargs):\n if quantile == 1.0:\n window_func = 
window_aggregations.roll_max\n elif quantile == 0.0:\n window_func = window_aggregations.roll_min\n else:\n window_func = partial(\n window_aggregations.roll_quantile,\n quantile=quantile,\n interpolation=interpolation,\n )\n\n return self._apply(window_func, name=\"quantile\", **kwargs)\n\n def rank(\n self,\n method: WindowingRankType = \"average\",\n ascending: bool = True,\n pct: bool = False,\n **kwargs,\n ):\n window_func = partial(\n window_aggregations.roll_rank,\n method=method,\n ascending=ascending,\n percentile=pct,\n )\n\n return self._apply(window_func, name=\"rank\", **kwargs)\n\n def cov(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n ddof: int = 1,\n **kwargs,\n ):\n from pandas import Series\n\n def cov_func(x, y):\n x_array = self._prep_values(x)\n y_array = self._prep_values(y)\n window_indexer = self._get_window_indexer()\n min_periods = (\n self.min_periods\n if self.min_periods is not None\n else window_indexer.window_size\n )\n start, end = window_indexer.get_window_bounds(\n num_values=len(x_array),\n min_periods=min_periods,\n center=self.center,\n closed=self.closed,\n )\n\n assert len(start) == len(\n end\n ), \"these should be equal in length from get_window_bounds\"\n\n with np.errstate(all=\"ignore\"):\n mean_x_y = window_aggregations.roll_mean(\n x_array * y_array, start, end, min_periods\n )\n mean_x = window_aggregations.roll_mean(x_array, start, end, min_periods)\n mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods)\n count_x_y = window_aggregations.roll_sum(\n notna(x_array + y_array).astype(np.float64), start, end, 0\n )\n result = (mean_x_y - mean_x * mean_y) * (count_x_y / (count_x_y - ddof))\n return Series(result, index=x.index, name=x.name)\n\n return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)\n\n def corr(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n ddof: int = 1,\n **kwargs,\n ):\n\n from pandas import Series\n\n def corr_func(x, y):\n x_array = self._prep_values(x)\n y_array = self._prep_values(y)\n window_indexer = self._get_window_indexer()\n min_periods = (\n self.min_periods\n if self.min_periods is not None\n else window_indexer.window_size\n )\n start, end = window_indexer.get_window_bounds(\n num_values=len(x_array),\n min_periods=min_periods,\n center=self.center,\n closed=self.closed,\n )\n\n assert len(start) == len(\n end\n ), \"these should be equal in length from get_window_bounds\"\n\n with np.errstate(all=\"ignore\"):\n mean_x_y = window_aggregations.roll_mean(\n x_array * y_array, start, end, min_periods\n )\n mean_x = window_aggregations.roll_mean(x_array, start, end, min_periods)\n mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods)\n count_x_y = window_aggregations.roll_sum(\n notna(x_array + y_array).astype(np.float64), start, end, 0\n )\n x_var = window_aggregations.roll_var(\n x_array, start, end, min_periods, ddof\n )\n y_var = window_aggregations.roll_var(\n y_array, start, end, min_periods, ddof\n )\n numerator = (mean_x_y - mean_x * mean_y) * (\n count_x_y / (count_x_y - ddof)\n )\n denominator = (x_var * y_var) ** 0.5\n result = numerator / denominator\n return Series(result, index=x.index, name=x.name)\n\n return self._apply_pairwise(self._selected_obj, other, pairwise, corr_func)\n\n\nclass Rolling(RollingAndExpandingMixin):\n\n _attributes: list[str] = [\n \"window\",\n \"min_periods\",\n \"center\",\n \"win_type\",\n \"axis\",\n \"on\",\n \"closed\",\n \"method\",\n ]\n\n 
def _validate(self):\n super()._validate()\n\n # we allow rolling on a datetimelike index\n if (\n self.obj.empty\n or isinstance(self._on, (DatetimeIndex, TimedeltaIndex, PeriodIndex))\n ) and isinstance(self.window, (str, BaseOffset, timedelta)):\n\n self._validate_monotonic()\n\n # this will raise ValueError on non-fixed freqs\n try:\n freq = to_offset(self.window)\n except (TypeError, ValueError) as err:\n raise ValueError(\n f\"passed window {self.window} is not \"\n \"compatible with a datetimelike index\"\n ) from err\n if isinstance(self._on, PeriodIndex):\n self._win_freq_i8 = freq.nanos / (self._on.freq.nanos / self._on.freq.n)\n else:\n self._win_freq_i8 = freq.nanos\n\n # min_periods must be an integer\n if self.min_periods is None:\n self.min_periods = 1\n\n elif isinstance(self.window, BaseIndexer):\n # Passed BaseIndexer subclass should handle all other rolling kwargs\n return\n elif not is_integer(self.window) or self.window < 0:\n raise ValueError(\"window must be an integer 0 or greater\")\n\n def _validate_monotonic(self):\n \"\"\"\n Validate monotonic (increasing or decreasing).\n \"\"\"\n if not (self._on.is_monotonic_increasing or self._on.is_monotonic_decreasing):\n self._raise_monotonic_error()\n\n def _raise_monotonic_error(self):\n formatted = self.on\n if self.on is None:\n formatted = \"index\"\n raise ValueError(f\"{formatted} must be monotonic\")\n\n @doc(\n _shared_docs[\"aggregate\"],\n see_also=dedent(\n \"\"\"\n See Also\n --------\n pandas.Series.rolling : Calling object with Series data.\n pandas.DataFrame.rolling : Calling object with DataFrame data.\n \"\"\"\n ),\n examples=dedent(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6], \"C\": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.rolling(2).sum()\n A B C\n 0 NaN NaN NaN\n 1 3.0 9.0 15.0\n 2 5.0 11.0 17.0\n\n >>> df.rolling(2).agg({\"A\": \"sum\", \"B\": \"min\"})\n A B\n 0 NaN NaN\n 1 3.0 4.0\n 2 5.0 5.0\n \"\"\"\n ),\n klass=\"Series/Dataframe\",\n axis=\"\",\n )\n def aggregate(self, func, *args, **kwargs):\n return super().aggregate(func, *args, **kwargs)\n\n agg = aggregate\n\n @doc(\n template_header,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also,\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n >>> s = pd.Series([2, 3, np.nan, 10])\n >>> s.rolling(2).count()\n 0 1.0\n 1 2.0\n 2 1.0\n 3 1.0\n dtype: float64\n >>> s.rolling(3).count()\n 0 1.0\n 1 2.0\n 2 2.0\n 3 2.0\n dtype: float64\n >>> s.rolling(4).count()\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n dtype: float64\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"count of non NaN observations\",\n agg_method=\"count\",\n )\n def count(self):\n if self.min_periods is None:\n warnings.warn(\n (\n \"min_periods=None will default to the size of window \"\n \"consistent with other methods in a future version. 
\"\n \"Specify min_periods=0 instead.\"\n ),\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n self.min_periods = 0\n result = super().count()\n self.min_periods = None\n else:\n result = super().count()\n return result\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n window_apply_parameters,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also[:-1],\n window_method=\"rolling\",\n aggregation_description=\"custom aggregation function\",\n agg_method=\"apply\",\n )\n def apply(\n self,\n func: Callable[..., Any],\n raw: bool = False,\n engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n args: tuple[Any, ...] | None = None,\n kwargs: dict[str, Any] | None = None,\n ):\n return super().apply(\n func,\n raw=raw,\n engine=engine,\n engine_kwargs=engine_kwargs,\n args=args,\n kwargs=kwargs,\n )\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n args_compat,\n window_agg_numba_parameters,\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also,\n create_section_header(\"Notes\"),\n numba_notes,\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n >>> s = pd.Series([1, 2, 3, 4, 5])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> s.rolling(3).sum()\n 0 NaN\n 1 NaN\n 2 6.0\n 3 9.0\n 4 12.0\n dtype: float64\n\n >>> s.rolling(3, center=True).sum()\n 0 NaN\n 1 6.0\n 2 9.0\n 3 12.0\n 4 NaN\n dtype: float64\n\n For DataFrame, each sum is computed column-wise.\n\n >>> df = pd.DataFrame({{\"A\": s, \"B\": s ** 2}})\n >>> df\n A B\n 0 1 1\n 1 2 4\n 2 3 9\n 3 4 16\n 4 5 25\n\n >>> df.rolling(3).sum()\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 6.0 14.0\n 3 9.0 29.0\n 4 12.0 50.0\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"sum\",\n agg_method=\"sum\",\n )\n def sum(\n self,\n *args,\n engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n **kwargs,\n ):\n nv.validate_rolling_func(\"sum\", args, kwargs)\n return super().sum(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n args_compat,\n window_agg_numba_parameters,\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also,\n create_section_header(\"Notes\"),\n numba_notes[:-1],\n window_method=\"rolling\",\n aggregation_description=\"maximum\",\n agg_method=\"max\",\n )\n def max(\n self,\n *args,\n engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n **kwargs,\n ):\n nv.validate_rolling_func(\"max\", args, kwargs)\n return super().max(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n args_compat,\n window_agg_numba_parameters,\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also,\n create_section_header(\"Notes\"),\n numba_notes,\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n Performing a rolling minimum with a window size of 3.\n\n >>> s = pd.Series([4, 3, 5, 2, 6])\n >>> s.rolling(3).min()\n 0 NaN\n 1 NaN\n 2 3.0\n 3 2.0\n 4 2.0\n dtype: float64\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"minimum\",\n agg_method=\"min\",\n )\n def min(\n self,\n *args,\n 
engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n **kwargs,\n ):\n nv.validate_rolling_func(\"min\", args, kwargs)\n return super().min(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n args_compat,\n window_agg_numba_parameters,\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also,\n create_section_header(\"Notes\"),\n numba_notes,\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n The below examples will show rolling mean calculations with window sizes of\n two and three, respectively.\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.rolling(2).mean()\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n\n >>> s.rolling(3).mean()\n 0 NaN\n 1 NaN\n 2 2.0\n 3 3.0\n dtype: float64\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"mean\",\n agg_method=\"mean\",\n )\n def mean(\n self,\n *args,\n engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n **kwargs,\n ):\n nv.validate_rolling_func(\"mean\", args, kwargs)\n return super().mean(*args, engine=engine, engine_kwargs=engine_kwargs, **kwargs)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n window_agg_numba_parameters,\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also,\n create_section_header(\"Notes\"),\n numba_notes,\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n Compute the rolling median of a series with a window size of 3.\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.rolling(3).median()\n 0 NaN\n 1 NaN\n 2 1.0\n 3 2.0\n 4 3.0\n dtype: float64\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"median\",\n agg_method=\"median\",\n )\n def median(\n self,\n engine: str | None = None,\n engine_kwargs: dict[str, bool] | None = None,\n **kwargs,\n ):\n return super().median(engine=engine, engine_kwargs=engine_kwargs, **kwargs)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n dedent(\n \"\"\"\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n args_compat,\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n \"numpy.std : Equivalent method for NumPy array.\\n\",\n template_see_also,\n create_section_header(\"Notes\"),\n dedent(\n \"\"\"\n The default ``ddof`` of 1 used in :meth:`Series.std` is different\n than the default ``ddof`` of 0 in :func:`numpy.std`.\n\n A minimum of one period is required for the rolling calculation.\n\n The implementation is susceptible to floating point imprecision as\n shown in the example below.\\n\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).std()\n 0 NaN\n 1 NaN\n 2 5.773503e-01\n 3 1.000000e+00\n 4 1.000000e+00\n 5 1.154701e+00\n 6 2.580957e-08\n dtype: float64\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"standard deviation\",\n agg_method=\"std\",\n )\n def std(self, ddof: int = 1, *args, **kwargs):\n nv.validate_rolling_func(\"std\", args, kwargs)\n return super().std(ddof=ddof, **kwargs)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n dedent(\n \"\"\"\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n args_compat,\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n \"numpy.var : Equivalent method for NumPy array.\\n\",\n template_see_also,\n create_section_header(\"Notes\"),\n dedent(\n \"\"\"\n The default ``ddof`` of 1 used in :meth:`Series.var` is different\n than the default ``ddof`` of 0 in :func:`numpy.var`.\n\n A minimum of one period is required for the rolling calculation.\n\n The implementation is susceptible to floating point imprecision as\n shown in the example below.\\n\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).var()\n 0 NaN\n 1 NaN\n 2 3.333333e-01\n 3 1.000000e+00\n 4 1.000000e+00\n 5 1.333333e+00\n 6 6.661338e-16\n dtype: float64\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"variance\",\n agg_method=\"var\",\n )\n def var(self, ddof: int = 1, *args, **kwargs):\n nv.validate_rolling_func(\"var\", args, kwargs)\n return super().var(ddof=ddof, **kwargs)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n \"scipy.stats.skew : Third moment of a probability density.\\n\",\n template_see_also,\n create_section_header(\"Notes\"),\n \"A minimum of three periods is required for the rolling calculation.\\n\",\n window_method=\"rolling\",\n aggregation_description=\"unbiased skewness\",\n agg_method=\"skew\",\n )\n def skew(self, **kwargs):\n return super().skew(**kwargs)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n dedent(\n \"\"\"\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n args_compat,\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also,\n create_section_header(\"Notes\"),\n \"A minimum of one period is required for the calculation.\\n\\n\",\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n >>> s = pd.Series([0, 1, 2, 3])\n >>> s.rolling(2, min_periods=1).sem()\n 0 NaN\n 1 0.707107\n 2 0.707107\n 3 0.707107\n dtype: float64\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"standard error of mean\",\n agg_method=\"sem\",\n )\n def sem(self, ddof: int = 1, *args, **kwargs):\n return self.std(*args, **kwargs) / (self.count() - ddof).pow(0.5)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n \"scipy.stats.kurtosis : Reference SciPy method.\\n\",\n template_see_also,\n create_section_header(\"Notes\"),\n \"A minimum of four periods is required for the calculation.\\n\\n\",\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n The example below will show a rolling calculation with a window size of\n four matching the equivalent function call using `scipy.stats`.\n\n >>> arr = [1, 2, 3, 4, 999]\n >>> import scipy.stats\n >>> print(f\"{{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}}\")\n -1.200000\n >>> print(f\"{{scipy.stats.kurtosis(arr[1:], bias=False):.6f}}\")\n 3.999946\n >>> s = pd.Series(arr)\n >>> s.rolling(4).kurt()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 -1.200000\n 4 3.999946\n dtype: float64\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"Fisher's definition of kurtosis without bias\",\n agg_method=\"kurt\",\n )\n def kurt(self, **kwargs):\n return super().kurt(**kwargs)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n dedent(\n \"\"\"\n quantile : float\n Quantile to compute. 0 <= quantile <= 1.\n interpolation : {{'linear', 'lower', 'higher', 'midpoint', 'nearest'}}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also,\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.rolling(2).quantile(.4, interpolation='lower')\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n >>> s.rolling(2).quantile(.4, interpolation='midpoint')\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"quantile\",\n agg_method=\"quantile\",\n )\n def quantile(self, quantile: float, interpolation: str = \"linear\", **kwargs):\n return super().quantile(\n quantile=quantile,\n interpolation=interpolation,\n **kwargs,\n )\n\n @doc(\n template_header,\n \".. 
versionadded:: 1.4.0 \\n\\n\",\n create_section_header(\"Parameters\"),\n dedent(\n \"\"\"\n method : {{'average', 'min', 'max'}}, default 'average'\n How to rank the group of records that have the same value (i.e. ties):\n\n * average: average rank of the group\n * min: lowest rank in the group\n * max: highest rank in the group\n\n ascending : bool, default True\n Whether or not the elements should be ranked in ascending order.\n pct : bool, default False\n Whether or not to display the returned rankings in percentile\n form.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also,\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n >>> s = pd.Series([1, 4, 2, 3, 5, 3])\n >>> s.rolling(3).rank()\n 0 NaN\n 1 NaN\n 2 2.0\n 3 2.0\n 4 3.0\n 5 1.5\n dtype: float64\n\n >>> s.rolling(3).rank(method=\"max\")\n 0 NaN\n 1 NaN\n 2 2.0\n 3 2.0\n 4 3.0\n 5 2.0\n dtype: float64\n\n >>> s.rolling(3).rank(method=\"min\")\n 0 NaN\n 1 NaN\n 2 2.0\n 3 2.0\n 4 3.0\n 5 1.0\n dtype: float64\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"rank\",\n agg_method=\"rank\",\n )\n def rank(\n self,\n method: WindowingRankType = \"average\",\n ascending: bool = True,\n pct: bool = False,\n **kwargs,\n ):\n return super().rank(\n method=method,\n ascending=ascending,\n pct=pct,\n **kwargs,\n )\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n dedent(\n \"\"\"\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n template_see_also[:-1],\n window_method=\"rolling\",\n aggregation_description=\"sample covariance\",\n agg_method=\"cov\",\n )\n def cov(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n ddof: int = 1,\n **kwargs,\n ):\n return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)\n\n @doc(\n template_header,\n create_section_header(\"Parameters\"),\n dedent(\n \"\"\"\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n kwargs_compat,\n create_section_header(\"Returns\"),\n template_returns,\n create_section_header(\"See Also\"),\n dedent(\n \"\"\"\n cov : Similar method to calculate covariance.\n numpy.corrcoef : NumPy Pearson's correlation calculation.\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n template_see_also,\n create_section_header(\"Notes\"),\n dedent(\n \"\"\"\n This function uses Pearson's definition of correlation\n (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).\n\n When `other` is not specified, the output will be self correlation (e.g.\n all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`\n set to `True`.\n\n Function will return ``NaN`` for correlations of equal valued sequences;\n this is the result of a 0/0 division error.\n\n When `pairwise` is set to `False`, only matching columns between `self` and\n `other` will be used.\n\n When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame\n with the original index on the first level, and the `other` DataFrame\n columns on the second level.\n\n In the case of missing elements, only complete pairwise observations\n will be used.\\n\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n create_section_header(\"Examples\"),\n dedent(\n \"\"\"\n The below example shows a rolling calculation with a window size of\n four matching the equivalent function call using :meth:`numpy.corrcoef`.\n\n >>> v1 = [3, 3, 3, 5, 8]\n >>> v2 = [3, 4, 4, 4, 8]\n >>> # numpy returns a 2X2 array, the correlation coefficient\n >>> # is the number at entry [0][1]\n >>> print(f\"{{np.corrcoef(v1[:-1], v2[:-1])[0][1]:.6f}}\")\n 0.333333\n >>> print(f\"{{np.corrcoef(v1[1:], v2[1:])[0][1]:.6f}}\")\n 0.916949\n >>> s1 = pd.Series(v1)\n >>> s2 = pd.Series(v2)\n >>> s1.rolling(4).corr(s2)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 0.333333\n 4 0.916949\n dtype: float64\n\n The below example shows a similar rolling calculation on a\n DataFrame using the pairwise option.\n\n >>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\\\n [46., 31.], [50., 36.]])\n >>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))\n [[1. 0.6263001]\n [0.6263001 1. ]]\n >>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))\n [[1. 0.5553681]\n [0.5553681 1. 
]]\n >>> df = pd.DataFrame(matrix, columns=['X','Y'])\n >>> df\n X Y\n 0 51.0 35.0\n 1 49.0 30.0\n 2 47.0 32.0\n 3 46.0 31.0\n 4 50.0 36.0\n >>> df.rolling(4).corr(pairwise=True)\n X Y\n 0 X NaN NaN\n Y NaN NaN\n 1 X NaN NaN\n Y NaN NaN\n 2 X NaN NaN\n Y NaN NaN\n 3 X 1.000000 0.626300\n Y 0.626300 1.000000\n 4 X 1.000000 0.555368\n Y 0.555368 1.000000\n \"\"\"\n ).replace(\"\\n\", \"\", 1),\n window_method=\"rolling\",\n aggregation_description=\"correlation\",\n agg_method=\"corr\",\n )\n def corr(\n self,\n other: DataFrame | Series | None = None,\n pairwise: bool | None = None,\n ddof: int = 1,\n **kwargs,\n ):\n return super().corr(other=other, pairwise=pairwise, ddof=ddof, **kwargs)\n\n\nRolling.__doc__ = Window.__doc__\n\n\nclass RollingGroupby(BaseWindowGroupby, Rolling):\n \"\"\"\n Provide a rolling groupby implementation.\n \"\"\"\n\n _attributes = Rolling._attributes + BaseWindowGroupby._attributes\n\n def _get_window_indexer(self) -> GroupbyIndexer:\n \"\"\"\n Return an indexer class that will compute the window start and end bounds\n\n Returns\n -------\n GroupbyIndexer\n \"\"\"\n rolling_indexer: type[BaseIndexer]\n indexer_kwargs: dict[str, Any] | None = None\n index_array = self._index_array\n if isinstance(self.window, BaseIndexer):\n rolling_indexer = type(self.window)\n indexer_kwargs = self.window.__dict__.copy()\n assert isinstance(indexer_kwargs, dict) # for mypy\n # We'll be using the index of each group later\n indexer_kwargs.pop(\"index_array\", None)\n window = self.window\n elif self._win_freq_i8 is not None:\n rolling_indexer = VariableWindowIndexer\n window = self._win_freq_i8\n else:\n rolling_indexer = FixedWindowIndexer\n window = self.window\n window_indexer = GroupbyIndexer(\n index_array=index_array,\n window_size=window,\n groupby_indices=self._grouper.indices,\n window_indexer=rolling_indexer,\n indexer_kwargs=indexer_kwargs,\n )\n return window_indexer\n\n def _validate_monotonic(self):\n \"\"\"\n Validate that on is monotonic;\n in this case we have to check only for nans, because\n monotonicity was already validated at a higher level.\n \"\"\"\n if self._on.hasnans:\n self._raise_monotonic_error()\n"
] | [
[
"pandas.core.window.numba_.generate_numba_apply_func",
"pandas.Series",
"pandas.compat.numpy.function.validate_rolling_func",
"pandas.core.apply.ResamplerWindowApply",
"pandas.core.dtypes.common.is_integer",
"pandas.core.indexes.api.MultiIndex.from_arrays",
"pandas.core.dtypes.common.is_list_like",
"pandas._libs.window.aggregations.roll_mean",
"pandas.core.indexers.objects.FixedWindowIndexer",
"pandas.compat.numpy.function.validate_window_func",
"pandas._libs.window.aggregations.roll_var",
"pandas.core._numba.executor.generate_shared_aggregator",
"pandas.core.window.numba_.generate_manual_numpy_nan_agg_with_axis",
"pandas._libs.tslibs.to_offset",
"pandas.core.window.numba_.generate_numba_table_func",
"pandas.core.indexes.api.Index",
"pandas.core.dtypes.missing.notna",
"numpy.where",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.util._exceptions.find_stack_level",
"pandas.core.base.DataError",
"pandas.core.indexers.objects.BaseIndexer",
"pandas.core.indexers.objects.GroupbyIndexer",
"pandas.core.indexes.api.MultiIndex",
"pandas.core.algorithms.factorize",
"pandas.core.dtypes.common.ensure_float64",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_bool",
"pandas.core.common.maybe_make_list",
"numpy.isinf",
"numpy.errstate",
"pandas.compat._optional.import_optional_dependency",
"pandas.core.window.doc.create_section_header",
"pandas.core.util.numba_.maybe_use_numba",
"numpy.array",
"numpy.concatenate",
"pandas.core.indexers.objects.VariableWindowIndexer"
]
] |
AvantiShri/dragonn | [
"aeb9674f39b71d07ff62d2c3745bef4a2e55b95f"
] | [
"dragonn/positional_prc.py"
] | [
"from dragonn.utils import rolling_window\nimport numpy as np\nfrom sklearn.metrics import auc, precision_recall_curve\nimport matplotlib.pyplot as plt\nimport pdb\nfrom keras import backend as K\nimport tensorflow as tf\n\n \n\ndef positionalPRC(embeddings, scores,window_stride=1, coverage_thresh_for_positive=0.8,verbose=False):\n '''\n window_stride: number of bases to shift adjacent sequence windows by; default=1\n coverage_thresh_for_positive: sequence window must overlap the motif by this fraction (0 - 1) for the window to be labeled positive. \n embeddings: the list of motif embeddings from simulation data (most likely from simulation_data.valid_embeddings)\n scores: a 2-d numpy array (number of sequences x length of sequence) Generally one of: motif scan scores, ISM scores, gradient x input scores, deepLIFT scores for each sequence in the dataset \n\n returns: dictionary of motif_name-->[precision, recall, auPRC]\n '''\n #we concatenate across all sequences in the input dataset\n assert scores.shape[0]==len(embeddings)\n if len(scores.shape)>2:\n scores=np.squeeze(scores)\n \n #get the length of input sequences in the dataset \n seq_length=scores.shape[1]\n\n #keep lists of labels and predictions for each embedded entity\n all_prc_inputs={} \n\n #iterate through all input sequences \n for i in range(len(embeddings)):\n seq_embeddings=embeddings[i]\n seq_scores=scores[i]\n seq_prc_inputs=dict() \n \n #sequence may have multiple embeddings\n for embedding in seq_embeddings:\n motif_length=len(embedding.what.string)\n motif_name=embedding.what.stringDescription\n embedding_start_pos=embedding.startPos \n \n if motif_name not in all_prc_inputs:\n all_prc_inputs[motif_name]=dict()\n all_prc_inputs[motif_name]['labels']=[]\n all_prc_inputs[motif_name]['scores']=[]\n\n if motif_name not in seq_prc_inputs:\n seq_prc_inputs[motif_name]=dict()\n seq_prc_inputs[motif_name]['scores']=np.sum(rolling_window(seq_scores,motif_length),axis=1)\n seq_prc_inputs[motif_name]['labels']=None\n #label the window that starts at the embedding start position with 1.\n tmp_label_array=np.zeros((seq_length,))\n tmp_label_array[embedding_start_pos:embedding_start_pos+motif_length]=1\n tmp_label_windows=np.sum(rolling_window(tmp_label_array,motif_length),axis=1)\n min_window_sum=coverage_thresh_for_positive*motif_length \n #ambiguous windows are designated with 0.5 to allow for use of np.maximum below \n tmp_label_windows[(tmp_label_windows>0) & (tmp_label_windows<min_window_sum)]=0.5\n tmp_label_windows[tmp_label_windows>=min_window_sum]=1 #positive\n \n if type(seq_prc_inputs[motif_name]['labels']) is not np.ndarray:\n seq_prc_inputs[motif_name]['labels']=tmp_label_windows\n else:\n seq_prc_inputs[motif_name]['labels']=np.maximum(seq_prc_inputs[motif_name]['labels'],tmp_label_windows)\n #update the dictionary of PRC inputs concatenated across sequences\n for motif_name in seq_prc_inputs.keys():\n #drop any ambiguous indices\n non_ambiguous_indices=np.where(seq_prc_inputs[motif_name]['labels']!=0.5)\n #print(max(seq_prc_inputs[motif_name]['labels']))\n if(verbose==True):\n print(motif_name)\n print(\"labels:\"+str(seq_prc_inputs[motif_name]['labels']))\n print(\"scores:\"+str(seq_prc_inputs[motif_name]['scores']))\n all_prc_inputs[motif_name]['labels']+=list(seq_prc_inputs[motif_name]['labels'][non_ambiguous_indices])\n all_prc_inputs[motif_name]['scores']+=list(seq_prc_inputs[motif_name]['scores'][non_ambiguous_indices])\n\n #calculate the PRC values and auPRC\n prc_values=dict()\n for motif_name in 
all_prc_inputs:\n labels=all_prc_inputs[motif_name]['labels']\n scores=all_prc_inputs[motif_name]['scores']\n #we normalize the scores to a 0-1 range for sklean prc function\n normalized_scores = (scores-min(scores))/(max(scores)-min(scores))\n precision, recall = precision_recall_curve(labels, normalized_scores)[:2]\n #sort\n sorted_prc=sorted(zip(recall,precision))\n recall=[prc_val[0] for prc_val in sorted_prc]\n precision=[prc_val[1] for prc_val in sorted_prc]\n auPRC=auc(recall,precision)\n prc_values[motif_name]=[recall,precision,auPRC]\n return prc_values\n \n\n \ndef plot_positionalPRC(positionalPRC_output):\n '''\n accepts output dictionary from the positionalPRC function of the form: motif_name --> [precision,recall,auPRC] \n generates PRC curves for each motif on same coordinates \n '''\n from sklearn.utils.fixes import signature\n for motif_name,values in positionalPRC_output.items():\n recall=values[0]\n precision=values[1]\n auPRC=str(round(values[2],3))\n step_kwargs = ({'step': 'post'}\n if 'step' in signature(plt.fill_between).parameters\n else {})\n plt.step(recall, precision, label=motif_name+\":\"+auPRC,where='post')\n #uncomment to fill the area below the curve, generally not desirable if multiple curves plotted on same axes.\n #plt.fill_between(recall, precision, alpha=0.2, color='b', **step_kwargs)\n\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.legend()\n plt.show()\n \n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.squeeze",
"numpy.zeros",
"sklearn.metrics.auc",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.step",
"matplotlib.pyplot.ylim",
"numpy.maximum",
"sklearn.metrics.precision_recall_curve",
"numpy.where",
"matplotlib.pyplot.xlabel",
"sklearn.utils.fixes.signature"
]
] |
bguan/plmcbbox | [
"753b1f199194e3e680863010ae3177e680198b49"
] | [
"mcbbox/subcoco_effdet_icevision_fastai.py"
] | [
"# AUTOGENERATED! DO NOT EDIT! File to edit: 15_subcoco_effdet_icevision_fastai.ipynb (unless otherwise specified).\n\n__all__ = ['SubCocoParser', 'parse_subcoco', 'SaveModelDupBestCallback', 'FastGPUMonitorCallback',\n 'gen_transforms_and_learner', 'run_training', 'save_final']\n\n# Cell\nimport glob\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pickle\nimport PIL\nimport re\nimport requests\nimport tarfile\nimport sys\nimport torch\nimport torch.multiprocessing\nimport torchvision\nimport xml.etree.ElementTree\n\nfrom collections import defaultdict\nfrom IPython.utils import io\nfrom pathlib import Path\nfrom PIL import Image, ImageStat\nfrom shutil import copyfile, rmtree\nfrom tqdm import tqdm\nfrom typing import Hashable, List, Tuple, Union\n\n# Cell\nimport fastai\nimport icevision\nimport icevision.backbones as backbones\nimport icevision.models\nimport icevision.models.efficientdet as efficientdet\nimport icevision.tfms as tfms\n\nfrom albumentations import ShiftScaleRotate\nfrom fastai.test_utils import synth_learner\nfrom fastai.learner import Learner\nfrom fastai.callback.training import GradientAccumulation\nfrom fastai.callback.tracker import Callback, EarlyStoppingCallback, SaveModelCallback\nfrom gpumonitor.monitor import GPUStatMonitor\nfrom icevision.core import BBox, ClassMap, BaseRecord\nfrom icevision.parsers import Parser\nfrom icevision.parsers.mixins import LabelsMixin, BBoxesMixin, FilepathMixin, SizeMixin\nfrom icevision.data import Dataset, RandomSplitter\nfrom icevision.metrics.coco_metric import COCOMetricType, COCOMetric\nfrom icevision.utils import denormalize_imagenet\nfrom icevision.visualize.show_data import *\n\nfrom .subcoco_utils import *\n\nif torch.cuda.is_available():\n monitor = GPUStatMonitor(delay=1)\n\nprint(f\"Python ver {sys.version}, torch {torch.__version__}, torchvision {torchvision.__version__}, fastai {fastai.__version__}, icevision {icevision.__version__}\")\n\nif is_notebook():\n from nbdev.showdoc import *\n\n# Cell\nclass SubCocoParser(Parser, LabelsMixin, BBoxesMixin, FilepathMixin, SizeMixin):\n def __init__(self, stats:CocoDatasetStats, min_margin_ratio = 0, min_width_height_ratio = 0, quiet = True):\n self.stats = stats\n self.data = [] # list of tuple of form (img_id, wth, ht, bbox, label_id, img_path)\n skipped = 0\n for img_id, imgfname in stats.img2fname.items():\n imgf = stats.img_dir/imgfname\n if not os.path.isfile(imgf):\n skipped += 1\n continue\n width, height = stats.img2sz[img_id]\n bboxs = []\n lids = []\n for lid, x, y, w, h in stats.img2lbs[img_id]:\n if lid != None and box_within_bounds(x, y, w, h, width, height, min_margin_ratio, min_width_height_ratio):\n b = [int(x), int(y), int(w), int(h)]\n l = int(lid)\n bboxs.append(b)\n lids.append(l)\n else:\n if not quiet: print(f\"warning: skipping lxywh of {lid, x, y, w, h}\")\n\n if len(bboxs) > 0:\n self.data.append( (img_id, width, height, bboxs, lids, imgf, ) )\n else:\n skipped += 1\n\n print(f\"Skipped {skipped} out of {stats.num_imgs} images\")\n\n def __iter__(self):\n yield from iter(self.data)\n\n def __len__(self):\n return len(self.data)\n\n def imageid(self, o) -> Hashable:\n return o[0]\n\n def filepath(self, o) -> Union[str, Path]:\n return o[5]\n\n def height(self, o) -> int:\n return o[2]\n\n def width(self, o) -> int:\n return o[1]\n\n def labels(self, o) -> List[int]:\n return o[4]\n\n def bboxes(self, o) -> List[BBox]:\n return [BBox.from_xywh(x,y,w,h) for x,y,w,h in o[3]]\n\n def image_width_height(self, o) -> 
Tuple[int, int]:\n img_id = o[0]\n return self.stats.img2sz[img_id]\n\n# Cell\ndef parse_subcoco(stats:CocoDatasetStats)->List[List[BaseRecord]]:\n parser = SubCocoParser(stats, min_width_height_ratio = 0.05) # no need min_margin_ratio = 0.05 as icevision autofix\n train_records, valid_records = parser.parse(data_splitter=RandomSplitter([0.95, 0.05]), autofix=False)\n return train_records, valid_records\n\n# Cell\nclass SaveModelDupBestCallback(SaveModelCallback):\n \"Extend SaveModelCallback to save a duplicate with metric added to end of filename\"\n def __init__(self, monitor='valid_loss', comp=None, min_delta=0., fname='model', every_epoch=False, with_opt=False, reset_on_fit=True):\n super().__init__(\n monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit,\n fname=fname, every_epoch=every_epoch, with_opt=with_opt,\n )\n\n def after_epoch(self):\n \"Compare the value monitored to its best score and save if best.\"\n super().after_epoch()\n if self.new_best or self.epoch==0:\n last_saved = self.last_saved_path\n saved_stem = last_saved.stem\n backup_stem = f'{saved_stem}_e{self.epoch:03d}_m{self.best:.3f}'\n backup_file = backup_stem+(last_saved.suffix)\n backup_path = last_saved.parent / backup_file\n print(f'Backup {last_saved} as {backup_path}')\n if last_saved != backup_path: copyfile(last_saved, backup_path)\n\n# Cell\nclass FastGPUMonitorCallback(Callback):\n def __init__(self, delay=1, display_options=None):\n super(FastGPUMonitorCallback, self).__init__()\n self.delay = delay\n self.display_options = display_options if display_options else {}\n\n def before_epoch(self):\n self.monitor = GPUStatMonitor(self.delay, self.display_options)\n\n def after_epoch(self):\n self.monitor.stop()\n print(\"\")\n self.monitor.display_average_stats_per_gpu()\n\ndef gen_transforms_and_learner(stats:CocoDatasetStats,\n train_records:List[BaseRecord],\n valid_records:List[BaseRecord],\n img_sz=128,\n bs=4,\n acc_cycs=8,\n num_workers=2):\n train_tfms = tfms.A.Adapter([\n *tfms.A.aug_tfms(\n size=img_sz,\n presize=img_sz+128,\n shift_scale_rotate = tfms.A.ShiftScaleRotate(shift_limit=.025, scale_limit=0.025, rotate_limit=9)\n ),\n tfms.A.Normalize(mean=stats.chn_means/255, std=stats.chn_stds/255)\n ])\n valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(img_sz), tfms.A.Normalize()])\n train_ds = Dataset(train_records, train_tfms)\n valid_ds = Dataset(valid_records, valid_tfms)\n # Using gradient accumulation to process minibatch of 32 images in 8 loops, i.e. 
8 images per loop.\n # I ran this model w img 512x512x3 on my Dell XPS15 w GTX-1050 with 4GB VRAM, 16GM RAM, ~20min/epoch.\n backbone_name = \"tf_efficientdet_lite0\"\n model = efficientdet.model(model_name=backbone_name, img_size=img_sz, num_classes=len(stats.lbl2name))\n train_dl = efficientdet.train_dl(train_ds, batch_size=bs, num_workers=num_workers, shuffle=True)\n valid_dl = efficientdet.valid_dl(valid_ds, batch_size=bs, num_workers=max(1,num_workers//2), shuffle=False)\n\n monitor_metric = 'COCOMetric'\n metrics = [ COCOMetric(metric_type=COCOMetricType.bbox)]\n\n save_model_fname=f'{backbone_name}-{img_sz}'\n callbacks=[\n GradientAccumulation(bs*acc_cycs),\n SaveModelDupBestCallback(fname=save_model_fname, monitor=monitor_metric),\n EarlyStoppingCallback(monitor=monitor_metric, min_delta=0.001, patience=10),\n FastGPUMonitorCallback(delay=1)\n ]\n\n learn = efficientdet.fastai.learner(dls=[train_dl, valid_dl], model=model, metrics=metrics, cbs=callbacks)\n learn.freeze()\n\n return valid_tfms, learn, backbone_name\n\n# Cell\n# Wrap in function this doesn't run upon import or when generating docs\ndef run_training(learn:Learner, resume_ckpt:Path, min_lr=0.005, head_runs=1, full_runs=1):\n if resume_ckpt:\n print(f'Loading {resume_ckpt}...')\n try:\n learn.model.load_state_dict(torch.load(resume_ckpt))\n except Exception as e:\n print(f'Error while trying to load {resume_ckpt}: {e}')\n monitor.display_average_stats_per_gpu()\n print(f\"Training for {head_runs}+{full_runs} epochs at min LR {min_lr}\")\n learn.fine_tune(full_runs, min_lr, freeze_epochs=head_runs)\n\n# Cell\ndef save_final(learn:Learner, save_model_fpath:str):\n torch.save(learn.model.state_dict(), save_model_fpath)"
] | [
[
"torch.cuda.is_available",
"torch.load"
]
] |
sidharthmiglani/Data-Science | [
"c4f69b85349c73d0a6241f0f91baedc770d2bf58"
] | [
"code/partone.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[21]:\n\n\nimport pandas as pd\nimport numpy as np\nimport json \nimport zipfile\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport re\n\n\n# In[12]:\n\n\nwikidata = pd.read_json('wikidata-movies.json.gz', orient='record', lines=True)\ngenres = pd.read_json('genres.json.gz', orient='record', lines=True)\nrotten = pd.read_json('rotten-tomatoes.json.gz', orient='record', lines=True)\nomdb = pd.read_json('omdb-data.json.gz', orient='record', lines=True)\nwikidata.drop(columns=['based_on','cast_member','director','made_profit','main_subject','series','filming_location','metacritic_id'])\n\n\n# In[13]:\n\n\nmovies = wikidata.merge(rotten, how='outer', on='imdb_id').merge(omdb, how='outer', on='imdb_id').merge(genres, how='outer', on='wikidata_id')\n\n\n# In[14]:\n\n\nrotten.head()\n\n\n# In[15]:\n\n\nsns.pairplot(rotten) #used to check which columns are closely co-related.\n\n\n# In[16]:\n\n\nsns.lmplot(x='critic_average',y='audience_average',data=rotten) #shows the linearity between audience and critic average ratting\n\n\n# In[17]:\n\n\nsns.heatmap(movies.isnull(), cbar=False) #shows all the null values in the dataframe \n\n\n# In[18]:\n\n\n#filtering out the NaN and NA\nmovies = movies.dropna(subset=['omdb_awards'])\nmovies = movies[movies['omdb_awards'] != 'N/A']\n\n\n# In[22]:\n\n\n#seperating all the awards from the string using regex\ndef awards_total(x):\n awards = re.findall(r'\\d+',x) #regex find numbers\n awards = list(map(int, awards)) #to int datatype\n total_awards = np.sum(awards)\n return total_awards\nmovies['Awards'] = movies['omdb_awards'].apply(awards_total)\n\n\n# In[24]:\n\n\n# filtering out the ratings\nmovies = movies.dropna(subset=['audience_average','critic_average']) # dropped all the null values\n#movies['critic_average'] = movies['critic_average']/2.0 # converted the rating out of 5\n\n\n# In[ ]:\n\n\n\n\n\n# In[26]:\n\n\nsns.heatmap(movies.isnull(), cbar=False) # cross checking if there are still any null values in the ratings, after filtering the data\n\n\n# In[28]:\n\n\n\n\n\n# In[32]:\n\n\nplt.scatter(movies['audience_average'],movies['Awards'])\nplt.title('Audience average v/s Awards')\nplt.xlabel('Audience average')\nplt.ylabel('Awards and nominations')\n\n\n# In[33]:\n\n\nplt.scatter(movies['critic_average'],movies['Awards'])\nplt.title('Critic average v/s Awards')\nplt.xlabel('Critic average')\nplt.ylabel('Awards and nominations')\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"numpy.sum",
"pandas.read_json",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter"
]
] |
kojikoji/STGE | [
"dec9acc59e15eaca287d727a94709926c6be9224"
] | [
"stge/variational_bayes.py"
] | [
"# -*- coding: utf-8 -*-\nimport math\nimport numpy as np\nimport numba\nfrom numpy import linalg as LA\nfrom utils import get_num_break_slice\n\n\[email protected](nopython=True)\ndef calculate_Pi_mDelta(Ys, Mu, Sigma, sigma_s):\n # get number of point in each matrix\n sc_cell_num = Ys.shape[0]\n ref_cell_num = Mu.shape[0]\n gene_num = Ys.shape[1]\n # Initialize Pi Z\n Pi = np.zeros((sc_cell_num, ref_cell_num), dtype=np.float64)\n mDelta = np.zeros((sc_cell_num, ref_cell_num), dtype=np.float64)\n for i in range(sc_cell_num):\n for j in range(ref_cell_num):\n GSigma = gene_num*Sigma[j, j]\n ys = Ys[i, :]\n mu = Mu[j, :]\n delta = np.sum(np.square(ys - mu))\n # mDelta[i, j] = - (delta + GSigma)\n mDelta[i, j] = - (delta)\n # to avoid underflow, substract max value before exp\n regmDeltai = mDelta[i, :] - np.max(mDelta[i, :])\n rawPii = np.exp(regmDeltai/(2*sigma_s))\n # regulalize to sum_c Pi_ic = 1\n Pi[i, :] = rawPii/np.sum(rawPii)\n return(Pi, mDelta)\n\n\ndef convert2positive_definite(mat, eps=1.0e-10):\n min_eigen_values = np.min(np.linalg.eigvalsh(mat))\n pd_mat = mat + (eps - min_eigen_values) * np.identity(mat.shape[0])\n print(\"Min eigne value:\", min_eigen_values)\n return(pd_mat)\n\n\ndef cholesky_inv_prod(A, b):\n A_sum = np.sum(A)\n A = A/A_sum\n try:\n L = LA.cholesky(A)\n except:\n print(\"Not positive definite! It will be converted\")\n A = convert2positive_definite(A)\n L = LA.cholesky(A)\n t = LA.solve(L, b)\n x = LA.solve(L.T.conj(), t)/A_sum\n return(x)\n\n\ndef cholesky_inv(A):\n A_sum = np.sum(A)\n A = A/A_sum\n Imat = np.identity(A.shape[0])\n try:\n L = LA.cholesky(A)\n except:\n print(\"Not positive definite, it will be converted\")\n A = convert2positive_definite(A)\n L = LA.cholesky(A)\n t = LA.solve(L, Imat)\n x = LA.solve(L.T.conj(), t)/A_sum\n return(x)\n\n\ndef cholesky_inv_origin(A):\n A_sum = np.sum(A)\n A = A/A_sum\n Imat = np.identity(A.shape[0])\n try:\n L = LA.cholesky(A)\n except:\n print(\"Not positive definite, it will be converted\")\n A = convert2positive_definite(A)\n L = LA.cholesky(A)\n t = LA.solve(L, Imat)\n x = LA.solve(L.T.conj(), t)/A_sum\n return(x, A*A_sum)\n\n\nclass variational_bayes:\n def calculate_Mu_Sigma(Yt, Ys, Pi_list, A, K_inv,\n sigma_s, sigma_t,\n sc_t_nums, sc_t_breaks, ref_t_nums):\n '''\n This provide mean and variance of gene expression in\n refrence cells derived from cell movements movie.\n '''\n rp_list = []\n PiYsp_list = []\n for t in ref_t_nums.keys():\n if t in sc_t_nums.keys():\n # sump Pip for ref cells\n t_idx = [t for t in sc_t_nums.keys()].index(t)\n Pip = Pi_list[t_idx]\n rp = np.sum(Pip, axis=0)\n # average sc cell expressions by asignment wieght\n Ysp_slice = get_num_break_slice(\n sc_t_nums, sc_t_breaks, t)\n Ysp = Ys[Ysp_slice, :]\n PiYsp = Pip.T @ Ysp\n else:\n rp = np.zeros(ref_t_nums[t])\n PiYsp = np.zeros((ref_t_nums[t], Ys.shape[1]))\n PiYsp_list.append(PiYsp)\n rp_list.append(rp)\n PiYs = np.concatenate(PiYsp_list, axis=0)\n r = np.concatenate(rp_list)\n Sigma_inv = K_inv + (A.T @ A/sigma_t) + (np.diag(r)/(sigma_s))\n Sigma = np.linalg.inv(Sigma_inv)\n integrated_obs_vec = (A.T @ Yt/(sigma_t)) + PiYs/(sigma_s)\n Mu = Sigma @ integrated_obs_vec\n return((Mu, Sigma))\n\n def calculate_Pi_mDelta(\n Ys, Mu, Sigma, sigma_s,\n sc_t_nums, sc_t_breaks, ref_t_nums, ref_t_breaks):\n '''\n This provide the cell assignment of single cell RNA seq \n for refrence cells derived from cell movements movie.\n mYs: single cell expression weighted\n by assignment of each cell to each reference cell\n r: sum of Pi for 
each reference cell\n Lpi: lower bound of likelihood concerning Pi\n '''\n Pi_list = []\n mDelta_list = []\n for t in sc_t_nums.keys():\n Ysp_slice = get_num_break_slice(sc_t_nums, sc_t_breaks, t)\n Ysp = Ys[Ysp_slice, :]\n Mup_slice = get_num_break_slice(ref_t_nums, ref_t_breaks, t)\n Mup = Mu[Mup_slice, :]\n Pi, mDelta = calculate_Pi_mDelta(Ysp, Mup, Sigma, sigma_s)\n Pi_list.append(Pi)\n mDelta_list.append(mDelta)\n return(Pi_list, mDelta_list)\n\n def calculate_L(Yt, Pi_list, mDelta_list, Mu, Sigma,\n A, K, K_inv,\n sigma_s, sigma_t):\n '''\n This calculate lower bound of log likelihood\n '''\n # basic numbers\n sc_num = np.sum([\n Pi_list[i].shape[0] for i in range(len(Pi_list))])\n gene_num = Yt.shape[1]\n ts_num = Yt.shape[0]\n ref_cell_num = Mu.shape[0]\n # L for no vb variable\n Glog2pisigs = (gene_num/2)*np.log(2*np.pi*sigma_s)\n L_base = - sc_num*(np.log(ref_cell_num) + Glog2pisigs)\n # L for Pi\n LPi = 0\n for tidx in range(len(Pi_list)):\n LPi += np.sum(Pi_list[tidx]*mDelta_list[tidx]\n / (2*sigma_s))\n # L for tomo seq\n regulalizationLts = (gene_num*ts_num/2.0)*np.log(2*np.pi*sigma_t)\n delta = np.sum(np.square(Yt - (A @ Mu)))\n Lts = - (delta)/(2*sigma_t) - regulalizationLts\n # L for prior\n summed_mu_mut = Mu @ Mu.transpose()\n F_Ft = summed_mu_mut\n trKinvFFt = np.sum(K_inv * F_Ft)/2\n regulalizationLp = gene_num * np.linalg.slogdet(2*np.pi*K)[1]/2\n Lp = - trKinvFFt - regulalizationLp\n # H for qF\n HqF = 0\n # H for qZ\n HqZ = 0\n for tidx in range(len(Pi_list)):\n no_zero_Pip = Pi_list[tidx][Pi_list[tidx] != 0]\n HqZ += np.sum(no_zero_Pip*np.log(no_zero_Pip))\n L = L_base + LPi + Lts + Lp + HqF + HqZ\n return(L)\n\n def calculate_L_dsigma_s(Pi_list, mDelta_list, sigma_s, gene_num):\n '''\n This calculate the dfferentiated lower bound of log likelihood by sigma_s\n '''\n # basic numbers\n sc_num = np.sum([\n Pi_list[i].shape[0] for i in range(len(Pi_list))])\n GN_sigma_s = gene_num*sc_num/(2*sigma_s)\n PimDelta = 0\n for i in range(len(Pi_list)):\n PimDelta += np.sum(Pi_list[i] * mDelta_list[i])/(2*(sigma_s**2))\n L_dsigma_s = - (GN_sigma_s) - PimDelta\n return(L_dsigma_s)\n\n def calculate_L_dsigma_t(Yt, Mu, Sigma, A, sigma_s, sigma_t):\n '''\n This calculate the dfferentiated lower bound of log likelihood by sigma_t\n '''\n # basic numbers\n gene_num = Yt.shape[1]\n ts_num = Yt.shape[0]\n # L for tomo seq\n GN_sigma_t = gene_num*ts_num/(2*sigma_t)\n delta = np.sum(np.square(Yt - (A @ Mu)))\n Delta_sigma_t2 = (delta)/(2*(sigma_t**2))\n L_dsigma_t = Delta_sigma_t2 - GN_sigma_t\n return(L_dsigma_t)\n\n def calculate_optimized_sigma_s(Pi_list, mDelta_list, gene_num):\n '''\n This calculate sigma_s maximizing L\n '''\n # basic numbers\n sc_num = np.sum([\n Pi_list[i].shape[0] for i in range(len(Pi_list))])\n GN = gene_num*sc_num\n PiDelta = 0\n for i in range(len(Pi_list)):\n PiDelta += -np.sum(Pi_list[i] * mDelta_list[i])\n optimized_sigma_s = PiDelta/GN\n return(optimized_sigma_s)\n\n def calculate_optimized_sigma_t(Yt, Mu, Sigma, A):\n '''\n This calculate sigma_t maximizing L\n '''\n # basic numbers\n gene_num = Yt.shape[1]\n ts_num = Yt.shape[0]\n # L for tomo seq\n GT = gene_num*ts_num\n delta = np.sum(np.square(Yt - (A @ Mu)))\n Delta = (delta)\n optimized_sigma_t = Delta/GT\n return(optimized_sigma_t)\n\n def calculate_optimized_sigma_f(Mu, Sigma, Kp_inv):\n '''\n This calculate differentiated lower bound of log likelihood by theta_K\n '''\n # basic numbers\n gene_num = Mu.shape[1]\n ref_cell_num = Mu.shape[0]\n # L for prior\n # Generate expected F F^t\n 
summed_mu_mut = Mu @ Mu.transpose()\n F_Ft = summed_mu_mut\n optimized_sigma_f = np.sum(Kp_inv * F_Ft)/(gene_num*ref_cell_num)\n return(optimized_sigma_f)\n\n def calculate_L_dtheta_K(Mu, Sigma, K, K_inv, K_dtheta_K):\n '''\n This calculate differentiated lower bound of log likelihood by theta_K\n '''\n # basic numbers\n gene_num = Mu.shape[1]\n # L for prior\n # Generate expected F F^t\n # Mu = np.matrix(Mu)\n summed_mu_mut = Mu @ Mu.transpose()\n F_Ft = summed_mu_mut\n Kinv_dtheta_K = - K_inv @ K_dtheta_K @ K_inv\n trK_inv_FFt_dtheta_K = np.sum(Kinv_dtheta_K * F_Ft)/2\n logdetK_dtheta_K = gene_num * np.sum(K_inv * K_dtheta_K)/2\n L_dtheta_K = - (logdetK_dtheta_K + trK_inv_FFt_dtheta_K)\n return(L_dtheta_K)\n\n def variational_bayes(self, Ys, Yt, A, K,\n sigma_s, sigma_t,\n sc_t_breaks, ref_t_breaks,\n L_minimum_change= 1.0e-3):\n '''\n It compute mean and variance of gene expression matrix,\n and the cell assignment of single cell RNA seq \n for refrence cells derived from cell movements movie.\n '''\n # Pi = 0\n # pi0 = 1/N\n pre_L = -1.0e100\n L = pre_L + 1\n K_inv = np.linalg.inv(K)\n self.K_inv = K\n r = np.zeros(K.shape[0])\n mYs = np.zeros((K.shape[0], Ys.shape[1]))\n while L > pre_L + L_minimum_change:\n pre_L = L\n Mu, Sigma = variational_bayes.calculate_Mu_Sigma(\n Yt, mYs, r, A, K_inv,\n sigma_s, sigma_t)\n mYs, r, Lpi = variational_bayes.calculate__mYs_r_Lpi(\n Ys, Mu, Sigma, sigma_s,\n sc_t_breaks, ref_t_breaks)\n L = variational_bayes.calculate_L(\n Lpi, Yt, Mu, Sigma, A, sigma_t)\n self.Mu = Mu\n self.Sigma = Sigma\n self.mYs = mYs\n self.r = r\n self.Lpi = Lpi\n self.L = L\n\n def reconstructed_mean(self, K_reconst):\n return(K_reconst @ self.K_inv @ self.Mu)\n"
] | [
[
"numpy.sum",
"numpy.linalg.solve",
"numpy.zeros",
"numpy.diag",
"numpy.linalg.inv",
"numpy.linalg.eigvalsh",
"numpy.exp",
"numpy.linalg.cholesky",
"numpy.linalg.slogdet",
"numpy.max",
"numpy.log",
"numpy.concatenate",
"numpy.square",
"numpy.identity"
]
] |
berkeley-stat159/project-alpha | [
"330d025c4eda94d390a82e86deecb791086c9dbf"
] | [
"code/utils/scripts/tgrouping_script.py"
] | [
"\"\"\" Script for the tgrouping function.\nRun with: \n python tgrouping_script.py\n\"\"\"\n# Loading modules.\nfrom __future__ import absolute_import, division, print_function\nimport os\nimport numpy as np\nfrom scipy.stats import gamma\nimport matplotlib.pyplot as plt\nimport nibabel as nib\nimport sys\nimport numpy.linalg as npl\n\n# Paths. Use your own. \npathtodata = \"../../../data/ds009/sub001/\"\ncondition_location=pathtodata+\"model/model001/onsets/task001_run001/\"\nlocation_of_images=\"../../../images/\"\nsys.path.append(os.path.join(os.path.dirname('__file__'), \"../functions/\"))\n\n# Load functions\nfrom stimuli import events2neural\nfrom event_related_fMRI_functions import hrf_single, convolution_specialized\nfrom Image_Visualizing import present_3d, make_mask\nfrom glm import glm\nfrom hypothesis import t_stat\nfrom event_related_fMRI_functions import hrf_single, convolution_specialized\nfrom benjamini_hochberg import bh_procedure\nfrom mask_phase_2_dimension_change import masking_reshape_start, masking_reshape_end, neighbor_smoothing\nfrom tgrouping import t_binary_grouping, t_grouping_neighbor\n\n# Load the image data for subject 1.\nimg = nib.load(pathtodata+\"BOLD/task001_run001/bold.nii.gz\")\ndata = img.get_data()\ndata = data[...,6:] # Knock off the first 6 observations.\n\ncond1=np.loadtxt(condition_location+\"cond001.txt\")\ncond2=np.loadtxt(condition_location+\"cond002.txt\")\ncond3=np.loadtxt(condition_location+\"cond003.txt\")\n\n#######################\n# convolution #\n#######################\n\nall_stimuli=np.array(sorted(list(cond2[:,0])+list(cond3[:,0])+list(cond1[:,0]))) # could also just x_s_array\nmy_hrf = convolution_specialized(all_stimuli,np.ones(len(all_stimuli)),hrf_single,np.linspace(0,239*2-2,239))\n\nB,t,df,p = t_stat(data, my_hrf, np.array([0,1]))\n\n###############\n# tgrouping #\n###############\nmask = nib.load(pathtodata + '/anatomy/inplane001_brain_mask.nii.gz')\nmask = mask.get_data()\ninner_ones=np.ones(data.shape[:-1])\nmask= make_mask(inner_ones,mask,True)\n\nmask[mask>0]=1\n\n\nt_vals=t\n\n\nt_vals_3d=t_vals.reshape(data.shape[:-1])\n\npro=[.25,.1,.1,.05,.025]\nfolks=[1,1,5,5,10]\n\nplt.close()\nfor i in np.arange(5):\n\tstart,cutoff=t_grouping_neighbor(t_vals_3d,mask,pro[i],prop=True,neighbors= folks[i],abs_on=True)\n\tplt.imshow(present_3d(2*start-1),interpolation='nearest',cmap=\"seismic\")\n\tplt.title(\"T statistics \" +str(pro[i])+\" proportion \\n (cutoff=\" + str(cutoff)+\") , neighbors: \" + str(folks[i]))\n\tplt.colorbar()\n\tplt.savefig(location_of_images+str(pro[i])+\"_\" + str(folks[i])+\"_t.png\")\n\tplt.close()\n\n\n##################\n# Beta #\n##################\nb1 = B[1]\n#cutoff = .6\nb1_vals_3d=b1.reshape(data.shape[:-1])\npro=[.25,.1,.1,.05,.025]\nfolks=[1,1,5,5,10]\n\n#plt.close()\nfor i in np.arange(5):\n\tplt.figure()\n\tstart,cutoff=t_grouping_neighbor(b1_vals_3d,mask,pro[i],prop=True,neighbors= folks[i],abs_on=True)\n\tplt.imshow(present_3d(2*start-1),interpolation='nearest',cmap=\"seismic\")\n\tplt.title(\"Beta values \" +str(pro[i])+\" proportion \\n (cutoff=\" + str(cutoff)+\"), neighbors: \" + str(folks[i]))\n\tplt.colorbar()\n\tplt.savefig(location_of_images+str(pro[i])+\"_\" + str(folks[i])+\"_b.png\")\n\tplt.close()\n\n\n\n\n\n"
] | [
[
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"numpy.loadtxt"
]
] |
matthewmturner/ibis | [
"9360bf9878e78c06cadd6733abd04bf98ee0a090"
] | [
"ibis/tests/all/test_aggregation.py"
] | [
"import numpy as np\nimport pandas as pd\nimport pytest\nfrom pytest import param\n\nimport ibis.expr.datatypes as dt\nfrom ibis.tests.backends import (\n BigQuery,\n Clickhouse,\n MySQL,\n Postgres,\n PySpark,\n SQLite,\n)\nfrom ibis.udf.vectorized import reduction\n\n\n@reduction(input_type=[dt.double], output_type=dt.double)\ndef mean_udf(s):\n return s.mean()\n\n\naggregate_test_params = [\n param(\n lambda t: t.double_col.mean(),\n lambda s: s.mean(),\n 'double_col',\n id='mean',\n ),\n param(\n lambda t: mean_udf(t.double_col),\n lambda s: s.mean(),\n 'double_col',\n id='mean_udf',\n marks=pytest.mark.udf,\n ),\n param(\n lambda t: t.double_col.min(),\n lambda s: s.min(),\n 'double_col',\n id='min',\n ),\n param(\n lambda t: t.double_col.max(),\n lambda s: s.max(),\n 'double_col',\n id='max',\n ),\n param(\n lambda t: (t.double_col + 5).sum(),\n lambda s: (s + 5).sum(),\n 'double_col',\n id='complex_sum',\n ),\n param(\n lambda t: t.timestamp_col.max(),\n lambda s: s.max(),\n 'timestamp_col',\n id='timestamp_max',\n ),\n]\n\n\[email protected](\n ('result_fn', 'expected_fn', 'expected_col'), aggregate_test_params,\n)\[email protected]_unsupported\ndef test_aggregate(\n backend, alltypes, df, result_fn, expected_fn, expected_col\n):\n expr = alltypes.aggregate(tmp=result_fn)\n result = expr.execute()\n\n # Create a single-row single-column dataframe with the Pandas `agg` result\n # (to match the output format of Ibis `aggregate`)\n expected = pd.DataFrame({'tmp': [df[expected_col].agg(expected_fn)]})\n\n pd.testing.assert_frame_equal(result, expected)\n\n\[email protected](\n ('result_fn', 'expected_fn', 'expected_col'), aggregate_test_params,\n)\[email protected]_unsupported\ndef test_aggregate_grouped(\n backend, alltypes, df, result_fn, expected_fn, expected_col\n):\n grouping_key_col = 'bigint_col'\n\n # Two (equivalent) variations:\n # 1) `groupby` then `aggregate`\n # 2) `aggregate` with `by`\n expr1 = alltypes.groupby(grouping_key_col).aggregate(tmp=result_fn)\n expr2 = alltypes.aggregate(tmp=result_fn, by=grouping_key_col)\n result1 = expr1.execute()\n result2 = expr2.execute()\n\n # Note: Using `reset_index` to get the grouping key as a column\n expected = (\n df.groupby(grouping_key_col)[expected_col]\n .agg(expected_fn)\n .rename('tmp')\n .reset_index()\n )\n\n # Row ordering may differ depending on backend, so sort on the grouping key\n result1 = result1.sort_values(by=grouping_key_col).reset_index(drop=True)\n result2 = result2.sort_values(by=grouping_key_col).reset_index(drop=True)\n expected = expected.sort_values(by=grouping_key_col).reset_index(drop=True)\n\n pd.testing.assert_frame_equal(result1, expected)\n pd.testing.assert_frame_equal(result2, expected)\n\n\[email protected](\n ('result_fn', 'expected_fn'),\n [\n param(\n lambda t, where: t.bool_col.count(where=where),\n lambda t, where: len(t.bool_col[where].dropna()),\n id='count',\n ),\n param(\n lambda t, where: t.bool_col.any(),\n lambda t, where: t.bool_col.any(),\n id='any',\n ),\n param(\n lambda t, where: t.bool_col.notany(),\n lambda t, where: ~t.bool_col.any(),\n id='notany',\n ),\n param(\n lambda t, where: -t.bool_col.any(),\n lambda t, where: ~t.bool_col.any(),\n id='any_negate',\n ),\n param(\n lambda t, where: t.bool_col.all(),\n lambda t, where: t.bool_col.all(),\n id='all',\n ),\n param(\n lambda t, where: t.bool_col.notall(),\n lambda t, where: ~t.bool_col.all(),\n id='notall',\n ),\n param(\n lambda t, where: -t.bool_col.all(),\n lambda t, where: ~t.bool_col.all(),\n id='all_negate',\n ),\n 
param(\n lambda t, where: t.double_col.sum(),\n lambda t, where: t.double_col.sum(),\n id='sum',\n ),\n param(\n lambda t, where: t.double_col.mean(),\n lambda t, where: t.double_col.mean(),\n id='mean',\n ),\n param(\n lambda t, where: t.double_col.min(),\n lambda t, where: t.double_col.min(),\n id='min',\n ),\n param(\n lambda t, where: t.double_col.max(),\n lambda t, where: t.double_col.max(),\n id='max',\n ),\n param(\n lambda t, where: t.double_col.approx_median(),\n lambda t, where: t.double_col.median(),\n id='approx_median',\n marks=pytest.mark.xpass_backends([Clickhouse]),\n ),\n param(\n lambda t, where: t.double_col.std(how='sample'),\n lambda t, where: t.double_col.std(ddof=1),\n id='std',\n ),\n param(\n lambda t, where: t.double_col.var(how='sample'),\n lambda t, where: t.double_col.var(ddof=1),\n id='var',\n ),\n param(\n lambda t, where: t.double_col.std(how='pop'),\n lambda t, where: t.double_col.std(ddof=0),\n id='std_pop',\n ),\n param(\n lambda t, where: t.double_col.var(how='pop'),\n lambda t, where: t.double_col.var(ddof=0),\n id='var_pop',\n ),\n param(\n lambda t, where: t.double_col.cov(t.float_col),\n lambda t, where: t.double_col.cov(t.float_col),\n id='covar',\n ),\n param(\n lambda t, where: t.double_col.corr(t.float_col),\n lambda t, where: t.double_col.corr(t.float_col),\n id='corr',\n ),\n param(\n lambda t, where: t.string_col.approx_nunique(),\n lambda t, where: t.string_col.nunique(),\n id='approx_nunique',\n marks=pytest.mark.xfail_backends([MySQL, SQLite]),\n ),\n param(\n lambda t, where: t.double_col.arbitrary(how='first'),\n lambda t, where: t.double_col.iloc[0],\n id='arbitrary_first',\n ),\n param(\n lambda t, where: t.double_col.arbitrary(how='last'),\n lambda t, where: t.double_col.iloc[-1],\n id='arbitrary_last',\n ),\n ],\n)\[email protected](\n ('ibis_cond', 'pandas_cond'),\n [\n param(lambda t: None, lambda t: slice(None), id='no_cond'),\n param(\n lambda t: t.string_col.isin(['1', '7']),\n lambda t: t.string_col.isin(['1', '7']),\n id='is_in',\n ),\n ],\n)\[email protected]_unsupported\ndef test_reduction_ops(\n backend, alltypes, df, result_fn, expected_fn, ibis_cond, pandas_cond\n):\n expr = result_fn(alltypes, ibis_cond(alltypes))\n result = expr.execute()\n expected = expected_fn(df, pandas_cond(df))\n np.testing.assert_allclose(result, expected)\n\n\[email protected](\n ('result_fn', 'expected_fn'),\n [\n param(\n lambda t: (\n t.groupby('bigint_col').aggregate(\n tmp=lambda t: t.string_col.group_concat(',')\n )\n ),\n lambda t: (\n t.groupby('bigint_col')\n .string_col.agg(lambda s: ','.join(s.values))\n .rename('tmp')\n .reset_index()\n ),\n id='group_concat',\n )\n ],\n)\[email protected]_unsupported\ndef test_group_concat(backend, alltypes, df, result_fn, expected_fn):\n expr = result_fn(alltypes)\n result = expr.execute()\n expected = expected_fn(df)\n\n assert set(result.iloc[:, 1]) == set(expected.iloc[:, 1])\n\n\[email protected](\n ('result_fn', 'expected_fn'),\n [\n param(\n lambda t: t.string_col.topk(3),\n lambda t: t.groupby('string_col')['string_col'].count().head(3),\n id='string_col_top3',\n )\n ],\n)\[email protected]_unsupported\[email protected]_backends([PySpark]) # Issue #2130\ndef test_topk_op(backend, alltypes, df, result_fn, expected_fn):\n # TopK expression will order rows by \"count\" but each backend\n # can have different result for that.\n # Note: Maybe would be good if TopK could order by \"count\"\n # and the field used by TopK\n t = alltypes.sort_by(alltypes.string_col)\n df = 
df.sort_values('string_col')\n result = result_fn(t).execute()\n expected = expected_fn(df)\n assert all(result['count'].values == expected.values)\n\n\[email protected](\n ('result_fn', 'expected_fn'),\n [\n param(\n lambda t: t[t.string_col.topk(3)],\n lambda t: t[\n t.string_col.isin(\n t.groupby('string_col')['string_col'].count().head(3).index\n )\n ],\n id='string_col_filter_top3',\n )\n ],\n)\[email protected]_unsupported\n# Issues #2369 #2133 #2131 #2132\[email protected]_backends([BigQuery, Clickhouse, MySQL, Postgres])\[email protected]_backends([SQLite], reason='Issue #2128')\ndef test_topk_filter_op(backend, alltypes, df, result_fn, expected_fn):\n # TopK expression will order rows by \"count\" but each backend\n # can have different result for that.\n # Note: Maybe would be good if TopK could order by \"count\"\n # and the field used by TopK\n t = alltypes.sort_by(alltypes.string_col)\n df = df.sort_values('string_col')\n result = result_fn(t).execute()\n expected = expected_fn(df)\n assert result.shape[0] == expected.shape[0]\n"
] | [
[
"pandas.testing.assert_frame_equal",
"numpy.testing.assert_allclose"
]
] |
banbiossa/deep-learning-from-scrach | [
"d183a73ad27c68a79500c35a94c174ce0455940c"
] | [
"src/ch04/gradient_2d.py"
] | [
"# coding: utf-8\n# cf.http://d.hatena.ne.jp/white_wheels/20100327/p3\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef _numerical_gradient_no_batch(f, x):\n h = 1e-4 # 0.0001\n grad = np.zeros_like(x)\n\n for idx in range(x.size):\n tmp_val = x[idx]\n x[idx] = float(tmp_val) + h\n fxh1 = f(x) # f(x+h)\n\n x[idx] = tmp_val - h\n fxh2 = f(x) # f(x-h)\n grad[idx] = (fxh1 - fxh2) / (2*h)\n\n x[idx] = tmp_val # 値を元に戻す\n\n return grad\n\n\ndef numerical_gradient(f, X):\n if X.ndim == 1:\n return _numerical_gradient_no_batch(f, X)\n else:\n grad = np.zeros_like(X)\n\n for idx, x in enumerate(X):\n grad[idx] = _numerical_gradient_no_batch(f, x)\n\n return grad\n\n\ndef function_2(x):\n if x.ndim == 1:\n return np.sum(x**2)\n else:\n return np.sum(x**2, axis=1)\n\n\ndef tangent_line(f, x):\n d = numerical_gradient(f, x)\n print(d)\n y = f(x) - d*x\n return lambda t: d*t + y\n\n\nif __name__ == '__main__':\n x0 = np.arange(-2, 2.5, 0.25)\n x1 = np.arange(-2, 2.5, 0.25)\n X, Y = np.meshgrid(x0, x1)\n\n X = X.flatten()\n Y = Y.flatten()\n\n grad = numerical_gradient(function_2, np.array([X, Y]))\n\n plt.figure()\n # ,headwidth=10,scale=40,color=\"#444444\")\n plt.quiver(X, Y, -grad[0], -grad[1], angles=\"xy\", color=\"#666666\")\n plt.xlim([-2, 2])\n plt.ylim([-2, 2])\n plt.xlabel('x0')\n plt.ylabel('x1')\n plt.grid()\n plt.legend()\n plt.draw()\n plt.show()\n"
] | [
[
"matplotlib.pylab.xlim",
"numpy.zeros_like",
"matplotlib.pylab.grid",
"numpy.sum",
"matplotlib.pylab.quiver",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.legend",
"matplotlib.pylab.draw",
"matplotlib.pylab.figure",
"matplotlib.pylab.show",
"numpy.arange",
"matplotlib.pylab.xlabel",
"matplotlib.pylab.ylim",
"numpy.array",
"numpy.meshgrid"
]
] |
ajesse11x/Cirq | [
"ef7b260b9fcdf27f79ab6f0f15ffd27fab7ccd20"
] | [
"cirq/linalg/transformations.py"
] | [
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility methods for transforming matrices.\"\"\"\n\nfrom typing import Tuple\n\nimport numpy as np\n\n\ndef reflection_matrix_pow(reflection_matrix: np.ndarray, exponent: float):\n \"\"\"Raises a matrix with two opposing eigenvalues to a power.\n\n Args:\n reflection_matrix: The matrix to raise to a power.\n exponent: The power to raise the matrix to.\n\n Returns:\n The given matrix raised to the given power.\n \"\"\"\n\n # The eigenvalues are x and -x for some complex unit x. Determine x.\n squared_phase = np.dot(reflection_matrix[:, 0],\n reflection_matrix[0, :])\n phase = complex(np.sqrt(squared_phase))\n\n # Extract +x and -x eigencomponents of the matrix.\n i = np.eye(reflection_matrix.shape[0]) * phase\n pos_part = (i + reflection_matrix) * 0.5\n neg_part = (i - reflection_matrix) * 0.5\n\n # Raise the matrix to a power by raising its eigencomponents to that power.\n pos_factor = phase**(exponent - 1)\n neg_factor = pos_factor * complex(-1)**exponent\n pos_part_raised = pos_factor * pos_part\n neg_part_raised = neg_part * neg_factor\n return pos_part_raised + neg_part_raised\n\n\ndef match_global_phase(a: np.ndarray,\n b: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Phases the given matrices so that they agree on the phase of one entry.\n\n To maximize precision, the position with the largest entry from one of the\n matrices is used when attempting to compute the phase difference between\n the two matrices.\n\n Args:\n a: A numpy array.\n b: Another numpy array.\n\n Returns:\n A tuple (a', b') where a' == b' implies a == b*exp(i t) for some t.\n \"\"\"\n\n # Not much point when they have different shapes.\n if a.shape != b.shape:\n return a, b\n\n # Find the entry with the largest magnitude in one of the matrices.\n k = max(np.ndindex(*a.shape), key=lambda t: abs(b[t]))\n\n def dephase(v):\n r = np.real(v)\n i = np.imag(v)\n\n # Avoid introducing floating point error when axis-aligned.\n if i == 0:\n return -1 if r < 0 else 1\n if r == 0:\n return 1j if i < 0 else -1j\n\n return np.exp(-1j * np.arctan2(i, r))\n\n # Zero the phase at this entry in both matrices.\n return a * dephase(a[k]), b * dephase(b[k])\n"
] | [
[
"numpy.eye",
"numpy.arctan2",
"numpy.sqrt",
"numpy.dot",
"numpy.real",
"numpy.ndindex",
"numpy.imag"
]
] |
tylerhuntington222/biosteam | [
"234959180a3210d95e39a012454f455723c92686"
] | [
"biosteam/units/design_tools/column_design.py"
] | [
"# -*- coding: utf-8 -*-\n# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules\n# Copyright (C) 2020, Yoel Cortes-Pena <[email protected]>\n# \n# This module is under the UIUC open-source license. See \n# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt\n# for license details.\n\"\"\"\nGeneral functional algorithms for the design and purchase cost estimation\nof distillation columns.\n\nReferences\n----------\n.. [1] Seider, W. D., Lewin, D. R., Seader, J. D., Widagdo, S., Gani, R.,\n & Ng, M. K. (2017). Product and Process Design Principles. Wiley.\n Cost Accounting and Capital Cost Estimation (Chapter 16)\n.. [2] M. Duss, R. Taylor. (2018)\n Predict Distillation Tray Efficiency. AICHE \n.. [3] Green, D. W. Distillation. In Perry’s Chemical Engineers’\n 674 Handbook, 9 ed.; McGraw-Hill Education, 2018.\n\n\"\"\"\nimport numpy as np\nfrom . import utils\nfrom flexsolve import njitable\nimport biosteam as bst\n\n__all__ = ('compute_purchase_cost_of_trays',\n 'compute_purchase_cost_of_tower',\n 'compute_empty_tower_cost',\n 'compute_plaform_ladder_cost',\n 'compute_tower_weight',\n 'compute_tower_wall_thickness',\n 'compute_tray_base_purchase_cost',\n 'compute_n_trays_factor',\n 'compute_murphree_stage_efficiency',\n 'compute_flow_parameter',\n 'compute_max_capacity_parameter',\n 'compute_max_vapor_velocity',\n 'compute_downcomer_area_fraction',\n 'compute_tower_diameter',\n 'compute_tower_height')\n\n@njitable(cache=True)\ndef minimum_thickness_from_diameter(D):\n return 0.03125 * D + 0.125\n\n@njitable(cache=True)\ndef compute_purchase_cost_of_trays(N_T, Di, F_TT, F_TM):\n \"\"\"\n Return total cost of all trays at BioSTEAM's CEPCI.\n \n Parameters\n ----------\n N_T : int\n Number of trays.\n Di : float\n Inner diameter [ft].\n F_TT : float\n Tray type factor.\n F_TM : float\n Tray material factor.\n \n Notes\n -----\n The purchase cost is given by [1]_. See source code for details.\n The purchase cost is scaled according to BioSTEAM's Chemical\n Plant Cost Index, `biosteam.CE`.\n \n \"\"\"\n F_CE = bst.CE/500\n C_BT = compute_tray_base_purchase_cost(Di)\n F_NT = compute_n_trays_factor(N_T)\n return N_T * F_CE * F_NT * F_TT * F_TM * C_BT\n\n@njitable(cache=True)\ndef compute_purchase_cost_of_tower(Di, L, W, F_VM):\n \"\"\"\n Return cost of tower at BioSTEAM's CEPCI.\n \n Parameters\n ----------\n Di : float\n Inner diameter [ft]\n L : float\n length [ft]\n W : float\n weight [lb].\n F_VM : float\n Tower material factor.\n \n Notes\n -----\n The purchase cost is given by [1]_. See source code for details.\n The purchase cost is scaled according to BioSTEAM's Chemical\n Plant Cost Index, `biosteam.CE`.\n \n \"\"\"\n F_CE = bst.CE/500\n C_V = compute_empty_tower_cost(W)\n C_PL = compute_plaform_ladder_cost(Di, L)\n return F_CE * (F_VM * C_V + C_PL)\n\n@njitable(cache=True)\ndef compute_empty_tower_cost(W):\n \"\"\"\n Return the cost [C_V; in USD] of an empty tower vessel assuming a CE of 500.\n \n Parameters\n ----------\n W : float\n Weight [lb].\n \n \n Notes\n -----\n The purchase cost is given by [1]_. See source code for details.\n \n \"\"\"\n return np.exp(7.2756 + 0.18255*np.log(W) + 0.02297*np.log(W)**2)\n\n@njitable(cache=True)\ndef compute_plaform_ladder_cost(Di, L):\n \"\"\"\n Return the cost [C_PL; in USD] of platforms and ladders assuming a CE of 500.\n \n Parameters\n ----------\n Di: float\n Inner diameter [ft].\n L: float\n Legnth [ft].\n \n Notes\n -----\n The purchase cost is given by [1]_. 
See source code for details.\n \n \"\"\"\n return 300.9*Di**0.63316*L**0.80161\n\n@njitable(cache=True)\ndef compute_tower_weight(Di, L, tv, rho_M):\n \"\"\"\n Return the weight [W; in lb] of the tower assuming 2:1 elliptical head.\n \n Parameters\n ----------\n Di : float\n Inner diameter [ft].\n L : float\n Legnth [ft].\n tv : float\n Shell thickness [in].\n rho_M: floa\n Density of material [lb/in^3].\n \n Notes\n -----\n The tower weight is given by [1]_. See source code for details.\n \n \"\"\"\n Di = Di*12\n L = L*12\n return np.pi*(Di+tv)*(L+0.8*Di)*tv*rho_M\n\n@njitable(cache=True)\ndef compute_tower_wall_thickness(Po, Di, L, S=15000, E=None, M=29.5):\n \"\"\"\n Return the wall thinkness [tv; in inches] designed to withstand the\n internal pressure and the wind/earthquake load at the bottom.\n \n Parameters\n ----------\n Po : float\n Operating internal pressure [psi].\n Di : float\n Internal diameter [ft].\n L : float\n Height [ft].\n S : float\n Maximum stress [psi].\n E : float\n Fractional weld efficiency\n M : float\n Elasticity [psi].\n \n Notes\n -----\n The wall thickness is given by [1]_. See source code for details.\n \n \"\"\"\n # TODO: Incorporate temperature for choosing S and M\n Di = Di*12 # ft to in\n L = L*12\n \n E_check = E is None\n if E_check:\n # Assume carbon steel with thickness more than 1.25 in\n E = 1.0 \n \n # Get design pressure, which should be higher than operating pressure.\n Po_gauge = Po - 14.69\n if Po_gauge < 0:\n # TODO: Double check vacuum calculation\n Pd = -Po_gauge\n tE = 1.3*Di*(Pd*L/M/Di)**0.4\n tEC = L*(0.18*Di - 2.2)*10**-5 - 0.19\n tv = tE + tEC\n return tv\n elif Po_gauge < 5:\n Pd = 10\n elif Po_gauge < 1000:\n Pd = np.exp(0.60608 + 0.91615*np.log(Po)) + 0.0015655*np.log(Po)**2\n else:\n Pd = 1.1*Po_gauge\n \n # Calculate thinkess according to ASME pressure-vessel code.\n ts = Pd*Di/(2*S*E-1.2*Pd)\n \n if E_check:\n # Weld efficiency of 0.85 for low thickness carbon steel\n if ts < 1.25:\n E = 0.85\n ts = Pd*Di/(2*S*E-1.2*Pd)\n \n # Add corrosion allowence\n ts += 1/8\n \n # Minimum thickness for vessel rigidity may be larger\n Di_ft = Di/12\n ts_min = minimum_thickness_from_diameter(Di_ft) if Di_ft > 4 else 0.25\n if ts < ts_min:\n ts = ts_min\n \n # Calculate thickness to withstand wind/earthquake load\n Do = Di + ts\n tw = 0.22*(Do + 18)*L**2/(S*Do**2)\n tv = tw if tw > ts else ts\n \n # Vessels are fabricated from metal plates with small increments\n if tv < 0.5:\n tv = utils.approx2step(tv, 3/16, 1/16)\n elif tv < 2:\n tv = utils.approx2step(tv, 0.5, 1/8)\n elif tv < 3:\n tv = utils.approx2step(tv, 2, 1/4)\n return tv\n\n@njitable(cache=True)\ndef compute_tray_base_purchase_cost(Di):\n \"\"\"Return the base cost of a tray [C_BT; USD] at a CE of 500.\n \n Parameters\n ----------\n Di : float\n Inner diameter [ft].\n \n Notes\n -----\n The purchase cost is given by [1]_. See source code for details.\n \n \"\"\"\n return 412.6985 * np.exp(0.1482*Di)\n\n@njitable(cache=True)\ndef compute_n_trays_factor(N_T):\n \"\"\"\n Return the cost factor for number of trays, F_NT.\n \n Parameters\n ----------\n N_T: Number of trays\n \n Notes\n -----\n The cost factor is given by [1]_. See source code for details.\n \n \"\"\"\n if N_T < 20:\n F_NT = 2.25/1.0414**N_T\n else:\n F_NT = 1\n return F_NT\n\n@njitable(cache=True)\ndef compute_murphree_stage_efficiency(mu, alpha, L, V):\n \"\"\"\n Return the sectional murphree efficiency, E_mv.\n \n Parameters\n ----------\n mu: float\n Viscosity [mPa*s]\n alpha: float\n Relative volatility. 
\n L: float\n Liquid flow rate by mol.\n V: float\n Vapor flow rate by mol.\n \n Notes\n -----\n The efficiency is given by [2]_. See source code for details.\n \n \"\"\"\n S = alpha*V/L # Stripping factor\n e = 0.503*mu**(-0.226)*(S if S > 1 else 1/S)**(-0.08 )\n if e < 1: return e\n else: return 1\n\n@njitable(cache=True)\ndef compute_flow_parameter(L, V, rho_V, rho_L):\n \"\"\"\n Return the flow parameter, F_LV.\n \n Parameters\n ----------\n L : float\n Liquid flow rate by mass.\n V : float\n Vapor flow rate by mass.\n rho_V : float\n Vapor density.\n rho_L : float\n Liquid density.\n \n Notes\n -----\n The flow parameter is given by [3]_. See source code for details.\n \n \"\"\"\n return L/V*(rho_V/rho_L)**0.5\n\n@njitable(cache=True)\ndef compute_max_capacity_parameter(TS, F_LV):\n \"\"\"Return the maximum capacity parameter before flooding [C_sbf; in m/s].\n \n Parameters\n ----------\n TS : float\n Tray spacing [mm].\n F_LV : float\n Flow parameter.\n \n Notes\n -----\n The max capacity parameter is given by [3]_. See source code for details.\n \n \"\"\"\n return 0.0105 + 8.127e-4*TS**0.755*np.exp(-1.463*F_LV**0.842)\n\n@njitable(cache=True)\ndef compute_max_vapor_velocity(C_sbf, sigma, rho_L, rho_V, F_F, A_ha):\n \"\"\"\n Return the maximum allowable vapor velocity\n through the net area of flow before flooding [U_f; in m/s].\n \n Parameters\n ----------\n C_sbf : \n Maximum Capacity Parameter (m/s)\n sigma : \n Liquid surface tension (dyn/cm)\n rho_L : \n Liquid density\n rho_V : \n Vapor density\n F_F : \n Foaming factor\n A_ha : \n Ratio of open area, A_h, to active area, A_a\n \n Notes\n -----\n The max vapor velocity is given by [3]_. See source code for details.\n \n \"\"\"\n F_ST = (sigma/20)**0.2 # Surface tension factor\n \n # Working area factor\n if A_ha >= 0.1 and A_ha <= 1:\n F_HA = 1\n elif A_ha >= 0.06:\n F_HA = 5*A_ha + 0.5\n else:\n raise ValueError(\"ratio of open to active area, 'A', must be between 0.06 and 1\") \n \n return C_sbf * F_HA * F_ST * ((rho_L-rho_V)/rho_V)**0.5\n\n@njitable(cache=True)\ndef compute_downcomer_area_fraction(F_LV):\n \"\"\"\n Return the ratio of downcomer area to net (total) area, `A_dn`.\n \n Parameters\n ----------\n F_LV : float\n Flow parameter.\n\n Notes\n -----\n The fraction of downcomer area is given by [3]_. See source code for details.\n\n \"\"\"\n if F_LV < 0.1:\n A_dn = 0.1\n elif F_LV < 1:\n A_dn = 0.1 + (F_LV-0.1)/9\n else:\n A_dn = 0.2\n return A_dn\n\n@njitable(cache=True)\ndef compute_tower_diameter(V_vol, U_f, f, A_dn):\n \"\"\"Return tower diameter [D_T; in meter].\n \n Parameters\n ----------\n V_vol : float\n Vapor volumetric flow rate [m^3/s].\n U_f : float\n Maximum vapor velocity before flooding [m/s].\n f : float\n Ratio of actual velocity to `U_f`.\n A_dn : float\n Ratio of downcomer area to net (total) area.\n \n Notes\n -----\n The tower diameter is given by [3]_. See source code for details.\n \n \"\"\"\n Di = (4*V_vol/(f*U_f*np.pi*(1-A_dn)))**0.5\n if Di < 0.914:\n # Make sure diameter is not too small\n Di = 0.914\n return Di\n\n@njitable(cache=True)\ndef compute_tower_height(TS, N_stages: int, top=True, bot=True):\n \"\"\"\n Return the height of a tower [H; in meter].\n \n Parameters\n ----------\n TS : float\n Tray spacing [mm].\n N_stages : float\n Number of stages.\n \n Notes\n -----\n The tower height is given by [3]_. 
See source code for details.\n \n \"\"\"\n # 3 m bottoms surge capacity, 1.25 m above top tray to remove entrained liquid\n H = TS*N_stages/1000\n if top:\n H += 1.2672\n if bot:\n H += 3\n return H "
] | [
[
"numpy.log",
"numpy.exp"
]
] |
blondegeek/se3cnn | [
"513f5f827c4c511bdc96e3c6ea663c8fbce60f57"
] | [
"examples/point/structure.py"
] | [
"# pylint: disable=C, R, not-callable, no-member, arguments-differ\nimport json\nfrom functools import partial\n\nimport pymatgen\nimport torch\nimport random\nfrom se3cnn.non_linearities import GatedBlock\nfrom se3cnn.non_linearities.rescaled_act import relu, sigmoid\nfrom se3cnn.point.kernel import Kernel\nfrom se3cnn.point.operations import PeriodicConvolution\nfrom se3cnn.point.radial import CosineBasisModel\n\n\ndef get_dataset(filename):\n with open(filename, 'r') as f:\n dataset = json.load(f)\n\n structures = [pymatgen.Structure.from_dict(s) for s, l in dataset]\n classes = ['diamond', 'fcc', 'bcc', 'hcp', 'rutile', 'perovskite', 'spinel', 'corundum']\n labels = [classes.index(l) for s, l in dataset]\n\n return structures, labels\n\n\nclass AvgSpacial(torch.nn.Module):\n def forward(self, features):\n return features.mean(1)\n\n\nclass Network(torch.nn.Module):\n def __init__(self, num_classes):\n super().__init__()\n\n representations = [(1,), (4, 4, 4, 4), (4, 4, 4, 4), (4, 4, 4, 4), (64,)]\n representations = [[(mul, l) for l, mul in enumerate(rs)] for rs in representations]\n\n R = partial(CosineBasisModel, max_radius=3.8, number_of_basis=10, h=100, L=2, act=relu)\n K = partial(Kernel, RadialModel=R)\n C = partial(PeriodicConvolution, K)\n\n self.firstlayers = torch.nn.ModuleList([\n GatedBlock(Rs_in, Rs_out, relu, sigmoid, C)\n for Rs_in, Rs_out in zip(representations, representations[1:])\n ])\n self.lastlayers = torch.nn.Sequential(AvgSpacial(), torch.nn.Linear(64, num_classes))\n\n def forward(self, structure):\n p = next(self.parameters())\n geometry = torch.stack([p.new_tensor(s.coords) for s in structure.sites])\n features = p.new_ones(1, len(geometry), 1)\n geometry = geometry.unsqueeze(0)\n\n for i, m in enumerate(self.firstlayers):\n assert torch.isfinite(features).all(), i\n features = m(features.div(4 ** 0.5), geometry, structure.lattice, 3.8)\n\n return self.lastlayers(features).squeeze(0)\n\n\ndef main():\n import time\n torch.manual_seed(42)\n random.seed(42)\n torch.set_default_dtype(torch.float64)\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n structures, labels = get_dataset('structure-1atomstype-trainset.json')\n labels = torch.tensor(labels, device=device)\n\n f = Network(8)\n f = f.to(device)\n\n optimizer = torch.optim.Adam(f.parameters())\n success = []\n\n t1 = time.time()\n for step in range(800):\n i = random.randint(0, len(structures) - 1)\n struct = structures[i]\n target = labels[i]\n\n out = f(struct)\n success.append(1 if out.argmax().item() == target else 0)\n loss = torch.nn.functional.cross_entropy(out.unsqueeze(0), target.unsqueeze(0))\n loss.backward()\n\n if step % 2 == 0:\n optimizer.step()\n optimizer.zero_grad()\n # print(\"step={} loss={:.2e} {}\".format(step, loss.item(), success[-10:]))\n \n t2 = time.time()\n print(f\"Training time: {t2-t1:.2f} seconds\")\n\n def test(filename):\n structures, labels = get_dataset(filename)\n pred = [f(s).argmax().item() for s in structures]\n from sklearn.metrics import confusion_matrix\n print(confusion_matrix(labels, pred))\n\n with torch.no_grad():\n test('structure-1atomstype-trainset.json')\n test('structure-1atomstype-testset.json')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.nn.Linear",
"torch.manual_seed",
"torch.no_grad",
"torch.tensor",
"torch.set_default_dtype",
"sklearn.metrics.confusion_matrix",
"torch.cuda.is_available",
"torch.isfinite"
]
] |
thomasnevolianis/biotite | [
"cb238a8d8d7dc82b3bcea274d7d91d5c876badcd"
] | [
"src/biotite/application/msaapp.py"
] | [
"# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further\n# information.\n\n__name__ = \"biotite.application\"\n__author__ = \"Patrick Kunzmann\"\n__all__ = [\"MSAApp\"]\n\nimport abc\nfrom tempfile import NamedTemporaryFile\nfrom collections import OrderedDict\nimport numpy as np\nfrom .localapp import LocalApp\nfrom .application import AppState, requires_state\nfrom ..sequence.sequence import Sequence\nfrom ..sequence.seqtypes import NucleotideSequence, ProteinSequence\nfrom ..sequence.io.fasta.file import FastaFile\nfrom ..sequence.align.alignment import Alignment\nfrom ..sequence.align.matrix import SubstitutionMatrix\n\n\nclass MSAApp(LocalApp, metaclass=abc.ABCMeta):\n \"\"\"\n This is an abstract base class for multiple sequence alignment\n software.\n \n It handles conversion of :class:`Sequence` objects to FASTA input\n and FASTA output to an :class:`Alignment` object.\n Inheriting subclasses only need to incorporate the file path\n of these FASTA files into the program arguments.\n\n Furthermore, this class can handle custom substitution matrices,\n if the underlying program supports these.\n\n MSA software that supports alignment of protein sequences and custom\n substitution matrices, can be used to align exotic, normally\n unsupported sequence types:\n At first the exotic sequences are mapped into protein sequences and\n the custom substitution matrix is converted into a protein sequence\n substitution matrix.\n Then the protein sequences are aligned and finally the protein\n sequences are mapped back into the original sequence types.\n The mapping does not work, when the alphabet of the exotic\n sequences is larger than the amino acid alphabet.\n \n Internally this creates a :class:`Popen` instance, which handles\n the execution.\n \n Parameters\n ----------\n sequences : iterable object of Sequence\n The sequences to be aligned.\n bin_path : str, optional\n Path of the MSA software binary. 
By default, the default path\n will be used.\n matrix : SubstitutionMatrix, optional\n A custom substitution matrix.\n \"\"\"\n \n def __init__(self, sequences, bin_path, matrix=None):\n super().__init__(bin_path)\n \n if len(sequences) < 2:\n raise ValueError(\"At least two sequences are required\")\n # Check if all sequences share the same alphabet\n alphabet = sequences[0].get_alphabet()\n for seq in sequences:\n if seq.get_alphabet() != alphabet:\n raise ValueError(\"Alphabets of the sequences are not equal\")\n \n self._matrix = None\n # Check whether the program supports the alignment for the given\n # sequence type\n if ProteinSequence.alphabet.extends(alphabet) \\\n and self.supports_protein():\n self._is_mapped = False\n self._seqtype = \"protein\"\n if matrix is not None:\n if not self.supports_custom_protein_matrix():\n raise TypeError(\n \"The software does not support custom \"\n \"substitution matrices for protein sequences\"\n )\n self._matrix = matrix\n elif NucleotideSequence.alphabet_amb.extends(alphabet) \\\n and self.supports_nucleotide():\n self._is_mapped = False\n self._seqtype = \"nucleotide\"\n if matrix is not None:\n if not self.supports_custom_nucleotide_matrix():\n raise TypeError(\n \"The software does not support custom \"\n \"substitution matrices for nucleotide sequences\"\n )\n self._matrix = matrix\n else:\n # For all other sequence types, try to map the sequence into\n # a protein sequence\n if not self.supports_protein():\n # Alignment of a custom sequence type requires mapping\n # into a protein sequence\n raise TypeError(\n f\"The software cannot align sequences of type \"\n f\"{type(sequences[0]).__name__}: \"\n f\"No support for alignment of the mapped sequences\"\n )\n if not self.supports_custom_protein_matrix():\n # Alignment of a custom sequence type requires a custom\n # substitution matrix\n raise TypeError(\n f\"The software cannot align sequences of type \"\n f\"{type(sequences[0]).__name__}: \"\n f\"No support for custom substitution matrices\"\n )\n self._is_mapped = True\n self._sequences = sequences\n # Sequence masquerades as protein\n self._seqtype = \"protein\"\n self._mapped_sequences = MSAApp._map_sequences(sequences, alphabet)\n self._matrix = MSAApp._map_matrix(matrix)\n\n self._sequences = sequences\n self._in_file = NamedTemporaryFile(\"w\", suffix=\".fa\")\n self._out_file = NamedTemporaryFile(\"r\", suffix=\".fa\")\n self._matrix_file = NamedTemporaryFile(\"w\", suffix=\".mat\")\n\n def run(self):\n sequences = self._sequences if not self._is_mapped \\\n else self._mapped_sequences\n sequences_file = FastaFile()\n for i, seq in enumerate(sequences):\n sequences_file[str(i)] = str(seq)\n sequences_file.write(self._in_file)\n self._in_file.flush()\n if self._matrix is not None:\n self._matrix_file.write(str(self._matrix))\n self._matrix_file.flush()\n super().run()\n \n def evaluate(self):\n super().evaluate()\n alignment_file = FastaFile.read(self._out_file)\n seq_dict = OrderedDict(alignment_file)\n # Get alignment\n out_seq_str = [None] * len(seq_dict)\n for i in range(len(self._sequences)):\n out_seq_str[i] = seq_dict[str(i)]\n trace = Alignment.trace_from_strings(out_seq_str)\n self._alignment = Alignment(self._sequences, trace, None)\n # Also obtain original order\n self._order = np.zeros(len(seq_dict), dtype=int)\n for i, seq_index in enumerate(seq_dict):\n self._order[i] = int(seq_index)\n \n def clean_up(self):\n super().clean_up()\n self._in_file.close()\n self._out_file.close()\n self._matrix_file.close()\n \n 
@requires_state(AppState.JOINED)\n def get_alignment(self):\n \"\"\"\n Get the resulting multiple sequence alignment.\n \n Returns\n -------\n alignment : Alignment\n The global multiple sequence alignment.\n \"\"\"\n return self._alignment\n \n @requires_state(AppState.JOINED)\n def get_alignment_order(self):\n \"\"\"\n Get the order of the resulting multiple sequence alignment.\n\n Usually the order of sequences in the output file is\n different from the input file, e.g. the sequences are ordered\n according to the guide tree.\n After running an MSA software, the output sequence order of\n the alignment rearranged so that it is the same as the input\n order.\n This method returns the order of the sequences intended by the\n MSA software.\n \n Returns\n -------\n order : ndarray, dtype=int\n The sequence order intended by the MSA software.\n \n Examples\n --------\n Align sequences and restore the original order:\n\n app = ClustalOmegaApp(sequences)\n app.start()\n app.join()\n alignment = app.get_alignment()\n order = app.get_alignment_order()\n alignment = alignment[:, order]\n \"\"\"\n return self._order\n \n def get_input_file_path(self):\n \"\"\"\n Get input file path (FASTA format).\n \n PROTECTED: Do not call from outside.\n \n Returns\n -------\n path : str\n Path of input file.\n \"\"\"\n return self._in_file.name\n \n def get_output_file_path(self):\n \"\"\"\n Get output file path (FASTA format).\n \n PROTECTED: Do not call from outside.\n \n Returns\n -------\n path : str\n Path of output file.\n \"\"\"\n return self._out_file.name\n \n def get_matrix_file_path(self):\n \"\"\"\n Get file path for custom substitution matrix.\n \n PROTECTED: Do not call from outside.\n \n Returns\n -------\n path : str or None\n Path of substitution matrix.\n None if no matrix was given.\n \"\"\"\n return self._matrix_file.name if self._matrix is not None else None\n \n def get_seqtype(self):\n \"\"\"\n Get the type of aligned sequences.\n\n When a custom sequence type (neither nucleotide nor protein)\n is mapped onto a protein sequence, the return value is also\n ``'protein'``.\n \n PROTECTED: Do not call from outside.\n \n Returns\n -------\n seqtype : {'nucleotide', 'protein'}\n Type of sequences to be aligned.\n \"\"\"\n return self._seqtype\n \n @staticmethod\n @abc.abstractmethod\n def supports_nucleotide():\n \"\"\"\n Check whether this class supports nucleotide sequences for\n alignment.\n\n Returns\n -------\n support : bool\n True, if the class has support, false otherwise.\n \n PROTECTED: Override when inheriting.\n \"\"\"\n pass\n \n @staticmethod\n @abc.abstractmethod\n def supports_protein():\n \"\"\"\n Check whether this class supports nucleotide sequences for\n alignment.\n\n Returns\n -------\n support : bool\n True, if the class has support, false otherwise.\n \n PROTECTED: Override when inheriting.\n \"\"\"\n pass\n \n @staticmethod\n @abc.abstractmethod\n def supports_custom_nucleotide_matrix():\n \"\"\"\n Check whether this class supports custom substitution matrices\n for protein sequence alignment.\n\n Returns\n -------\n support : bool\n True, if the class has support, false otherwise.\n \n PROTECTED: Override when inheriting.\n \"\"\"\n pass\n \n @staticmethod\n @abc.abstractmethod\n def supports_custom_protein_matrix():\n \"\"\"\n Check whether this class supports custom substitution matrices\n for nucleotide sequence alignment.\n\n Returns\n -------\n support : bool\n True, if the class has support, false otherwise.\n \n PROTECTED: Override when inheriting.\n \"\"\"\n 
pass\n \n @staticmethod\n def _map_sequences(sequences, alphabet):\n if len(alphabet) > len(ProteinSequence.alphabet):\n # Cannot map into a protein sequence if the alphabet\n # has more symbols\n raise TypeError(\n f\"The software cannot align sequences of type \"\n f\"{type(sequences[0]).__name__}: \"\n f\"Alphabet is too large to be converted into amino \"\n f\"acid alphabet\"\n )\n mapped_sequences = []\n for seq in sequences:\n # Mapping is done by simply taking over the sequence\n # code of the original sequence\n prot_seq = ProteinSequence()\n prot_seq.code = seq.code\n mapped_sequences.append(prot_seq)\n return mapped_sequences\n \n @staticmethod\n def _map_matrix(matrix):\n if matrix is None:\n raise TypeError(\n \"A substitution matrix must be provided for custom \"\n \"sequence types\"\n )\n if not matrix.is_symmetric():\n raise ValueError(\n \"A symmetric matrix is required for \"\n \"multiple sequence alignments\"\n )\n # Create a protein substitution matrix with the values taken\n # from the original matrix\n # All trailing symbols are filled with zeros\n old_length = len(matrix.get_alphabet1())\n new_length = len(ProteinSequence.alphabet)\n new_score_matrix = np.zeros((new_length, new_length))\n new_score_matrix[:old_length, :old_length] = matrix.score_matrix()\n return SubstitutionMatrix(\n ProteinSequence.alphabet, ProteinSequence.alphabet,\n new_score_matrix\n )\n \n @classmethod\n def align(cls, sequences, bin_path=None, matrix=None):\n \"\"\"\n Perform a multiple sequence alignment.\n \n This is a convenience function, that wraps the :class:`MSAApp`\n execution.\n \n Parameters\n ----------\n sequences : iterable object of Sequence\n The sequences to be aligned\n bin_path : str, optional\n Path of the MSA software binary. By default, the default\n path will be used.\n matrix : SubstitutionMatrix, optional\n A custom substitution matrix.\n \n Returns\n -------\n alignment : Alignment\n The global multiple sequence alignment.\n \"\"\"\n if bin_path is None:\n app = cls(sequences, matrix=matrix)\n else:\n app = cls(sequences, bin_path, matrix=matrix)\n app.start()\n app.join()\n return app.get_alignment()\n"
] | [
[
"numpy.zeros"
]
] |
rahul1990bhatia/Algorithms | [
"0459c5a23ba5ed23785c1db5d2e2cc050ff553cd"
] | [
"Graph/kruskal.py"
] | [
"import abc #abstract base class\nimport numpy as np\n\n\n#####Adjacency Matrix#########\n# You should use adjacency matrix for small densily connected graphs\n# space complexity O(V^2)\n\n############################################################\n#Base Class representation of class with all interface methods\n############################################################\n\nclass graph(abc.ABC):\n\n def __init__(self, numVertices, directed=False):\n self.numVertices = numVertices\n self.directed = directed\n\n @abc.abstractmethod\n def add_edge(self, v1, v2, weight):\n pass\n\n @abc.abstractmethod\n def get_adjacent_vertices(self, v):\n pass\n\n @abc.abstractmethod\n def get_indegree(self, v):\n pass\n\n @abc.abstractmethod\n def get_edge_weight(self, v1, v2):\n pass\n\n @abc.abstractmethod\n def display(self):\n pass\n\n\n\n#################################################################\n# Represent the graph as adjacency matrix\n#################################################################\n\nclass AdjacencyMatrixGraph(graph):\n\n def __init__(self, numVertices, directed=False):\n super(AdjacencyMatrixGraph,self).__init__(numVertices, directed)\n self.matrix = np.zeros((numVertices,numVertices))\n\n def add_edge(self, v1, v2, weight=1):\n if v1 >= self.numVertices or v2 >= self.numVertices or v1 < 0 or v2 < 0:\n raise ValueError(\"Vertices %d and %d are out of bounds\" %(v1, v2))\n\n if weight < 1:\n raise ValueError(\"An edge weight cannot be < 1\")\n\n self.matrix[v1][v2] = weight\n\n if self.directed == False:\n self.matrix[v2][v1] = weight\n\n def get_adjacent_vertices(self, v):\n if v < 0 or v >= self.numVertices:\n raise ValueError(\"Can't access vertex %d\" % v)\n\n adjacent_vertices = []\n\n for i in range(self.numVertices):\n if self.matrix[v][i] > 0:\n adjacent_vertices.append(i)\n\n return adjacent_vertices\n\n def get_indegree(self, v):\n if v < 0 or v >= self.numVertices:\n raise ValueError(\"Can't access vertex %d\" % v)\n\n indegree = 0\n for i in range(self.numVertices):\n if self.matrix[i][v] > 0:\n indegree = indegree + 1\n\n return indegree\n\n def get_edge_weight(self, v1, v2):\n if v1 >= self.numVertices or v2 >= self.numVertices or v1 < 0 or v2 < 0:\n raise ValueError(\"Vertices %d and %d are out of bounds\" %(v1, v2))\n\n return self.matrix[v1][v2]\n\n def display(self):\n for i in range(self.numVertices):\n for v in self.get_adjacent_vertices(i):\n print(i, \"-->\", v)\n\n\n\ndef spanning_tree(graph):\n priority_queue = {}\n\n for v in range(graph.numVertices):\n for neigbor in graph.get_adjacent_vertices(v):\n priority_queue[(v,neigbor)] = graph.get_edge_weight(v, neigbor)\n\n visited_vertices = set()\n spanning_tree_dict = {}\n\n for v in range(graph.numVertices):\n spanning_tree_dict[v] = set()\n\n\n num_edge = 0\n\n while len(priority_queue.keys()) > 0 and num_edge < graph.numVertices - 1:\n min_val = 10000000\n min_key = None\n for key,value in priority_queue.items():\n if value < min_val:\n min_val = value\n min_key = key\n priority_queue.pop(min_key)\n v1, v2 = min_key\n\n if v1 in spanning_tree_dict[v2]:\n continue\n\n vertex_pair = sorted([v1,v2])\n\n spanning_tree_dict[vertex_pair[0]].add(vertex_pair[1])\n\n if has_cycle(spanning_tree_dict):\n spanning_tree_dict[vertex_pair[0]].remove(vertex_pair[1])\n continue\n\n num_edge = num_edge + 1\n visited_vertices.add(v1)\n visited_vertices.add(v2)\n\n print(\"visited_vertices: \", visited_vertices)\n\n if len(visited_vertices) != graph.numVertices:\n print(\"Minimum spanning tree not found!\")\n 
else:\n print(\"Spanning tree found\")\n for key in spanning_tree_dict:\n for value in spanning_tree_dict[key]:\n print(key, \"-->\", value)\n\ndef has_cycle(spanning_tree):\n\n for source in spanning_tree:\n q = []\n q.append(source)\n\n visited_vertices = set()\n while len(q) > 0:\n vertex = q.pop(0)\n\n if vertex in visited_vertices:\n return True\n\n visited_vertices.add(vertex)\n q.extend(spanning_tree[vertex])\n\n return False\n\ng = AdjacencyMatrixGraph(8)\ng.add_edge(0, 1, 1)\ng.add_edge(1, 2, 2)\ng.add_edge(1, 3, 2)\ng.add_edge(2, 3, 2)\ng.add_edge(1, 4, 3)\ng.add_edge(3, 5, 1)\ng.add_edge(5, 4, 2)\ng.add_edge(3, 6, 1)\ng.add_edge(6, 7, 1)\ng.add_edge(7, 0, 1)\n\n\nspanning_tree(g)\n"
] | [
[
"numpy.zeros"
]
] |
youlu860612/IDAR | [
"30e711fafb17731905febc8e86cb51306456d023"
] | [
"src/rule/msl_v2_rule.py"
] | [
"# msl_v2_rule.py\n# Defines the rules for gold standard and ambiguous subjects for msl v2 labels\n#\n# Steven Lu 8/21/2019\n\nimport numpy as np\n\n\nclass MSLV2Rule(object):\n def __init__(self, retirement_count):\n if retirement_count % 2 == 0 and retirement_count < 3:\n raise ValueError('retirement count must be an odd number that is '\n 'greater than or equal to 3 to apply marjority '\n 'rule.')\n self._retirement_count = retirement_count\n\n def get_retirement_count(self):\n return self._retirement_count\n\n def set_retirement_count(self, retirement_couont):\n self._retirement_count = retirement_couont\n\n def is_gold_standard(self, subject):\n records = subject.get_records()\n\n # subject doesn't go to gold standard list if it is not retired yet.\n if len(records) < self._retirement_count:\n return False\n\n annotations = [r.get_annotation() for r in records]\n votes = np.unique(annotations)\n\n # all votes must agree to be considered gold standard subject\n if len(votes) == 1:\n return True\n else:\n return False\n\n def is_ambiguous(self, subject):\n records = subject.get_records()\n\n # subject doesn't go to ambiguous list if it is not retired yet.\n if len(records) < self._retirement_count:\n return False\n\n annotations = [r.get_annotation() for r in records]\n votes = np.unique(annotations)\n\n if len(votes) > 1:\n return True\n else:\n return False\n\n # extract gold standard annotation. In order to call this function, the\n # subject variable must be gold standard.\n def extract_gold_standard_annotation(self, subject):\n records = subject.get_records()\n annotations = [r.get_annotation() for r in records]\n\n if len(np.unique(annotations)) > 1:\n raise Exception('extract_gold_standard_annotation() should not be '\n 'used if the subject is not gold standard.')\n\n return annotations[0]\n"
] | [
[
"numpy.unique"
]
] |
egilbertson-ucsf/basenji | [
"52dc3dbd53aa12f482041007236e89ef94f48cb4",
"3d06ec0f5e12f7c5c4be8dfd17efdad3feefd68f"
] | [
"bin/basenji_data_hic_write.py",
"bin/sonnet_sad_multi.py"
] | [
"#!/usr/bin/env python\n# Copyright 2017 Calico LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# https://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =========================================================================\nfrom optparse import OptionParser\nimport os\nimport sys\n\nimport h5py\nimport numpy as np\nimport pdb\nimport pysam\n\nfrom basenji_data import ModelSeq\nfrom basenji.dna_io import dna_1hot\n\nimport tensorflow as tf\n\n\"\"\"\nbasenji_data_write.py\n\nWrite TF Records for batches of model sequences.\n\"\"\"\n\n################################################################################\n# main\n################################################################################\ndef main():\n usage = 'usage: %prog [options] <fasta_file> <seqs_bed_file> <seqs_cov_dir> <tfr_file>'\n parser = OptionParser(usage)\n parser.add_option('-g', dest='genome_index',\n default=None, type='int', help='Genome index')\n parser.add_option('-s', dest='start_i',\n default=0, type='int',\n help='Sequence start index [Default: %default]')\n parser.add_option('-e', dest='end_i',\n default=None, type='int',\n help='Sequence end index [Default: %default]')\n parser.add_option('--te', dest='target_extend',\n default=None, type='int', help='Extend targets vector [Default: %default]')\n parser.add_option('--ts', dest='target_start',\n default=0, type='int', help='Write targets into vector starting at index [Default: %default')\n parser.add_option('-u', dest='umap_npy',\n help='Unmappable array numpy file')\n parser.add_option('--umap_set', dest='umap_set',\n default=None, type='float',\n help='Sequence distribution value to set unmappable positions to, eg 0.25.')\n (options, args) = parser.parse_args()\n\n if len(args) != 4:\n parser.error('Must provide input arguments.')\n else:\n fasta_file = args[0]\n seqs_bed_file = args[1]\n seqs_cov_dir = args[2]\n tfr_file = args[3]\n\n ################################################################\n # read model sequences\n\n model_seqs = []\n for line in open(seqs_bed_file):\n a = line.split()\n model_seqs.append(ModelSeq(a[0],int(a[1]),int(a[2]),None))\n\n if options.end_i is None:\n options.end_i = len(model_seqs)\n\n num_seqs = options.end_i - options.start_i\n\n ################################################################\n # determine sequence coverage files\n\n seqs_cov_files = []\n ti = 0\n if options.genome_index is None:\n seqs_cov_file = '%s/%d.h5' % (seqs_cov_dir, ti)\n else:\n seqs_cov_file = '%s/%d-%d.h5' % (seqs_cov_dir, options.genome_index, ti)\n while os.path.isfile(seqs_cov_file):\n seqs_cov_files.append(seqs_cov_file)\n ti += 1\n if options.genome_index is None:\n seqs_cov_file = '%s/%d.h5' % (seqs_cov_dir, ti)\n else:\n seqs_cov_file = '%s/%d-%d.h5' % (seqs_cov_dir, options.genome_index, ti)\n\n if len(seqs_cov_files) == 0:\n print('Sequence coverage files not found, e.g. 
%s' % seqs_cov_file, file=sys.stderr)\n exit(1)\n\n seq_pool_len = h5py.File(seqs_cov_files[0], 'r')['seqs_hic'].shape[1]\n num_targets = len(seqs_cov_files)\n\n ################################################################\n # read targets\n\n # extend targets\n num_targets_tfr = num_targets\n if options.target_extend is not None:\n assert(options.target_extend >= num_targets_tfr)\n num_targets_tfr = options.target_extend\n\n # initialize targets\n targets = np.zeros((num_seqs, seq_pool_len, seq_pool_len, num_targets_tfr), dtype='float16')\n\n # read each target\n for ti in range(num_targets):\n seqs_cov_open = h5py.File(seqs_cov_files[ti], 'r')\n tii = options.target_start + ti\n targets[:,:,:,tii] = seqs_cov_open['seqs_hic'][options.start_i:options.end_i,:,:]\n seqs_cov_open.close()\n\n ################################################################\n # modify unmappable\n\n if options.umap_npy is not None and options.umap_set is not None:\n unmap_mask = np.load(options.umap_npy)\n\n for si in range(num_seqs):\n msi = options.start_i + si\n\n # determine unmappable null value\n seq_target_null = np.percentile(targets[si], q=[100*options.umap_set], axis=0)[0]\n\n # set unmappable positions to null\n targets[si,unmap_mask[msi,:],unmap_mask[msi,:],:] = np.minimum(targets[si,unmap_mask[msi,:],unmap_mask[msi,:],:], seq_target_null)\n\n ################################################################\n # write TFRecords\n\n # open FASTA\n fasta_open = pysam.Fastafile(fasta_file)\n\n # define options\n tf_opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)\n\n with tf.python_io.TFRecordWriter(tfr_file, tf_opts) as writer:\n for si in range(num_seqs):\n msi = options.start_i + si\n mseq = model_seqs[msi]\n\n # read FASTA\n seq_dna = fasta_open.fetch(mseq.chr, mseq.start, mseq.end)\n\n # one hot code\n seq_1hot = dna_1hot(seq_dna)\n\n if options.genome_index is None:\n example = tf.train.Example(features=tf.train.Features(feature={\n 'genome': _int_feature(0),\n 'sequence': _bytes_feature(seq_1hot.flatten().tostring()),\n 'target': _bytes_feature(targets[si,:,:,:].flatten().tostring())}))\n else:\n example = tf.train.Example(features=tf.train.Features(feature={\n 'genome': _int_feature(options.genome_index),\n 'sequence': _bytes_feature(seq_1hot.flatten().tostring()),\n 'target': _bytes_feature(targets[si,:,:,:].flatten().tostring())}))\n\n writer.write(example.SerializeToString())\n\n fasta_open.close()\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _int_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n################################################################################\n# __main__\n################################################################################\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python\n# Copyright 2017 Calico LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# https://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =========================================================================\n\nfrom optparse import OptionParser\nimport glob\nimport os\nimport pickle\nimport shutil\nimport subprocess\nimport sys\n\nimport h5py\nimport numpy as np\n\nimport slurm\n\n\"\"\"\nsonnet_sad_multi.py\n\nCompute SNP expression difference scores for variants in a VCF file,\nusing multiple processes.\n\"\"\"\n\n################################################################################\n# main\n################################################################################\ndef main():\n usage = 'usage: %prog [options] <model> <vcf_file>'\n parser = OptionParser(usage)\n\n # sad\n parser.add_option('-b', dest='batch_size',\n default=4, type='int',\n help='Batch size [Default: %default]')\n parser.add_option('-c', dest='slice_center',\n default=None, type='int',\n help='Slice center positions [Default: %default]')\n parser.add_option('-f', dest='genome_fasta',\n default='%s/data/hg19.fa' % os.environ['BASENJIDIR'],\n help='Genome FASTA for sequences [Default: %default]')\n parser.add_option('-o',dest='out_dir',\n default='sad',\n help='Output directory for tables and plots [Default: %default]')\n parser.add_option('--pseudo', dest='log_pseudo',\n default=1, type='float',\n help='Log2 pseudocount [Default: %default]')\n parser.add_option('--rc', dest='rc',\n default=False, action='store_true',\n help='Average forward and reverse complement predictions [Default: %default]')\n parser.add_option('--shifts', dest='shifts',\n default='0', type='str',\n help='Ensemble prediction shifts [Default: %default]')\n parser.add_option('--species', dest='species',\n default='human')\n parser.add_option('--stats', dest='sad_stats',\n default='SAD',\n help='Comma-separated list of stats to save. 
[Default: %default]')\n parser.add_option('-t', dest='targets_file',\n default=None, type='str',\n help='File specifying target indexes and labels in table format')\n\n # multi\n parser.add_option('-e', dest='conda_env',\n default='tf2.6',\n help='Anaconda environment [Default: %default]')\n parser.add_option('--name', dest='name',\n default='sad', help='SLURM name prefix [Default: %default]')\n parser.add_option('--max_proc', dest='max_proc',\n default=None, type='int',\n help='Maximum concurrent processes [Default: %default]')\n parser.add_option('-p', dest='processes',\n default=None, type='int',\n help='Number of processes, passed by multi script')\n parser.add_option('-q', dest='queue',\n default='gtx1080ti',\n help='SLURM queue on which to run the jobs [Default: %default]')\n parser.add_option('-r', dest='restart',\n default=False, action='store_true',\n help='Restart a partially completed job [Default: %default]')\n (options, args) = parser.parse_args()\n\n if len(args) != 2:\n parser.error('Must provide model and VCF file')\n else:\n model_file = args[0]\n vcf_file = args[1]\n\n #######################################################\n # prep work\n\n # output directory\n if not options.restart:\n if os.path.isdir(options.out_dir):\n print('Please remove %s' % options.out_dir, file=sys.stderr)\n exit(1)\n os.mkdir(options.out_dir)\n\n # pickle options\n options_pkl_file = '%s/options.pkl' % options.out_dir\n options_pkl = open(options_pkl_file, 'wb')\n pickle.dump(options, options_pkl)\n options_pkl.close()\n\n #######################################################\n # launch worker threads\n jobs = []\n for pi in range(options.processes):\n if not options.restart or not job_completed(options, pi):\n cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'\n cmd += ' conda activate %s;' % options.conda_env\n\n cmd += ' sonnet_sad.py %s %s %d' % (\n options_pkl_file, ' '.join(args), pi)\n\n name = '%s_p%d' % (options.name, pi)\n outf = '%s/job%d.out' % (options.out_dir, pi)\n errf = '%s/job%d.err' % (options.out_dir, pi)\n\n j = slurm.Job(cmd, name,\n outf, errf,\n queue=options.queue, gpu=1,\n mem=22000, time='14-0:0:0')\n jobs.append(j)\n\n slurm.multi_run(jobs, max_proc=options.max_proc, verbose=True,\n launch_sleep=10, update_sleep=60)\n\n #######################################################\n # collect output\n\n collect_h5('sad.h5', options.out_dir, options.processes)\n\n # for pi in range(options.processes):\n # shutil.rmtree('%s/job%d' % (options.out_dir,pi))\n\n\ndef collect_h5(file_name, out_dir, num_procs):\n # count variants\n num_variants = 0\n for pi in range(num_procs):\n # open job\n job_h5_file = '%s/job%d/%s' % (out_dir, pi, file_name)\n job_h5_open = h5py.File(job_h5_file, 'r')\n num_variants += len(job_h5_open['snp'])\n job_h5_open.close()\n\n # initialize final h5\n final_h5_file = '%s/%s' % (out_dir, file_name)\n final_h5_open = h5py.File(final_h5_file, 'w')\n\n # keep dict for string values\n final_strings = {}\n\n job0_h5_file = '%s/job0/%s' % (out_dir, file_name)\n job0_h5_open = h5py.File(job0_h5_file, 'r')\n for key in job0_h5_open.keys():\n if key in ['percentiles', 'target_ids', 'target_labels']:\n # copy\n final_h5_open.create_dataset(key, data=job0_h5_open[key])\n\n elif key[-4:] == '_pct':\n values = np.zeros(job0_h5_open[key].shape)\n final_h5_open.create_dataset(key, data=values)\n\n elif job0_h5_open[key].dtype.char == 'S':\n final_strings[key] = []\n\n elif job0_h5_open[key].ndim == 1:\n final_h5_open.create_dataset(key, shape=(num_variants,), 
dtype=job0_h5_open[key].dtype)\n\n else:\n num_targets = job0_h5_open[key].shape[1]\n final_h5_open.create_dataset(key, shape=(num_variants, num_targets), dtype=job0_h5_open[key].dtype)\n\n job0_h5_open.close()\n\n # set values\n vi = 0\n for pi in range(num_procs):\n # open job\n job_h5_file = '%s/job%d/%s' % (out_dir, pi, file_name)\n job_h5_open = h5py.File(job_h5_file, 'r')\n\n # append to final\n for key in job_h5_open.keys():\n if key in ['percentiles', 'target_ids', 'target_labels']:\n # once is enough\n pass\n\n elif key[-4:] == '_pct':\n # average\n u_k1 = np.array(final_h5_open[key])\n x_k = np.array(job_h5_open[key])\n final_h5_open[key][:] = u_k1 + (x_k - u_k1) / (pi+1)\n\n else:\n if job_h5_open[key].dtype.char == 'S':\n final_strings[key] += list(job_h5_open[key])\n else:\n job_variants = job_h5_open[key].shape[0]\n final_h5_open[key][vi:vi+job_variants] = job_h5_open[key]\n\n vi += job_variants\n job_h5_open.close()\n\n # create final string datasets\n for key in final_strings:\n final_h5_open.create_dataset(key,\n data=np.array(final_strings[key], dtype='S'))\n\n final_h5_open.close()\n\n\ndef job_completed(options, pi):\n \"\"\"Check whether a specific job has generated its\n output file.\"\"\"\n out_file = '%s/job%d/sad.h5' % (options.out_dir, pi)\n return os.path.isfile(out_file) or os.path.isdir(out_file)\n\n\n################################################################################\n# __main__\n################################################################################\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.load",
"numpy.zeros",
"tensorflow.python_io.TFRecordOptions",
"tensorflow.train.Int64List",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.BytesList",
"numpy.percentile",
"numpy.minimum"
],
[
"numpy.array",
"numpy.zeros"
]
] |
nisheetpatel/DynamicResourceAllocator | [
"39d196e023e846d2e2ec1e6bccab57998352f7fa"
] | [
"scripts/resourceAllocator.py"
] | [
"import numpy as np\nimport pandas as pd\n\nclass GradientFreeResourceAllocator:\n def __init__(self, depth=3, lmda=1, n_restarts=10, n_trials=int(1e3),\\\n allocationMethod='variablePrecision'):\n # General parameters with default values\n self.lmda = lmda\n self.n_trials = n_trials\n self.n_restarts = n_restarts\n self.allocationMethod = allocationMethod\n \n # Fixed attributes\n self.costType = 'dkl'\n self.randomizeStartState = True\n\n # Parameters specific for Huys' planning task\n self.depth = depth\n self.searchBudget = list(range(depth,(depth * 2**depth)+1,depth))\n q = np.array([170,100, 10, 10, 60, 10, 50, 60, 70, 60,120,50,\\\n 140, 70, 30, 30, 30, 30, 20, 80, 30, 80, 80,70,\\\n 100, 20,-20, 0,-20, 50,-20, 50, 50,100,100,20,\\\n 120, 40,-40,-90, 0,-50, 0,-40, 70, 0,120, 0,\\\n 140, 20,-20,-70,-20,-70, 20,-20,-70,-20,-20,20])\n self.q = q[ (len(q) - 12*depth) : ]\n\n # Task environment\n from task import HuysTask\n self.env = HuysTask(depth=self.depth)\n\n \n # Methods\n @staticmethod\n def kl_mvn(m0, S0, m1, S1):\n \"\"\"\n Kullback-Liebler divergence from Gaussian pm,pv to Gaussian qm,qv.\n Diagonal covariances are assumed. Divergence is expressed in nats.\n \"\"\" \n # store inv diag covariance of S1 and diff between means\n N = m0.shape[0]\n iS1 = np.linalg.inv(S1)\n diff = m1 - m0\n\n # kl is made of three terms\n tr_term = np.trace(iS1 @ S0)\n det_term = np.log(np.linalg.det(S1)/np.linalg.det(S0))\n quad_term = diff.T @ np.linalg.inv(S1) @ diff \n return .5 * (tr_term + det_term + quad_term - N)\n\n \n def reshapeSigma(self, sigma):\n # Reshaping sigma to be the right size for Huys' planning task\n if self.allocationMethod=='variablePrecision': # memories can have varying certainty\n sigma = np.array(list(sigma) + [1]*12)\n elif self.allocationMethod=='equalPrecision': # all memories have same uncertainty\n sigma = [float(sigma)]*(12*(self.depth-1)) + [1]*12\n if self.costType=='ent':\n sigma = np.clip(sigma,1,100) # setting upper and lower bounds\n return sigma\n\n\n def cost(self, sigma):\n \"\"\"\n Returns the cost associated with a given resource allocation.\n \"\"\"\n sigma = self.reshapeSigma(sigma)\n sigma_base = [100]*(12*(self.depth-1)) + [1]*12\n return {\n 'ent': - self.lmda * np.sum(2*np.log(sigma)),\n 'dkl': self.lmda * self.kl_mvn(self.q, np.diag(np.square(sigma)), \\\n self.q, np.diag(np.square( sigma_base )))\n }.get(self.costType, 0) # 0 is default if x is not found\n\n\n def expectedRewards(self, sigma, searchBudget):\n \"\"\"\n Returns mean reward obtained across n_trials for given search budget\n and resource allocation vector, sigma (uncertainty across memories).\n \"\"\"\n # Unpacking environment variables\n T = self.env.transition_matrix\n R = self.env.reward_matrix\n n_states = len(T) # length of transition matrix\n sigma = self.reshapeSigma(sigma)\n\n def onehot(ind, size=n_states):\n a = np.zeros((size))\n a[ind] = 1\n return a\n\n if self.randomizeStartState:\n start_state = np.random.choice(np.arange(6), size=self.n_trials)\n\n rewardsObtained = []\n for s0 in start_state:\n # Draw samples from the q distribution once for each trial\n q = np.random.normal(self.q, sigma)\n \n # Define search parameters\n N_accesses = searchBudget\n paths = [[s0]]*N_accesses\n rewards = [0]*N_accesses\n \n # Define array to disallow re-exploring states (convoluted)\n N_visitsLeft = []\n for i in range(self.depth+1):\n N_visitsLeft += 2**i * [int(2**(self.depth-i))]\n N_visitsLeft = np.array(N_visitsLeft)\n aa = '1'\n\n # Run the trial according to tree policy 
(currently depth-first search)\n s = s0 # initially\n while N_accesses > 0:\n # Thompson sampling: returns binary action 0 or 1\n a = np.argmax(q[2*s:2*s+2])\n s1 = np.nonzero(np.dot(onehot(s), T))[0][a] # next state\n\n # disallow re-exploring paths (convoluted; not implemented in paper)\n aa += str(a)\n a_idx = int(aa,2) - 1\n if N_visitsLeft[a_idx] == 0:\n s1 = np.argmax(np.dot(onehot(s), T) - onehot(s1))\n aa = aa[:-1] + str(abs(int(aa[-1])-1)) # stupid hack \n a_idx = int(aa,2) - 1\n\n # Getting rewards and updating time left\n r = R[s,s1]\n N_visitsLeft[a_idx] -= 1\n N_accesses -= 1\n \n # store paths accessed in working memory\n for index, row in enumerate(paths):\n if row[-1] == s:\n paths[index] = paths[index] + [s1]\n rewards[index] += r\n break\n \n # setup for the next step\n if s1 < n_states-6:\n s = s1 # if state non-terminal, curr_state = s1 (next_state)\n else:\n s = s0 # if state terminal, curr_state = s0 (initial/root)\n aa = '1'\n\n # Keep only paths that are fully discovered\n count = sum(map(lambda x: len(x) == self.depth+1, paths))\n paths = paths[:count]\n rewards = rewards[:count]\n\n # reward obtained for the trial (and path chosen commented)\n reward = max(rewards) # bestPath = paths[np.argmax(rewards)]\n rewardsObtained += [reward]\n\n return np.mean(rewardsObtained)\n\n\n def optimize(self, searchBudget):\n \"\"\"\n Optimally allocate resources across memories for a given search budget.\n \"\"\"\n import itertools\n import multiprocessing as mp\n\n # Define an output queue\n output = mp.Queue()\n\n # Define the objective function\n def obj_func(sigma, searchBudget=searchBudget):\n expectedReward = self.expectedRewards(sigma=sigma, searchBudget=searchBudget)\n cost = self.cost(sigma=sigma)\n return (-expectedReward + cost)\n\n # Define the optimisation function with output sent to queue\n def optimize_local(output=output):\n # \n if self.allocationMethod=='variablePrecision':\n # importing CMAES libarary\n import cma\n\n # setting up parameters and running the optimization \n x0 = 50 + 15*np.random.randn(12*(self.depth-1))\n res = cma.fmin(obj_func, x0, 30, options={'bounds':[1,100],\\\n 'tolfun':1, 'maxfevals': int(1e4)})\n sigma_opt = res[0] \n\n elif self.allocationMethod=='equalPrecision':\n # importing Bayesian optimization libraries\n import GPy, GPyOpt\n from GPyOpt.methods import BayesianOptimization\n\n # setting up parameters and running the optimization\n kernel = GPy.kern.Matern52(input_dim=1)\n domain = [{'name': 'var_1', 'type': 'continuous', 'domain': (0,100)}]\n optimizer = BayesianOptimization(obj_func, domain=domain, kernel=kernel)\n optimizer.run_optimization(max_iter=50)\n sigma_opt = optimizer.X[optimizer.Y.argmin()]\n\n # appending the result (scalar sigma) to output queue\n output.put(sigma_opt)\n\n # Setup a list of processes that we want to run\n processes = [mp.Process(target=optimize_local) for x in range(self.n_restarts)]\n\n # Run processes\n for p in processes:\n p.start()\n\n # Exit the completed processes\n for p in processes:\n p.join()\n\n # Get process results from the output queue\n results = [output.get() for p in processes]\n\n return results\n\n\n def run_optimization(self, saveResults=True):\n \"\"\"\n Find optimal resource allocation for all possible search budgets.\n \"\"\"\n results = [] \n for budget in self.searchBudget:\n print(f\"\\n\\nOptimizing for search budget = {budget} \\n\")\n result = self.optimize(budget)\n results += [result]\n\n # Saving the results\n if saveResults:\n pass # decide on a folder structure 
and save the results\n return np.array(results)\n\n\n\nif __name__ == \"__main__\":\n # Defining the two models\n varPrecModel = GradientFreeResourceAllocator()\n eqPrecModel = GradientFreeResourceAllocator(allocationMethod='equalPrecision')\n\n # Optimizing memory allocation and plotting results\n varPrecModel.results = varPrecModel.run_optimization()\n\n # Plotting results\n import plotting\n f_m, ax_m = plotting.plot_table(varPrecModel, tableColour='mean')\n f_s, ax_s = plotting.plot_table(varPrecModel, tableColour='std')\n f,ax1,ax2 = plotting.plot_curves(varPrecModel)\n g,bx1 = plotting.plot_dprime(varPrecModel, table=True)\n cx = plotting.plot_dprime(varPrecModel, table=False)\n\n # Saving files\n # import pickle\n # fh = open('varPrecModel.pkl','wb')\n # pickle.dump(varPrecModel, fh, pickle.HIGHEST_PROTOCOL)\n # fh.close()"
] | [
[
"numpy.zeros",
"numpy.linalg.inv",
"numpy.linalg.det",
"numpy.random.normal",
"numpy.random.randn",
"numpy.argmax",
"numpy.arange",
"numpy.trace",
"numpy.clip",
"numpy.log",
"numpy.array",
"numpy.square",
"numpy.mean"
]
] |
bisounoursrulio/package_test | [
"905db1bf8398055a9569f8245b00568a5f74c4c4"
] | [
"tests/automatic_tests.py"
] | [
"from __future__ import division\nimport unittest\nimport numpy as np\nimport linvpy as lp\nimport generate_random\nfrom scipy.sparse.linalg import lsmr\nimport matplotlib.pyplot as plt\nimport optimal as opt\nimport mestimator_marta as marta\nimport random\nimport copy\nimport toolboxinverse as inv\n\nTESTING_ITERATIONS = 50\n# For a matrix to be ill-conditioned, its condition number must be equal to or\n# greather than ILL_CONDITION_CRITERIA\nILL_CONDITION_CRITERIA = 1000\n\nPLOT_INTERVAL = 100\n\nclass TestUM(unittest.TestCase):\n\n\t# preparing to test\n\tdef setUp(self):\n\t\t''' Setting up for the test '''\n\t\t#print 'FooTest:setUp_:end'\n\t \n\t# ending the test\n\tdef tearDown(self):\n\t\t'''Cleaning up after the test'''\n\t\t#print 'FooTest:tearDown_:begin'\n\t\t## do something...\n\t\t#print 'FooTest:tearDown_:end'\n \n\t# Tests least_squares() on random inputs from size 1 to TESTING_ITERATIONS\n\tdef test_least_squares(self):\n\t\tfor i in range(1,TESTING_ITERATIONS):\n\t\t\tA,y = generate_random.generate_random(i,i) \n\t\t\tself.assertEquals(\n\t\t\t\tlp.least_squares(A,y).all(), \n\t\t\t\tnp.linalg.lstsq(A,y)[0].all()\n\t\t\t\t)\n\n\n\t# Tests the ill-conditoned matrix generator\n\t# Checks that the condition number is greather than ILL_CONDITION_CRITERIA\n\tdef test_ill_conditioned_matrix(self):\n\t\tfor i in range(3,TESTING_ITERATIONS):\n\t\t\tself.assertTrue(\n\t\t\t\tnp.linalg.cond(\n\t\t\t\t\tgenerate_random.generate_random_ill_conditioned(i)[0]\n\t\t\t\t\t) > ILL_CONDITION_CRITERIA\n\t\t\t\t)\n\t\n\n\t# Tests Tikhonov regularization against the native Scipy function\n\tdef test_tikhonov(self):\n\t\tfor i in range(2,TESTING_ITERATIONS):\n\t\t\t# Generates random lambda\n\t\t\tLAMBDA = np.random.rand(1)\n\t\t\tA,y = generate_random.generate_random_ill_conditioned(i)\n\t\t\tself.assertEquals(\n\t\t\t\tlp.tikhonov_regularization(A,y,LAMBDA).all(), \n\t\t\t\tlsmr(A,y,LAMBDA)[0].all()\n\t\t\t\t)\n\n# Plots loss functions\ndef plot_loss_functions():\n\tplt.plot([lp.rho_huber(i) for i in range(-PLOT_INTERVAL,PLOT_INTERVAL)], label=\"rho_huber\")\n\tplt.plot([lp.psi_huber(i) for i in range(-PLOT_INTERVAL,PLOT_INTERVAL)], label=\"psi_huber\")\n\tplt.plot([lp.rho_bisquare(i) for i in range(-PLOT_INTERVAL,PLOT_INTERVAL)], label=\"rho_bisquare\")\n\tplt.plot([lp.psi_bisquare(i) for i in range(-PLOT_INTERVAL,PLOT_INTERVAL)], label=\"psi_bisquare\")\n\tplt.plot([lp.rho_cauchy(i) for i in range(-PLOT_INTERVAL,PLOT_INTERVAL)], label=\"rho_cauchy\")\n\tplt.plot([lp.psi_cauchy(i) for i in range(-PLOT_INTERVAL,PLOT_INTERVAL)], label=\"psi_cauchy\")\n\n\t# Puts a legend box above the plots\n\tplt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n\t ncol=2, mode=\"expand\", borderaxespad=0.)\n\n\t# Displays the plots\n\tplt.show()\n\n# Uncomment the following line to display plots :\n#plot_loss_functions()\n\nif __name__ == '__main__':\n\tunittest.main()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.linalg.lstsq",
"numpy.random.rand",
"scipy.sparse.linalg.lsmr"
]
] |
HiKapok/DAN | [
"fb726fad86b3f53d12c7bc5b833a705d7d885563"
] | [
"dataset/convert_tfrecords.py"
] | [
"# Copyright 2018 Changan Wang\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport os\nimport random\nimport sys\nimport threading\nimport xml.etree.ElementTree as xml_tree\n\nimport numpy as np\nimport six\nimport tensorflow as tf\n\nimport dataset_common\n\n'''How to organize your dataset folder:\n WIDERFACE Dataset/\n |->WIDER_train/\n | |->images/\n | |->...\n |->WIDER_val/\n | |->images/\n | |->...\n |->WIDER_test/\n | |->images/\n | |->...\n |->wider_face_split/\n | |->wider_face_train.mat\n | |->...\n'''\ntf.app.flags.DEFINE_string('dataset_directory', '/data1/home/changanwang/widerface',\n 'All datas directory')\ntf.app.flags.DEFINE_string('output_directory', '/data1/home/changanwang/widerface/tfrecords',\n 'Output data directory')\ntf.app.flags.DEFINE_string('train_split', 'WIDER_train',\n 'Name of the training data sub-directory')\ntf.app.flags.DEFINE_string('validation_split', 'WIDER_val',\n 'Name of the validation data sub-directory')\ntf.app.flags.DEFINE_integer('train_shards', 16,\n 'Number of shards in training TFRecord files.')\ntf.app.flags.DEFINE_integer('validation_shards', 8,\n 'Number of shards in validation TFRecord files.')\ntf.app.flags.DEFINE_integer('num_threads', 8,\n 'Number of threads to preprocess the images.')\nRANDOM_SEED = 180530\n\nFLAGS = tf.app.flags.FLAGS\n\ndef _int64_feature(value):\n \"\"\"Wrapper for inserting int64 features into Example proto.\"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef _float_feature(value):\n \"\"\"Wrapper for inserting float features into Example proto.\"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\ndef _bytes_list_feature(value):\n \"\"\"Wrapper for inserting a list of bytes features into Example proto.\n \"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n\ndef _bytes_feature(value):\n \"\"\"Wrapper for inserting bytes features into Example proto.\"\"\"\n if isinstance(value, six.string_types):\n value = six.binary_type(value, encoding='utf-8')\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _convert_to_example(filename, image_name, image_buffer, bboxes, blur, expression, illumination, invalid, occlusion, pose, height, width):\n \"\"\"Build an Example proto for an example.\n\n Args:\n filename: string, path to an image file, e.g., '/path/to/example.JPG'\n image_buffer: string, JPEG encoding of RGB image\n bboxes: List of bounding boxes for each image.\n blur: List, clear->0, normal blur->1, heavy blur->2.\n expression: List, typical expression->0, exaggerate expression->1.\n illumination: List, normal illumination->0, 
extreme illumination->1.\n invalid: List, false->0(valid image), true->1(invalid image).\n occlusion: List, no occlusion->0, partial occlusion->1, heavy occlusion->2.\n pose: List, typical pose->0, atypical pose->1.\n height: integer, image height in pixels\n width: integer, image width in pixels\n Returns:\n Example proto\n \"\"\"\n ymin = []\n xmin = []\n ymax = []\n xmax = []\n for b in bboxes:\n assert len(b) == 4\n # pylint: disable=expression-not-assigned\n [l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]\n # pylint: enable=expression-not-assigned\n channels = 3\n image_format = 'JPEG'\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': _int64_feature(height),\n 'image/width': _int64_feature(width),\n 'image/channels': _int64_feature(channels),\n 'image/shape': _int64_feature([height, width, channels]),\n 'image/object/bbox/xmin': _float_feature(xmin),\n 'image/object/bbox/xmax': _float_feature(xmax),\n 'image/object/bbox/ymin': _float_feature(ymin),\n 'image/object/bbox/ymax': _float_feature(ymax),\n 'image/object/bbox/blur': _int64_feature(blur),\n 'image/object/bbox/expression': _int64_feature(expression),\n 'image/object/bbox/illumination': _int64_feature(illumination),\n 'image/object/bbox/invalid': _int64_feature(invalid),\n 'image/object/bbox/occlusion': _int64_feature(occlusion),\n 'image/object/bbox/pose': _int64_feature(pose),\n 'image/format': _bytes_feature(image_format),\n 'image/filename': _bytes_feature(image_name.encode('utf8')),\n 'image/encoded': _bytes_feature(image_buffer)}))\n return example\n\nclass ImageCoder(object):\n \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\"\n\n def __init__(self):\n # Create a single Session to run all image coding calls.\n self._sess = tf.Session()\n\n # Initializes function that converts PNG to JPEG data.\n self._png_data = tf.placeholder(dtype=tf.string)\n image = tf.image.decode_png(self._png_data, channels=3)\n self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)\n\n # Initializes function that converts CMYK JPEG data to RGB JPEG data.\n self._cmyk_data = tf.placeholder(dtype=tf.string)\n image = tf.image.decode_jpeg(self._cmyk_data, channels=0)\n self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)\n\n # Initializes function that decodes RGB JPEG data.\n self._decode_jpeg_data = tf.placeholder(dtype=tf.string)\n self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)\n\n def png_to_jpeg(self, image_data):\n return self._sess.run(self._png_to_jpeg,\n feed_dict={self._png_data: image_data})\n\n def cmyk_to_rgb(self, image_data):\n return self._sess.run(self._cmyk_to_rgb,\n feed_dict={self._cmyk_data: image_data})\n\n def decode_jpeg(self, image_data):\n image = self._sess.run(self._decode_jpeg,\n feed_dict={self._decode_jpeg_data: image_data})\n assert len(image.shape) == 3\n assert image.shape[2] == 3\n return image\n\n# final_scaless = 0.\n# count = 0.\ndef _process_image(filename, coder):\n \"\"\"Process a single image file.\n\n Args:\n filename: string, path to an image file e.g., '/path/to/example.JPG'.\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n Returns:\n image_buffer: string, JPEG encoding of RGB image.\n height: integer, image height in pixels.\n width: integer, image width in pixels.\n \"\"\"\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'rb') as f:\n image_data = f.read()\n\n # Decode the RGB JPEG.\n image = 
coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n # shorter_side = min(height, width)\n # longer_side = max(height, width)\n\n # target_shorter_side = np.random.choice([1024., 1200.], 2, p=[0.5, 0.5])[0]\n # target_longer = target_shorter_side * longer_side / shorter_side\n # if target_longer > 1600:\n # final_scale = 1600./ longer_side\n # else:\n # final_scale = target_shorter_side / shorter_side\n\n # global final_scaless\n # global count\n # final_scaless += final_scale\n # #print(final_scale)\n # count+=1.\n\n return image_data, height, width\n\ndef _find_image_bounding_boxes(cur_record, all_ground_truth, height, width):\n \"\"\"Find the bounding boxes for a given image file.\n\n Args:\n cur_record: list of strings; the first of which is the sub-directory of cur_record, the second is the image filename.\n all_ground_truth: all the annotations of the faces in this data set.\n height: the height of the current image.\n width: the width of the current image.\n Returns:\n bboxes: List of bounding boxes for each image.\n blur: List, clear->0, normal blur->1, heavy blur->2.\n expression: List, typical expression->0, exaggerate expression->1.\n illumination: List, normal illumination->0, extreme illumination->1.\n invalid: List, false->0(valid image), true->1(invalid image).\n occlusion: List, no occlusion->0, partial occlusion->1, heavy occlusion->2.\n pose: List, typical pose->0, atypical pose->1.\n \"\"\"\n all_bboxes = all_ground_truth[cur_record]\n\n bboxes = []\n blur = []\n expression = []\n illumination = []\n invalid = []\n occlusion = []\n pose = []\n\n for bbox in all_bboxes:\n bbox = bbox.split()\n _x1, _y1, _w, _h, _blur, _expression, _illumination, _invalid, _occlusion, _pose = [int(_.strip()) for _ in bbox]\n # _w = max(_w, 1)\n # _h = max(_h, 1)\n\n # ymin = _y1 * 1.\n # xmin = _x1 * 1.\n # ymax = (_y1 + _h - 1) * 1.\n # xmax = (_x1 + _w - 1) * 1.\n _w = max(_w, 0)\n _h = max(_h, 0)\n\n ymin = _y1 * 1.\n xmin = _x1 * 1.\n ymax = (_y1 + _h) * 1.\n xmax = (_x1 + _w) * 1.\n bboxes.append((ymin, xmin, ymax, xmax))\n blur.append(_blur)\n expression.append(_expression)\n illumination.append(_illumination)\n invalid.append(_invalid)\n occlusion.append(_occlusion)\n pose.append(_pose)\n\n return bboxes, blur, expression, illumination, invalid, occlusion, pose\n\ndef _process_image_files_batch(coder, thread_index, ranges, name, directory, all_records, num_shards, all_ground_truth):\n \"\"\"Processes and saves list of images as TFRecord in 1 thread.\n\n Args:\n coder: instance of ImageCoder to provide TensorFlow image coding utils.\n thread_index: integer, unique batch to run index is within [0, len(ranges)).\n ranges: list of pairs of integers specifying ranges of each batches to\n analyze in parallel.\n name: string, unique identifier specifying the data set\n directory: string; the path of all datas\n all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename.\n num_shards: integer number of shards for this data set.\n all_ground_truth: all the annotations of the faces in this data set.\n \"\"\"\n # Each thread produces N shards where N = int(num_shards / num_threads).\n # For instance, if num_shards = 128, and the num_threads = 2, then the first\n # thread would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = 
int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0],\n ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in range(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)\n output_file = os.path.join(FLAGS.output_directory, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in files_in_shard:\n cur_record = all_records[i]\n filename = os.path.join(directory, cur_record)\n\n image_buffer, height, width = _process_image(filename, coder)\n bboxes, blur, expression, illumination, invalid, occlusion, pose = _find_image_bounding_boxes(cur_record, all_ground_truth, height, width)\n\n example = _convert_to_example(filename, cur_record, image_buffer, bboxes, blur, expression, illumination, invalid, occlusion, pose, height, width)\n writer.write(example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print('%s [thread %d]: Processed %d of %d images in thread batch.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print('%s [thread %d]: Wrote %d images to %s' %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print('%s [thread %d]: Wrote %d images to %d shards.' %\n (datetime.now(), thread_index, counter, num_files_in_thread))\n sys.stdout.flush()\n\ndef _process_image_files(name, directory, all_records, num_shards, all_ground_truth):\n \"\"\"Process and save list of images as TFRecord of Example protos.\n\n Args:\n name: string, unique identifier specifying the data set\n directory: string; the path of all datas\n all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename.\n num_shards: integer number of shards for this data set.\n all_ground_truth: all the annotations of the faces in this data set.\n \"\"\"\n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, len(all_records), FLAGS.num_threads + 1).astype(np.int)\n ranges = []\n threads = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a generic TensorFlow-based utility for converting all image codings.\n coder = ImageCoder()\n\n threads = []\n for thread_index in range(len(ranges)):\n args = (coder, thread_index, ranges, name, directory, all_records, num_shards, all_ground_truth)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' 
%\n (datetime.now(), len(all_records)))\n sys.stdout.flush()\n\ndef _process_dataset(name, directory, split_name, num_shards, all_ground_truth):\n \"\"\"Process a complete data set and save it as a TFRecord.\n\n Args:\n name: string, unique identifier specifying the data set.\n directory: string, root path to the data set.\n split_name: sub-path to the data set.\n num_shards: integer number of shards for this data set.\n all_ground_truth: all the annotations of the faces in this data set.\n \"\"\"\n #all_records = []\n jpeg_file_path = os.path.join(directory, split_name, 'images')\n all_records = list(all_ground_truth.keys())\n #all_records = list(zip([jpeg_file_path] * len(jpegs), jpegs))\n\n shuffled_index = list(range(len(all_records)))\n random.seed(RANDOM_SEED)\n random.shuffle(shuffled_index)\n all_records = [all_records[i] for i in shuffled_index]\n _process_image_files(name, jpeg_file_path, all_records, num_shards, all_ground_truth)\n\ndef get_train_or_val_gt(anna_file):\n # assume there is not empty objects in all images\n all_images = {}\n # take the first non-empty record as imagename\n sts_stamp = -1\n cur_image = None\n with open(anna_file) as file:\n for line in file:\n line = line.strip()\n if line == '':\n continue\n elif sts_stamp < 0:\n # encounter a new image\n assert (('jpg' in line) or ('--' in line)), 'mismatch records in {}'.format(anna_file)\n all_images[line] = []\n cur_image = line\n sts_stamp = 0 # set stamp to read total objects at next line\n elif sts_stamp > 0:\n all_images[cur_image].append(line)\n sts_stamp = sts_stamp - 1\n if sts_stamp == 0:\n sts_stamp = -1 # wait for next image\n else:\n sts_stamp = int(line)\n return all_images\n\nall_images_list = {\n 'train': os.path.join(FLAGS.dataset_directory, 'wider_face_split', 'wider_face_train_bbx_gt.txt'),\n 'valid': os.path.join(FLAGS.dataset_directory, 'wider_face_split', 'wider_face_val_bbx_gt.txt')\n}\n\ndef main(unused_argv):\n assert not FLAGS.train_shards % FLAGS.num_threads, (\n 'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')\n assert not FLAGS.validation_shards % FLAGS.num_threads, (\n 'Please make the FLAGS.num_threads commensurate with '\n 'FLAGS.validation_shards')\n print('Saving results to %s' % FLAGS.output_directory)\n\n # Run it!\n os.makedirs(FLAGS.output_directory, exist_ok=True)\n _process_dataset('valid', FLAGS.dataset_directory, FLAGS.validation_split, FLAGS.validation_shards, get_train_or_val_gt(all_images_list['valid']))\n _process_dataset('train', FLAGS.dataset_directory, FLAGS.train_split, FLAGS.train_shards, get_train_or_val_gt(all_images_list['train']))\n #global final_scaless\n #global count\n #print(final_scaless/count)\nif __name__ == '__main__':\n tf.app.run()\n\n# python dataset/convert_tfrecords.py --dataset_directory=/data1/home/changanwang/widerface/ --output_directory=/data1/home/changanwang/widerface/tfrecords\n\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.app.run",
"tensorflow.train.Int64List",
"tensorflow.train.FloatList",
"tensorflow.gfile.FastGFile",
"numpy.arange",
"tensorflow.image.encode_jpeg",
"tensorflow.Session",
"tensorflow.image.decode_jpeg",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.image.decode_png",
"tensorflow.train.BytesList",
"numpy.linspace",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.train.Coordinator"
]
] |
tongni1975/tensorflow | [
"3d452dbcf7e1a71ba449f6acf7342cdd1dd11859"
] | [
"tensorflow/python/ops/control_flow_ops.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Control Flow Operations.\n\nSee the [Control\nFlow](https://tensorflow.org/api_guides/python/control_flow_ops) guide.\n\"\"\"\n# pylint: disable=g-bad-name\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport functools\nimport os\n\nimport six\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.protobuf import control_flow_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import cond_v2_impl\nfrom tensorflow.python.ops import control_flow_util as util\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_control_flow_ops\nfrom tensorflow.python.ops import gen_data_flow_ops\nfrom tensorflow.python.ops import gen_logging_ops\nfrom tensorflow.python.ops import gen_resource_variable_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import tensor_array_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import,undefined-variable\nfrom tensorflow.python.ops.gen_control_flow_ops import *\n# pylint: enable=wildcard-import\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_should_use\nfrom tensorflow.python.util.tf_export import tf_export\n\n# The while_v2 module.\n_while_v2 = None\n\nENABLE_COND_V2 = os.getenv(\"TF_ENABLE_COND_V2\", \"0\") != \"0\"\n# Note: Setting this to True is not sufficient to switch to the v2 while_loop.\n# Users must also import the while_v2 module to set the _while_v2 module\n# variable above. 
We do this to avoid a circular dependency:\n# control_flow_ops -> while_v2 -> gradients_impl -> control_flow_ops\n# A ValueError is raised in tf.while_loop if this is set to True and the\n# `_while_v2` module is not set.\nENABLE_WHILE_V2 = os.getenv(\"TF_ENABLE_WHILE_V2\", \"0\") != \"0\"\n\n\n# We override the 'tuple' for a control flow op, so we keep python's\n# existing 'tuple' for later use in this module.\n_basetuple = tuple\n\n\ndef _summarize_eager(tensor, summarize=None):\n \"\"\"Returns a summarized string representation of eager `tensor`.\n\n Args:\n tensor: EagerTensor to summarize\n summarize: Include these many first elements of `array`\n \"\"\"\n # reshape((-1,)) is the fastest way to get a flat array view\n if tensor._rank(): # pylint: disable=protected-access\n flat = tensor.numpy().reshape((-1,))\n lst = [str(x) for x in flat[:summarize]]\n if len(lst) < flat.size:\n lst.append(\"...\")\n else:\n # tensor.numpy() returns a scalar for zero dimensional arrays\n if summarize != 0:\n lst = [str(tensor.numpy())]\n else:\n lst = []\n\n return \", \".join(lst)\n\n\n# pylint: disable=protected-access\n\n\n# Assert and Print are special symbols in python, so we must\n# use an upper-case version of them.\n@tf_export(\"debugging.Assert\", \"Assert\")\n@tf_should_use.should_use_result\ndef Assert(condition, data, summarize=None, name=None):\n \"\"\"Asserts that the given condition is true.\n\n If `condition` evaluates to false, print the list of tensors in `data`.\n `summarize` determines how many entries of the tensors to print.\n\n NOTE: In graph mode, to ensure that Assert executes, one usually attaches\n a dependency:\n\n ```python\n # Ensure maximum element of x is smaller or equal to 1\n assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x])\n with tf.control_dependencies([assert_op]):\n ... code using x ...\n ```\n\n Args:\n condition: The condition to evaluate.\n data: The tensors to print out when condition is false.\n summarize: Print this many entries of each tensor.\n name: A name for this operation (optional).\n\n Returns:\n assert_op: An `Operation` that, when executed, raises a\n `tf.errors.InvalidArgumentError` if `condition` is not true.\n @compatibility{eager} returns None.\n\n Raises:\n @compatibility{eager} `tf.errors.InvalidArgumentError` if `condition`\n is not true\n \"\"\"\n if context.executing_eagerly():\n if not condition:\n xs = ops.convert_n_to_tensor(data)\n data_str = [_summarize_eager(x, summarize) for x in xs]\n raise errors.InvalidArgumentError(\n node_def=None,\n op=None,\n message=\"Expected '%s' to be true. Summarized data: %s\" %\n (condition, \"\\n\".join(data_str)))\n return\n\n with ops.name_scope(name, \"Assert\", [condition, data]) as name:\n xs = ops.convert_n_to_tensor(data)\n if all([x.dtype in {dtypes.string, dtypes.int32} for x in xs]):\n # As a simple heuristic, we assume that string and int32 are\n # on host to avoid the need to use cond. 
If it is not case,\n # we will pay the price copying the tensor to host memory.\n return gen_logging_ops._assert(condition, data, summarize, name=\"Assert\")\n else:\n condition = ops.convert_to_tensor(condition, name=\"Condition\")\n\n def true_assert():\n return gen_logging_ops._assert(\n condition, data, summarize, name=\"Assert\")\n\n guarded_assert = cond(condition, no_op, true_assert, name=\"AssertGuard\")\n if context.executing_eagerly():\n return\n return guarded_assert.op\n\n\ndef _Identity(data, name=None):\n \"\"\"Return a tensor with the same shape and contents as the input tensor.\n\n Args:\n data: A Tensor.\n name: A name for this operation (optional).\n\n Returns:\n A Tensor with the same type and value as the input Tensor.\n \"\"\"\n data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)\n if isinstance(data, ops.Tensor):\n if data.dtype._is_ref_dtype: # pylint: disable=protected-access\n return gen_array_ops.ref_identity(data, name=name)\n else:\n return array_ops.identity(data, name=name)\n else:\n if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):\n raise TypeError(\"Type %s not supported\" % type(data))\n values = _Identity(data.values, name=name)\n indices = array_ops.identity(data.indices, name=\"indices\")\n if isinstance(data, ops.IndexedSlices):\n dense_shape = data.dense_shape\n if dense_shape is not None:\n dense_shape = array_ops.identity(dense_shape, name=\"dense_shape\")\n return ops.IndexedSlices(values, indices, dense_shape)\n else:\n dense_shape = array_ops.identity(data.dense_shape, name=\"dense_shape\")\n return sparse_tensor.SparseTensor(indices, values, dense_shape)\n\n\ndef _NextIteration(data, name=None):\n data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)\n if isinstance(data, ops.Tensor):\n if data.dtype._is_ref_dtype: # pylint: disable=protected-access\n return ref_next_iteration(data, name=name)\n else:\n return next_iteration(data, name=name)\n else:\n if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):\n raise TypeError(\"Type %s not supported\" % type(data))\n values = _NextIteration(data.values, name=name)\n indices = next_iteration(data.indices, name=\"indices\")\n if isinstance(data, ops.IndexedSlices):\n dense_shape = data.dense_shape\n if dense_shape is not None:\n dense_shape = next_iteration(dense_shape, name=\"dense_shape\")\n return ops.IndexedSlices(values, indices, dense_shape)\n else:\n dense_shape = next_iteration(data.dense_shape, name=\"dense_shape\")\n return sparse_tensor.SparseTensor(indices, values, dense_shape)\n\n\ndef _Enter(data,\n frame_name,\n is_constant=False,\n parallel_iterations=10,\n use_ref=True,\n use_input_shape=True,\n name=None):\n \"\"\"Creates or finds a child frame, and makes `data` available to it.\n\n The unique `frame_name` is used by the `Executor` to identify frames. If\n `is_constant` is true, `data` is a constant in the child frame; otherwise\n it may be changed in the child frame. 
At most `parallel_iterations`\n iterations are run in parallel in the child frame.\n\n Args:\n data: The tensor to be made available to the child frame.\n frame_name: The name of the child frame.\n is_constant: If true, the output is constant within the child frame.\n parallel_iterations: The number of iterations allowed to run in parallel.\n use_ref: If true, use ref_enter if data is of ref type.\n name: A name for this operation (optional).\n\n Returns:\n The same tensor as `data`.\n \"\"\"\n data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)\n if isinstance(data, ops.Tensor):\n if data.dtype._is_ref_dtype and use_ref: # pylint: disable=protected-access\n result = gen_control_flow_ops.ref_enter(\n data, frame_name, is_constant, parallel_iterations, name=name)\n else:\n result = gen_control_flow_ops.enter(\n data, frame_name, is_constant, parallel_iterations, name=name)\n if use_input_shape:\n result.set_shape(data.get_shape())\n return result\n else:\n if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):\n raise TypeError(\"Type %s not supported\" % type(data))\n values = _Enter(\n data.values,\n frame_name,\n is_constant,\n parallel_iterations=parallel_iterations,\n use_input_shape=use_input_shape,\n name=name)\n indices = gen_control_flow_ops.enter(\n data.indices,\n frame_name,\n is_constant,\n parallel_iterations,\n name=\"indices\")\n if use_input_shape:\n indices.set_shape(data.indices.get_shape())\n if isinstance(data, ops.IndexedSlices):\n dense_shape = data.dense_shape\n if dense_shape is not None:\n dense_shape = gen_control_flow_ops.enter(\n dense_shape,\n frame_name,\n is_constant,\n parallel_iterations,\n name=\"dense_shape\")\n if use_input_shape:\n dense_shape.set_shape(data.dense_shape.get_shape())\n return ops.IndexedSlices(values, indices, dense_shape)\n else:\n dense_shape = gen_control_flow_ops.enter(\n data.dense_shape,\n frame_name,\n is_constant,\n parallel_iterations,\n name=\"dense_shape\")\n if use_input_shape:\n dense_shape.set_shape(data.dense_shape.get_shape())\n return sparse_tensor.SparseTensor(indices, values, dense_shape)\n\n\ndef exit(data, name=None): # pylint: disable=redefined-builtin\n \"\"\"Exits the current frame to its parent frame.\n\n Exit makes its input `data` available to the parent frame.\n\n Args:\n data: The tensor to be made available to the parent frame.\n name: A name for this operation (optional).\n\n Returns:\n The same tensor as `data`.\n \"\"\"\n data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)\n if isinstance(data, ops.Tensor):\n if data.dtype._is_ref_dtype: # pylint: disable=protected-access\n return gen_control_flow_ops.ref_exit(data, name)\n else:\n return gen_control_flow_ops._exit(data, name)\n else:\n if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):\n raise TypeError(\"Type %s not supported\" % type(data))\n values = exit(data.values, name=name)\n indices = gen_control_flow_ops._exit(data.indices, name=\"indices\")\n if isinstance(data, ops.IndexedSlices):\n dense_shape = data.dense_shape\n if dense_shape is not None:\n dense_shape = gen_control_flow_ops._exit(dense_shape, name)\n return ops.IndexedSlices(values, indices, dense_shape)\n else:\n dense_shape = gen_control_flow_ops._exit(data.dense_shape, name)\n return sparse_tensor.SparseTensor(indices, values, dense_shape)\n\n\ndef switch(data, pred, dtype=None, name=None):\n \"\"\"Forwards `data` to an output determined by `pred`.\n\n If `pred` is false, the `data` input is 
forwarded to the first output.\n Otherwise, the data goes to the second output.\n\n This op handles `Tensor`s and `IndexedSlices`.\n\n Args:\n data: The tensor to be forwarded to the appropriate output.\n pred: A scalar that specifies which output port will receive data.\n dtype: Optional element type for the returned tensor. If missing,\n the type is inferred from the type of `value`.\n name: A name for this operation (optional).\n\n Returns:\n `(output_false, output_true)`: If `pred` is true, data will be forwarded\n to `output_true`, otherwise it goes to `output_false`.\n \"\"\"\n with ops.name_scope(name, \"Switch\", [data, pred]) as name:\n data = ops.internal_convert_to_tensor_or_indexed_slices(\n data, dtype=dtype, name=\"data\", as_ref=True)\n pred = ops.convert_to_tensor(pred, name=\"pred\")\n if isinstance(data, ops.Tensor):\n return gen_control_flow_ops.switch(data, pred, name=name)\n else:\n if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):\n raise TypeError(\"Type %s not supported\" % type(data))\n val, ind = data.values, data.indices\n val_f, val_t = gen_control_flow_ops.switch(val, pred, name=name)\n ind_f, ind_t = gen_control_flow_ops.switch(ind, pred, name=\"indices\")\n if isinstance(data, ops.IndexedSlices):\n dense_shape = data.dense_shape\n if dense_shape is not None:\n dense_shape_f, dense_shape_t = gen_control_flow_ops.switch(\n dense_shape, pred, name=\"dense_shape\")\n else:\n dense_shape_f, dense_shape_t = None, None\n return (ops.IndexedSlices(val_f, ind_f, dense_shape_f),\n ops.IndexedSlices(val_t, ind_t, dense_shape_t))\n else:\n dense_shape = data.dense_shape\n dense_shape_f, dense_shape_t = gen_control_flow_ops.switch(\n data.dense_shape, pred, name=\"dense_shape\")\n return (sparse_tensor.SparseTensor(ind_f, val_f, dense_shape_f),\n sparse_tensor.SparseTensor(ind_t, val_t, dense_shape_t))\n\n\ndef _SwitchRefOrTensor(data, pred, name=\"Switch\"):\n \"\"\"Forwards `data` to an output determined by `pred`.\n\n If `pred` is false, the `data` input is forwarded to the first output.\n Otherwise, the data goes to the second output.\n\n This op handles `Tensor`s and `IndexedSlices`.\n\n Args:\n data: The tensor to be forwarded to the appropriate output.\n pred: A scalar that specifies which output port will receive data.\n name: A name for this operation (optional).\n\n Returns:\n `(output_false, output_true)`: If `pred` is true, data will be forwarded to\n `output_true`, otherwise it goes to `output_false`.\n\n Raises:\n TypeError: if data is not a Tensor or IndexedSlices\n \"\"\"\n data = ops.convert_to_tensor_or_indexed_slices(data, name=\"data\")\n # NOTE(vrv): ops.colocate_with(data, ignore_existing=True) below\n # addresses the following scenario.\n #\n # Assume you execute Optimizer.apply_gradients() in a branch of a cond().\n #\n # 1. The update op is created inside a `with ops.colocate(var):` block\n #\n # 2. 
Some tensor `data` is captured and a switch is created in a\n # `with ops.colocate_with(data):` block.\n #\n # with ops.colocate_with(var):\n # with ops.colocate_with(data):\n # op = ...\n #\n # var and data may be pinned to different devices, so we want to ops\n # created within ops.colocate_with(data) to ignore the existing stack.\n with ops.colocate_with(data, ignore_existing=True):\n if isinstance(data, ops.Tensor):\n if data.dtype._is_ref_dtype: # pylint: disable=protected-access\n return ref_switch(data, pred, name=name)\n return switch(data, pred, name=name)\n\n\ndef merge(inputs, name=None):\n \"\"\"Returns the value of an available element of `inputs`.\n\n This op tests each of the tensors in `inputs` in turn to determine if any of\n them is available. If it finds an available tensor, it returns it and its\n index in `inputs`.\n\n It is an error if more than one tensor in `inputs` is available. If no tensor\n in `inputs` is available, the returned tensor and index are not set.\n\n This op handles both `Tensor`s and `IndexedSlices`. If inputs has a mix of\n `Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices\n before merging.\n\n Args:\n inputs: The input tensors, at most one of which is available.\n name: A name for this operation (optional).\n\n Returns:\n A tuple containing the chosen input tensor and its index in `inputs`.\n\n Raises:\n ValueError: If any of the inputs is None, or inputs are IndexedSlices and\n some but not all have a dense_shape property.\n \"\"\"\n if any([inp is None for inp in inputs]):\n raise ValueError(\"At least one of the merge inputs is None: %s\" % inputs)\n with ops.name_scope(name, \"Merge\", inputs) as name:\n inputs = [\n ops.internal_convert_to_tensor_or_indexed_slices(inp, as_ref=True)\n for inp in inputs\n ]\n if all([isinstance(v, ops.Tensor) for v in inputs]):\n if all([v.dtype._is_ref_dtype for v in inputs]): # pylint: disable=protected-access\n return gen_control_flow_ops.ref_merge(inputs, name)\n else:\n return gen_control_flow_ops.merge(inputs, name)\n elif all([isinstance(v, sparse_tensor.SparseTensor) for v in inputs]):\n # Only handle the case when all inputs are SparseTensor.\n values, _ = merge([inp.values for inp in inputs], name=name)\n indices, chosen_index = gen_control_flow_ops.merge(\n [inp.indices for inp in inputs], name=\"indices\")\n dense_shape, _ = gen_control_flow_ops.merge(\n [inp.dense_shape for inp in inputs], name=\"dense_shape\")\n return (sparse_tensor.SparseTensor(indices, values, dense_shape),\n chosen_index)\n else:\n # For now convert all the inputs as IndexedSlices.\n inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)\n values, _ = merge([inp.values for inp in inputs], name=name)\n indices, chosen_index = gen_control_flow_ops.merge(\n [inp.indices for inp in inputs], name=\"indices\")\n if any(inp.dense_shape is not None for inp in inputs):\n if any(inp.dense_shape is None for inp in inputs):\n raise ValueError(\"Either all merged IndexedSlices must have a \"\n \"dense_shape, or none must have a dense_shape.\")\n dense_shape, _ = gen_control_flow_ops.merge(\n [inp.dense_shape for inp in inputs], name=\"dense_shape\")\n else:\n dense_shape = None\n return ops.IndexedSlices(values, indices, dense_shape), chosen_index\n\n\n# pylint: enable=protected-access\n\n\ndef _convert_tensorarray_to_flow(tensor_or_tensor_array):\n if isinstance(tensor_or_tensor_array, tensor_array_ops.TensorArray):\n return tensor_or_tensor_array.flow\n else:\n return tensor_or_tensor_array\n\n\ndef 
_make_tensor_array(ta, t_or_flow):\n # pylint: disable=protected-access\n new_ta = tensor_array_ops.TensorArray(\n dtype=ta.dtype,\n handle=ta.handle,\n flow=t_or_flow,\n infer_shape=ta._infer_shape,\n colocate_with_first_write_call=ta._colocate_with_first_write_call)\n new_ta._colocate_with = ta._colocate_with\n new_ta._element_shape = ta._element_shape\n # pylint: enable=protected-access\n return new_ta\n\n\ndef _convert_flows_to_tensorarrays(tensors_or_tensorarrays, tensors_or_flows):\n if len(tensors_or_tensorarrays) != len(tensors_or_flows):\n raise ValueError(\n \"Lengths of original Tensor list and new list do not match: %d vs. %d\" %\n (len(tensors_or_tensorarrays), len(tensors_or_flows)))\n return [\n _make_tensor_array(ta, t_or_flow)\n if isinstance(ta, tensor_array_ops.TensorArray) else t_or_flow\n for (ta, t_or_flow) in zip(tensors_or_tensorarrays, tensors_or_flows)\n ]\n\n\ndef _ShapeLessThanOrEqual(shape1, shape2):\n if shape2.dims is None:\n return True\n if shape1.ndims != shape2.ndims:\n return False\n for dim1, dim2 in zip(shape1.dims, shape2.dims):\n if dim2.value is not None and dim1.value != dim2.value:\n return False\n return True\n\n\ndef _SetShapeInvariants(input_vars, enter_vars, shapes):\n \"\"\"Set the shapes of the tensors in `enter_vars` to `shapes`.\n\n Args:\n input_vars: A list of tensors that are inputs to `enter_vars`.\n enter_vars: A list of tensors whose shapes will be set.\n shapes: A (possibly nested) list of shapes.\n\n Raises:\n ValueError: If any tensor in `enter_vars` has a less specific shape\n than its corresponding shape in `shapes`.\n \"\"\"\n if shapes is None:\n return\n flat_shapes = nest.flatten(shapes)\n if not all([isinstance(s, tensor_shape.TensorShape) for s in flat_shapes]):\n raise ValueError(\"`shapes` must be a (possibly nested) list of shapes.\")\n # Check that the shapes of the inputs are less than the shape invariants,\n # and set the shapes of `enter_vars` to the shape invariants.\n for inp, var, shape in zip(input_vars, enter_vars, flat_shapes):\n if isinstance(var, ops.Tensor):\n if not _ShapeLessThanOrEqual(inp.get_shape(), shape):\n raise ValueError(\n \"The shape invariant specified for %s is not compatible with \"\n \"the initial shape of the loop variable. It enters the loop \"\n \"with shape %s, but the specified shape invariant is %s.\" %\n (inp.name, inp.get_shape(), shape))\n var.set_shape(shape)\n else:\n if not isinstance(var, (ops.IndexedSlices, sparse_tensor.SparseTensor)):\n raise TypeError(\"Type %s not supported\" % type(var))\n if isinstance(var, ops.IndexedSlices):\n if not _ShapeLessThanOrEqual(inp.values.get_shape(), shape):\n raise ValueError(\n \"The shape invariant specified for %s is not compatible with \"\n \"the initial shape of the values tensor of this IndexedSlices. \"\n \"It enters the loop with shape %s, but the specified shape \"\n \"invariant is %s.\" % (inp.values.name, inp.values.get_shape(),\n shape))\n var.values.set_shape(shape)\n var.indices.set_shape(tensor_shape.TensorShape([shape[0]]))\n if var.dense_shape is not None:\n var.dense_shape.set_shape(tensor_shape.TensorShape([shape.ndims]))\n else:\n if not _ShapeLessThanOrEqual(inp.dense_shape.get_shape(), shape):\n raise ValueError(\n \"The shape invariant specified for %s is not compatible with \"\n \"the initial shape of the shape tensor of this SparseTensor. 
\"\n \"It enters the loop with shape %s, but the specified shape \"\n \"invariant is %s.\" % (inp.dense_shape.name,\n inp.dense_shape.get_shape(), shape))\n var.values.set_shape(tensor_shape.TensorShape([None]))\n var.indices.set_shape(tensor_shape.TensorShape([None, shape.ndims]))\n var.dense_shape.set_shape(shape)\n\n\ndef _EnforceShapeInvariant(merge_var, next_var):\n \"\"\"Check if the shapes of the loops variables are invariants.\n\n Args:\n merge_var: The list of tensors representing the initial values of the\n loop variables.\n next_var: The list of tensors representing the values of the loop\n variables after one loop iteration.\n\n Raises:\n ValueError: If any tensor in `merge_var` has a more specific shape than\n its correspnding tensor in `next_var`.\n \"\"\"\n if isinstance(merge_var, ops.Tensor):\n m_shape = merge_var.get_shape()\n n_shape = next_var.get_shape()\n if not _ShapeLessThanOrEqual(n_shape, m_shape):\n enter = merge_var.op.inputs[0].op\n assert util.IsLoopEnter(enter)\n input_t = enter.inputs[0]\n raise ValueError(\n \"Input tensor '%s' enters the loop with shape %s, but has shape %s \"\n \"after one iteration. To allow the shape to vary across iterations, \"\n \"use the `shape_invariants` argument of tf.while_loop to specify a \"\n \"less-specific shape.\" %\n (input_t.name, input_t.shape, n_shape))\n else:\n if not isinstance(merge_var,\n (ops.IndexedSlices, sparse_tensor.SparseTensor)):\n raise TypeError(\"Type %s not supported\" % type(merge_var))\n if isinstance(merge_var, ops.IndexedSlices):\n m_values_shape = merge_var.values.get_shape()\n m_indices_shape = merge_var.indices.get_shape()\n m_shape_shape = tensor_shape.TensorShape(None)\n if merge_var.dense_shape is not None:\n m_shape_shape = merge_var.dense_shape.get_shape()\n n_values_shape = next_var.values.get_shape()\n n_indices_shape = next_var.indices.get_shape()\n n_shape_shape = tensor_shape.TensorShape(None)\n if next_var.dense_shape is not None:\n n_shape_shape = next_var.dense_shape.get_shape()\n if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or\n not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape)):\n if not _ShapeLessThanOrEqual(n_values_shape, m_values_shape):\n raise ValueError(\n \"The shape for %s is not an invariant for the loop. It enters \"\n \"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) \"\n \"after one iteration. Provide shape invariants using either the \"\n \"`shape_invariants` argument of tf.while_loop or set_shape() \"\n \"on the loop variables.\" %\n (merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,\n n_values_shape, n_indices_shape, n_shape_shape))\n else:\n m_values_shape = merge_var.values.get_shape()\n m_indices_shape = merge_var.indices.get_shape()\n m_shape_shape = merge_var.dense_shape.get_shape()\n n_values_shape = next_var.values.get_shape()\n n_indices_shape = next_var.indices.get_shape()\n n_shape_shape = next_var.dense_shape.get_shape()\n if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or\n not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape) or\n not _ShapeLessThanOrEqual(n_shape_shape, m_shape_shape)):\n raise ValueError(\n \"The shape for %s is not an invariant for the loop. It enters \"\n \"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) \"\n \"after one iteration. 
Provide shape invariants using either \"\n \"the `shape_invariants` argument of tf.while_loop or set_shape() \"\n \"on the loop variables.\" %\n (merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,\n n_values_shape, n_indices_shape, n_shape_shape))\n\n\ndef _AddNextAndBackEdge(m, v, enforce_shape_invariant=True):\n \"\"\"Add NextIteration and back edge from v to m.\"\"\"\n if isinstance(m, ops.Tensor):\n v = ops.convert_to_tensor(v)\n v = _NextIteration(v)\n if enforce_shape_invariant:\n # Make sure the shapes of loop outputs are correct. We do this before\n # calling _update_input, which will raise a less-helpful error message if\n # the types don't match.\n # TODO(skyewm): call this for other cases below (needs testing)\n _EnforceShapeInvariant(m, v)\n m.op._update_input(1, v) # pylint: disable=protected-access\n elif isinstance(m, ops.IndexedSlices):\n # pylint: disable=protected-access\n v = math_ops._as_indexed_slices(v, optimize=False)\n v = _NextIteration(v)\n m.values.op._update_input(1, v.values)\n m.indices.op._update_input(1, v.indices)\n # pylint: enable=protected-access\n if m.dense_shape is not None:\n if v.dense_shape is None:\n raise ValueError(\"Must have dense shape: %s\" % v.name)\n m.dense_shape.op._update_input(1, v.dense_shape)\n elif isinstance(m, sparse_tensor.SparseTensor):\n if not isinstance(v, sparse_tensor.SparseTensor):\n raise ValueError(\"Must be a sparse tensor: %s\" % v.name)\n v = _NextIteration(v)\n # pylint: disable=protected-access\n m.values.op._update_input(1, v.values)\n m.indices.op._update_input(1, v.indices)\n m.dense_shape.op._update_input(1, v.dense_shape)\n # pylint: enable=protected-access\n else:\n raise TypeError(\"Type %s not supported\" % type(m))\n return v\n\n\ndef GetMaxSizeFromNestedMaximumIterations(value, while_ctxt):\n \"\"\"Calculate a max_size for use by stack ops inside an XLA while_loop.\n\n Args:\n value: The value inside the while_loop forward context. Used for printing\n error messages.\n while_ctxt: The forward context inside which value resides. This does\n not always match the value's immediate context, as `value` may be\n inside e.g. 
a cond context inside the while_loop.\n\n Returns:\n A tensor containing the `max_size` to feed to a Stack initializer.\n\n Raises:\n ValueError: If `value` is nested inside a `while_loop` that either\n lacks a `maximum_iterations` parameter, or the `maximum_iterations`\n parameter:\n\n - is inside a `while_loop` that is a parent of the calling context, and\n - cannot be evaluated at graph build time to a constant.\n \"\"\"\n value_name = value.name\n # curr_ctxt is the context that tf.gradients was called in.\n curr_ctxt = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access\n\n curr_ctxt_name = curr_ctxt.name if curr_ctxt is not None else \"\"\n max_size = constant_op.constant(1)\n\n # Loop through all containing while contexts between value and the\n # current context, multiplying together each context's\n # max_iterations to get the maximum stack size.\n while while_ctxt not in (None, curr_ctxt):\n max_iter = while_ctxt.maximum_iterations\n if max_iter is None:\n raise ValueError(\n \"Cannot create a gradient accumulator for tensor '%s' inside \"\n \"XLA while_loop because maximum_iterations was not passed to \"\n \"the tf.while_loop call ('%s').\" % (value_name, while_ctxt.name))\n\n # pylint: disable=protected-access\n max_iter_ctxt = max_iter.op._get_control_flow_context()\n # pylint: enable=protected-access\n\n # If max_iter_ctxt (non-strictly) contains curr_ctxt, then it's OK to use.\n if util.IsContainingContext(curr_ctxt, max_iter_ctxt):\n max_size *= max_iter\n else:\n # We cannot use max_iter because it's defined in a nested while\n # or cond context, so will fail if we try to use it as input to\n # any ops in curr_ctxt (e.g. max_size or the final accumulator\n # stack). Attempt to get a constant value out to use instead.\n const_max_iter = tensor_util.constant_value(max_iter)\n if const_max_iter is None:\n raise ValueError(\n \"Cannot create a gradient accumulator for tensor '%s' inside XLA \"\n \"while_loop. maximum_iterations tensor '%s' for while_loop context \"\n \"'%s' must be statically known (e.g. a constant value or known \"\n \"shape dimension), or be defined at or outside the while loop \"\n \"context '%s' (currently defined in '%s').\" %\n (value_name, max_iter.name, while_ctxt.name, curr_ctxt_name,\n max_iter_ctxt.name))\n max_size *= const_max_iter\n\n # Find the next outer WhileContext (or stop if we reach the\n # tf.gradient's context).\n while_ctxt = util.GetContainingWhileContext(\n while_ctxt.outer_context, stop_ctxt=curr_ctxt)\n\n return max_size\n\n\nclass GradLoopState(object):\n \"\"\"The state used for constructing the gradient graph for a while loop.\n\n We create a GradLoopState for each while loop in forward and its\n corresponding while loop in backprop. This gives us access to both\n the forward and the backprop WhileContexts.\n\n During the construction of gradient graph, any time when we detect\n a forward value that is needed for backprop, we create a history\n accumulator and add it to `history_map`. Any time when we backprop\n a loop switch op (in _SwitchGrad), we add the grad merge op in\n `switch_map`.\n \"\"\"\n\n def __init__(self, forward_ctxt, outer_grad_state):\n # The grad loop state for the outer while loop.\n self._outer_grad_state = None\n\n # The while loop context for forward.\n self._forward_context = None\n\n # The loop counter added by AddForwardLoopCounter. 
It is the value\n # of the loop counter for the next iteration.\n self._forward_index = None\n\n # A sync op for forward.\n self._forward_sync = None\n\n # The while loop context for backprop.\n self._grad_context = None\n\n # The loop counter added by AddBackpropLoopCounter. It is the value\n # of the loop counter for the current iteration.\n self._grad_index = None\n\n # A sync op for backprop.\n self._grad_sync = None\n\n # Information needed by backprop.\n self._history_map = {}\n self._switch_map = {}\n self._unused_exits = []\n self._deferred_exits = []\n self._forward_loop_exits = list(forward_ctxt.loop_exits)\n self._pending_exits_count = len(forward_ctxt.loop_exits)\n\n self._outer_grad_state = outer_grad_state\n if outer_grad_state:\n outer_forward_ctxt = outer_grad_state.forward_context\n else:\n if not hasattr(forward_ctxt, \"outer_context\"):\n raise ValueError(\"Failed to call gradients on a while loop without\"\n \"properly serializing graph via MetaGraphDef\")\n outer_forward_ctxt = forward_ctxt.outer_context\n\n # Add the forward loop counter.\n with forward_ctxt._graph.as_default(): # pylint: disable=protected-access\n if outer_forward_ctxt:\n outer_forward_ctxt.Enter()\n cnt, forward_index = forward_ctxt.AddForwardLoopCounter(outer_grad_state)\n if outer_forward_ctxt:\n outer_forward_ctxt.Exit()\n self._forward_context = forward_ctxt\n self._forward_index = forward_index\n\n # Add the backprop WhileContext, and the backprop loop counter.\n if outer_grad_state:\n # This is a nested loop. Remember the iteration counts for each\n # execution of this inner loop.\n outer_forward_ctxt.AddName(cnt.name)\n history_cnt = outer_grad_state.AddForwardAccumulator(cnt)\n\n outer_grad_ctxt = outer_grad_state.grad_context\n outer_grad_ctxt.Enter()\n self._grad_context = WhileContext(\n maximum_iterations=forward_ctxt.maximum_iterations,\n parallel_iterations=forward_ctxt.parallel_iterations,\n back_prop=forward_ctxt.back_prop,\n swap_memory=forward_ctxt.swap_memory,\n name=forward_ctxt.name,\n grad_state=self)\n real_cnt = outer_grad_state.AddBackpropAccumulatedValue(history_cnt, cnt)\n self._grad_index = self._grad_context.AddBackpropLoopCounter(\n real_cnt, outer_grad_state)\n outer_grad_ctxt.Exit()\n else:\n if outer_forward_ctxt:\n outer_forward_ctxt.Enter()\n self._grad_context = WhileContext(\n maximum_iterations=forward_ctxt.maximum_iterations,\n parallel_iterations=forward_ctxt.parallel_iterations,\n back_prop=forward_ctxt.back_prop,\n swap_memory=forward_ctxt.swap_memory,\n name=forward_ctxt.name,\n grad_state=self)\n self._grad_index = self._grad_context.AddBackpropLoopCounter(\n cnt, outer_grad_state)\n if outer_forward_ctxt:\n outer_forward_ctxt.Exit()\n\n @property\n def outer_grad_state(self):\n \"\"\"The grad loop state for outer loop.\"\"\"\n return self._outer_grad_state\n\n @property\n def forward_context(self):\n \"\"\"The while loop context for forward.\"\"\"\n return self._forward_context\n\n @property\n def forward_index(self):\n \"\"\"The loop index of forward loop.\"\"\"\n return self._forward_index\n\n @property\n def forward_sync(self):\n \"\"\"A control trigger node for synchronization in the forward loop.\n\n One main use is to keep the push ops of a stack executed in the\n iteration order.\n \"\"\"\n if self._forward_sync is None:\n with ops.control_dependencies(None):\n self._forward_sync = control_trigger(name=\"f_sync\")\n self._forward_sync._set_control_flow_context(self._forward_context)\n self._forward_index.op._add_control_input(self._forward_sync)\n 
return self._forward_sync\n\n @property\n def grad_context(self):\n \"\"\"The corresponding WhileContext for gradient.\"\"\"\n return self._grad_context\n\n @property\n def grad_index(self):\n \"\"\"The loop index of backprop loop.\"\"\"\n return self._grad_index\n\n @property\n def grad_sync(self):\n \"\"\"A control trigger node for synchronization in the grad loop.\n\n One main use is to keep the pop ops of a stack executed in the\n iteration order.\n \"\"\"\n if self._grad_sync is None:\n with ops.control_dependencies(None):\n self._grad_sync = control_trigger(name=\"b_sync\")\n self._grad_sync._set_control_flow_context(self._grad_context)\n self._grad_index.op._add_control_input(self._grad_sync)\n if self._grad_context.outer_context:\n self._grad_context.outer_context.AddInnerOp(self._grad_sync)\n return self._grad_sync\n\n @property\n def history_map(self):\n \"\"\"The map that records all the tensors needed for backprop.\"\"\"\n return self._history_map\n\n @property\n def switch_map(self):\n \"\"\"The map that records all the Switch ops for the while loop.\"\"\"\n return self._switch_map\n\n @property\n def unused_exits(self):\n \"\"\"The list of \"unused\" exits.\"\"\"\n return self._unused_exits\n\n @property\n def deferred_exits(self):\n \"\"\"The list of \"deferred\" exits.\"\"\"\n return self._deferred_exits\n\n @property\n def forward_loop_exits(self):\n \"\"\"The list of exits of the forward loop.\"\"\"\n return self._forward_loop_exits\n\n @property\n def pending_exits_count(self):\n \"\"\"The number of exits we expect to see but haven't.\"\"\"\n return self._pending_exits_count\n\n @pending_exits_count.setter\n def pending_exits_count(self, cnt):\n \"\"\"Set the pending count to cnt.\"\"\"\n self._pending_exits_count = cnt\n\n def AddForwardAccumulator(self, value, dead_branch=False):\n \"\"\"Add an accumulator for each forward tensor that is needed in backprop.\n\n This is added to the forward loop at the first time when a tensor\n in the forward loop is used by backprop gradient computation loop.\n We create an accumulator that accumulates the value of tensor at each\n iteration. Called in the control flow context where gradients() is called.\n\n The pseudocode is:\n ```\n acc = stack();\n while (_pivot) {\n acc = stack_push(acc, value);\n }\n ```\n\n We make sure that the stack push op in one iteration is executed before\n next iteration. 
This is achieved by adding a control edge from\n `forward_index.op.inputs[0].op` to the push op, and another control\n edge from the push op to either `forward_index.op` or `forward_sync`.\n\n Args:\n value: The source tensor in forward that is to be accumulated.\n dead_branch: True iff the tensor is on a dead branch of a cond.\n\n Returns:\n The stack that contains the accumulated history of the tensor.\n\n Raises:\n TypeError: For internal errors involving the value condition context.\n ValueError: If `value` is inside a XLA scope and a valid max size\n for the stack can't be found.\n \"\"\"\n # curr_ctxt is the context that tf.gradients was called in.\n with self._forward_index.graph.as_default():\n curr_ctxt = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access\n with ops.control_dependencies(None):\n if curr_ctxt:\n curr_ctxt.Enter()\n with ops.colocate_with(value):\n # We only need to pass maximum_iterations to the stack if\n # we're inside an XLA context.\n if not util.IsInXLAContext(value.op):\n max_size = constant_op.constant(-1, dtypes.int32)\n else:\n max_size = GetMaxSizeFromNestedMaximumIterations(\n value, self.forward_context)\n acc = gen_data_flow_ops.stack_v2(\n max_size=max_size, elem_type=value.dtype.base_dtype, name=\"f_acc\")\n if curr_ctxt:\n curr_ctxt.Exit()\n\n # Make acc available in the forward context.\n enter_acc = self.forward_context.AddValue(acc)\n\n # Add the stack_push op in the context of value.op.\n swap_enabled = self.forward_context.swap_memory\n value_ctxt = util.GetOutputContext(value.op)\n if value_ctxt == self.forward_context:\n # value is not nested in the forward context.\n self.forward_context.Enter()\n push = gen_data_flow_ops.stack_push_v2(\n enter_acc, value, swap_memory=swap_enabled)\n self.forward_context.Exit()\n # Protect stack push and order it before forward_index.\n self.forward_index.op._add_control_input(push.op)\n else:\n # value is in a cond context within the forward context.\n if not isinstance(value_ctxt, CondContext):\n raise TypeError(\"value_ctxt is not a CondContext: %s\" % value_ctxt)\n if dead_branch:\n # The special case for creating a zero tensor for a dead\n # branch of a switch. See ControlFlowState.ZerosLike().\n value_ctxt.outer_context.Enter()\n push = gen_data_flow_ops.stack_push_v2(\n enter_acc, value, swap_memory=swap_enabled)\n value_ctxt.outer_context.Exit()\n push.op._set_control_flow_context(value_ctxt)\n else:\n value_ctxt.Enter()\n push = gen_data_flow_ops.stack_push_v2(\n enter_acc, value, swap_memory=swap_enabled)\n value_ctxt.Exit()\n # Protect stack push and order it before forward_sync.\n self.forward_sync._add_control_input(push.op)\n # Order stack push after the successor of forward_index\n add_op = self.forward_index.op.inputs[0].op\n push.op._add_control_input(add_op)\n return acc\n\n def AddBackpropAccumulatedValue(self, history_value, value,\n dead_branch=False):\n \"\"\"Add the getter for an accumulated value in the grad context.\n\n This is added to the backprop loop. Called in the grad context to\n get the value of an accumulated value. 
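    By analogy with the push pseudocode in AddForwardAccumulator, the backprop
    side behaves like the following sketch (illustrative only, not the literal
    graph):

    ```
    while (n >= 1) {
      value = stack_pop(history_value);
      n--;
    }
    ```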
The stack pop op must be guarded\n by the pred of the controlling cond.\n\n Args:\n history_value: The history (a stack) of a value.\n value: The value that is pushed onto the stack.\n dead_branch: True iff the tensor is on a dead branch of a cond.\n\n Returns:\n The current value (the top of the stack).\n \"\"\"\n history_ctxt = history_value.op._get_control_flow_context()\n # Find the cond context that controls history_value if any.\n cond_ctxt = None\n value_ctxt = value.op._get_control_flow_context()\n while value_ctxt and value_ctxt != history_ctxt:\n if isinstance(value_ctxt, CondContext):\n cond_ctxt = value_ctxt\n break\n value_ctxt = value_ctxt.outer_context\n with ops.control_dependencies(None):\n self.grad_context.Enter()\n if cond_ctxt:\n # Guard stack pop with a switch if it is controlled by a cond.\n grad_state = self\n pred = None\n while pred is None and grad_state:\n pred = grad_state.history_map.get(cond_ctxt.pred.name)\n grad_state = grad_state.outer_grad_state\n if pred is None:\n pred = cond_ctxt.pred\n branch = (1 - cond_ctxt.branch) if dead_branch else cond_ctxt.branch\n history_value = _SwitchRefOrTensor(history_value, pred)[branch]\n pop = gen_data_flow_ops.stack_pop_v2(history_value,\n value.dtype.base_dtype)\n pop.set_shape(value.get_shape())\n self.grad_context.Exit()\n parallel_iterations = self.grad_context.parallel_iterations\n if parallel_iterations > 1:\n # All pops are ordered after pivot_for_body and before grad_sync.\n self.grad_sync._add_control_input(pop.op)\n return pop\n\n def GetRealValue(self, value):\n \"\"\"Get the real value of `value`.\n\n If backprop \"uses\" a value produced by forward inference, an accumulator\n is added in the forward loop to accumulate its values. We use the\n accumulated value. This method must be called in the grad loop context.\n `value` must be in forward and needed for backprop.\n\n Args:\n value: A tensor to be captured.\n\n Returns:\n The same tensor obtained from the saved history.\n \"\"\"\n assert value.op.type not in [\"Variable\", \"VariableV2\"]\n real_value = self._history_map.get(value.name)\n if real_value is None:\n cur_value = value\n cur_grad_state = self\n while True:\n enter_op = util.GetLoopConstantEnter(cur_value)\n if enter_op:\n # Special case: cur_value comes from a constant Enter node.\n cur_value = enter_op.inputs[0]\n cur_grad_state = cur_grad_state.outer_grad_state\n if cur_grad_state is None:\n # We are now outside all nested loops for this gradient(),\n # so `value` is a loop invariant and there is no need to\n # save the history of value. 
Just make cur_value to enter\n # the right control flow context.\n real_value = self._grad_context.AddValue(cur_value)\n break\n elif constant_op.is_constant(cur_value):\n # If the value to be forwarded is a constant, clone the constant in\n # the gradient loop rather than using a stack.\n # TODO(phawkins): consider hoisting the constant out of the loop\n # instead.\n real_value = constant_op.constant(\n tensor_util.constant_value(cur_value), dtype=cur_value.dtype)\n break\n else:\n # Record the history of this value in forward_ctxt.\n self._grad_context.Exit()\n history_value = cur_grad_state.AddForwardAccumulator(cur_value)\n self._grad_context.Enter()\n break\n\n if real_value is None:\n # Add the stack pop op in the grad context.\n real_value = cur_grad_state.AddBackpropAccumulatedValue(\n history_value, cur_value)\n if cur_grad_state != self:\n real_value = self._grad_context.AddValue(real_value)\n self._history_map[value.name] = real_value\n return real_value\n\n\ndef _GetWhileContext(op):\n \"\"\"Get the WhileContext to which this op belongs.\"\"\"\n ctxt = op._get_control_flow_context()\n if ctxt:\n ctxt = ctxt.GetWhileContext()\n return ctxt\n\n\nclass ControlFlowState(object):\n \"\"\"Maintain the mapping from the loops to their grad states.\"\"\"\n\n def __init__(self):\n self._map = {} # maps forward loop context to GradLoopState\n\n def GetGradState(self, op, before):\n \"\"\"Return the grad state for this op if it's in a forward loop context.\"\"\"\n if before and util.IsLoopExit(op):\n forward_ctxt = op._get_control_flow_context()\n forward_ctxt = forward_ctxt.outer_context\n if forward_ctxt:\n forward_ctxt = forward_ctxt.GetWhileContext()\n else:\n forward_ctxt = _GetWhileContext(op)\n if forward_ctxt:\n return self._map.get(forward_ctxt)\n return None\n\n def ProcessUnusedLoopExits(self, pending_count, to_ops_set):\n \"\"\"Process all the \"unused\" loop exits.\n\n The \"unused\" exits of the loops are added to `unused_exits`. An exit is\n unused if its pending_count is 0. If there is an exit with real gradient,\n all these deferred exits will enter the backprop loop with zero gradient.\n Otherwise, they will enter the backprop loop with None. As an example,\n people often write:\n\n ```python\n v1, _ = tf.while_loop(p, b, [x1, x2])\n result = gradients(v1, x1)\n ```\n\n The exit node for x2 is not included by the betweenness analysis. 
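    In that example the exit for x2 has a pending count of 0, so it is
    recorded in this loop's `unused_exits`.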
But we\n need to backprop x2 if x2 is involved in computing v1.\n\n Args:\n pending_count: The number of backprop inputs for every op.\n to_ops_set: The set of ops for ys in gradients(ys, xs)\n\n Returns:\n The set of unused loop exits that we know at this point we need\n to backprop.\n \"\"\"\n loop_exits = []\n for grad_state in self._map.values():\n for y in grad_state.forward_loop_exits:\n if pending_count[y.op] == 0:\n grad_state.pending_exits_count -= 1\n if y.op not in to_ops_set:\n grad_state.unused_exits.append(y)\n if grad_state.pending_exits_count == 0:\n loop_exits.extend(grad_state.unused_exits)\n # Need to include Enters in backprop for higher-order gradients.\n for y in grad_state.forward_context.loop_enters:\n if pending_count[y.op] == 0:\n pending_count[y.op] = 1\n return loop_exits\n\n def EnterGradWhileContext(self, op, before):\n \"\"\"Enter the WhileContext for gradient computation.\"\"\"\n grad_state = self.GetGradState(op, before)\n if grad_state:\n grad_state.grad_context.Enter()\n\n def ExitGradWhileContext(self, op, before):\n \"\"\"Exit the WhileContext for gradient computation.\"\"\"\n grad_state = self.GetGradState(op, before)\n if grad_state:\n grad_state.grad_context.Exit()\n\n def AddWhileContext(self, op, between_op_list, between_ops):\n \"\"\"Add the grad state for the while loop that op belongs to.\n\n Note that op is an Exit, and this method must be called in\n the control flow context where gradients() is called.\n\n Note that this method modifies `between_op_list` and `between_ops`.\n \"\"\"\n forward_ctxt = _GetWhileContext(op)\n grad_state = self._map.get(forward_ctxt)\n if grad_state is None:\n # This is a new while loop so create a grad state for it.\n outer_forward_ctxt = forward_ctxt.outer_context\n if outer_forward_ctxt:\n outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()\n outer_grad_state = None\n if outer_forward_ctxt:\n outer_grad_state = self._map.get(outer_forward_ctxt)\n grad_state = GradLoopState(forward_ctxt, outer_grad_state)\n self._map[forward_ctxt] = grad_state\n\n # We need to include all exits of a loop for backprop.\n for loop_exit in grad_state.forward_loop_exits:\n if loop_exit.op not in between_ops:\n between_ops.add(loop_exit.op)\n between_op_list.append(loop_exit.op)\n\n def ZerosLikeForExit(self, val):\n \"\"\"Create zeros_like gradient for a loop exit.\n\n If the result of a loop variable is not used but is involved in\n computing the result of some needed loop variable, we create a\n zero-valued tensor that is fed as gradient for the Exit node of that\n loop variable. 
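    A typical trigger looks like the example in ProcessUnusedLoopExits
    (illustrative only):

    ```python
    v1, v2 = tf.while_loop(c, b, [x1, x2])
    g = tf.gradients(v1, x1)  # v2's Exit may be fed a zeros gradient here
    ```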
Note that val.op is an Exit, and this method must be\n called in the control flow context where gradients() is called.\n\n Args:\n val: The output tensor of an Exit op.\n\n Returns:\n A zero tensor of the same shape of val.\n \"\"\"\n val_shape = val.get_shape()\n forward_ctxt = val.op._get_control_flow_context()\n outer_forward_ctxt = forward_ctxt.outer_context\n if outer_forward_ctxt:\n outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()\n outer_grad_state = None\n if outer_forward_ctxt:\n outer_grad_state = self._map.get(outer_forward_ctxt)\n if outer_grad_state:\n # This is a nested loop.\n if val_shape.is_fully_defined():\n # If the shape is known statically, just create a zero tensor\n # with the right shape in the right context.\n outer_grad_state.grad_context.Enter()\n result = array_ops.zeros(val_shape.dims, val.dtype)\n outer_grad_state.grad_context.Exit()\n else:\n # Only the shape of value is needed for backprop.\n forward_ctxt.outer_context.Enter()\n shape = array_ops.shape_internal(val, optimize=False)\n forward_ctxt.outer_context.Exit()\n # Save the shape to a stack.\n history_shape = outer_grad_state.AddForwardAccumulator(shape)\n # Get the shape back from the stack.\n outer_grad_ctxt = outer_grad_state.grad_context\n outer_grad_ctxt.Enter()\n real_shape = outer_grad_state.AddBackpropAccumulatedValue(\n history_shape, shape)\n result = array_ops.zeros(real_shape, val.dtype)\n outer_grad_ctxt.Exit()\n else:\n # This is not a nested loop.\n if val_shape.is_fully_defined():\n # If the shape is known statically, just create a zero tensor\n # with the right shape.\n result = array_ops.zeros(val_shape.dims, val.dtype)\n else:\n result = array_ops.zeros_like(val, optimize=False)\n return result\n\n def ZerosLike(self, op, index):\n \"\"\"Create zeros_like for the specified output of an op.\n\n If op is in a while loop that is part of gradients(), this method\n must be called in its grad loop context.\n\n Args:\n op: A tensorflow operation.\n index: the index for a specific output of the op.\n\n Returns:\n A zero tensor of the same shape of op.outputs[index].\n \"\"\"\n if util.IsLoopSwitch(op):\n return None\n dead_branch = util.IsSwitch(op)\n forward_ctxt = _GetWhileContext(op)\n grad_state = self._map.get(forward_ctxt)\n if grad_state is None:\n # op is not in a while loop that is part of gradients().\n return ZerosLikeOutsideLoop(op, index)\n op_ctxt = op._get_control_flow_context()\n val = ops.convert_to_tensor(op.outputs[index], name=\"tensor\")\n shape = val.get_shape()\n if shape.is_fully_defined():\n # If the shape is known statically, just create a zero tensor with\n # the right shape in the grad loop context.\n result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)\n if dead_branch:\n # op is a cond switch. 
Guard the zero tensor with a switch.\n pred = grad_state.history_map.get(op_ctxt.pred.name)\n branch = op_ctxt.branch\n result = _SwitchRefOrTensor(result, pred)[1 - branch]\n else:\n # Unknown shape so keep a history of the shape at runtime.\n if dead_branch:\n # Need to add a special switch to guard the value.\n pred = op_ctxt.pred\n branch = op_ctxt.branch\n op_ctxt.outer_context.Enter()\n val = _SwitchRefOrTensor(op.inputs[0], pred)[1 - branch]\n zeros_shape = array_ops.shape_internal(val, optimize=False)\n op_ctxt.outer_context.Exit()\n val.op._set_control_flow_context(op_ctxt)\n zeros_shape.op._set_control_flow_context(op_ctxt)\n else:\n op_ctxt.Enter()\n zeros_shape = array_ops.shape_internal(val, optimize=False)\n op_ctxt.Exit()\n\n # Add forward accumulator for shape.\n grad_state.grad_context.Exit()\n history_zeros_shape = grad_state.AddForwardAccumulator(\n zeros_shape, dead_branch=dead_branch)\n grad_state.grad_context.Enter()\n\n # Create a zero tensor with the right shape.\n shape = grad_state.AddBackpropAccumulatedValue(history_zeros_shape,\n zeros_shape, dead_branch)\n result = array_ops.zeros(shape, val.dtype)\n return result\n\n def PostProcessing(self):\n \"\"\"Perform postprocessing at the end of gradients().\n\n We have created the gradient graph at this point. So this function\n can be used to perform any postprocessing on the gradient graph.\n We currently perform the following postprocessing:\n 1. Patch the gradient graph if the output of a loop variable\n doesn't depend on its input.\n \"\"\"\n for _, grad_state in self._map.items():\n for _, b_merge in grad_state.switch_map.items():\n if b_merge.op.inputs[0] == b_merge.op.inputs[1]:\n # The value of this loop variable at iteration i+1 doesn't\n # depend on its value at iteration i. So use zeros as the\n # gradients for all iterations > 0.\n dtype = b_merge.op.inputs[0].dtype\n shape = b_merge.op.inputs[0].get_shape()\n # pylint: disable=protected-access\n if shape.is_fully_defined():\n grad_state.grad_context.Enter()\n # Create a zeros and use it for iterations > 0.\n grad_val = constant_op.constant(0, dtype=dtype, shape=shape)\n next_grad_val = _NextIteration(grad_val)\n grad_state.grad_context.Exit()\n else:\n # Create a zeros in the outer grad context.\n outer_grad_ctxt = grad_state.grad_context.outer_context\n if outer_grad_ctxt:\n outer_grad_ctxt.Enter()\n enter_grad_op = b_merge.op.inputs[0].op\n enter_grad = enter_grad_op.inputs[0]\n grad_shape = array_ops.shape_internal(enter_grad, optimize=False)\n grad_val = array_ops.zeros(grad_shape)\n if outer_grad_ctxt:\n outer_grad_ctxt.Exit()\n # Use the zeros for iterations > 0.\n grad_state.grad_context.Enter()\n next_grad_val = _NextIteration(grad_val)\n grad_state.grad_context.Exit()\n b_merge.op._update_input(1, next_grad_val)\n # pylint: enable=protected-access\n\n\ndef MaybeCreateControlFlowState(between_op_list, between_ops,\n colocate_gradients_with_ops):\n \"\"\"Create the state for all the while loops involved in one gradients().\n\n We create a ControlFlowState when there are while loops involved in\n gradients(). 
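    A rough sketch of the calling pattern (illustrative; the driver loop here
    is hypothetical, but the methods are the ones defined on ControlFlowState
    in this module):

    ```python
    loop_state = MaybeCreateControlFlowState(
        between_op_list, between_ops, colocate_gradients_with_ops)
    for op in reversed(between_op_list):
      if loop_state:
        loop_state.EnterGradWhileContext(op, before=True)
      # ... build the gradient ops for `op` ...
      if loop_state:
        loop_state.ExitGradWhileContext(op, before=True)
    if loop_state:
      loop_state.PostProcessing()
    ```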
In gradients(), control flow logic is only invoked when\n the ControlFlowState is not None.\n\n Note that this method modifies `between_op_list` and `between_ops`.\n \"\"\"\n loop_state = None\n for op in between_op_list:\n if util.IsLoopExit(op):\n if loop_state is None:\n loop_state = ControlFlowState()\n if colocate_gradients_with_ops:\n with ops.colocate_with(op):\n loop_state.AddWhileContext(op, between_op_list, between_ops)\n else:\n loop_state.AddWhileContext(op, between_op_list, between_ops)\n return loop_state\n\n\ndef ZerosLikeOutsideLoop(op, index):\n \"\"\"Create zeros_like for the specified output of an op.\"\"\"\n val = op.outputs[index]\n if not util.IsSwitch(op):\n if val.dtype == dtypes.resource:\n return array_ops.zeros(gen_resource_variable_ops.variable_shape(val))\n return array_ops.zeros_like(val, optimize=False)\n else:\n op_ctxt = op._get_control_flow_context()\n if op_ctxt:\n # We are in a cond context. Use a switch to create zeros only when needed.\n pred = op_ctxt.pred\n branch = op_ctxt.branch\n switch_val = switch(op.inputs[0], pred)[1 - branch]\n # A op is created along the branch taken as control dependencies are on\n # the whole op and not on the tensor output.\n pivot = array_ops.identity(switch_val)\n if val.dtype == dtypes.resource:\n with ops.control_dependencies([pivot]):\n return array_ops.zeros(\n gen_resource_variable_ops.variable_shape(switch_val))\n zeros_shape = array_ops.shape_internal(switch_val, optimize=False)\n # Ensure ops created within array_ops.zeros are dominated by switch in\n # cond context.\n with ops.control_dependencies([pivot]):\n return array_ops.zeros(zeros_shape, dtype=val.dtype)\n else:\n return array_ops.zeros_like(val, optimize=False)\n\n\nclass ControlFlowContext(object):\n \"\"\"The base class for control flow context.\n\n The usage pattern is a sequence of (Enter, Exit) followed by a final\n ExitResult.\n\n We maintain the following state for control flow contexts during graph\n construction:\n 1. graph has _control_flow_context: the current context used to\n construct new nodes. Changed by ctxt.Enter() and ctxt.Exit()\n 2. op has _control_flow_context: the context to which the op belongs.\n Set at the time the op is created. Immutable.\n 3. A ControlFlowContext has _outer_context: the context in which this\n context is created. Set at the time a context is created. Immutable.\n 4. A ControlFlowContext has _context_stack.\n Pushed and popped by ctxt.Enter() and ctxt.Exit()\n \"\"\"\n\n def __init__(self, values_def=None, import_scope=None):\n self._nested_contexts = []\n self._outer_context = ops.get_default_graph()._get_control_flow_context()\n if self._outer_context:\n self._outer_context._nested_contexts.append(self) # pylint: disable=protected-access\n self._context_stack = []\n if values_def:\n self._init_values_from_proto(values_def, import_scope=import_scope)\n else:\n # The names of tensors that have been already seen in this context.\n self._values = set()\n # The keys are the names of tensors referenced by but external to this\n # context. Each value is the Tensor that should be used by this context to\n # access the key value (e.g. a switch output guarding a cond input value).\n self._external_values = {}\n\n def _init_values_from_proto(self, values_def, import_scope=None):\n \"\"\"Initializes values and external_values from `ValuesDef` protocol buffer.\n\n Args:\n values_def: `ValuesDef` protocol buffer.\n import_scope: Optional `string`. 
Name scope to add.\n \"\"\"\n assert isinstance(values_def, control_flow_pb2.ValuesDef)\n self._values = set(\n ops.prepend_name_scope(value, import_scope)\n for value in values_def.values)\n g = ops.get_default_graph()\n self._external_values = {}\n for k, v in values_def.external_values.items():\n k = ops.prepend_name_scope(k, import_scope)\n self._external_values[k] = g.as_graph_element(\n ops.prepend_name_scope(v, import_scope))\n op_names = set([\n op.split(\":\")[0]\n for op in self._values - set(self._external_values.keys())\n ])\n for op in op_names:\n # pylint: disable=protected-access\n g.as_graph_element(op)._set_control_flow_context(self)\n # pylint: enable=protected-access\n\n @property\n def name(self):\n return self._name\n\n @property\n def outer_context(self):\n \"\"\"Return the context containing this context.\"\"\"\n return self._outer_context\n\n @property\n def grad_state(self):\n raise NotImplementedError(\"Abstract method\")\n\n @property\n def back_prop(self):\n raise NotImplementedError(\"Abstract method\")\n\n @abc.abstractmethod\n def to_control_flow_context_def(self, context_def, export_scope=None):\n \"\"\"Serializes this into `context_def`.\n\n Args:\n context_def: a `ControlFlowContextDef` protocol buffer.\n export_scope: Optional `string`. Name scope to remove.\n \"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n def _to_values_def(self, export_scope=None):\n \"\"\"Converts the values to a `ValuesDef` protocol buffer.\n\n Args:\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n A `ValuesDef` protocol buffer.\n \"\"\"\n values_def = control_flow_pb2.ValuesDef()\n values_def.values.extend(\n [ops.strip_name_scope(v, export_scope) for v in sorted(self._values)])\n for k, v in self._external_values.items():\n k = ops.strip_name_scope(k, export_scope)\n values_def.external_values[k] = ops.strip_name_scope(v.name, export_scope)\n return values_def\n\n def AddName(self, name):\n self._values.add(name)\n\n # pylint: disable=protected-access\n def Enter(self):\n \"\"\"Enter this control flow context.\"\"\"\n graph = ops.get_default_graph()\n self._context_stack.append(graph._get_control_flow_context())\n graph._set_control_flow_context(self)\n\n def Exit(self):\n \"\"\"Exit this control flow context.\"\"\"\n graph = ops.get_default_graph()\n last_context = self._context_stack.pop()\n graph._set_control_flow_context(last_context)\n\n def EnterGradientColocation(self, op, gradient_uid):\n \"\"\"Start building a gradient colocated with an op.\"\"\"\n if self._outer_context:\n self._outer_context.EnterGradientColocation(op, gradient_uid)\n\n def ExitGradientColocation(self, op, gradient_uid):\n \"\"\"Start building a gradient colocated with an op.\"\"\"\n if self._outer_context:\n self._outer_context.ExitGradientColocation(op, gradient_uid)\n\n def ExitResult(self, result):\n \"\"\"Make a list of tensors available in the outer context.\"\"\"\n if self._outer_context:\n nest.map_structure(lambda x: self._outer_context.AddName(x.name), result)\n\n def GetWhileContext(self):\n \"\"\"Return the while context containing this context.\"\"\"\n if self._outer_context:\n return self._outer_context.GetWhileContext()\n return None\n\n def _IsInOuterContext(self, op):\n op_ctxt = util.GetOutputContext(op)\n outer_ctxt = self.outer_context\n while outer_ctxt != op_ctxt:\n if outer_ctxt is None:\n return False\n outer_ctxt = outer_ctxt.outer_context\n return True\n\n def _RemoveExternalControlEdges(self, op):\n \"\"\"Remove any external control dependency 
on this op.\"\"\"\n while_ctxt = self.GetWhileContext()\n # A control input of `op` is internal if it is in the same while\n # loop context as the enclosing while loop context of self.\n if while_ctxt is None:\n internal_control_inputs = op.control_inputs\n else:\n internal_control_inputs = []\n for x in op.control_inputs:\n ctxt = util.GetOutputContext(x)\n if ctxt is not None and ctxt.GetWhileContext() == while_ctxt:\n internal_control_inputs.append(x)\n external_control_inputs = []\n if len(internal_control_inputs) != len(op.control_inputs):\n external_control_inputs = list(set(op.control_inputs)\n - set(internal_control_inputs))\n op._remove_all_control_inputs()\n op._add_control_inputs(internal_control_inputs)\n return internal_control_inputs, external_control_inputs\n\n # pylint: enable=protected-access\n\n def AddInnerOp(self, op):\n \"\"\"Notifies a scope about an operator added to an inner scope.\"\"\"\n if self._outer_context:\n self._outer_context.AddInnerOp(op)\n\n def GetControlPivot(self):\n \"\"\"Returns the pivot node for this context, or None.\"\"\"\n return None\n\n def IsWhileContext(self):\n return False\n\n def IsCondContext(self):\n return False\n\n def IsXLAContext(self):\n return False\n\n def __str__(self):\n return self.name\n\n\nclass CondContext(ControlFlowContext):\n \"\"\"The context for the conditional construct.\"\"\"\n\n def __init__(self,\n pred=None,\n pivot=None,\n branch=None,\n name=\"cond_text\",\n context_def=None,\n import_scope=None):\n \"\"\"Creates a `CondContext`.\n\n Args:\n pred: The `boolean` tensor for the conditional predicate.\n pivot: The predicate tensor in this branch.\n branch: 0 or 1 representing this branch.\n name: Name of the `CondContext` python object.\n context_def: Optional `ContextDef` protocol buffer to initialize the\n `CondContext` object from.\n import_scope: Optional `string`. Name scope to add. Only used when\n initialing from protocol buffer.\n \"\"\"\n self._name = ops.get_default_graph().unique_name(name)\n\n if context_def:\n self._init_from_proto(context_def, import_scope=import_scope)\n else:\n # Initializes the default fields.\n ControlFlowContext.__init__(self)\n self._pred = pred # The boolean tensor for the cond predicate\n self._pivot = pivot # The predicate tensor in this branch\n self._branch = branch # 0 or 1 representing this branch\n\n # Values considered to have been already seen in this context. pred is not\n # included in this context.\n self._values.add(pred.name)\n self._external_values[pred.name] = pred\n self._values.add(pivot.name)\n pivot.op._set_control_flow_context(self) # pylint: disable=protected-access\n\n def _init_from_proto(self, context_def, import_scope=None):\n \"\"\"Creates a new `CondContext` from protocol buffer.\n\n Args:\n context_def: `CondContextDef` protocol buffer.\n import_scope: Optional `string`. 
Name scope to add.\n \"\"\"\n assert isinstance(context_def, control_flow_pb2.CondContextDef)\n # Create from context_def.\n g = ops.get_default_graph()\n self._name = ops.prepend_name_scope(context_def.context_name, import_scope)\n self._pred = g.as_graph_element(\n ops.prepend_name_scope(context_def.pred_name, import_scope))\n self._pivot = g.as_graph_element(\n ops.prepend_name_scope(context_def.pivot_name, import_scope))\n self._branch = context_def.branch\n super(CondContext, self).__init__(values_def=context_def.values_def,\n import_scope=import_scope)\n\n @property\n def pred(self):\n return self._pred\n\n @property\n def pivot(self):\n return self._pivot\n\n @property\n def branch(self):\n return self._branch\n\n @property\n def grad_state(self):\n if self.GetWhileContext():\n return self.GetWhileContext().grad_state\n return None\n\n @property\n def back_prop(self):\n if self.GetWhileContext():\n self.GetWhileContext().back_prop\n return False\n\n def GetControlPivot(self):\n return self._pivot\n\n def to_proto(self, export_scope=None):\n \"\"\"Converts a `CondContext` to a `CondContextDef` protocol buffer.\n\n Args:\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n A `CondContextDef` protocol buffer.\n \"\"\"\n if (export_scope is None or self.name.startswith(export_scope)):\n context_def = control_flow_pb2.CondContextDef()\n context_def.context_name = ops.strip_name_scope(self.name, export_scope)\n context_def.pred_name = ops.strip_name_scope(self._pred.name,\n export_scope)\n context_def.pivot_name = ops.strip_name_scope(self._pivot.name,\n export_scope)\n context_def.branch = self._branch\n context_def.values_def.MergeFrom(super(CondContext, self)._to_values_def(\n export_scope))\n for nested in self._nested_contexts:\n nested_def = context_def.nested_contexts.add()\n nested.to_control_flow_context_def(nested_def)\n\n return context_def\n else:\n return None\n\n @staticmethod\n def from_proto(context_def, import_scope=None):\n \"\"\"Returns a `CondContext` object created from `context_def`.\"\"\"\n ret = CondContext(context_def=context_def,\n import_scope=import_scope)\n\n ret.Enter()\n for nested_def in context_def.nested_contexts:\n from_control_flow_context_def(nested_def, import_scope=import_scope)\n ret.Exit()\n return ret\n\n def to_control_flow_context_def(self, context_def, export_scope=None):\n context_def.cond_ctxt.CopyFrom(self.to_proto(export_scope=export_scope))\n\n def AddValue(self, val):\n \"\"\"Add `val` to the current context and its outer context recursively.\"\"\"\n if val.name in self._values:\n # Use the real value if it comes from outer context. 
This is needed in\n # particular for nested conds.\n result = self._external_values.get(val.name)\n result = val if result is None else result\n else:\n result = val\n self._values.add(val.name)\n if self._outer_context:\n result = self._outer_context.AddValue(val)\n self._values.add(result.name)\n self._external_values[result.name] = result\n with ops.control_dependencies(None):\n result = _SwitchRefOrTensor(result, self._pred)[self._branch]\n if self._outer_context:\n self._outer_context.AddInnerOp(result.op)\n\n result.op.graph.prevent_fetching(result.op)\n # pylint: disable=protected-access\n result.op._set_control_flow_context(self)\n # pylint: enable=protected-access\n\n self._values.add(result.name)\n self._external_values[val.name] = result\n return result\n\n def AddOp(self, op):\n self._AddOpInternal(op)\n\n def _AddOpInternal(self, op):\n \"\"\"Add `op` to the current context.\"\"\"\n if not op.inputs:\n # If we're in a while loop, remove any control inputs from outside the\n # loop.\n self._RemoveExternalControlEdges(op)\n\n if not any(util.OpInContext(input_op, self)\n for input_op in op.control_inputs):\n # pylint: disable=protected-access\n op._add_control_input(self._pivot.op)\n # pylint: enable=protected-access\n else:\n # Make each input to 'op' available in this CondContext. If an input is\n # already part of this context there's nothing to do, but if it's\n # external, AddValue() will handle adding the appropriate Switch node and\n # other bookkeeping.\n for index in range(len(op.inputs)):\n x = op.inputs[index]\n if op.type == \"Merge\" and x.op.type == \"NextIteration\":\n # Edge case: if we're importing a while loop inside this CondContext,\n # AddValue() will not correctly handle the NextIteration inputs to\n # Merge node. The problem is that the NextIteration should also be\n # part of this context, but if we're importing it won't have been\n # processed and added to the context yet, so AddValue() will try to\n # add a Switch which results in an invalid graph. 
Instead, we use the\n # NextIteration input as-is here, and it will eventually be added to\n # the context via AddOp().\n real_x = x\n else:\n real_x = self.AddValue(x)\n if real_x != x:\n # pylint: disable=protected-access\n op._update_input(index, real_x)\n # pylint: enable=protected-access\n # Remove any external control dependency on this op.\n self._RemoveExternalControlEdges(op)\n # pylint: disable=protected-access\n if op.graph._is_function(op.type) or op.type == \"SymbolicGradient\":\n op._add_control_input(self._pivot.op)\n # pylint: enable=protected-access\n\n # Mark op's outputs as seen by this context and any outer contexts.\n output_names = [x.name for x in op.outputs]\n ctxt = self\n while ctxt is not None:\n # pylint: disable=protected-access\n ctxt._values.update(output_names)\n ctxt = ctxt._outer_context\n # pylint: enable=protected-access\n\n if self._outer_context or not util.IsLoopExit(op):\n op.graph.prevent_fetching(op)\n\n if self._outer_context:\n self._outer_context.AddInnerOp(op)\n\n def _ProcessOutputTensor(self, val):\n \"\"\"Process an output tensor of a conditional branch.\"\"\"\n real_val = val\n if val.name not in self._values:\n # Handle the special case of lambda: x\n self._values.add(val.name)\n if self._outer_context:\n real_val = self._outer_context.AddValue(val)\n self._values.add(real_val.name)\n self._external_values[real_val.name] = real_val\n real_val = _SwitchRefOrTensor(real_val, self._pred)[self._branch]\n self._external_values[val.name] = real_val\n else:\n external_val = self._external_values.get(val.name)\n if external_val is not None:\n real_val = external_val\n return real_val\n\n def _BuildCondTensor(self, v):\n if isinstance(v, ops.Operation):\n # Use pivot as the proxy for this op.\n return with_dependencies([v], self._pivot)\n elif isinstance(v, (ops.IndexedSlices, sparse_tensor.SparseTensor)):\n values = self._ProcessOutputTensor(v.values)\n indices = self._ProcessOutputTensor(v.indices)\n if isinstance(v, ops.IndexedSlices):\n dense_shape = v.dense_shape\n if dense_shape is not None:\n dense_shape = self._ProcessOutputTensor(dense_shape)\n return ops.IndexedSlices(values, indices, dense_shape)\n else:\n dense_shape = self._ProcessOutputTensor(v.dense_shape)\n return sparse_tensor.SparseTensor(indices, values, dense_shape)\n else:\n v = nest.map_structure(_convert_tensorarray_to_flow, v)\n return self._ProcessOutputTensor(ops.convert_to_tensor(v))\n\n def BuildCondBranch(self, fn):\n \"\"\"Add the subgraph defined by fn() to the graph.\"\"\"\n pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access\n original_result = fn()\n post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access\n if len(post_summaries) > len(pre_summaries):\n new_summaries = post_summaries[len(pre_summaries):]\n summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access\n summary_ref[:] = pre_summaries\n with ops.control_dependencies(new_summaries):\n if original_result is None:\n return no_op(), None\n else:\n original_result = nest.map_structure(array_ops.identity,\n original_result)\n if original_result is None:\n return None, None\n\n result = nest.map_structure(self._BuildCondTensor, original_result)\n if not isinstance(result, (list, _basetuple)):\n result = [result]\n return original_result, result\n\n def IsCondContext(self):\n return True\n\n\ndef _UnpackIfSingleton(res):\n if isinstance(res, (list, _basetuple)) 
and len(res) == 1:\n return res[0]\n else:\n return res\n\n\n# pylint: disable=redefined-outer-name\n# pylint: disable=g-doc-args\n@tf_export(\"cond\")\[email protected]_args(\n None, \"fn1/fn2 are deprecated in favor of the true_fn/false_fn arguments.\",\n \"fn1\", \"fn2\")\ndef cond(pred,\n true_fn=None,\n false_fn=None,\n strict=False,\n name=None,\n fn1=None,\n fn2=None):\n \"\"\"Return `true_fn()` if the predicate `pred` is true else `false_fn()`.\n\n `true_fn` and `false_fn` both return lists of output tensors. `true_fn` and\n `false_fn` must have the same non-zero number and type of outputs.\n\n **WARNING**: Any Tensors or Operations created outside of `true_fn` and\n `false_fn` will be executed regardless of which branch is selected at runtime.\n\n Although this behavior is consistent with the dataflow model of TensorFlow,\n it has frequently surprised users who expected a lazier semantics.\n Consider the following simple program:\n\n ```python\n z = tf.multiply(a, b)\n result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y))\n ```\n\n If `x < y`, the `tf.add` operation will be executed and `tf.square`\n operation will not be executed. Since `z` is needed for at least one\n branch of the `cond`, the `tf.multiply` operation is always executed,\n unconditionally.\n\n Note that `cond` calls `true_fn` and `false_fn` *exactly once* (inside the\n call to `cond`, and not at all during `Session.run()`). `cond`\n stitches together the graph fragments created during the `true_fn` and\n `false_fn` calls with some additional graph nodes to ensure that the right\n branch gets executed depending on the value of `pred`.\n\n `tf.cond` supports nested structures as implemented in\n `tensorflow.python.util.nest`. Both `true_fn` and `false_fn` must return the\n same (possibly nested) value structure of lists, tuples, and/or named tuples.\n Singleton lists and tuples form the only exceptions to this: when returned by\n `true_fn` and/or `false_fn`, they are implicitly unpacked to single values.\n This behavior is disabled by passing `strict=True`.\n\n Args:\n pred: A scalar determining whether to return the result of `true_fn` or\n `false_fn`.\n true_fn: The callable to be performed if pred is true.\n false_fn: The callable to be performed if pred is false.\n strict: A boolean that enables/disables 'strict' mode; see above.\n name: Optional name prefix for the returned tensors.\n\n Returns:\n Tensors returned by the call to either `true_fn` or `false_fn`. If the\n callables return a singleton list, the element is extracted from the list.\n\n Raises:\n TypeError: if `true_fn` or `false_fn` is not callable.\n ValueError: if `true_fn` and `false_fn` do not return the same number of\n tensors, or return tensors of different types.\n\n Example:\n\n ```python\n x = tf.constant(2)\n y = tf.constant(5)\n def f1(): return tf.multiply(x, 17)\n def f2(): return tf.add(y, 23)\n r = tf.cond(tf.less(x, y), f1, f2)\n # r is set to f1().\n # Operations in f2 (e.g., tf.add) are not executed.\n ```\n\n \"\"\"\n if ENABLE_COND_V2 and not context.executing_eagerly():\n return cond_v2_impl.cond_v2(pred, true_fn, false_fn, name)\n\n # We needed to make true_fn/false_fn keyword arguments for\n # backwards-compatibility. 
This check exists so that we can convert back to\n # having them be positional arguments.\n # TODO(josh11b): Make `true_fn` and `false_fn` positional arguments after\n # `fn1` and `fn2` are deleted.\n if fn1 is not None:\n if true_fn is not None:\n raise TypeError(\"cond(): true_fn and fn1 may not be set simultaneously.\")\n true_fn = fn1\n elif true_fn is None:\n raise TypeError(\"cond(): true_fn argument required\")\n if fn2 is not None:\n if false_fn is not None:\n raise TypeError(\"cond(): false_fn and fn2 may not be set simultaneously.\")\n false_fn = fn2\n elif false_fn is None:\n raise TypeError(\"cond(): false_fn argument required\")\n\n if not callable(true_fn):\n raise TypeError(\"true_fn must be callable.\")\n if not callable(false_fn):\n raise TypeError(\"false_fn must be callable.\")\n\n with ops.name_scope(name, \"cond\", [pred]):\n if context.executing_eagerly():\n if pred:\n return _UnpackIfSingleton(true_fn())\n return _UnpackIfSingleton(false_fn())\n\n # Add the Switch to the graph.\n if isinstance(pred, bool):\n raise TypeError(\"pred must not be a Python bool\")\n p_2, p_1 = switch(pred, pred)\n pivot_1 = array_ops.identity(p_1, name=\"switch_t\")\n pivot_2 = array_ops.identity(p_2, name=\"switch_f\")\n pred = array_ops.identity(pred, name=\"pred_id\")\n # Disable the fetching of tensors that are only on one branch of cond.\n for tensor in [p_1, p_2, pivot_1, pivot_2, pred]:\n tensor.op.graph.prevent_fetching(tensor.op)\n\n # Build the graph for the true branch in a new context.\n context_t = CondContext(pred, pivot_1, branch=1)\n try:\n context_t.Enter()\n orig_res_t, res_t = context_t.BuildCondBranch(true_fn)\n if orig_res_t is None:\n raise ValueError(\"true_fn must have a return value.\")\n context_t.ExitResult(res_t)\n finally:\n context_t.Exit()\n\n # Build the graph for the false branch in a new context.\n context_f = CondContext(pred, pivot_2, branch=0)\n try:\n context_f.Enter()\n orig_res_f, res_f = context_f.BuildCondBranch(false_fn)\n if orig_res_f is None:\n raise ValueError(\"false_fn must have a return value.\")\n context_f.ExitResult(res_f)\n finally:\n context_f.Exit()\n\n if not strict:\n orig_res_t = _UnpackIfSingleton(orig_res_t)\n orig_res_f = _UnpackIfSingleton(orig_res_f)\n\n # Check that the return values of the two branches have the same structure.\n try:\n nest.assert_same_structure(orig_res_t, orig_res_f)\n except TypeError as e:\n raise TypeError(\n \"Incompatible return types of true_fn and false_fn: {}\".format(e))\n except ValueError as e:\n raise ValueError(\n \"Incompatible return values of true_fn and false_fn: {}\".format(e))\n\n # Add the final merge to the graph.\n if not res_t:\n raise ValueError(\"true_fn and false_fn must return at least one result.\")\n\n res_t_flat = nest.flatten(res_t)\n res_f_flat = nest.flatten(res_f)\n\n for x, y in zip(res_t_flat, res_f_flat):\n assert ((isinstance(x, ops.IndexedSlices) and\n isinstance(y, ops.IndexedSlices)) or\n (isinstance(x, sparse_tensor.SparseTensor) and\n isinstance(y, sparse_tensor.SparseTensor)) or\n (isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)))\n val_x = x if isinstance(x, ops.Tensor) else x.values\n val_y = y if isinstance(y, ops.Tensor) else y.values\n if val_x.dtype.base_dtype != val_y.dtype.base_dtype:\n raise ValueError(\n \"Outputs of true_fn and false_fn must have the same type: %s, %s\" %\n (val_x.dtype.name, val_y.dtype.name))\n\n merges = [merge(pair)[0] for pair in zip(res_f_flat, res_t_flat)]\n merges = 
_convert_flows_to_tensorarrays(nest.flatten(orig_res_t), merges)\n\n # Only add non-nested conds to the collection. Any nested control flow will\n # be encapsulated in the root context.\n assert context_t.outer_context == context_f.outer_context\n if context_t.outer_context is None:\n ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_t)\n ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_f)\n\n merges = nest.pack_sequence_as(structure=orig_res_t, flat_sequence=merges)\n\n # Singleton lists and tuples are automatically unpacked if strict == False.\n if not strict:\n merges = _UnpackIfSingleton(merges)\n return merges\n\n\n# pylint: enable=g-doc-args\n# pylint: enable=redefined-outer-name\n\n\ndef _resource_safe_shape(t):\n \"\"\"Returns the shape of t or the variable it points to.\"\"\"\n if t.dtype == dtypes.resource:\n while t.op.inputs:\n t = t.op.inputs[0]\n return tensor_shape.TensorShape(t.op.get_attr(\"shape\"))\n return array_ops.shape_internal(t, optimize=False)\n\n\n# TODO(yuanbyu): Consider having a unified notion of context for\n# not only conditionals and loops but also control dependency and\n# subgraphs.\nclass WhileContext(ControlFlowContext):\n \"\"\"The context for the loop construct.\"\"\"\n\n def __init__(self,\n maximum_iterations=None,\n parallel_iterations=10,\n back_prop=True,\n swap_memory=False,\n name=\"while_context\",\n grad_state=None,\n context_def=None,\n import_scope=None):\n \"\"\"\"Creates a `WhileContext`.\n\n Args:\n maximum_iterations: Optional upper bound on number of loop iterations.\n parallel_iterations: The number of iterations allowed to run in parallel.\n back_prop: Whether backprop is enabled for this while loop.\n swap_memory: Whether GPU-CPU memory swap is enabled for this loop.\n name: Optional name prefix for the returned tensors.\n grad_state: The gradient loop state.\n context_def: Optional `WhileContextDef` protocol buffer to initialize\n the `Whilecontext` python object from.\n import_scope: Optional `string`. Name scope to add. 
Only used when\n initialing from protocol buffer.\n \"\"\"\n if context_def:\n self._init_from_proto(context_def, import_scope=import_scope)\n else:\n ControlFlowContext.__init__(self)\n self._init_from_args(maximum_iterations, parallel_iterations, back_prop,\n swap_memory, name)\n # The gradient loop state.\n self._grad_state = grad_state\n\n def _init_from_args(self, maximum_iterations, parallel_iterations, back_prop,\n swap_memory, name):\n \"\"\"Creates a new `WhileContext` from arguments.\n\n Args:\n maximum_iterations: Optional upper bound on number of loop iterations.\n parallel_iterations: The number of iterations allowed to run in parallel.\n back_prop: Whether backprop is enabled for this while loop.\n swap_memory: Whether GPU-CPU memory swap is enabled for this loop.\n name: Optional name prefix for the returned tensors.\n\n Raises:\n ValueError: If `parallel_iterations` has invalid value.\n \"\"\"\n if not isinstance(parallel_iterations, int) or (parallel_iterations <= 0):\n raise ValueError(\"`parallel_iterations` must be a positive integer: \"\n \"%s\" % parallel_iterations)\n self._name = ops.get_default_graph().unique_name(name)\n self._maximum_iterations = maximum_iterations\n self._parallel_iterations = parallel_iterations\n self._back_prop = back_prop\n self._swap_memory = swap_memory\n # We use this node to control constants created by the pred lambda.\n self._pivot_for_pred = None\n # We use this node to control constants created by the body lambda.\n self._pivot_for_body = None\n # The boolean tensor for loop termination condition. Used in code\n # generation for gradient computation\n self._pivot = None\n # The list of exit tensors for loop variables.\n self._loop_exits = []\n # The list of enter tensors for loop variables.\n self._loop_enters = []\n self._graph = ops.get_default_graph()\n\n def _init_from_proto(self, context_def, import_scope=None):\n \"\"\"Creates a new `WhileContext` from protocol buffer.\n\n Args:\n context_def: `WhileContextDef` protocol buffer.\n import_scope: Optional `string`. Name scope to add.\n \"\"\"\n assert isinstance(context_def, control_flow_pb2.WhileContextDef)\n # Create from context_def.\n g = ops.get_default_graph()\n self._name = ops.prepend_name_scope(context_def.context_name, import_scope)\n if context_def.maximum_iterations_name:\n self._maximum_iterations = g.as_graph_element(\n ops.prepend_name_scope(context_def.maximum_iterations_name,\n import_scope))\n else:\n self._maximum_iterations = None\n self._parallel_iterations = context_def.parallel_iterations\n self._back_prop = context_def.back_prop\n self._swap_memory = context_def.swap_memory\n self._pivot_for_pred = g.as_graph_element(\n ops.prepend_name_scope(context_def.pivot_for_pred_name, import_scope))\n # We use this node to control constants created by the body lambda.\n self._pivot_for_body = g.as_graph_element(\n ops.prepend_name_scope(context_def.pivot_for_body_name, import_scope))\n # The boolean tensor for loop termination condition. 
Used in code\n # generation for gradient computation.\n self._pivot = g.as_graph_element(\n ops.prepend_name_scope(context_def.pivot_name, import_scope))\n # The list of exit tensors for loop variables.\n self._loop_exits = [\n g.as_graph_element(ops.prepend_name_scope(exit_name, import_scope))\n for exit_name in context_def.loop_exit_names\n ]\n # The list of enter tensors for loop variables.\n self._loop_enters = [\n g.as_graph_element(ops.prepend_name_scope(enter_name, import_scope))\n for enter_name in context_def.loop_enter_names\n ]\n super(WhileContext, self).__init__(\n values_def=context_def.values_def, import_scope=import_scope)\n\n # import_scope causes self.name to be different from the original serialized\n # context's name. Rewrite \"frame_name\" attrs with the new name.\n if import_scope:\n for tensor_name in self._values:\n op = g.as_graph_element(tensor_name).op\n if util.IsLoopEnter(op):\n # pylint: disable=protected-access\n op._set_attr(\"frame_name\",\n attr_value_pb2.AttrValue(s=compat.as_bytes(self.name)))\n # pylint: enable=protected-access\n self._graph = ops.get_default_graph()\n\n @property\n def maximum_iterations(self):\n \"\"\"The maximum number of iterations that will be executed.\"\"\"\n return self._maximum_iterations\n\n @property\n def parallel_iterations(self):\n \"\"\"The number of iterations allowed to run in parallel.\"\"\"\n return self._parallel_iterations\n\n @property\n def back_prop(self):\n \"\"\"True iff backprop is enabled for this while loop.\"\"\"\n return self._back_prop\n\n @property\n def swap_memory(self):\n \"\"\"True iff GPU-CPU memory swap is enabled for this while loop.\"\"\"\n return self._swap_memory\n\n @property\n def pivot(self):\n \"\"\"The boolean tensor representing the loop termination condition.\"\"\"\n return self._pivot\n\n @property\n def loop_enters(self):\n \"\"\"The list of enter tensors for loop variables.\"\"\"\n return self._loop_enters\n\n @property\n def loop_exits(self):\n \"\"\"The list of exit tensors for loop variables.\"\"\"\n return self._loop_exits\n\n @property\n def grad_state(self):\n \"\"\"The gradient loop state.\"\"\"\n return self._grad_state\n\n def to_proto(self, export_scope=None):\n \"\"\"Converts a `WhileContext` to a `WhileContextDef` protocol buffer.\n\n Args:\n export_scope: Optional `string`. 
Name scope to remove.\n\n Returns:\n A `WhileContextDef` protocol buffer.\n \"\"\"\n if (export_scope is None or self.name.startswith(export_scope)):\n context_def = control_flow_pb2.WhileContextDef()\n context_def.context_name = ops.strip_name_scope(self.name, export_scope)\n context_def.parallel_iterations = self._parallel_iterations\n if self._maximum_iterations is not None:\n context_def.maximum_iterations_name = ops.strip_name_scope(\n self._maximum_iterations.name, export_scope)\n context_def.back_prop = self._back_prop\n context_def.swap_memory = self._swap_memory\n context_def.pivot_for_pred_name = ops.strip_name_scope(\n self._pivot_for_pred.name, export_scope)\n context_def.pivot_for_body_name = ops.strip_name_scope(\n self._pivot_for_body.name, export_scope)\n context_def.pivot_name = ops.strip_name_scope(self._pivot.name,\n export_scope)\n context_def.loop_exit_names.extend([\n ops.strip_name_scope(l.name, export_scope) for l in self._loop_exits\n ])\n context_def.loop_enter_names.extend([\n ops.strip_name_scope(l.name, export_scope) for l in self._loop_enters\n ])\n context_def.values_def.MergeFrom(\n super(WhileContext, self)._to_values_def(\n export_scope=export_scope))\n for nested in self._nested_contexts:\n nested_def = context_def.nested_contexts.add()\n nested.to_control_flow_context_def(nested_def)\n\n return context_def\n else:\n return None\n\n def to_control_flow_context_def(self, context_def, export_scope=None):\n context_def.while_ctxt.CopyFrom(self.to_proto(export_scope=export_scope))\n\n @staticmethod\n def from_proto(context_def, import_scope=None):\n \"\"\"Returns a `WhileContext` object created from `context_def`.\n\n Args:\n context_def: A `WhileContextDef` protocol buffer.\n import_scope: Optional `string`. Name scope to add.\n\n Returns:\n A `WhileContext` Python object.\n \"\"\"\n ret = WhileContext(context_def=context_def,\n import_scope=import_scope)\n ret.Enter()\n for nested_def in context_def.nested_contexts:\n from_control_flow_context_def(nested_def, import_scope=import_scope)\n ret.Exit()\n return ret\n\n def GetWhileContext(self):\n return self\n\n def GetControlPivot(self):\n if self._pivot_for_body is not None:\n return self._pivot_for_body\n return self._pivot_for_pred\n\n def AddValue(self, val):\n \"\"\"Add `val` to the current context and its outer context recursively.\"\"\"\n result = val\n new_value = val.name not in self._values\n # Don't treat ops in this context as new values. Usually all known values\n # are in self._values, except when we're importing a while loop inside this\n # WhileContext. Since there's a cycle in this case, `val` may be part of the\n # imported while loop but not yet processed by this context and added to\n # self._values in _AddOpInternal. 
We only want to process external input\n # tensors to the while loop here.\n new_value &= val.op._control_flow_context is not self # pylint: disable=protected-access\n if new_value:\n self._values.add(val.name)\n\n # If we are in a grad context and val is from its forward context,\n # use GetRealValue(), which adds the logic to save the history of\n # val in forward.\n grad_ctxt = ops.get_default_graph()._get_control_flow_context()\n if grad_ctxt:\n grad_ctxt = grad_ctxt.GetWhileContext()\n if grad_ctxt.grad_state:\n forward_ctxt = _GetWhileContext(val.op)\n if util.IsLoopExit(val.op):\n forward_ctxt = forward_ctxt.outer_context\n if forward_ctxt:\n forward_ctxt = forward_ctxt.GetWhileContext()\n if forward_ctxt == grad_ctxt.grad_state.forward_context:\n real_val = grad_ctxt.grad_state.GetRealValue(val)\n self._external_values[val.name] = real_val\n return real_val\n\n if self._outer_context is not None:\n result = self._outer_context.AddValue(val)\n # Create an Enter to make `result` known to this loop context.\n with ops.control_dependencies(None):\n enter = _Enter(\n result,\n self._name,\n is_constant=True,\n parallel_iterations=self._parallel_iterations)\n enter.graph.prevent_feeding(enter)\n if self._outer_context:\n self._outer_context.AddInnerOp(enter.op)\n # Fix the control inputs and control flow context of these enter ops.\n self._FixControlInputsAndContext([enter])\n\n # Add `enter` in this context.\n self._values.add(enter.name)\n self._external_values[val.name] = enter\n result = enter\n else:\n actual_val = self._external_values.get(val.name)\n if actual_val is not None:\n result = actual_val\n return result\n\n def AddOp(self, op):\n \"\"\"Add `op` to the current context.\"\"\"\n # For a reduction op, if op is in a grad context and its input is from\n # its forward context, moving op to the forward context means we would\n # store the tensor after the reduction as opposed to the tensor before\n # reduction, and therefore could significantly reduce memory consumption.\n # For now, we do this only for a few ops.\n if op.type in {\"Shape\", \"Size\", \"Rank\"}:\n grad_ctxt = ops.get_default_graph()._get_control_flow_context()\n if grad_ctxt:\n grad_ctxt = grad_ctxt.GetWhileContext()\n if grad_ctxt.grad_state:\n op_input_forward_ctxt = _GetWhileContext(op.inputs[0].op)\n if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context:\n op_input_ctxt = op.inputs[0].op._get_control_flow_context()\n op._set_control_flow_context(op_input_ctxt)\n op_input_ctxt._AddOpInternal(op)\n return\n self._AddOpInternal(op)\n\n def _AddOpInternal(self, op):\n \"\"\"Add `op` to the current context.\n\n We move any external control dependencies of the op to the loop pivot, to\n ensure they get executed.\n \"\"\"\n if not op.inputs:\n # Remove any external control dependency on this op\n control_inputs, external_inputs = self._RemoveExternalControlEdges(op)\n # Add a control edge from the control pivot to this op.\n if not control_inputs:\n # pylint: disable=protected-access\n op._add_control_input(self.GetControlPivot().op)\n # pylint: enable=protected-access\n for x in op.outputs:\n self._values.add(x.name)\n else:\n for index in range(len(op.inputs)):\n x = op.inputs[index]\n real_x = self.AddValue(x)\n if real_x != x:\n op._update_input(index, real_x) # pylint: disable=protected-access\n # Remove any external control dependency on this op.\n _, external_inputs = self._RemoveExternalControlEdges(op)\n # Add a control dependency to prevent loop invariants from\n # enabling ops that should not 
be executed.\n self._MaybeAddControlDependency(op)\n for x in op.outputs:\n self._values.add(x.name)\n if external_inputs:\n # Use an identity to pull control inputs as data inputs. Note that we\n # ignore ops which don't have outputs. TODO(apassos): fix that\n with ops.control_dependencies(None):\n self.Enter()\n external_inputs = [array_ops.identity(x.outputs[0]).op\n for x in external_inputs if x.outputs]\n self.Exit()\n op._add_control_inputs(external_inputs) # pylint: disable=protected-access\n if self._outer_context or not util.IsLoopExit(op):\n op.graph.prevent_fetching(op)\n for x in op.outputs:\n op.graph.prevent_feeding(x)\n\n if self._outer_context:\n self._outer_context.AddInnerOp(op)\n\n def _MaybeAddControlDependency(self, op):\n \"\"\"Add a control input to the op if it only depends on loop invariants.\"\"\"\n\n def _IsOpFree(op):\n \"\"\"Determines if `op` needs a control dependency.\"\"\"\n if op.control_inputs:\n return False\n # pylint: disable=protected-access\n if op.graph._is_function(op.type) or op.type == \"SymbolicGradient\":\n return True\n # pylint: enable=protected-access\n for x in op.inputs:\n if not util.IsLoopConstantEnter(x.op):\n return False\n return True\n\n if _IsOpFree(op):\n # pylint: disable=protected-access\n op._add_control_input(self.GetControlPivot().op)\n # pylint: enable=protected-access\n\n def AddForwardLoopCounter(self, outer_grad_state):\n \"\"\"Adds a loop that counts the number of iterations.\n\n This is added to the forward loop at the time when we start to\n create the loop for backprop gradient computation. Called in\n the outer context of this forward context.\n\n The pseudocode is:\n `n = 0; while (_pivot) { n++; }`\n\n Note that a control dependency is added to `n` to ensure the correct\n execution order of stack push ops.\n\n Args:\n outer_grad_state: The outer grad state. None if not nested.\n\n Returns:\n The number of iterations taken by the forward loop and the loop index.\n \"\"\"\n n = constant_op.constant(0, name=\"f_count\")\n if outer_grad_state is not None:\n # Force the stack pushes of i-th execution of an inner loop to be ordered\n # before the pushes of (i+1)-th execution of the same inner loop.\n outer_add_op = outer_grad_state.forward_index.op.inputs[0].op\n n.op._add_control_input(outer_add_op) # pylint: disable=protected-access\n\n self.Enter()\n self.AddName(n.name)\n enter_n = _Enter(\n n,\n self._name,\n is_constant=False,\n parallel_iterations=self._parallel_iterations,\n name=\"f_count\")\n self.loop_enters.append(enter_n)\n\n merge_n = merge([enter_n, enter_n])[0]\n switch_n = switch(merge_n, self._pivot)\n\n index = math_ops.add(switch_n[1], 1)\n next_n = _NextIteration(index)\n merge_n.op._update_input(1, next_n)\n\n total_iterations = exit(switch_n[0], name=\"f_count\")\n self.loop_exits.append(total_iterations)\n self.ExitResult([total_iterations])\n self.Exit()\n return total_iterations, next_n\n\n def AddBackpropLoopCounter(self, count, outer_grad_state):\n \"\"\"Add the backprop loop that controls the iterations.\n\n This is added to the backprop loop. It is used to control the loop\n termination of the backprop loop. Called in the outer context of\n this grad context.\n\n The pseudocode is:\n `n = count; while (n >= 1) { n--; }`\n\n Note that a control dependency is added to `final_zero` to ensure the\n correct execution order of stack pop ops.\n\n Args:\n count: The number of iterations for backprop.\n outer_grad_state: The outer grad state. 
None if not nested.\n\n Returns:\n The loop index.\n \"\"\"\n in_separate_functions = count.graph is not ops.get_default_graph()\n if in_separate_functions:\n # Brings the count into this graph\n count = array_ops.identity(count)\n else:\n # TODO(apassos) XLA expects this constant to be created outside the loop,\n # so doing that for now.\n one = constant_op.constant(1, name=\"b_count\")\n\n self.Enter()\n self.AddName(count.name)\n enter_count = _Enter(\n count,\n self._name,\n is_constant=False,\n parallel_iterations=self._parallel_iterations,\n name=\"b_count\")\n self.loop_enters.append(enter_count)\n\n merge_count = merge([enter_count, enter_count])[0]\n self._pivot_for_pred = merge_count\n\n if in_separate_functions:\n one = constant_op.constant(1, name=\"b_count\")\n pred = math_ops.greater_equal(merge_count, one)\n self._pivot = loop_cond(pred, name=\"b_count\")\n switch_count = switch(merge_count, self._pivot)\n\n index = math_ops.subtract(switch_count[1], one)\n self._pivot_for_body = index\n next_count = _NextIteration(index)\n merge_count.op._update_input(1, next_count)\n\n final_zero = exit(switch_count[0], name=\"b_count\")\n self.loop_exits.append(final_zero)\n if outer_grad_state is not None:\n # Force the stack pops of i-th execution of an inner loop to be ordered\n # before the pops of (i+1)-th execution of the same inner loop.\n # pylint: disable=protected-access\n outer_grad_state.grad_sync._add_control_input(final_zero.op)\n # pylint: enable=protected-access\n\n self.ExitResult([final_zero])\n self.Exit()\n return next_count\n\n def AddBackpropAccumulator(self, op, grad):\n \"\"\"Add an accumulation loop for every loop invariant.\n\n This is added to the backprop loop. It is used to accumulate partial\n gradients within each loop iteration. Called when in the gradient while\n context.\n\n The pseudocode is:\n ```\n acc = 0.0;\n while (_pivot) {\n acc += grad;\n }\n ```\n\n Args:\n op: The Enter op for a loop invariant.\n grad: The partial gradient of an iteration for a loop invariant.\n\n Returns:\n The gradient for a loop invariant.\n \"\"\"\n self.Exit()\n # Create a zeros tensor with the right shape for acc. If we don't\n # know the full shape statically, we will have to get the shape\n # dynamically from the forward inference. 
Getting the shape right\n # for the zeros is only needed for the base case when the loop exits\n # without running any iterations.\n shape = grad.get_shape()\n if shape.is_fully_defined():\n if self.outer_context:\n self.outer_context.Enter()\n acc = constant_op.constant(0, grad.dtype, shape=shape, name=\"b_acc\")\n if self.outer_context:\n self.outer_context.Exit()\n else:\n value = op.inputs[0]\n if (isinstance(self.outer_context, WhileContext) and\n self.outer_context.grad_state is not None):\n # We are in a nested while loop.\n forward_ctxt = self.grad_state.forward_context\n forward_ctxt.outer_context.Enter()\n zeros_shape = array_ops.shape_internal(value, optimize=False)\n forward_ctxt.outer_context.Exit()\n outer_grad_state = self.grad_state.outer_grad_state\n history_zeros_shape = outer_grad_state.AddForwardAccumulator(\n zeros_shape)\n self.outer_context.Enter()\n real_shape = outer_grad_state.AddBackpropAccumulatedValue(\n history_zeros_shape, zeros_shape)\n acc = array_ops.zeros(real_shape, grad.dtype)\n self.outer_context.Exit()\n else:\n if self.outer_context:\n self.outer_context.Enter()\n zeros_shape = array_ops.shape_internal(value, optimize=False)\n acc = array_ops.zeros(zeros_shape, grad.dtype)\n if self.outer_context:\n self.outer_context.Exit()\n\n self.Enter()\n self.AddName(acc.name)\n enter_acc = _Enter(\n acc,\n self._name,\n is_constant=False,\n parallel_iterations=self._parallel_iterations,\n name=\"b_acc\")\n self.loop_enters.append(enter_acc)\n\n merge_acc = merge([enter_acc, enter_acc], name=\"b_acc\")[0]\n switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot)\n\n add_acc = math_ops.add(switch_acc_true, grad)\n next_acc = _NextIteration(add_acc)\n merge_acc.op._update_input(1, next_acc) # pylint: disable=protected-access\n\n result_acc = exit(switch_acc_false, name=\"b_acc\")\n self.loop_exits.append(result_acc)\n self.ExitResult([result_acc])\n return result_acc\n\n def AddBackpropIndexedSlicesAccumulator(self, op, grad):\n \"\"\"This is used for accumulating gradients that are IndexedSlices.\n\n This is essentially the equivalent of AddBackpropAccumulator but optimized\n for things like updating embeddings from within a while loop.\n\n Args:\n op: The Enter op for a loop invariant.\n grad: The partial gradients represented as an IndexedSlices.\n\n Returns:\n The accumulated IndexedSlices gradient of the loop invariant.\n \"\"\"\n values = grad.values\n indices = grad.indices\n dense_shape = grad.dense_shape\n\n self.Exit()\n if self.outer_context:\n self.outer_context.Enter()\n if values.get_shape().is_fully_defined():\n values_shape = tensor_shape.TensorShape(\n [tensor_shape.Dimension(1)] + values.get_shape().dims[1:])\n if self.outer_context:\n self.outer_context.Enter()\n values_acc = constant_op.constant(\n 0, values.dtype, shape=values_shape, name=\"b_acc\")\n if self.outer_context:\n self.outer_context.Exit()\n else:\n values_shape = _resource_safe_shape(op.inputs[0])[1:]\n values_shape = array_ops.concat([[1], values_shape], 0)\n values_acc = array_ops.zeros(values_shape, dtype=values.dtype)\n indices_acc = constant_op.constant([0], indices.dtype)\n shape_acc = None\n if dense_shape is not None:\n if dense_shape.get_shape().is_fully_defined():\n if self.outer_context:\n self.outer_context.Enter()\n shape_acc = constant_op.constant(\n 0, dense_shape.dtype, shape=dense_shape.get_shape())\n if self.outer_context:\n self.outer_context.Exit()\n else:\n shape_acc = array_ops.zeros_like(\n array_ops.shape_internal(op.inputs[0], optimize=False,\n 
out_type=dense_shape.dtype),\n optimize=False)\n\n if self.outer_context:\n self.outer_context.Exit()\n\n self.Enter()\n self.AddName(values_acc.name)\n self.AddName(indices_acc.name)\n init_acc = [indices_acc, values_acc]\n if shape_acc is not None:\n self.AddName(shape_acc.name)\n init_acc.append(shape_acc)\n\n # Set use_input_shape=False since the accumulator tensors will grow in\n # size. If use_input_shape=True, the _update_input call below will result in\n # incompatible shapes.\n enter_acc = [\n _Enter(\n x,\n self._name,\n is_constant=False,\n parallel_iterations=self._parallel_iterations,\n use_input_shape=False,\n name=\"b_acc\") for x in init_acc\n ]\n # Manually set appropriate partial shapes.\n enter_acc[0].set_shape([None])\n if values_acc.shape.dims is not None:\n enter_acc[1].set_shape([None] + values_acc.shape.as_list()[1:])\n self.loop_enters.extend(enter_acc)\n\n merge_acc = [merge([x, x], name=\"b_acc\")[0] for x in enter_acc]\n switch_acc = [switch(x, self._pivot) for x in merge_acc]\n\n # The actual accumulation.\n acc_indexed_slices = [\n array_ops.concat([xa[1], xv], 0)\n for xa, xv in zip(switch_acc[:2], [indices, values])\n ]\n if shape_acc is not None:\n # For the shape we just keep the maximum\n acc_indexed_slices.append(math_ops.maximum(dense_shape, switch_acc[2][1]))\n\n next_acc = [_NextIteration(x) for x in acc_indexed_slices]\n for xm, xn in zip(merge_acc, next_acc):\n xm.op._update_input(1, xn) # pylint: disable=protected-access\n\n exit_acc = [exit(x[0], name=\"b_acc\") for x in switch_acc]\n self.loop_exits.extend(exit_acc)\n\n self.ExitResult(exit_acc)\n return ops.IndexedSlices(\n indices=exit_acc[0],\n values=exit_acc[1],\n dense_shape=exit_acc[2] if shape_acc is not None else None)\n\n def _InitializeValues(self, values):\n \"\"\"Makes the values known to this context.\"\"\"\n self._values = set()\n for x in values:\n if isinstance(x, ops.Tensor):\n self._values.add(x.name)\n else:\n self._values.add(x.values.name)\n self._values.add(x.indices.name)\n if isinstance(x, ops.IndexedSlices):\n dense_shape = x.dense_shape\n elif isinstance(x, sparse_tensor.SparseTensor):\n dense_shape = x.dense_shape\n else:\n raise TypeError(\"Type %s not supported\" % type(x))\n if dense_shape is not None:\n self._values.add(dense_shape.name)\n\n def _BuildLoop(self, pred, body, original_loop_vars, loop_vars,\n shape_invariants):\n \"\"\"Core: Add the loop termination condition and body to the graph.\"\"\"\n flat_loop_vars = nest.flatten(original_loop_vars)\n\n # Let the context know the loop variables so the loop variables\n # would be added in the outer contexts properly.\n self._InitializeValues(loop_vars)\n real_vars = loop_vars\n if self._outer_context:\n real_vars = [self._outer_context.AddValue(x) for x in loop_vars]\n with ops.control_dependencies(None):\n enter_vars = [\n _Enter(\n x,\n self._name,\n is_constant=False,\n parallel_iterations=self._parallel_iterations,\n use_input_shape=(shape_invariants is None)) for x in real_vars\n ]\n for x in enter_vars:\n x.graph.prevent_feeding(x)\n if self._outer_context:\n self._outer_context.AddInnerOp(x.op)\n\n # Finds the closest enclosing non-None control pivot.\n outer_context = self._outer_context\n control_pivot = None\n while outer_context is not None and control_pivot is None:\n control_pivot = outer_context.GetControlPivot()\n # pylint: disable=protected-access\n outer_context = outer_context._outer_context\n # pylint: enable=protected-access\n\n if control_pivot is not None:\n for var in enter_vars:\n if 
util.IsLoopConstantEnter(var.op.inputs[0].op):\n # pylint: disable=protected-access\n var.op._add_control_input(control_pivot.op)\n # pylint: enable=protected-access\n _SetShapeInvariants(real_vars, enter_vars, shape_invariants)\n\n # Fix the control inputs and control flow context of these enter ops.\n self._FixControlInputsAndContext(enter_vars)\n self._InitializeValues(enter_vars)\n self._loop_enters = enter_vars\n\n merge_vars = [merge([x, x])[0] for x in enter_vars]\n self._pivot_for_pred = merge_vars[0]\n\n # Build the graph for pred.\n merge_vars_with_tensor_arrays = (\n _convert_flows_to_tensorarrays(flat_loop_vars, merge_vars))\n packed_vars = nest.pack_sequence_as(\n structure=original_loop_vars,\n flat_sequence=merge_vars_with_tensor_arrays)\n c = ops.convert_to_tensor(pred(*packed_vars))\n self._pivot = loop_cond(c, name=\"LoopCond\")\n switch_vars = [_SwitchRefOrTensor(x, self._pivot) for x in merge_vars]\n\n # Build the graph for body.\n vars_for_body = [_Identity(x[1]) for x in switch_vars]\n self._pivot_for_body = vars_for_body[0]\n # Convert TensorArray flow variables inside the context back into\n # their associated TensorArrays for calling the body.\n vars_for_body_with_tensor_arrays = (\n _convert_flows_to_tensorarrays(flat_loop_vars, vars_for_body))\n packed_vars_for_body = nest.pack_sequence_as(\n structure=original_loop_vars,\n flat_sequence=vars_for_body_with_tensor_arrays)\n pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access\n body_result = body(*packed_vars_for_body)\n post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access\n if not nest.is_sequence(body_result):\n body_result = [body_result]\n if len(post_summaries) > len(pre_summaries):\n new_summaries = post_summaries[len(pre_summaries):]\n summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access\n summary_ref[:] = pre_summaries\n with ops.control_dependencies(new_summaries):\n\n def map_fn(x):\n # TODO(apassos) figure out how to trigger with tensor arrays as well\n if isinstance(x, tensor_array_ops.TensorArray):\n return x\n return array_ops.identity(x)\n\n body_result = nest.map_structure(map_fn, body_result)\n\n # Compare the structure types of input and output of body.\n # For backwards compatibility, the first layer is forced to a list\n # during this comparison, because inputs are typically lists and\n # outputs of the body are typically tuples.\n nest.assert_same_structure(list(packed_vars_for_body), list(body_result))\n\n # Store body_result to keep track of TensorArrays returned by body\n original_body_result = body_result\n # Convert TensorArrays returned by body into their flow variables\n result = nest.map_structure(_convert_tensorarray_to_flow,\n nest.flatten(body_result))\n result = ops.convert_n_to_tensor_or_indexed_slices(result)\n\n # Add NextIteration and the back edges to complete the loop.\n if len(merge_vars) != len(result):\n raise ValueError(\"Number of inputs and outputs of body must match \"\n \"loop_vars: %d, %d\" % (len(merge_vars), len(result)))\n next_vars = []\n for m, v in zip(merge_vars, result):\n next_vars.append(_AddNextAndBackEdge(m, v))\n\n # Add the exit ops.\n exit_vars = [exit(x[0]) for x in switch_vars]\n self._loop_exits = exit_vars\n\n # Exit the loop.\n self.ExitResult(exit_vars)\n\n return original_body_result, exit_vars\n\n def BuildLoop(self, pred, body, loop_vars, shape_invariants,\n return_same_structure):\n 
\"\"\"Add the loop termination condition and body to the graph.\"\"\"\n\n # Keep original_loop_vars to identify which are TensorArrays\n original_loop_vars = loop_vars\n # Convert TensorArrays to their flow variables\n loop_vars = nest.map_structure(_convert_tensorarray_to_flow,\n nest.flatten(loop_vars))\n loop_vars = ops.convert_n_to_tensor_or_indexed_slices(loop_vars)\n try:\n self.Enter()\n # _BuildLoop calls _update_input in several places. _mutation_lock()\n # ensures a Session.run call cannot occur between creating and mutating\n # new ops.\n with ops.get_default_graph()._mutation_lock(): # pylint: disable=protected-access\n original_body_result, exit_vars = self._BuildLoop(\n pred, body, original_loop_vars, loop_vars, shape_invariants)\n finally:\n self.Exit()\n\n flat_result = nest.flatten(original_body_result)\n # Convert TensorArray flow variables outside the context back into\n # their associated TensorArrays for returning to caller.\n exit_vars_with_tensor_arrays = (\n _convert_flows_to_tensorarrays(flat_result, exit_vars))\n packed_exit_vars = nest.pack_sequence_as(\n structure=original_body_result,\n flat_sequence=exit_vars_with_tensor_arrays)\n\n if return_same_structure:\n return packed_exit_vars\n else:\n return packed_exit_vars[0] if len(exit_vars) == 1 else packed_exit_vars\n\n def _FixControlInputsAndContext(self, enters):\n graph = ops.get_default_graph()\n # pylint: disable=protected-access\n for e in enters:\n if isinstance(e, ops.Tensor):\n xs = [e]\n else:\n if not isinstance(e, (ops.IndexedSlices, sparse_tensor.SparseTensor)):\n raise TypeError(\"Type %s not supported\" % type(e))\n xs = [e.values, e.indices]\n shape = e.dense_shape\n if shape is not None:\n xs.append(shape)\n for x in xs:\n inp_op = x.op.inputs[0].op\n control_inputs = graph._control_dependencies_for_inputs([inp_op])\n outer_control_inputs = [\n op for op in control_inputs if self._IsInOuterContext(op)\n ]\n x.op._set_control_flow_context(self)\n x.op._add_control_inputs(outer_control_inputs)\n graph._record_op_seen_by_control_dependencies(x.op)\n # pylint: enable=protected-access\n\n def IsWhileContext(self):\n return True\n\n\n# pylint: disable=redefined-outer-name\n@tf_export(\"while_loop\")\ndef while_loop(cond,\n body,\n loop_vars,\n shape_invariants=None,\n parallel_iterations=10,\n back_prop=True,\n swap_memory=False,\n name=None,\n maximum_iterations=None,\n return_same_structure=False):\n \"\"\"Repeat `body` while the condition `cond` is true.\n\n `cond` is a callable returning a boolean scalar tensor. `body` is a callable\n returning a (possibly nested) tuple, namedtuple or list of tensors of the same\n arity (length and structure) and types as `loop_vars`. `loop_vars` is a\n (possibly nested) tuple, namedtuple or list of tensors that is passed to both\n `cond` and `body`. `cond` and `body` both take as many arguments as there are\n `loop_vars`.\n\n In addition to regular Tensors or IndexedSlices, the body may accept and\n return TensorArray objects. The flows of the TensorArray objects will\n be appropriately forwarded between loops and during gradient calculations.\n\n Note that `while_loop` calls `cond` and `body` *exactly once* (inside the\n call to `while_loop`, and not at all during `Session.run()`). 
`while_loop`\n stitches together the graph fragments created during the `cond` and `body`\n calls with some additional graph nodes to create the graph flow that\n repeats `body` until `cond` returns false.\n\n For correctness, `tf.while_loop()` strictly enforces shape invariants for\n the loop variables. A shape invariant is a (possibly partial) shape that\n is unchanged across the iterations of the loop. An error will be raised\n if the shape of a loop variable after an iteration is determined to be more\n general than or incompatible with its shape invariant. For example, a shape\n of [11, None] is more general than a shape of [11, 17], and [11, 21] is not\n compatible with [11, 17]. By default (if the argument `shape_invariants` is\n not specified), it is assumed that the initial shape of each tensor in\n `loop_vars` is the same in every iteration. The `shape_invariants` argument\n allows the caller to specify a less specific shape invariant for each loop\n variable, which is needed if the shape varies between iterations. The\n `tf.Tensor.set_shape`\n function may also be used in the `body` function to indicate that\n the output loop variable has a particular shape. The shape invariant for\n SparseTensor and IndexedSlices are treated specially as follows:\n\n a) If a loop variable is a SparseTensor, the shape invariant must be\n TensorShape([r]) where r is the rank of the dense tensor represented\n by the sparse tensor. It means the shapes of the three tensors of the\n SparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here\n is the shape of the SparseTensor.dense_shape property. It must be the shape of\n a vector.\n\n b) If a loop variable is an IndexedSlices, the shape invariant must be\n a shape invariant of the values tensor of the IndexedSlices. It means\n the shapes of the three tensors of the IndexedSlices are (shape, [shape[0]],\n [shape.ndims]).\n\n `while_loop` implements non-strict semantics, enabling multiple iterations\n to run in parallel. The maximum number of parallel iterations can be\n controlled by `parallel_iterations`, which gives users some control over\n memory consumption and execution order. For correct programs, `while_loop`\n should return the same result for any parallel_iterations > 0.\n\n For training, TensorFlow stores the tensors that are produced in the\n forward inference and are needed in back propagation. These tensors are a\n main source of memory consumption and often cause OOM errors when training\n on GPUs. When the flag swap_memory is true, we swap out these tensors from\n GPU to CPU. This for example allows us to train RNN models with very long\n sequences and large batches.\n\n Args:\n cond: A callable that represents the termination condition of the loop.\n body: A callable that represents the loop body.\n loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array,\n `Tensor`, and `TensorArray` objects.\n shape_invariants: The shape invariants for the loop variables.\n parallel_iterations: The number of iterations allowed to run in parallel.\n It must be a positive integer.\n back_prop: Whether backprop is enabled for this while loop.\n swap_memory: Whether GPU-CPU memory swap is enabled for this loop.\n name: Optional name prefix for the returned tensors.\n maximum_iterations: Optional maximum number of iterations of the while loop\n to run. 
If provided, the `cond` output is AND-ed with an additional\n condition ensuring the number of iterations executed is no greater than\n `maximum_iterations`.\n return_same_structure: If True, output has same structure as `loop_vars`. If\n eager execution is enabled, this is ignored (and always treated as True).\n\n Returns:\n The output tensors for the loop variables after the loop.\n If `return_same_structure` is True, the return value has the same\n structure as `loop_vars`.\n If `return_same_structure` is False, the return value is a Tensor,\n TensorArray or IndexedSlice if the length of `loop_vars` is 1, or a list\n otherwise.\n\n Raises:\n TypeError: if `cond` or `body` is not callable.\n ValueError: if `loop_vars` is empty.\n\n Example:\n\n ```python\n i = tf.constant(0)\n c = lambda i: tf.less(i, 10)\n b = lambda i: tf.add(i, 1)\n r = tf.while_loop(c, b, [i])\n ```\n\n Example with nesting and a namedtuple:\n\n ```python\n import collections\n Pair = collections.namedtuple('Pair', 'j, k')\n ijk_0 = (tf.constant(0), Pair(tf.constant(1), tf.constant(2)))\n c = lambda i, p: i < 10\n b = lambda i, p: (i + 1, Pair((p.j + p.k), (p.j - p.k)))\n ijk_final = tf.while_loop(c, b, ijk_0)\n ```\n\n Example using shape_invariants:\n\n ```python\n i0 = tf.constant(0)\n m0 = tf.ones([2, 2])\n c = lambda i, m: i < 10\n b = lambda i, m: [i+1, tf.concat([m, m], axis=0)]\n tf.while_loop(\n c, b, loop_vars=[i0, m0],\n shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])])\n ```\n\n Example which demonstrates non-strict semantics: In the following\n example, the final value of the counter `i` does not depend on `x`. So\n the `while_loop` can increment the counter parallel to updates of `x`.\n However, because the loop counter at one loop iteration depends\n on the value at the previous iteration, the loop counter itself cannot\n be incremented in parallel. Hence if we just want the final value of the\n counter (which we print on the line `print(sess.run(i))`), then\n `x` will never be incremented, but the counter will be updated on a\n single thread. Conversely, if we want the value of the output (which we\n print on the line `print(sess.run(out).shape)`), then the counter may be\n incremented on its own thread, while `x` can be incremented in\n parallel on a separate thread. In the extreme case, it is conceivable\n that the thread incrementing the counter runs until completion before\n `x` is incremented even a single time. The only thing that can never\n happen is that the thread updating `x` can never get ahead of the\n counter thread because the thread incrementing `x` depends on the value\n of the counter.\n\n ```python\n import tensorflow as tf\n\n n = 10000\n x = tf.constant(list(range(n)))\n c = lambda i, x: i < n\n b = lambda i, x: (tf.Print(i + 1, [i]), tf.Print(x + 1, [i], \"x:\"))\n i, out = tf.while_loop(c, b, (0, x))\n with tf.Session() as sess:\n print(sess.run(i)) # prints [0] ... [9999]\n\n # The following line may increment the counter and x in parallel.\n # The counter thread may get ahead of the other thread, but not the\n # other way around. So you may see things like\n # [9996] x:[9987]\n # meaning that the counter thread is on iteration 9996,\n # while the other thread is on iteration 9987\n print(sess.run(out).shape)\n ```\n\n \"\"\"\n if ENABLE_WHILE_V2 and not context.executing_eagerly():\n if not _while_v2:\n raise ValueError(\"The while_v2 module is not set. 
Did you forget to \"\n \"import tensorflow.python.ops.\"\n \"while_v2?\")\n return _while_v2.while_loop(\n cond, body, loop_vars, shape_invariants=shape_invariants, name=name)\n\n with ops.name_scope(name, \"while\", loop_vars):\n if not loop_vars:\n raise ValueError(\"No loop variables provided\")\n if not callable(cond):\n raise TypeError(\"cond must be callable.\")\n if not callable(body):\n raise TypeError(\"body must be callable.\")\n if parallel_iterations < 1:\n raise TypeError(\"parallel_iterations must be a positive integer.\")\n\n if maximum_iterations is not None:\n maximum_iterations = ops.convert_to_tensor(\n maximum_iterations, name=\"maximum_iterations\")\n if maximum_iterations.shape.ndims != 0:\n raise ValueError(\"maximum_iterations must be a scalar, saw shape: %s\" %\n maximum_iterations.shape)\n\n counter = constant_op.constant(\n 0, dtype=maximum_iterations.dtype, name=\"iteration_counter\")\n orig_cond = cond\n orig_body = body\n if len(loop_vars) == 1:\n loop_vars = (counter, loop_vars[0])\n cond = lambda i, lv: ( # pylint: disable=g-long-lambda\n math_ops.logical_and(i < maximum_iterations, orig_cond(lv)))\n body = lambda i, lv: (i + 1, orig_body(lv))\n else:\n loop_vars = (counter, loop_vars)\n cond = lambda i, lv: ( # pylint: disable=g-long-lambda\n math_ops.logical_and(i < maximum_iterations, orig_cond(*lv)))\n body = lambda i, lv: (i + 1, orig_body(*lv))\n\n if context.executing_eagerly():\n try_to_pack = len(loop_vars) == 1\n packed = False # whether the body result was packed into a 1-item tuple\n\n while cond(*loop_vars):\n loop_vars = body(*loop_vars)\n if try_to_pack and not isinstance(loop_vars, (list, _basetuple)):\n packed = True\n loop_vars = (loop_vars,)\n if maximum_iterations is not None:\n return loop_vars[1]\n else:\n return loop_vars[0] if packed else loop_vars\n\n if shape_invariants is not None:\n if maximum_iterations is not None:\n shape_invariants = (tensor_shape.TensorShape([]), shape_invariants)\n nest.assert_same_structure(loop_vars, shape_invariants)\n\n loop_context = WhileContext(\n maximum_iterations=maximum_iterations,\n parallel_iterations=parallel_iterations,\n back_prop=back_prop,\n swap_memory=swap_memory)\n # Only add non-nested loops to the collection. 
Any nested control flow will\n # be encapsulated in the root context.\n if loop_context.outer_context is None:\n ops.add_to_collection(ops.GraphKeys.WHILE_CONTEXT, loop_context)\n result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants,\n return_same_structure)\n if maximum_iterations is not None:\n return result[1]\n else:\n return result\n\n\n# pylint: enable=redefined-outer-name\n\n\ndef _AsTensorList(x, p):\n \"\"\"Return x as a list of Tensors or IndexedSlices.\n\n For entries of `x` that are Operations, this returns an Identity of `p`\n with a dependency on the operation.\n\n Args:\n x: A Tensor/IndexedSlices/Operation or a list or tuple of them.\n p: A Tensor to return for entries in `x` that are Operations.\n\n Returns:\n A list of Tensors or IndexedSlices.\n \"\"\"\n if not isinstance(x, (list, _basetuple)):\n x = [x]\n\n l = []\n for v in x:\n if isinstance(v, ops.Operation):\n v = with_dependencies([v], p)\n v = ops.convert_to_tensor_or_indexed_slices(v)\n if isinstance(v, ops.Tensor):\n l.append(array_ops.identity(v))\n else:\n l.append(\n ops.IndexedSlices(\n array_ops.identity(v.values), array_ops.identity(v.indices)))\n return l\n\n\ndef _CheckResults(a, b):\n assert len(a) == len(b), (\n \"Values returned by a() and b() must have the same length.\")\n for x, y in zip(a, b):\n assert x.dtype == y.dtype, (\n \"Values returned by a() [%s] and b() [%s] must have \"\n \"the same type: %s, %s.\" % (x.name, y.name, x.dtype.name, y.dtype.name))\n\n\ndef with_dependencies(dependencies, output_tensor, name=None):\n \"\"\"Produces the content of `output_tensor` only after `dependencies`.\n\n In some cases, a user may want the output of an operation to be\n consumed externally only after some other dependencies have run\n first. This function ensures returns `output_tensor`, but only after all\n operations in `dependencies` have run. Note that this means that there is\n no guarantee that `output_tensor` will be evaluated after any `dependencies`\n have run.\n\n See also `tf.tuple` and `tf.group`.\n\n Args:\n dependencies: Iterable of operations to run before this op finishes.\n output_tensor: A `Tensor` or `IndexedSlices` that will be returned.\n name: (Optional) A name for this operation.\n\n Returns:\n Same as `output_tensor`.\n\n Raises:\n TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.\n \"\"\"\n if context.executing_eagerly():\n return output_tensor\n with ops.name_scope(name, \"control_dependency\",\n list(dependencies) + [output_tensor]) as name:\n with ops.colocate_with(output_tensor):\n with ops.control_dependencies(dependencies):\n output_tensor = ops.convert_to_tensor_or_indexed_slices(output_tensor)\n if isinstance(output_tensor, ops.Tensor):\n return _Identity(output_tensor, name=name)\n else:\n return ops.IndexedSlices(\n _Identity(output_tensor.values, name=name), output_tensor.indices,\n output_tensor.dense_shape)\n\n\ndef _GroupControlDeps(dev, deps, name=None):\n with ops.control_dependencies(deps):\n if dev is None:\n return no_op(name=name)\n else:\n with ops.device(dev):\n return no_op(name=name)\n\n\n# TODO(touts): Accept \"inputs\" as a list.\n@tf_export(\"group\")\ndef group(*inputs, **kwargs):\n \"\"\"Create an op that groups multiple operations.\n\n When this op finishes, all ops in `inputs` have finished. 
This op has no\n output.\n\n See also `tf.tuple` and\n `tf.control_dependencies`.\n\n Args:\n *inputs: Zero or more tensors to group.\n name: A name for this operation (optional).\n\n Returns:\n An Operation that executes all its inputs.\n\n Raises:\n ValueError: If an unknown keyword argument is provided.\n \"\"\"\n if context.executing_eagerly():\n return None\n name = kwargs.pop(\"name\", None)\n if kwargs:\n raise ValueError(\"Unknown keyword arguments: \" + \", \".join(kwargs.keys()))\n with ops.name_scope(name, \"group_deps\", inputs) as name:\n # Grouping no inputs means do nothing\n if not inputs:\n return no_op(name=name)\n\n # Sorts *inputs according to their devices.\n ops_on_device = {} # device -> operations specified on the device.\n for inp in nest.flatten(inputs):\n if not hasattr(inp, \"device\"):\n raise TypeError(\"Expected tf.group() expected Tensor arguments not \"\n \"'%s' with type '%s'\" % (inp, type(inp)))\n dev = inp.device\n if dev in ops_on_device:\n ops_on_device[dev].append(inp)\n else:\n ops_on_device[dev] = [inp]\n if len(ops_on_device) == 1:\n # 1-level tree. The root node is the returned NoOp node.\n (dev, deps), = ops_on_device.items()\n return _GroupControlDeps(dev, deps, name=name)\n\n # 2-level tree. The root node is the returned NoOp node.\n # deps contains 1 NoOp node for each device.\n deps = []\n\n def device_key(dev):\n \"\"\"A sort key that allows None to be compared to strings.\"\"\"\n return \"\" if dev is None else dev\n\n for dev in sorted(six.iterkeys(ops_on_device), key=device_key):\n deps.append(_GroupControlDeps(dev, ops_on_device[dev]))\n\n with ops.control_dependencies(deps):\n return no_op(name=name)\n\n\n@tf_export(\"tuple\")\ndef tuple(tensors, name=None, control_inputs=None): # pylint: disable=redefined-builtin\n \"\"\"Group tensors together.\n\n This creates a tuple of tensors with the same values as the `tensors`\n argument, except that the value of each tensor is only returned after the\n values of all tensors have been computed.\n\n `control_inputs` contains additional ops that have to finish before this op\n finishes, but whose outputs are not returned.\n\n This can be used as a \"join\" mechanism for parallel computations: all the\n argument tensors can be computed in parallel, but the values of any tensor\n returned by `tuple` are only available after all the parallel computations\n are done.\n\n See also `tf.group` and\n `tf.control_dependencies`.\n\n Args:\n tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.\n name: (optional) A name to use as a `name_scope` for the operation.\n control_inputs: List of additional ops to finish before returning.\n\n Returns:\n Same as `tensors`.\n\n Raises:\n ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.\n TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`\n objects.\n\n \"\"\"\n if context.executing_eagerly():\n return tensors\n with ops.name_scope(name, \"tuple\", tensors) as name:\n tensors = [t if (isinstance(t, ops.Operation)\n or tensor_util.is_tensor(t)\n or t is None)\n else ops.convert_to_tensor(t) for t in tensors]\n gating_ops = [t if isinstance(t, ops.Operation) else t.op for t in tensors\n if t is not None]\n if control_inputs:\n for c in control_inputs:\n if isinstance(c, ops.Tensor):\n c = c.op\n elif not isinstance(c, ops.Operation):\n raise TypeError(\"Control input must be Operation or Tensor: %s\" % c)\n gating_ops.append(c)\n # Note that in order to ensure ordering in the pbtxt, we must take care 
to\n # ensure the order here.\n gating_ops = sorted(set(gating_ops), key=lambda op: op._id) # Uniquify ops.\n if not gating_ops:\n raise ValueError(\"Must have at least one Tensor: %s\" % tensors)\n gate = group(*gating_ops)\n tpl = []\n for t in tensors:\n if tensor_util.is_tensor(t):\n tpl.append(with_dependencies([gate], t))\n elif isinstance(t, ops.Operation):\n with ops.control_dependencies([gate]):\n tpl.append(group(t))\n else:\n tpl.append(None)\n return tpl\n\n\ndef _assert_at_most_n_true(predicates, n, msg):\n \"\"\"Returns an Assert op that checks that at most n predicates are True.\n\n Args:\n predicates: list of bool scalar tensors.\n n: maximum number of true predicates allowed.\n msg: Error message.\n \"\"\"\n preds_c = array_ops.stack(predicates, name=\"preds_c\")\n num_true_conditions = math_ops.reduce_sum(\n math_ops.cast(preds_c, dtypes.int32), name=\"num_true_conds\")\n condition = math_ops.less_equal(num_true_conditions,\n constant_op.constant(n, name=\"n_true_conds\"))\n preds_names = \", \".join(getattr(p, \"name\", \"?\") for p in predicates)\n error_msg = [\n \"%s: more than %d conditions (%s) evaluated as True:\" %\n (msg, n, preds_names), preds_c\n ]\n return Assert(condition, data=error_msg, summarize=len(predicates))\n\n\ndef _case_create_default_action(predicates, actions):\n \"\"\"Creates default action for a list of actions and their predicates.\n\n It uses the input actions to select an arbitrary as default and makes sure\n that corresponding predicates have valid values.\n\n Args:\n predicates: a list of bool scalar tensors\n actions: a list of callable objects which return tensors.\n\n Returns:\n a callable\n \"\"\"\n k = len(predicates) - 1 # could pick any\n predicate, action = predicates[k], actions[k]\n other_predicates, other_actions = predicates[:k], actions[:k]\n\n def default_action():\n others_msg = (\"Implementation error: \"\n \"selected default action #%d was called, but some of other \"\n \"predicates are True: \" % k)\n default_msg = (\"Input error: \"\n \"None of conditions evaluated as True:\",\n array_ops.stack(predicates, name=\"preds_c\"))\n with ops.control_dependencies([\n _assert_at_most_n_true(other_predicates, n=0, msg=others_msg),\n Assert(predicate, data=default_msg)\n ]):\n return action()\n\n return default_action, other_predicates, other_actions\n\n\ndef _case_verify_and_canonicalize_args(pred_fn_pairs, exclusive, name,\n allow_python_preds):\n \"\"\"Verifies input arguments for the case function.\n\n Args:\n pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor,\n and a callable which returns a list of tensors.\n exclusive: True iff at most one predicate is allowed to evaluate to `True`.\n name: A name for the case operation.\n allow_python_preds: if true, pred_fn_pairs may contain Python bools in\n addition to boolean Tensors\n Raises:\n TypeError: If `pred_fn_pairs` is not a list/dictionary.\n TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.\n TypeError: If `fns[i]` is not callable for any i, or `default` is not\n callable.\n\n Returns:\n a tuple <list of scalar bool tensors, list of callables>.\n \"\"\"\n if not isinstance(pred_fn_pairs, (list, _basetuple, dict)):\n raise TypeError(\"fns must be a list, tuple, or dict\")\n\n if isinstance(pred_fn_pairs, collections.OrderedDict):\n pred_fn_pairs = pred_fn_pairs.items()\n elif isinstance(pred_fn_pairs, dict):\n pred_fn_pairs = sorted(pred_fn_pairs.items(), key=lambda item: item[0].name)\n if not exclusive:\n logging.warn(\"%s: An unordered 
dictionary of predicate/fn pairs was \"\n \"provided, but exclusive=False. The order of conditional \"\n \"tests is deterministic but not guaranteed.\", name)\n for pred_fn_pair in pred_fn_pairs:\n if not isinstance(pred_fn_pair, _basetuple) or len(pred_fn_pair) != 2:\n raise TypeError(\"Each entry in pred_fn_pairs must be a 2-tuple\")\n pred, fn = pred_fn_pair\n\n if isinstance(pred, ops.Tensor):\n if pred.dtype != dtypes.bool:\n raise TypeError(\"pred must be Tensor of type bool: %s\" % pred.name)\n elif not allow_python_preds:\n raise TypeError(\"pred must be a Tensor, got: %s\" % pred)\n elif not isinstance(pred, bool):\n raise TypeError(\"pred must be a Tensor or bool, got: %s\" % pred)\n\n if not callable(fn):\n raise TypeError(\"fn for pred %s must be callable.\" % pred.name)\n\n predicates, actions = zip(*pred_fn_pairs)\n return predicates, actions\n\n\ndef _case_helper(cond_fn, pred_fn_pairs, default,\n exclusive, name, allow_python_preds=False, **cond_kwargs):\n \"\"\"Implementation of case that allows for different cond functions.\n\n Args:\n cond_fn: method that has signature and semantics of `cond` above.\n pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a\n callable which returns a list of tensors.\n default: Optional callable that returns a list of tensors.\n exclusive: True iff at most one predicate is allowed to evaluate to `True`.\n name: A name for this operation (optional).\n allow_python_preds: if true, pred_fn_pairs may contain Python bools in\n addition to boolean Tensors\n **cond_kwargs: keyword arguments that will be passed to `cond_fn`.\n\n Returns:\n The tensors returned by the first pair whose predicate evaluated to True, or\n those returned by `default` if none does.\n\n Raises:\n TypeError: If `pred_fn_pairs` is not a list/dictionary.\n TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.\n TypeError: If `fns[i]` is not callable for any i, or `default` is not\n callable.\n \"\"\"\n predicates, actions = _case_verify_and_canonicalize_args(\n pred_fn_pairs, exclusive, name, allow_python_preds)\n with ops.name_scope(name, \"case\", [predicates]):\n if default is None:\n default, predicates, actions = _case_create_default_action(\n predicates, actions)\n fn = default\n # To eval conditions in direct order we create nested conditions in reverse:\n # cond_fn(c[0], true_fn=.., false_fn=cond_fn(c[1], ...))\n for predicate, action in reversed(list(zip(predicates, actions))):\n fn = functools.partial(\n cond_fn, predicate, true_fn=action, false_fn=fn, **cond_kwargs)\n if exclusive:\n with ops.control_dependencies([\n _assert_at_most_n_true(\n predicates, n=1, msg=\"Input error: exclusive=True\")\n ]):\n return fn()\n else:\n return fn()\n\n\n@tf_export(\"case\")\ndef case(pred_fn_pairs,\n default=None,\n exclusive=False,\n strict=False,\n name=\"case\"):\n \"\"\"Create a case operation.\n\n The `pred_fn_pairs` parameter is a dict or list of pairs of size N.\n Each pair contains a boolean scalar tensor and a python callable that\n creates the tensors to be returned if the boolean evaluates to True.\n `default` is a callable generating a list of tensors. 
All the callables\n in `pred_fn_pairs` as well as `default` (if provided) should return the same\n number and types of tensors.\n\n If `exclusive==True`, all predicates are evaluated, and an exception is\n thrown if more than one of the predicates evaluates to `True`.\n If `exclusive==False`, execution stops at the first predicate which\n evaluates to True, and the tensors generated by the corresponding function\n are returned immediately. If none of the predicates evaluate to True, this\n operation returns the tensors generated by `default`.\n\n `tf.case` supports nested structures as implemented in\n `tensorflow.python.util.nest`. All of the callables must return the same\n (possibly nested) value structure of lists, tuples, and/or named tuples.\n Singleton lists and tuples form the only exceptions to this: when returned by\n a callable, they are implicitly unpacked to single values. This\n behavior is disabled by passing `strict=True`.\n\n If an unordered dictionary is used for `pred_fn_pairs`, the order of the\n conditional tests is not guaranteed. However, the order is guaranteed to be\n deterministic, so that variables created in conditional branches are created\n in fixed order across runs.\n\n **Example 1:**\n\n Pseudocode:\n\n ```\n if (x < y) return 17;\n else return 23;\n ```\n\n Expressions:\n\n ```python\n f1 = lambda: tf.constant(17)\n f2 = lambda: tf.constant(23)\n r = case([(tf.less(x, y), f1)], default=f2)\n ```\n\n **Example 2:**\n\n Pseudocode:\n\n ```\n if (x < y && x > z) raise OpError(\"Only one predicate may evaluate true\");\n if (x < y) return 17;\n else if (x > z) return 23;\n else return -1;\n ```\n\n Expressions:\n\n ```python\n def f1(): return tf.constant(17)\n def f2(): return tf.constant(23)\n def f3(): return tf.constant(-1)\n r = case({tf.less(x, y): f1, tf.greater(x, z): f2},\n default=f3, exclusive=True)\n ```\n\n Args:\n pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a\n callable which returns a list of tensors.\n default: Optional callable that returns a list of tensors.\n exclusive: True iff at most one predicate is allowed to evaluate to `True`.\n strict: A boolean that enables/disables 'strict' mode; see above.\n name: A name for this operation (optional).\n\n Returns:\n The tensors returned by the first pair whose predicate evaluated to True, or\n those returned by `default` if none does.\n\n Raises:\n TypeError: If `pred_fn_pairs` is not a list/dictionary.\n TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.\n TypeError: If `fns[i]` is not callable for any i, or `default` is not\n callable.\n \"\"\"\n return _case_helper(cond, pred_fn_pairs, default, exclusive, name,\n allow_python_preds=False, strict=strict)\n\n\nclass XLAControlFlowContext(ControlFlowContext):\n \"\"\"Base class for XLA and TPU control flow contexts.\"\"\"\n\n def __init__(self):\n super(XLAControlFlowContext, self).__init__()\n self._name = \"XLAControlFlowContext\"\n\n def IsXLAContext(self):\n return True\n\n def AddOp(self, _):\n pass\n\n def AddValue(self, x):\n return x\n\n\ndef from_control_flow_context_def(context_def, import_scope=None):\n \"\"\"Deserializes `context_def` into the appropriate ControlFlowContext.\n\n Args:\n context_def: ControlFlowContextDef proto\n import_scope: Optional `string`. 
Name scope to add.\n\n Returns:\n A ControlFlowContext subclass\n \"\"\"\n if context_def.HasField(\"cond_ctxt\"):\n return CondContext.from_proto(context_def.cond_ctxt,\n import_scope=import_scope)\n if context_def.HasField(\"while_ctxt\"):\n return WhileContext.from_proto(context_def.while_ctxt,\n import_scope=import_scope)\n raise NotImplementedError(\"Unknown ControlFlowContextDef field: %s\"\n % context_def.WhichOneof(\"ctxt\"))\n\n\nops.register_proto_function(\n ops.GraphKeys.COND_CONTEXT,\n proto_type=control_flow_pb2.CondContextDef,\n to_proto=CondContext.to_proto,\n from_proto=CondContext.from_proto)\n\nops.register_proto_function(\n ops.GraphKeys.WHILE_CONTEXT,\n proto_type=control_flow_pb2.WhileContextDef,\n to_proto=WhileContext.to_proto,\n from_proto=WhileContext.from_proto)\n"
] | [
[
"tensorflow.core.protobuf.control_flow_pb2.WhileContextDef",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.ops.control_flow_util.IsInXLAContext",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.framework.ops.convert_to_tensor_or_indexed_slices",
"tensorflow.python.ops.gen_logging_ops._assert",
"tensorflow.python.ops.gen_control_flow_ops.ref_exit",
"tensorflow.python.ops.control_flow_util.IsLoopSwitch",
"tensorflow.python.ops.gen_control_flow_ops.ref_merge",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.python.ops.gen_control_flow_ops.merge",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.ops.gen_resource_variable_ops.variable_shape",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.ops.math_ops._as_indexed_slices_list",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.math_ops._as_indexed_slices",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.gen_data_flow_ops.stack_pop_v2",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.ops.math_ops.add",
"tensorflow.core.protobuf.control_flow_pb2.CondContextDef",
"tensorflow.python.ops.control_flow_util.OpInContext",
"tensorflow.python.ops.gen_control_flow_ops._exit",
"tensorflow.python.framework.ops.strip_name_scope",
"tensorflow.python.framework.ops.register_proto_function",
"tensorflow.python.ops.control_flow_util.IsLoopExit",
"tensorflow.python.ops.gen_array_ops.ref_identity",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.control_flow_util.IsLoopConstantEnter",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.framework.ops.device",
"tensorflow.core.protobuf.control_flow_pb2.ValuesDef",
"tensorflow.python.ops.gen_data_flow_ops.stack_v2",
"tensorflow.python.framework.constant_op.is_constant",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.gen_data_flow_ops.stack_push_v2",
"tensorflow.python.ops.control_flow_util.GetContainingWhileContext",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.framework.ops.prepend_name_scope",
"tensorflow.python.framework.ops.convert_n_to_tensor_or_indexed_slices",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.ops.control_flow_util.IsContainingContext",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.ops.internal_convert_to_tensor_or_indexed_slices",
"tensorflow.python.ops.gen_control_flow_ops.ref_enter",
"tensorflow.python.ops.gen_control_flow_ops.enter",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.tensor_array_ops.TensorArray",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.math_ops.subtract",
"tensorflow.python.ops.gen_control_flow_ops.switch",
"tensorflow.python.ops.control_flow_util.GetLoopConstantEnter",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.ops.array_ops.shape_internal",
"tensorflow.python.ops.control_flow_util.IsLoopEnter",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.cond_v2_impl.cond_v2",
"tensorflow.python.ops.math_ops.greater_equal",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.framework.ops.convert_n_to_tensor",
"tensorflow.python.ops.control_flow_util.IsSwitch",
"tensorflow.python.ops.control_flow_util.GetOutputContext",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.framework.ops.get_collection_ref",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.framework.tensor_util.is_tensor"
]
] |
sealneaward/pnr-labels | [
"75bcf7778f9bda2db5165be037e47f8dfdbe50d1"
] | [
"pnr/annotation/roles.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_squared_error\n\n\ndef get_possession_team(player, movement):\n \"\"\"\n Return the team_id of the ball_handler\n \"\"\"\n team_id = movement.loc[movement.player_id == player, 'team_id'].values[0]\n return team_id\n\n\ndef get_ball_handler(movement):\n \"\"\"\n Use ball location to MSE of player location\n\n Parameters\n ----------\n ball_location: np.array\n x/y location data\n player_location: np.array\n x/y location data\n\n Returns\n -------\n distance: np.array\n difference in locations to use to find ball handler\n \"\"\"\n movement['distance_to_ball'] = 0\n ball_movement = movement.loc[movement.player_id == -1, :]\n players_movement = movement.loc[movement.player_id != -1, :]\n smallest_distance = 999999\n ball_handler = None\n\n for player_id, player_movement in players_movement.groupby('player_id'):\n\n player_movement['shot_location_x'] = ball_movement['x_loc'].values\n player_movement['shot_location_y'] = ball_movement['y_loc'].values\n\n mse = mean_squared_error(\n (\n player_movement[['x_loc', 'y_loc']].values\n ),\n (\n player_movement[['shot_location_x', 'shot_location_y']].values\n )\n )\n\n if smallest_distance > mse:\n smallest_distance = mse\n ball_handler = player_id\n\n return ball_handler\n\n\ndef get_screen_setter(ball_handler, ball_handler_team, movement, annotation):\n \"\"\"\n Use radius from ball to find offensive player not ball handler\n that is the player setting the screen\n\n Parameters\n ----------\n ball_handler: int\n player id\n ball_handler_team: int\n team id\n movement: pd.DataFrame\n sportvu movement data\n annotation: dict\n pnr information\n\n Returns\n -------\n screen_setter: int\n player id\n \"\"\"\n # get closest time to screen annotation time\n game_clocks = movement['game_clock'].drop_duplicates(inplace=False).values\n game_clock = game_clocks[np.argmin(np.abs(game_clocks - (annotation['gameclock'] + 0.6)))]\n screen_setter = None\n\n movement = movement.loc[movement.game_clock == game_clock, :]\n ball_handler_movement = movement.loc[movement.player_id == ball_handler, :]\n players_movement = movement.loc[\n (movement.player_id != ball_handler) &\n (movement.player_id != -1) &\n (movement.team_id == ball_handler_team)\n , :]\n\n smallest_distance = 999999\n for player_id, player_movement in players_movement.groupby('player_id'):\n\n player_movement['ball_handler_location_x'] = ball_handler_movement['x_loc'].values\n player_movement['ball_handler_location_y'] = ball_handler_movement['y_loc'].values\n\n mse = mean_squared_error(\n (\n player_movement[['x_loc', 'y_loc']].values\n ),\n (\n player_movement[['ball_handler_location_x', 'ball_handler_location_y']].values\n )\n )\n\n if smallest_distance > mse:\n smallest_distance = mse\n screen_setter = player_id\n\n return screen_setter\n\n\ndef get_ball_defender(ball_handler, ball_handler_team, movement):\n \"\"\"\n Use ball location to MSE of player location\n\n Parameters\n ----------\n ball_location: np.array\n x/y location data\n player_location: np.array\n x/y location data\n\n Returns\n -------\n distance: np.array\n difference in locations to use to find ball handler\n \"\"\"\n movement['distance_to_ball'] = 0\n ball_handler_movement = movement.loc[movement.player_id == ball_handler, :]\n players_movement = movement.loc[\n (movement.player_id != ball_handler) &\n (movement.player_id != -1) &\n (movement.team_id != ball_handler_team)\n , :]\n ball_defender = None\n\n smallest_distance = 999999\n for player_id, 
player_movement in players_movement.groupby('player_id'):\n\n player_movement['ball_handler_location_x'] = ball_handler_movement['x_loc'].values\n player_movement['ball_handler_location_y'] = ball_handler_movement['y_loc'].values\n\n mse = mean_squared_error(\n (\n player_movement[['x_loc', 'y_loc']].values\n ),\n (\n player_movement[['ball_handler_location_x', 'ball_handler_location_y']].values\n )\n )\n\n if smallest_distance > mse:\n smallest_distance = mse\n ball_defender = player_id\n\n return ball_defender\n\n\ndef get_screen_defender(screen_setter, ball_defender, screen_setter_team, movement):\n \"\"\"\n Use ball location to MSE of player location\n\n Parameters\n ----------\n ball_location: np.array\n x/y location data\n player_location: np.array\n x/y location data\n\n Returns\n -------\n distance: np.array\n difference in locations to use to find ball handler\n \"\"\"\n movement['distance_to_ball'] = 0\n screen_setter_movement = movement.loc[movement.player_id == screen_setter, :]\n players_movement = movement.loc[\n (movement.player_id != ball_defender) &\n (movement.player_id != -1) &\n (movement.team_id != screen_setter_team)\n , :]\n screen_defender = None\n\n smallest_distance = 999999\n for player_id, player_movement in players_movement.groupby('player_id'):\n\n player_movement['ball_handler_location_x'] = screen_setter_movement['x_loc'].values\n player_movement['ball_handler_location_y'] = screen_setter_movement['y_loc'].values\n\n mse = mean_squared_error(\n (\n player_movement[['x_loc', 'y_loc']].values\n ),\n (\n player_movement[['ball_handler_location_x', 'ball_handler_location_y']].values\n )\n )\n\n if smallest_distance > mse:\n smallest_distance = mse\n screen_defender = player_id\n\n return screen_defender\n\n\ndef get_roles(annotation, movement, data_config):\n \"\"\"\n Get 4 roles from movement to satisfy 4 roles in PnR\n\n Parameters\n ----------\n annotation: pd.DataFrame\n pnr information\n movement: pd.DataFrame\n sportvu movement information\n data_config: dict\n configuration information\n \"\"\"\n annotation_movement = movement.loc[\n (movement.game_clock <= (annotation['gameclock'] + 0.6)) &\n (movement.game_clock >= (annotation['gameclock'] + 0.6 - int(data_config['data_config']['tfr']/data_config['data_config']['frame_rate'])))\n , :]\n\n ball_handler_id = get_ball_handler(annotation_movement)\n if ball_handler_id is None:\n return None\n ball_handler_team = get_possession_team(ball_handler_id, annotation_movement)\n\n ball_defender_id = get_ball_defender(ball_handler_id, ball_handler_team, annotation_movement)\n if ball_defender_id is None:\n return None\n ball_defender_team = get_possession_team(ball_defender_id, annotation_movement)\n\n screen_setter_id = get_screen_setter(ball_handler_id, ball_handler_team, annotation_movement, annotation)\n if screen_setter_id is None:\n return None\n screen_setter_team = get_possession_team(screen_setter_id, annotation_movement)\n\n screen_defender_id = get_screen_defender(screen_setter_id, ball_defender_id, screen_setter_team, annotation_movement)\n if screen_defender_id is None:\n return None\n screen_defender_team = get_possession_team(screen_defender_id, annotation_movement)\n\n annotation['ball_handler'] = ball_handler_id\n annotation['ball_defender'] = ball_defender_id\n annotation['screen_setter'] = screen_setter_id\n annotation['screen_defender'] = screen_defender_id\n annotation['offense_id'] = ball_handler_team\n annotation['defense_id'] = ball_defender_team\n\n return annotation"
] | [
[
"numpy.abs",
"sklearn.metrics.mean_squared_error"
]
] |
fdroessler/pandas | [
"dc86509b44b3fb0cd9a1a6d6ed564b082dc50848"
] | [
"pandas/core/reshape/merge.py"
] | [
"\"\"\"\nSQL-style merge routines\n\"\"\"\n\nimport copy\nimport string\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import hashtable as libhashtable, join as libjoin, lib\nfrom pandas.compat import lzip\nfrom pandas.errors import MergeError\nfrom pandas.util._decorators import Appender, Substitution\n\nfrom pandas.core.dtypes.common import (\n ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool,\n is_bool_dtype, is_categorical_dtype, is_datetime64_dtype,\n is_datetime64tz_dtype, is_datetimelike, is_dtype_equal,\n is_extension_array_dtype, is_float_dtype, is_int64_dtype, is_integer,\n is_integer_dtype, is_list_like, is_number, is_numeric_dtype,\n is_object_dtype, needs_i8_conversion)\nfrom pandas.core.dtypes.missing import isnull, na_value_for_dtype\n\nfrom pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timedelta\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays.categorical import _recode_for_categories\nimport pandas.core.common as com\nfrom pandas.core.frame import _merge_doc\nfrom pandas.core.internals import (\n concatenate_block_managers, items_overlap_with_suffix)\nimport pandas.core.sorting as sorting\nfrom pandas.core.sorting import is_int64_overflow_possible\n\n\n@Substitution('\\nleft : DataFrame')\n@Appender(_merge_doc, indents=0)\ndef merge(left, right, how='inner', on=None, left_on=None, right_on=None,\n left_index=False, right_index=False, sort=False,\n suffixes=('_x', '_y'), copy=True, indicator=False,\n validate=None):\n op = _MergeOperation(left, right, how=how, on=on, left_on=left_on,\n right_on=right_on, left_index=left_index,\n right_index=right_index, sort=sort, suffixes=suffixes,\n copy=copy, indicator=indicator,\n validate=validate)\n return op.get_result()\n\n\nif __debug__:\n merge.__doc__ = _merge_doc % '\\nleft : DataFrame'\n\n\ndef _groupby_and_merge(by, on, left, right, _merge_pieces,\n check_duplicates=True):\n \"\"\"\n groupby & merge; we are always performing a left-by type operation\n\n Parameters\n ----------\n by: field to group\n on: duplicates field\n left: left frame\n right: right frame\n _merge_pieces: function for merging\n check_duplicates: boolean, default True\n should we check & clean duplicates\n \"\"\"\n\n pieces = []\n if not isinstance(by, (list, tuple)):\n by = [by]\n\n lby = left.groupby(by, sort=False)\n\n # if we can groupby the rhs\n # then we can get vastly better perf\n try:\n\n # we will check & remove duplicates if indicated\n if check_duplicates:\n if on is None:\n on = []\n elif not isinstance(on, (list, tuple)):\n on = [on]\n\n if right.duplicated(by + on).any():\n right = right.drop_duplicates(by + on, keep='last')\n rby = right.groupby(by, sort=False)\n except KeyError:\n rby = None\n\n for key, lhs in lby:\n\n if rby is None:\n rhs = right\n else:\n try:\n rhs = right.take(rby.indices[key])\n except KeyError:\n # key doesn't exist in left\n lcols = lhs.columns.tolist()\n cols = lcols + [r for r in right.columns\n if r not in set(lcols)]\n merged = lhs.reindex(columns=cols)\n merged.index = range(len(merged))\n pieces.append(merged)\n continue\n\n merged = _merge_pieces(lhs, rhs)\n\n # make sure join keys are in the merged\n # TODO, should _merge_pieces do this?\n for k in by:\n try:\n if k in merged:\n merged[k] = key\n except KeyError:\n pass\n\n pieces.append(merged)\n\n # preserve the original order\n # if we have a missing piece this can be reset\n from pandas.core.reshape.concat import concat\n result = concat(pieces, ignore_index=True)\n result = 
result.reindex(columns=pieces[0].columns, copy=False)\n return result, lby\n\n\ndef merge_ordered(left, right, on=None,\n left_on=None, right_on=None,\n left_by=None, right_by=None,\n fill_method=None, suffixes=('_x', '_y'),\n how='outer'):\n \"\"\"Perform merge with optional filling/interpolation designed for ordered\n data like time series data. Optionally perform group-wise merge (see\n examples)\n\n Parameters\n ----------\n left : DataFrame\n right : DataFrame\n on : label or list\n Field names to join on. Must be found in both DataFrames.\n left_on : label or list, or array-like\n Field names to join on in left DataFrame. Can be a vector or list of\n vectors of the length of the DataFrame to use a particular vector as\n the join key instead of columns\n right_on : label or list, or array-like\n Field names to join on in right DataFrame or vector/list of vectors per\n left_on docs\n left_by : column name or list of column names\n Group left DataFrame by group columns and merge piece by piece with\n right DataFrame\n right_by : column name or list of column names\n Group right DataFrame by group columns and merge piece by piece with\n left DataFrame\n fill_method : {'ffill', None}, default None\n Interpolation method for data\n suffixes : Sequence, default is (\"_x\", \"_y\")\n A length-2 sequence where each element is optionally a string\n indicating the suffix to add to overlapping column names in\n `left` and `right` respectively. Pass a value of `None` instead\n of a string to indicate that the column name from `left` or\n `right` should be left as-is, with no suffix. At least one of the\n values must not be None.\n\n .. versionchanged:: 0.25.0\n how : {'left', 'right', 'outer', 'inner'}, default 'outer'\n * left: use only keys from left frame (SQL: left outer join)\n * right: use only keys from right frame (SQL: right outer join)\n * outer: use union of keys from both frames (SQL: full outer join)\n * inner: use intersection of keys from both frames (SQL: inner join)\n\n .. versionadded:: 0.19.0\n\n Returns\n -------\n merged : DataFrame\n The output type will the be same as 'left', if it is a subclass\n of DataFrame.\n\n See Also\n --------\n merge\n merge_asof\n\n Examples\n --------\n >>> A >>> B\n key lvalue group key rvalue\n 0 a 1 a 0 b 1\n 1 c 2 a 1 c 2\n 2 e 3 a 2 d 3\n 3 a 1 b\n 4 c 2 b\n 5 e 3 b\n\n >>> merge_ordered(A, B, fill_method='ffill', left_by='group')\n group key lvalue rvalue\n 0 a a 1 NaN\n 1 a b 1 1.0\n 2 a c 2 2.0\n 3 a d 2 3.0\n 4 a e 3 3.0\n 5 b a 1 NaN\n 6 b b 1 1.0\n 7 b c 2 2.0\n 8 b d 2 3.0\n 9 b e 3 3.0\n \"\"\"\n def _merger(x, y):\n # perform the ordered merge operation\n op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,\n suffixes=suffixes, fill_method=fill_method,\n how=how)\n return op.get_result()\n\n if left_by is not None and right_by is not None:\n raise ValueError('Can only group either left or right frames')\n elif left_by is not None:\n result, _ = _groupby_and_merge(left_by, on, left, right,\n lambda x, y: _merger(x, y),\n check_duplicates=False)\n elif right_by is not None:\n result, _ = _groupby_and_merge(right_by, on, right, left,\n lambda x, y: _merger(y, x),\n check_duplicates=False)\n else:\n result = _merger(left, right)\n return result\n\n\ndef merge_asof(left, right, on=None,\n left_on=None, right_on=None,\n left_index=False, right_index=False,\n by=None, left_by=None, right_by=None,\n suffixes=('_x', '_y'),\n tolerance=None,\n allow_exact_matches=True,\n direction='backward'):\n \"\"\"Perform an asof merge. 
This is similar to a left-join except that we\n match on nearest key rather than equal keys.\n\n Both DataFrames must be sorted by the key.\n\n For each row in the left DataFrame:\n\n - A \"backward\" search selects the last row in the right DataFrame whose\n 'on' key is less than or equal to the left's key.\n\n - A \"forward\" search selects the first row in the right DataFrame whose\n 'on' key is greater than or equal to the left's key.\n\n - A \"nearest\" search selects the row in the right DataFrame whose 'on'\n key is closest in absolute distance to the left's key.\n\n The default is \"backward\" and is compatible in versions below 0.20.0.\n The direction parameter was added in version 0.20.0 and introduces\n \"forward\" and \"nearest\".\n\n Optionally match on equivalent keys with 'by' before searching with 'on'.\n\n .. versionadded:: 0.19.0\n\n Parameters\n ----------\n left : DataFrame\n right : DataFrame\n on : label\n Field name to join on. Must be found in both DataFrames.\n The data MUST be ordered. Furthermore this must be a numeric column,\n such as datetimelike, integer, or float. On or left_on/right_on\n must be given.\n left_on : label\n Field name to join on in left DataFrame.\n right_on : label\n Field name to join on in right DataFrame.\n left_index : boolean\n Use the index of the left DataFrame as the join key.\n\n .. versionadded:: 0.19.2\n\n right_index : boolean\n Use the index of the right DataFrame as the join key.\n\n .. versionadded:: 0.19.2\n\n by : column name or list of column names\n Match on these columns before performing merge operation.\n left_by : column name\n Field names to match on in the left DataFrame.\n\n .. versionadded:: 0.19.2\n\n right_by : column name\n Field names to match on in the right DataFrame.\n\n .. versionadded:: 0.19.2\n\n suffixes : 2-length sequence (tuple, list, ...)\n Suffix to apply to overlapping column names in the left and right\n side, respectively.\n tolerance : integer or Timedelta, optional, default None\n Select asof tolerance within this range; must be compatible\n with the merge index.\n allow_exact_matches : boolean, default True\n\n - If True, allow matching with the same 'on' value\n (i.e. less-than-or-equal-to / greater-than-or-equal-to)\n - If False, don't match the same 'on' value\n (i.e., strictly less-than / strictly greater-than)\n\n direction : 'backward' (default), 'forward', or 'nearest'\n Whether to search for prior, subsequent, or closest matches.\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n merged : DataFrame\n\n See Also\n --------\n merge\n merge_ordered\n\n Examples\n --------\n >>> left = pd.DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']})\n >>> left\n a left_val\n 0 1 a\n 1 5 b\n 2 10 c\n\n >>> right = pd.DataFrame({'a': [1, 2, 3, 6, 7],\n ... 
'right_val': [1, 2, 3, 6, 7]})\n >>> right\n a right_val\n 0 1 1\n 1 2 2\n 2 3 3\n 3 6 6\n 4 7 7\n\n >>> pd.merge_asof(left, right, on='a')\n a left_val right_val\n 0 1 a 1\n 1 5 b 3\n 2 10 c 7\n\n >>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)\n a left_val right_val\n 0 1 a NaN\n 1 5 b 3.0\n 2 10 c 7.0\n\n >>> pd.merge_asof(left, right, on='a', direction='forward')\n a left_val right_val\n 0 1 a 1.0\n 1 5 b 6.0\n 2 10 c NaN\n\n >>> pd.merge_asof(left, right, on='a', direction='nearest')\n a left_val right_val\n 0 1 a 1\n 1 5 b 6\n 2 10 c 7\n\n We can use indexed DataFrames as well.\n\n >>> left = pd.DataFrame({'left_val': ['a', 'b', 'c']}, index=[1, 5, 10])\n >>> left\n left_val\n 1 a\n 5 b\n 10 c\n\n >>> right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7]},\n ... index=[1, 2, 3, 6, 7])\n >>> right\n right_val\n 1 1\n 2 2\n 3 3\n 6 6\n 7 7\n\n >>> pd.merge_asof(left, right, left_index=True, right_index=True)\n left_val right_val\n 1 a 1\n 5 b 3\n 10 c 7\n\n Here is a real-world times-series example\n\n >>> quotes\n time ticker bid ask\n 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93\n 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96\n 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98\n 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00\n 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93\n 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01\n 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88\n 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03\n\n >>> trades\n time ticker price quantity\n 0 2016-05-25 13:30:00.023 MSFT 51.95 75\n 1 2016-05-25 13:30:00.038 MSFT 51.95 155\n 2 2016-05-25 13:30:00.048 GOOG 720.77 100\n 3 2016-05-25 13:30:00.048 GOOG 720.92 100\n 4 2016-05-25 13:30:00.048 AAPL 98.00 100\n\n By default we are taking the asof of the quotes\n\n >>> pd.merge_asof(trades, quotes,\n ... on='time',\n ... by='ticker')\n time ticker price quantity bid ask\n 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96\n 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98\n 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93\n 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93\n 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN\n\n We only asof within 2ms between the quote time and the trade time\n\n >>> pd.merge_asof(trades, quotes,\n ... on='time',\n ... by='ticker',\n ... tolerance=pd.Timedelta('2ms'))\n time ticker price quantity bid ask\n 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96\n 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN\n 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93\n 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93\n 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN\n\n We only asof within 10ms between the quote time and the trade time\n and we exclude exact matches on time. However *prior* data will\n propagate forward\n\n >>> pd.merge_asof(trades, quotes,\n ... on='time',\n ... by='ticker',\n ... tolerance=pd.Timedelta('10ms'),\n ... 
allow_exact_matches=False)\n time ticker price quantity bid ask\n 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN\n 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98\n 2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN\n 3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN\n 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN\n \"\"\"\n op = _AsOfMerge(left, right,\n on=on, left_on=left_on, right_on=right_on,\n left_index=left_index, right_index=right_index,\n by=by, left_by=left_by, right_by=right_by,\n suffixes=suffixes,\n how='asof', tolerance=tolerance,\n allow_exact_matches=allow_exact_matches,\n direction=direction)\n return op.get_result()\n\n\n# TODO: transformations??\n# TODO: only copy DataFrames when modification necessary\nclass _MergeOperation:\n \"\"\"\n Perform a database (SQL) merge operation between two DataFrame objects\n using either columns as keys or their row indexes\n \"\"\"\n _merge_type = 'merge'\n\n def __init__(self, left, right, how='inner', on=None,\n left_on=None, right_on=None, axis=1,\n left_index=False, right_index=False, sort=True,\n suffixes=('_x', '_y'), copy=True, indicator=False,\n validate=None):\n left = validate_operand(left)\n right = validate_operand(right)\n self.left = self.orig_left = left\n self.right = self.orig_right = right\n self.how = how\n self.axis = axis\n\n self.on = com.maybe_make_list(on)\n self.left_on = com.maybe_make_list(left_on)\n self.right_on = com.maybe_make_list(right_on)\n\n self.copy = copy\n self.suffixes = suffixes\n self.sort = sort\n\n self.left_index = left_index\n self.right_index = right_index\n\n self.indicator = indicator\n\n if isinstance(self.indicator, str):\n self.indicator_name = self.indicator\n elif isinstance(self.indicator, bool):\n self.indicator_name = '_merge' if self.indicator else None\n else:\n raise ValueError(\n 'indicator option can only accept boolean or string arguments')\n\n if not is_bool(left_index):\n raise ValueError(\n 'left_index parameter must be of type bool, not '\n '{left_index}'.format(left_index=type(left_index)))\n if not is_bool(right_index):\n raise ValueError(\n 'right_index parameter must be of type bool, not '\n '{right_index}'.format(right_index=type(right_index)))\n\n # warn user when merging between different levels\n if left.columns.nlevels != right.columns.nlevels:\n msg = ('merging between different levels can give an unintended '\n 'result ({left} levels on the left, {right} on the right)'\n ).format(left=left.columns.nlevels,\n right=right.columns.nlevels)\n warnings.warn(msg, UserWarning)\n\n self._validate_specification()\n\n # note this function has side effects\n (self.left_join_keys,\n self.right_join_keys,\n self.join_names) = self._get_merge_keys()\n\n # validate the merge keys dtypes. 
We may need to coerce\n # to avoid incompat dtypes\n self._maybe_coerce_merge_keys()\n\n # If argument passed to validate,\n # check if columns specified as unique\n # are in fact unique.\n if validate is not None:\n self._validate(validate)\n\n def get_result(self):\n if self.indicator:\n self.left, self.right = self._indicator_pre_merge(\n self.left, self.right)\n\n join_index, left_indexer, right_indexer = self._get_join_info()\n\n ldata, rdata = self.left._data, self.right._data\n lsuf, rsuf = self.suffixes\n\n llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,\n rdata.items, rsuf)\n\n lindexers = {1: left_indexer} if left_indexer is not None else {}\n rindexers = {1: right_indexer} if right_indexer is not None else {}\n\n result_data = concatenate_block_managers(\n [(ldata, lindexers), (rdata, rindexers)],\n axes=[llabels.append(rlabels), join_index],\n concat_axis=0, copy=self.copy)\n\n typ = self.left._constructor\n result = typ(result_data).__finalize__(self, method=self._merge_type)\n\n if self.indicator:\n result = self._indicator_post_merge(result)\n\n self._maybe_add_join_keys(result, left_indexer, right_indexer)\n\n self._maybe_restore_index_levels(result)\n\n return result\n\n def _indicator_pre_merge(self, left, right):\n\n columns = left.columns.union(right.columns)\n\n for i in ['_left_indicator', '_right_indicator']:\n if i in columns:\n raise ValueError(\"Cannot use `indicator=True` option when \"\n \"data contains a column named {name}\"\n .format(name=i))\n if self.indicator_name in columns:\n raise ValueError(\n \"Cannot use name of an existing column for indicator column\")\n\n left = left.copy()\n right = right.copy()\n\n left['_left_indicator'] = 1\n left['_left_indicator'] = left['_left_indicator'].astype('int8')\n\n right['_right_indicator'] = 2\n right['_right_indicator'] = right['_right_indicator'].astype('int8')\n\n return left, right\n\n def _indicator_post_merge(self, result):\n\n result['_left_indicator'] = result['_left_indicator'].fillna(0)\n result['_right_indicator'] = result['_right_indicator'].fillna(0)\n\n result[self.indicator_name] = Categorical((result['_left_indicator'] +\n result['_right_indicator']),\n categories=[1, 2, 3])\n result[self.indicator_name] = (\n result[self.indicator_name]\n .cat.rename_categories(['left_only', 'right_only', 'both']))\n\n result = result.drop(labels=['_left_indicator', '_right_indicator'],\n axis=1)\n return result\n\n def _maybe_restore_index_levels(self, result):\n \"\"\"\n Restore index levels specified as `on` parameters\n\n Here we check for cases where `self.left_on` and `self.right_on` pairs\n each reference an index level in their respective DataFrames. The\n joined columns corresponding to these pairs are then restored to the\n index of `result`.\n\n **Note:** This method has side effects. 
It modifies `result` in-place\n\n Parameters\n ----------\n result: DataFrame\n merge result\n\n Returns\n -------\n None\n \"\"\"\n names_to_restore = []\n for name, left_key, right_key in zip(self.join_names,\n self.left_on,\n self.right_on):\n if (self.orig_left._is_level_reference(left_key) and\n self.orig_right._is_level_reference(right_key) and\n name not in result.index.names):\n\n names_to_restore.append(name)\n\n if names_to_restore:\n result.set_index(names_to_restore, inplace=True)\n\n def _maybe_add_join_keys(self, result, left_indexer, right_indexer):\n\n left_has_missing = None\n right_has_missing = None\n\n keys = zip(self.join_names, self.left_on, self.right_on)\n for i, (name, lname, rname) in enumerate(keys):\n if not _should_fill(lname, rname):\n continue\n\n take_left, take_right = None, None\n\n if name in result:\n\n if left_indexer is not None and right_indexer is not None:\n if name in self.left:\n\n if left_has_missing is None:\n left_has_missing = (left_indexer == -1).any()\n\n if left_has_missing:\n take_right = self.right_join_keys[i]\n\n if not is_dtype_equal(result[name].dtype,\n self.left[name].dtype):\n take_left = self.left[name]._values\n\n elif name in self.right:\n\n if right_has_missing is None:\n right_has_missing = (right_indexer == -1).any()\n\n if right_has_missing:\n take_left = self.left_join_keys[i]\n\n if not is_dtype_equal(result[name].dtype,\n self.right[name].dtype):\n take_right = self.right[name]._values\n\n elif left_indexer is not None \\\n and is_array_like(self.left_join_keys[i]):\n take_left = self.left_join_keys[i]\n take_right = self.right_join_keys[i]\n\n if take_left is not None or take_right is not None:\n\n if take_left is None:\n lvals = result[name]._values\n else:\n lfill = na_value_for_dtype(take_left.dtype)\n lvals = algos.take_1d(take_left, left_indexer,\n fill_value=lfill)\n\n if take_right is None:\n rvals = result[name]._values\n else:\n rfill = na_value_for_dtype(take_right.dtype)\n rvals = algos.take_1d(take_right, right_indexer,\n fill_value=rfill)\n\n # if we have an all missing left_indexer\n # make sure to just use the right values\n mask = left_indexer == -1\n if mask.all():\n key_col = rvals\n else:\n key_col = Index(lvals).where(~mask, rvals)\n\n if result._is_label_reference(name):\n result[name] = key_col\n elif result._is_level_reference(name):\n if isinstance(result.index, MultiIndex):\n key_col.name = name\n idx_list = [result.index.get_level_values(level_name)\n if level_name != name else key_col\n for level_name in result.index.names]\n\n result.set_index(idx_list, inplace=True)\n else:\n result.index = Index(key_col, name=name)\n else:\n result.insert(i, name or 'key_{i}'.format(i=i), key_col)\n\n def _get_join_indexers(self):\n \"\"\" return the join indexers \"\"\"\n return _get_join_indexers(self.left_join_keys,\n self.right_join_keys,\n sort=self.sort,\n how=self.how)\n\n def _get_join_info(self):\n left_ax = self.left._data.axes[self.axis]\n right_ax = self.right._data.axes[self.axis]\n\n if self.left_index and self.right_index and self.how != 'asof':\n join_index, left_indexer, right_indexer = \\\n left_ax.join(right_ax, how=self.how, return_indexers=True,\n sort=self.sort)\n elif self.right_index and self.how == 'left':\n join_index, left_indexer, right_indexer = \\\n _left_join_on_index(left_ax, right_ax, self.left_join_keys,\n sort=self.sort)\n\n elif self.left_index and self.how == 'right':\n join_index, right_indexer, left_indexer = \\\n _left_join_on_index(right_ax, left_ax, 
self.right_join_keys,\n sort=self.sort)\n else:\n (left_indexer,\n right_indexer) = self._get_join_indexers()\n\n if self.right_index:\n if len(self.left) > 0:\n join_index = self._create_join_index(self.left.index,\n self.right.index,\n left_indexer,\n right_indexer,\n how='right')\n else:\n join_index = self.right.index.take(right_indexer)\n left_indexer = np.array([-1] * len(join_index))\n elif self.left_index:\n if len(self.right) > 0:\n join_index = self._create_join_index(self.right.index,\n self.left.index,\n right_indexer,\n left_indexer,\n how='left')\n else:\n join_index = self.left.index.take(left_indexer)\n right_indexer = np.array([-1] * len(join_index))\n else:\n join_index = Index(np.arange(len(left_indexer)))\n\n if len(join_index) == 0:\n join_index = join_index.astype(object)\n return join_index, left_indexer, right_indexer\n\n def _create_join_index(self, index, other_index, indexer,\n other_indexer, how='left'):\n \"\"\"\n Create a join index by rearranging one index to match another\n\n Parameters\n ----------\n index: Index being rearranged\n other_index: Index used to supply values not found in index\n indexer: how to rearrange index\n how: replacement is only necessary if indexer based on other_index\n\n Returns\n -------\n join_index\n \"\"\"\n join_index = index.take(indexer)\n if (self.how in (how, 'outer') and\n not isinstance(other_index, MultiIndex)):\n # if final index requires values in other_index but not target\n # index, indexer may hold missing (-1) values, causing Index.take\n # to take the final value in target index\n mask = indexer == -1\n if np.any(mask):\n # if values missing (-1) from target index,\n # take from other_index instead\n join_list = join_index.to_numpy()\n other_list = other_index.take(other_indexer).to_numpy()\n join_list[mask] = other_list[mask]\n join_index = Index(join_list, dtype=join_index.dtype,\n name=join_index.name)\n return join_index\n\n def _get_merge_keys(self):\n \"\"\"\n Note: has side effects (copy/delete key columns)\n\n Parameters\n ----------\n left\n right\n on\n\n Returns\n -------\n left_keys, right_keys\n \"\"\"\n left_keys = []\n right_keys = []\n join_names = []\n right_drop = []\n left_drop = []\n\n left, right = self.left, self.right\n\n is_lkey = lambda x: is_array_like(x) and len(x) == len(left)\n is_rkey = lambda x: is_array_like(x) and len(x) == len(right)\n\n # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A\n # user could, for example, request 'left_index' and 'left_by'. In a\n # regular pd.merge(), users cannot specify both 'left_index' and\n # 'left_on'. (Instead, users have a MultiIndex). That means the\n # self.left_on in this function is always empty in a pd.merge(), but\n # a pd.merge_asof(left_index=True, left_by=...) will result in a\n # self.left_on array with a None in the middle of it. 
This requires\n # a work-around as designated in the code below.\n # See _validate_specification() for where this happens.\n\n # ugh, spaghetti re #733\n if _any(self.left_on) and _any(self.right_on):\n for lk, rk in zip(self.left_on, self.right_on):\n if is_lkey(lk):\n left_keys.append(lk)\n if is_rkey(rk):\n right_keys.append(rk)\n join_names.append(None) # what to do?\n else:\n if rk is not None:\n right_keys.append(\n right._get_label_or_level_values(rk))\n join_names.append(rk)\n else:\n # work-around for merge_asof(right_index=True)\n right_keys.append(right.index)\n join_names.append(right.index.name)\n else:\n if not is_rkey(rk):\n if rk is not None:\n right_keys.append(\n right._get_label_or_level_values(rk))\n else:\n # work-around for merge_asof(right_index=True)\n right_keys.append(right.index)\n if lk is not None and lk == rk:\n # avoid key upcast in corner case (length-0)\n if len(left) > 0:\n right_drop.append(rk)\n else:\n left_drop.append(lk)\n else:\n right_keys.append(rk)\n if lk is not None:\n left_keys.append(left._get_label_or_level_values(lk))\n join_names.append(lk)\n else:\n # work-around for merge_asof(left_index=True)\n left_keys.append(left.index)\n join_names.append(left.index.name)\n elif _any(self.left_on):\n for k in self.left_on:\n if is_lkey(k):\n left_keys.append(k)\n join_names.append(None)\n else:\n left_keys.append(left._get_label_or_level_values(k))\n join_names.append(k)\n if isinstance(self.right.index, MultiIndex):\n right_keys = [lev._values.take(lev_codes) for lev, lev_codes\n in zip(self.right.index.levels,\n self.right.index.codes)]\n else:\n right_keys = [self.right.index._values]\n elif _any(self.right_on):\n for k in self.right_on:\n if is_rkey(k):\n right_keys.append(k)\n join_names.append(None)\n else:\n right_keys.append(right._get_label_or_level_values(k))\n join_names.append(k)\n if isinstance(self.left.index, MultiIndex):\n left_keys = [lev._values.take(lev_codes) for lev, lev_codes\n in zip(self.left.index.levels,\n self.left.index.codes)]\n else:\n left_keys = [self.left.index.values]\n\n if left_drop:\n self.left = self.left._drop_labels_or_levels(left_drop)\n\n if right_drop:\n self.right = self.right._drop_labels_or_levels(right_drop)\n\n return left_keys, right_keys, join_names\n\n def _maybe_coerce_merge_keys(self):\n # we have valid mergees but we may have to further\n # coerce these if they are originally incompatible types\n #\n # for example if these are categorical, but are not dtype_equal\n # or if we have object and integer dtypes\n\n for lk, rk, name in zip(self.left_join_keys,\n self.right_join_keys,\n self.join_names):\n if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):\n continue\n\n lk_is_cat = is_categorical_dtype(lk)\n rk_is_cat = is_categorical_dtype(rk)\n lk_is_object = is_object_dtype(lk)\n rk_is_object = is_object_dtype(rk)\n\n # if either left or right is a categorical\n # then the must match exactly in categories & ordered\n if lk_is_cat and rk_is_cat:\n if lk.is_dtype_equal(rk):\n continue\n\n elif lk_is_cat or rk_is_cat:\n pass\n\n elif is_dtype_equal(lk.dtype, rk.dtype):\n continue\n\n msg = (\"You are trying to merge on {lk_dtype} and \"\n \"{rk_dtype} columns. If you wish to proceed \"\n \"you should use pd.concat\".format(lk_dtype=lk.dtype,\n rk_dtype=rk.dtype))\n\n # if we are numeric, then allow differing\n # kinds to proceed, eg. 
int64 and int8, int and float\n # further if we are object, but we infer to\n # the same, then proceed\n if is_numeric_dtype(lk) and is_numeric_dtype(rk):\n if lk.dtype.kind == rk.dtype.kind:\n continue\n\n # check whether ints and floats\n elif is_integer_dtype(rk) and is_float_dtype(lk):\n if not (lk == lk.astype(rk.dtype))[~np.isnan(lk)].all():\n warnings.warn('You are merging on int and float '\n 'columns where the float values '\n 'are not equal to their int '\n 'representation', UserWarning)\n continue\n\n elif is_float_dtype(rk) and is_integer_dtype(lk):\n if not (rk == rk.astype(lk.dtype))[~np.isnan(rk)].all():\n warnings.warn('You are merging on int and float '\n 'columns where the float values '\n 'are not equal to their int '\n 'representation', UserWarning)\n continue\n\n # let's infer and see if we are ok\n elif (lib.infer_dtype(lk, skipna=False)\n == lib.infer_dtype(rk, skipna=False)):\n continue\n\n # Check if we are trying to merge on obviously\n # incompatible dtypes GH 9780, GH 15800\n\n # bool values are coerced to object\n elif ((lk_is_object and is_bool_dtype(rk)) or\n (is_bool_dtype(lk) and rk_is_object)):\n pass\n\n # object values are allowed to be merged\n elif ((lk_is_object and is_numeric_dtype(rk)) or\n (is_numeric_dtype(lk) and rk_is_object)):\n inferred_left = lib.infer_dtype(lk, skipna=False)\n inferred_right = lib.infer_dtype(rk, skipna=False)\n bool_types = ['integer', 'mixed-integer', 'boolean', 'empty']\n string_types = ['string', 'unicode', 'mixed', 'bytes', 'empty']\n\n # inferred bool\n if (inferred_left in bool_types and\n inferred_right in bool_types):\n pass\n\n # unless we are merging non-string-like with string-like\n elif ((inferred_left in string_types and\n inferred_right not in string_types) or\n (inferred_right in string_types and\n inferred_left not in string_types)):\n raise ValueError(msg)\n\n # datetimelikes must match exactly\n elif is_datetimelike(lk) and not is_datetimelike(rk):\n raise ValueError(msg)\n elif not is_datetimelike(lk) and is_datetimelike(rk):\n raise ValueError(msg)\n elif is_datetime64tz_dtype(lk) and not is_datetime64tz_dtype(rk):\n raise ValueError(msg)\n elif not is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):\n raise ValueError(msg)\n\n elif lk_is_object and rk_is_object:\n continue\n\n # Houston, we have a problem!\n # let's coerce to object if the dtypes aren't\n # categorical, otherwise coerce to the category\n # dtype. If we coerced categories to object,\n # then we would lose type information on some\n # columns, and end up trying to merge\n # incompatible dtypes. 
See GH 16900.\n if name in self.left.columns:\n typ = lk.categories.dtype if lk_is_cat else object\n self.left = self.left.assign(\n **{name: self.left[name].astype(typ)})\n if name in self.right.columns:\n typ = rk.categories.dtype if rk_is_cat else object\n self.right = self.right.assign(\n **{name: self.right[name].astype(typ)})\n\n def _validate_specification(self):\n # Hm, any way to make this logic less complicated??\n if self.on is None and self.left_on is None and self.right_on is None:\n\n if self.left_index and self.right_index:\n self.left_on, self.right_on = (), ()\n elif self.left_index:\n if self.right_on is None:\n raise MergeError('Must pass right_on or right_index=True')\n elif self.right_index:\n if self.left_on is None:\n raise MergeError('Must pass left_on or left_index=True')\n else:\n # use the common columns\n common_cols = self.left.columns.intersection(\n self.right.columns)\n if len(common_cols) == 0:\n raise MergeError(\n 'No common columns to perform merge on. '\n 'Merge options: left_on={lon}, right_on={ron}, '\n 'left_index={lidx}, right_index={ridx}'\n .format(lon=self.left_on, ron=self.right_on,\n lidx=self.left_index, ridx=self.right_index))\n if not common_cols.is_unique:\n raise MergeError(\"Data columns not unique: {common!r}\"\n .format(common=common_cols))\n self.left_on = self.right_on = common_cols\n elif self.on is not None:\n if self.left_on is not None or self.right_on is not None:\n raise MergeError('Can only pass argument \"on\" OR \"left_on\" '\n 'and \"right_on\", not a combination of both.')\n self.left_on = self.right_on = self.on\n elif self.left_on is not None:\n n = len(self.left_on)\n if self.right_index:\n if len(self.left_on) != self.right.index.nlevels:\n raise ValueError('len(left_on) must equal the number '\n 'of levels in the index of \"right\"')\n self.right_on = [None] * n\n elif self.right_on is not None:\n n = len(self.right_on)\n if self.left_index:\n if len(self.right_on) != self.left.index.nlevels:\n raise ValueError('len(right_on) must equal the number '\n 'of levels in the index of \"left\"')\n self.left_on = [None] * n\n if len(self.right_on) != len(self.left_on):\n raise ValueError(\"len(right_on) must equal len(left_on)\")\n\n def _validate(self, validate):\n\n # Check uniqueness of each\n if self.left_index:\n left_unique = self.orig_left.index.is_unique\n else:\n left_unique = MultiIndex.from_arrays(self.left_join_keys\n ).is_unique\n\n if self.right_index:\n right_unique = self.orig_right.index.is_unique\n else:\n right_unique = MultiIndex.from_arrays(self.right_join_keys\n ).is_unique\n\n # Check data integrity\n if validate in [\"one_to_one\", \"1:1\"]:\n if not left_unique and not right_unique:\n raise MergeError(\"Merge keys are not unique in either left\"\n \" or right dataset; not a one-to-one merge\")\n elif not left_unique:\n raise MergeError(\"Merge keys are not unique in left dataset;\"\n \" not a one-to-one merge\")\n elif not right_unique:\n raise MergeError(\"Merge keys are not unique in right dataset;\"\n \" not a one-to-one merge\")\n\n elif validate in [\"one_to_many\", \"1:m\"]:\n if not left_unique:\n raise MergeError(\"Merge keys are not unique in left dataset;\"\n \" not a one-to-many merge\")\n\n elif validate in [\"many_to_one\", \"m:1\"]:\n if not right_unique:\n raise MergeError(\"Merge keys are not unique in right dataset;\"\n \" not a many-to-one merge\")\n\n elif validate in ['many_to_many', 'm:m']:\n pass\n\n else:\n raise ValueError(\"Not a valid argument for validate\")\n\n\ndef 
_get_join_indexers(left_keys, right_keys, sort=False, how='inner',\n **kwargs):\n \"\"\"\n\n Parameters\n ----------\n left_keys: ndarray, Index, Series\n right_keys: ndarray, Index, Series\n sort: boolean, default False\n how: string {'inner', 'outer', 'left', 'right'}, default 'inner'\n\n Returns\n -------\n tuple of (left_indexer, right_indexer)\n indexers into the left_keys, right_keys\n\n \"\"\"\n from functools import partial\n\n assert len(left_keys) == len(right_keys), \\\n 'left_key and right_keys must be the same length'\n\n # bind `sort` arg. of _factorize_keys\n fkeys = partial(_factorize_keys, sort=sort)\n\n # get left & right join labels and num. of levels at each location\n llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys)))\n\n # get flat i8 keys from label lists\n lkey, rkey = _get_join_keys(llab, rlab, shape, sort)\n\n # factorize keys to a dense i8 space\n # `count` is the num. of unique keys\n # set(lkey) | set(rkey) == range(count)\n lkey, rkey, count = fkeys(lkey, rkey)\n\n # preserve left frame order if how == 'left' and sort == False\n kwargs = copy.copy(kwargs)\n if how == 'left':\n kwargs['sort'] = sort\n join_func = _join_functions[how]\n\n return join_func(lkey, rkey, count, **kwargs)\n\n\ndef _restore_dropped_levels_multijoin(left, right, dropped_level_names,\n join_index, lindexer, rindexer):\n \"\"\"\n *this is an internal non-public method*\n\n Returns the levels, labels and names of a multi-index to multi-index join.\n Depending on the type of join, this method restores the appropriate\n dropped levels of the joined multi-index.\n The method relies on lidx, rindexer which hold the index positions of\n left and right, where a join was feasible\n\n Parameters\n ----------\n left : MultiIndex\n left index\n right : MultiIndex\n right index\n dropped_level_names : str array\n list of non-common level names\n join_index : MultiIndex\n the index of the join between the\n common levels of left and right\n lindexer : intp array\n left indexer\n rindexer : intp array\n right indexer\n\n Returns\n -------\n levels : list of Index\n levels of combined multiindexes\n labels : intp array\n labels of combined multiindexes\n names : str array\n names of combined multiindexes\n\n \"\"\"\n\n def _convert_to_mulitindex(index):\n if isinstance(index, MultiIndex):\n return index\n else:\n return MultiIndex.from_arrays([index.values],\n names=[index.name])\n\n # For multi-multi joins with one overlapping level,\n # the returned index if of type Index\n # Assure that join_index is of type MultiIndex\n # so that dropped levels can be appended\n join_index = _convert_to_mulitindex(join_index)\n\n join_levels = join_index.levels\n join_codes = join_index.codes\n join_names = join_index.names\n\n # lindexer and rindexer hold the indexes where the join occurred\n # for left and right respectively. 
If left/right is None then\n # the join occurred on all indices of left/right\n if lindexer is None:\n lindexer = range(left.size)\n\n if rindexer is None:\n rindexer = range(right.size)\n\n # Iterate through the levels that must be restored\n for dropped_level_name in dropped_level_names:\n if dropped_level_name in left.names:\n idx = left\n indexer = lindexer\n else:\n idx = right\n indexer = rindexer\n\n # The index of the level name to be restored\n name_idx = idx.names.index(dropped_level_name)\n\n restore_levels = idx.levels[name_idx]\n # Inject -1 in the codes list where a join was not possible\n # IOW indexer[i]=-1\n codes = idx.codes[name_idx]\n restore_codes = algos.take_nd(codes, indexer, fill_value=-1)\n\n join_levels = join_levels + [restore_levels]\n join_codes = join_codes + [restore_codes]\n join_names = join_names + [dropped_level_name]\n\n return join_levels, join_codes, join_names\n\n\nclass _OrderedMerge(_MergeOperation):\n _merge_type = 'ordered_merge'\n\n def __init__(self, left, right, on=None, left_on=None, right_on=None,\n left_index=False, right_index=False, axis=1,\n suffixes=('_x', '_y'), copy=True,\n fill_method=None, how='outer'):\n\n self.fill_method = fill_method\n _MergeOperation.__init__(self, left, right, on=on, left_on=left_on,\n left_index=left_index,\n right_index=right_index,\n right_on=right_on, axis=axis,\n how=how, suffixes=suffixes,\n sort=True # factorize sorts\n )\n\n def get_result(self):\n join_index, left_indexer, right_indexer = self._get_join_info()\n\n # this is a bit kludgy\n ldata, rdata = self.left._data, self.right._data\n lsuf, rsuf = self.suffixes\n\n llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,\n rdata.items, rsuf)\n\n if self.fill_method == 'ffill':\n left_join_indexer = libjoin.ffill_indexer(left_indexer)\n right_join_indexer = libjoin.ffill_indexer(right_indexer)\n else:\n left_join_indexer = left_indexer\n right_join_indexer = right_indexer\n\n lindexers = {\n 1: left_join_indexer} if left_join_indexer is not None else {}\n rindexers = {\n 1: right_join_indexer} if right_join_indexer is not None else {}\n\n result_data = concatenate_block_managers(\n [(ldata, lindexers), (rdata, rindexers)],\n axes=[llabels.append(rlabels), join_index],\n concat_axis=0, copy=self.copy)\n\n typ = self.left._constructor\n result = typ(result_data).__finalize__(self, method=self._merge_type)\n\n self._maybe_add_join_keys(result, left_indexer, right_indexer)\n\n return result\n\n\ndef _asof_function(direction):\n name = 'asof_join_{dir}'.format(dir=direction)\n return getattr(libjoin, name, None)\n\n\ndef _asof_by_function(direction):\n name = 'asof_join_{dir}_on_X_by_Y'.format(dir=direction)\n return getattr(libjoin, name, None)\n\n\n_type_casters = {\n 'int64_t': ensure_int64,\n 'double': ensure_float64,\n 'object': ensure_object,\n}\n\n\ndef _get_cython_type_upcast(dtype):\n \"\"\" Upcast a dtype to 'int64_t', 'double', or 'object' \"\"\"\n if is_integer_dtype(dtype):\n return 'int64_t'\n elif is_float_dtype(dtype):\n return 'double'\n else:\n return 'object'\n\n\nclass _AsOfMerge(_OrderedMerge):\n _merge_type = 'asof_merge'\n\n def __init__(self, left, right, on=None, left_on=None, right_on=None,\n left_index=False, right_index=False,\n by=None, left_by=None, right_by=None,\n axis=1, suffixes=('_x', '_y'), copy=True,\n fill_method=None,\n how='asof', tolerance=None,\n allow_exact_matches=True,\n direction='backward'):\n\n self.by = by\n self.left_by = left_by\n self.right_by = right_by\n self.tolerance = tolerance\n 
self.allow_exact_matches = allow_exact_matches\n self.direction = direction\n\n _OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,\n right_on=right_on, left_index=left_index,\n right_index=right_index, axis=axis,\n how=how, suffixes=suffixes,\n fill_method=fill_method)\n\n def _validate_specification(self):\n super(_AsOfMerge, self)._validate_specification()\n\n # we only allow on to be a single item for on\n if len(self.left_on) != 1 and not self.left_index:\n raise MergeError(\"can only asof on a key for left\")\n\n if len(self.right_on) != 1 and not self.right_index:\n raise MergeError(\"can only asof on a key for right\")\n\n if self.left_index and isinstance(self.left.index, MultiIndex):\n raise MergeError(\"left can only have one index\")\n\n if self.right_index and isinstance(self.right.index, MultiIndex):\n raise MergeError(\"right can only have one index\")\n\n # set 'by' columns\n if self.by is not None:\n if self.left_by is not None or self.right_by is not None:\n raise MergeError('Can only pass by OR left_by '\n 'and right_by')\n self.left_by = self.right_by = self.by\n if self.left_by is None and self.right_by is not None:\n raise MergeError('missing left_by')\n if self.left_by is not None and self.right_by is None:\n raise MergeError('missing right_by')\n\n # add 'by' to our key-list so we can have it in the\n # output as a key\n if self.left_by is not None:\n if not is_list_like(self.left_by):\n self.left_by = [self.left_by]\n if not is_list_like(self.right_by):\n self.right_by = [self.right_by]\n\n if len(self.left_by) != len(self.right_by):\n raise MergeError('left_by and right_by must be same length')\n\n self.left_on = self.left_by + list(self.left_on)\n self.right_on = self.right_by + list(self.right_on)\n\n # check 'direction' is valid\n if self.direction not in ['backward', 'forward', 'nearest']:\n raise MergeError('direction invalid: {direction}'\n .format(direction=self.direction))\n\n @property\n def _asof_key(self):\n \"\"\" This is our asof key, the 'on' \"\"\"\n return self.left_on[-1]\n\n def _get_merge_keys(self):\n\n # note this function has side effects\n (left_join_keys,\n right_join_keys,\n join_names) = super(_AsOfMerge, self)._get_merge_keys()\n\n # validate index types are the same\n for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)):\n if not is_dtype_equal(lk.dtype, rk.dtype):\n raise MergeError(\"incompatible merge keys [{i}] {lkdtype} and \"\n \"{rkdtype}, must be the same type\"\n .format(i=i, lkdtype=lk.dtype,\n rkdtype=rk.dtype))\n\n # validate tolerance; must be a Timedelta if we have a DTI\n if self.tolerance is not None:\n\n if self.left_index:\n lt = self.left.index\n else:\n lt = left_join_keys[-1]\n\n msg = (\"incompatible tolerance {tolerance}, must be compat \"\n \"with type {lkdtype}\".format(\n tolerance=type(self.tolerance),\n lkdtype=lt.dtype))\n\n if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):\n if not isinstance(self.tolerance, Timedelta):\n raise MergeError(msg)\n if self.tolerance < Timedelta(0):\n raise MergeError(\"tolerance must be positive\")\n\n elif is_int64_dtype(lt):\n if not is_integer(self.tolerance):\n raise MergeError(msg)\n if self.tolerance < 0:\n raise MergeError(\"tolerance must be positive\")\n\n elif is_float_dtype(lt):\n if not is_number(self.tolerance):\n raise MergeError(msg)\n if self.tolerance < 0:\n raise MergeError(\"tolerance must be positive\")\n\n else:\n raise MergeError(\"key must be integer, timestamp or float\")\n\n # validate allow_exact_matches\n if not 
is_bool(self.allow_exact_matches):\n msg = \"allow_exact_matches must be boolean, passed {passed}\"\n raise MergeError(msg.format(passed=self.allow_exact_matches))\n\n return left_join_keys, right_join_keys, join_names\n\n def _get_join_indexers(self):\n \"\"\" return the join indexers \"\"\"\n\n def flip(xs):\n \"\"\" unlike np.transpose, this returns an array of tuples \"\"\"\n labels = list(string.ascii_lowercase[:len(xs)])\n dtypes = [x.dtype for x in xs]\n labeled_dtypes = list(zip(labels, dtypes))\n return np.array(lzip(*xs), labeled_dtypes)\n\n # values to compare\n left_values = (self.left.index.values if self.left_index else\n self.left_join_keys[-1])\n right_values = (self.right.index.values if self.right_index else\n self.right_join_keys[-1])\n tolerance = self.tolerance\n\n # we require sortedness and non-null values in the join keys\n msg_sorted = \"{side} keys must be sorted\"\n msg_missings = \"Merge keys contain null values on {side} side\"\n\n if not Index(left_values).is_monotonic:\n if isnull(left_values).any():\n raise ValueError(msg_missings.format(side='left'))\n else:\n raise ValueError(msg_sorted.format(side='left'))\n\n if not Index(right_values).is_monotonic:\n if isnull(right_values).any():\n raise ValueError(msg_missings.format(side='right'))\n else:\n raise ValueError(msg_sorted.format(side='right'))\n\n # initial type conversion as needed\n if needs_i8_conversion(left_values):\n left_values = left_values.view('i8')\n right_values = right_values.view('i8')\n if tolerance is not None:\n tolerance = tolerance.value\n\n # a \"by\" parameter requires special handling\n if self.left_by is not None:\n # remove 'on' parameter from values if one existed\n if self.left_index and self.right_index:\n left_by_values = self.left_join_keys\n right_by_values = self.right_join_keys\n else:\n left_by_values = self.left_join_keys[0:-1]\n right_by_values = self.right_join_keys[0:-1]\n\n # get tuple representation of values if more than one\n if len(left_by_values) == 1:\n left_by_values = left_by_values[0]\n right_by_values = right_by_values[0]\n else:\n left_by_values = flip(left_by_values)\n right_by_values = flip(right_by_values)\n\n # upcast 'by' parameter because HashTable is limited\n by_type = _get_cython_type_upcast(left_by_values.dtype)\n by_type_caster = _type_casters[by_type]\n left_by_values = by_type_caster(left_by_values)\n right_by_values = by_type_caster(right_by_values)\n\n # choose appropriate function by type\n func = _asof_by_function(self.direction)\n return func(left_values,\n right_values,\n left_by_values,\n right_by_values,\n self.allow_exact_matches,\n tolerance)\n else:\n # choose appropriate function by type\n func = _asof_function(self.direction)\n return func(left_values,\n right_values,\n self.allow_exact_matches,\n tolerance)\n\n\ndef _get_multiindex_indexer(join_keys, index, sort):\n from functools import partial\n\n # bind `sort` argument\n fkeys = partial(_factorize_keys, sort=sort)\n\n # left & right join labels and num. 
of levels at each location\n rcodes, lcodes, shape = map(list, zip(* map(fkeys,\n index.levels,\n join_keys)))\n if sort:\n rcodes = list(map(np.take, rcodes, index.codes))\n else:\n i8copy = lambda a: a.astype('i8', subok=False, copy=True)\n rcodes = list(map(i8copy, index.codes))\n\n # fix right labels if there were any nulls\n for i in range(len(join_keys)):\n mask = index.codes[i] == -1\n if mask.any():\n # check if there already was any nulls at this location\n # if there was, it is factorized to `shape[i] - 1`\n a = join_keys[i][lcodes[i] == shape[i] - 1]\n if a.size == 0 or not a[0] != a[0]:\n shape[i] += 1\n\n rcodes[i][mask] = shape[i] - 1\n\n # get flat i8 join keys\n lkey, rkey = _get_join_keys(lcodes, rcodes, shape, sort)\n\n # factorize keys to a dense i8 space\n lkey, rkey, count = fkeys(lkey, rkey)\n\n return libjoin.left_outer_join(lkey, rkey, count, sort=sort)\n\n\ndef _get_single_indexer(join_key, index, sort=False):\n left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)\n\n left_indexer, right_indexer = libjoin.left_outer_join(\n ensure_int64(left_key),\n ensure_int64(right_key),\n count, sort=sort)\n\n return left_indexer, right_indexer\n\n\ndef _left_join_on_index(left_ax, right_ax, join_keys, sort=False):\n if len(join_keys) > 1:\n if not ((isinstance(right_ax, MultiIndex) and\n len(join_keys) == right_ax.nlevels)):\n raise AssertionError(\"If more than one join key is given then \"\n \"'right_ax' must be a MultiIndex and the \"\n \"number of join keys must be the number of \"\n \"levels in right_ax\")\n\n left_indexer, right_indexer = \\\n _get_multiindex_indexer(join_keys, right_ax, sort=sort)\n else:\n jkey = join_keys[0]\n\n left_indexer, right_indexer = \\\n _get_single_indexer(jkey, right_ax, sort=sort)\n\n if sort or len(left_ax) != len(left_indexer):\n # if asked to sort or there are 1-to-many matches\n join_index = left_ax.take(left_indexer)\n return join_index, left_indexer, right_indexer\n\n # left frame preserves order & length of its index\n return left_ax, None, right_indexer\n\n\ndef _right_outer_join(x, y, max_groups):\n right_indexer, left_indexer = libjoin.left_outer_join(y, x, max_groups)\n return left_indexer, right_indexer\n\n\n_join_functions = {\n 'inner': libjoin.inner_join,\n 'left': libjoin.left_outer_join,\n 'right': _right_outer_join,\n 'outer': libjoin.full_outer_join,\n}\n\n\ndef _factorize_keys(lk, rk, sort=True):\n # Some pre-processing for non-ndarray lk / rk\n if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):\n lk = lk._data\n rk = rk._data\n\n elif (is_categorical_dtype(lk) and\n is_categorical_dtype(rk) and\n lk.is_dtype_equal(rk)):\n if lk.categories.equals(rk.categories):\n # if we exactly match in categories, allow us to factorize on codes\n rk = rk.codes\n else:\n # Same categories in different orders -> recode\n rk = _recode_for_categories(rk.codes, rk.categories, lk.categories)\n\n lk = ensure_int64(lk.codes)\n rk = ensure_int64(rk)\n\n elif (is_extension_array_dtype(lk.dtype) and\n is_extension_array_dtype(rk.dtype) and\n lk.dtype == rk.dtype):\n lk, _ = lk._values_for_factorize()\n rk, _ = rk._values_for_factorize()\n\n if is_integer_dtype(lk) and is_integer_dtype(rk):\n # GH#23917 TODO: needs tests for case where lk is integer-dtype\n # and rk is datetime-dtype\n klass = libhashtable.Int64Factorizer\n lk = ensure_int64(com.values_from_object(lk))\n rk = ensure_int64(com.values_from_object(rk))\n elif (issubclass(lk.dtype.type, (np.timedelta64, np.datetime64)) and\n issubclass(rk.dtype.type, 
(np.timedelta64, np.datetime64))):\n # GH#23917 TODO: Needs tests for non-matching dtypes\n klass = libhashtable.Int64Factorizer\n lk = ensure_int64(com.values_from_object(lk))\n rk = ensure_int64(com.values_from_object(rk))\n else:\n klass = libhashtable.Factorizer\n lk = ensure_object(lk)\n rk = ensure_object(rk)\n\n rizer = klass(max(len(lk), len(rk)))\n\n llab = rizer.factorize(lk)\n rlab = rizer.factorize(rk)\n\n count = rizer.get_count()\n\n if sort:\n uniques = rizer.uniques.to_array()\n llab, rlab = _sort_labels(uniques, llab, rlab)\n\n # NA group\n lmask = llab == -1\n lany = lmask.any()\n rmask = rlab == -1\n rany = rmask.any()\n\n if lany or rany:\n if lany:\n np.putmask(llab, lmask, count)\n if rany:\n np.putmask(rlab, rmask, count)\n count += 1\n\n return llab, rlab, count\n\n\ndef _sort_labels(uniques, left, right):\n if not isinstance(uniques, np.ndarray):\n # tuplesafe\n uniques = Index(uniques).values\n\n llength = len(left)\n labels = np.concatenate([left, right])\n\n _, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1)\n new_labels = ensure_int64(new_labels)\n new_left, new_right = new_labels[:llength], new_labels[llength:]\n\n return new_left, new_right\n\n\ndef _get_join_keys(llab, rlab, shape, sort):\n\n # how many levels can be done without overflow\n pred = lambda i: not is_int64_overflow_possible(shape[:i])\n nlev = next(filter(pred, range(len(shape), 0, -1)))\n\n # get keys for the first `nlev` levels\n stride = np.prod(shape[1:nlev], dtype='i8')\n lkey = stride * llab[0].astype('i8', subok=False, copy=False)\n rkey = stride * rlab[0].astype('i8', subok=False, copy=False)\n\n for i in range(1, nlev):\n with np.errstate(divide='ignore'):\n stride //= shape[i]\n lkey += llab[i] * stride\n rkey += rlab[i] * stride\n\n if nlev == len(shape): # all done!\n return lkey, rkey\n\n # densify current keys to avoid overflow\n lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)\n\n llab = [lkey] + llab[nlev:]\n rlab = [rkey] + rlab[nlev:]\n shape = [count] + shape[nlev:]\n\n return _get_join_keys(llab, rlab, shape, sort)\n\n\ndef _should_fill(lname, rname):\n if not isinstance(lname, str) or not isinstance(rname, str):\n return True\n return lname == rname\n\n\ndef _any(x):\n return x is not None and com._any_not_none(*x)\n\n\ndef validate_operand(obj):\n if isinstance(obj, DataFrame):\n return obj\n elif isinstance(obj, Series):\n if obj.name is None:\n raise ValueError('Cannot merge a Series without a name')\n else:\n return obj.to_frame()\n else:\n raise TypeError('Can only merge Series or DataFrame objects, '\n 'a {obj} was passed'.format(obj=type(obj)))\n"
] | [
[
"pandas.core.common.values_from_object",
"pandas.core.dtypes.common.is_number",
"pandas.core.sorting.is_int64_overflow_possible",
"numpy.any",
"pandas.core.reshape.concat.concat",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.Categorical",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.core.common._any_not_none",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.common.is_list_like",
"pandas._libs.join.ffill_indexer",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.errors.MergeError",
"pandas.core.dtypes.missing.na_value_for_dtype",
"pandas.core.algorithms.take_1d",
"numpy.isnan",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas.core.sorting.safe_sort",
"pandas.core.dtypes.common.ensure_object",
"pandas.compat.lzip",
"pandas._libs.lib.infer_dtype",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.algorithms.take_nd",
"pandas.Timedelta",
"numpy.putmask",
"numpy.prod",
"pandas.core.dtypes.missing.isnull",
"pandas.Index",
"pandas.core.dtypes.common.is_bool",
"pandas.core.internals.items_overlap_with_suffix",
"pandas.core.common.maybe_make_list",
"pandas.core.dtypes.common.is_int64_dtype",
"pandas.util._decorators.Substitution",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.MultiIndex.from_arrays",
"pandas.core.dtypes.common.is_datetimelike",
"pandas.core.arrays.categorical._recode_for_categories",
"pandas.core.dtypes.common.is_dtype_equal",
"numpy.errstate",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.common.is_array_like",
"pandas._libs.join.left_outer_join",
"numpy.concatenate",
"pandas.core.dtypes.common.is_integer_dtype"
]
] |
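The merge.py entry above documents pd.merge_asof: a backward search by default, optional matching on 'by' columns before the 'on' key, and a tolerance window. A minimal, self-contained sketch of that documented behaviour follows; the frames and column names are made up for illustration and are not part of the record above.

```python
import pandas as pd

# Both frames must be sorted by the 'on' key, as the docstring above requires.
trades = pd.DataFrame({
    "time": pd.to_datetime(["2016-05-25 13:30:00.023",
                            "2016-05-25 13:30:00.048"]),
    "ticker": ["MSFT", "GOOG"],
    "price": [51.95, 720.77],
})
quotes = pd.DataFrame({
    "time": pd.to_datetime(["2016-05-25 13:30:00.023",
                            "2016-05-25 13:30:00.030"]),
    "ticker": ["MSFT", "MSFT"],
    "bid": [51.95, 51.97],
})

# Backward asof join: each trade takes the last quote at or before its
# timestamp, matched on 'ticker' first and only within a 2 ms tolerance.
result = pd.merge_asof(trades, quotes, on="time", by="ticker",
                       tolerance=pd.Timedelta("2ms"))
print(result)
```

Here the GOOG trade has no GOOG quote, so its bid comes back as NaN, in line with the NaN rows shown in the merge_asof docstring examples above.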
crazycodeon/flink | [
"a66a876126b2f702fa224be534aca4c729dd6f8a"
] | [
"flink-python/pyflink/fn_execution/utils/operation_utils.py"
] | [
"################################################################################\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\nimport datetime\nfrom collections.abc import Generator\n\nfrom functools import partial\n\nfrom typing import Any, Tuple, Dict, List\n\nfrom pyflink.common import Row\nfrom pyflink.fn_execution import pickle\nfrom pyflink.serializers import PickleSerializer\nfrom pyflink.table import functions\nfrom pyflink.table.udf import DelegationTableFunction, DelegatingScalarFunction, \\\n ImperativeAggregateFunction, PandasAggregateFunctionWrapper\n\n_func_num = 0\n_constant_num = 0\n\n\ndef normalize_table_function_result(it):\n def normalize_one_row(value):\n if isinstance(value, tuple):\n # We assume that tuple is a single line output\n return [*value]\n elif isinstance(value, Row):\n # We assume that tuple is a single line output\n return value._values\n else:\n # single field value\n return [value]\n\n if it is None:\n return []\n\n if isinstance(it, (list, range, Generator)):\n def func():\n for item in it:\n yield normalize_one_row(item)\n\n return func()\n else:\n return [normalize_one_row(it)]\n\n\ndef normalize_pandas_result(it):\n import pandas as pd\n arrays = []\n for result in it:\n if isinstance(result, (Row, Tuple)):\n arrays.append(pd.concat([pd.Series([item]) for item in result], axis=1))\n else:\n arrays.append(pd.Series([result]))\n return arrays\n\n\ndef wrap_input_series_as_dataframe(*args):\n import pandas as pd\n return pd.concat(args, axis=1)\n\n\ndef check_pandas_udf_result(f, *input_args):\n output = f(*input_args)\n import pandas as pd\n assert type(output) == pd.Series or type(output) == pd.DataFrame, \\\n \"The result type of Pandas UDF '%s' must be pandas.Series or pandas.DataFrame, got %s\" \\\n % (f.__name__, type(output))\n assert len(output) == len(input_args[0]), \\\n \"The result length '%d' of Pandas UDF '%s' is not equal to the input length '%d'\" \\\n % (len(output), f.__name__, len(input_args[0]))\n return output\n\n\ndef extract_over_window_user_defined_function(user_defined_function_proto):\n window_index = user_defined_function_proto.window_index\n return (*extract_user_defined_function(user_defined_function_proto, True), window_index)\n\n\ndef extract_user_defined_function(user_defined_function_proto, pandas_udaf=False)\\\n -> Tuple[str, Dict, List]:\n \"\"\"\n Extracts user-defined-function from the proto representation of a\n :class:`UserDefinedFunction`.\n\n :param user_defined_function_proto: the proto representation of the Python\n :param pandas_udaf: whether the user_defined_function_proto is pandas udaf\n :class:`UserDefinedFunction`\n \"\"\"\n\n def _next_func_num():\n global _func_num\n _func_num = 
_func_num + 1\n return _func_num\n\n def _extract_input(args) -> Tuple[str, Dict, List]:\n local_variable_dict = {}\n local_funcs = []\n args_str = []\n for arg in args:\n if arg.HasField(\"udf\"):\n # for chaining Python UDF input: the input argument is a Python ScalarFunction\n udf_arg, udf_variable_dict, udf_funcs = extract_user_defined_function(arg.udf)\n args_str.append(udf_arg)\n local_variable_dict.update(udf_variable_dict)\n local_funcs.extend(udf_funcs)\n elif arg.HasField(\"inputOffset\"):\n # the input argument is a column of the input row\n args_str.append(\"value[%s]\" % arg.inputOffset)\n else:\n # the input argument is a constant value\n constant_value_name, parsed_constant_value = \\\n _parse_constant_value(arg.inputConstant)\n args_str.append(constant_value_name)\n local_variable_dict[constant_value_name] = parsed_constant_value\n return \",\".join(args_str), local_variable_dict, local_funcs\n\n variable_dict = {}\n user_defined_funcs = []\n\n user_defined_func = pickle.loads(user_defined_function_proto.payload)\n if pandas_udaf:\n user_defined_func = PandasAggregateFunctionWrapper(user_defined_func)\n func_name = 'f%s' % _next_func_num()\n if isinstance(user_defined_func, DelegatingScalarFunction) \\\n or isinstance(user_defined_func, DelegationTableFunction):\n if user_defined_function_proto.is_pandas_udf:\n variable_dict[func_name] = partial(check_pandas_udf_result, user_defined_func.func)\n else:\n variable_dict[func_name] = user_defined_func.func\n else:\n variable_dict[func_name] = user_defined_func.eval\n user_defined_funcs.append(user_defined_func)\n\n func_args, input_variable_dict, input_funcs = _extract_input(user_defined_function_proto.inputs)\n variable_dict.update(input_variable_dict)\n user_defined_funcs.extend(input_funcs)\n if user_defined_function_proto.takes_row_as_input:\n if input_variable_dict:\n # for constant or other udfs as input arguments.\n func_str = \"%s(%s)\" % (func_name, func_args)\n elif user_defined_function_proto.is_pandas_udf or pandas_udaf:\n # for pandas udf/udaf, the input data structure is a List of Pandas.Series\n # we need to merge these Pandas.Series into a Pandas.DataFrame\n variable_dict['wrap_input_series_as_dataframe'] = wrap_input_series_as_dataframe\n func_str = \"%s(wrap_input_series_as_dataframe(%s))\" % (func_name, func_args)\n else:\n # directly use `value` as input argument\n # e.g.\n # lambda value: Row(value[0], value[1])\n # can be optimized to\n # lambda value: value\n func_str = \"%s(value)\" % func_name\n else:\n func_str = \"%s(%s)\" % (func_name, func_args)\n return func_str, variable_dict, user_defined_funcs\n\n\ndef _parse_constant_value(constant_value) -> Tuple[str, Any]:\n j_type = constant_value[0]\n serializer = PickleSerializer()\n pickled_data = serializer.loads(constant_value[1:])\n # the type set contains\n # TINYINT,SMALLINT,INTEGER,BIGINT,FLOAT,DOUBLE,DECIMAL,CHAR,VARCHAR,NULL,BOOLEAN\n # the pickled_data doesn't need to transfer to anther python object\n if j_type == 0:\n parsed_constant_value = pickled_data\n # the type is DATE\n elif j_type == 1:\n parsed_constant_value = \\\n datetime.date(year=1970, month=1, day=1) + datetime.timedelta(days=pickled_data)\n # the type is TIME\n elif j_type == 2:\n seconds, milliseconds = divmod(pickled_data, 1000)\n minutes, seconds = divmod(seconds, 60)\n hours, minutes = divmod(minutes, 60)\n parsed_constant_value = datetime.time(hours, minutes, seconds, milliseconds * 1000)\n # the type is TIMESTAMP\n elif j_type == 3:\n parsed_constant_value = \\\n 
datetime.datetime(year=1970, month=1, day=1, hour=0, minute=0, second=0) \\\n + datetime.timedelta(milliseconds=pickled_data)\n else:\n raise Exception(\"Unknown type %s, should never happen\" % str(j_type))\n\n def _next_constant_num():\n global _constant_num\n _constant_num = _constant_num + 1\n return _constant_num\n\n constant_value_name = 'c%s' % _next_constant_num()\n return constant_value_name, parsed_constant_value\n\n\ndef extract_user_defined_aggregate_function(\n current_index,\n user_defined_function_proto,\n distinct_info_dict: Dict[Tuple[List[str]], Tuple[List[int], List[int]]]):\n user_defined_agg = load_aggregate_function(user_defined_function_proto.payload)\n assert isinstance(user_defined_agg, ImperativeAggregateFunction)\n args_str = []\n local_variable_dict = {}\n for arg in user_defined_function_proto.inputs:\n if arg.HasField(\"inputOffset\"):\n # the input argument is a column of the input row\n args_str.append(\"value[%s]\" % arg.inputOffset)\n else:\n # the input argument is a constant value\n constant_value_name, parsed_constant_value = \\\n _parse_constant_value(arg.inputConstant)\n for key, value in local_variable_dict.items():\n if value == parsed_constant_value:\n constant_value_name = key\n break\n if constant_value_name not in local_variable_dict:\n local_variable_dict[constant_value_name] = parsed_constant_value\n args_str.append(constant_value_name)\n\n if user_defined_function_proto.distinct:\n if tuple(args_str) in distinct_info_dict:\n distinct_info_dict[tuple(args_str)][0].append(current_index)\n distinct_info_dict[tuple(args_str)][1].append(user_defined_function_proto.filter_arg)\n distinct_index = distinct_info_dict[tuple(args_str)][0][0]\n else:\n distinct_info_dict[tuple(args_str)] = \\\n ([current_index], [user_defined_function_proto.filter_arg])\n distinct_index = current_index\n else:\n distinct_index = -1\n if user_defined_function_proto.takes_row_as_input and not local_variable_dict:\n # directly use `value` as input argument\n # e.g.\n # lambda value: Row(value[0], value[1])\n # can be optimized to\n # lambda value: value\n func_str = \"lambda value : [value]\"\n else:\n func_str = \"lambda value : (%s,)\" % \",\".join(args_str)\n return user_defined_agg, \\\n eval(func_str, local_variable_dict) \\\n if args_str else lambda v: tuple(), \\\n user_defined_function_proto.filter_arg, \\\n distinct_index\n\n\ndef is_built_in_function(payload):\n # The payload may be a pickled bytes or the class name of the built-in functions.\n # If it represents a built-in function, it will start with 0x00.\n # If it is a pickled bytes, it will start with 0x80.\n return payload[0] == 0\n\n\ndef load_aggregate_function(payload):\n if is_built_in_function(payload):\n built_in_function_class_name = payload[1:].decode(\"utf-8\")\n cls = getattr(functions, built_in_function_class_name)\n return cls()\n else:\n return pickle.loads(payload)\n"
] | [
[
"pandas.Series",
"pandas.concat"
]
] |
Leonardo-Maciel/PSO_Maciel | [
"3939448da45716260f3ac7811afdd13be670f346"
] | [
"Funções Analíticas/Virtualenv/Lib/site-packages/matplotlib/tests/test_testing.py"
] | [
"import warnings\nimport pytest\nfrom matplotlib.testing.decorators import check_figures_equal\n\n\[email protected](\n strict=True, reason=\"testing that warnings fail tests\"\n)\ndef test_warn_to_fail():\n warnings.warn(\"This should fail the test\")\n\n\[email protected](\"a\", [1])\n@check_figures_equal(extensions=[\"png\"])\[email protected](\"b\", [1])\ndef test_parametrize_with_check_figure_equal(a, fig_ref, b, fig_test):\n assert a == b\n\n\ndef test_wrap_failure():\n with pytest.raises(ValueError, match=\"^The decorated function\"):\n @check_figures_equal()\n def should_fail(test, ref):\n pass\n"
] | [
[
"matplotlib.testing.decorators.check_figures_equal"
]
] |
xishansnow/MLAPP | [
"2f30cd94fd852a3f66fe92a124f65722bd2af509"
] | [
"mlapp/MLAPP_CODE/MLAPP-C6-Code/bootstrapDemo.py"
] | [
"import numpy as np\nfrom unidrnd import unid_rnd\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nplt.rcParams[\"figure.figsize\"]=(10,15)\n# 真实的模型参数值\ntheta = 0.7\n# 采样的样本数量\nn_samples = [10, 100]\nx_label = ['(a)','(b)','(c)','(d)']\nfor index,n_sample in enumerate(n_samples):\n B = 10000 # 重复试验的次数\n X = np.random.rand(n_sample) < theta\n estimator = lambda X: np.mean(X)\n bmle = estimator(X)\n mleBoot = np.zeros(B)\n mleBootNP = np.zeros(B)\n for b in range(B):\n Xb = np.random.rand(n_sample) < bmle # 有参数采样\n mleBoot[b] = estimator(Xb) \n ndx = unid_rnd(n_sample, n_sample) # 无参数采样\n Xnonparam = X[ndx]\n mleBootNP[b] = estimator(Xnonparam)\n \n ax1 = plt.subplot(2,2,index+1)\n ax1.hist(mleBoot, density=True)\n ax1.set_title('Boot:true={},n={},mle={},se={:.2f}'.format(theta,n_sample,bmle,np.std(mleBoot)))\n ax1.set_xlabel(x_label[index])\n #ax2 = plt.subplot(122)\n #ax2.hist(mleBootNP, density=True)\n\n # 后验分布\n N1 = np.sum(X == 1)\n N0 = np.sum(X == 0)\n alpha1 =1 ; alpha0 = 0\n a = N1 + alpha1\n b = N0 + alpha0\n X_post = stats.beta.rvs(a=a,b=b,size=B)\n ax2 = plt.subplot(2,2,index+3)\n ax2.hist(X_post, density=True)\n ax2.set_title('Bayes:true={},n={},post_mean={:.2f},se={:.2f}'.format(theta,n_sample,np.mean(X_post),np.std(X_post)))\n ax2.set_xlabel(x_label[index+2])\n\nplt.show()"
] | [
[
"numpy.sum",
"scipy.stats.beta.rvs",
"numpy.zeros",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.random.rand",
"numpy.std",
"numpy.mean"
]
] |
Sunmingzhen/CogView | [
"6bc71b7cc07a209d258729674019f7d15a0ac4bb"
] | [
"data_utils/configure_data.py"
] | [
"# -*- encoding: utf-8 -*-\n'''\n@File : configure_data.py\n@Time : 2021/01/11 23:28:38\n@Author : Ming Ding \n@Contact : [email protected]\n'''\n\n# here put the import lib\nimport os\nimport ipdb\nimport sys\nimport math\nimport random\nfrom tqdm import tqdm\nimport copy\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom bisect import bisect_right\n\nfrom .unified_tokenizer import get_tokenizer\nfrom .datasets import get_dataset_by_type\nfrom torch.utils import data\nfrom .samplers import DistributedBatchSampler\n\nimport mpu\n\ndef make_data_loader_origin(dataset, batch_size, num_iters, args):\n world_size = torch.distributed.get_world_size(\n group=mpu.get_data_parallel_group())\n rank = torch.distributed.get_rank(group=mpu.get_data_parallel_group())\n distributed = world_size > 1\n\n sampler = torch.utils.data.SequentialSampler(dataset)\n drop_last = distributed\n #the GPUs in the same model parallel group receive the same data\n if distributed:\n batch_sampler = DistributedBatchSampler(sampler,\n batch_size,\n drop_last,\n rank,\n world_size,\n gradient_accumulation_steps=args.gradient_accumulation_steps)\n else:\n batch_sampler = torch.utils.data.BatchSampler(sampler,\n batch_size,\n drop_last)\n data_loader = torch.utils.data.DataLoader(dataset,\n batch_sampler=batch_sampler,\n num_workers=args.num_workers,\n pin_memory=True)\n\n return data_loader\n\ndef make_data_loader(dataset, batch_size, num_iters, args):\n world_size = torch.distributed.get_world_size(\n group=mpu.get_data_parallel_group())\n rank = torch.distributed.get_rank(group=mpu.get_data_parallel_group())\n distributed = world_size > 1\n\n # sampler = torch.utils.data.SequentialSampler(dataset)\n # drop_last = distributed\n # the GPUs in the same model parallel group receive the same data\n # if distributed:\n # batch_sampler = DistributedBatchSampler(sampler,\n # batch_size,\n # drop_last,\n # rank,\n # world_size,\n # gradient_accumulation_steps=args.gradient_accumulation_steps)\n # else:\n # batch_sampler = torch.utils.data.BatchSampler(sampler,\n # batch_size,\n # drop_last)\n # data_loader = torch.utils.data.DataLoader(dataset,\n # batch_sampler=batch_sampler,\n # num_workers=args.num_workers,\n # pin_memory=True)\n\n data_loader = torch.utils.data.DataLoader(dataset,\n num_workers=args.num_workers,\n pin_memory=True, drop_last=True)\n return data_loader\n\ndef make_dataset(dataset_type, path, args, **kwargs):\n \"\"\"function to create datasets+tokenizers for common options\"\"\"\n\n return get_dataset_by_type(dataset_type, path, args)\n\ndef make_dataset_origin(dataset_type, path, split, args, **kwargs):\n \"\"\"function to create datasets+tokenizers for common options\"\"\"\n print('make dataset ...', path)\n if split is None:\n split = [1.]\n\n assert isinstance(path, list)\n # TODO other dsclass, e.g. 
odps\n # ds = [get_dataset_by_type(dataset_type, p, args) for p in path]\n # dataset object can be copied N times\n ds = []\n for p in path:\n d = get_dataset_by_type(dataset_type, p, args)\n ds.append(d)\n\n # if p.find('t2i') >= 0:\n # ds.extend([d] * 4)\n # print(f'Enlarge {p} 4 times...')\n # elif p.find('i2t') >= 0:\n # ds.extend([d] * 2)\n # print(f'Enlarge {p} 2 times...')\n # else:\n # ds.append(d)\n\n ds = RandomMappingDataset(ConcatDataset(ds))\n\n if should_split(split):\n ds = split_ds(ds, split) # Large dataset, cannot shuffle, randomly mapping\n # FIXME this will merge valid set and train set.\n return ds\n\ndef make_loaders(args):\n \"\"\"makes training/val/test\"\"\"\n\n world_size = torch.distributed.get_world_size(\n group=mpu.get_data_parallel_group())\n batch_size = args.batch_size * world_size\n eval_batch_size = batch_size\n if args.eval_batch_size is not None:\n eval_batch_size = args.eval_batch_size * world_size\n\n split = get_split(args)\n\n data_set_args = {\n 'path': args.train_data,\n 'dataset_type': args.dataset_type,\n 'split': split,\n }\n\n eval_set_args = copy.copy(data_set_args)\n eval_set_args['split'] = [1.]\n \n # make datasets splits and tokenizer\n train = None\n valid = None\n test = None\n\n if args.train_data is not None:\n # train, valid, test = make_dataset_origin(**data_set_args, args=args)\n train, valid, test = get_dataset_by_type(args.dataset_type, args.train_data, args=args)\n # if should_split(split):\n # train, valid, test = train\n\n # wrap datasets with data loader\n if train is not None and args.batch_size > 0:\n train = make_data_loader_origin(train, batch_size, args.train_iters, args)\n args.do_train = True\n else:\n args.do_train = False\n eval_batch_size = eval_batch_size if eval_batch_size != 0 else batch_size\n if valid is not None:\n valid = make_data_loader_origin(valid, eval_batch_size, args.train_iters, args)\n args.do_valid = True\n else:\n args.do_valid = False\n if test is not None:\n test = make_data_loader_origin(test, eval_batch_size, len(test) // eval_batch_size + 1, args)\n args.do_test = True\n else:\n args.do_test = False\n\n return train, valid, test\n\n\n\ndef get_split(args):\n \"\"\"\n Get dataset splits from comma separated string list\n \"\"\"\n splits = []\n if args.split.find(',') != -1:\n splits = [float(s) for s in args.split.split(',')]\n elif args.split.find('/') != -1:\n splits = [float(s) for s in args.split.split('/')]\n else:\n splits = [float(args.split)]\n split_total = sum(splits)\n if split_total < 1.:\n splits.append(1-split_total)\n while len(splits) < 3:\n splits.append(0.)\n splits = splits[:3]\n if args.valid_data is not None:\n splits[1] = 0.\n if args.test_data is not None:\n splits[2] = 0.\n final_sum = sum(splits)\n return [s/final_sum for s in splits]\n\ndef should_split(split):\n \"\"\"\n given split proportions checks if should split\n Examples:\n >>> should_split([10,0,0]) \n False\n >>> should_split([1,.1,.2])\n True\n \"\"\"\n return max(split) / sum(split) != 1.\n\ndef split_ds(ds, split=[.8,.2,.0]):\n \"\"\"\n Split a dataset into subsets given proportions of how\n much to allocate per split. If a split is 0% returns None for that split.\n Purpose: Useful for creating train/val/test splits\n Arguments:\n ds (Dataset or array-like): Data to be split.\n split (1D array-like): proportions to split `ds`. `sum(splits) != 0`\n shuffle (boolean): Randomly split dataset. 
Default: True\n \"\"\"\n split_sum = sum(split)\n if split_sum == 0:\n raise Exception('Split cannot sum to 0.')\n split = np.array(split)\n split /= split_sum\n ds_len = len(ds)\n\n start_idx = 0\n residual_idx = 0\n rtn_ds = [None]*len(split)\n for i, f in enumerate(split):\n if f != 0:\n proportion = ds_len*split[i]\n residual_idx += proportion % 1\n split_ = int(int(proportion) + residual_idx)\n split_range = (start_idx, start_idx+max(split_, 1))\n rtn_ds[i] = SplitDataset(ds, split_range)\n start_idx += split_\n residual_idx %= 1\n return rtn_ds\n\nclass ConcatDataset(data.Dataset):\n \"\"\"\n Dataset to concatenate multiple datasets.\n Purpose: useful to assemble different existing datasets, possibly\n large-scale datasets as the concatenation operation is done in an\n on-the-fly manner.\n Arguments:\n datasets (sequence): List of datasets to be concatenated.\n \"\"\"\n\n @staticmethod\n def cumsum(sequence):\n r, s = [], 0\n for e in sequence:\n l = len(e)\n r.append(l + s)\n s += l\n return r\n\n def __init__(self, datasets, **kwargs):\n super(ConcatDataset, self).__init__()\n assert len(datasets) > 0, 'datasets should not be an empty iterable'\n self.datasets = list(datasets)\n self.cumulative_sizes = self.cumsum(self.datasets)\n\n def __len__(self):\n return self.cumulative_sizes[-1]\n\n def __getitem__(self, idx):\n dataset_idx = bisect_right(self.cumulative_sizes, idx)\n if dataset_idx == 0:\n sample_idx = idx\n else:\n sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]\n return self.datasets[dataset_idx][sample_idx]\n\n\nclass SplitDataset(data.Dataset):\n \"\"\"\n Dataset wrapper to access a subset of another dataset.\n Purpose: useful to index into existing datasets, possibly\n large-scale datasets as the subindexing operation is done in an\n on-the-fly manner.\n Arguments:\n ds (Dataset or array-like): List of datasets to be subindexed\n split_range (Tuple): (Left, Right)\n \"\"\"\n def __init__(self, ds, split_range, **kwargs):\n self.split_range = split_range\n self.wrapped_data = ds\n\n def __len__(self):\n return self.split_range[1] - self.split_range[0]\n\n def __getitem__(self, index):\n index += self.split_range[0]\n assert index < self.split_range[1]\n return self.wrapped_data[index]\n\n def __iter__(self):\n for idx in range(*self.split_range):\n yield self.wrapped_data[idx]\n\nclass RandomMappingDataset(data.Dataset):\n '''\n Dataset wrapper to randomly mapping indices to original order.\n Will also enlarge the length\n '''\n def __init__(self, ds, **kwargs):\n self.wrapped_data = ds\n\n def __len__(self):\n return len(self.wrapped_data) * 60\n\n def __getitem__(self, index):\n rng = random.Random(index)\n rng = np.random.RandomState(seed=[rng.randint(0, 2**32-1) for _ in range(16)])\n index = rng.randint(len(self.wrapped_data))\n return self.wrapped_data[index]\n\ndef detect_new_datasets(args):\n if args.new_dataset_path is None:\n return None\n if not os.path.exists(args.new_dataset_path):\n print('Warning: new_dataset_path not exists... skip detection.')\n return None\n current_datasets = [str(os.path.abspath(path)) for path in args.train_data]\n\n found = []\n for _p in os.listdir(args.new_dataset_path):\n p = os.path.join(args.new_dataset_path, _p)\n if str(p).endswith('lmdb') and not str(os.path.abspath(p)) in current_datasets:\n found.append(p)\n if len(found) == 0:\n return None\n else:\n args.train_data = args.train_data + found\n return make_loaders(args) \n"
] | [
[
"numpy.array",
"torch.utils.data.DataLoader",
"torch.utils.data.SequentialSampler",
"torch.utils.data.BatchSampler"
]
] |
Evoiis/Robot-Follow-Ahead-with-Obstacle-Avoidance | [
"72a407eafc7cdebf0639314c4f4ad0dd6902e6e8"
] | [
"far_ws/src/follow_ahead_rl/old_script/plot_bag.py"
] | [
"#!/usr/bin/env python\nimport argparse\nimport copy\nimport traceback\n\nfrom os import listdir\nfrom os.path import isfile, join\n\n#from cv_bridge import CvBridge\n\n\nimport math\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport random\n# u\nimport numpy as np\nimport cv2 as cv\n\nimport rospy\n# Brings in the SimpleActionClient\nimport actionlib\n# Brings in the .action file and messages used by the move base action\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\n\n\nfrom squaternion import quat2euler\nfrom squaternion import euler2quat\n\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import Point\nfrom geometry_msgs.msg import Point32\nfrom geometry_msgs.msg import TransformStamped\nfrom rosgraph_msgs.msg import Clock\n\nfrom costmap_converter.msg import ObstacleArrayMsg\nfrom costmap_converter.msg import ObstacleMsg\nfrom geometry_msgs.msg import Twist\n\n\nimport threading\n\n\nimport _thread\n\nfrom squaternion import quat2euler\nfrom squaternion import euler2quat\n\nfrom simple_pid import PID\n\nimport pickle\nimport utils\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Robot():\n def __init__(self, name):\n self.name = name\n self.prev_call_vicon = None\n self.state_ = {\"position\":(None, None), \\\n \"orientation\":None}\n self.all_states_ = []\n self.last_time_observation = None\n if self.name == \"robot\":\n rospy.Subscriber(\"/vicon/Robot/Robot\", TransformStamped, self.vicon_cb)\n elif self.name == \"person\":\n rospy.Subscriber(\"/vicon/Person/Person\", TransformStamped, self.vicon_cb)\n\n def get_pos(self, idx):\n if \"position\" in self.all_states_[idx].keys():\n pos = self.all_states_[idx][\"position\"]\n else:\n pos = self.all_states_[idx][\"pos\"]\n return pos\n\n def get_orientation(self, idx):\n return self.all_states_[idx][\"orientation\"]\n\n\n def vicon_cb(self, pose_msg):\n if self.last_time_observation is not None and abs(rospy.Time.now().to_sec() - self.last_time_observation) <0.025:\n return\n pos = pose_msg.transform.translation\n self.last_time_observation = rospy.Time.now().to_sec()\n self.state_[\"position\"] = (pos.x, pos.y)\n euler = quat2euler(pose_msg.transform.rotation.x, pose_msg.transform.rotation.y, pose_msg.transform.rotation.z, pose_msg.transform.rotation.w)\n self.state_[\"orientation\"] = euler[0]\n self.all_states_.append(self.state_.copy())\n\n def get_relative_position(self, center, idx):\n relative_orientation = self.all_states_[idx]['orientation']\n center_pos = np.asarray(center.get_pos(idx))\n center_orientation = center.all_states_[idx]['orientation']\n\n # transform the pos to center coordinat\n relative_pos = np.asarray(self.get_pos(idx) - center_pos)\n rotation_matrix = np.asarray([[np.cos(-center_orientation), np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])\n relative_pos = np.matmul(relative_pos, rotation_matrix)\n\n return relative_pos\n\n def get_relative_heading_position(self, center, idx):\n relative_orientation = self.all_states_[idx]['orientation']\n center_pos = np.asarray(center.get_pos(idx))\n center_orientation = center.all_states_[idx]['orientation']\n print (np.rad2deg(relative_orientation - center_orientation))\n\n # transform the relative to center coordinat\n relative_pos = np.asarray(self.get_pos(idx) - center_pos)\n relative_pos2 = np.asarray((relative_pos[0] +math.cos(relative_orientation) , relative_pos[1] + math.sin(relative_orientation)))\n rotation_matrix = np.asarray([[np.cos(-center_orientation), 
np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])\n relative_pos = np.matmul(relative_pos, rotation_matrix)\n relative_pos2 = np.matmul(relative_pos2, rotation_matrix)\n angle_relative = np.arctan2(relative_pos2[1]-relative_pos[1], relative_pos2[0]-relative_pos[0])\n return angle_relative, relative_pos\n\n def is_bag_finish(self):\n if self.last_time_observation is not None and abs(rospy.Time.now().to_sec() - self.last_time_observation) > 1:\n return True\n return False\n\nclass Results():\n def __init__(self):\n self.center_pos_ = (0, 0)\n self.name = \"\"\n self.DESIRE_DISTANCE = 1.5\n self.colors_visualization = cv.cvtColor(cv.applyColorMap(np.arange(0, 255, dtype=np.uint8), cv.COLORMAP_WINTER), cv.COLOR_RGB2BGR).reshape(255,3).tolist()\n self.current_obsevation_image_ = np.zeros([500,500,3])\n self.current_obsevation_image_.fill(255)\n\n self.color_index = 0\n self.first_call_observation = True\n self.robot = Robot(\"robot\")\n self.person = Robot(\"person\")\n\n def add_line_observation_to_image(self, pos, pos2):\n color = self.colors_visualization[self.color_index]\n pos_image = utils.to_image_coordinate(pos, self.center_pos_)\n pos_image2 = utils.to_image_coordinate(pos2, self.center_pos_)\n if pos_image[0] >self.current_obsevation_image_.shape[0] or pos_image[0] < 0 or pos_image[1] >self.current_obsevation_image_.shape[1] or pos_image[1] < 0:\n rospy.logerr(\"problem with observation: {}\".format(pos_image))\n return\n self.new_obsevation_image_ = cv.line(self.new_obsevation_image_, (pos_image[0], pos_image[1]), (pos_image2[0], pos_image2[1]), color, 1)\n\n def add_triangle_observation_to_image(self, pos, orientation):\n color = self.colors_visualization[self.color_index]\n pos_image = utils.to_image_coordinate(pos, self.center_pos_)\n pos_triangle1 = utils.to_image_coordinate((pos[0]+math.cos(orientation)*0.3, pos[1]+math.sin(orientation)*0.3), self.center_pos_)\n pos_triangle2 = utils.to_image_coordinate((pos[0]+math.cos(orientation+math.pi/2)*0.1, pos[1]+math.sin(orientation+math.pi/2)*0.1), self.center_pos_)\n pos_triangle3 = utils.to_image_coordinate((pos[0]+math.cos(orientation-math.pi/2)*0.1, pos[1]+math.sin(orientation-math.pi/2)*0.1), self.center_pos_)\n poses = [pos_triangle1, pos_triangle2, pos_triangle3]\n\n for pos in poses:\n if pos[0] >self.current_obsevation_image_.shape[0] or pos[0] < 0 or pos[1] >self.current_obsevation_image_.shape[1] or pos[1] < 0:\n rospy.logerr(\"problem with observation: {}\".format(pos))\n return\n self.new_obsevation_image_ = cv.drawContours(self.new_obsevation_image_, [np.asarray(poses)], 0, color, -1)\n\n\n def add_arrow_observation_to_image(self, pos, orientation):\n color = self.colors_visualization[self.color_index]\n pos_image = utils.to_image_coordinate(pos, self.center_pos_)\n pos_image2 = utils.to_image_coordinate((pos[0]+math.cos(orientation)*0.3, pos[1]+math.sin(orientation)*0.3), self.center_pos_)\n if pos_image[0] >self.current_obsevation_image_.shape[0] or pos_image[0] < 0 or pos_image[1] >self.current_obsevation_image_.shape[1] or pos_image[1] < 0:\n rospy.logerr(\"problem with observation: {}\".format(pos_image))\n return\n self.new_obsevation_image_ = cv.arrowedLine(self.new_obsevation_image_, (pos_image[0], pos_image[1]), (pos_image2[0], pos_image2[1]), color, 2, tipLength=0.5)\n\n def add_circle_observation_to_image(self, pos, center_pos=None, image=None):\n color = self.colors_visualization[self.color_index]\n if image is None:\n image = self.new_obsevation_image_\n if center_pos 
is None:\n center_pos = self.center_pos_\n pos_image = utils.to_image_coordinate(pos, center_pos)\n if pos_image[0] >self.current_obsevation_image_.shape[0] or pos_image[0] < 0 or pos_image[1] >self.current_obsevation_image_.shape[1] or pos_image[1] < 0:\n rospy.logerr(\"problem with observation: {}\".format(pos_image))\n return\n return (cv.circle(image , (pos_image[0], pos_image[1]), 4, color, 2))\n\n\n\n def update_observation_image(self, idx, len_data):\n self.new_obsevation_image_ = np.copy(self.current_obsevation_image_)\n robot_pos = self.robot.get_pos(idx)\n robot_orientation = self.robot.get_orientation(idx)\n person_pos = self.person.get_pos(idx)\n person_orientation = self.person.get_orientation(idx)\n if person_orientation is None or robot_orientation is None:\n rospy.logerr(\"person or robot orientation is None\")\n return\n if self.first_call_observation:\n self.first_call_observation = False\n self.center_pos = person_pos\n #self.add_circle_observation_to_image(robot_pos)\n self.add_arrow_observation_to_image(robot_pos, robot_orientation)\n self.add_triangle_observation_to_image(person_pos, person_orientation)\n\n # self.add_line_observation_to_image(robot_pos, person_pos)\n alpha = 0.50\n self.current_obsevation_image_ = cv.addWeighted(self.new_obsevation_image_, alpha, self.current_obsevation_image_, 1 - alpha, 0)\n self.color_index += 255//len_data\n\n\n def get_current_observation_image(self):\n\n image = self.current_obsevation_image_.astype(np.uint8)\n #image = image/255.\n\n return image\n\n\n def get_angle_person_robot(self, idx):\n pos_rel = self.robot.get_relative_position(self.person, idx)\n angle_robot_person = math.atan2(pos_rel[1], pos_rel[0])\n return (utils.wrap_pi_to_pi(angle_robot_person))\n\n def get_dist_person_robot(self, idx):\n pos_rel = self.robot.get_relative_position(self.person, idx)\n return math.hypot(pos_rel[0], pos_rel[1])\n\n def get_reward(self, idx):\n reward = 0\n pos_rel = self.robot.get_relative_position(self.person, idx)\n angle_robot_person = math.atan2(pos_rel[1], pos_rel[0])\n angle_robot_person = np.rad2deg(utils.wrap_pi_to_pi(angle_robot_person))\n distance = math.hypot(pos_rel[0], pos_rel[1])\n # Negative reward for being behind the person\n if distance<0.4:\n reward -= 1\n if distance < 0.5:\n reward = -1.3\n elif abs(distance - self.DESIRE_DISTANCE) < 0.5:\n reward += 0.5 * (0.5 - abs(distance - self.DESIRE_DISTANCE))\n elif distance >= self.DESIRE_DISTANCE + 0.5:\n reward -= 0.25 * (distance - self.DESIRE_DISTANCE + 0.5)\n elif distance < self.DESIRE_DISTANCE - 0.5:\n reward -= (self.DESIRE_DISTANCE - 0.5 - distance)/(self.DESIRE_DISTANCE - 0.5)\n if abs(angle_robot_person) < 25:\n reward += 0.5 * (25 - abs(angle_robot_person)) / 25\n else:\n reward -= 0.25 * abs(angle_robot_person) / 180\n if abs(distance - self.DESIRE_DISTANCE) < 0.5 and abs(angle_robot_person) < 25:\n reward += 0.25\n\n reward = min(max(reward, -1), 1)\n return reward\n\n def save(self, name):\n dic_data = {\"name\":name,\"robot\":self.robot.all_states_, \"person\":self.person.all_states_}\n with open (name+\"_.pkl\", \"wb\") as f:\n pickle.dump(dic_data, f)\n\n def load(self, file_address, use_sim=False):\n with open(file_address, \"rb\") as f:\n dic_data = pickle.load(f)\n\n self.name = dic_data[\"name\"]\n self.person.all_states_ = dic_data[\"person\"][4:].copy()\n self.robot.all_states_ = dic_data[\"robot\"][4:].copy()\n if use_sim:\n self.person.all_states_ = [ self.person.all_states_[idx*10] for idx in range (len(self.person.all_states_)//10)]\n 
self.robot.all_states_ = [ self.robot.all_states_[idx*10] for idx in range (len(self.robot.all_states_)//10)]\n\n def wait_until_bag_finish(self):\n while not self.robot.is_bag_finish() or not self.person.is_bag_finish():\n rospy.sleep(0.1)\n rospy.loginfo(\"waiting for bag to finish\")\n if len(self.person.all_states_)>0 and len(self.robot.all_states_)>0:\n print(self.robot.get_relative_position(self.person, -1))\n print(np.rad2deg(self.get_angle_person_robot(-1)))\n print (self.robot.all_states_)\n print (self.person.all_states_)\n\n def calculate_orientation_dif(self, idx):\n ori_rel, pos_rel = self.robot.get_relative_heading_position(self.person, idx)\n return ori_rel\n\n def get_metrics(self):\n rewards = []\n orientations = []\n orientation_dif = []\n distances = []\n len_data = min(len(self.robot.all_states_), len(self.person.all_states_))\n for idx in range (len_data):\n # if idx % 10==0:\n # self.update_observation_image(idx)\n rewards.append(self.get_reward(idx))\n distances.append(self.get_dist_person_robot(idx))\n orientations.append(self.get_angle_person_robot(idx))\n orientation_dif.append(self.calculate_orientation_dif(idx))\n\n mean_orientation = np.mean(orientations)\n sum_orientations_m = 0\n for orientation in orientations:\n sum_orientations_m += np.power(utils.wrap_pi_to_pi(mean_orientation - orientation),2)\n sum_orientations_m /= len(orientations)\n std = np.sqrt(sum_orientations_m)\n\n\n return {\"name\":self.name, \"orientation_mean\":np.average(orientations), \"orientation_std\":std, \\\n \"reward\":np.sum(rewards), \"distance\":np.average(distances), \"distance_std\":np.std(distances),\\\n \"ori_dif\":np.average(orientation_dif)}\n\n\n def plot_calculate_metrics(self):\n rewards = []\n orientations = []\n distances = []\n len_data = min(len(self.robot.all_states_), len(self.person.all_states_))\n for idx in range (len_data):\n if idx % 3==0:\n self.update_observation_image(idx, len_data//3)\n rewards.append(self.get_reward(idx))\n distances.append(self.get_dist_person_robot(idx))\n orientations.append(self.get_angle_person_robot(idx))\n print (np.rad2deg(self.robot.get_relative_heading_position(self.person, 0)[0]))\n\n img = self.get_current_observation_image()\n img = cv.cvtColor(img, cv.COLOR_RGB2BGR)\n print(f\"\\n\\ndist avg: {np.average(distances)} orientation avg: {np.rad2deg(np.average(orientations))}, reward: {np.sum(rewards)} reward avg: {np.average(rewards)}\")\n cv.imshow(\"image\", img)\n cv.waitKey(0)\n\n\n\ndef plot_all_results( results, is_sim=False):\n\n name = []\n orientations = []\n rewards = []\n distances = []\n orientations_std = []\n distances_std = []\n for result in results:\n met = result.get_metrics()\n name.append(met[\"name\"])\n rewards.append(met[\"reward\"])\n distances.append(met[\"distance\"])\n distances_std.append(met[\"distance_std\"])\n orientations.append(np.rad2deg(met[\"orientation_mean\"]))\n orientations_std.append(np.rad2deg(met[\"orientation_std\"]))\n print (f\"{name[-1]}: Distance_avg: {distances[-1]:.2f} Distance_std: {distances_std[-1]:.2f} Orientation_avg: {orientations[-1]:.1f} Orientation_std: {orientations_std[-1]:.1f} reward: {rewards[-1]:.2f} ori_dif: {np.rad2deg(met['ori_dif']):0.2f}\")\n if is_sim:\n print (f\"{name[-1]}: ${distances[-1]:.2f}\\pm{distances_std[-1]:.1f}$ & ${orientations[-1]:.1f}\\pm{orientations_std[-1]:.1f}$ & ${rewards[-1]:.2f}$\")\n else:\n print (f\"{name[-1]}: ${distances[-1]:.2f}\\pm{distances_std[-1]:.1f}$ & ${orientations[-1]:.1f}\\pm{orientations_std[-1]:.1f}$ & 
${rewards[-1]:.2f}$\")\n print (\"\\n\")\n\n #df = pd.DataFrame({'name': name, 'assess':[x for x in range(len(name))]})\n\n #plt.errorbar(range(len(df['name'])), orientations, orientations_std, fmt='o')\n #plt.xticks(range(len(df['name'])), df['name'])\n\nif __name__== \"__main__\":\n parser = argparse.ArgumentParser(description='input weight file of the network')\n parser.add_argument('--name', default=\"no_name\", type=str, help='name_traj')\n parser.add_argument('--file-name', default=\"no_name\", type=str, help='name_file_to_load')\n parser.add_argument('--folder-name', default=\"no_name\", type=str, help='name_file_to_load')\n parser.add_argument('--save', action='store_true')\n parser.add_argument('--load-file', action='store_true')\n parser.add_argument('--load-folder', action='store_true')\n parser.add_argument('--plot', action='store_true')\n parser.add_argument('--use-sim-data', action='store_true')\n parser.add_argument('--from-bag', action='store_true')\n args = parser.parse_args()\n\n node = rospy.init_node('plot_results')\n if args.load_folder:\n onlyfiles = [join(args.folder_name, f) for f in listdir(args.folder_name) if isfile(join(args.folder_name, f))]\n onlyfiles.sort()\n\n all_results = []\n for pkl_name in onlyfiles:\n result = Results()\n result.load(pkl_name)\n name_list = result.name.split(\"_\")\n if not args.use_sim_data and name_list[-1] != \"planner\" and name_list[-1] != \"line\":\n print (\"error \")\n continue\n new_name = f\"{name_list[-1]}_{name_list[-2]}_base_line\"\n result.name = new_name\n result.save(new_name)\n\n all_results.append(result)\n plot_all_results(all_results, args.use_sim_data)\n #plt.show()\n\n\n\n\n else:\n result = Results()\n if args.from_bag or args.load_file:\n if args.from_bag:\n result.wait_until_bag_finish()\n else:\n result.load(args.file_name, args.use_sim_data)\n else:\n print(\"exiting you need to load or read from bag file\")\n exit(0)\n\n if args.save:\n result.save(args.name)\n\n if args.plot:\n result.plot_calculate_metrics()\n"
] | [
[
"numpy.arctan2",
"numpy.matmul",
"numpy.sum",
"numpy.sin",
"numpy.zeros",
"numpy.rad2deg",
"numpy.asarray",
"numpy.copy",
"numpy.cos",
"numpy.arange",
"numpy.sqrt",
"numpy.std",
"numpy.average",
"numpy.mean"
]
] |
ing-a-zepeda/covid19mexico | [
"9988a1e69f549ebc00cdf71fc9a9be696a371eea"
] | [
"python/ml-arima-covid.py"
] | [
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pandas.plotting import lag_plot\nfrom pandas import datetime\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom sklearn.metrics import mean_squared_error\n\ndf = pd.read_csv(\"corona2.csv\")\ndf.head(6)\n\nplt.figure()\nlag_plot(df['InfectadosDia'], lag = 1)\nplt.title('TESLA Stock - Autocorrelation plot with lag = 1')\nplt.show()\n\nplt.plot(df[\"Date\"], df[\"InfectadosDia\"])\nplt.xticks(np.arange(0,200,5), df['Date'][0:200:5], rotation=\"vertical\")\nplt.title(\"TESLA stock price over time\")\nplt.xlabel(\"time\")\nplt.ylabel(\"deaths\")\nplt.show()\n\ntrain_data, test_data = df[0:int(len(df)*0.7)], df[int(len(df)*0.7):]\ntraining_data = train_data['InfectadosDia'].values\ntest_data = test_data['InfectadosDia'].values\nhistory = [x for x in training_data]\nmodel_predictions = []\nN_test_observations = len(test_data)\nfor time_point in range(N_test_observations):\n model = ARIMA(history, order=(4,1,0))\n model_fit = model.fit(disp=0)\n output = model_fit.forecast()\n yhat = output[0]\n model_predictions.append(yhat)\n true_test_value = test_data[time_point]\n history.append(true_test_value)\nMSE_error = mean_squared_error(test_data, model_predictions)\nprint('Testing Mean Squared Error is {}'.format(MSE_error))\n\n\ntest_set_range = df[int(len(df)*0.7):].index\nplt.plot(test_set_range, model_predictions, color='blue', marker='o', linestyle='dashed',label='Muertes pronosticadas python')\nplt.plot(test_set_range, test_data, color='red', label='Muertes reales')\nplt.title('Muertes covid')\nplt.xlabel('Fecha')\nplt.ylabel('Muertes')\nplt.xticks(np.arange(125,200,1), df.Date[125:200:1], rotation=\"vertical\")\nplt.legend()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"sklearn.metrics.mean_squared_error",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"pandas.plotting.lag_plot",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
adefossez/audio | [
"19fc580da97baf179395bb257647c5c25b993e42"
] | [
"test/torchaudio_unittest/librosa_compatibility_test.py"
] | [
"\"\"\"Test suites for numerical compatibility with librosa\"\"\"\nimport os\nimport unittest\nfrom distutils.version import StrictVersion\n\nimport torch\nimport torchaudio\nimport torchaudio.functional as F\nfrom torchaudio._internal.module_utils import is_module_available\n\nLIBROSA_AVAILABLE = is_module_available('librosa')\n\nif LIBROSA_AVAILABLE:\n import numpy as np\n import librosa\n import scipy\n\nimport pytest\n\nfrom torchaudio_unittest import common_utils\n\n\[email protected](not LIBROSA_AVAILABLE, \"Librosa not available\")\nclass TestFunctional(common_utils.TorchaudioTestCase):\n \"\"\"Test suite for functions in `functional` module.\"\"\"\n def test_griffinlim(self):\n # NOTE: This test is flaky without a fixed random seed\n # See https://github.com/pytorch/audio/issues/382\n torch.random.manual_seed(42)\n tensor = torch.rand((1, 1000))\n\n n_fft = 400\n ws = 400\n hop = 100\n window = torch.hann_window(ws)\n normalize = False\n momentum = 0.99\n n_iter = 8\n length = 1000\n rand_init = False\n init = 'random' if rand_init else None\n\n specgram = F.spectrogram(tensor, 0, window, n_fft, hop, ws, 2, normalize).sqrt()\n ta_out = F.griffinlim(specgram, window, n_fft, hop, ws, 1, normalize,\n n_iter, momentum, length, rand_init)\n lr_out = librosa.griffinlim(specgram.squeeze(0).numpy(), n_iter=n_iter, hop_length=hop,\n momentum=momentum, init=init, length=length)\n lr_out = torch.from_numpy(lr_out).unsqueeze(0)\n\n self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5)\n\n def _test_create_fb(self, n_mels=40, sample_rate=22050, n_fft=2048, fmin=0.0, fmax=8000.0, norm=None):\n librosa_fb = librosa.filters.mel(sr=sample_rate,\n n_fft=n_fft,\n n_mels=n_mels,\n fmax=fmax,\n fmin=fmin,\n htk=True,\n norm=norm)\n fb = F.create_fb_matrix(sample_rate=sample_rate,\n n_mels=n_mels,\n f_max=fmax,\n f_min=fmin,\n n_freqs=(n_fft // 2 + 1),\n norm=norm)\n\n for i_mel_bank in range(n_mels):\n self.assertEqual(\n fb[:, i_mel_bank], torch.tensor(librosa_fb[i_mel_bank]), atol=1e-4, rtol=1e-5)\n\n def test_create_fb(self):\n self._test_create_fb()\n self._test_create_fb(n_mels=128, sample_rate=44100)\n self._test_create_fb(n_mels=128, fmin=2000.0, fmax=5000.0)\n self._test_create_fb(n_mels=56, fmin=100.0, fmax=9000.0)\n self._test_create_fb(n_mels=56, fmin=800.0, fmax=900.0)\n self._test_create_fb(n_mels=56, fmin=1900.0, fmax=900.0)\n self._test_create_fb(n_mels=10, fmin=1900.0, fmax=900.0)\n if StrictVersion(librosa.__version__) < StrictVersion(\"0.7.2\"):\n return\n self._test_create_fb(n_mels=128, sample_rate=44100, norm=\"slaney\")\n self._test_create_fb(n_mels=128, fmin=2000.0, fmax=5000.0, norm=\"slaney\")\n self._test_create_fb(n_mels=56, fmin=100.0, fmax=9000.0, norm=\"slaney\")\n self._test_create_fb(n_mels=56, fmin=800.0, fmax=900.0, norm=\"slaney\")\n self._test_create_fb(n_mels=56, fmin=1900.0, fmax=900.0, norm=\"slaney\")\n self._test_create_fb(n_mels=10, fmin=1900.0, fmax=900.0, norm=\"slaney\")\n\n def test_amplitude_to_DB(self):\n spec = torch.rand((6, 201))\n\n amin = 1e-10\n db_multiplier = 0.0\n top_db = 80.0\n\n # Power to DB\n multiplier = 10.0\n\n ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)\n lr_out = librosa.core.power_to_db(spec.numpy())\n lr_out = torch.from_numpy(lr_out)\n\n self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5)\n\n # Amplitude to DB\n multiplier = 20.0\n\n ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)\n lr_out = librosa.core.amplitude_to_db(spec.numpy())\n lr_out = torch.from_numpy(lr_out)\n\n 
self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5)\n\n\[email protected]('complex_specgrams', [\n torch.randn(2, 1025, 400, 2)\n])\[email protected]('rate', [0.5, 1.01, 1.3])\[email protected]('hop_length', [256])\[email protected](not LIBROSA_AVAILABLE, \"Librosa not available\")\ndef test_phase_vocoder(complex_specgrams, rate, hop_length):\n # Due to cummulative sum, numerical error in using torch.float32 will\n # result in bottom right values of the stretched sectrogram to not\n # match with librosa.\n\n complex_specgrams = complex_specgrams.type(torch.float64)\n phase_advance = torch.linspace(0, np.pi * hop_length, complex_specgrams.shape[-3], dtype=torch.float64)[..., None]\n\n complex_specgrams_stretch = F.phase_vocoder(complex_specgrams, rate=rate, phase_advance=phase_advance)\n\n # == Test shape\n expected_size = list(complex_specgrams.size())\n expected_size[-2] = int(np.ceil(expected_size[-2] / rate))\n\n assert complex_specgrams.dim() == complex_specgrams_stretch.dim()\n assert complex_specgrams_stretch.size() == torch.Size(expected_size)\n\n # == Test values\n index = [0] * (complex_specgrams.dim() - 3) + [slice(None)] * 3\n mono_complex_specgram = complex_specgrams[index].numpy()\n mono_complex_specgram = mono_complex_specgram[..., 0] + \\\n mono_complex_specgram[..., 1] * 1j\n expected_complex_stretch = librosa.phase_vocoder(mono_complex_specgram,\n rate=rate,\n hop_length=hop_length)\n\n complex_stretch = complex_specgrams_stretch[index].numpy()\n complex_stretch = complex_stretch[..., 0] + 1j * complex_stretch[..., 1]\n\n assert np.allclose(complex_stretch, expected_complex_stretch, atol=1e-5)\n\n\ndef _load_audio_asset(*asset_paths, **kwargs):\n file_path = common_utils.get_asset_path(*asset_paths)\n sound, sample_rate = torchaudio.load(file_path, **kwargs)\n return sound, sample_rate\n\n\[email protected](not LIBROSA_AVAILABLE, \"Librosa not available\")\nclass TestTransforms(common_utils.TorchaudioTestCase):\n \"\"\"Test suite for functions in `transforms` module.\"\"\"\n def assert_compatibilities(self, n_fft, hop_length, power, n_mels, n_mfcc, sample_rate):\n common_utils.set_audio_backend('default')\n path = common_utils.get_asset_path('sinewave.wav')\n sound, sample_rate = common_utils.load_wav(path)\n sound_librosa = sound.cpu().numpy().squeeze() # (64000)\n\n # test core spectrogram\n spect_transform = torchaudio.transforms.Spectrogram(\n n_fft=n_fft, hop_length=hop_length, power=power)\n out_librosa, _ = librosa.core.spectrum._spectrogram(\n y=sound_librosa, n_fft=n_fft, hop_length=hop_length, power=power)\n\n out_torch = spect_transform(sound).squeeze().cpu()\n self.assertEqual(out_torch, torch.from_numpy(out_librosa), atol=1e-5, rtol=1e-5)\n\n # test mel spectrogram\n melspect_transform = torchaudio.transforms.MelSpectrogram(\n sample_rate=sample_rate, window_fn=torch.hann_window,\n hop_length=hop_length, n_mels=n_mels, n_fft=n_fft)\n librosa_mel = librosa.feature.melspectrogram(\n y=sound_librosa, sr=sample_rate, n_fft=n_fft,\n hop_length=hop_length, n_mels=n_mels, htk=True, norm=None)\n librosa_mel_tensor = torch.from_numpy(librosa_mel)\n torch_mel = melspect_transform(sound).squeeze().cpu()\n self.assertEqual(\n torch_mel.type(librosa_mel_tensor.dtype), librosa_mel_tensor, atol=5e-3, rtol=1e-5)\n\n # test s2db\n power_to_db_transform = torchaudio.transforms.AmplitudeToDB('power', 80.)\n power_to_db_torch = power_to_db_transform(spect_transform(sound)).squeeze().cpu()\n power_to_db_librosa = librosa.core.spectrum.power_to_db(out_librosa)\n 
self.assertEqual(power_to_db_torch, torch.from_numpy(power_to_db_librosa), atol=5e-3, rtol=1e-5)\n\n mag_to_db_transform = torchaudio.transforms.AmplitudeToDB('magnitude', 80.)\n mag_to_db_torch = mag_to_db_transform(torch.abs(sound)).squeeze().cpu()\n mag_to_db_librosa = librosa.core.spectrum.amplitude_to_db(sound_librosa)\n self.assertEqual(mag_to_db_torch, torch.from_numpy(mag_to_db_librosa), atol=5e-3, rtol=1e-5)\n\n power_to_db_torch = power_to_db_transform(melspect_transform(sound)).squeeze().cpu()\n db_librosa = librosa.core.spectrum.power_to_db(librosa_mel)\n db_librosa_tensor = torch.from_numpy(db_librosa)\n self.assertEqual(\n power_to_db_torch.type(db_librosa_tensor.dtype), db_librosa_tensor, atol=5e-3, rtol=1e-5)\n\n # test MFCC\n melkwargs = {'hop_length': hop_length, 'n_fft': n_fft}\n mfcc_transform = torchaudio.transforms.MFCC(\n sample_rate=sample_rate, n_mfcc=n_mfcc, norm='ortho', melkwargs=melkwargs)\n\n # librosa.feature.mfcc doesn't pass kwargs properly since some of the\n # kwargs for melspectrogram and mfcc are the same. We just follow the\n # function body in\n # https://librosa.github.io/librosa/_modules/librosa/feature/spectral.html#melspectrogram\n # to mirror this function call with correct args:\n #\n # librosa_mfcc = librosa.feature.mfcc(\n # y=sound_librosa, sr=sample_rate, n_mfcc = n_mfcc,\n # hop_length=hop_length, n_fft=n_fft, htk=True, norm=None, n_mels=n_mels)\n\n librosa_mfcc = scipy.fftpack.dct(db_librosa, axis=0, type=2, norm='ortho')[:n_mfcc]\n librosa_mfcc_tensor = torch.from_numpy(librosa_mfcc)\n torch_mfcc = mfcc_transform(sound).squeeze().cpu()\n\n self.assertEqual(\n torch_mfcc.type(librosa_mfcc_tensor.dtype), librosa_mfcc_tensor, atol=5e-3, rtol=1e-5)\n\n def test_basics1(self):\n kwargs = {\n 'n_fft': 400,\n 'hop_length': 200,\n 'power': 2.0,\n 'n_mels': 128,\n 'n_mfcc': 40,\n 'sample_rate': 16000\n }\n self.assert_compatibilities(**kwargs)\n\n def test_basics2(self):\n kwargs = {\n 'n_fft': 600,\n 'hop_length': 100,\n 'power': 2.0,\n 'n_mels': 128,\n 'n_mfcc': 20,\n 'sample_rate': 16000\n }\n self.assert_compatibilities(**kwargs)\n\n # NOTE: Test passes offline, but fails on TravisCI (and CircleCI), see #372.\n @unittest.skipIf('CI' in os.environ, 'Test is known to fail on CI')\n def test_basics3(self):\n kwargs = {\n 'n_fft': 200,\n 'hop_length': 50,\n 'power': 2.0,\n 'n_mels': 128,\n 'n_mfcc': 50,\n 'sample_rate': 24000\n }\n self.assert_compatibilities(**kwargs)\n\n def test_basics4(self):\n kwargs = {\n 'n_fft': 400,\n 'hop_length': 200,\n 'power': 3.0,\n 'n_mels': 128,\n 'n_mfcc': 40,\n 'sample_rate': 16000\n }\n self.assert_compatibilities(**kwargs)\n\n def test_MelScale(self):\n \"\"\"MelScale transform is comparable to that of librosa\"\"\"\n n_fft = 2048\n n_mels = 256\n hop_length = n_fft // 4\n sample_rate = 44100\n sound = common_utils.get_whitenoise(sample_rate=sample_rate, duration=60)\n sound = sound.mean(dim=0, keepdim=True)\n spec_ta = F.spectrogram(\n sound, pad=0, window=torch.hann_window(n_fft), n_fft=n_fft,\n hop_length=hop_length, win_length=n_fft, power=2, normalized=False)\n spec_lr = spec_ta.cpu().numpy().squeeze()\n # Perform MelScale with torchaudio and librosa\n melspec_ta = torchaudio.transforms.MelScale(n_mels=n_mels, sample_rate=sample_rate)(spec_ta)\n melspec_lr = librosa.feature.melspectrogram(\n S=spec_lr, sr=sample_rate, n_fft=n_fft, hop_length=hop_length,\n win_length=n_fft, center=True, window='hann', n_mels=n_mels, htk=True, norm=None)\n # Note: Using relaxed rtol instead of atol\n 
self.assertEqual(melspec_ta, torch.from_numpy(melspec_lr[None, ...]), atol=1e-8, rtol=1e-3)\n\n def test_InverseMelScale(self):\n \"\"\"InverseMelScale transform is comparable to that of librosa\"\"\"\n n_fft = 2048\n n_mels = 256\n n_stft = n_fft // 2 + 1\n hop_length = n_fft // 4\n\n # Prepare mel spectrogram input. We use torchaudio to compute one.\n path = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')\n sound, sample_rate = common_utils.load_wav(path)\n sound = sound[:, 2**10:2**10 + 2**14]\n sound = sound.mean(dim=0, keepdim=True)\n spec_orig = F.spectrogram(\n sound, pad=0, window=torch.hann_window(n_fft), n_fft=n_fft,\n hop_length=hop_length, win_length=n_fft, power=2, normalized=False)\n melspec_ta = torchaudio.transforms.MelScale(n_mels=n_mels, sample_rate=sample_rate)(spec_orig)\n melspec_lr = melspec_ta.cpu().numpy().squeeze()\n # Perform InverseMelScale with torch audio and librosa\n spec_ta = torchaudio.transforms.InverseMelScale(\n n_stft, n_mels=n_mels, sample_rate=sample_rate)(melspec_ta)\n spec_lr = librosa.feature.inverse.mel_to_stft(\n melspec_lr, sr=sample_rate, n_fft=n_fft, power=2.0, htk=True, norm=None)\n spec_lr = torch.from_numpy(spec_lr[None, ...])\n\n # Align dimensions\n # librosa does not return power spectrogram while torchaudio returns power spectrogram\n spec_orig = spec_orig.sqrt()\n spec_ta = spec_ta.sqrt()\n\n threshold = 2.0\n # This threshold was choosen empirically, based on the following observation\n #\n # torch.dist(spec_lr, spec_ta, p=float('inf'))\n # >>> tensor(1.9666)\n #\n # The spectrograms reconstructed by librosa and torchaudio are not comparable elementwise.\n # This is because they use different approximation algorithms and resulting values can live\n # in different magnitude. (although most of them are very close)\n # See\n # https://github.com/pytorch/audio/pull/366 for the discussion of the choice of algorithm\n # https://github.com/pytorch/audio/pull/448/files#r385747021 for the distribution of P-inf\n # distance over frequencies.\n self.assertEqual(spec_ta, spec_lr, atol=threshold, rtol=1e-5)\n\n threshold = 1700.0\n # This threshold was choosen empirically, based on the following observations\n #\n # torch.dist(spec_orig, spec_ta, p=1)\n # >>> tensor(1644.3516)\n # torch.dist(spec_orig, spec_lr, p=1)\n # >>> tensor(1420.7103)\n # torch.dist(spec_lr, spec_ta, p=1)\n # >>> tensor(943.2759)\n assert torch.dist(spec_orig, spec_ta, p=1) < threshold\n"
] | [
[
"numpy.allclose",
"torch.Size",
"numpy.ceil",
"torch.randn",
"scipy.fftpack.dct",
"torch.rand",
"torch.linspace",
"torch.tensor",
"torch.from_numpy",
"torch.dist",
"torch.abs",
"torch.random.manual_seed",
"torch.hann_window"
]
] |
F35H/RandFunc | [
"58868e65389447237013024cf49124d5f1da3a8e"
] | [
"KNOWNALGO/W03/W0301.py"
] | [
"import time\r\nfrom os import system\r\nfrom statistics import stdev\r\nfrom statistics import variance\r\n\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\n\r\nimport W0302\r\nfrom W0303 import seedList\r\n\r\nif __name__ == \"__main__\":\r\n system(\"cls\")\r\n\r\n tStr = str( time.time() )\r\n t = int( tStr[-4:-2]) \r\n\r\n MDMethSet = W0302.MDMethod( seedList[t] )\r\n MDWSeqSet = set()\r\n MDSqreSet = set()\r\n \r\n for i in range(20):\r\n MDWSeqSet.add( \r\n W0302.MDWSeq( seedList[i + t] ) ) \r\n MDSqreSet.add( \r\n W0302.MDSquares(t, seedList[i + t] ) ) \r\n \r\n print( str( MDMethSet ) + \"\\n\" ) \r\n print( str( MDWSeqSet ) + \"\\n\" )\r\n print( str( MDSqreSet ) + \"\\n\" )\r\n \r\n print( \"MDMethSet Variance: \"\r\n + str( variance(MDMethSet) ))\r\n print( \"MDMethSet Deviation: \"\r\n + str( stdev(MDMethSet) ) + \"\\n\")\r\n\r\n print( \"MDWSeqSet Variance: \" \r\n + str( variance(MDWSeqSet) ))\r\n print( \"MDWSeqSet Deviation: \"\r\n + str( stdev(MDWSeqSet) ) + \"\\n\")\r\n \r\n print( \"MDSqreSet Variance: \" \r\n + str( variance(MDSqreSet) ))\r\n print( \"MDSqreSet Deviation: \"\r\n + str( stdev(MDSqreSet) ) + \"\\n\")\r\n \r\n \r\n mpl.use(\"TkAgg\")\r\n plt.style.use(\"fast\")\r\n \r\n f, plts = plt.subplots(3)\r\n \r\n plts[0].barh(range(len(MDMethSet)),\r\n MDMethSet, color=\"g\",\r\n label=\"Middle Square Method\")\r\n plts[1].barh(range(len(MDWSeqSet)),\r\n MDWSeqSet, color=\"b\",\r\n label=\"Middle Square Weyl-Sequence\")\r\n plts[2].barh(range(len(MDSqreSet)),\r\n MDSqreSet, color=\"r\",\r\n label=\"Middle Square Fast Counter\")\r\n \r\n plt.show()"
] | [
[
"matplotlib.use",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
TomLXXVI/pypv | [
"df3dfba586bdec171e3fa9795b7bae48f76f83f2"
] | [
"pypv/lib/nummath/deriv.py"
] | [
"import numpy as np\n\n\nclass Deriv:\n \"\"\"\n Calculate the derivative with given order of the function f(t) at point t.\n \"\"\"\n def __init__(self, f, dt, o=1):\n \"\"\"\n Initialize the differentiation solver.\n Params:\n - f the name of the function object ('def f(t):...')\n - dt the calculation step between successive points_1\n - o the order of the derivative to be calculated\n \"\"\"\n self.f = f\n self.dt = dt\n self.o = o\n\n # coefficients of forward finite difference approximations of order O(h^2)\n self.co = np.array([\n [-3.0, 4.0, -1.0, 0.0, 0.0, 0.0],\n [2.0, -5.0, 4.0, -1.0, 0.0, 0.0],\n [-5.0, 18.0, -24.0, 14.0, -3.0, 0.0],\n [3.0, -14.0, 26.0, -24.0, 11.0, -2.0]\n ])\n self.den = np.array([2 * dt, dt ** 2, 2 * dt ** 3, dt ** 4])\n\n def solve(self, t):\n \"\"\"\n Calculate the derivative at point 't'.\n The method uses Richardson extrapolation to improve accuracy.\n \"\"\"\n df = [0.0, 0.0]\n for i, dt_ in enumerate([self.dt, self.dt / 2]):\n t_array = np.arange(t, t + 6 * dt_, dt_)\n f_array = np.array([self.f(t_i) for t_i in t_array])\n c_array = self.co[self.o - 1, :]\n df[i] = (c_array * f_array) / self.den[self.o - 1]\n return (4.0 * df[1] - df[0]) / 3.0\n"
] | [
[
"numpy.array",
"numpy.arange"
]
] |
adrianopls/GRIPy-X | [
"21c7fa1f32f8dbb0a5dff93c2bac5acf1f9181ca"
] | [
"fileio/lis.py"
] | [
"import os\nimport struct\nfrom collections import OrderedDict\nfrom pathlib import PurePath\n\nimport numpy as np\n\nimport app\nfrom fileio.tif import TIFFile\n\n\nPRA = {\n \"Physical Record Type\": (1, 1),\n \"Checksum Type\": (2, 2),\n \"File Number Presence\": (5, 1),\n \"Record Number Presence\": (6, 1),\n \"Parity Error\": (9, 1),\n \"Checksum Error\": (10, 1),\n \"Predecessor Continuation Attribute\": (14,1),\n \"Sucessor Continuation Attribute\": (15, 1)\n}\n\n\ndef _get_code_default_size(code): \n if not isinstance(code, int):\n return ValueError('code needs to be a int value.') \n if code == 56:\n return 1 \n elif code == 65:\n return -1\n elif code == 66:\n return 1\n elif code == 68:\n return 4 \n elif code == 73:\n return 4\n elif code == 79:\n return 2\n elif code == 77:\n return -1\n elif code == 1001:\n return 1\n else:\n msg = 'Codigo ' + str(code) + ' nao reconhecido, ainda....'\n raise Exception(msg) \n \n \ndef get(data, code):\n if _get_code_default_size(code) != -1 and _get_code_default_size(code) != len(data):\n msg = 'Data must have ' + str(_get_code_default_size(code)) + ' bytes. Found ' + str(len(data)) + ' bytes. (code=' + str(code) + ')'\n raise ValueError(msg)\n \n # size = _get_code_default_size(code)\n # if size < 0:\n # raise Exception('Size must be greater than -1.')\n # if size == 0:\n # return None \n if code == 56:\n return _get_value(data, 'b', True) \n elif code == 65:\n #print 'offset: ', offset\n #print 'size: ', size\n return _get_string(data)\n elif code == 66:\n return _get_value(data, 'b', False)\n \n elif code == 68:\n values = [] \n for i in range(len(data)):\n start = i\n end = start+1\n v = ord(data[start:end])\n values.append(v)\n \n result = '' \n\n for value in values:\n result += bin(value)[2:].zfill(8)\n exponent = result[1:9] \n fraction = result[9:32]\n \n if result[0] == '0':\n exponent = int(exponent, 2)\n fraction = int(fraction, 2) / 2. ** 23\n value = fraction * 2. ** (exponent - 128) \n else: \n converted_exponent = ''\n for i in range(8):\n if exponent[i] == '0':\n converted_exponent += '1'\n else:\n converted_exponent += '0' \n exponent = int(converted_exponent, 2) \n converted_fraction = ''\n achou = False\n for i in range(22, -1, -1):\n if achou:\n if fraction[i] == '0':\n converted_fraction = '1' + converted_fraction\n else:\n converted_fraction = '0' + converted_fraction \n else:\n converted_fraction = fraction[i] + converted_fraction\n if fraction[i] == '1':\n achou = True \n fraction = int(converted_fraction, 2) / 2. ** 23 \n fraction = fraction * (-1)\n value = fraction * 2. 
** (exponent - 128) \n return value \n \n elif code == 73:\n return _get_value(data, 'l', True)\n \n elif code == 79:\n return _get_value(data, 'h', False, True)\n \n elif code == 77:\n values = [] \n for i in range(len(data)):\n values.append(ord(data[i:i+1]))\n result = '' \n for value in values:\n result += bin(value)[2:].zfill(8)\n return result \n # for 'LRA' \n# elif code == 1001:\n# self.set_offset(self.get_offset() + 1)\n# return None \n else:\n msg = 'Codigo ' + str(code) + ' no offset nao reconhecido, ainda....'\n raise Exception(msg) \n\n\n\ndef _get_value(data, mode, signed=True, big_endian=True):\n big = '' \n if big_endian:\n big = '>'\n if signed is False: \n mode = mode.upper()\n format_ = big + mode \n #print('data: {} - format_: {} - Tdata: {} - Tformat: {}'.format(data, \n # format_, type(data), type(format_)))\n n = struct.unpack(format_, data)\n return n[0] \n\n\ndef _get_string(data):\n string = data.decode(\"utf-8\")\n \"\"\"\n print(data, type(data))\n for i in range(len(data)):\n print(data[i], type(data[i]))\n #string += struct.unpack('s', data[i])[0]\n string += data.decode(\"utf-8\")\n \"\"\" \n return string.strip()\n\n\ndef decode_PRA(PRA_raw_data):\n map_ = {}\n for key, value in PRA.items():\n map_[key] = int(PRA_raw_data[value[0]:value[0]+value[1]])\n return map_ \n\n\n\n\n\nclass LogicalRegister(object):\n\n def __init__(self):\n self.code = -1\n self.registers = OrderedDict()\n\n def __str__(self):\n ret_val = 'CODIGO: ' + str(self.code) + '\\n'\n ret_val += 'DATA: ' + str(self.registers) + '\\n'\n return ret_val\n \n \n \n \n \nclass PhysicalRegister(object):\n\n def __init__(self):\n #self.lenght = 0\n self.attr = {}\n self.lr_data = None\n self.trailer = None\n\n def __str__(self):\n raise Exception()\n #return str(self.body) \n\n\n\n\ndef _get_trailer_size(PRA):\n size = 0\n if PRA.get('Record Number Presence'):\n size += 2\n if PRA.get('File Number Presence'):\n size += 2\n if PRA.get('Checksum Type'):\n size += 2 \n return size \n\n\n\nclass LISFile(object):\n \n def __init__(self):\n self.physical_records = None\n self.logical_records = None\n json_file = 'LIS_MAPPING.json'\n \n fullpath_json = PurePath(app.BASE_PATH, self.__module__.split('.')[0] + os.sep + json_file)\n \n# print('fullpath_json:', fullpath_json)\n \n self._json = app.app_utils.read_json_file(fullpath_json)\n \n \n def read_file(self, file_name):\n self.file_name = file_name\n file_ = open(self.file_name, mode='rb')\n self.input_data = file_.read()\n file_.close()\n \n \n def read_physical_records(self):\n prs_data = []\n # Reading file\n result, new_data = TIFFile.desencapsulate(self.input_data)\n #print('\\nresult, new_data:', result, new_data)\n if result:\n self.input_data = bytes('', 'utf-8')\n for d in new_data:\n self.input_data += d\n offset = 0\n while offset < len(self.input_data):\n pr_lenght_data = self.input_data[offset:offset+_get_code_default_size(79)]\n offset += _get_code_default_size(79)\n if not pr_lenght_data:\n break\n pr_lenght = get(pr_lenght_data, 79)\n pr_data = self.input_data[offset:offset+(pr_lenght-len(pr_lenght_data))]\n offset += pr_lenght-len(pr_lenght_data)\n if not pr_data:\n break\n prs_data.append(pr_data) \n self.input_data = None \n # Creating Physical Records, but not processing Logical Records\n self.physical_records = [] \n for pr_data in prs_data: \n # PRA - Physical Record Attributes\n pos = 0\n PRA_data = get(pr_data[pos:pos+2], 77)\n pos += 2 \n attr = decode_PRA(PRA_data) \n lr_data = pr_data[pos:(len(pr_data) - 
_get_trailer_size(attr))]\n trailer_data = pr_data[len(pr_data) - _get_trailer_size(attr):]\n trailer_pos = 0 \n trailer = {}\n if attr.get('Record Number Presence'):\n value = get(trailer_data[trailer_pos:trailer_pos+_get_code_default_size(79)], 79)\n trailer_pos += _get_code_default_size(79)\n trailer['Record Number'] = value\n if attr.get('File Number Presence'):\n value = get(trailer_data[trailer_pos:trailer_pos+_get_code_default_size(79)], 79)\n trailer_pos += _get_code_default_size(79) \n trailer['File Number'] = value\n if attr.get('Checksum Type'):\n value = get(trailer_data[trailer_pos:trailer_pos+_get_code_default_size(79)], 79) \n trailer['Checksum'] = value\n # If there is some continuation records, joint then \n if attr.get('Predecessor Continuation Attribute'):\n self.physical_records[-1].lr_data = self.physical_records[-1].lr_data + lr_data \n else: \n pr = PhysicalRegister() \n pr.attr = attr\n pr.lr_data = lr_data\n pr.trailer = trailer\n self.physical_records.append(pr)\n \n \n \n def read_logical_records(self):\n self.logical_records = [] \n \n# print(len(self.physical_records))\n for pr in self.physical_records:\n try:\n lr = LogicalRegister()\n pos = 0\n new_pos = _get_code_default_size(66)\n lr.code = get(pr.lr_data[pos:(pos+new_pos)], 66)\n pos = new_pos\n # LRA (1 byte) is not used \n pos += 1\n json_obj = self._json.get(str(lr.code))\n if json_obj.get('data') is None: \n \n if lr.code == 0:\n list_ = []\n lr.registers['Frame'] = list_\n lr_data_format = None\n for i in range(len(self.logical_records)-1, -1, -1):\n if self.logical_records[i].code == 64:\n lr_data_format = self.logical_records[i]\n break \n entry_block = OrderedDict() \n for entry in lr_data_format.registers.get('Entry Block'):\n value = entry.get('Entry')\n if isinstance(value, float):\n value = float(\"{0:.4f}\".format(value))\n entry_block[entry.get('Entry Type')] = value\n curves = OrderedDict()\n # Only one depth register per frame [entry_block.get(13)==1] \n if entry_block.get(13) == 1:\n code = entry_block.get(15)\n size = _get_code_default_size(code)\n depth = get(pr.lr_data[2:pos+size], code)\n pos += size\n depth = float(\"{0:.4f}\".format(depth))\n curves[-1] = depth\n \n # Initializing logs curves dict\n for idx in range(len(lr_data_format.registers.get('Datum Spec Block'))):\n curves[idx] = [] \n if entry_block.get(12) is not None:\n absent_value = entry_block.get(12)\n else:\n absent_value = -999.25\n while pos < len(pr.lr_data):\n for idx, entry_dict in enumerate(lr_data_format.registers.get('Datum Spec Block')): \n #item_name = entry_dict.get('Mnemonic')\n item_size = entry_dict.get('Size')\n item_code = entry_dict.get('Representation Code')\n item_samples = entry_dict.get('Number Samples') \n item_inc = int(item_size/item_samples) \n \n for i in range(item_samples): \n value = get(pr.lr_data[pos:pos+item_inc], item_code)\n value = float(\"{0:.6f}\".format(value))\n curves.get(idx).append(value)\n pos += item_inc\n \n if curves.get(-1) is not None:\n bigger_size = 0\n for idx in range(len(lr_data_format.registers.get('Datum Spec Block'))):\n if len(curves.get(idx)) > bigger_size:\n bigger_size = len(curves.get(idx))\n if entry_block.get(4) == 1:\n step = entry_block.get(8) * (-1)\n else:\n step = entry_block.get(8) \n depth = []\n for i in range(bigger_size):\n depth.append(curves.get(-1)+i*step)\n curves[-1] = depth \n \n new_curves = OrderedDict() \n for idx, curve in curves.items(): \n new_curve = np.asarray(curve) \n where = (new_curve == absent_value)\n new_curve[where] = np.nan 
\n new_curves[idx] = new_curve\n curves = None \n list_.append(new_curves)\n self.logical_records.append(lr)\n continue \n \n \n elif lr.code == 34:\n json_obj = self._json.get('Component Block')\n \n elif lr.code == 64:\n json_obj = self._json.get('Entry Block')\n list_ = []\n lr.registers['Entry Block'] = list_\n item = None \n while item == None or item.get('Entry Type') != 0:\n if item is not None:\n list_.append(item) \n item = OrderedDict() \n for d in json_obj.get('data'):\n if d.get('name') == 'Entry':\n if item.get('Entry Type') == 0:\n pos += item.get('Entry Size')\n break\n code = item.get('Entry Repr Code Nb')\n value = get(pr.lr_data[pos:(pos+item.get('Entry Size'))], code)\n item[d.get('name')] = value\n pos += item.get('Entry Size') \n elif d.get('name') != '':\n value = get(pr.lr_data[pos:(pos+d.get('size'))], d.get('code'))\n item[d.get('name')] = value \n pos += d.get('size')\n else:\n pos += d.get('size')\n \n json_obj = self._json.get('Datum Spec Block')\n datum_spec_block_option = -1\n for entry_dict in list_:\n if entry_dict.get('Entry Type') == 1:\n datum_spec_block_option = entry_dict.get('Entry')\n break\n if datum_spec_block_option == 0:\n json_obj = self._json.get('Datum Spec Block 0')\n elif datum_spec_block_option == 1:\n json_obj = self._json.get('Datum Spec Block 1')\n else:\n raise Exception()\n elif lr.code == 234:\n continue \n #raise Exception() \n else: \n #continue\n raise Exception() \n list_ = [] \n lr.registers[json_obj.get('name')] = list_ \n item = None\n while pos < len(pr.lr_data):\n if item is not None:\n list_.append(item)\n item = OrderedDict()\n for d in json_obj.get('data'):\n if lr.code == 34 and d.get('name') == 'Component':\n code = item.get('Component Repr Code')\n size = item.get('Component Size')\n value = get(pr.lr_data[pos:(pos+size)], code) \n pos += size\n item[d.get('name')] = value \n elif d.get('name'):\n code = d.get('code')\n size = _get_code_default_size(code)\n if size == -1:\n if d.get('size') is not None:\n size = d.get('size')\n else:\n size = len(pr.lr_data)-pos\n #print\n #print 'pos: ', pos\n #print 'lr.code: ', lr.code\n #print 'code: ', code\n #print 'size: ', size\n value = get(pr.lr_data[pos:(pos+size)], code)\n pos += size\n item[d.get('name')] = value\n else:\n pos += d.get('size')\n else:\n if item is not None:\n list_.append(item)\n self.logical_records.append(lr)\n except Exception as e:\n print('ERROR:', e)\n raise\n #continue\n \n\n \n \n"
] | [
[
"numpy.asarray"
]
] |
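The `apis` column for this row only lists `numpy.asarray`, which the embedded LIS reader uses when converting each accumulated curve to an array and masking the absent-value marker (-999.25 by default) with NaN. A minimal standalone sketch of that masking step, with invented sample values, is:

    import numpy as np

    absent_value = -999.25                 # default marker used in the embedded reader
    curve = [1.25, -999.25, 2.5, -999.25]  # hypothetical curve samples

    new_curve = np.asarray(curve)                   # list -> float64 ndarray
    new_curve[new_curve == absent_value] = np.nan   # mask absent samples
    print(new_curve)                                # [1.25   nan  2.5   nan]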
ltgoslo/norBERT | [
"d75d5c12d9b7f9cc11c65757f2228b7e6070b69b"
] | [
"benchmarking/experiments/pos_finetuning.py"
] | [
"#!/bin/env python3\n\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport data_preparation.data_preparation_pos as data_preparation_pos\nimport fine_tuning\nimport utils.model_utils as model_utils\nimport utils.pos_utils as pos_utils\n\n\ndef test(training_lang,\n test_lang,\n split=\"test\",\n short_model_name=\"ltgoslo/norbert\",\n epochs=5, data_path=None, task=\"pos\"):\n checkpoints_path = \"checkpoints/\"\n trainer = fine_tuning.Trainer(training_lang, data_path, task, short_model_name)\n # Model parameters\n max_length = 256\n batch_size = 8\n eval_batch_size = 8\n learning_rate = 2e-5\n tagset = pos_utils.get_ud_tags()\n num_labels = len(tagset)\n # Model creation\n trainer.build_model(max_length, batch_size, learning_rate, epochs, num_labels, tagset=tagset,\n eval_batch_size=64)\n weights_path = checkpoints_path + training_lang + \"/\"\n weights_filename = short_model_name.replace(\"/\", \"_\") + \"_pos.hdf5\"\n print(\"Using weights from\", weights_path + weights_filename)\n trainer.model.load_weights(weights_path + weights_filename)\n # Checkpoint for best model weights\n # trainer.setup_checkpoint(checkpoints_path)\n trainer.prepare_data()\n #\n test_lang_path = data_path + test_lang\n test_data, test_dataset = data_preparation_pos.load_dataset(test_lang_path, trainer.tokenizer,\n max_length, trainer.tagset,\n dataset_name=split)\n trainer.setup_eval(test_data, split)\n test_dataset, test_batches = model_utils.make_batches(test_dataset, eval_batch_size,\n repetitions=1, shuffle=False)\n # return trainer, test_data, test_dataset, test_batches\n test_preds = trainer.handle_oom(trainer.model.predict,\n test_dataset,\n steps=test_batches,\n verbose=1)\n score = trainer.metric(test_preds, test_data, split) * 100\n print(\"{0}-{1} {2}: {3:.1f}\".format(training_lang, test_lang, split, score))\n return score\n\n\ndef train(training_lang,\n short_model_name=\"ltgoslo/norbert\",\n epochs=10, data_path=None, task=\"pos\"):\n checkpoints_path = \"checkpoints/\"\n #\n trainer = fine_tuning.Trainer(training_lang, data_path, task, short_model_name)\n #\n # Model parameters\n max_length = 256\n batch_size = 8\n learning_rate = 2e-5\n tagset = pos_utils.get_ud_tags()\n num_labels = len(tagset)\n #\n # Model creation\n trainer.build_model(max_length, batch_size, learning_rate, epochs, num_labels, tagset=tagset,\n eval_batch_size=64)\n #\n # Checkpoint for best model weights\n trainer.setup_checkpoint(checkpoints_path)\n #\n trainer.prepare_data()\n #\n print(\"Train examples:\", len(trainer.train_data))\n #\n # Print an example sentence for sanity\n example_batch = trainer.train_dataset.as_numpy_iterator().next()\n for token, label in zip(example_batch[0][\"input_ids\"][0], example_batch[1][0]):\n if not token:\n break\n elif token == example_batch[0][\"input_ids\"][0][10]:\n print(\"...\")\n break\n print(\"{:<25}{:<20}\".format(trainer.tokenizer.decode(int(token)), tagset[label]))\n #\n try:\n trainer.setup_training()\n trainer.train()\n trainer.make_definitive()\n except KeyboardInterrupt:\n pass\n return trainer\n\n\ndef prepare_test_data(trainer):\n # Load plain data and TF dataset\n data, dataset = data_preparation_pos.load_dataset(\n trainer.lang_path, trainer.tokenizer, trainer.max_length, trainer.tagset,\n dataset_name=\"test\")\n trainer.setup_eval(data, \"test\")\n dataset, batches = model_utils.make_batches(\n dataset, trainer.eval_batch_size, repetitions=1, shuffle=False)\n return dataset, batches, data\n\n\ndef setup_eval(data, tokenizer, label_map, max_length, 
dataset_name=\"test\"):\n eval_info = {dataset_name: {}}\n eval_info[dataset_name][\"all_words\"] = []\n eval_info[dataset_name][\"all_labels\"] = []\n eval_info[dataset_name][\"real_tokens\"] = []\n eval_info[dataset_name][\"subword_locs\"] = []\n acc_lengths = 0\n #\n for i in range(len(data)):\n eval_info[dataset_name][\"all_words\"].extend(data[i][\"tokens\"]) # Full words\n eval_info[dataset_name][\"all_labels\"].extend(\n [label_map[label] for label in data[i][\"tags\"]])\n _, _, idx_map = tokenizer.subword_tokenize(data[i][\"tokens\"], data[i][\"tags\"])\n # Examples always start at a multiple of max_length\n # Where they end depends on the number of resulting subwords\n example_start = i * max_length\n example_end = example_start + len(idx_map)\n eval_info[dataset_name][\"real_tokens\"].extend(\n np.arange(example_start, example_end, dtype=int))\n # Get subword starts and ends\n sub_ids, sub_starts, sub_lengths = np.unique(idx_map, return_counts=True, return_index=True)\n sub_starts = sub_starts[sub_lengths > 1] + acc_lengths\n sub_ends = sub_starts + sub_lengths[sub_lengths > 1]\n eval_info[dataset_name][\"subword_locs\"].extend(np.array([sub_starts, sub_ends]).T.tolist())\n acc_lengths += len(idx_map)\n return eval_info\n\n\ndef get_score_pos(preds, dataset_name, eval_info):\n filtered_preds = preds[0].argmax(axis=-1).flatten()[\n eval_info[dataset_name][\"real_tokens\"]].tolist()\n filtered_logits = preds[0].reshape(\n (preds[0].shape[0] * preds[0].shape[1], preds[0].shape[2])\n )[eval_info[dataset_name][\"real_tokens\"]]\n new_preds = pos_utils.reconstruct_subwords(\n eval_info[dataset_name][\"subword_locs\"], filtered_preds, filtered_logits\n )\n return (np.array(eval_info[dataset_name][\"all_labels\"]) == np.array(new_preds)).mean()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_name\", default=\"norbert\")\n parser.add_argument(\"--short_model_name\", default=\"ltgoslo/norbert\")\n parser.add_argument(\"--training_language\", default=\"nob\")\n parser.add_argument(\"--epochs\", type=int, default=10)\n\n args = parser.parse_args()\n\n training_language = args.training_language\n ud_data_path = \"../data/ud/\"\n run_name = args.model_name\n model_identifier = args.short_model_name\n current_task = \"pos\"\n\n # Train models\n training_object = train(training_language, short_model_name=model_identifier,\n data_path=ud_data_path, task=current_task, epochs=args.epochs)\n\n dev_score = test(training_language,\n training_language,\n \"dev\",\n short_model_name=model_identifier,\n data_path=ud_data_path, task=current_task)\n\n test_score = test(training_language,\n training_language,\n \"test\",\n short_model_name=model_identifier,\n data_path=ud_data_path, task=current_task)\n\n table = pd.DataFrame({\"Language\": training_language,\n \"Dev Accuracy\": [dev_score],\n \"Test Accuracy\": [test_score]\n })\n\n print(table)\n print(table.to_latex(index=False, float_format=\"{0:.1f}\".format))\n table.to_csv(f\"results/{training_language}_{run_name}_{current_task}.tsv\", sep=\"\\t\")\n print(f\"Scores saved to results/{training_language}_{run_name}_{current_task}.tsv\")\n"
] | [
[
"numpy.arange",
"pandas.DataFrame",
"numpy.array",
"numpy.unique"
]
] |
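Of the APIs listed for this row, the least obvious call is `np.unique(..., return_counts=True, return_index=True)` in `setup_eval`, which locates tokens that were split into several subwords. A small sketch with an invented `idx_map` (NumPy returns the unique values, their first indices, and their counts in that fixed order):

    import numpy as np

    # Hypothetical subword index map: token 1 became three subwords, token 3 became two.
    idx_map = np.array([0, 1, 1, 1, 2, 3, 3])

    sub_ids, sub_starts, sub_lengths = np.unique(idx_map, return_counts=True, return_index=True)
    sub_starts = sub_starts[sub_lengths > 1]              # starts of multi-subword tokens
    sub_ends = sub_starts + sub_lengths[sub_lengths > 1]
    print(np.array([sub_starts, sub_ends]).T.tolist())    # [[1, 4], [5, 7]]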
akizminet/submarine | [
"aa6e865f27167a26050d8daa293e0b4f41a144b6"
] | [
"submarine-sdk/pysubmarine/submarine/ml/tensorflow/input/input.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nimport tensorflow as tf\n\nlogger = logging.getLogger(__name__)\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n\n\ndef libsvm_input_fn(\n filepath,\n batch_size=256,\n num_epochs=3, # pylint: disable=W0613\n perform_shuffle=False,\n delimiter=\" \",\n **kwargs\n):\n def _input_fn():\n def decode_libsvm(line):\n columns = tf.string_split([line], delimiter)\n labels = tf.string_to_number(columns.values[0], out_type=tf.float32)\n splits = tf.string_split(columns.values[1:], \":\")\n id_vals = tf.reshape(splits.values, splits.dense_shape)\n feat_ids, feat_vals = tf.split(id_vals, num_or_size_splits=2, axis=1)\n feat_ids = tf.string_to_number(feat_ids, out_type=tf.int32)\n feat_vals = tf.string_to_number(feat_vals, out_type=tf.float32)\n return {\"feat_ids\": feat_ids, \"feat_vals\": feat_vals}, labels\n\n dataset = (\n tf.data.TextLineDataset(filepath)\n .map(decode_libsvm, num_parallel_calls=AUTOTUNE)\n .prefetch(AUTOTUNE)\n )\n\n if perform_shuffle:\n dataset = dataset.shuffle(buffer_size=batch_size)\n\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n\n return dataset\n\n return _input_fn\n"
] | [
[
"tensorflow.data.TextLineDataset",
"tensorflow.reshape",
"tensorflow.string_to_number",
"tensorflow.string_split",
"tensorflow.split"
]
] |
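The `decode_libsvm` closure above does its parsing with TensorFlow string ops (`tf.string_split`, `tf.string_to_number`); the input format itself is easiest to see in plain Python. The sketch below deliberately avoids TensorFlow and only shows the `<label> <id>:<value> ...` layout the `_input_fn` expects; the sample line is invented:

    # One LibSVM-formatted line: "<label> <feat_id>:<feat_val> ..."
    line = "1 3:0.5 17:1.0 42:0.25"

    label, *pairs = line.split(" ")
    feat_ids = [int(p.split(":")[0]) for p in pairs]
    feat_vals = [float(p.split(":")[1]) for p in pairs]
    print(float(label), feat_ids, feat_vals)  # 1.0 [3, 17, 42] [0.5, 1.0, 0.25]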
Yoark/Transformer-Attention | [
"b8c62cb8618a03150ccfd73f705893d2b931b224"
] | [
"tests/tasks/test_sequence_tagging.py"
] | [
"from torchnlp.tasks.sequence_tagging import Tagger, hparams_tagging_base, VOCABS_FILE\n\nimport torch\nimport torch.nn as nn\n\nimport torchtext\nfrom torchtext import data\nfrom torchtext import datasets\n\nimport pytest\n\ndef udpos_dataset(batch_size):\n # Setup fields with batch dimension first\n inputs = data.Field(init_token=\"<bos>\", eos_token=\"<eos>\", batch_first=True)\n tags = data.Field(init_token=\"<bos>\", eos_token=\"<eos>\", batch_first=True)\n \n # Download and the load default data.\n train, val, test = datasets.UDPOS.splits(\n fields=(('inputs_word', inputs), ('labels', tags), (None, None)))\n \n # Build vocab\n inputs.build_vocab(train.inputs)\n tags.build_vocab(train.tags)\n \n # Get iterators\n train_iter, val_iter, test_iter = data.BucketIterator.splits(\n (train, val, test), batch_size=batch_size, \n device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\"))\n train_iter.repeat = False\n return train_iter, val_iter, test_iter, inputs, tags\n\nclass DummyTagger(Tagger):\n def __init__(self, hparams, **kwargs):\n super(DummyTagger, self).__init__(hparams=hparams, **kwargs)\n self.linear = nn.Linear(hparams.embedding_size_word, \n hparams.hidden_size)\n\n def compute(self, inputs_word_emb, inputs_char_emb):\n return self.linear(inputs_word_emb)\n\[email protected]\ndef test_tagger(tmpdir):\n tmpdir.chdir()\n hparams = hparams_tagging_base()\n train_iter, val_iter, test_iter, inputs, tags = udpos_dataset(hparams.batch_size)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n tagger = DummyTagger(hparams=hparams, vocabs=(inputs.vocab, None, tags.vocab)).to(device)\n assert tagger.embedding_word.weight.shape == (len(inputs.vocab), hparams.embedding_size_word)\n assert tagger.output_layer.output_projection.weight.shape == (len(tags.vocab), hparams.hidden_size)\n\n batch = next(iter(val_iter))\n loss, preds = tagger.loss(batch, compute_predictions=True)\n\n assert loss > 0\n assert preds.data.shape == batch.labels.data.shape\n\[email protected]\ndef test_tagger_create(tmpdir):\n tmpdir.chdir()\n hparams = hparams_tagging_base()\n train_iter, val_iter, test_iter, inputs, tags = udpos_dataset(hparams.batch_size)\n\n tagger = DummyTagger.create('test.Task', hparams=hparams, vocabs=(inputs.vocab, None, tags.vocab))\n assert isinstance(tagger, DummyTagger)\n assert tmpdir.join('test.Task-DummyTagger').join(VOCABS_FILE).check()\n\[email protected]\ndef test_tagger_load(tmpdir):\n tmpdir.chdir()\n hparams = hparams_tagging_base()\n train_iter, val_iter, test_iter, inputs, tags = udpos_dataset(hparams.batch_size)\n\n tagger = DummyTagger.create('test.Task', hparams=hparams, vocabs=(inputs.vocab, None, tags.vocab))\n tagger.iterations += 10\n tagger.save('test.Task')\n\n tagger_load, _ = DummyTagger.load('test.Task', checkpoint=-1)\n assert isinstance(tagger_load.vocab_tags, torchtext.vocab.Vocab)"
] | [
[
"torch.cuda.is_available",
"torch.nn.Linear"
]
] |
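The `DummyTagger` above reduces `compute` to a single `nn.Linear` over the word embeddings, with the device chosen via `torch.cuda.is_available()`. A self-contained sketch of that projection step, with hypothetical sizes standing in for the real `hparams` values:

    import torch
    import torch.nn as nn

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    embedding_size_word, hidden_size = 100, 128   # hypothetical hparams
    linear = nn.Linear(embedding_size_word, hidden_size).to(device)

    inputs_word_emb = torch.randn(8, 30, embedding_size_word, device=device)  # (batch, seq, emb)
    hidden = linear(inputs_word_emb)   # nn.Linear acts on the last dimension
    print(hidden.shape)                # torch.Size([8, 30, 128])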
dgarciabriseno/sunpy | [
"40f6d7d5ba9d63ac7ffd3ea8587642867caeae25"
] | [
"sunpy/image/tests/test_transform.py"
] | [
"import numpy as np\nimport pytest\nimport skimage.data as images\nfrom matplotlib.figure import Figure\nfrom skimage import transform as tf\n\nfrom astropy.coordinates.matrix_utilities import rotation_matrix\n\nfrom sunpy.image.transform import _rotation_registry, affine_transform\nfrom sunpy.tests.helpers import figure_test\nfrom sunpy.util import SunpyDeprecationWarning, SunpyUserWarning\n\n# Tolerance for tests\nRTOL = 1.0e-10\n\n\[email protected]\ndef original():\n # Test image\n return images.camera().astype('float')\n\n\[email protected]\ndef identity():\n return np.array([[1, 0], [0, 1]])\n\n\[email protected]\ndef rot30():\n return rotation_matrix(30)[0:2, 0:2]\n\n\ndef compare_results(expect, result, allclose=True):\n \"\"\"\n Function to check that the obtained results are what was expected, to\n within the relative tolerance defined above.\n \"\"\"\n # Outermost pixels can contain artefacts which will be ignored.\n exp = expect[1:-1, 1:-1]\n res = result[1:-1, 1:-1]\n t1 = abs(exp.mean() - res.mean()) <= RTOL*exp.mean()\n\n # Don't do the allclose test for skimage due to its forced interpolation beyond the edge of the\n # original image\n if not allclose:\n return t1\n else:\n notclose = ~np.isclose(exp, res, rtol=RTOL)\n t2 = not np.any(notclose)\n\n # Print out every mismatch\n if not t2:\n with np.errstate(divide='ignore'):\n mismatches = np.stack([*notclose.nonzero(), exp[notclose], res[notclose]]).T\n for row in mismatches:\n print(f\"i={int(row[0]+1)}, j={int(row[1]+1)}: \",\n f\"expected={row[2]}, result={row[3]}, \"\n f\"adiff={row[2]-row[3]}, rdiff={(row[2]-row[3])/row[2]}\")\n\n return t1 and t2\n\n\[email protected](\"angle, k\", [(90.0, 1), (-90.0, -1), (-270.0, 1),\n (-90.0, 3), (360.0, 0), (-360.0, 0)])\ndef test_rotation(original, angle, k):\n # Test rotation against expected outcome\n angle = np.radians(angle)\n c = np.round(np.cos(angle))\n s = np.round(np.sin(angle))\n rmatrix = np.array([[c, -s], [s, c]])\n expected = np.rot90(original, k=k)\n\n # Run the tests at order 4 as it produces more accurate 90 deg rotations\n rot = affine_transform(original, order=4, rmatrix=rmatrix)\n assert compare_results(expected, rot)\n\n # TODO: Check incremental 360 degree rotation against original image\n\n # Check derotated image against original\n derot_matrix = np.array([[c, s], [-s, c]])\n derot = affine_transform(rot, order=4, rmatrix=derot_matrix)\n assert compare_results(original, derot)\n\n\[email protected](\"angle, k\", [(90.0, 1), (-90.0, -1), (-270.0, 1),\n (-90.0, 3), (360.0, 0), (-360.0, 0)])\ndef test_skimage_rotation(original, angle, k):\n # Test rotation against expected outcome\n angle = np.radians(angle)\n c = np.round(np.cos(angle))\n s = np.round(np.sin(angle))\n rmatrix = np.array([[c, -s], [s, c]])\n expected = np.rot90(original, k=k)\n rot = affine_transform(original, rmatrix=rmatrix, method='scikit-image')\n assert compare_results(expected, rot, allclose=False)\n\n # TODO: Check incremental 360 degree rotation against original image\n\n # Check derotated image against original\n derot_matrix = np.array([[c, s], [-s, c]])\n derot = affine_transform(rot, rmatrix=derot_matrix, method='scikit-image')\n assert compare_results(original, derot, allclose=False)\n\n\ndx_values, dy_values = list(range(-100, 101, 100))*3, list(range(-100, 101, 100))*3\ndy_values.sort()\n\n\[email protected](\"dx, dy\", list(zip(dx_values, dy_values)))\ndef test_shift(original, dx, dy):\n # Rotation center for all translation tests.\n image_center = np.array(original.shape)/2.0 
- 0.5\n\n # No rotation for all translation tests.\n rmatrix = np.array([[1.0, 0.0], [0.0, 1.0]])\n\n # Check a shifted shape against expected outcome\n expected = np.roll(np.roll(original, dx, axis=1), dy, axis=0)\n rcen = image_center - np.array([dx, dy])\n shift = affine_transform(original, rmatrix=rmatrix, recenter=True, image_center=rcen, missing=0)\n ymin, ymax = max([0, dy]), min([original.shape[1], original.shape[1]+dy])\n xmin, xmax = max([0, dx]), min([original.shape[0], original.shape[0]+dx])\n assert compare_results(expected[ymin:ymax, xmin:xmax], shift[ymin:ymax, xmin:xmax])\n\n # Check shifted and unshifted shape against original image\n rcen = image_center + np.array([dx, dy])\n unshift = affine_transform(shift, rmatrix=rmatrix, recenter=True, image_center=rcen, missing=0)\n # Need to ignore the portion of the image cut off by the first shift\n ymin, ymax = max([0, -dy]), min([original.shape[1], original.shape[1]-dy])\n xmin, xmax = max([0, -dx]), min([original.shape[0], original.shape[0]-dx])\n assert compare_results(original[ymin:ymax, xmin:xmax], unshift[ymin:ymax, xmin:xmax])\n\n\[email protected](\"scale_factor\", [0.25, 0.5, 0.75, 1.0, 1.25, 1.5])\ndef test_scale(original, scale_factor):\n # No rotation for all scaling tests.\n rmatrix = np.array([[1.0, 0.0], [0.0, 1.0]])\n\n # Check a scaled image against the expected outcome\n # When we depend on SciPy 1.6, we can replace this with scipy.ndimage.zoom(..., grid_mode=True)\n newim = tf.rescale(original / original.max(), scale_factor, order=1,\n mode='constant', anti_aliasing=False) * original.max()\n # Old width and new center of image\n w = original.shape[0] / 2.0 - 0.5\n new_c = (newim.shape[0] / 2.0) - 0.5\n expected = np.zeros(original.shape)\n upper = int(w + new_c + 1)\n if scale_factor > 1:\n lower = int(new_c - w)\n expected = newim[lower:upper, lower:upper]\n else:\n lower = int(w - new_c)\n expected[lower:upper, lower:upper] = newim\n scale = affine_transform(original, rmatrix=rmatrix, scale=scale_factor, order=1, missing=0)\n assert compare_results(expected, scale)\n\n\[email protected](\"angle, dx, dy, scale_factor\", [(90, -100, 40, 0.25),\n (-90, 40, -80, 0.75),\n (180, 20, 50, 1.5)])\ndef test_all(original, angle, dx, dy, scale_factor):\n \"\"\"\n Tests to make sure that combinations of scaling, shifting and rotation\n produce the expected output.\n \"\"\"\n k = int(angle / 90)\n angle = np.radians(angle)\n image_center = np.array(original.shape) / 2.0 - 0.5\n\n # Check a shifted, rotated and scaled shape against expected outcome\n c = np.round(np.cos(angle))\n s = np.round(np.sin(angle))\n rmatrix = np.array([[c, -s], [s, c]])\n # When we depend on SciPy 1.6, we can replace this with scipy.ndimage.zoom(..., grid_mode=True)\n scale = tf.rescale(original / original.max(), scale_factor, order=1,\n mode='constant', anti_aliasing=False) * original.max()\n new = np.zeros(original.shape)\n\n disp = np.array([dx, dy])\n dxs, dys = np.asarray(disp * scale_factor, dtype=int)\n # Old width and new center of image\n w = np.array(original.shape[0])/2.0 - 0.5\n new_c = (np.array(scale.shape[0])/2.0 - 0.5)\n upper = int(w+new_c+1)\n if scale_factor > 1:\n lower = int(new_c-w)\n new = scale[lower-dys:upper-dys, lower-dxs:upper-dxs]\n else:\n lower = int(w-new_c)\n new[lower+dys:upper+dys, lower+dxs:upper+dxs] = scale\n rcen = image_center - disp\n expected = np.rot90(new, k=k)\n\n rotscaleshift = affine_transform(original, rmatrix=rmatrix, scale=scale_factor, order=1,\n recenter=True, image_center=rcen, missing=0)\n 
assert compare_results(expected, rotscaleshift)\n\n # Check a rotated/shifted and restored image against original\n transformed = affine_transform(original, rmatrix=rmatrix, scale=1.0, order=1, recenter=True,\n image_center=rcen, missing=0)\n inv_rcen = image_center + np.dot(rmatrix.T, np.array([dx, dy]))\n inverse = affine_transform(transformed, rmatrix=rmatrix.T, scale=1.0, order=1, recenter=True,\n image_center=inv_rcen, missing=0)\n\n # Need to ignore the portion of the image cut off by the first shift\n ymin, ymax = max([0, -dy]), min([original.shape[1], original.shape[1]-dy])\n xmin, xmax = max([0, -dx]), min([original.shape[0], original.shape[0]-dx])\n assert compare_results(original[ymin:ymax, xmin:xmax], inverse[ymin:ymax, xmin:xmax])\n\n\ndef test_flat(identity):\n # Test that a flat array can be rotated using scikit-image\n in_arr = np.array([[100]], dtype=np.float64)\n out_arr = affine_transform(in_arr, rmatrix=identity)\n assert np.allclose(in_arr, out_arr, rtol=RTOL)\n\n\ndef test_nan_skimage(identity):\n # Test preservation of NaN values for scikit-image rotation\n in_arr = np.array([[np.nan, 0]])\n out_arr = affine_transform(in_arr, rmatrix=identity, order=0, method='scikit-image')\n assert np.isnan(out_arr[0, 0])\n\n\ndef test_nan_scipy(identity):\n # Test preservation of NaN values for scipy rotation\n in_arr = np.array([[np.nan, 0]])\n out_arr = affine_transform(in_arr, rmatrix=identity, order=0, method='scipy')\n assert np.isnan(out_arr[0, 0])\n\n\ndef test_int(identity):\n # Test casting of integer array to float array\n in_arr = np.array([[100]], dtype=int)\n with pytest.warns(SunpyUserWarning, match='Integer input data has been cast to float64'):\n out_arr = affine_transform(in_arr, rmatrix=identity, method='scikit-image')\n assert np.issubdtype(out_arr.dtype, np.floating)\n\n\ndef test_float32(identity):\n # Check that float32 input remains as float32 output\n # Test casting of integer array to float array\n in_arr = np.array([[100]], dtype=np.float32)\n out_arr = affine_transform(in_arr, rmatrix=identity)\n assert np.issubdtype(out_arr.dtype, np.float32)\n\n\ndef test_deprecated_args(identity):\n in_arr = np.array([[100]])\n with pytest.warns(SunpyDeprecationWarning, match=\"The 'use_scipy' argument is deprecated\"):\n out_arr = affine_transform(in_arr, rmatrix=identity, use_scipy=True)\n\n with pytest.warns(SunpyDeprecationWarning, match=\"The 'use_scipy' argument is deprecated\"):\n out_arr = affine_transform(in_arr, rmatrix=identity, use_scipy=False)\n\n with pytest.raises(ValueError, match=\"Method blah not in supported methods\"):\n out_arr = affine_transform(in_arr, rmatrix=identity, method='blah')\n\n with pytest.warns(SunpyUserWarning, match=\"Using scipy instead of scikit-image for rotation\"):\n out_arr = affine_transform(in_arr, rmatrix=identity, use_scipy=True, method='scikit-image')\n\n\ndef test_reproducible_matrix_multiplication():\n # Test whether matrix multiplication involving a large matrix always gives the same answer\n # This indirectly tests whichever BLAS/LAPACK libraries that NumPy is linking to (if any)\n x = np.arange(500000, dtype=np.float64)\n src = np.vstack((x, -10*x)).T\n matrix = np.array([[0, 1], [1, 0]])\n\n expected = np.vstack((-10*x, x)).T # src @ matrix\n\n mismatches = np.zeros(500, int)\n for i in range(len(mismatches)):\n result = src @ matrix\n mismatches[i] = (~np.isclose(result, expected)).sum()\n if mismatches[i] != 0:\n print(f\"{mismatches[i]} mismatching elements in multiplication #{i}\")\n\n assert np.sum(mismatches 
!= 0) == 0\n\n\n@figure_test\ndef test_clipping(rot30):\n # Generates a plot to test the clipping the output image to the range of the input image\n image = np.ones((20, 20))\n image[4:-4, 4:-4] = 2\n\n num_methods = len(_rotation_registry.keys())\n\n fig = Figure(figsize=(12, 2*num_methods))\n axs = fig.subplots(nrows=num_methods, ncols=5)\n\n for i, method in enumerate(_rotation_registry.keys()):\n axs[i, 0].imshow(image, vmin=0, vmax=3)\n axs[i, 1].imshow(affine_transform(image, rot30, clip=False, method=method, missing=0),\n vmin=0, vmax=3)\n axs[i, 2].imshow(affine_transform(image, rot30, clip=True, method=method, missing=0),\n vmin=0, vmax=3)\n axs[i, 3].imshow(affine_transform(image, rot30, clip=True, method=method, missing=2),\n vmin=0, vmax=3)\n axs[i, 4].imshow(affine_transform(image, rot30, clip=True, method=method, missing=np.nan),\n vmin=0, vmax=3)\n axs[i, 0].set_ylabel(method)\n\n axs[0, 0].set_title('Original')\n axs[0, 1].set_title('no clip & missing=0')\n axs[0, 2].set_title('clip & missing=0')\n axs[0, 3].set_title('clip & missing=2')\n axs[0, 4].set_title('clip & missing=NaN')\n\n return fig\n\n\[email protected](\"ignore:.*bug in the implementation of scikit-image\")\n@figure_test\ndef test_nans(rot30):\n # Generates a plot to test the preservation and expansions of NaNs by the rotation\n image_with_nans = np.ones((23, 23))\n image_with_nans[4:-4, 4:-4] = 2\n image_with_nans[9:-9, 9:-9] = np.nan\n\n num_methods = len(_rotation_registry.keys())\n\n fig = Figure(figsize=(16, 2*num_methods))\n axs = fig.subplots(nrows=num_methods, ncols=7)\n\n axs[0, 0].set_title('Original (NaNs are white)')\n\n for j in range(6):\n axs[0, j+1].set_title(f'order={j}')\n for i, method in enumerate(_rotation_registry.keys()):\n axs[i, 0].imshow(image_with_nans, vmin=-1.1, vmax=1.1)\n for j in range(6):\n if j not in _rotation_registry[method].allowed_orders:\n with pytest.raises(ValueError):\n affine_transform(image_with_nans, rot30, order=j, method=method, missing=np.nan)\n axs[i, j+1].remove()\n else:\n axs[i, j+1].imshow(affine_transform(image_with_nans, rot30,\n order=j, method=method, missing=np.nan),\n vmin=-1.1, vmax=1.1)\n axs[i, 0].set_ylabel(method)\n\n return fig\n\n\[email protected](\"ignore:.*bug in the implementation of scikit-image\")\[email protected]('method', _rotation_registry.keys())\[email protected]('order', range(6))\ndef test_endian(method, order, rot30):\n if order not in _rotation_registry[method].allowed_orders:\n return\n\n # Test that the rotation output values do not change with input byte order\n native = np.ones((10, 10))\n swapped = native.byteswap().newbyteorder()\n\n rot_native = affine_transform(native, rot30, order=order, method=method, missing=0)\n rot_swapped = affine_transform(swapped, rot30, order=order, method=method, missing=0)\n\n assert compare_results(rot_native, rot_swapped)\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.issubdtype",
"numpy.isclose",
"numpy.asarray",
"numpy.any",
"numpy.vstack",
"numpy.allclose",
"matplotlib.figure.Figure",
"numpy.cos",
"numpy.isnan",
"numpy.zeros",
"numpy.arange",
"numpy.roll",
"numpy.errstate",
"numpy.rot90",
"numpy.array",
"numpy.sin",
"numpy.radians"
]
] |
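`test_rotation` above builds its expected output with `np.rot90` and an exact rotation matrix from rounded sine/cosine values. A standalone sketch of just that setup, using a made-up 3x3 image in place of the camera test image:

    import numpy as np

    angle = np.radians(90.0)
    c, s = np.round(np.cos(angle)), np.round(np.sin(angle))
    rmatrix = np.array([[c, -s], [s, c]])   # exact 90-degree rotation matrix

    image = np.arange(9, dtype=float).reshape(3, 3)
    expected = np.rot90(image, k=1)         # reference that affine_transform is compared against
    print(rmatrix)
    print(expected)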
ee14b104/sktime | [
"4a84a8257ccd15aa7736557aef5d34e015e16fd1"
] | [
"sktime/forecasting/tests/test_all_forecasters.py"
] | [
"#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n# test API provided through BaseForecaster\n\n__author__ = [\"Markus Löning\"]\n__all__ = [\n \"test_raises_not_fitted_error\",\n \"test_score\",\n \"test_predict_time_index\",\n \"test_update_predict_predicted_indices\",\n \"test_bad_y_input\",\n \"test_fitted_params\",\n \"test_predict_time_index_in_sample_full\",\n \"test_predict_pred_interval\",\n \"test_update_predict_single\",\n]\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom sktime.exceptions import NotFittedError\nfrom sktime.forecasting.model_selection import SlidingWindowSplitter\nfrom sktime.forecasting.model_selection import temporal_train_test_split\nfrom sktime.forecasting.tests._config import TEST_ALPHAS\nfrom sktime.forecasting.tests._config import TEST_FHS\nfrom sktime.forecasting.tests._config import TEST_OOS_FHS\nfrom sktime.forecasting.tests._config import TEST_STEP_LENGTHS\nfrom sktime.forecasting.tests._config import TEST_WINDOW_LENGTHS\nfrom sktime.forecasting.tests._config import TEST_YS\nfrom sktime.forecasting.tests._config import VALID_INDEX_FH_COMBINATIONS\nfrom sktime.performance_metrics.forecasting import smape_loss\nfrom sktime.utils import all_estimators\nfrom sktime.utils._testing import _construct_instance\nfrom sktime.utils._testing.forecasting import _make_fh\nfrom sktime.utils._testing.forecasting import assert_correct_pred_time_index\nfrom sktime.utils._testing.forecasting import get_expected_index_for_update_predict\nfrom sktime.utils._testing.forecasting import make_forecasting_problem\nfrom sktime.utils.validation.forecasting import check_fh\n\n# get all forecasters\nFORECASTERS = all_estimators(estimator_types=\"forecaster\", return_names=False)\nFH0 = 1\n\n# testing data\ny = make_forecasting_problem()\ny_train, y_test = temporal_train_test_split(y, train_size=0.75)\n\n\[email protected](\"Forecaster\", FORECASTERS)\ndef test_fitted_params(Forecaster):\n f = _construct_instance(Forecaster)\n f.fit(y_train, fh=FH0)\n try:\n params = f.get_fitted_params()\n assert isinstance(params, dict)\n\n except NotImplementedError:\n pass\n\n\[email protected](\"Forecaster\", FORECASTERS)\ndef test_raises_not_fitted_error(Forecaster):\n # We here check extra method of the forecaster API: update and update_predict.\n f = _construct_instance(Forecaster)\n\n # predict is check in test suite for all estimators\n with pytest.raises(NotFittedError):\n f.update(y_test, update_params=False)\n\n with pytest.raises(NotFittedError):\n cv = SlidingWindowSplitter(fh=1, window_length=1)\n f.update_predict(y_test, cv=cv)\n\n try:\n with pytest.raises(NotFittedError):\n f.get_fitted_params()\n except NotImplementedError:\n pass\n\n\ndef assert_correct_msg(exception, msg):\n assert exception.value.args[0] == msg\n\n\[email protected](\"Forecaster\", FORECASTERS)\[email protected](\n \"y\", [np.random.random(size=3), [1, 3, 0.5], (1, 3, 0.5)] # array # list # tuple\n)\ndef test_bad_y_input(Forecaster, y):\n # Check that bad input arguments raise an appropriate error message.\n with pytest.raises(ValueError, match=r\"univariate\"):\n f = _construct_instance(Forecaster)\n f.fit(y, fh=FH0)\n\n\[email protected](\"Forecaster\", FORECASTERS)\[email protected](\n \"index_type, fh_type, is_relative\", VALID_INDEX_FH_COMBINATIONS\n)\[email protected](\"steps\", TEST_FHS) # fh steps\ndef test_predict_time_index(Forecaster, index_type, fh_type, is_relative, steps):\n # Check that predicted time 
index matches forecasting horizon.\n y_train = make_forecasting_problem(index_type=index_type)\n cutoff = y_train.index[-1]\n fh = _make_fh(cutoff, steps, fh_type, is_relative)\n f = _construct_instance(Forecaster)\n try:\n f.fit(y_train, fh=fh)\n y_pred = f.predict()\n assert_correct_pred_time_index(y_pred.index, y_train.index[-1], fh)\n except NotImplementedError:\n pass\n\n\[email protected](\"Forecaster\", FORECASTERS)\[email protected](\n \"index_type, fh_type, is_relative\", VALID_INDEX_FH_COMBINATIONS\n)\ndef test_predict_time_index_in_sample_full(\n Forecaster, index_type, fh_type, is_relative\n):\n # Check that predicted time index matched forecasting horizon for full in-sample\n # predictions.\n y_train = make_forecasting_problem(index_type=index_type)\n cutoff = y_train.index[-1]\n steps = -np.arange(len(y_train)) # full in-sample fh\n fh = _make_fh(cutoff, steps, fh_type, is_relative)\n f = _construct_instance(Forecaster)\n try:\n f.fit(y_train, fh=fh)\n y_pred = f.predict()\n assert_correct_pred_time_index(y_pred.index, y_train.index[-1], fh)\n except NotImplementedError:\n pass\n\n\ndef check_pred_ints(pred_ints, y_train, y_pred, fh):\n # make iterable\n if isinstance(pred_ints, pd.DataFrame):\n pred_ints = [pred_ints]\n\n for pred_int in pred_ints:\n assert list(pred_int.columns) == [\"lower\", \"upper\"]\n assert_correct_pred_time_index(pred_int.index, y_train.index[-1], fh)\n\n # check if errors are weakly monotonically increasing\n pred_errors = y_pred - pred_int[\"lower\"]\n # assert pred_errors.is_mononotic_increasing\n assert np.all(\n pred_errors.values[1:].round(4) >= pred_errors.values[:-1].round(4)\n )\n\n\[email protected](\"Forecaster\", FORECASTERS)\[email protected](\"fh\", TEST_OOS_FHS)\[email protected](\"alpha\", TEST_ALPHAS)\ndef test_predict_pred_interval(Forecaster, fh, alpha):\n # Check prediction intervals.\n f = _construct_instance(Forecaster)\n f.fit(y_train, fh=fh)\n try:\n y_pred, pred_ints = f.predict(return_pred_int=True, alpha=alpha)\n check_pred_ints(pred_ints, y_train, y_pred, fh)\n\n except NotImplementedError:\n pass\n\n\[email protected](\"Forecaster\", FORECASTERS)\[email protected](\"fh\", TEST_OOS_FHS)\ndef test_score(Forecaster, fh):\n # Check score method\n f = _construct_instance(Forecaster)\n f.fit(y_train, fh=fh)\n y_pred = f.predict()\n\n fh_idx = check_fh(fh).to_indexer() # get zero based index\n expected = smape_loss(y_pred, y_test.iloc[fh_idx])\n\n # compare with actual score\n f = _construct_instance(Forecaster)\n f.fit(y_train, fh=fh)\n actual = f.score(y_test.iloc[fh_idx], fh=fh)\n assert actual == expected\n\n\[email protected](\"Forecaster\", FORECASTERS)\[email protected](\"fh\", TEST_OOS_FHS)\ndef test_update_predict_single(Forecaster, fh):\n # Check correct time index of update-predict\n f = _construct_instance(Forecaster)\n f.fit(y_train, fh=fh)\n y_pred = f.update_predict_single(y_test)\n assert_correct_pred_time_index(y_pred.index, y_test.index[-1], fh)\n\n\ndef check_update_predict_y_pred(y_pred, y_test, fh, step_length):\n assert isinstance(y_pred, (pd.Series, pd.DataFrame))\n if isinstance(y_pred, pd.DataFrame):\n assert y_pred.shape[1] > 1\n expected_index = get_expected_index_for_update_predict(y_test, fh, step_length)\n np.testing.assert_array_equal(y_pred.index, expected_index)\n\n\[email protected](\"Forecaster\", FORECASTERS)\[email protected](\"fh\", TEST_OOS_FHS)\[email protected](\"window_length\", TEST_WINDOW_LENGTHS)\[email protected](\"step_length\", TEST_STEP_LENGTHS)\[email protected](\"y\", 
TEST_YS)\ndef test_update_predict_predicted_indices(\n Forecaster, fh, window_length, step_length, y\n):\n y_train, y_test = temporal_train_test_split(y)\n cv = SlidingWindowSplitter(fh, window_length=window_length, step_length=step_length)\n f = _construct_instance(Forecaster)\n f.fit(y_train, fh=fh)\n try:\n y_pred = f.update_predict(y_test, cv=cv)\n check_update_predict_y_pred(y_pred, y_test, fh, step_length)\n except NotImplementedError:\n pass\n"
] | [
[
"numpy.random.random",
"numpy.testing.assert_array_equal"
]
] |
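`check_update_predict_y_pred` above verifies predictions by comparing `y_pred.index` against an expected index with `np.testing.assert_array_equal`. A toy recreation of that assertion, with a hypothetical RangeIndex and random values standing in for a forecaster's output:

    import numpy as np
    import pandas as pd

    expected_index = pd.RangeIndex(start=10, stop=15)                   # hypothetical horizon index
    y_pred = pd.Series(np.random.random(size=5), index=expected_index)

    np.testing.assert_array_equal(y_pred.index, expected_index)  # passes silently when indices match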
sarahbald/BIG_2021_microbiome_evolution | [
"8adb48e9596a30f2db5b49a0f47f0ddc1188fcdf"
] | [
"scripts/calculate_within_person_sfs.py"
] | [
"import parse_midas_data\n#import pylab\nimport sys\nimport numpy\nimport bz2\nimport calculate_snp_prevalences\n\n\n\n################################################################################\n#\n# Standard header to read in argument information\n#\n################################################################################\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"species_name\", help=\"name of species to process\")\nparser.add_argument(\"--debug\", help=\"Loads only a subset of SNPs for speed\", action=\"store_true\")\nparser.add_argument(\"--chunk-size\", type=int, help=\"max number of records to load\", default=1000000000)\nargs = parser.parse_args()\n\nspecies_name = args.species_name\ndebug = args.debug\nchunk_size = args.chunk_size\n################################################################################\n\n\n# Should we do this?\nsys.stderr.write(\"Loading core genes...\\n\")\ncore_genes = parse_midas_data.load_core_genes(species_name)\nsys.stderr.write(\"Done! %d core genes\\n\" % len(core_genes))\nallowed_genes = core_genes\n\nsys.stderr.write(\"Loading population freqs...\\n\")\npopulation_freqs = calculate_snp_prevalences.parse_population_freqs(species_name)\nsys.stderr.write(\"Done! %d SNVs\\n\" % len(population_freqs))\n\nallowed_variant_type_list = ['1D','2D','3D','4D']\nallowed_variant_types = set(allowed_variant_type_list)\n\n# Open post-processed MIDAS output\nsnp_file = bz2.BZ2File(\"%ssnps/%s/annotated_snps.txt.bz2\" % (parse_midas_data.data_directory, species_name),\"r\")\n\nline = snp_file.readline() # header\nitems = line.split()[1:]\nsamples = numpy.array([item.strip() for item in items])\n\n# We shouldn't be doing this for raw data\n#samples = parse_midas_data.parse_merged_sample_names(items)\n\nsite_map = [{} for sample in samples]\nfor sample_idx in xrange(0,len(samples)):\n site_map[sample_idx] = {variant_type:{} for variant_type in allowed_variant_types}\n\nsys.stderr.write(\"Calculating within-person SFSs...\\n\")\nnum_sites_processed = 0\nfor line in snp_file:\n #\n items = line.split()\n # Load information about site\n info_items = items[0].split(\"|\")\n chromosome = info_items[0]\n location = long(info_items[1])\n gene_name = info_items[2]\n variant_type = info_items[3]\n\n if len(info_items) > 5: # for backwards compatability\n polarization = info_items[4]\n pvalue = float(info_items[5])\n else:\n polarization=\"?\"\n pvalue = float(info_items[4])\n\n #\n if variant_type not in allowed_variant_types:\n continue\n #\n if len(allowed_genes)>0 and (gene_name not in allowed_genes):\n continue\n #\n # Load alt and depth counts\n alts = []\n depths = []\n for item in items[1:]:\n subitems = item.split(\",\")\n alts.append(long(subitems[0]))\n depths.append(long(subitems[1]))\n alts = numpy.array(alts)\n depths = numpy.array(depths)\n refs = depths-alts\n #print alts\n #print depths\n #\n # population_freq returns the fraction of people for which the alt is the major allele.\n # This is a very important quantity being computed! 
It is later used for identifying CPS samples.\n if (chromosome, location) in population_freqs:\n population_freq = population_freqs[(chromosome, location)]\n else:\n population_freq = 0\n\n # polarize SFS according to population freq\n if population_freq>0.5:\n alts,refs = refs,alts\n population_freq = 1-population_freq\n\n #\n for i in xrange(0,len(alts)):\n site = (depths[i],alts[i])\n #\n if site not in site_map[i][variant_type]:\n site_map[i][variant_type][site] = [0,0.0]\n #\n site_map[i][variant_type][site][0] += 1\n site_map[i][variant_type][site][1] += population_freq # weight of polarization reversals\n #\n #\n num_sites_processed+=1\n #print num_sites_processed\n if num_sites_processed%50000==0:\n sys.stderr.write(\"%dk sites processed...\\n\" % (num_sites_processed/1000))\n if debug:\n break\n\nsnp_file.close()\nsys.stderr.write(\"Done!\\n\")\n# Write to disk!\nsys.stderr.write(\"Writing output...\\n\")\n# First write (filtered) genome-wide coverage distribution\noutput_file = bz2.BZ2File(\"%ssnps/%s/within_sample_sfs.txt.bz2\" % (parse_midas_data.data_directory, species_name),\"w\")\noutput_file.write(\"\\t\".join([\"SampleID\", \"variant_type\", \"D,A,count,reverse_count\", \"...\"]))\nfor sample_idx in xrange(0,len(samples)):\n sample = samples[sample_idx]\n for variant_type in allowed_variant_type_list:\n output_file.write(\"\\n\")\n output_file.write(\"\\t\".join([sample, variant_type]+[\"%d,%d,%d,%g\" % (site[0],site[1],site_map[sample_idx][variant_type][site][0],site_map[sample_idx][variant_type][site][1]) for site in sorted(site_map[sample_idx][variant_type].keys())]))\noutput_file.close()\nsys.stderr.write(\"Done!\\n\")\n"
] | [
[
"numpy.array"
]
] |
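The SFS script above parses each sample's `alt,depth` field, derives reference counts, and flips alt/ref whenever the population frequency of the alt allele exceeds 0.5. A Python 3 sketch of that polarization step (the original uses Python 2 `long`; the counts and frequency here are invented):

    import numpy as np

    items = ["3,20", "10,12", "0,5"]   # hypothetical "alt,depth" fields for three samples
    alts = np.array([int(x.split(",")[0]) for x in items])
    depths = np.array([int(x.split(",")[1]) for x in items])
    refs = depths - alts

    population_freq = 0.8
    if population_freq > 0.5:          # polarize so the minor allele is tracked
        alts, refs = refs, alts
        population_freq = 1 - population_freq
    print(alts, refs, round(population_freq, 2))  # [17  2  5] [ 3 10  0] 0.2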
YukeWang96/pytorch_geometric | [
"3c4466a3f38a2eba92073c730a09953ab5082c3d"
] | [
"torch_geometric/datasets/karate.py"
] | [
"import torch\nimport numpy as np\nimport networkx as nx\nfrom torch_geometric.data import InMemoryDataset, Data\n\n\nclass KarateClub(InMemoryDataset):\n r\"\"\"Zachary's karate club network from the `\"An Information Flow Model for\n Conflict and Fission in Small Groups\"\n <http://www1.ind.ku.dk/complexLearning/zachary1977.pdf>`_ paper, containing\n 34 nodes, connected by 154 (undirected and unweighted) edges.\n Every node is labeled by one of two classes.\n\n Args:\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n \"\"\"\n\n def __init__(self, transform=None):\n super(KarateClub, self).__init__('.', transform, None, None)\n\n G = nx.karate_club_graph()\n\n adj = nx.to_scipy_sparse_matrix(G).tocoo()\n row = torch.from_numpy(adj.row.astype(np.int64)).to(torch.long)\n col = torch.from_numpy(adj.col.astype(np.int64)).to(torch.long)\n edge_index = torch.stack([row, col], dim=0)\n data = Data(edge_index=edge_index)\n data.num_nodes = edge_index.max().item() + 1\n data.x = torch.eye(data.num_nodes, dtype=torch.float)\n y = [0 if G.node[i]['club'] == 'Mr. Hi' else 1 for i in G.nodes]\n data.y = torch.tensor(y)\n self.data, self.slices = self.collate([data])\n\n def _download(self):\n return\n\n def _process(self):\n return\n\n def __repr__(self):\n return '{}()'.format(self.__class__.__name__)\n"
] | [
[
"torch.stack",
"torch.eye",
"torch.tensor"
]
] |
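`KarateClub.__init__` above turns a COO sparse adjacency into an `edge_index` with `torch.stack`, builds one-hot node features with `torch.eye`, and wraps the labels with `torch.tensor`. A standalone sketch with a hypothetical 3-node graph instead of the networkx karate club:

    import numpy as np
    import torch

    # Hypothetical COO adjacency (row/col index pairs) for a 3-node undirected graph.
    row = np.array([0, 1, 1, 2], dtype=np.int64)
    col = np.array([1, 0, 2, 1], dtype=np.int64)

    edge_index = torch.stack([torch.from_numpy(row), torch.from_numpy(col)], dim=0)
    num_nodes = int(edge_index.max()) + 1
    x = torch.eye(num_nodes, dtype=torch.float)   # one-hot node features
    y = torch.tensor([0, 1, 0])                   # hypothetical class labels
    print(edge_index.shape, x.shape, y.shape)     # torch.Size([2, 4]) torch.Size([3, 3]) torch.Size([3])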
rgerum/open_gpias | [
"53936c10794328d89df179b2f0a56bccdc1883d8"
] | [
"open_gpias/StimulusFrontEnd.py"
] | [
"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# StimulusFrontEnd.py\r\n\r\n# Copyright (c) 2018, Richard Gerum, Achim Schilling, Hinrich Rahlfs, Matthias Streb\r\n#\r\n# This file is part of ASR-Setup.\r\n#\r\n# ASR-Setup is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# ASR-Setup is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with ASR-Setup. If not, see <http://www.gnu.org/licenses/>\r\n\r\nimport os\r\nimport sys\r\nimport time\r\nimport numpy as np\r\nfrom qtpy import QtCore, QtGui, QtWidgets\r\nimport qtawesome as qta\r\nfrom threading import Thread\r\nfrom . import StimulusBackend\r\nfrom . import gui_helpers\r\nfrom .MeasurementPlot import plotWidget\r\n\r\n\r\nclass measurementGui(QtWidgets.QWidget):\r\n timeString = \"\"\r\n shutDown = 0\r\n thisplot = None\r\n backup_plot_count = 0\r\n\r\n measurement_thread = None\r\n plot_window = None\r\n\r\n def __init__(self, parent, protocol, config, signal):\r\n super().__init__()\r\n self.setWindowTitle(\"Acoustic Startle Response - Measure\")\r\n self.parent = parent\r\n self.config = config\r\n self.parent.settingsUpdated.connect(self.statusUpdated)\r\n\r\n layout1 = QtWidgets.QVBoxLayout(self)\r\n\r\n layout2 = QtWidgets.QHBoxLayout()\r\n layout1.addLayout(layout2)\r\n\r\n # Metadata\r\n layout_properties = QtWidgets.QVBoxLayout()\r\n layout2.addLayout(layout_properties)\r\n # experimenter\r\n self.textEdit_Experimenter = gui_helpers.addLineEdit(layout_properties, \"Experimenter:\", \"Experimenter\")\r\n self.textEdit_Experimenter.textEdited.connect(self.statusUpdated)\r\n # animal name\r\n self.textEdit_Mousname = gui_helpers.addLineEdit(layout_properties, \"Animal name:\", \"Mouse\")\r\n self.textEdit_Mousname.textEdited.connect(self.statusUpdated)\r\n # animal status\r\n self.textEdit_status = gui_helpers.addLineEdit(layout_properties, \"Animal status:\", \"pre or post\")\r\n self.textEdit_status.textEdited.connect(self.statusUpdated)\r\n\r\n # status display\r\n self.status_bar = gui_helpers.QStatusBar(dict(Soundcard=False, NiDAQ=False, Protocol=False, Metadata=False), layout_properties)\r\n layout_properties.addStretch()\r\n\r\n layout_properties2 = QtWidgets.QVBoxLayout()\r\n layout2.addLayout(layout_properties2)\r\n\r\n # Label\r\n self.labelStatus = QtWidgets.QLabel(\"Measurement status:\")\r\n layout_properties2.addWidget(self.labelStatus)\r\n\r\n # Progress Bar\r\n self.progressBar = QtWidgets.QProgressBar()\r\n layout_properties2.addWidget(self.progressBar)\r\n self.progressBar.setFormat(\"%v/%m\")\r\n\r\n # Remaining time\r\n self.labelRemaining = QtWidgets.QLabel(\"Remaining time:\")\r\n layout_properties2.addWidget(self.labelRemaining)\r\n\r\n self.textEdit_out = gui_helpers.addLogBox(layout_properties2, \"Output:\")\r\n layout_properties2.addStretch()\r\n\r\n self.plot = plotWidget(config=config)\r\n layout1.addWidget(self.plot)\r\n\r\n layout_buttons = QtWidgets.QHBoxLayout()\r\n layout1.addLayout(layout_buttons)\r\n\r\n # measurement control buttons\r\n self.startButton = gui_helpers.addPushButton(layout_buttons, \"Start Measurement\", 
self.startStimulation, icon=qta.icon(\"fa.play\"))\r\n self.pauseButton = gui_helpers.addPushButton(layout_buttons, \"Pause Measurement\", self.pause, icon=qta.icon(\"fa.pause\"))\r\n self.stopButton = gui_helpers.addPushButton(layout_buttons, \"Stop Measurement\", self.stop, icon=qta.icon(\"fa.stop\"))\r\n\r\n self.setButtonStatus(0)\r\n\r\n self.measurement_thread = StimulusBackend.Measurement(protocol, config, signal)\r\n self.measurement_thread.trial_finished.connect(self.trialFinishedEvent)\r\n self.measurement_thread.measurement_finished.connect(self.m_finished)\r\n self.measurement_thread.paused.connect(self.m_paused)\r\n self.measurement_thread.stopped.connect(self.m_stopped)\r\n self.measurement_thread.resumed.connect(self.m_resumed)\r\n self.parent.settingsUpdated.connect(self.measurement_thread.signal.loadConfig)\r\n\r\n self.measurement_thread.error.connect(self.textEdit_out.addLog)\r\n\r\n self.statusUpdated()\r\n\r\n self.textEdit_out.addLog(\"Program started\")\r\n\r\n if 0:\r\n data = np.load(r\"D:\\Repositories\\open_gpias\\open_gpias\\Achim_LS01_pre__turner_and_threshold_2018.npy\")\r\n\r\n self.plt_index = 10\r\n self.plot_it(data, self.plt_index)\r\n\r\n layout_navigate = QtWidgets.QHBoxLayout(self)\r\n layout_properties.addLayout(layout_navigate)\r\n gui_helpers.addPushButton(layout_navigate, \"\", self.navigateLeft, icon=qta.icon(\"fa.arrow-left\"))\r\n self.label_title = QtWidgets.QSpinBox()\r\n self.label_title.setSuffix(\" / 0\")\r\n self.label_title.setPrefix(\"Trial \")\r\n self.label_title.setRange(0, 400)\r\n self.label_title.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_title.valueChanged.connect(self.plotOutputSignal)\r\n layout_navigate.addWidget(self.label_title)\r\n gui_helpers.addPushButton(layout_navigate, \"\", self.navigateRight, icon=qta.icon(\"fa.arrow-right\"))\r\n\r\n def navigateLeft(self):\r\n self.plt_index -= 1\r\n self.label_title.setValue(self.plt_index)\r\n\r\n def plotOutputSignal(self):\r\n data = np.load(r\"D:\\Repositories\\open_gpias\\open_gpias\\Achim_LS01_pre__turner_and_threshold_2018.npy\")\r\n self.plot_it(data, self.plt_index)\r\n\r\n def navigateRight(self):\r\n self.plt_index += 1\r\n self.label_title.setValue(self.plt_index)\r\n\r\n def trialFinishedEvent(self, data_extracted, idxStartle, protocol):\r\n self.plot_it(data_extracted, idxStartle)\r\n self.save_backup(data_extracted)\r\n self.update_timer(protocol, idxStartle)\r\n\r\n def setButtonStatus(self, status):\r\n if status == 0: # no measurement\r\n self.startButton.setEnabled(True)\r\n self.pauseButton.setEnabled(False)\r\n self.stopButton.setEnabled(False)\r\n if status == 1: # running measurement\r\n self.startButton.setEnabled(False)\r\n self.pauseButton.setEnabled(True)\r\n self.stopButton.setEnabled(True)\r\n if status == 2: # pause measurement\r\n self.startButton.setEnabled(False)\r\n self.pauseButton.setEnabled(True)\r\n self.stopButton.setEnabled(False)\r\n if status == -1: # waiting to stop or pause\r\n self.startButton.setEnabled(False)\r\n self.pauseButton.setEnabled(False)\r\n self.stopButton.setEnabled(False)\r\n\r\n def statusUpdated(self):\r\n status = dict(Soundcard=self.measurement_thread.signal.checkSettings(),\r\n NiDAQ=self.measurement_thread.checkNiDAQ(),\r\n Protocol=self.measurement_thread.protocolWidget.checkProtocol(),\r\n Metadata=self.checkData())\r\n self.status_bar.setStatus(status)\r\n\r\n def stop(self): # stop button pushed\r\n \"\"\"\r\n Callback function for stop button, stops and resets measurement\r\n \"\"\"\r\n 
self.setButtonStatus(-1)\r\n\r\n #self.textEdit_out.setText('Stopping Measurement. Please wait') # TODO\r\n self.measurement_thread.stop = True\r\n self.measurement_thread.pause = False # In case it was previously paused\r\n # self.timer.stop()\r\n\r\n def pause(self): # pause button pushed\r\n \"\"\"\r\n Callback function for pause button\r\n \"\"\"\r\n if self.measurement_thread.pause:\r\n self.pauseButton.setText(\"Pause\")\r\n self.measurement_thread.pause = False\r\n self.setButtonStatus(-1)\r\n else:\r\n self.pauseButton.setText(\"Resume\")\r\n self.setButtonStatus(-1)\r\n #self.textEdit_out.setText('Pausing Measurement. Please wait') # TODO\r\n self.measurement_thread.pause = True\r\n\r\n def startStimulation(self):\r\n \"\"\"\r\n Start the stimulation and recording.\r\n \"\"\"\r\n # Check the input fields\r\n ok1, message1 = self.checkData()\r\n ok2, message2 = self.measurement_thread.protocolWidget.checkProtocol()\r\n ok3, message3 = self.measurement_thread.signal.checkSettings()\r\n message = \"\"\r\n if not ok1:\r\n message += message1+\"\\n\"\r\n if not ok2:\r\n message += message2+\"\\n\"\r\n if not ok3:\r\n message += message3+\"\\n\"\r\n if not ok1 or not ok2 or not ok3:\r\n QtWidgets.QMessageBox.critical(self, 'Error', message)\r\n return\r\n\r\n # If the measurement is paused, resume it\r\n if self.measurement_thread.pause:\r\n self.measurement_thread.pause = False\r\n return\r\n self.measurement_thread.stop = False\r\n\r\n # reset this to notify save_data\r\n self.timeString = \"\"\r\n\r\n self.setButtonStatus(1)\r\n\r\n self.pauseButton.setText(\"Pause\")\r\n\r\n self.textEdit_out.addLog(\"Measurement started\")\r\n\r\n Thread(target=self.measurement_thread.run_thread, args=()).start() # start Measurement\r\n\r\n def update_timer(self, protocol, idx):\r\n self.progressBar.setRange(0, len(protocol))\r\n self.progressBar.setValue(idx+1)\r\n if idx >= 0:\r\n digits = len(str(len(protocol)))\r\n self.textEdit_out.addLog((\"Trial %\"+str(digits)+\"d/%d finished.\") % (idx+1, len(protocol)))\r\n # print the remaini\r\n self.labelRemaining.setText(\"Remaining time: %s\" % str(self.measurement_thread.signal.getProtocolDuration(protocol, idx)).split(\".\")[0])\r\n\r\n def save_backup(self, data_extracted):\r\n self.save_data(data_extracted, finished=False)\r\n return\r\n if self.backup_plot_count >= 10:\r\n self.save_data(data_extracted, finished=False)\r\n self.backup_plot_count = 0\r\n else:\r\n self.backup_plot_count += 1\r\n\r\n def plot_it(self, data, idx):\r\n \"\"\" provide the plot with the data \"\"\"\r\n print(\"plot_id\", data.shape, idx)\r\n self.plot.setData(data[idx, :, :], idx)\r\n data[idx][6][0] = self.plot.get_max()\r\n\r\n def m_finished(self, data_extracted, empty):\r\n self.save_data(data_extracted, finished=True)\r\n self.textEdit_out.addLog(\"Measurement finished\")\r\n self.setButtonStatus(0)\r\n QtWidgets.QMessageBox.information(self, 'Finished', 'Measurement Completed')\r\n\r\n def m_paused(self):\r\n # self.timer.stop()\r\n self.setButtonStatus(2)\r\n self.textEdit_out.addLog(\"Measurement paused\")\r\n QtWidgets.QMessageBox.information(self, 'Paused', 'The door can be opened.')\r\n\r\n def m_resumed(self):\r\n self.setButtonStatus(1)\r\n self.textEdit_out.addLog(\"Measurement resumed\")\r\n\r\n def m_stopped(self):\r\n self.textEdit_out.addLog(\"Measurement stopped\")\r\n self.setButtonStatus(0)\r\n self.measurement_thread.pause = False # reset in case pause button was pressed\r\n QtWidgets.QMessageBox.information(self, 'Stopped', 'Measurement 
stopped.')\r\n if self.shutDown:\r\n self.shutDown = 2\r\n self.close()\r\n\r\n def save_data(self, data_extracted, finished=True):\r\n\r\n # add time from first trial\r\n if self.timeString == \"\":\r\n self.timeString = time.strftime(\"%Y-%m-%d_%H-%M\")\r\n\r\n # get the string from the protocol\r\n fileNameEnding = self.measurement_thread.protocolWidget.getProtocolName()\r\n\r\n # get the string from the metadata\r\n metadata = [self.textEdit_Experimenter.text(), self.textEdit_Mousname.text(),\r\n self.textEdit_status.text(), self.timeString]\r\n\r\n # join the directory tree\r\n dirname = os.path.join(*metadata)\r\n\r\n # join the data into the filename\r\n filename = \"UNFINISHED_\" + \"_\".join(metadata)\r\n\r\n # get the output directories\r\n directory = os.path.join(self.config.output_directory, self.config.directory_measurements, dirname)\r\n directory_backup = os.path.join(self.config.output_directory, self.config.directory_backup, dirname)\r\n\r\n # create directory\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n\r\n # save the data to the backup folder\r\n np.save(os.path.join(directory, filename + '_extracted_data.npy'), data_extracted)\r\n\r\n # if the measurement is finished\r\n if finished:\r\n # wait? TODO Why?\r\n time.sleep(3)\r\n\r\n # rename the file to remove the \"UNFINISHED_\" tag\r\n os.rename(os.path.join(directory, filename + '_extracted_data.npy'),\r\n os.path.join(directory, filename.replace(\"UNFINISHED_\", \"\") + '_extracted_data.npy'))\r\n\r\n # reset the time string\r\n self.timeString = \"\"\r\n\r\n # extract and save the amplitudes\r\n only_amplitudes = self.raw_to_amplitude(data_extracted)\r\n np.save(os.path.join(directory, filename.replace(\"UNFINISHED_\", \"\") + '_amplitudes.npy'), only_amplitudes)\r\n\r\n # create the directory for the backup\r\n if not os.path.exists(directory_backup):\r\n os.makedirs(directory_backup)\r\n\r\n # save the data to the backup folder\r\n np.save(os.path.join(directory_backup, filename.replace(\"UNFINISHED_\", \"\") + '_extracted_data.npy'),\r\n data_extracted)\r\n\r\n def movementCheck(self, data):\r\n \"\"\" check if animal has moved before noise burst \"\"\"\r\n # data until threshold\r\n val = data[:8000]\r\n if max(val) > self.config.acceleration_threshold:\r\n return False\r\n else:\r\n return True\r\n\r\n def get_max(self, data):\r\n \"\"\"\r\n Calculation of max acceleration\r\n \"\"\"\r\n # calculation of maximum only if trial is valid\r\n if self.movementCheck(data):\r\n # searching for maximum after stimulus\r\n return max(data[800:])\r\n else:\r\n return np.NaN\r\n\r\n def rms(self, data_x, data_y, data_z):\r\n \"\"\" calculation of root mean square data and low pass filtering \"\"\"\r\n data = np.empty(shape=(data_x.__len__(), 1))\r\n\r\n data_xf = self.butter_lowpass_filter(data_x)\r\n data_yf = self.butter_lowpass_filter(data_y)\r\n data_zf = self.butter_lowpass_filter(data_z)\r\n\r\n sensitivity = self.config.acceleration_sensor_sensitivity_v_to_g\r\n data = np.sqrt((data_xf / sensitivity) ** 2 + (data_yf / sensitivity) ** 2 + (data_zf / sensitivity) ** 2)\r\n return data\r\n\r\n def raw_to_amplitude(self, extracted_data):\r\n # HINT: 6 is the index of the headerline\r\n\r\n # extract the maximal amplitude of each trial\r\n for idx in range(len(extracted_data)):\r\n # get the filtered rms of x, y, z\r\n data = self.rms(extracted_data[idx][0], extracted_data[idx][1], extracted_data[idx][2])\r\n extracted_data[idx][6][0] = self.get_max(data)\r\n\r\n only_amplitude = 
np.zeros((len(extracted_data), 10))\r\n for i, item in enumerate(extracted_data):\r\n only_amplitude[i] = item[6][:10]\r\n # local_amplitudeIDX = 0\r\n # local_noiseIDX = 1\r\n # local_noiseGapIDX = 2\r\n local_noiseFreqMinIDX = 3\r\n local_noiseFreqMaxIDX = 4\r\n local_preStimAttenIDX = 5\r\n # local_preStimFreqIDX = 6\r\n # local_ISIIDX = 7\r\n # local_noiseTimeIDX = 8\r\n local_noiseFreqMidIDX = 9\r\n noise_atten = 60\r\n for i in range(len(only_amplitude)):\r\n if only_amplitude[i][local_noiseFreqMinIDX] != 0:\r\n only_amplitude[i][local_noiseFreqMidIDX] = int(only_amplitude[i][local_noiseFreqMinIDX] * (\r\n only_amplitude[i][local_noiseFreqMaxIDX] / only_amplitude[i][local_noiseFreqMinIDX]) ** (\r\n 1 / 2) + 1)\r\n only_amplitude[i][local_preStimAttenIDX] = noise_atten\r\n return only_amplitude\r\n\r\n def closeEvent(self, event):\r\n \"\"\"\r\n our own close event to prevent the user from closing without intention\r\n further more the measurement thread is closed\r\n \"\"\"\r\n if self.shutDown == 1:\r\n event.ignore()\r\n return\r\n if self.shutDown == 2:\r\n if self.thisplot:\r\n self.thisplot.esc() # MS\r\n return\r\n\r\n if self.measurement_thread is not None: # TODO check running\r\n msg = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, \"warning\",\r\n \"Sind sie sich sicher, dass Sie die Messung schließen\" +\r\n \" möchten? Es ist nicht möglich die Messung zu einem\" +\r\n \" späteren Zeitpunkt fortzuführen!\")\r\n msg.addButton(QtWidgets.QMessageBox.Ok)\r\n msg.addButton(QtWidgets.QMessageBox.Cancel)\r\n ret = msg.exec_()\r\n if ret == QtWidgets.QMessageBox.Ok:\r\n\r\n self.textEdit_out.setText('Stopping Measurement. Please wait')\r\n self.measurement_thread.stop = True\r\n self.measurement_thread.pause = False\r\n self.shutDown = 1\r\n event.ignore()\r\n else:\r\n event.ignore()\r\n else:\r\n if self.thisplot:\r\n self.thisplot.esc() # MS\r\n\r\n def checkData(self):\r\n \"\"\"\r\n Checks the entries of the textEdit Fields\r\n and shows Error Message if it is not correct\"\r\n\r\n Returns\r\n -------\r\n False: Exception/Error while reading testEdit Field\r\n True: Entries correct\r\n \"\"\"\r\n errors = []\r\n\r\n # check if mouse name is given\r\n if self.textEdit_Mousname.text() == \"\":\r\n errors.append(\"Please fill in mouse name\")\r\n\r\n # check if experimenter name is given\r\n if self.textEdit_Experimenter.text() == '':\r\n errors.append(\"Please fill in experimenter name\")\r\n\r\n # check if status is given\r\n status = self.textEdit_status.text().strip()\r\n if status == \"\":\r\n errors.append(\"Please fill in status\")\r\n else:\r\n # check if status is either pre or post followed by an integer\r\n allowed_status_texts = [\"pre\", \"post\"]\r\n for text in allowed_status_texts:\r\n if status.startswith(text):\r\n value = status[len(text):].strip()\r\n status = text\r\n # try to set the text (removing possible spaces in the status)\r\n try:\r\n if status == \"post\":\r\n self.textEdit_status.setText(\"%s%d\" % (status, int(value)))\r\n elif status == \"pre\":\r\n self.textEdit_status.setText(\"%s\" % (status))\r\n else:\r\n raise ValueError\r\n except (ValueError, UnboundLocalError):\r\n errors.append(\"Status has to be either 'pre' or 'post' followed by an integer.\")\r\n\r\n # do we have errors? 
warn the user!\r\n if len(errors):\r\n return False, \"\\n\".join(errors)\r\n # if not, everything is fine\r\n return True, \"Mouse %s measured by %s in state %s\" % (self.textEdit_Mousname.text(), self.textEdit_Experimenter.text(), self.textEdit_status.text())\r\n\r\n\r\n ##########low pass filter##########\r\n def butter_lowpass(self, cutoff, oder, sf):\r\n from scipy.signal import butter\r\n N = oder\r\n Fc = cutoff\r\n nyq = sf / 2\r\n b, a = butter(N, Fc / nyq, btype='low', analog=False)\r\n return b, a\r\n\r\n def butter_lowpass_filter(self, data):\r\n from scipy.signal import lfilter\r\n b, a = self.butter_lowpass(45, 6, 10000)\r\n y = lfilter(b, a, data)\r\n return y\r\n ###################################"
] | [
[
"numpy.sqrt",
"numpy.load",
"scipy.signal.lfilter",
"scipy.signal.butter"
]
] |
DoktorBotti/Probabilistic-Backpropagation | [
"56c9ca818f88fd11e4c38585eefaceb3c28e2184"
] | [
"theano/PBP_net/PBP_net.py"
] | [
"\nimport numpy as np\n\nimport pickle\n\nimport gzip\n\nimport pbp\n\nclass PBP_net:\n\n def __init__(self, X_train, y_train, n_hidden, n_epochs = 40,\n normalize = False):\n\n \"\"\"\n Constructor for the class implementing a Bayesian neural network\n trained with the probabilistic back propagation method.\n\n @param X_train Matrix with the features for the training data.\n @param y_train Vector with the target variables for the\n training data.\n @param n_hidden Vector with the number of neurons for each\n hidden layer.\n @param n_epochs Numer of epochs for which to train the\n network. The recommended value 40 should be\n enough.\n @param normalize Whether to normalize the input features. This\n is recommended unles the input vector is for\n example formed by binary features (a\n fingerprint). In that case we do not recommend\n to normalize the features.\n \"\"\"\n\n # We normalize the training data to have zero mean and unit standard\n # deviation in the training set if necessary\n\n if normalize:\n self.std_X_train = np.std(X_train, 0)\n self.std_X_train[ self.std_X_train == 0 ] = 1\n self.mean_X_train = np.mean(X_train, 0)\n else:\n self.std_X_train = np.ones(X_train.shape[ 1 ])\n self.mean_X_train = np.zeros(X_train.shape[ 1 ])\n\n X_train = (X_train - np.full(X_train.shape, self.mean_X_train)) / \\\n np.full(X_train.shape, self.std_X_train)\n\n self.mean_y_train = np.mean(y_train)\n self.std_y_train = np.std(y_train)\n\n y_train_normalized = (y_train - self.mean_y_train) / self.std_y_train\n\n # We construct the network\n\n n_units_per_layer = \\\n np.concatenate(([ X_train.shape[ 1 ] ], n_hidden, [ 1 ]))\n self.pbp_instance = \\\n pbp.PBP(n_units_per_layer, self.mean_y_train, self.std_y_train)\n\n # We iterate the learning process\n\n self.pbp_instance.do_pbp(X_train, y_train_normalized, n_epochs)\n\n # We are done!\n\n def re_train(self, X_train, y_train, n_epochs):\n\n \"\"\"\n Function that re-trains the network on some data.\n\n @param X_train Matrix with the features for the training data.\n @param y_train Vector with the target variables for the\n training data.\n @param n_epochs Numer of epochs for which to train the\n network. 
\n \"\"\"\n\n # We normalize the training data \n\n X_train = (X_train - np.full(X_train.shape, self.mean_X_train)) / \\\n np.full(X_train.shape, self.std_X_train)\n\n y_train_normalized = (y_train - self.mean_y_train) / self.std_y_train\n\n self.pbp_instance.do_pbp(X_train, y_train_normalized, n_epochs)\n\n def predict(self, X_test):\n\n \"\"\"\n Function for making predictions with the Bayesian neural network.\n\n @param X_test The matrix of features for the test data\n \n \n @return m The predictive mean for the test target variables.\n @return v The predictive variance for the test target\n variables.\n @return v_noise The estimated variance for the additive noise.\n\n \"\"\"\n\n X_test = np.array(X_test, ndmin = 2)\n\n # We normalize the test set\n\n X_test = (X_test - np.full(X_test.shape, self.mean_X_train)) / \\\n np.full(X_test.shape, self.std_X_train)\n\n # We compute the predictive mean and variance for the target variables\n # of the test data\n\n m, v, v_noise = self.pbp_instance.get_predictive_mean_and_variance(X_test)\n\n # We are done!\n\n return m, v, v_noise\n\n def predict_deterministic(self, X_test):\n\n \"\"\"\n Function for making predictions with the Bayesian neural network.\n\n @param X_test The matrix of features for the test data\n \n \n @return o The predictive value for the test target variables.\n\n \"\"\"\n\n X_test = np.array(X_test, ndmin = 2)\n\n # We normalize the test set\n\n X_test = (X_test - np.full(X_test.shape, self.mean_X_train)) / \\\n np.full(X_test.shape, self.std_X_train)\n\n # We compute the predictive mean and variance for the target variables\n # of the test data\n\n o = self.pbp_instance.get_deterministic_output(X_test)\n\n # We are done!\n\n return o\n\n def sample_weights(self):\n\n \"\"\"\n Function that draws a sample from the posterior approximation\n to the weights distribution.\n\n \"\"\"\n \n self.pbp_instance.sample_w()\n\n def save_to_file(self, filename):\n\n \"\"\"\n Function that stores the network in a file.\n\n @param filename The name of the file.\n \n \"\"\"\n\n # We save the network to a file using pickle\n\n def save_object(obj, filename):\n\n result = pickle.dumps(obj)\n with gzip.GzipFile(filename, 'wb') as dest: dest.write(result)\n dest.close()\n\n save_object(self, filename)\n\ndef load_PBP_net_from_file(filename):\n\n \"\"\"\n Function that load a network from a file.\n\n @param filename The name of the file.\n \n \"\"\"\n\n def load_object(filename):\n\n with gzip.GzipFile(filename, 'rb') as \\\n source: result = source.read()\n ret = pickle.loads(result)\n source.close()\n\n return ret\n\n # We load the dictionary with the network parameters\n\n PBP_network = load_object(filename)\n\n return PBP_network\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.array",
"numpy.std",
"numpy.concatenate",
"numpy.full",
"numpy.mean"
]
] |
MJYINMC/Wheeled-Robot | [
"7425c9bdf4a2e55b7b4a4420a012a3daa3a6a114"
] | [
"icp_ws/src/course_agv_slam/scripts/mapping.py"
] | [
"#!/usr/bin/env python\nimport math\nimport numpy as np\n\n# f = open('/home/rosuser/catkin_ws/tmp/mapping.log', 'w')\ndef fprint(s):\n return\n f.write(s)\n f.write('\\n')\n f.flush()\n\nclass Mapping():\n def __init__(self, xw, yw, xyreso):\n self.width_x = xw*xyreso\n self.width_y = yw*xyreso\n self.xyreso = xyreso\n self.xw = xw\n self.yw = yw\n # default 0.5 -- [[0.5 for i in range(yw)] for i in range(xw)]\n self.pmap = np.ones((self.xw, self.yw))/2\n self.minx = -self.width_x/2.0\n self.maxx = self.width_x/2.0\n self.miny = -self.width_y/2.0\n self.maxy = self.width_y/2.0\n\n def logodd(self, p):\n fprint(' logodd: p:{}'.format(p))\n if p == 0:\n return -2.94\n else: \n return math.log(p / (1-p))\n\n def update_prob(self, old_p, new_p):\n fprint(' update_prob: old_p:{}, pn:{}'.format(old_p, new_p))\n el= math.exp(self.logodd(old_p) + self.logodd(new_p))\n return el/(1+el)\n\n def bresenham(self, p_ob, p_center):\n def is_in_map(point):\n x, y = point\n if 0 <= x < self.xw and 0 <= y < self.yw:\n return True\n return False\n\n res = []\n \n x0, y0 = p_ob\n x1, y1 = p_center\n x0, y0 = int(x0), int(y0)\n x1, y1 = int(x1), int(y1)\n steep = abs(y1 - y0) > abs(x1 - x0)\n if steep:\n x0, y0 = y0, x0\n x1, y1 = y1, x1\n if x0 > x1:\n x0, x1 = x1, x0\n y0, y1 = y1, y0\n\n dx = x1 - x0\n dy = y1 - y0\n derr = 2*abs(dy)\n err = 0\n\n ystep = 1 if y0 < y1 else -1\n x, y = x0, y0\n while x <= x1:\n if steep and is_in_map((y, x)):\n res.append((y, x))\n elif is_in_map((x, y)):\n res.append((x, y))\n\n err += derr\n if err > dx:\n y += ystep\n err -= 2* dx\n x += 1\n \n return res\n\n def update(self, ox, oy, center_x, center_y):\n def process_axis(a, b):\n return int(a/self.xyreso + b/2)\n\n center_x, center_y = process_axis(center_x, self.xw), process_axis(center_y, self.yw)\n for p_ob in zip(ox, oy):\n p_ob = (process_axis(p_ob[0], self.xw), process_axis(p_ob[1], self.yw))\n line = self.bresenham(p_ob, (center_x, center_y))\n for p_observed in line:\n prob = 0.8 if p_observed == p_ob else 0.1\n res = self.update_prob(self.pmap[p_observed], prob)\n if res > 0.95:\n res = 0.95\n if res < 0.05:\n res = 0.05\n self.pmap[p_observed] = 0 if math.isnan(res) else res\n\n return self.pmap\n"
] | [
[
"numpy.ones"
]
] |
srl-ethz/diffPD_sim2real | [
"e491668995a163b8ff7542d99f0b4e0c0f4ed2df"
] | [
"python/example/rolling_jelly_3d.py"
] | [
"import sys\nsys.path.append('../')\n\nimport os\nfrom pathlib import Path\nimport time\nimport numpy as np\nimport scipy.optimize\nimport pickle\n\nfrom py_diff_pd.common.common import ndarray, create_folder, rpy_to_rotation, rpy_to_rotation_gradient\nfrom py_diff_pd.common.common import print_info, print_ok, print_error, PrettyTabular\nfrom py_diff_pd.common.grad_check import check_gradients\nfrom py_diff_pd.core.py_diff_pd_core import StdRealVector\nfrom py_diff_pd.env.rolling_jelly_env_3d import RollingJellyEnv3d\n\ndef test_rolling_jelly(verbose):\n seed = 42\n folder = Path('rolling_jelly_3d')\n refinement = 10\n youngs_modulus = 2e6\n poissons_ratio = 0.4\n env = RollingJellyEnv3d(seed, folder, { 'refinement': refinement,\n 'youngs_modulus': youngs_modulus,\n 'poissons_ratio': poissons_ratio })\n deformable = env.deformable()\n\n # Setting thread number.\n thread_cts = [2, 4, 8]\n\n methods = ('newton_pcg', 'newton_cholesky', 'pd_eigen', 'pd_no_acc')\n opts = ({ 'max_newton_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4 },\n { 'max_newton_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4 },\n { 'max_pd_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4,\n 'use_bfgs': 1, 'bfgs_history_size': 10 },\n { 'max_pd_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4,\n 'use_bfgs': 1, 'bfgs_history_size': 10, 'use_acc': 0 })\n\n dt = 5e-3\n frame_num = 100\n\n # Initial state.\n dofs = deformable.dofs()\n act_dofs = deformable.act_dofs()\n q0 = env.default_init_position() + np.random.normal(scale=0.001, size=dofs)\n radius = env.radius()\n pivot = ndarray([radius, radius, 0])\n omega = ndarray([0, 10.0, 0])\n omega_x, omega_y, omega_z = omega\n omega_skewed = ndarray([\n [0, -omega_z, omega_y],\n [omega_z, 0, -omega_x],\n [-omega_y, omega_x, 0]\n ])\n v0 = (q0.reshape((-1, 3)) @ -omega_skewed).ravel()\n a0 = np.zeros(act_dofs)\n f0 = np.zeros(dofs)\n\n # Visualization.\n if verbose:\n for method, opt in zip(methods, opts):\n _, _, info = env.simulate(dt, frame_num, 'pd_eigen' if method == 'pd_no_acc' else method,\n opt, q0, v0, [a0 for _ in range(frame_num)],\n [f0 for _ in range(frame_num)], require_grad=True, vis_folder=method)\n print('{}: forward: {:3.3f}s; backward: {:3.3f}s'.format(method, info['forward_time'], info['backward_time']))\n os.system('eog {}.gif'.format(folder / method))\n\n # Benchmark time.\n print('Reporting time cost. 
DoFs: {:d}, Contact DoFs: {:d}, frames: {:d}, dt: {:3.3e}'.format(dofs,\n env.contact_dofs(), frame_num, dt))\n rel_tols = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]\n forward_backward_times = {}\n forward_times = {}\n backward_times = {}\n losses = {}\n grads = {}\n for method in methods:\n for thread_ct in thread_cts:\n meth_thread_num = '{}_{}threads'.format(method, thread_ct)\n forward_backward_times[meth_thread_num] = []\n forward_times[meth_thread_num] = []\n backward_times[meth_thread_num] = []\n losses[meth_thread_num] = []\n grads[meth_thread_num] = []\n\n for rel_tol in rel_tols:\n print_info('rel_tol: {:3.3e}'.format(rel_tol))\n tabular = PrettyTabular({\n 'method': '{:^30s}',\n 'forward and backward (s)': '{:3.3f}',\n 'forward only (s)': '{:3.3f}',\n 'loss': '{:3.3f}',\n '|grad|': '{:3.3f}'\n })\n print_info(tabular.head_string())\n\n for method, opt in zip(methods, opts):\n opt['rel_tol'] = rel_tol\n for thread_ct in thread_cts:\n opt['thread_ct'] = thread_ct\n meth_thread_num = '{}_{}threads'.format(method, thread_ct)\n loss, grad, info = env.simulate(dt, frame_num, 'pd_eigen' if method == 'pd_no_acc' else method,\n opt, q0, v0, [a0 for _ in range(frame_num)],\n [f0 for _ in range(frame_num)], require_grad=True, vis_folder=None)\n grad_q, grad_v, grad_a, grad_f = grad\n grad = np.zeros(q0.size + v0.size + a0.size + f0.size)\n grad[:dofs] = grad_q\n grad[dofs:2 * dofs] = grad_v\n grad[2 * dofs:2 * dofs + act_dofs] = np.sum(ndarray(grad_a), axis=0)\n grad[2 * dofs + act_dofs:] = np.sum(ndarray(grad_f), axis=0)\n l, g, forward_time, backward_time = loss, grad, info['forward_time'], info['backward_time']\n print(tabular.row_string({\n 'method': meth_thread_num,\n 'forward and backward (s)': forward_time + backward_time,\n 'forward only (s)': forward_time,\n 'loss': l,\n '|grad|': np.linalg.norm(g) }))\n forward_backward_times[meth_thread_num].append(forward_time + backward_time)\n forward_times[meth_thread_num].append(forward_time)\n backward_times[meth_thread_num].append(backward_time)\n losses[meth_thread_num].append(l)\n grads[meth_thread_num].append(g)\n pickle.dump((rel_tols, forward_times, backward_times, losses, grads), open(folder / 'table.bin', 'wb'))\n\nif __name__ == '__main__':\n verbose = True\n test_rolling_jelly(verbose)"
] | [
[
"numpy.random.normal",
"numpy.linalg.norm",
"numpy.zeros"
]
] |
Palspal98/inpainting_gmcnn | [
"e9af21043fc665df55ab36bf65463618f88c1b69"
] | [
"tensorflow/net/network.py"
] | [
"import tensorflow as tf\nfrom net.ops import random_bbox, bbox2mask, local_patch\nfrom net.ops import priority_loss_mask\nfrom net.ops import id_mrf_reg\nfrom net.ops import gan_wgan_loss, gradients_penalty, random_interpolates\nfrom net.ops import free_form_mask_tf\nfrom util.util import f2uint\nfrom functools import partial\n\nclass GMCNNModel:\n def __init__(self):\n self.config = None\n\n # shortcut ops\n self.conv7 = partial(tf.layers.conv2d, kernel_size=7, activation=tf.nn.elu, padding='SAME')\n self.conv5 = partial(tf.layers.conv2d, kernel_size=5, activation=tf.nn.elu, padding='SAME')\n self.conv3 = partial(tf.layers.conv2d, kernel_size=3, activation=tf.nn.elu, padding='SAME')\n self.conv5_ds = partial(tf.layers.conv2d, kernel_size=5, strides=2, activation=tf.nn.leaky_relu, padding='SAME')\n\n def build_generator(self, x, mask, reuse=False, name='inpaint_net'):\n xshape = x.get_shape().as_list()\n xh, xw = xshape[1], xshape[2]\n ones_x = tf.ones_like(x)[:, :, :, 0:1]\n x_w_mask = tf.concat([x, ones_x, ones_x * mask], axis=3)\n\n # network with three branches\n cnum = self.config.g_cnum\n b_names = ['b1', 'b2', 'b3', 'merge']\n\n conv_7 = self.conv7\n conv_5 = self.conv5\n conv_3 = self.conv3\n with tf.variable_scope(name, reuse=reuse):\n # branch 1\n x = conv_7(inputs=x_w_mask, filters=cnum, strides=1, name=b_names[0] + 'conv1')\n x = conv_7(inputs=x, filters=2*cnum, strides=2, name=b_names[0] + 'conv2_downsample')\n x = conv_7(inputs=x, filters=2*cnum, strides=1, name=b_names[0] + 'conv3')\n x = conv_7(inputs=x, filters=4*cnum, strides=2, name=b_names[0] + 'conv4_downsample')\n x = conv_7(inputs=x, filters=4*cnum, strides=1, name=b_names[0] + 'conv5')\n x = conv_7(inputs=x, filters=4*cnum, strides=1, name=b_names[0] + 'conv6')\n x = conv_7(inputs=x, filters=4*cnum, strides=1, dilation_rate=2, name=b_names[0] + 'conv7_atrous')\n x = conv_7(inputs=x, filters=4*cnum, strides=1, dilation_rate=4, name=b_names[0] + 'conv8_atrous')\n x = conv_7(inputs=x, filters=4*cnum, strides=1, dilation_rate=8, name=b_names[0] + 'conv9_atrous')\n x = conv_7(inputs=x, filters=4*cnum, strides=1, dilation_rate=16, name=b_names[0] + 'conv10_atrous')\n if cnum > 32:\n x = conv_7(inputs=x, filters=4 * cnum, strides=1, dilation_rate=32, name=b_names[0] + 'conv11_atrous')\n x = conv_7(inputs=x, filters=4*cnum, strides=1, name=b_names[0] + 'conv11')\n x = conv_7(inputs=x, filters=4*cnum, strides=1, name=b_names[0] + 'conv12')\n x_b1 = tf.image.resize_bilinear(x, [xh, xw], align_corners=True)\n\n # branch 2\n x = conv_5(inputs=x_w_mask, filters=cnum, strides=1, name=b_names[1] + 'conv1')\n x = conv_5(inputs=x, filters=2 * cnum, strides=2, name=b_names[1] + 'conv2_downsample')\n x = conv_5(inputs=x, filters=2 * cnum, strides=1, name=b_names[1] + 'conv3')\n x = conv_5(inputs=x, filters=4 * cnum, strides=2, name=b_names[1] + 'conv4_downsample')\n x = conv_5(inputs=x, filters=4 * cnum, strides=1, name=b_names[1] + 'conv5')\n x = conv_5(inputs=x, filters=4 * cnum, strides=1, name=b_names[1] + 'conv6')\n x = conv_5(inputs=x, filters=4 * cnum, strides=1, dilation_rate=2, name=b_names[1] + 'conv7_atrous')\n x = conv_5(inputs=x, filters=4 * cnum, strides=1, dilation_rate=4, name=b_names[1] + 'conv8_atrous')\n x = conv_5(inputs=x, filters=4 * cnum, strides=1, dilation_rate=8, name=b_names[1] + 'conv9_atrous')\n x = conv_5(inputs=x, filters=4 * cnum, strides=1, dilation_rate=16, name=b_names[1] + 'conv10_atrous')\n if cnum > 32:\n x = conv_5(inputs=x, filters=4 * cnum, strides=1, dilation_rate=32, name=b_names[1] + 
'conv11_atrous')\n x = conv_5(inputs=x, filters=4 * cnum, strides=1, name=b_names[1] + 'conv11')\n x = conv_5(inputs=x, filters=4 * cnum, strides=1, name=b_names[1] + 'conv12')\n x = tf.image.resize_nearest_neighbor(x, [xh//2, xw//2], align_corners=True)\n with tf.variable_scope(b_names[1] + 'conv13_upsample'):\n x = conv_3(inputs=x, filters=2 * cnum, strides=1, name=b_names[1] + 'conv13_upsample_conv')\n x = conv_5(inputs=x, filters=2 * cnum, strides=1, name=b_names[1] + 'conv14')\n x_b2 = tf.image.resize_bilinear(x, [xh, xw], align_corners=True)\n\n # branch 3\n x = conv_5(inputs=x_w_mask, filters=cnum, strides=1, name=b_names[2] + 'conv1')\n x = conv_3(inputs=x, filters=2 * cnum, strides=2, name=b_names[2] + 'conv2_downsample')\n x = conv_3(inputs=x, filters=2 * cnum, strides=1, name=b_names[2] + 'conv3')\n x = conv_3(inputs=x, filters=4 * cnum, strides=2, name=b_names[2] + 'conv4_downsample')\n x = conv_3(inputs=x, filters=4 * cnum, strides=1, name=b_names[2] + 'conv5')\n x = conv_3(inputs=x, filters=4 * cnum, strides=1, name=b_names[2] + 'conv6')\n x = conv_3(inputs=x, filters=4 * cnum, strides=1, dilation_rate=2, name=b_names[2] + 'conv7_atrous')\n x = conv_3(inputs=x, filters=4 * cnum, strides=1, dilation_rate=4, name=b_names[2] + 'conv8_atrous')\n x = conv_3(inputs=x, filters=4 * cnum, strides=1, dilation_rate=8, name=b_names[2] + 'conv9_atrous')\n x = conv_3(inputs=x, filters=4 * cnum, strides=1, dilation_rate=16, name=b_names[2] + 'conv10_atrous')\n if cnum > 32:\n x = conv_3(inputs=x, filters=4 * cnum, strides=1, dilation_rate=32, name=b_names[2] + 'conv11_atrous')\n x = conv_3(inputs=x, filters=4 * cnum, strides=1, name=b_names[2] + 'conv11')\n x = conv_3(inputs=x, filters=4 * cnum, strides=1, name=b_names[2] + 'conv12')\n x = tf.image.resize_nearest_neighbor(x, [xh // 2, xw // 2], align_corners=True)\n with tf.variable_scope(b_names[2] + 'conv13_upsample'):\n x = conv_3(inputs=x, filters=2 * cnum, strides=1, name=b_names[2] + 'conv13_upsample_conv')\n x = conv_3(inputs=x, filters=2 * cnum, strides=1, name=b_names[2] + 'conv14')\n x = tf.image.resize_nearest_neighbor(x, [xh, xw], align_corners=True)\n with tf.variable_scope(b_names[2] + 'conv15_upsample'):\n x = conv_3(inputs=x, filters=cnum, strides=1, name=b_names[2] + 'conv15_upsample_conv')\n x_b3 = conv_3(inputs=x, filters=cnum//2, strides=1, name=b_names[2] + 'conv16')\n\n x_merge = tf.concat([x_b1, x_b2, x_b3], axis=3)\n\n x = conv_3(inputs=x_merge, filters=cnum // 2, strides=1, name=b_names[3] + 'conv17')\n x = tf.layers.conv2d(inputs=x, kernel_size=3, filters=3, strides=1, activation=None, padding='SAME',\n name=b_names[3] + 'conv18')\n x = tf.clip_by_value(x, -1., 1.)\n return x\n\n\n def wgan_patch_discriminator(self, x, mask, d_cnum, reuse=False):\n cnum = d_cnum\n with tf.variable_scope('discriminator_local', reuse=reuse):\n h, w = mask.get_shape().as_list()[1:3]\n x = self.conv5_ds(x, filters=cnum, name='conv1')\n x = self.conv5_ds(x, filters=cnum*2, name='conv2')\n x = self.conv5_ds(x, filters=cnum*4, name='conv3')\n x = self.conv5_ds(x, filters=cnum*8, name='conv4')\n x = tf.layers.conv2d(x, kernel_size=5, strides=2, filters=1, activation=None, name='conv5', padding='SAME')\n\n mask = tf.contrib.layers.max_pool2d(mask, 2, padding='SAME')\n mask = tf.contrib.layers.max_pool2d(mask, 2, padding='SAME')\n mask = tf.contrib.layers.max_pool2d(mask, 2, padding='SAME')\n mask = tf.contrib.layers.max_pool2d(mask, 2, padding='SAME')\n mask = tf.contrib.layers.max_pool2d(mask, 2, padding='SAME')\n\n x = x * mask\n x = 
tf.reduce_sum(x, axis=[1, 2, 3]) / tf.reduce_sum(mask, axis=[1, 2, 3])\n mask_local = tf.image.resize_nearest_neighbor(mask, [h, w], align_corners=True)\n return x, mask_local\n\n\n def wgan_local_discriminator(self, x, d_cnum, reuse=False):\n cnum = d_cnum\n with tf.variable_scope('disc_local', reuse=reuse):\n x = self.conv5_ds(x, filters=cnum, name='conv1')\n x = self.conv5_ds(x, filters=cnum * 2, name='conv2')\n x = self.conv5_ds(x, filters=cnum * 4, name='conv3')\n x = self.conv5_ds(x, filters=cnum * 8, name='conv4')\n x = self.conv5_ds(x, filters=cnum * 4, name='conv5')\n x = self.conv5_ds(x, filters=cnum * 2, name='conv6')\n\n x = tf.layers.flatten(x, name='flatten')\n return x\n\n def wgan_global_discriminator(self, x, d_cnum, reuse=False):\n cnum = d_cnum\n with tf.variable_scope('disc_global', reuse=reuse):\n x = self.conv5_ds(x, filters=cnum, name='conv1')\n x = self.conv5_ds(x, filters=cnum * 2, name='conv2')\n x = self.conv5_ds(x, filters=cnum * 4, name='conv3')\n x = self.conv5_ds(x, filters=cnum * 8, name='conv4')\n x = self.conv5_ds(x, filters=cnum * 4, name='conv5')\n x = self.conv5_ds(x, filters=cnum * 2, name='conv6')\n x = tf.layers.flatten(x, name='flatten')\n return x\n\n def wgan_discriminator(self, batch_local, batch_global, d_cnum, reuse=False):\n with tf.variable_scope('discriminator', reuse=reuse):\n dlocal = self.wgan_local_discriminator(batch_local, d_cnum, reuse=reuse)\n dglobal = self.wgan_global_discriminator(batch_global, d_cnum, reuse=reuse)\n dout_local = tf.layers.dense(dlocal, 1, name='dout_local_fc')\n dout_global = tf.layers.dense(dglobal, 1, name='dout_global_fc')\n return dout_local, dout_global\n\n def wgan_mask_discriminator(self, batch_global, mask, d_cnum, reuse=False):\n with tf.variable_scope('discriminator', reuse=reuse):\n dglobal = self.wgan_global_discriminator(batch_global, d_cnum, reuse=reuse)\n dout_global = tf.layers.dense(dglobal, 1, name='dout_global_fc')\n dout_local, mask_local = self.wgan_patch_discriminator(batch_global, mask, d_cnum, reuse=reuse)\n return dout_local, dout_global, mask_local\n\n def build_net(self, batch_data, config, summary=True, reuse=False):\n self.config = config\n batch_pos = batch_data / 127.5 - 1.\n # generate mask, 1 represents masked point\n if config.mask_type == 'rect':\n bbox = random_bbox(config)\n mask = bbox2mask(bbox, config, name='mask_c')\n else:\n mask = free_form_mask_tf(parts=8, im_size=(config.img_shapes[0], config.img_shapes[1]),\n maxBrushWidth=20, maxLength=80, maxVertex=16)\n batch_incomplete = batch_pos * (1. - mask)\n mask_priority = priority_loss_mask(mask)\n batch_predicted = self.build_generator(batch_incomplete, mask, reuse=reuse)\n\n losses = {}\n # apply mask and complete image\n batch_complete = batch_predicted * mask + batch_incomplete * (1. 
- mask)\n if config.mask_type == 'rect':\n # local patches\n local_patch_batch_pos = local_patch(batch_pos, bbox)\n local_patch_batch_complete = local_patch(batch_complete, bbox)\n local_patch_mask = local_patch(mask, bbox)\n local_patch_batch_pred = local_patch(batch_predicted, bbox)\n mask_priority = local_patch(mask_priority, bbox)\n else:\n local_patch_batch_pos = batch_pos\n local_patch_batch_complete = batch_complete\n local_patch_batch_pred = batch_predicted\n\n if config.pretrain_network:\n print('Pretrain the whole net with only reconstruction loss.')\n\n if not config.pretrain_network:\n config.feat_style_layers = {'conv3_2': 1.0, 'conv4_2': 1.0}\n config.feat_content_layers = {'conv4_2': 1.0}\n\n config.mrf_style_w = 1.0\n config.mrf_content_w = 1.0\n\n ID_MRF_loss = id_mrf_reg(local_patch_batch_pred, local_patch_batch_pos, config)\n # ID_MRF_loss = id_mrf_reg(batch_predicted, batch_pos, config)\n\n losses['ID_MRF_loss'] = ID_MRF_loss\n tf.summary.scalar('losses/ID_MRF_loss', losses['ID_MRF_loss'])\n\n pretrain_l1_alpha = config.pretrain_l1_alpha\n losses['l1_loss'] = \\\n pretrain_l1_alpha * tf.reduce_mean(tf.abs(local_patch_batch_pos - local_patch_batch_pred) * mask_priority)\n if not config.pretrain_network:\n losses['l1_loss'] += tf.reduce_mean(ID_MRF_loss * config.mrf_alpha)\n losses['ae_loss'] = pretrain_l1_alpha * tf.reduce_mean(tf.abs(batch_pos - batch_predicted) * (1. - mask))\n if not config.pretrain_network:\n losses['ae_loss'] += pretrain_l1_alpha * tf.reduce_mean(tf.abs(batch_pos - batch_predicted) * (1. - mask))\n losses['ae_loss'] /= tf.reduce_mean(1. - mask)\n\n if summary:\n viz_img = tf.concat([batch_pos, batch_incomplete, batch_predicted, batch_complete], axis=2)[:, :, :, ::-1]\n tf.summary.image('gt__degraded__predicted__completed', f2uint(viz_img))\n tf.summary.scalar('losses/l1_loss', losses['l1_loss'])\n tf.summary.scalar('losses/ae_loss', losses['ae_loss'])\n\n # gan\n batch_pos_neg = tf.concat([batch_pos, batch_complete], axis=0)\n\n if config.mask_type == 'rect':\n # local deterministic patch\n local_patch_batch_pos_neg = tf.concat([local_patch_batch_pos, local_patch_batch_complete], 0)\n # wgan with gradient penalty\n pos_neg_local, pos_neg_global = self.wgan_discriminator(local_patch_batch_pos_neg,\n batch_pos_neg, config.d_cnum, reuse=reuse)\n else:\n pos_neg_local, pos_neg_global, mask_local = self.wgan_mask_discriminator(batch_pos_neg,\n mask, config.d_cnum, reuse=reuse)\n pos_local, neg_local = tf.split(pos_neg_local, 2)\n pos_global, neg_global = tf.split(pos_neg_global, 2)\n # wgan loss\n global_wgan_loss_alpha = 1.0\n g_loss_local, d_loss_local = gan_wgan_loss(pos_local, neg_local, name='gan/local_gan')\n g_loss_global, d_loss_global = gan_wgan_loss(pos_global, neg_global, name='gan/global_gan')\n losses['g_loss'] = global_wgan_loss_alpha * g_loss_global + g_loss_local\n losses['d_loss'] = d_loss_global + d_loss_local\n # gp\n interpolates_global = random_interpolates(batch_pos, batch_complete)\n if config.mask_type == 'rect':\n interpolates_local = random_interpolates(local_patch_batch_pos, local_patch_batch_complete)\n dout_local, dout_global = self.wgan_discriminator(\n interpolates_local, interpolates_global, config.d_cnum, reuse=True)\n else:\n interpolates_local = interpolates_global\n dout_local, dout_global, _ = self.wgan_mask_discriminator(interpolates_global, mask, config.d_cnum, reuse=True)\n\n # apply penalty\n if config.mask_type == 'rect':\n penalty_local = gradients_penalty(interpolates_local, dout_local, mask=local_patch_mask)\n 
else:\n penalty_local = gradients_penalty(interpolates_local, dout_local, mask=mask)\n penalty_global = gradients_penalty(interpolates_global, dout_global, mask=mask)\n losses['gp_loss'] = config.wgan_gp_lambda * (penalty_local + penalty_global)\n losses['d_loss'] = losses['d_loss'] + losses['gp_loss']\n if summary and not config.pretrain_network:\n tf.summary.scalar('convergence/d_loss', losses['d_loss'])\n tf.summary.scalar('convergence/local_d_loss', d_loss_local)\n tf.summary.scalar('convergence/global_d_loss', d_loss_global)\n tf.summary.scalar('gan_wgan_loss/gp_loss', losses['gp_loss'])\n tf.summary.scalar('gan_wgan_loss/gp_penalty_local', penalty_local)\n tf.summary.scalar('gan_wgan_loss/gp_penalty_global', penalty_global)\n\n if config.pretrain_network:\n losses['g_loss'] = 0\n else:\n losses['g_loss'] = config.gan_loss_alpha * losses['g_loss']\n losses['g_loss'] += config.l1_loss_alpha * losses['l1_loss']\n ##\n\n print('Set L1_LOSS_ALPHA to %f' % config.l1_loss_alpha)\n print('Set GAN_LOSS_ALPHA to %f' % config.gan_loss_alpha)\n\n losses['g_loss'] += config.ae_loss_alpha * losses['ae_loss']\n print('Set AE_LOSS_ALPHA to %f' % config.ae_loss_alpha)\n g_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES, 'inpaint_net')\n d_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES, 'discriminator')\n return g_vars, d_vars, losses\n\n def evaluate(self, im, mask, config, reuse=False):\n # generate mask, 1 represents masked point\n self.config = config\n im = im / 127.5 - 1\n im = im * (1 - mask)\n # inpaint\n batch_predict = self.build_generator(im, mask, reuse=reuse)\n # apply mask and reconstruct\n batch_complete = batch_predict * mask + im * (1 - mask)\n return batch_complete\n"
] | [
[
"tensorflow.layers.conv2d",
"tensorflow.summary.scalar",
"tensorflow.ones_like",
"tensorflow.layers.flatten",
"tensorflow.get_collection",
"tensorflow.reduce_mean",
"tensorflow.variable_scope",
"tensorflow.contrib.layers.max_pool2d",
"tensorflow.abs",
"tensorflow.image.resize_bilinear",
"tensorflow.concat",
"tensorflow.clip_by_value",
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.reduce_sum",
"tensorflow.layers.dense",
"tensorflow.split"
]
] |
jackdbd/hdf5-pydata-munich | [
"3f814c7f58ac460f7bdadfc8deed9d9d44269dcf"
] | [
"snippets/create_synthetic_data.py"
] | [
"\"\"\"Create synthetic data to benchmark PyTables queries.\n\nUsage\n-----\n# Generate 10 datasets of synthetic data\npython create_synthetic_data.py -n 1000000\n\"\"\"\nimport os\nimport argparse\nimport time\nimport tables as tb\nimport numpy as np\n\n\nclass SyntheticDataDescription(tb.IsDescription):\n unsigned_int_field = tb.UInt8Col(pos=0)\n int_field = tb.Int32Col(pos=1)\n float_field = tb.Float32Col(pos=2)\n bool_field = tb.BoolCol(pos=3)\n\n\ndef fill_table(table, data):\n num_records = len(data['integers'])\n print('Fill up the table with {} records'.format(num_records))\n # Get the record object associated with the table:\n row = table.row\n for i in range(num_records):\n row['unsigned_int_field'] = data['uintegers'][i]\n row['int_field'] = data['integers'][i]\n row['float_field'] = data['floats'][i]\n row['bool_field'] = data['booleans'][i]\n row.append()\n # Flush the table buffers\n table.flush()\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('-n', '--num_records', type=int, default=1000, \n help='Number of records to generate')\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n here = os.path.abspath(os.path.dirname(__file__))\n data_dir = os.path.abspath(os.path.join(here, '..', 'data'))\n file_path = os.path.join(data_dir, 'pytables-synthetic-data.h5')\n filters = tb.Filters(complevel=5, complib='zlib')\n\n size = args.num_records\n data = {\n 'uintegers': np.random.randint(0, 255, size, dtype='uint8'),\n 'integers': np.random.randint(low=-123, high=456, size=size, dtype='int32'),\n 'floats': np.random.normal(loc=0, scale=1, size=size).astype(np.float32),\n 'booleans': np.random.choice([True, False], size=size),\n }\n\n t0 = time.time()\n with tb.open_file(file_path, 'w') as f:\n table = f.create_table(\n where='/', name='data_table', description=SyntheticDataDescription,\n title='Synthetic data', filters=filters)\n\n fill_table(table, data)\n\n t1 = time.time()\n print('Creating the HDF5 file took {:.2f}s'.format(t1 - t0))\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.random.normal",
"numpy.random.randint",
"numpy.random.choice"
]
] |
camilorey/signals_package | [
"fe28fe9b4f791a951fb5249ab0da5efbcd54fd5c"
] | [
"signals/perturbations/step_perturbation.py"
] | [
"import warnings\nimport numpy as np\nfrom .perturbation import Perturbation\n\nclass StepPerturbation(Perturbation):\n \"\"\"\n This class will simulate a Step perturbation, with a support beginning at _t0 and ending in _t0+_support,\n that causes a Petrurbation in the form a Step Function.\n\n Parameters to construct a Step Perturbation are two:\n strength: the amplitude of the step to take\n step: the time at which the step is to take place\n direction: the direction in which the step is to happen (before (-1) or after t0 (+1)).\n \"\"\"\n def construct_function(self,kwargs:dict):\n \"\"\"\n StepPerturbation implementation of the construct_function(kwargs) method\n :param kwargs: a dictionary containing perhaps the parameters of a Step Perturbation Function.\n The Dictionary needs to hold parameters such as spike (for self._amplitude) or t0 (for self._t0)\n to create a step.\n :return: None. Step Perturbation attributes are set internally.\n \"\"\"\n if 'step' in kwargs:\n w = kwargs['step']\n is_in_support = self.check_parameter_in_support(w)\n if not is_in_support:\n warnings.warn('Warning: Step position is outside of support.')\n self.set_parameter(kwargs,'_step','step',1)\n self.set_parameter(kwargs,'_direction','dir',1)\n\n def perturbation_function(self, t: float) -> float:\n \"\"\"\n For the Step function, we will return a random number N(strength,strength*0.15) but the\n sign of the number will be positive if t>step and negtive if t<step. If the direction is reversed\n then the signs will interchange.\n :param t: a number\n :return: float\n \"\"\"\n random_number = np.random.normal(loc=self._strength,\n scale=self._strength * 0.05,\n size=1)[0]\n if t < self._step:\n random_number *=-1\n else:\n random_number *= 1\n return random_number*self._direction"
] | [
[
"numpy.random.normal"
]
] |
vietlinhtspt/NewFasterRCNN | [
"c2a1f51bfe8445662966d4cf62a098d8d3b373c4"
] | [
"lib/layer_utils/proposal_target_layer.py"
] | [
"# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick, Sean Bell and Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport numpy.random as npr\nfrom model.config import cfg\nfrom model.bbox_transform import bbox_transform\nfrom utils.cython_bbox import bbox_overlaps\n\n\ndef proposal_target_layer(rpn_rois, rpn_scores, gt_boxes, _num_classes):\n \"\"\"\n Assign object detection proposals to ground-truth targets. Produces proposal\n classification labels and bounding-box regression targets.\n \"\"\"\n\n # Proposal ROIs (0, x1, y1, x2, y2) coming from RPN\n # (i.e., rpn.proposal_layer.ProposalLayer), or any other source\n all_rois = rpn_rois\n all_scores = rpn_scores\n\n # Include ground-truth boxes in the set of candidate rois\n if cfg.TRAIN.USE_GT:\n zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)\n all_rois = np.vstack(\n (all_rois, np.hstack((zeros, gt_boxes[:, :-1])))\n )\n # not sure if it a wise appending, but anyway i am not using it\n all_scores = np.vstack((all_scores, zeros))\n\n num_images = 1\n rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images\n fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)\n\n # Sample rois with classification labels and bounding box regression\n # targets\n labels, rois, roi_scores, bbox_targets, bbox_inside_weights = _sample_rois(\n all_rois, all_scores, gt_boxes, fg_rois_per_image,\n rois_per_image, _num_classes)\n\n rois = rois.reshape(-1, 5)\n roi_scores = roi_scores.reshape(-1)\n labels = labels.reshape(-1, 1)\n bbox_targets = bbox_targets.reshape(-1, _num_classes * 4)\n bbox_inside_weights = bbox_inside_weights.reshape(-1, _num_classes * 4)\n bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)\n\n return rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights\n\n\ndef _get_bbox_regression_labels(bbox_target_data, num_classes):\n \"\"\"Bounding-box regression targets (bbox_target_data) are stored in a\n compact form N x (class, tx, ty, tw, th)\n\n This function expands those targets into the 4-of-4*K representation used\n by the network (i.e. 
only one class has non-zero targets).\n\n Returns:\n bbox_target (ndarray): N x 4K blob of regression targets\n bbox_inside_weights (ndarray): N x 4K blob of loss weights\n \"\"\"\n\n clss = bbox_target_data[:, 0]\n bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)\n bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)\n inds = np.where(clss > 0)[0]\n for ind in inds:\n cls = clss[ind]\n start = int(4 * cls)\n end = start + 4\n bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]\n bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS\n return bbox_targets, bbox_inside_weights\n\n\ndef _compute_targets(ex_rois, gt_rois, labels):\n \"\"\"Compute bounding-box regression targets for an image.\"\"\"\n\n assert ex_rois.shape[0] == gt_rois.shape[0]\n assert ex_rois.shape[1] == 4\n assert gt_rois.shape[1] == 4\n\n targets = bbox_transform(ex_rois, gt_rois)\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\n / np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))\n return np.hstack(\n (labels[:, np.newaxis], targets)).astype(np.float32, copy=False)\n\n\ndef _sample_rois(all_rois, all_scores, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):\n \"\"\"Generate a random sample of RoIs comprising foreground and background\n examples.\n \"\"\"\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\n np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\n # Guard against the case when an image has fewer than fg_rois_per_image\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n\n # Small modification to the original version where we ensure a fixed number of regions are sampled\n if fg_inds.size > 0 and bg_inds.size > 0:\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.size)\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_image), replace=False)\n bg_rois_per_image = rois_per_image - fg_rois_per_image\n to_replace = bg_inds.size < bg_rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_image), replace=to_replace)\n elif fg_inds.size > 0:\n to_replace = fg_inds.size < rois_per_image\n fg_inds = npr.choice(fg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = rois_per_image\n elif bg_inds.size > 0:\n to_replace = bg_inds.size < rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = 0\n else:\n import pdb\n pdb.set_trace()\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Select sampled values from various arrays:\n labels = labels[keep_inds]\n # Clamp labels for the background RoIs to 0\n labels[int(fg_rois_per_image):] = 0\n rois = all_rois[keep_inds]\n roi_scores = all_scores[keep_inds]\n\n bbox_target_data = _compute_targets(\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\n\n bbox_targets, bbox_inside_weights = \\\n _get_bbox_regression_labels(bbox_target_data, num_classes)\n\n return labels, rois, roi_scores, 
bbox_targets, bbox_inside_weights\n"
] | [
[
"numpy.vstack",
"numpy.append",
"numpy.zeros",
"numpy.ascontiguousarray",
"numpy.hstack",
"numpy.round",
"numpy.where",
"numpy.array"
]
] |
redkfa/Dual-model-CNN-keras | [
"0ee8ff4778bc213a0336a166c5c61d8ae067200a"
] | [
"CODE/pc0_224ver.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 13 05:13:27 2018\n\n@author: s207\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 10 02:43:10 2018\n\n@author: s207\n\"\"\"\nfrom keras.callbacks import TensorBoard\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras import backend as K\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.models import Model\nfrom keras.layers.merge import concatenate\nfrom keras.models import Sequential\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.models import load_model\nimport numpy as np\nfrom sklearn.metrics import classification_report, confusion_matrix\n\n# dimensions of our images.\nimg_width, img_height = 224,224\n\ntrain_data_dir = 'train/pc1'\nvalidation_data_dir = 'test/pc1'\nnb_train_samples = 622 #7331\nnb_validation_samples = 162\nepochs = 10000\nbatch_size =32\nsteps_epoch = nb_train_samples/batch_size\nvalidation_steps= nb_validation_samples/batch_size\nnum_classes = 8\n\nif K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\nelse:\n input_shape = (img_width, img_height, 3)\n \n \n\n\nColor_model = Sequential()\nColor_model.add(Conv2D(32, kernel_size=(5, 5),\n activation='relu',\n input_shape=input_shape))\nColor_model.add(Conv2D(64, (3, 3), activation='relu'))\nColor_model.add(MaxPooling2D(pool_size=(2, 2)))\nColor_model.add(Dropout(0.25))\nColor_model.add(Conv2D(64, (3, 3), activation='relu'))\nColor_model.add(MaxPooling2D(pool_size=(2, 2)))\nColor_model.add(Dropout(0.25))\nColor_model.add(Conv2D(128, (3, 3), activation='relu'))\nColor_model.add(MaxPooling2D(pool_size=(2, 2)))\nColor_model.add(Dropout(0.25))\n\n\nColor_model.add(Conv2D(128, (3, 3), activation='relu'))\nColor_model.add(MaxPooling2D(pool_size=(2, 2)))\nColor_model.add(Dropout(0.25))\nColor_model.add(Conv2D(64, (3, 3), activation='relu'))\nColor_model.add(MaxPooling2D(pool_size=(2, 2)))\nColor_model.add(Dropout(0.25))\nColor_model.add(Conv2D(32, (3, 3), activation='relu'))\nColor_model.add(MaxPooling2D(pool_size=(2, 2)))\nColor_model.add(Dropout(0.25))\n\nColor_model.add(Flatten())\nColor_model.add(Dense(128, activation='relu'))\nColor_model.add(Dropout(0.5))\nColor_model.add(Dense(num_classes, activation='softmax'))\n\n\ninp = Color_model.input\nout = Color_model.output\n\n\nDepth_model = Sequential()\nDepth_model.add(Conv2D(32, kernel_size=(5, 5),\n activation='relu',\n input_shape=input_shape))\nDepth_model.add(Conv2D(64, (3, 3), activation='relu'))\nDepth_model.add(MaxPooling2D(pool_size=(2, 2)))\nDepth_model.add(Dropout(0.25))\nDepth_model.add(Conv2D(64, (3, 3), activation='relu'))\nDepth_model.add(MaxPooling2D(pool_size=(2, 2)))\nDepth_model.add(Dropout(0.25))\nDepth_model.add(Conv2D(128, (3, 3), activation='relu'))\nDepth_model.add(MaxPooling2D(pool_size=(2, 2)))\nDepth_model.add(Dropout(0.25))\n\n\nDepth_model.add(Conv2D(128, (3, 3), activation='relu'))\nDepth_model.add(MaxPooling2D(pool_size=(2, 2)))\nDepth_model.add(Dropout(0.25))\nDepth_model.add(Conv2D(64, (3, 3), activation='relu'))\nDepth_model.add(MaxPooling2D(pool_size=(2, 2)))\nDepth_model.add(Dropout(0.25))\nDepth_model.add(Conv2D(32, (3, 3), activation='relu'))\nDepth_model.add(MaxPooling2D(pool_size=(2, 2)))\nDepth_model.add(Dropout(0.25))\n\n\nDepth_model.add(Flatten())\nDepth_model.add(Dense(1024, activation='relu'))\nDepth_model.add(Dropout(0.5))\n\nDepth_model.add(Dense(128, 
activation='relu'))\nDepth_model.add(Dropout(0.5))\nDepth_model.add(Dense(num_classes, activation='softmax'))\n\n\n\nfor layer in Depth_model.layers:\n layer.name = layer.name + str(\"two\")\ninp2 = Depth_model.input\nout2 = Depth_model.output\n\nconcatenated = concatenate([out,out2], axis=-1)\nallout = Dense(num_classes, activation = 'softmax',name = 'theout')(concatenated)\n\n\nself_model = Model([inp, inp2], allout)\n\n'''\nself_model=load_model('pc0_classification_model_224.h5')\n'''\ncolor_train_datagen = ImageDataGenerator( rescale=1. / 255 ,shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True\n)\n\n# this is the augmentation configuration we will use for testing:\n# only rescaling\ncolor_test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n#compile the model\nself_model.compile(optimizer='SGD', loss='categorical_crossentropy', metrics=['accuracy'])\n\ntensorboard = TensorBoard(log_dir='./logs', histogram_freq=0,write_graph=True, write_images=False)\ntensorboard.set_model(self_model)\n\ndef generate_generator_multiple(generator, dir1, dir2, batch_size, img_height, img_width):\n genX1 = generator.flow_from_directory(dir1,\n target_size=(img_height, img_width),\n \n class_mode='categorical',\n batch_size=batch_size,\n shuffle=False,\n seed=1)\n\n genX2 = generator.flow_from_directory(dir2,\n target_size=(img_height, img_width),\n \n class_mode='categorical',\n batch_size=batch_size,\n shuffle=False,\n seed=1)\n while True:\n X1i = genX1.next()\n X2i = genX2.next()\n yield [X1i[0], X2i[0]], X2i[1] # Yield both images and their mutual label\n\n\n\ninputgenerator = generate_generator_multiple(generator=color_train_datagen,\n dir1='train/pc1',\n dir2='train/pc2',\n batch_size=batch_size,\n # classes=['block', 'cup', 'glasses', 'key', 'pill', 'smartphone', 'usb', 'wallet'],\n img_height=img_height,\n img_width=img_height)\n\nval_generator = generate_generator_multiple(color_test_datagen,\n dir1='test/pc1',\n dir2='test/pc2',\n batch_size=batch_size,\n # classes=['block', 'cup', 'glasses', 'key', 'pill', 'smartphone', 'usb', 'wallet'],\n img_height=img_height,\n img_width=img_height)\n\ntestgenerator = color_test_datagen.flow_from_directory('test/pc1',\n target_size=(img_height, img_width), \n class_mode='categorical',\n batch_size=batch_size,\n shuffle=False,\n seed=1)\n \ntestgenerator2 = color_test_datagen.flow_from_directory('test/pc2',\n target_size=(img_height, img_width), \n class_mode='categorical',\n batch_size=batch_size,\n shuffle=False,\n seed=1) \ncheckpoint = ModelCheckpoint('model_pc0_best_224.h5', verbose=1, monitor='val_loss',save_best_only=True, mode='auto') \n\nhistory = self_model.fit_generator(inputgenerator,\n steps_per_epoch=steps_epoch,\n epochs=epochs,\n validation_data=val_generator,\n validation_steps=validation_steps,\n use_multiprocessing=True,\n shuffle=False,verbose=2,\n callbacks=[checkpoint,tensorboard])\nself_model.summary() \n#Confution Matrix and Classification Report\nY_pred = self_model.predict_generator(val_generator, nb_validation_samples/batch_size)\ny_pred = np.argmax(Y_pred, axis=1)\nprint('Confusion Matrix')\nprint(confusion_matrix(testgenerator.classes, y_pred))\nprint('Classification Report')\ntarget_names = ['block', 'cup', 'glasses', 'key', 'pill', 'smartphone', 'usb', 'wallet']\nprint(classification_report(testgenerator.classes, y_pred, target_names=target_names))\n\n\n \n#self_model.save_weights('pc0_classification.h5')\nself_model.save('pc0_classification_model_224.h5')\n"
] | [
[
"sklearn.metrics.confusion_matrix",
"numpy.argmax",
"sklearn.metrics.classification_report"
]
] |
tanaylab/metacells | [
"ecd957b306bd6af2fcfd56efb246ce15b0d8238a"
] | [
"metacells/pipeline/consistency.py"
] | [
"'''\nSplit\n-----\n'''\n\nfrom re import Pattern\nfrom typing import Collection, Optional, Tuple, Union\n\nimport numpy as np\nfrom anndata import AnnData\n\nimport metacells.parameters as pr\nimport metacells.tools as tl\nimport metacells.utilities as ut\n\nfrom .direct import compute_direct_metacells\n\n__all__ = [\n 'split_groups',\n 'compute_groups_self_consistency',\n]\n\n\[email protected]()\[email protected]_call()\[email protected]_doc()\ndef split_groups(\n adata: AnnData,\n what: Union[str, ut.Matrix] = '__x__',\n *,\n group: str = 'metacell',\n feature_downsample_min_samples: int = pr.feature_downsample_min_samples,\n feature_downsample_min_cell_quantile: float = pr.feature_downsample_min_cell_quantile,\n feature_downsample_max_cell_quantile: float = pr.feature_downsample_max_cell_quantile,\n feature_min_gene_total: Optional[int] = None,\n feature_min_gene_top3: Optional[int] = None,\n feature_min_gene_relative_variance: Optional[float] = pr.feature_min_gene_relative_variance,\n forbidden_gene_names: Optional[Collection[str]] = None,\n forbidden_gene_patterns: Optional[Collection[Union[str, Pattern]]] = None,\n cells_similarity_value_normalization: float = pr.cells_similarity_value_normalization,\n cells_similarity_log_data: bool = pr.cells_similarity_log_data,\n cells_similarity_method: str = pr.cells_similarity_method,\n max_cell_size: Optional[float] = pr.max_cell_size,\n max_cell_size_factor: Optional[float] = pr.max_cell_size_factor,\n knn_balanced_ranks_factor: float = pr.knn_balanced_ranks_factor,\n knn_incoming_degree_factor: float = pr.knn_incoming_degree_factor,\n knn_outgoing_degree_factor: float = pr.knn_outgoing_degree_factor,\n min_seed_size_quantile: float = pr.min_seed_size_quantile,\n max_seed_size_quantile: float = pr.max_seed_size_quantile,\n candidates_cooldown_pass: float = pr.cooldown_pass,\n candidates_cooldown_node: float = pr.cooldown_node,\n random_seed: int = pr.random_seed,\n) -> None:\n '''\n Split each metacell into two parts using ``what`` (default: {what}) data.\n\n This creates a new partition of cells into half-metacells, which can used to\n :py:func:`compute_groups_self_consistency`.\n\n **Input**\n\n The input annotated ``adata`` is expected to contain a per-observation annotation named\n ``group`` (default: {group}) which identifies the group (metacells) each observation (cell)\n belongs to.\n\n **Returns**\n\n Sets the following annotations in ``adata``:\n\n Observation (Cell) Annotations\n ``half_<group>``\n The index of the half-group each cell belongs to. This is ``-1`` for ungrouped cells.\n Indices 0 to the number of groups are the first (low) halves; from the number of groups\n to twice that are the second (low) halves.\n\n **Computation Parameters**\n\n 1. For each group (metacell), invoke\n :py:func:`metacells.pipeline.direct.compute_direct_metacells` on the observations (cells)\n included in the group, forcing the creation of two half-groups that cover all the group's\n cells. 
The parameters are passed to this call as-is, setting ``must_complete_cover`` to\n ``True`` (that is, disabling outliers detection), and disabling restrictions on the\n half-group sizes.\n '''\n group_of_cells = ut.get_o_numpy(adata, group)\n groups_count = np.max(group_of_cells) + 1\n half_groups_of_cells = np.full(adata.n_obs, -1, dtype='int32')\n\n @ut.timed_call('split_group')\n def split_group(group_index: int) -> Tuple[ut.NumpyVector, ut.NumpyVector]:\n group_cells_mask = group_of_cells == group_index\n assert np.any(group_cells_mask)\n name = '.%s-%s/%s' % (group, group_index, groups_count)\n gdata = ut.slice(adata, name=name, top_level=False,\n obs=group_cells_mask,\n track_obs='complete_cell_index')\n target_metacell_size = (gdata.n_obs + 1) // 2\n compute_direct_metacells(gdata, what,\n feature_downsample_min_samples=feature_downsample_min_samples,\n feature_downsample_min_cell_quantile=feature_downsample_min_cell_quantile,\n feature_downsample_max_cell_quantile=feature_downsample_max_cell_quantile,\n feature_min_gene_total=feature_min_gene_total,\n feature_min_gene_top3=feature_min_gene_top3,\n feature_min_gene_relative_variance=feature_min_gene_relative_variance,\n forbidden_gene_names=forbidden_gene_names,\n forbidden_gene_patterns=forbidden_gene_patterns,\n cells_similarity_value_normalization=cells_similarity_value_normalization,\n cells_similarity_log_data=cells_similarity_log_data,\n cells_similarity_method=cells_similarity_method,\n target_metacell_size=target_metacell_size,\n max_cell_size=max_cell_size,\n max_cell_size_factor=max_cell_size_factor,\n cell_sizes=None,\n knn_k=target_metacell_size,\n min_knn_k=target_metacell_size,\n knn_balanced_ranks_factor=knn_balanced_ranks_factor,\n knn_incoming_degree_factor=knn_incoming_degree_factor,\n knn_outgoing_degree_factor=knn_outgoing_degree_factor,\n min_seed_size_quantile=min_seed_size_quantile,\n max_seed_size_quantile=max_seed_size_quantile,\n candidates_cooldown_pass=candidates_cooldown_pass,\n candidates_cooldown_node=candidates_cooldown_node,\n candidates_min_split_size_factor=None,\n candidates_max_merge_size_factor=None,\n candidates_min_metacell_cells=1,\n must_complete_cover=True,\n random_seed=random_seed)\n direct_groups = ut.get_o_numpy(gdata, 'metacell')\n zero_count = np.sum(direct_groups == 0)\n one_count = np.sum(direct_groups == 1)\n ut.log_calc(f'group: {group_index} size: {len(direct_groups)} '\n f'split into: {zero_count} + {one_count}')\n assert zero_count + one_count == len(direct_groups)\n assert zero_count > 0\n assert one_count > 0\n return (group_cells_mask, group_index + groups_count * direct_groups)\n\n for (group_cells_mask, group_cells_halves) \\\n in ut.parallel_map(split_group, groups_count):\n half_groups_of_cells[group_cells_mask] = group_cells_halves\n\n ut.set_o_data(adata, f'half_{group}', half_groups_of_cells,\n formatter=ut.groups_description)\n\n\[email protected]()\[email protected]_call()\[email protected]_doc()\ndef compute_groups_self_consistency(\n adata: AnnData,\n what: Union[str, ut.Matrix] = '__x__',\n *,\n group: str = 'metacell',\n genes_mask: Optional[ut.NumpyVector] = None,\n self_similarity_log_data: bool = pr.self_similarity_log_data,\n self_similarity_value_normalization: float = pr.self_similarity_value_normalization,\n self_similarity_method: str = pr.self_similarity_method,\n reproducible: bool = pr.reproducible,\n logistics_location: float = pr.logistics_location,\n logistics_slope: float = pr.logistics_slope,\n) -> ut.NumpyVector:\n '''\n Compute the self 
consistency (similarity between two halves) of some groups.\n\n **Input**\n\n The input annotated ``adata`` is expected to contain a per-observation annotation named\n ``group`` (default: {group}) which identifies the group (metacells) each observation (cell)\n belongs to, and ``half_<group>`` which identifies the half-group each observation belongs\n to (e.g. as computed by :py:func:`split_groups`). Specifically, the indices of the halves\n of group index ``i`` are ``i`` and ``i + groups_count``.\n\n **Returns**\n\n A Numpy vector holding, for each group, the similarity between its two halves.\n\n **Computation Parameters**\n\n 1. For each group, compute the sum of values in each half and normalize it to fractions (sum of 1).\n\n 2. If ``genes_mask`` is specified, select only the genes specified in it. Note the sum of the\n fractions of the genes of each group in the result will be less than or equal to 1.\n\n 3. If ``self_similarity_log_data`` (default: {self_similarity_log_data}), log2 the values using\n ``self_similarity_value_normalization`` (default: {self_similarity_value_normalization}).\n\n 4. Compute the ``self_similarity_method`` (default: {self_similarity_method}) between the two\n halves. If this is the ``logistics`` similarity, then this will use ``logistics_location``\n (default: {logistics_location}) and ``logistics_slope`` (default: {logistics_slope}). If this\n is ``pearson``, and if ``reproducible`` (default: {reproducible}) is ``True``, a slower\n (still parallel) but reproducible algorithm will be used to compute Pearson correlations.\n '''\n hdata = \\\n tl.group_obs_data(adata, what, groups=f'half_{group}', name='.halves')\n assert hdata is not None\n\n sum_of_halves = ut.get_o_numpy(hdata, f'{what}|sum')\n halves_values = \\\n ut.to_numpy_matrix(ut.get_vo_proper(hdata, what, layout='row_major'))\n halves_data = \\\n ut.mustbe_numpy_matrix(ut.scale_by(halves_values, sum_of_halves,\n by='row'))\n\n if self_similarity_value_normalization > 0:\n halves_data += self_similarity_value_normalization\n\n if self_similarity_log_data:\n halves_data = ut.log_data(halves_data, base=2)\n\n if genes_mask is not None:\n halves_data = halves_data[:, genes_mask]\n\n assert hdata.n_obs % 2 == 0\n groups_count = hdata.n_obs // 2\n low_half_indices = np.arange(groups_count)\n high_half_indices = low_half_indices + groups_count\n\n low_half_data = halves_data[low_half_indices, :]\n high_half_data = halves_data[high_half_indices, :]\n\n assert self_similarity_method in ('logistics', 'pearson')\n if self_similarity_method == 'logistics':\n similarity = ut.pairs_logistics_rows(low_half_data, high_half_data,\n location=logistics_location,\n slope=logistics_slope)\n similarity *= -1\n similarity += 1\n else:\n similarity = ut.pairs_corrcoef_rows(low_half_data, high_half_data,\n reproducible=reproducible)\n\n return similarity\n"
] | [
[
"numpy.sum",
"numpy.any",
"numpy.arange",
"numpy.max",
"numpy.full"
]
] |
gcode-ai/rayml | [
"92c4f3c6041f465fee27a6c03bd7959c4ef21124"
] | [
"rayml/tests/component_tests/test_exponential_smoothing_regressor.py"
] | [
"from unittest.mock import patch\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom rayml.model_family import ModelFamily\nfrom rayml.pipelines.components import ExponentialSmoothingRegressor\nfrom rayml.problem_types import ProblemTypes\n\npytestmark = [\n pytest.mark.noncore_dependency,\n pytest.mark.skip_during_conda,\n pytest.mark.skip_if_39,\n]\n\n\ndef test_model_family():\n assert (\n ExponentialSmoothingRegressor().model_family\n == ModelFamily.EXPONENTIAL_SMOOTHING\n )\n\n\ndef test_problem_types():\n assert set(ExponentialSmoothingRegressor.supported_problem_types) == {\n ProblemTypes.TIME_SERIES_REGRESSION\n }\n\n\ndef test_model_instance(ts_data):\n X, y = ts_data\n regressor = ExponentialSmoothingRegressor()\n fitted = regressor.fit(X, y)\n assert isinstance(fitted, ExponentialSmoothingRegressor)\n\n\ndef test_fit_ts_without_y(ts_data):\n X, y = ts_data\n\n regressor = ExponentialSmoothingRegressor()\n with pytest.raises(\n ValueError, match=\"Exponential Smoothing Regressor requires y as input.\"\n ):\n regressor.fit(X=X)\n\n\[email protected](\"train_features_index_dt\", [True, False])\[email protected](\"train_target_index_dt\", [True, False])\[email protected](\n \"train_none, no_features, datetime_feature\",\n [\n (True, False, False),\n (False, True, False),\n (False, False, True),\n (False, False, False),\n ],\n)\n@patch(\"sktime.forecasting.exp_smoothing.ExponentialSmoothing.fit\")\ndef test_remove_datetime(\n mock_fit,\n train_features_index_dt,\n train_target_index_dt,\n train_none,\n datetime_feature,\n no_features,\n get_ts_X_y,\n):\n X_train, _, y_train = get_ts_X_y(\n train_features_index_dt,\n train_target_index_dt,\n train_none,\n datetime_feature,\n no_features,\n test_features_index_dt=False,\n )\n\n if not train_none:\n if train_features_index_dt:\n assert isinstance(X_train.index, pd.DatetimeIndex)\n else:\n assert not isinstance(X_train.index, pd.DatetimeIndex)\n if datetime_feature:\n assert X_train.select_dtypes(include=[\"datetime64\"]).shape[1] == 1\n if train_target_index_dt:\n assert isinstance(y_train.index, pd.DatetimeIndex)\n else:\n assert not isinstance(y_train.index, pd.DatetimeIndex)\n\n regressor = ExponentialSmoothingRegressor()\n regressor.fit(X_train, y_train)\n\n y_train_removed = mock_fit.call_args.kwargs.get(\"y\", None)\n if y_train_removed is not None:\n assert not isinstance(y_train_removed.index, pd.DatetimeIndex)\n\n\ndef test_set_forecast(get_ts_X_y):\n from sktime.forecasting.base import ForecastingHorizon\n\n _, X_test, _ = get_ts_X_y(\n train_features_index_dt=False,\n train_target_index_dt=False,\n train_none=False,\n datetime_feature=False,\n no_features=False,\n test_features_index_dt=False,\n )\n\n regressor = ExponentialSmoothingRegressor()\n fh_ = regressor._set_forecast(X_test)\n assert isinstance(fh_, ForecastingHorizon)\n assert len(fh_) == len(X_test)\n assert fh_.is_relative\n\n\ndef test_feature_importance(ts_data):\n X, y = ts_data\n regressor = ExponentialSmoothingRegressor()\n with patch.object(regressor, \"_component_obj\"):\n regressor.fit(X, y)\n pd.testing.assert_series_equal(\n regressor.feature_importance, pd.Series(np.zeros(1))\n )\n\n\[email protected](\n \"train_none, train_features_index_dt, \"\n \"train_target_index_dt, no_features, \"\n \"datetime_feature, test_features_index_dt\",\n [\n (True, False, False, False, False, False),\n (False, True, True, False, False, True),\n (False, True, True, False, False, False),\n (True, False, True, True, True, False),\n ],\n)\ndef 
test_fit_predict(\n train_features_index_dt,\n train_target_index_dt,\n train_none,\n no_features,\n datetime_feature,\n test_features_index_dt,\n get_ts_X_y,\n):\n from sktime.forecasting.base import ForecastingHorizon\n from sktime.forecasting.exp_smoothing import ExponentialSmoothing\n\n X_train, X_test, y_train = get_ts_X_y(\n train_features_index_dt,\n train_target_index_dt,\n train_none,\n datetime_feature,\n no_features,\n test_features_index_dt,\n )\n\n fh_ = ForecastingHorizon([i + 1 for i in range(len(X_test))], is_relative=True)\n\n sk_clf = ExponentialSmoothing()\n regressor = sk_clf.fit(X=X_train, y=y_train)\n y_pred_sk = regressor.predict(fh=fh_, X=X_test)\n\n m_clf = ExponentialSmoothingRegressor()\n m_clf.fit(X=X_train, y=y_train)\n y_pred = m_clf.predict(X=X_test)\n\n assert (y_pred_sk.values == y_pred.values).all()\n assert y_pred.index.equals(X_test.index)\n\n\ndef test_predict_no_X_in_fit(\n get_ts_X_y,\n):\n from sktime.forecasting.base import ForecastingHorizon\n from sktime.forecasting.exp_smoothing import ExponentialSmoothing\n\n X_train, X_test, y_train = get_ts_X_y(\n train_features_index_dt=False,\n train_target_index_dt=True,\n train_none=True,\n datetime_feature=False,\n no_features=True,\n test_features_index_dt=False,\n )\n\n fh_ = ForecastingHorizon([i + 1 for i in range(len(X_test))], is_relative=True)\n\n sk_clf = ExponentialSmoothing()\n regressor = sk_clf.fit(X=X_train, y=y_train)\n y_pred_sk = regressor.predict(fh=fh_)\n\n m_clf = ExponentialSmoothingRegressor()\n m_clf.fit(X=None, y=y_train)\n y_pred = m_clf.predict(X=X_test)\n\n assert (y_pred_sk.values == y_pred.values).all()\n"
] | [
[
"numpy.zeros"
]
] |
hackingmaterials/duramat | [
"8e7c0efecc1af89cdc27a3ed569b8c5f1d985888"
] | [
"clearsky_detection/cities_mapper.py"
] | [
"\n\nimport pandas as pd\nimport os\n\ngeological_info = pd.read_json('./cities.json')\ngeological_info = geological_info.drop(['growth_from_2000_to_2013', 'population'], axis=1)\ngeological_info['city'] = geological_info['city'].apply(lambda x: x.replace(' ', ''))\ngeological_info['state'] = geological_info['state'].apply(lambda x: x.replace(' ', ''))\n\nprint(geological_info.head())\n\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# map = Basemap(projection='merc', lat_0 = 57, lon_0 = -135,\n# resolution = 'h', area_thresh = 0.1,\n# llcrnrlon=-136.25, llcrnrlat=56.0,\n# urcrnrlon=-134.25, urcrnrlat=57.75)\n\nmap = Basemap(llcrnrlon=-119, llcrnrlat=22, urcrnrlon=-64,\n urcrnrlat=49, projection='lcc', lat_1=33, lat_2=45,\n lon_0=-95, resolution='i', area_thresh=10000)\n\nmap.drawcoastlines()\nmap.drawcountries()\nmap.drawstates()\n# map.fillcontinents(color = 'coral')\nmap.drawmapboundary()\n\nfor lon, lat in zip(geological_info['longitude'], geological_info['latitude']):\n x, y = map(lon, lat)\n map.scatter(x, y, marker='x')\n # map.scatter(x, y, s=20)\n\nmap.readshapefile(os.path.expanduser('~/Downloads/cb_2016_us_nation_5m/cb_2016_us_nation_5m'), 'us_borders', drawbounds=True)\n\nprint(map.us_borders)\n# map.plot(geological_info['longitude'], geological_info['latitude'])\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"pandas.read_json"
]
] |
arlain23/pydensecrf | [
"dee24b055d92dbbc906d50b282e0862d83c0cf80"
] | [
"tests/issue29.py"
] | [
"# probs of shape 3d image per class: Nb_classes x Height x Width x Depth\n# assume the image has shape (69, 51, 72)\nimport numpy as np\nimport pydensecrf.densecrf as dcrf\nfrom pydensecrf.utils import unary_from_softmax, create_pairwise_gaussian\n\n###\n\n#shape = (69, 51, 72)\n#probs = np.random.randn(5, 69, 51).astype(np.float32)\n#probs /= probs.sum(axis=0, keepdims=True)\n#\n#d = dcrf.DenseCRF(np.prod(shape), probs.shape[0])\n#U = unary_from_softmax(probs)\n#print(U.shape)\n#d.setUnaryEnergy(U)\n#feats = create_pairwise_gaussian(sdims=(1.0, 1.0, 1.0), shape=shape)\n#d.addPairwiseEnergy(feats, compat=3, kernel=dcrf.FULL_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC)\n#Q = d.inference(5)\n#new_image = np.argmax(Q, axis=0).reshape((shape[0], shape[1],shape[2]))\n\n\n###\n\nSHAPE, NLABELS = (69, 51, 72), 5\nprobs = np.random.randn(NLABELS, 68, 50).astype(np.float32) # WRONG shape here\nprobs /= probs.sum(axis=0, keepdims=True)\n\nd = dcrf.DenseCRF(np.prod(SHAPE), NLABELS)\n\nd.setUnaryEnergy(unary_from_softmax(probs)) # THIS SHOULD THROW and not crash later\nfeats = create_pairwise_gaussian(sdims=(1.0, 1.0, 1.0), shape=SHAPE)\nd.addPairwiseEnergy(feats, compat=3, kernel=dcrf.FULL_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC)\n\nQ = d.inference(5)\nnew_image = np.argmax(Q, axis=0).reshape(SHAPE)\n"
] | [
[
"numpy.random.randn",
"numpy.argmax",
"numpy.prod"
]
] |
oxoxlol/api_key_detector | [
"e0ca322cda65e619479109fbcb8a7dda9483a0e9"
] | [
"sequentiality.py"
] | [
"import math\nimport sys\n\nimport matplotlib\n\nmatplotlib.use('Agg') # Avoid tkinter dependency\nimport matplotlib.pyplot as plt\n\nfrom . import charset as cset\n\n\ndef string_sequentiality(string, charset, plot_scatterplot=False):\n \"\"\"\n Computes how much a string contains sequence of consecutive or distance-fixed characters\n\n :param string: the string\n :param charset: a charset as a string\n :param plot_scatterplot: optional boolean, if true plots a scatterplot\n\n :return: sequentiality index, 0 (low sequentiality) to 1 (high sequentiality)\n :rtype: float\n \"\"\"\n if len(string) <= 2:\n return 0\n string_length = len(string)\n window_size = math.floor(math.log(string_length))\n window_size = int(window_size)\n counter = 0\n buckets = {}\n for j in range(1, len(string)):\n for i in range(max(j - window_size, 0), j):\n diff = math.fabs((ord(string[j]) - ord(string[i])))\n buckets[diff] = buckets.get(diff, 0) + 1\n counter += 1\n\n # normalize histogram\n for key in buckets.keys():\n buckets[key] = buckets[key] / counter\n\n # Calculate MSE\n charset_buckets = cset.get_char_distance_distribution(charset)\n mse = 0\n for key in charset_buckets.keys():\n diff = buckets.get(key, 0) - charset_buckets.get(key, 0)\n square_diff = diff ** 2\n mse += square_diff / len(charset_buckets.keys())\n\n if plot_scatterplot:\n # Plot the scatterplot\n subplot = plt.subplot(111)\n subplot.set_xlabel(\"Average distance from other characters\")\n subplot.set_ylabel(\"% of chars at distance x from the others\")\n s1 = s2 = None\n for v in charset_buckets.items():\n x = v[0]\n y = v[1]\n s1 = plt.scatter(x, y * 100, alpha=0.6, color='r', label='charset')\n\n for v in buckets.items():\n x = v[0]\n y = v[1]\n s2 = plt.scatter(x, y * 100, alpha=0.6, color='g', label='string')\n\n plt.legend(handles=[s1, s2])\n plt.show()\n\n return mse\n\n\ndef weighted_sequentiality(string, charset):\n \"\"\"\n Returns the string sequentiality weighted by the string length. I.e.\n ABC is less meaningful than ABCDEFGHIJKLMNO\n\n :param string:\n :param charset:\n :return:\n \"\"\"\n return string_sequentiality(string, charset) * len(string)\n\n\ndef multiple_string_sequentiality(item_charset_dict):\n \"\"\"\n Calculates the sequentiality for a list of strings\n\n :param item_charset_dict: list of string:charset\n :return: string:sequentiality dictionary\n :rtype: dict\n \"\"\"\n items = {}\n for item in item_charset_dict:\n items[item] = (string_sequentiality(item[0], item[1]))\n return items\n\n\ndef main(argv):\n if len(argv) != 2:\n print(\"Usage: python {0} string_to_be_computed\".format(argv[0]))\n return\n else:\n print(\n \"Sequentiality index: {0}\".format(string_sequentiality(argv[1], cset.get_narrower_charset(argv[1]), True)))\n\n\nif __name__ == '__main__':\n main(sys.argv)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.use",
"matplotlib.pyplot.scatter"
]
] |
AK391/RealBasicVSR | [
"41a27d90447b324296e932da7b08b7a42aa7a613"
] | [
"generate_video_demo.py"
] | [
"import glob\n\nimport cv2\nimport mmcv\nimport numpy as np\n\n\nclass VideoDemo:\n ''' Generate video demo given two sets of images.\n\n Please note that there will be video compression when you save the output\n as a video. Therefore, the result would be inferior to the actual outputs.\n\n Args:\n input_left_dir (str): The directory storing the images at the left.\n input_right_dir (str): The directory storing the images at the right.\n output_path (str): The path of the output video file.\n start_frame (int): The first frame to start sliding.\n pause_frame (int): The frame where a pause is raised.\n repeat_when_pause (int): The number of frames to be repeated when\n paused.\n slide_step (int): The step size of each slide. It controls the speed of\n the slide.\n line_width (int): The width of the line separating two images.\n frame_rate (int): The frame rate of the output video.\n\n '''\n\n def __init__(self, input_left_dir, input_right_dir, output_path,\n start_frame, pause_frame, repeat_when_pause, slide_step,\n line_width, frame_rate):\n\n self.paths_left = sorted(glob.glob(f'{input_left_dir}/*'))\n self.paths_right = sorted(glob.glob(f'{input_right_dir}/*'))\n\n self.output_path = output_path\n self.start_frame = start_frame\n self.pause_frame = pause_frame\n self.repeat_when_pause = repeat_when_pause\n self.slide_step = slide_step\n self.line_width = line_width\n self.frame_rate = frame_rate\n\n # initialize video writer\n self.video_writer = None\n\n def merge_images(self, img_left, img_right, x_coord):\n img_out = np.copy(img_left)\n img_out[:, x_coord:, :] = img_right[:, x_coord:, :]\n\n # add white line\n img_out[:, x_coord:x_coord + self.line_width, :] *= 0\n img_out[:, x_coord:x_coord + self.line_width, :] += 255\n\n return img_out\n\n def __call__(self):\n for i, (path_left, path_right) in enumerate(\n zip(self.paths_left, self.paths_right)):\n\n # start sliding\n if i >= self.start_frame:\n img_left = mmcv.imread(path_left, backend='cv2')\n img_right = mmcv.imread(path_right, backend='cv2')\n # img_right = mmcv.imrescale(\n # img_right, 4, interpolation='nearest', backend='cv2')\n current_idx = self.slide_step * (i - self.start_frame)\n img_out = self.merge_images(img_left, img_right, current_idx)\n\n else:\n img_out = mmcv.imread(path_left, backend='cv2')\n\n # create video writer if haven't\n if self.video_writer is None:\n h, w = img_out.shape[:2]\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n self.video_writer = cv2.VideoWriter(self.output_path, fourcc,\n self.frame_rate, (w, h))\n\n self.video_writer.write(img_out.astype(np.uint8))\n\n # pause at somewhere\n if i == self.pause_frame:\n for _ in range(0, self.repeat_when_pause):\n self.video_writer.write(img_out.astype(np.uint8))\n\n # pause before sliding over the last frame\n for _ in range(0, self.repeat_when_pause):\n self.video_writer.write(img_out.astype(np.uint8))\n\n # slide over the last frame\n w = img_out.shape[1]\n current_idx = min(current_idx, w - self.line_width)\n while current_idx + self.line_width >= 0:\n img_out = self.merge_images(img_left, img_right, current_idx)\n self.video_writer.write(img_out.astype(np.uint8))\n\n current_idx -= self.slide_step\n\n # pause before ending the demo\n self.video_writer.write(img_right.astype(np.uint8))\n for _ in range(0, self.repeat_when_pause):\n self.video_writer.write(img_right.astype(np.uint8))\n\n cv2.destroyAllWindows()\n self.video_writer.release()\n\n\nif __name__ == '__main__':\n \"\"\"\n Assuming you have used our demo code to generate output images in\n 
results/demo_000. You can then use the following code to generate a video\n demo.\n \"\"\"\n\n video_demo = VideoDemo(\n input_left_dir='results/demo_000',\n input_right_dir='data/demo_000',\n output_path='demo_video.mp4',\n start_frame=5,\n pause_frame=15,\n repeat_when_pause=25,\n slide_step=100,\n line_width=10,\n frame_rate=25,\n )\n video_demo()\n"
] | [
[
"numpy.copy"
]
] |
shixingxing/tf-learn | [
"4fa2eee3a51328fcf12665495356d0ce05cc537a"
] | [
"pachong/gupiao.py"
] | [
"import requests\nimport re\nimport pandas as pd\nimport _thread\n\n\n# 用get方法访问服务器并提取页面数据\ndef getHtml(cmd, page):\n url = \"http://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?cb=jQuery112406115645482397511_1542356447436&type=CT&token=4f1862fc3b5e77c150a2b985b12db0fd&sty=FCOIATC&js=(%7Bdata%3A%5B(x)%5D%2CrecordsFiltered%3A(tot)%7D)&cmd=\" + cmd + \"&st=(ChangePercent)&sr=-1&p=\" + str(\n page) + \"&ps=20\"\n r = requests.get(url)\n pat = \"data:\\[(.*?)\\]\"\n data = re.compile(pat, re.S).findall(r.text)\n return data\n\n\n# 获取单个页面股票数据\ndef getOnePageStock(cmd, page):\n data = getHtml(cmd, page)\n datas = data[0].split('\",\"')\n stocks = []\n for i in range(len(datas)):\n stock = datas[i].replace('\"', \"\").split(\",\")\n stocks.append(stock)\n return stocks\n\n\ndef loadByCmd(cmd, i):\n page = 1\n stocks = getOnePageStock(cmd[i], page)\n print(\"Thread:\" + str(_thread.get_ident()) + i + \"已加载第\" + str(page) + \"页\")\n # 自动爬取多页,并在结束时停止\n while True:\n page += 1\n if getHtml(cmd[i], page) != getHtml(cmd[i], page - 1):\n stocks.extend(getOnePageStock(cmd[i], page))\n print(\"Thread:\" + str(_thread.get_ident()) + i + \"已加载第\" + str(page) + \"页\")\n else:\n break\n df = pd.DataFrame(stocks)\n # 提取主要数据/提取全部数据\n # df.drop([0,14,15,16,17,18,19,20,21,22,23,25],axis=1,inplace=True)\n columns = {1: \"代码\", 2: \"名称\", 3: \"最新价格\", 4: \"涨跌额\", 5: \"涨跌幅\", 6: \"成交量\", 7: \"成交额\", 8: \"振幅\", 9: \"最高\", 10: \"最低\",\n 11: \"今开\", 12: \"昨收\", 13: \"量比\", 24: \"时间\"}\n df.rename(columns=columns, inplace=True)\n df.to_excel(\"股票--\" + i + \".xls\")\n print(\"已保存\" + i + \".xls\")\n\n\ndef main():\n print(\"Main Thread:\" + str(_thread.get_ident()))\n cmd = {\n \"上证指数\": \"C.1\",\n \"深圳指数\": \"C.5\",\n \"沪深A股\": \"C._A\",\n \"上证A股\": \"C.2\",\n \"深圳A股\": \"C._SZAME\",\n \"新股\": \"C.BK05011\",\n \"中小板\": \"C.13\",\n \"创业板\": \"C.80\"\n }\n for i in cmd.keys():\n try:\n _thread.start_new_thread(loadByCmd, (cmd, i))\n except:\n print(\"error\")\n\n\nmain()\nwhile 1:\n pass\n"
] | [
[
"pandas.DataFrame"
]
] |
Nik6198/Advanced-DataStructure | [
"c7c6154b8dd5ff94825ad91020dccd8637d73fb1"
] | [
"Fibonacci Heap/dijstra_using_fib_heap.py"
] | [
"import _fib_heap\nimport random\nimport time \nimport matplotlib.pyplot as plt\nclass graph:\n\n def __init__(self,n):\n self.graph=[]\n \n for i in range(n):\n temp=[random.randint(0,1001) for i in range(n)]\n temp[i]=0\n self.graph.append(temp)\n \n \n def accept(self):\n for i in range(len(self.graph)):\n m=int(input())\n n=int(input())\n for j in range(n):\n dest=int(input())\n cost=int(input())\n self.graph[m-1][dest-1]=cost \n \n def dij_array(self,source,n):\n start=time.time()\n dist=self.graph[source-1].copy()\n #print(dist)\n vis=[False for i in range(len(self.graph))]\n vis[source-1]=True\n \n \n for i in range(len(self.graph)-1):\n min=9999999\n v=None\n for i in range(len(self.graph)):\n \n if not vis[i] and min>dist[i]:\n v=i\n min=dist[i]\n \n if v is None:\n break\n vis[v]=True\n min=dist[v]\n for i in range(len(self.graph)):\n if not vis[i] and dist[i]>dist[v]+self.graph[v][i]:\n dist[i]=dist[v]+self.graph[v][i]\n #print(min,v,dist,vis)\n t=abs(start-time.time())\n plt.plot([n],[t],'bo') \n print(\"time taken for array\",t,n)\n return dist\n\n def dij_heap(self,source,n):\n \n h1=_fib_heap.fib()\n \n for i in range(len(self.graph)):\n if i is not (source-1):\n h1.insert(key=self.graph[source-1][i],vertex=i)\n dist=self.graph[source-1].copy()\n dist1=dist.copy()\n #print(dist)\n #h1.print1()\n #print(\"**\")\n #vis=[False for i in range(len(self.graph))]\n #vis[source-1]=True\n start=time.time()\n for i in range(len(self.graph)-1):\n min=h1.extract_min()\n #print(min[0],min[1])\n #if h1.min is not None:\n #print(\"***\")\n #h1.print1()\n #print(\"***\")\n list1=[]\n \n for k in h1.hash.keys():\n #print(h1.hash.keys(),\"hi\",h1.hash[k],\"bye\")\n #list1=list(h1.hash.values())\n #print(k,list1[0].key[1])\n \n if h1.hash[k].key[1] > dist[min[0]]+self.graph[min[0]][k]:\n h1.decrease_key(k,dist1[k],dist[min[0]]+self.graph[min[0]][k])\n dist[k]=dist[min[0]]+self.graph[min[0]][k]\n t=abs(start-time.time())\n plt.plot(n,[t],'ro') \n print(\"time taken is for heap\",t,n)\n \n return dist \n#h1=fib()\nn=0\nwhile n<=5000:\n n+=100\n#n=int(input())\n g=graph(n)\n#print(g.graph[0])\n#g.accept()\n\n#for i in g.graph:\n # print(i)\n \n\n g.dij_heap(1,n)\n g.dij_array(1,n)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
]
] |
tomMEM/nltools | [
"848112e9c3f973e1c2a4b3790682ef9e5afc5b45"
] | [
"examples/01_DataOperations/plot_adjacency.py"
] | [
"\"\"\"\nAdjacency Class\n===============\n\nNltools has an additional data structure class for working with two-dimensional\nsquare matrices. This can be helpful when working with similarity/distance\nmatrices or directed or undirected graphs. Similar to the Brain_Data class,\nmatrices are vectorized and can store multiple matrices in the same object.\nThis might reflect different brain regions, subjects, or time. Most of the\nmethods on the Adjacency class are consistent with those in the Brain_Data\nclass.\n\n\"\"\"\n\n#########################################################################\n# Load Data\n# ----------\n#\n# Similar to the Brain_Data class, Adjacency instances can be initialized by passing in a numpy array or pandas data frame, or a path to a csv file or list of files. Here we will generate some fake data to demonstrate how to use this class. In addition to data, you must indicate the type of matrix. Currently, you can specify `['similarity','distance','directed']`. Similarity matrices are symmetrical with typically ones along diagonal, Distance matrices are symmetrical with zeros along diagonal, and Directed graph matrices are not symmetrical. Symmetrical matrices only store the upper triangle. The Adjacency class can also accommodate labels, but does not require them.\n\nfrom nltools.data import Adjacency\nfrom scipy.linalg import block_diag\nimport numpy as np\n\nm1 = block_diag(np.ones((4, 4)), np.zeros((4, 4)), np.zeros((4, 4)))\nm2 = block_diag(np.zeros((4, 4)), np.ones((4, 4)), np.zeros((4, 4)))\nm3 = block_diag(np.zeros((4, 4)), np.zeros((4, 4)), np.ones((4, 4)))\nnoisy = (m1*1+m2*2+m3*3) + np.random.randn(12, 12)*.1\ndat = Adjacency(noisy, matrix_type='similarity', labels=['C1']*4 + ['C2']*4 + ['C3']*4)\n\n#########################################################################\n# Basic information about the object can be viewed by simply calling it.\n\nprint(dat)\n\n#########################################################################\n# Adjacency objects can easily be converted back into two-dimensional matrices with the `.squareform()` method.\n\ndat.squareform()\n\n#########################################################################\n# Matrices can viewed as a heatmap using the `.plot()` method.\n\ndat.plot()\n\n#########################################################################\n# The mean within a a grouping label can be calculated using the `.within_cluster_mean()` method. You must specify a group variable to group the data. Here we use the labels.\n\nprint(dat.within_cluster_mean(clusters=dat.labels))\n\n#########################################################################\n# Regression\n# ----------\n#\n# Adjacency objects can currently accommodate two different types of regression. Sometimes we might want to decompose an Adjacency matrix from a linear combination of other Adjacency matrices. Other times we might want to perform a regression at each pixel in a stack of Adjacency matrices. Here we provide an example of each method. We use the same data we generated above, but attempt to decompose it by each block of data. We create the design matrix by simply concatenating the matrices we used to create the data object. The regress method returns a dictionary containing all of the relevant information from the regression. 
Here we show that the model recovers the average weight in each block.\n\nX = Adjacency([m1, m2, m3], matrix_type='similarity')\nstats = dat.regress(X)\nprint(stats['beta'])\n\n#########################################################################\n# In addition to decomposing a single adjacency matrix, we can also estimate a model that predicts the variance over each voxel. This is equivalent to a univariate regression in imaging analyses. Remember that just like in imaging these tests are non-independent and may require correcting for multiple comparisons. Here we create some data that varies over matrices and identify pixels that follow a particular on-off-on pattern. We plot the t-values that exceed 2.\n\nfrom nltools.data import Design_Matrix\nimport matplotlib.pyplot as plt\n\ndata = Adjacency([m1 + np.random.randn(12,12)*.5 for x in range(5)] +\n [np.zeros((12, 12)) + np.random.randn(12, 12)*.5 for x in range(5)] +\n [m1 + np.random.randn(12, 12)*.5 for x in range(5)])\n\nX = Design_Matrix([1]*5 + [0]*5 + [1]*5)\nf = X.plot()\nf.set_title('Model', fontsize=18)\n\nstats = data.regress(X)\nt = stats['t'].plot(vmin=2)\nplt.title('Significant Pixels',fontsize=18)\n\n#########################################################################\n# Similarity/Distance\n# -------------------\n#\n# We can calculate similarity between two Adjacency matrices using `.similiarity()`.\n\nstats = dat.similarity(m1)\nprint(stats)\n\n#########################################################################\n# We can also calculate the distance between multiple matrices contained within a single Adjacency object. Any distance metric is available from the `sci-kit learn <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html>`_ by specifying the `method` flag. This outputs an Adjacency matrix. In the example below we see that several matrices are more similar to each other (i.e., when the signal is on). Remember that the nodes here now represent each matrix from the original distance matrix.\n\ndist = data.distance(metric='correlation')\ndist.plot()\n\n#########################################################################\n# Similarity matrices can be converted to and from Distance matrices using `.similarity_to_distance()` and `.distance_to_similarity()`.\n\ndist.distance_to_similarity().plot()\n\n#########################################################################\n# Multidimensional Scaling\n# ------------------------\n#\n# We can perform additional analyses on distance matrices such as multidimensional scaling. Here we provide an example to create a 3D multidimensional scaling plot of our data to see if the on and off matrices might naturally group together.\n\ndist = data.distance(metric='correlation')\ndist.labels = ['On']*5 + ['Off']*5 + ['On']*5\ndist.plot_mds(n_components=3)\n\n#########################################################################\n# Graphs\n# ------\n#\n# Adjacency matrices can be cast to networkx objects using `.to_graph()` if the optional dependency is installed. This allows any graph theoretic metrics or plots to be easily calculated from Adjacency objects.\n\nimport networkx as nx\n\ndat = Adjacency(m1+m2+m3, matrix_type='similarity')\ng = dat.to_graph()\n\nprint('Degree of each node: %s' % g.degree())\n\nnx.draw_circular(g)\n"
] | [
[
"matplotlib.pyplot.title",
"numpy.ones",
"numpy.random.randn",
"numpy.zeros"
]
] |
willwheelera/pyscf | [
"1de7f6fb8403bb0769a05eade2c2e7aa4f8a160e"
] | [
"pyscf/pbc/df/test/test_ft_ao.py"
] | [
"#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy\nfrom pyscf.pbc import gto as pgto\nfrom pyscf.pbc import dft as pdft\nfrom pyscf.pbc.df import ft_ao\nfrom pyscf.pbc import tools\nfrom pyscf import lib\n\ndef setUpModule():\n global cell, cell1\n cell = pgto.Cell()\n cell.atom = '''\n He1 1.3 .2 .3\n He2 .1 .1 1.1 '''\n cell.basis = {'He1': 'sto3g', 'He2': 'ccpvdz'}\n cell.mesh = (31,)*3\n cell.a = numpy.diag([2.2, 1.9, 2.])\n cell.build()\n\n cell1 = pgto.Cell()\n cell1.atom = '''\n He 1.3 .2 .3\n He .1 .1 1.1 '''\n cell1.basis = {'He': [[0, [0.8, 1]],\n [1, [0.6, 1]]\n ]}\n cell1.mesh = [17]*3\n cell1.a = numpy.array(([2.0, .9, 0. ],\n [0.1, 1.9, 0.4],\n [0.8, 0 , 2.1]))\n cell1.build()\n\ndef tearDownModule():\n global cell, cell1\n del cell, cell1\n\nclass KnownValues(unittest.TestCase):\n def test_ft_ao(self):\n coords = pdft.gen_grid.gen_uniform_grids(cell)\n aoR = pdft.numint.eval_ao(cell, coords)\n ngrids, nao = aoR.shape\n ref = numpy.asarray([tools.fft(aoR[:,i], cell.mesh) for i in range(nao)])\n ref = ref.T * (cell.vol/ngrids)\n dat = ft_ao.ft_ao(cell, cell.Gv)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,0]-dat[:,0]) , 8.4358614794095722e-11, 9)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,1]-dat[:,1]) , 0.0041669297531642616 , 4)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:]-dat[:,2:]), 5.8677286005879366e-14, 9)\n\n coords = pdft.gen_grid.gen_uniform_grids(cell1)\n aoR = pdft.numint.eval_ao(cell1, coords)\n ngrids, nao = aoR.shape\n ref = numpy.asarray([tools.fft(aoR[:,i], cell1.mesh) for i in range(nao)])\n ref = ref.T * (cell1.vol/ngrids)\n dat = ft_ao.ft_ao(cell1, cell1.Gv)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,0]-dat[:,0]) , 0, 5)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,1]-dat[:,1]) , 0, 3)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:]-dat[:,2:]), 0, 3)\n\n def test_ft_ao_with_kpts(self):\n numpy.random.seed(1)\n kpt = numpy.random.random(3)\n coords = pdft.gen_grid.gen_uniform_grids(cell)\n aoR = pdft.numint.eval_ao(cell, coords, kpt=kpt)\n ngrids, nao = aoR.shape\n expmikr = numpy.exp(-1j*numpy.dot(coords,kpt))\n ref = numpy.asarray([tools.fftk(aoR[:,i], cell.mesh, expmikr) for i in range(nao)])\n ref = ref.T * (cell.vol/ngrids)\n dat = ft_ao.ft_ao(cell, cell.Gv, kpt=kpt)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,0]-dat[:,0]) , 1.3359899490499813e-10, 9)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,1]-dat[:,1]) , 0.0042404556036939756 , 4)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:]-dat[:,2:]), 4.8856357999633564e-14, 9)\n\n coords = pdft.gen_grid.gen_uniform_grids(cell1)\n aoR = pdft.numint.eval_ao(cell1, coords, kpt=kpt)\n ngrids, nao = aoR.shape\n expmikr = numpy.exp(-1j*numpy.dot(coords,kpt))\n ref = numpy.asarray([tools.fftk(aoR[:,i], cell1.mesh, expmikr) for i in range(nao)])\n ref = ref.T * (cell1.vol/ngrids)\n dat = ft_ao.ft_ao(cell1, cell1.Gv, kpt=kpt)\n 
self.assertAlmostEqual(numpy.linalg.norm(ref[:,0]-dat[:,0]) , 0, 5)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,1]-dat[:,1]) , 0, 3)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:]-dat[:,2:]), 0, 3)\n\n def test_ft_aoao(self):\n #coords = pdft.gen_grid.gen_uniform_grids(cell)\n #aoR = pdft.numint.eval_ao(cell, coords)\n #ngrids, nao = aoR.shape\n #ref = numpy.asarray([tools.fft(aoR[:,i].conj()*aoR[:,j], cell.mesh)\n # for i in range(nao) for j in range(nao)])\n #ref = ref.reshape(nao,nao,-1).transpose(2,0,1) * (cell.vol/ngrids)\n #dat = ft_ao.ft_aopair(cell, cell.Gv, aosym='s1hermi')\n #self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,0]-dat[:,0,0]) , 0, 5)\n #self.assertAlmostEqual(numpy.linalg.norm(ref[:,1,1]-dat[:,1,1]) , 0.02315483195832373, 4)\n #self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,2:]-dat[:,2:,2:]), 0, 9)\n #self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,2:]-dat[:,0,2:]) , 0, 9)\n #self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,0]-dat[:,2:,0]) , 0, 9)\n #idx = numpy.tril_indices(nao)\n #ref = dat[:,idx[0],idx[1]]\n #dat = ft_ao.ft_aopair(cell, cell.Gv, aosym='s2')\n #self.assertAlmostEqual(abs(dat-ref).sum(), 0, 9)\n\n coords = pdft.gen_grid.gen_uniform_grids(cell1)\n Gv, Gvbase, kws = cell1.get_Gv_weights(cell1.mesh)\n b = cell1.reciprocal_vectors()\n gxyz = lib.cartesian_prod([numpy.arange(len(x)) for x in Gvbase])\n dat = ft_ao.ft_aopair(cell1, cell1.Gv, aosym='s1', b=b,\n gxyz=gxyz, Gvbase=Gvbase)\n self.assertAlmostEqual(lib.fp(dat), 1.5666516306798806+1.953555017583245j, 9)\n dat = ft_ao.ft_aopair(cell1, cell1.Gv, aosym='s2', b=b,\n gxyz=gxyz, Gvbase=Gvbase)\n self.assertAlmostEqual(lib.fp(dat), -0.85276967757297917+1.0378751267506394j, 9)\n dat = ft_ao.ft_aopair(cell1, cell1.Gv, aosym='s1hermi', b=b,\n gxyz=gxyz, Gvbase=Gvbase)\n self.assertAlmostEqual(lib.fp(dat), 1.5666516306798806+1.953555017583245j, 9)\n aoR = pdft.numint.eval_ao(cell1, coords)\n ngrids, nao = aoR.shape\n aoaoR = numpy.einsum('pi,pj->ijp', aoR, aoR)\n ref = tools.fft(aoaoR.reshape(nao*nao,-1), cell1.mesh)\n ref = ref.reshape(nao,nao,-1).transpose(2,0,1) * (cell1.vol/ngrids)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,0]-dat[:,0,0]) , 0, 7)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,1,1]-dat[:,1,1]) , 0, 7)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,2:]-dat[:,2:,2:]), 0, 7)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,2:]-dat[:,0,2:]) , 0, 7)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,0]-dat[:,2:,0]) , 0, 7)\n idx = numpy.tril_indices(nao)\n ref = dat[:,idx[0],idx[1]]\n dat = ft_ao.ft_aopair(cell1, cell1.Gv, aosym='s2')\n self.assertAlmostEqual(abs(dat-ref).sum(), 0, 9)\n\n def test_ft_aoao_pdotp(self):\n coords = pdft.gen_grid.gen_uniform_grids(cell1)\n Gv, Gvbase, kws = cell1.get_Gv_weights(cell1.mesh)\n dat = ft_ao.ft_aopair(cell1, cell1.Gv, aosym='s1', intor='GTO_ft_pdotp_sph')\n self.assertAlmostEqual(lib.fp(dat), 5.7858606710458078-8.654809509773056j, 9)\n aoR = pdft.numint.eval_ao(cell1, coords, deriv=1)\n ngrids, nao = aoR.shape[1:]\n aoaoR = numpy.einsum('xpi,xpj->ijp', aoR[1:4], aoR[1:4])\n ref = tools.fft(aoaoR.reshape(nao*nao,-1), cell1.mesh)\n ref = ref.reshape(nao,nao,-1).transpose(2,0,1) * (cell1.vol/ngrids)\n self.assertAlmostEqual(abs(ref-dat).max(), 0, 7)\n\n def test_ft_aoao_pxp(self):\n coords = pdft.gen_grid.gen_uniform_grids(cell1)\n Gv, Gvbase, kws = cell1.get_Gv_weights(cell1.mesh)\n dat = ft_ao.ft_aopair(cell1, cell1.Gv, aosym='s1', intor='GTO_ft_pxp_sph', comp=3)\n self.assertAlmostEqual(lib.fp(dat), 
(6.4124798727215779-10.673712733378771j), 9)\n aoR = pdft.numint.eval_ao(cell1, coords, deriv=1)\n ngrids, nao = aoR.shape[1:]\n aox, aoy, aoz = aoR[1:]\n aoaoR =(numpy.einsum('pi,pj->ijp', aoy, aoz) - numpy.einsum('pi,pj->ijp', aoz, aoy),\n numpy.einsum('pi,pj->ijp', aoz, aox) - numpy.einsum('pi,pj->ijp', aox, aoz),\n numpy.einsum('pi,pj->ijp', aox, aoy) - numpy.einsum('pi,pj->ijp', aoy, aox))\n ref = tools.fft(numpy.array(aoaoR).reshape(3*nao*nao,-1), cell1.mesh)\n ref = ref.reshape(3,nao,nao,-1).transpose(0,3,1,2) * (cell1.vol/ngrids)\n self.assertAlmostEqual(abs(ref-dat).max(), 0, 7)\n\n def test_ft_aoao_with_kpts_high_cost(self):\n numpy.random.seed(1)\n kpti, kptj = numpy.random.random((2,3))\n dat = ft_ao.ft_aopair(cell, cell.Gv, kpti_kptj=(kpti,kptj))\n self.assertAlmostEqual(lib.fp(dat), -0.80184732435570638+2.4078835207597176j, 9)\n coords = pdft.gen_grid.gen_uniform_grids(cell)\n aoi = pdft.numint.eval_ao(cell, coords, kpt=kpti)\n aoj = pdft.numint.eval_ao(cell, coords, kpt=kptj)\n ngrids, nao = aoj.shape\n q = kptj - kpti\n expmikr = numpy.exp(-1j*numpy.dot(coords,q))\n ref = numpy.asarray([tools.fftk(aoi[:,i].conj()*aoj[:,j], cell.mesh, expmikr)\n for i in range(nao) for j in range(nao)])\n ref = ref.reshape(nao,nao,-1).transpose(2,0,1) * (cell.vol/ngrids)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,0]-dat[:,0,0]) , 0, 5)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,1,1]-dat[:,1,1]) , 0.023225471785938184 , 4)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,2:]-dat[:,2:,2:]), 0, 9)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,2:]-dat[:,0,2:]) , 0, 9)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,0]-dat[:,2:,0]) , 0, 9)\n\n def test_ft_aoao_pair_vs_fft(self):\n numpy.random.seed(1)\n kpti, kptj = numpy.random.random((2,3))\n coords = pdft.gen_grid.gen_uniform_grids(cell1)\n aoi = pdft.numint.eval_ao(cell1, coords, kpt=kpti)\n aoj = pdft.numint.eval_ao(cell1, coords, kpt=kptj)\n ngrids, nao = aoj.shape\n q = kptj - kpti\n dat = ft_ao.ft_aopair(cell1, cell1.Gv, kpti_kptj=(kpti,kptj), q=q)\n self.assertAlmostEqual(lib.fp(dat), 0.72664436503332241+3.2542145296611373j, 9)\n expmikr = numpy.exp(-1j*numpy.dot(coords,q))\n ref = numpy.asarray([tools.fftk(aoi[:,i].conj()*aoj[:,j], cell1.mesh, expmikr)\n for i in range(nao) for j in range(nao)])\n ref = ref.reshape(nao,nao,-1).transpose(2,0,1) * (cell1.vol/ngrids)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,0]-dat[:,0,0]) , 0, 7)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,1,1]-dat[:,1,1]) , 0, 7)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,2:]-dat[:,2:,2:]), 0, 7)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,0,2:]-dat[:,0,2:]) , 0, 7)\n self.assertAlmostEqual(numpy.linalg.norm(ref[:,2:,0]-dat[:,2:,0]) , 0, 7)\n\n def test_ft_aoao_with_kpts1(self):\n numpy.random.seed(1)\n kpti, kptj = kpts = numpy.random.random((2,3))\n Gv = cell.get_Gv([11]*3)\n q = numpy.random.random(3)\n dat = ft_ao.ft_aopair_kpts(cell, Gv, q=q, kptjs=kpts)\n self.assertAlmostEqual(lib.fp(dat[0]), (2.3753953914129382-2.5365192689115088j), 9)\n self.assertAlmostEqual(lib.fp(dat[1]), (2.4951510097641840-3.1990956672116355j), 9)\n dat = ft_ao.ft_aopair(cell, Gv)\n self.assertAlmostEqual(lib.fp(dat), (1.2534723618134684+1.830086071817564j), 9)\n\n def test_ft_aoao1(self):\n cell = pgto.Cell()\n cell.a = numpy.eye(3) * 5\n n = 15\n cell.mesh = numpy.array([n,n,n])\n cell.atom = '''C 1.3 .2 .3\n C .1 .1 1.1\n '''\n cell.basis = {'C': [[1, (0.6, 1)]]}\n cell.unit = 'B'\n cell.build(0,0)\n\n ao2 = ft_ao.ft_aopair(cell, 
cell.Gv)\n nao = cell.nao_nr()\n coords = cell.get_uniform_grids()\n aoR = cell.pbc_eval_gto('GTOval', coords)\n aoR2 = numpy.einsum('ki,kj->kij', aoR.conj(), aoR)\n ngrids = aoR.shape[0]\n\n ao2ref = [tools.fft(aoR2[:,i,j], cell.mesh) * cell.vol/ngrids\n for i in range(nao) for j in range(nao)]\n ao2ref = numpy.array(ao2ref).reshape(6,6,-1).transpose(2,0,1)\n self.assertAlmostEqual(abs(ao2ref - ao2).max(), 0, 6)\n\n aoG = ft_ao.ft_ao(cell, cell.Gv)\n aoref = [tools.fft(aoR[:,i], cell.mesh) * cell.vol/ngrids\n for i in range(nao)]\n self.assertAlmostEqual(abs(numpy.array(aoref).T - aoG).max(), 0, 6)\n\n def test_ft_aopair_bvk(self):\n from pyscf.pbc.tools import k2gamma\n n = 2\n cell = pgto.Cell()\n cell.a = numpy.eye(3) * 4\n cell.mesh = numpy.array([n,n,n])\n cell.atom = '''C 1.3 .2 .3\n C .1 .1 1.1\n '''\n cell.basis = 'ccpvdz'\n cell.unit = 'B'\n cell.build()\n\n kpts = cell.make_kpts([2,2,2])\n Gv, Gvbase, kws = cell.get_Gv_weights()\n b = cell.reciprocal_vectors()\n gxyz = lib.cartesian_prod([numpy.arange(len(x)) for x in Gvbase])\n bvk_kmesh = k2gamma.kpts_to_kmesh(cell, kpts)\n\n ref = ft_ao.ft_aopair_kpts(cell, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase, kptjs=kpts)\n aopair = ft_ao.ft_aopair_kpts(cell, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase,\n kptjs=kpts, bvk_kmesh=bvk_kmesh)\n self.assertAlmostEqual(abs(ref - aopair).max(), 0, 8)\n self.assertAlmostEqual(lib.fp(aopair), (-5.735639500461687-12.425151458809875j), 8)\n\nif __name__ == '__main__':\n print('Full Tests for ft_ao')\n unittest.main()\n"
] | [
[
"numpy.eye",
"numpy.einsum",
"numpy.diag",
"numpy.random.seed",
"numpy.tril_indices",
"numpy.random.random",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm"
]
] |
knutdrand/bdgtools | [
"18d21586515ec03e5fb96e959447f6b35e5350de"
] | [
"bdgtools/splitregions.py"
] | [
"import numpy as np\nfrom .regions import Regions\nclass SplitRegions:\n def __init__(self, regions, offsets):\n self._regions = regions\n self._offsets = offsets\n\n def get_signals(self, bedgraph):\n signals = bedgraph.extract_regions(self._regions)\n return signals.join_rows(self._offsets)\n \n def sizes(self):\n return np.diff(np.insert(np.cumsum(self._regions.sizes()), 0, 0)[self._offsets])\n \n @property\n def starts(self):\n return self._regions.starts[self._offsets[:-1]]\n\n @property\n def ends(self):\n return self._regions.ends[self._offsets[:-1]]\n\nclass Genes(SplitRegions):\n def __init__(self, *args, coding_regions):\n super().__init__(*args)\n assert np.all(coding_regions.ends<=self.sizes()), (coding_regions.ends[coding_regions.ends>=self.sizes()], self.sizes()[coding_regions.ends>=self.sizes()])\n self._coding_regions = coding_regions\n\n def __repr__(self):\n return f\"Genes({self._regions}, {self._offsets}, {self._coding_regions})\"\n\n def __eq__(self, other):\n t = self._regions==other._regions\n t &= np.all(self._offsets==other._offsets)\n t &= self._coding_regions == other._coding_regions\n return t\n \n # def get_signals(self, bedgraph):\n # signals = super().get_signals(bedgraph)\n # utr_l = Regions(np.zeros_like(self._coding_regions.starts),\n # self._coding_regions.starts)\n # utr_r = Regions(self._coding_regions.ends,\n # self.sizes())\n # return tuple(signals.extract_regions(regions)\n # for regions in (utr_l, self._coding_regions, utr_r))\n"
] | [
[
"numpy.all"
]
] |
wishprophet/TensorFlow | [
"aacb173a65fe9e392c1a72309aff58ed02f5c32d"
] | [
"test_tensorboard.py"
] | [
"# encoding: utf-8\n\n\"\"\"\n@version: ??\n@author: Mouse\n@license: Apache Licence\n@contact: [email protected]\n@software: PyCharm\n@file: tensorboard.py\n@time: 2018/5/10 9:36\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\ndef main():\n # 载入数据\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n # 每个批次大小\n batch_size = 50\n # 计算批次个数\n n_batch = mnist.train.num_examples // batch_size\n with tf.name_scope('input'):\n # 定义两个placeholder\n x = tf.placeholder(tf.float32, [None, 784], name='x-input')\n y = tf.placeholder(tf.float32, [None, 10], name='y-input')\n # dropout参数\n keep_prob = tf.placeholder(tf.float32)\n learning_rate = tf.Variable(0.001, dtype=tf.float32)\n\n with tf.name_scope('layer'):\n # 创建一个简单的神经网络\n # 隐藏层\n W1 = tf.Variable(tf.truncated_normal([784, 600], stddev=0.1))\n b1 = tf.Variable(tf.zeros([600]) + 0.1)\n L1 = tf.nn.tanh(tf.matmul(x, W1)+b1)\n L1_drop = tf.nn.dropout(L1, keep_prob)\n\n W2 = tf.Variable(tf.truncated_normal([600, 400], stddev=0.1))\n b2 = tf.Variable(tf.zeros([400]) + 0.1)\n L2 = tf.nn.tanh(tf.matmul(L1_drop, W2)+b2)\n L2_drop = tf.nn.dropout(L2, keep_prob)\n\n W3 = tf.Variable(tf.truncated_normal([400, 200], stddev=0.1))\n b3 = tf.Variable(tf.zeros([200]) + 0.1)\n L3 = tf.nn.tanh(tf.matmul(L2_drop, W3)+b3)\n L3_drop = tf.nn.dropout(L3, keep_prob)\n\n # 输出层\n # 权值初始化截断的正态分布标准差为0.1\n # 偏执值初始化 0+0.1\n W = tf.Variable(tf.truncated_normal([200, 10], stddev=0.1))\n b = tf.Variable(tf.zeros([10]) + 0.1)\n prediction = tf.nn.softmax(tf.matmul(L3_drop, W)+b)\n\n # 二次代价函数\n # loss = tf.reduce_mean(tf.square(y-prediction))\n # 交叉熵代价函数\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))\n # Adam\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n # 其他优化器\n # train_step = tf.train.AdamOptimizer(1e-2).minimize(loss)\n # 初始化\n init = tf.global_variables_initializer()\n\n # 结果存放在一个布尔型列表中\n # argmax返回一维张量中最大的值所在的位置,mnist中label:([1,0,0,0,0,0,0,0,0,0,0]),\n # agrmax返回的就是1所在的位置,如果预测值与所给的标签集相同,表示成功识别数字,返回值为1,反之为0\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))\n # cast转换数据类型,Bool-Float,然后计算平均值就是准确度\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n with tf.Session() as sess:\n sess.run(init)\n # 在当前目录下logs目录,存储图结构\n writer = tf.summary.FileWriter('logs/', sess.graph)\n for epoch in range(1):\n sess.run(tf.assign(learning_rate, 0.001*(0.95**epoch)))\n for batch in range(n_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.9})\n lr = sess.run(learning_rate)\n test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 0.9})\n train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels, keep_prob: 0.9})\n # train_acc 和 test_acc差的比较多说明过拟合\n print('epoch ' + str(epoch) + ' lr:' + str(lr) + ' test_acc:' + str(test_acc)+' train_acc:' + str(train_acc))\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.zeros",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.global_variables_initializer",
"tensorflow.truncated_normal",
"tensorflow.train.AdamOptimizer",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.matmul",
"tensorflow.cast",
"tensorflow.name_scope",
"tensorflow.assign",
"tensorflow.argmax",
"tensorflow.Variable",
"tensorflow.Session",
"tensorflow.summary.FileWriter",
"tensorflow.nn.dropout"
]
] |
OctaveLauby/olfactory | [
"679b67459c12002041a8f77e1bdffe33d776500b"
] | [
"olfactory/tests/test_preprocessing.py"
] | [
"import numpy as np\nimport pytest\n\nfrom olfactory import preprocessing\n\n\ndef test_pipe_filter():\n\n class Elem(dict):\n\n def __init__(self, dic):\n super().__init__(dic)\n self.id = dic['id']\n self.flags = set()\n\n def __getattr__(self, attribute):\n try:\n return self[attribute]\n except KeyError:\n raise AttributeError(\n \"%s is neither a param attribute nor a field\" % attribute\n ) from None\n\n def __eq__(self, other):\n if isinstance(other, int):\n return self.id == other\n else:\n super().__eq__(other)\n\n def block(func, flag=None):\n def nfunc(elem):\n if func(elem):\n if flag:\n elem.flags.add(flag)\n return True\n return False\n return nfunc\n\n elems = [\n Elem({'id': 0, 'active': True, 'hidden_key': \"haveit\"}),\n Elem({'id': 1, 'active': True, 'hidden_key': \"haveit\"}),\n Elem({'id': 2, 'active': False, 'hidden_key': \"haveit\"}),\n Elem({'id': 3, 'active': True}),\n Elem({'id': 4, 'active': True}),\n Elem({'id': 5, 'active': False}),\n ]\n\n # Filter inactive\n pipe = [block(lambda e: not e.active, \"inactive\")]\n assert preprocessing.pipe_filter(elems, pipe) == [0, 1, 3, 4]\n for elem in elems:\n if elem.id in [2, 5]:\n assert \"inactive\" in elem.flags\n\n # Filter inactive and with hidden key\n pipe = [\n block(lambda e: not e.active, \"inactive\"),\n block(lambda e: 'hidden_key' not in e, \"nohiddenkey\")\n ]\n assert preprocessing.pipe_filter(elems, pipe) == [0, 1]\n for elem in elems:\n if elem.id in [3, 4]:\n assert \"nohiddenkey\" in elem.flags\n if elem.id in [5]: # Fast break so no double flag\n assert \"nohiddenkey\" not in elem.flags\n\n assert preprocessing.pipe_filter(elems, pipe, fastbreak=False) == [0, 1]\n for elem in elems:\n if elem.id in [3, 4, 5]:\n assert \"nohiddenkey\" in elem.flags\n\n # Filter elems with flag\n pipe = [block(lambda e: e.flags)]\n kept, rm = preprocessing.pipe_filter(elems, pipe, w_rm=True)\n assert kept == [0, 1]\n assert rm == [2, 3, 4, 5]\n flags = preprocessing.pipe_filter(elems, pipe, r_flags=True)\n assert flags == [False, False, True, True, True, True]\n\n\ndef test_xy_merge():\n xy1 = ([1, 2, 3, 4], [10, 20, 30, 4])\n xy2 = ([0, 3.5, 4, 5, 6], [0, 0.35, 4, 0.5, 0.6])\n assert preprocessing.xy_merge(xy1, xy2) == (\n [0, 1, 2, 3, 3.5, 4, 4, 5, 6],\n [0, 10, 20, 30, 0.35, 4, 4, 0.5, 0.6]\n )\n\n x1 = np.array([1, 2, 4, 5, 8, 9])\n y1 = 10 * x1\n x2 = np.array([0, 2, 3, 10])\n y2 = 10 * x2\n assert preprocessing.xy_merge((x1, y1), (x2, y2)) == (\n [0, 1, 2, 2, 3, 4, 5, 8, 9, 10],\n [0, 10, 20, 20, 30, 40, 50, 80, 90, 100]\n )\n\n with pytest.raises(ValueError):\n preprocessing.xy_merge(([1], [1]), ([1], [10]))\n\n res = preprocessing.xy_merge(\n ([1], [1]), ([1], [10]), raise_err=False, twin_mean=True\n )\n assert res == ([1], [5.5])\n res = preprocessing.xy_merge(\n ([1], [1]), ([1], [10]), raise_err=False, twin_mean=False\n )\n assert res == ([1, 1], [1, 10])\n\n\ndef test_resample():\n\n X = np.array([30, 50, 85, 90])\n Y = np.array([.3, .5, .85, .9])\n\n assert preprocessing.resample(X, Y, step=10) == (\n [30, 40, 50, 60, 70, 80, 90],\n [.3, .4, .5, .6, .7, .8, .9]\n )\n assert preprocessing.resample(X, Y, step=30) == (\n [30, 60, 90],\n [.3, .6, .9]\n )\n assert preprocessing.resample(X, Y, step=40) == (\n [30, 70],\n [.3, .7]\n )\n assert preprocessing.resample(X, Y, n_pts=7) == (\n [30, 40, 50, 60, 70, 80, 90],\n [.3, .4, .5, .6, .7, .8, .9]\n )\n\n with pytest.raises(ValueError):\n preprocessing.resample(X, Y)\n\n with pytest.raises(ValueError):\n preprocessing.resample(X, Y, step=5, n_pts=5)\n\n with 
pytest.raises(ValueError):\n preprocessing.resample(X, Y, n_pts=1)\n\n\ndef test_rescale():\n\n # Classic use\n a = np.array([3, 10, 0, 5, 9])\n np.testing.assert_equal(preprocessing.rescale(a), [0.3, 1, 0, 0.5, 0.9])\n np.testing.assert_equal(\n preprocessing.rescale(a, bounds=(-20, 20)),\n [-8, 20, -20, 0, 16]\n )\n np.testing.assert_equal(\n preprocessing.rescale(a, batch=2),\n [1.5 / 8, 8.5 / 8, -1.5 / 8, 3.5 / 8, 7.5 / 8]\n )\n np.testing.assert_equal(\n preprocessing.rescale(a, bounds=(0, 8), batch=2),\n [1.5, 8.5, -1.5, 3.5, 7.5]\n )\n\n # Using is_sorted\n s_a = np.sort(a)\n np.testing.assert_equal(\n preprocessing.rescale(s_a, is_sorted=True),\n [0, 0.3, 0.5, 0.9, 1]\n )\n np.testing.assert_equal(\n preprocessing.rescale(np.flip(s_a, axis=0), is_sorted=True, is_reversed=True),\n [1, 0.9, 0.5, 0.3, 0]\n )\n np.testing.assert_equal(\n preprocessing.rescale(np.flip(s_a, axis=0), is_sorted=True),\n [0, 0.1, 0.5, 0.7, 1]\n )\n\n # Using is_sorted when it is not\n np.testing.assert_equal(\n preprocessing.rescale(a, bounds=(0, 6), is_sorted=True),\n [0, 7, -3, 2, 6]\n )\n np.testing.assert_equal(\n preprocessing.rescale(np.flip(a, axis=0), bounds=(0, 6), is_sorted=True),\n [0, 4, 9, -1, 6]\n )\n\n # Batch greater than len(a)\n with pytest.raises(ValueError):\n preprocessing.rescale(a, batch=5)\n np.testing.assert_equal(\n preprocessing.rescale(a, batch=5, bounds=(0, 8), adapt=True),\n [1.5, 8.5, -1.5, 3.5, 7.5]\n )\n"
] | [
[
"numpy.array",
"numpy.sort",
"numpy.flip"
]
] |
Ohtani-y/open_model_zoo | [
"280b59fc6c00455889a1949c795558252fdad96f",
"2543996541346418919c5cddfb71e33e2cdef080"
] | [
"tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/kaldi_speech_recognition_pipeline.py",
"demos/multi_camera_multi_target_tracking_demo/python/mc_tracker/sct.py"
] | [
"\"\"\"\nCopyright (c) 2018-2021 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\nfrom ..data_readers import KaldiMatrixIdentifier, KaldiARKReader, KaldiFrameIdentifier\nfrom ..representation import CharacterRecognitionAnnotation, RegressionAnnotation\nfrom ..config import PathField, BoolField, StringField\nfrom ..utils import read_txt\nfrom .format_converter import BaseFormatConverter, ConverterReturn\n\n\nclass KaldiSpeechRecognitionDataConverter(BaseFormatConverter):\n __provider__ = 'kaldi_asr_data'\n\n @classmethod\n def parameters(cls):\n params = super().parameters()\n params.update({\n 'annotation_file': PathField(description='file with gt transcription'),\n 'data_dir': PathField(description='directory with ark files', is_directory=True),\n 'features_subset_file': PathField(description='file with list testing ark files', optional=True),\n 'ivectors': BoolField(optional=True, default=False, description='include ivectors features')\n })\n return params\n\n def configure(self):\n self.annotation_file = self.get_value_from_config('annotation_file')\n self.data_dir = self.get_value_from_config('data_dir')\n self.feat_list_file = self.get_value_from_config('features_subset_file')\n self.ivectors = self.get_value_from_config('ivectors')\n\n def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):\n ark_list = self.select_subset()\n transcripts = self.read_annotation()\n annotations = []\n for ark in ark_list:\n ivect = None\n if isinstance(ark, tuple):\n ark, ivect = ark\n utterances = KaldiARKReader.read_frames(ark)\n for utt in utterances:\n if utt not in transcripts:\n continue\n\n identifier = (\n KaldiMatrixIdentifier(ark.name, utt)\n if not ivect else [KaldiMatrixIdentifier(ark.name, utt), KaldiMatrixIdentifier(ivect.name, utt)]\n )\n\n gt = transcripts[utt]\n annotations.append(CharacterRecognitionAnnotation(identifier, gt))\n\n return ConverterReturn(annotations, None, None)\n\n def select_subset(self):\n if not self.ivectors:\n if self.feat_list_file:\n return [self.data_dir / ark for ark in read_txt(self.feat_list_file)]\n return list(self.data_dir.glob('*.ark'))\n if self.feat_list_file:\n return [\n (self.data_dir / ark.split(' ')[0], self.data_dir / ark.split(' ')[1])\n for ark in read_txt(self.feat_list_file)\n ]\n pairs = []\n for ivector_file in self.data_dir.glob(\"*_ivector*.ark\"):\n feats_file = self.data_dir / ivector_file.name.replace('_ivector', '')\n if not feats_file.exists():\n continue\n pairs.append((feats_file, ivector_file))\n return pairs\n\n def read_annotation(self):\n trascript_dict = {}\n for line in read_txt(self.annotation_file):\n utterance_key, text = line.split(' ', 1)\n trascript_dict[utterance_key] = text\n return trascript_dict\n\n\nclass KaldiFeatureRegressionConverter(BaseFormatConverter):\n __provider__ = 'kaldi_feat_regression'\n\n @classmethod\n def parameters(cls):\n params = super().parameters()\n params.update({\n 'data_dir': PathField(description='directory with ark 
files', is_directory=True),\n 'ref_data_dir': PathField(description='directory with ref data', is_directory=True, optional=True),\n 'features_subset_file': PathField(description='file with list testing ark files', optional=True),\n 'ivectors': BoolField(optional=True, default=False, description='include ivectors features'),\n 'ref_file_suffix': StringField(optional=True, default='_kaldi_score'),\n 'vectors_mode': BoolField(optional=True, default=True, description='Split data to vectors'),\n 'utterance_name_agnostic': BoolField(\n optional=True, default=False, description='do not match names per utterance'\n ),\n 'use_numpy_data': BoolField(\n optional=True, default=False, description='allow to search npz files instead of ark'\n )\n })\n return params\n\n def configure(self):\n self.data_dir = self.get_value_from_config('data_dir')\n self.feat_list_file = self.get_value_from_config('features_subset_file')\n self.ivectors = self.get_value_from_config('ivectors')\n self.ref_data_dir = self.get_value_from_config('ref_data_dir')\n if self.ref_data_dir is None:\n self.ref_data_dir = self.data_dir\n self.ref_file_suffix = self.get_value_from_config('ref_file_suffix')\n self.vectors_mode = self.get_value_from_config('vectors_mode')\n self.utt_agnostic = self.get_value_from_config('utterance_name_agnostic')\n self.file_ext = '.ark' if not self.get_value_from_config('use_numpy_data') else '.npz'\n\n def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):\n ark_list = self.select_subset()\n annotation = []\n for files in ark_list:\n input_files, ref_ark = files[:-1], files[-1]\n if not self.utt_agnostic:\n annotation = self._convert_utt_specific(input_files, ref_ark, annotation)\n else:\n annotation = self._convert_utt_agnostic(input_files, ref_ark, annotation)\n\n return ConverterReturn(annotation, None, None)\n\n def _convert_utt_agnostic(self, input_files, ref_ark, annotation):\n input_utts = []\n for in_file in input_files:\n input_utts.append(KaldiARKReader.read_frames(in_file) if in_file.suffix != '.npz' else np.load(in_file))\n utt_ids = [list(in_utt.keys()) for in_utt in input_utts]\n ref_scores = KaldiARKReader.read_frames(ref_ark) if ref_ark.suffix != '.npz' else np.load(ref_ark)\n for idx, (_, ref_matrix) in enumerate(ref_scores.items()):\n current_utts = [u[idx] for u in utt_ids]\n if self.vectors_mode:\n for v_idx, ref_v in enumerate(ref_matrix):\n identifier = [\n KaldiFrameIdentifier(in_file.name, utt, v_idx)\n if in_file.suffix != '.npz' else generate_numpy_identifier(in_file.name, utt, v_idx)\n for in_file, utt in zip(input_files, current_utts)\n ]\n if len(identifier) == 1:\n identifier = identifier[0]\n annotation.append(RegressionAnnotation(identifier, ref_v))\n else:\n identifier = [\n KaldiMatrixIdentifier(in_file.name, utt)\n if in_file.suffix != '.npz' else generate_numpy_identifier(in_file.name, utt)\n for in_file, utt in zip(input_files, current_utts)\n ]\n if len(identifier) == 1:\n identifier = identifier[0]\n annotation.append(RegressionAnnotation(identifier, ref_matrix))\n return annotation\n\n def _convert_utt_specific(self, input_files, ref_ark, annotation):\n utterances = (\n KaldiARKReader.read_frames(input_files[0])\n if input_files[0].suffix != '.npz' else dict(np.load(input_files[0])))\n ref_scores = KaldiARKReader.read_frames(ref_ark) if ref_ark.suffix != '.npz' else dict(np.load(ref_ark))\n for utt, matrix in utterances.items():\n if utt not in ref_scores:\n continue\n ref_matrix = ref_scores[utt]\n if 
self.vectors_mode:\n for vector_id, _ in enumerate(matrix):\n identifier = [\n KaldiFrameIdentifier(in_file.name, utt, vector_id)\n if in_file.suffix != '.npz' else generate_numpy_identifier(in_file.name, utt, vector_id)\n for in_file in input_files\n ]\n if len(identifier) == 1:\n identifier = identifier[0]\n ref_vector = ref_matrix[vector_id]\n annotation.append(RegressionAnnotation(identifier, ref_vector))\n else:\n identifier = [KaldiMatrixIdentifier(in_file.name, utt)\n if in_file.suffix != '.npz' else generate_numpy_identifier(in_file.name, utt)\n for in_file in input_files]\n if len(identifier) == 1:\n identifier = identifier[0]\n annotation.append(RegressionAnnotation(identifier, ref_matrix))\n return annotation\n\n def select_subset(self):\n if self.feat_list_file:\n subset = []\n for ark in read_txt(self.feat_list_file):\n files = [self.data_dir / f for f in ark.split(' ')[:-1]]\n files.append(self.ref_data_dir / ark.split(' ')[-1])\n subset.append(files)\n return subset\n\n if not self.ivectors:\n pairs = []\n for ark_file in self.data_dir.glob('*{}'.format(self.file_ext)):\n if self.data_dir == self.ref_data_dir and self.ref_file_suffix in ark_file.name:\n continue\n ref_file = self.ref_data_dir / ark_file.name.replace(self.file_ext, self.ref_file_suffix+self.file_ext)\n pairs.append((ark_file, ref_file))\n return pairs\n triples = []\n for ivector_file in self.data_dir.glob(\"*_ivector{}\".format(self.file_ext)):\n feats_file = self.data_dir / ivector_file.name.replace('_ivector', '')\n ref_file = self.ref_data_dir / ivector_file.name.replace('_ivector', self.ref_file_suffix)\n if not feats_file.exists() or not ref_file.exists():\n continue\n triples.append((feats_file, ivector_file, ref_file))\n return triples\n\n\ndef generate_numpy_identifier(file_name, array_id, idx=None):\n return '{}{}#{}'.format(array_id, '' if idx is None else '_{}'.format(idx), file_name)\n",
"\"\"\"\n Copyright (c) 2019 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport random\nfrom copy import deepcopy as copy\nfrom collections import namedtuple\n\nimport cv2\n\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\nfrom scipy.spatial.distance import cosine, cdist\n\nfrom utils.analyzer import Analyzer\nfrom utils.misc import AverageEstimator\n\nTHE_BIGGEST_DISTANCE = 10.\n\nTrackedObj = namedtuple('TrackedObj', 'rect label')\n\n\nclass ClusterFeature:\n def __init__(self, feature_len, initial_feature=None):\n self.clusters = []\n self.clusters_sizes = []\n self.feature_len = feature_len\n if initial_feature is not None:\n self.clusters.append(initial_feature)\n self.clusters_sizes.append(1)\n\n def update(self, feature_vec):\n if len(self.clusters) < self.feature_len:\n self.clusters.append(feature_vec)\n self.clusters_sizes.append(1)\n elif sum(self.clusters_sizes) < 2*self.feature_len:\n idx = random.randint(0, self.feature_len - 1) # nosec - disable B311:random check\n self.clusters_sizes[idx] += 1\n self.clusters[idx] += (feature_vec - self.clusters[idx]) / \\\n self.clusters_sizes[idx]\n else:\n distances = cdist(feature_vec.reshape(1, -1),\n np.array(self.clusters).reshape(len(self.clusters), -1), 'cosine')\n nearest_idx = np.argmin(distances)\n self.clusters_sizes[nearest_idx] += 1\n self.clusters[nearest_idx] += (feature_vec - self.clusters[nearest_idx]) / \\\n self.clusters_sizes[nearest_idx]\n\n def merge(self, features, other, other_features):\n if len(features) > len(other_features):\n for feature in other_features:\n if feature is not None:\n self.update(feature)\n else:\n for feature in features:\n if feature is not None:\n other.update(feature)\n self.clusters = copy(other.clusters)\n self.clusters_sizes = copy(other.clusters_sizes)\n\n def get_clusters_matrix(self):\n return np.array(self.clusters).reshape(len(self.clusters), -1)\n\n def __len__(self):\n return len(self.clusters)\n\n\nclass OrientationFeature:\n def __init__(self, feature_len, initial_feature=(None, None)):\n assert feature_len > 0\n self.orientation_features = [AverageEstimator() for _ in range(feature_len)]\n self.is_initialized = False\n if initial_feature[0] is not None and initial_feature[1] is not None and initial_feature[1] >= 0:\n self.is_initialized = True\n self.orientation_features[initial_feature[1]].update(initial_feature[0])\n\n def is_valid(self):\n return self.is_initialized\n\n def update(self, new_feature, idx):\n if idx >= 0:\n self.is_initialized = True\n self.orientation_features[idx].update(new_feature)\n\n def merge(self, other):\n for f1, f2 in zip(self.orientation_features, other.orientation_features):\n f1.merge(f2)\n self.is_initialized |= f1.is_valid()\n\n def dist_to_other(self, other):\n distances = [1.]\n for f1, f2 in zip(self.orientation_features, other.orientation_features):\n if f1.is_valid() and f2.is_valid():\n distances.append(0.5 * cosine(f1.get(), f2.get()))\n return min(distances)\n\n def dist_to_vec(self, vec, 
orientation):\n assert orientation < len(self.orientation_features)\n if orientation >= 0 and self.orientation_features[orientation].is_valid():\n return 0.5 * cosine(vec, self.orientation_features[orientation].get())\n return 1.\n\n\ndef clusters_distance(clusters1, clusters2):\n if len(clusters1) > 0 and len(clusters2) > 0:\n distances = 0.5 * cdist(clusters1.get_clusters_matrix(),\n clusters2.get_clusters_matrix(), 'cosine')\n return np.amin(distances)\n return 1.\n\n\ndef clusters_vec_distance(clusters, feature):\n if len(clusters) > 0 and feature is not None:\n distances = 0.5 * cdist(clusters.get_clusters_matrix(),\n feature.reshape(1, -1), 'cosine')\n return np.amin(distances)\n return 1.\n\n\nclass Track:\n def __init__(self, id, cam_id, box, time, feature=None, num_clusters=4, crops=None, orientation=None):\n self.id = id\n self.cam_id = cam_id\n self.f_avg = AverageEstimator()\n self.f_clust = ClusterFeature(num_clusters)\n self.f_orient = OrientationFeature(4, (feature, orientation))\n self.features = [feature]\n self.boxes = [box]\n self.timestamps = [time]\n self.crops = [crops]\n if feature is not None:\n self.f_avg.update(feature)\n self.f_clust.update(feature)\n\n def get_last_feature(self):\n return self.features[-1]\n\n def get_end_time(self):\n return self.timestamps[-1]\n\n def get_start_time(self):\n return self.timestamps[0]\n\n def get_last_box(self):\n return self.boxes[-1]\n\n def __len__(self):\n return len(self.timestamps)\n\n def _interpolate(self, target_box, timestamp, skip_size):\n last_box = self.get_last_box()\n for t in range(1, skip_size):\n interp_box = [int(b1 + (b2 - b1) / skip_size * t) for b1, b2 in zip(last_box, target_box)]\n self.boxes.append(interp_box)\n self.timestamps.append(self.get_end_time() + 1)\n self.features.append(None)\n\n def _filter_last_box(self, filter_speed):\n if self.timestamps[-1] - self.timestamps[-2] == 1:\n filtered_box = list(self.boxes[-2])\n for j in range(len(self.boxes[-1])):\n filtered_box[j] = int((1 - filter_speed) * filtered_box[j]\n + filter_speed * self.boxes[-1][j])\n self.boxes[-1] = tuple(filtered_box)\n\n def add_detection(self, box, feature, timestamp, max_skip_size=1, filter_speed=0.7, crop=None):\n skip_size = timestamp - self.get_end_time()\n if 1 < skip_size <= max_skip_size:\n self._interpolate(box, timestamp, skip_size)\n assert self.get_end_time() == timestamp - 1\n\n self.boxes.append(box)\n self.timestamps.append(timestamp)\n self.features.append(feature)\n self._filter_last_box(filter_speed)\n if feature is not None:\n self.f_clust.update(feature)\n self.f_avg.update(feature)\n if crop is not None:\n self.crops.append(crop)\n\n def merge_continuation(self, other, interpolate_time_thresh=0):\n assert self.get_end_time() < other.get_start_time()\n skip_size = other.get_start_time() - self.get_end_time()\n if 1 < skip_size <= interpolate_time_thresh:\n self._interpolate(other.boxes[0], other.get_start_time(), skip_size)\n assert self.get_end_time() == other.get_start_time() - 1\n\n self.f_avg.merge(other.f_avg)\n self.f_clust.merge(self.features, other.f_clust, other.features)\n self.f_orient.merge(other.f_orient)\n self.timestamps += other.timestamps\n self.boxes += other.boxes\n self.features += other.features\n self.crops += other.crops\n\n\nclass SingleCameraTracker:\n def __init__(self, id, global_id_getter, global_id_releaser,\n reid_model=None,\n time_window=10,\n continue_time_thresh=2,\n track_clear_thresh=3000,\n match_threshold=0.4,\n merge_thresh=0.35,\n n_clusters=4,\n 
max_bbox_velocity=0.2,\n detection_occlusion_thresh=0.7,\n track_detection_iou_thresh=0.5,\n process_curr_features_number=0,\n visual_analyze=None,\n interpolate_time_thresh=10,\n detection_filter_speed=0.7,\n rectify_thresh=0.25):\n self.reid_model = reid_model\n self.global_id_getter = global_id_getter\n self.global_id_releaser = global_id_releaser\n self.id = id\n self.tracks = []\n self.history_tracks = []\n self.time = 0\n assert time_window >= 1\n self.time_window = time_window\n assert continue_time_thresh >= 1\n self.continue_time_thresh = continue_time_thresh\n assert track_clear_thresh >= 1\n self.track_clear_thresh = track_clear_thresh\n assert 0 <= match_threshold <= 1\n self.match_threshold = match_threshold\n assert 0 <= merge_thresh <= 1\n self.merge_thresh = merge_thresh\n assert n_clusters >= 1\n self.n_clusters = n_clusters\n assert 0 <= max_bbox_velocity\n self.max_bbox_velocity = max_bbox_velocity\n assert 0 <= detection_occlusion_thresh <= 1\n self.detection_occlusion_thresh = detection_occlusion_thresh\n assert 0 <= track_detection_iou_thresh <= 1\n self.track_detection_iou_thresh = track_detection_iou_thresh\n self.process_curr_features_number = process_curr_features_number\n assert interpolate_time_thresh >= 0\n self.interpolate_time_thresh = interpolate_time_thresh\n assert 0 <= detection_filter_speed <= 1\n self.detection_filter_speed = detection_filter_speed\n self.rectify_time_thresh = self.continue_time_thresh * 4\n self.rectify_length_thresh = self.time_window // 2\n assert 0 <= rectify_thresh <= 1\n self.rectify_thresh = rectify_thresh\n\n self.analyzer = None\n self.current_detections = None\n\n if visual_analyze is not None and visual_analyze.enable:\n self.analyzer = Analyzer(self.id, **vars(visual_analyze))\n\n def process(self, frame, detections, mask=None):\n reid_features = [None]*len(detections)\n if self.reid_model:\n reid_features = self._get_embeddings(frame, detections, mask)\n\n assignment = self._continue_tracks(detections, reid_features)\n self._create_new_tracks(detections, reid_features, assignment)\n self._clear_old_tracks()\n self._rectify_tracks()\n if self.time % self.time_window == 0:\n self._merge_tracks()\n if self.analyzer:\n self.analyzer.plot_timeline(self.id, self.time, self.tracks)\n self.time += 1\n\n def get_tracked_objects(self):\n label = 'ID'\n objs = []\n for track in self.tracks:\n if track.get_end_time() == self.time - 1 and len(track) > self.time_window:\n objs.append(TrackedObj(track.get_last_box(),\n label + ' ' + str(track.id)))\n elif track.get_end_time() == self.time - 1 and len(track) <= self.time_window:\n objs.append(TrackedObj(track.get_last_box(), label + ' -1'))\n return objs\n\n def get_tracks(self):\n return self.tracks\n\n def get_archived_tracks(self):\n return self.history_tracks\n\n def check_and_merge(self, track_source, track_candidate):\n id_candidate = track_source.id\n idx = -1\n for i, track in enumerate(self.tracks):\n if track.boxes == track_candidate.boxes:\n idx = i\n if idx < 0: # in this case track already has been modified, merge is invalid\n return\n\n collisions_found = False\n for i, hist_track in enumerate(self.history_tracks):\n if hist_track.id == id_candidate \\\n and not (hist_track.get_end_time() < self.tracks[idx].get_start_time()\n or self.tracks[idx].get_end_time() < hist_track.get_start_time()):\n collisions_found = True\n break\n\n for i, track in enumerate(self.tracks):\n if track is not None and track.id == id_candidate:\n collisions_found = True\n break\n\n if not 
collisions_found:\n self.tracks[idx].id = id_candidate\n self.tracks[idx].f_clust.merge(self.tracks[idx].features,\n track_source.f_clust, track_source.features)\n track_candidate.f_clust = copy(self.tracks[idx].f_clust)\n self.tracks = list(filter(None, self.tracks))\n\n def _continue_tracks(self, detections, features):\n active_tracks_idx = []\n for i, track in enumerate(self.tracks):\n if track.get_end_time() >= self.time - self.continue_time_thresh:\n active_tracks_idx.append(i)\n\n occluded_det_idx = []\n for i, det1 in enumerate(detections):\n for j, det2 in enumerate(detections):\n if i != j and self._ios(det1, det2) > self.detection_occlusion_thresh:\n occluded_det_idx.append(i)\n features[i] = None\n break\n\n cost_matrix = self._compute_detections_assignment_cost(active_tracks_idx, detections, features)\n\n assignment = [None for _ in range(cost_matrix.shape[0])]\n if cost_matrix.size > 0:\n row_ind, col_ind = linear_sum_assignment(cost_matrix)\n for i, j in zip(row_ind, col_ind):\n idx = active_tracks_idx[j]\n if cost_matrix[i, j] < self.match_threshold and \\\n self._check_velocity_constraint(self.tracks[idx].get_last_box(),\n self.tracks[idx].get_end_time(),\n detections[i], self.time) and \\\n self._iou(self.tracks[idx].boxes[-1], detections[i]) > self.track_detection_iou_thresh:\n assignment[i] = j\n\n for i, j in enumerate(assignment):\n if j is not None:\n idx = active_tracks_idx[j]\n crop = self.current_detections[i] if self.current_detections is not None else None\n self.tracks[idx].add_detection(detections[i], features[i],\n self.time, self.continue_time_thresh,\n self.detection_filter_speed, crop)\n return assignment\n\n def _clear_old_tracks(self):\n clear_tracks = []\n for track in self.tracks:\n # remove too old tracks\n if track.get_end_time() < self.time - self.track_clear_thresh:\n track.features = []\n self.history_tracks.append(track)\n continue\n # remove too short and outdated tracks\n if track.get_end_time() < self.time - self.continue_time_thresh \\\n and len(track) < self.time_window:\n self.global_id_releaser(track.id)\n continue\n clear_tracks.append(track)\n self.tracks = clear_tracks\n\n def _rectify_tracks(self):\n active_tracks_idx = []\n not_active_tracks_idx = []\n for i, track in enumerate(self.tracks):\n if track.get_end_time() >= self.time - self.rectify_time_thresh \\\n and len(track) >= self.rectify_length_thresh:\n active_tracks_idx.append(i)\n elif len(track) >= self.rectify_length_thresh:\n not_active_tracks_idx.append(i)\n\n distance_matrix = np.zeros((len(active_tracks_idx),\n len(not_active_tracks_idx)), dtype=np.float32)\n for i, idx1 in enumerate(active_tracks_idx):\n for j, idx2 in enumerate(not_active_tracks_idx):\n distance_matrix[i, j] = self._get_rectification_distance(self.tracks[idx1], self.tracks[idx2])\n\n indices_rows = np.arange(distance_matrix.shape[0])\n indices_cols = np.arange(distance_matrix.shape[1])\n\n while len(indices_rows) > 0 and len(indices_cols) > 0:\n i, j = np.unravel_index(np.argmin(distance_matrix), distance_matrix.shape)\n dist = distance_matrix[i, j]\n if dist < self.rectify_thresh:\n self._concatenate_tracks(active_tracks_idx[indices_rows[i]],\n not_active_tracks_idx[indices_cols[j]])\n distance_matrix = np.delete(distance_matrix, i, 0)\n indices_rows = np.delete(indices_rows, i)\n distance_matrix = np.delete(distance_matrix, j, 1)\n indices_cols = np.delete(indices_cols, j)\n else:\n break\n self.tracks = list(filter(None, self.tracks))\n\n def _get_rectification_distance(self, track1, track2):\n if 
(track1.get_start_time() > track2.get_end_time()\n or track2.get_start_time() > track1.get_end_time()) \\\n and track1.f_avg.is_valid() and track2.f_avg.is_valid() \\\n and self._check_tracks_velocity_constraint(track1, track2):\n return clusters_distance(track1.f_clust, track2.f_clust)\n return THE_BIGGEST_DISTANCE\n\n def _merge_tracks(self):\n distance_matrix = self._get_merge_distance_matrix()\n\n tracks_indices = np.arange(distance_matrix.shape[0])\n\n while len(tracks_indices) > 0:\n i, j = np.unravel_index(np.argmin(distance_matrix), distance_matrix.shape)\n dist = distance_matrix[i, j]\n if dist < self.merge_thresh:\n kept_idx = self._concatenate_tracks(tracks_indices[i], tracks_indices[j])\n deleted_idx = tracks_indices[i] if kept_idx == tracks_indices[j] else tracks_indices[j]\n assert self.tracks[deleted_idx] is None\n if deleted_idx == tracks_indices[i]:\n idx_to_delete = i\n idx_to_update = j\n else:\n assert deleted_idx == tracks_indices[j]\n idx_to_delete = j\n idx_to_update = i\n updated_row = self._get_updated_merge_distance_matrix_row(kept_idx,\n deleted_idx,\n tracks_indices)\n distance_matrix[idx_to_update, :] = updated_row\n distance_matrix[:, idx_to_update] = updated_row\n distance_matrix = np.delete(distance_matrix, idx_to_delete, 0)\n distance_matrix = np.delete(distance_matrix, idx_to_delete, 1)\n tracks_indices = np.delete(tracks_indices, idx_to_delete)\n else:\n break\n\n self.tracks = list(filter(None, self.tracks))\n\n def _get_merge_distance(self, track1, track2):\n if (track1.get_start_time() > track2.get_end_time()\n or track2.get_start_time() > track1.get_end_time()) \\\n and track1.f_avg.is_valid() and track2.f_avg.is_valid() \\\n and self._check_tracks_velocity_constraint(track1, track2):\n f_avg_dist = 0.5 * cosine(track1.f_avg.get(), track2.f_avg.get())\n if track1.f_orient.is_valid():\n f_complex_dist = track1.f_orient.dist_to_other(track2.f_orient)\n else:\n f_complex_dist = clusters_distance(track1.f_clust, track2.f_clust)\n return min(f_avg_dist, f_complex_dist)\n\n return THE_BIGGEST_DISTANCE\n\n def _get_merge_distance_matrix(self):\n distance_matrix = THE_BIGGEST_DISTANCE*np.eye(len(self.tracks), dtype=np.float32)\n for i, track1 in enumerate(self.tracks):\n for j, track2 in enumerate(self.tracks):\n if i < j:\n distance_matrix[i, j] = self._get_merge_distance(track1, track2)\n distance_matrix += np.transpose(distance_matrix)\n return distance_matrix\n\n def _get_updated_merge_distance_matrix_row(self, update_idx, ignore_idx, alive_indices):\n distance_matrix = THE_BIGGEST_DISTANCE*np.ones(len(alive_indices), dtype=np.float32)\n for i, idx in enumerate(alive_indices):\n if idx != update_idx and idx != ignore_idx:\n distance_matrix[i] = self._get_merge_distance(self.tracks[update_idx], self.tracks[idx])\n return distance_matrix\n\n def _concatenate_tracks(self, i, idx):\n if self.tracks[i].get_end_time() < self.tracks[idx].get_start_time():\n self.tracks[i].merge_continuation(self.tracks[idx], self.interpolate_time_thresh)\n self.tracks[idx] = None\n return i\n else:\n assert self.tracks[idx].get_end_time() < self.tracks[i].get_start_time()\n self.tracks[idx].merge_continuation(self.tracks[i], self.interpolate_time_thresh)\n self.tracks[i] = None\n return idx\n\n def _create_new_tracks(self, detections, features, assignment):\n assert len(detections) == len(features)\n for i, j in enumerate(assignment):\n if j is None:\n crop = self.current_detections[i] if self.analyzer else None\n self.tracks.append(Track(self.global_id_getter(), self.id,\n 
detections[i], self.time, features[i],\n self.n_clusters, crop, None))\n\n def _compute_detections_assignment_cost(self, active_tracks_idx, detections, features):\n cost_matrix = np.zeros((len(detections), len(active_tracks_idx)), dtype=np.float32)\n if self.analyzer and len(self.tracks) > 0:\n self.analyzer.prepare_distances(self.tracks, self.current_detections)\n\n for i, idx in enumerate(active_tracks_idx):\n track_box = self.tracks[idx].get_last_box()\n for j, d in enumerate(detections):\n iou_dist = 0.5 * (1 - self._giou(d, track_box))\n reid_dist_curr, reid_dist_avg, reid_dist_clust = None, None, None\n if self.tracks[idx].f_avg.is_valid() and features[j] is not None \\\n and self.tracks[idx].get_last_feature() is not None:\n reid_dist_avg = 0.5 * cosine(self.tracks[idx].f_avg.get(), features[j])\n reid_dist_curr = 0.5 * cosine(self.tracks[idx].get_last_feature(), features[j])\n\n if self.process_curr_features_number > 0:\n num_features = len(self.tracks[idx])\n step = -(-num_features // self.process_curr_features_number)\n step = step if step > 0 else 1\n start_index = 0 if self.process_curr_features_number > 1 else num_features - 1\n for s in range(start_index, num_features - 1, step):\n if self.tracks[idx].features[s] is not None:\n reid_dist_curr = min(reid_dist_curr, 0.5 * cosine(self.tracks[idx].features[s], features[j]))\n\n reid_dist_clust = clusters_vec_distance(self.tracks[idx].f_clust, features[j])\n reid_dist = min(reid_dist_avg, reid_dist_curr, reid_dist_clust)\n else:\n reid_dist = 0.5\n cost_matrix[j, i] = iou_dist * reid_dist\n if self.analyzer:\n self.analyzer.visualize_distances(idx, j, [reid_dist_curr, reid_dist_avg, reid_dist_clust, 1 - iou_dist])\n if self.analyzer:\n self.analyzer.visualize_distances(affinity_matrix=1 - cost_matrix, active_tracks_idx=active_tracks_idx)\n self.analyzer.show_all_dist_imgs(self.time, len(self.tracks))\n return cost_matrix\n\n @staticmethod\n def _area(box):\n return max((box[2] - box[0]), 0) * max((box[3] - box[1]), 0)\n\n def _giou(self, b1, b2, a1=None, a2=None):\n if a1 is None:\n a1 = self._area(b1)\n if a2 is None:\n a2 = self._area(b2)\n intersection = self._area([max(b1[0], b2[0]), max(b1[1], b2[1]),\n min(b1[2], b2[2]), min(b1[3], b2[3])])\n\n enclosing = self._area([min(b1[0], b2[0]), min(b1[1], b2[1]),\n max(b1[2], b2[2]), max(b1[3], b2[3])])\n u = a1 + a2 - intersection\n iou = intersection / u if u > 0 else 0\n giou = iou - (enclosing - u) / enclosing if enclosing > 0 else -1\n return giou\n\n def _iou(self, b1, b2, a1=None, a2=None):\n if a1 is None:\n a1 = self._area(b1)\n if a2 is None:\n a2 = self._area(b2)\n intersection = self._area([max(b1[0], b2[0]), max(b1[1], b2[1]),\n min(b1[2], b2[2]), min(b1[3], b2[3])])\n\n u = a1 + a2 - intersection\n return intersection / u if u > 0 else 0\n\n def _ios(self, b1, b2, a1=None, a2=None):\n # intersection over self\n if a1 is None:\n a1 = self._area(b1)\n intersection = self._area([max(b1[0], b2[0]), max(b1[1], b2[1]),\n min(b1[2], b2[2]), min(b1[3], b2[3])])\n return intersection / a1 if a1 > 0 else 0\n\n def _get_embeddings(self, frame, detections, mask=None):\n rois = []\n embeddings = []\n\n if self.analyzer:\n self.current_detections = []\n\n for i in range(len(detections)):\n rect = detections[i]\n left, top, right, bottom = rect\n crop = frame[top:bottom, left:right]\n if mask and len(mask[i]) > 0:\n crop = cv2.bitwise_and(crop, crop, mask=mask[i])\n if left != right and top != bottom:\n rois.append(crop)\n\n if self.analyzer:\n 
self.current_detections.append(cv2.resize(crop, self.analyzer.crop_size))\n\n if rois:\n embeddings = self.reid_model.forward(rois)\n assert len(rois) == len(embeddings)\n\n return embeddings\n\n def _check_tracks_velocity_constraint(self, track1, track2):\n if track1.get_end_time() < track2.get_start_time():\n return self._check_velocity_constraint(track1.get_last_box(), track1.get_end_time(),\n track2.boxes[0], track2.get_start_time())\n else:\n return self._check_velocity_constraint(track2.get_last_box(), track2.get_end_time(),\n track1.boxes[0], track1.get_start_time())\n\n def _check_velocity_constraint(self, detection1, det1_time, detection2, det2_time):\n dt = abs(det2_time - det1_time)\n avg_size = 0\n for det in [detection1, detection2]:\n avg_size += 0.5 * (abs(det[2] - det[0]) + abs(det[3] - det[1]))\n avg_size *= 0.5\n shifts = [abs(x - y) for x, y in zip(detection1, detection2)]\n velocity = sum(shifts) / len(shifts) / dt / avg_size\n if velocity > self.max_bbox_velocity:\n return False\n return True\n"
] | [
[
"numpy.load"
],
[
"numpy.transpose",
"numpy.argmin",
"scipy.optimize.linear_sum_assignment",
"numpy.arange",
"numpy.amin",
"numpy.delete",
"numpy.array",
"scipy.spatial.distance.cosine"
]
] |
eddddddy/Pyxelate | [
"9c7656c35fc8fda497fa496b6758c395716507aa"
] | [
"pyxelate/universe.py"
] | [
"from copy import copy\nfrom typing import List, Tuple, Union, Iterator, Iterable\n\n\nimport numpy as np\n\n\nclass Size:\n def __init__(self, size: Union[int, None]):\n \"\"\"\n Create a Size object with the given size. If the size passed\n in is None, then it is treated as infinite\n :param size: the size\n \"\"\"\n self.size = size\n\n def is_infinite(self) -> bool:\n \"\"\"\n Return True if this Size is infinite, and False otherwise\n :return: whether or not this Size is infinite\n \"\"\"\n return self.size is None\n\n\nclass Rule:\n\n def __init__(self, window: np.ndarray, center: Union[int, Tuple[int, ...]], becomes: int):\n \"\"\"\n Create a single evolution rule\n :param window: matrix of dead (0) and alive (1) cells\n :param center: index of the target cell in the window\n :param becomes: what the target cell becomes in the next generation\n \"\"\"\n if isinstance(center, int):\n center = (center,)\n if len(center) != len(window.shape):\n raise ValueError(\"Center must have the same dimensions as the window\")\n\n self.window = window\n self.center = center\n self.becomes = becomes\n\n\nclass RuleList:\n\n def __init__(self, rules: Union[List[Rule], None] = None):\n \"\"\"\n Create a complete set of evolution rules for the universe\n \"\"\"\n self._rules = rules if rules is not None else []\n\n def add_rule(self, rule: Rule) -> None:\n \"\"\"\n Add a Rule to the RuleList\n :param rule: the Rule\n :return: None\n \"\"\"\n self._rules.append(rule)\n\n def __iter__(self) -> Iterator:\n \"\"\"\n Get an iterator over all Rules in the RuleList\n :return: the Rule iterator\n \"\"\"\n return iter(self._rules)\n\n\nclass Universe:\n\n def __init__(self, dimensions: int, size: Union[Size, None] = None, initial: Union[np.ndarray, None] = None):\n \"\"\"\n Create a (initially static) universe with cells that can be either dead (0) or alive (1)\n :param dimensions: the dimensionality of the universe; can currently only\n be 1 or 2\n :param size: the size of the universe; currently cannot be infinite, and does not\n have to be provided if initial state is provided\n :param initial: the initial state of the universe; if not provided, then all\n cells start off dead\n \"\"\"\n Universe.__check_dimensions(dimensions)\n self._dimensions = dimensions\n\n if (size is None and initial is None) or (size is not None and initial is not None):\n raise ValueError(\"Exactly one of size or initial must be provided\")\n\n if size is not None:\n if size.is_infinite():\n raise ValueError(\"Infinite universes not yet supported\")\n\n self._size = size\n\n if dimensions == 1:\n self._universe = np.array([0] * size.size)\n elif dimensions == 2:\n self._universe = np.array([[0] * size.size for _ in range(size.size)])\n else:\n Universe.__check_initial(initial, dimensions)\n self._size = initial.shape[0]\n\n self._universe = np.copy(initial)\n\n @staticmethod\n def __check_dimensions(dimensions: int) -> None:\n if dimensions <= 0:\n raise ValueError(\"Universes must have at least one dimension\")\n elif dimensions > 2:\n raise ValueError(\"Higher than two-dimensional universe not supported\")\n\n @staticmethod\n def __check_initial(initial: np.ndarray, dimensions: int) -> None:\n if len(initial.shape) != dimensions:\n raise ValueError(\"Initial universe must have the same number of dimensions provided\")\n size = initial.shape[0]\n if not np.all(initial.shape == size):\n raise ValueError(\"Initial universe must have the same size across all dimensions\")\n\n def __repr__(self) -> str:\n if self._dimensions == 1:\n 
return ''.join(map(str, self._universe))\n else:\n return '\\n'.join([''.join(map(str, row)) for row in self._universe])\n\n def transform(self, location: Union[int, Tuple[int, ...], Iterable[int], Iterable[Tuple[int, ...]]], state: int) -> None:\n \"\"\"\n Change the cell at the specified location(s) to the specified state\n :param location: position of the cell to transform; if an iterable, then transform\n all cells in the iterable\n :param state: new state of the cell\n :return: None\n \"\"\"\n if isinstance(location, int) or isinstance(location, tuple):\n if isinstance(location, int):\n location = (location,)\n if len(location) != self._dimensions:\n raise ValueError(\"Location must have the same number of dimensions as the universe\")\n\n self._universe[location] = state\n else:\n for loc in location:\n if isinstance(loc, int):\n loc = (loc,)\n if len(loc) != self._dimensions:\n raise ValueError(\"Location must have the same number of dimensions as the universe\")\n\n self._universe[loc] = state\n\n def apply(self, rules: RuleList) -> None:\n \"\"\"\n Apply an evolution rules to the cells in the universe\n :param rules: the rules to apply\n :return: None\n \"\"\"\n for rule in rules:\n if len(rule.window.shape) != self._dimensions:\n raise ValueError(\"Rules must match the dimensions\")\n\n new_universe = np.copy(self._universe)\n\n if self._dimensions == 1:\n padding = max((rule.window.shape[0] for rule in rules))\n old_universe = np.pad(self._universe, padding)\n for index in range(padding, self._size.size + padding):\n for rule in rules:\n center = rule.center[0]\n if np.all(old_universe[index - center:index - center + rule.window.shape[0]] == rule.window):\n new_universe[index - padding] = rule.becomes\n break\n else:\n vertical_padding = max((rule.window.shape[0] for rule in rules))\n horizontal_padding = max((rule.window.shape[1] for rule in rules))\n old_universe = np.pad(self._universe, ((vertical_padding, vertical_padding), (horizontal_padding, horizontal_padding)))\n for row_index in range(vertical_padding, self._size.size + vertical_padding):\n for col_index in range(horizontal_padding, self._size.size + vertical_padding):\n for rule in rules:\n vertical_center, horizontal_center = rule.center\n if np.all(old_universe[\n row_index - vertical_center:row_index - vertical_center + rule.window.shape[0],\n col_index - horizontal_center:col_index - horizontal_center + rule.window.shape[1]\n ] == rule.window):\n new_universe[row_index - vertical_padding, col_index - horizontal_padding] = rule.becomes\n\n self._universe = new_universe\n\n\nclass Simulator:\n\n def __init__(self, universe: Universe, rule_list: RuleList):\n \"\"\"\n Create a Simulator starting with the given Universe and RuleList\n :param universe: the universe to start with\n :param rule_list: the rules to evolve the universe\n \"\"\"\n self._universe = universe\n self._rule_list = rule_list\n\n def step(self, num_steps: int = 1) -> None:\n \"\"\"\n Go forward in the universe num_steps time steps\n :param num_steps: number of steps to advance the Universe\n :return: None\n \"\"\"\n for _ in range(num_steps):\n self._universe.apply(self._rule_list)\n\n def print_universe(self) -> None:\n \"\"\"\n Print the current universe\n :return: None\n \"\"\"\n print(f\"{self._universe}\\n\")\n"
] | [
[
"numpy.array",
"numpy.pad",
"numpy.all",
"numpy.copy"
]
] |
PedroAbreuQB/kedro | [
"a38552a0266d4ad7b823f1640e98aefa6175fd33"
] | [
"kedro/io/hdf_dataset.py"
] | [
"# Copyright 2018-2019 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\"QuantumBlack\") name and logo\n# (either separately or in combination, \"QuantumBlack Trademarks\") are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"``HDFDataSet`` loads/saves data from/to a hdf file using an underlying\nfilesystem (e.g.: local, S3, GCS). It uses pandas.HDFStore to handle the hdf file.\n\"\"\"\nfrom copy import deepcopy\nfrom pathlib import PurePosixPath\nfrom typing import Any, Dict\n\nimport fsspec\nimport pandas as pd\n\nfrom kedro.io.core import (\n AbstractVersionedDataSet,\n DataSetError,\n Version,\n get_filepath_str,\n get_protocol_and_path,\n)\n\nHDFSTORE_DRIVER = \"H5FD_CORE\"\n\n\nclass HDFDataSet(AbstractVersionedDataSet):\n \"\"\"``HDFDataSet`` loads/saves data from/to a hdf file using an underlying\n filesystem (e.g. local, S3, GCS). 
It uses pandas.HDFStore to handle the hdf file.\n\n Example:\n ::\n\n >>> from kedro.io.hdf_dataset import HDFDataSet\n >>> import pandas as pd\n >>>\n >>> data = pd.DataFrame({'col1': [1, 2], 'col2': [4, 5],\n >>> 'col3': [5, 6]})\n >>>\n >>> # data_set = HDFDataSet(filepath=\"gcs://bucket/test.hdf\", key='data')\n >>> data_set = HDFDataSet(filepath=\"test.h5\", key='data')\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>> assert data.equals(reloaded)\n\n \"\"\"\n\n DEFAULT_LOAD_ARGS = {} # type: Dict[str, Any]\n DEFAULT_SAVE_ARGS = {} # type: Dict[str, Any]\n\n # pylint: disable=too-many-arguments\n def __init__(\n self,\n filepath: str,\n key: str,\n load_args: Dict[str, Any] = None,\n save_args: Dict[str, Any] = None,\n version: Version = None,\n credentials: Dict[str, Any] = None,\n fs_args: Dict[str, Any] = None,\n ) -> None:\n \"\"\"Creates a new instance of ``HDFDataSet`` pointing to a concrete hdf file\n on a specific filesystem.\n\n Args:\n filepath: Filepath to a hdf file prefixed with a protocol like `s3://`.\n If prefix is not provided, `file` protocol (local filesystem) will be used.\n The prefix should be any protocol supported by ``fsspec``.\n Note: `http(s)` doesn't support versioning.\n key: Identifier to the group in the HDF store.\n load_args: PyTables options for loading hdf files.\n You can find all available arguments at:\n https://www.pytables.org/usersguide/libref/top_level.html#tables.open_file\n All defaults are preserved.\n save_args: PyTables options for saving hdf files.\n You can find all available arguments at:\n https://www.pytables.org/usersguide/libref/top_level.html#tables.open_file\n All defaults are preserved.\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. If its ``save``\n attribute is None, save version will be autogenerated.\n credentials: Credentials required to get access to the underlying filesystem.\n E.g. for ``GCSFileSystem`` it should look like `{\"token\": None}`.\n fs_args: Extra arguments to pass into underlying filesystem class.\n E.g. 
for ``GCSFileSystem`` class: `{\"project\": \"my-project\", ...}`\n \"\"\"\n _fs_args = deepcopy(fs_args) or {}\n _credentials = deepcopy(credentials) or {}\n self._key = key\n\n protocol, path = get_protocol_and_path(filepath, version)\n\n self._protocol = protocol\n self._fs = fsspec.filesystem(self._protocol, **_credentials, **_fs_args)\n\n super().__init__(\n filepath=PurePosixPath(path),\n version=version,\n exists_function=self._fs.exists,\n glob_function=self._fs.glob,\n )\n\n # Handle default load and save arguments\n self._load_args = deepcopy(self.DEFAULT_LOAD_ARGS)\n if load_args is not None:\n self._load_args.update(load_args)\n self._save_args = deepcopy(self.DEFAULT_SAVE_ARGS)\n if save_args is not None:\n self._save_args.update(save_args)\n\n def _describe(self) -> Dict[str, Any]:\n return dict(\n filepath=self._filepath,\n key=self._key,\n protocol=self._protocol,\n load_args=self._load_args,\n save_args=self._save_args,\n version=self._version,\n )\n\n def _load(self) -> pd.DataFrame:\n load_path = get_filepath_str(self._get_load_path(), self._protocol)\n\n with self._fs.open(load_path, mode=\"rb\") as fs_file:\n binary_data = fs_file.read()\n\n # Set driver_core_backing_store to False to disable saving\n # contents of the in-memory h5file to disk\n with pd.HDFStore(\n \"in-memory-load-file\",\n mode=\"r\",\n driver=HDFSTORE_DRIVER,\n driver_core_backing_store=0,\n driver_core_image=binary_data,\n **self._load_args,\n ) as store:\n return store[self._key]\n\n def _save(self, data: pd.DataFrame) -> None:\n save_path = get_filepath_str(self._get_save_path(), self._protocol)\n\n with pd.HDFStore(\n \"in-memory-save-file\",\n mode=\"w\",\n driver=HDFSTORE_DRIVER,\n driver_core_backing_store=0,\n **self._save_args,\n ) as store:\n store.put(self._key, data, format=\"table\")\n # pylint: disable=protected-access\n binary_data = store._handle.get_file_image()\n\n with self._fs.open(save_path, mode=\"wb\") as fs_file:\n fs_file.write(binary_data)\n\n self.invalidate_cache()\n\n def _exists(self) -> bool:\n try:\n load_path = get_filepath_str(self._get_load_path(), self._protocol)\n except DataSetError:\n return False\n\n return self._fs.exists(load_path)\n\n def _release(self) -> None:\n self.invalidate_cache()\n\n def invalidate_cache(self) -> None:\n \"\"\"Invalidate underlying filesystem caches.\"\"\"\n filepath = get_filepath_str(self._filepath, self._protocol)\n self._fs.invalidate_cache(filepath)\n"
] | [
[
"pandas.HDFStore"
]
] |