function (string, lengths 11–56k) | repo_name (string, lengths 5–60) | features (sequence)
---|---|---|
def link_fn(latent_mean):
return latent_mean | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def statistical_linear_regression_quadrature(self, m, v, hyp=None, num_quad_points=20):
"""
Perform statistical linear regression (SLR) using Gauss-Hermite quadrature.
We aim to find a likelihood approximation p(yₙ|fₙ) ≈ 𝓝(yₙ|Afₙ+b,Ω+Var[yₙ|fₙ]).
"""
x, w = hermgauss(num_quad_points) # Gauss-Hermite sigma points and weights
w = w / np.sqrt(pi) # scale weights by 1/√π
sigma_points = np.sqrt(2) * np.sqrt(v) * x + m # scale locations according to cavity dist.
lik_expectation, _ = self.conditional_moments(sigma_points, hyp)
# Compute zₙ via quadrature:
# zₙ = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ E[yₙ|xᵢ√(2vₙ) + mₙ]
z = np.sum(
w * lik_expectation
)
# Compute variance S via quadrature:
# S = ∫ (E[yₙ|fₙ]-zₙ) (E[yₙ|fₙ]-zₙ)' 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (E[yₙ|xᵢ√(2vₙ) + mₙ]-zₙ) (E[yₙ|xᵢ√(2vₙ) + mₙ]-zₙ)'
S = np.sum(
w * (lik_expectation - z) * (lik_expectation - z)
)
# Compute cross covariance C via quadrature:
# C = ∫ (fₙ-mₙ) (E[yₙ|fₙ]-zₙ)' 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fₙ-mₙ) (E[yₙ|xᵢ√(2vₙ) + mₙ]-zₙ)'
C = np.sum(
w * (sigma_points - m) * (lik_expectation - z)
)
# compute likelihood approximation 𝓝(yₙ|Afₙ+b,Ω+Var[yₙ|fₙ])
A = C * v**-1 # the scale
b = z - A * m # the offset
omega = S - A * v * A # the linearisation error
return A, b, omega | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
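A standalone sketch (not part of the repository) of the Gauss-Hermite step used above: it approximates z = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ and checks it against a closed form for an assumed toy conditional mean E[y|f] = f² + f, whose Gaussian expectation is m² + v + m.

import numpy as np
from numpy.polynomial.hermite import hermgauss

def gh_expectation(fn, m, v, num_quad_points=20):
    # E[fn(f)] under N(f | m, v) via Gauss-Hermite quadrature
    x, w = hermgauss(num_quad_points)
    w = w / np.sqrt(np.pi)                   # scale weights by 1/sqrt(pi)
    sigma_points = np.sqrt(2.0 * v) * x + m  # rescale quadrature locations
    return np.sum(w * fn(sigma_points))

m, v = 0.3, 0.8
approx = gh_expectation(lambda f: f ** 2 + f, m, v)
exact = m ** 2 + v + m
print(approx, exact)  # the two values should agree to many decimal places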
def statistical_linear_regression(self, m, v, hyp=None):
"""
If no custom SLR method is provided, we use Gauss-Hermite quadrature.
"""
return self.statistical_linear_regression_quadrature(m, v, hyp) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def observation_model(self, f, r, hyp=None):
"""
The implicit observation model is:
h(fₙ,rₙ) = E[yₙ|fₙ] + √Var[yₙ|fₙ] rₙ
"""
conditional_expectation, conditional_variance = self.conditional_moments(f, hyp)
obs_model = conditional_expectation + cholesky(conditional_variance) * r
return np.squeeze(obs_model) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def analytical_linearisation(self, m, hyp=None):
"""
Compute the Jacobian of the state space observation model w.r.t. the
function fₙ and the noise term rₙ.
The implicit observation model is:
h(fₙ,rₙ) = E[yₙ|fₙ] + √Var[yₙ|fₙ] rₙ
The Jacobians are evaluated at the means, fₙ=m, rₙ=0, to be used during
extended Kalman filtering and extended Kalman EP.
"""
Jf, Jr = jacrev(self.observation_model, argnums=(0, 1))(m, 0.0, hyp)
return Jf, Jr | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
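A minimal sketch of the same linearisation idea with a made-up observation model (presumably jacrev here is jax.jacrev; the model below is an assumption used only for illustration):

import jax.numpy as jnp
from jax import jacrev

def toy_obs_model(f, r, noise_var=0.1):
    # hypothetical h(f, r) = E[y|f] + sqrt(Var[y|f]) * r with E[y|f] = sin(f)
    return jnp.sin(f) + jnp.sqrt(noise_var) * r

m = 0.5
Jf, Jr = jacrev(toy_obs_model, argnums=(0, 1))(m, 0.0)
print(Jf, Jr)  # expect cos(0.5) and sqrt(0.1)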
def variational_expectation_quadrature(self, y, m, v, hyp=None, num_quad_points=20):
"""
Computes the "variational expectation" via Gauss-Hermite quadrature, i.e. the
expected log-likelihood, and its derivatives w.r.t. the posterior mean
E[log p(yₙ|fₙ)] = ∫ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
:param y: observed data (yₙ) [scalar]
:param m: posterior mean (mₙ) [scalar]
:param v: posterior variance (vₙ) [scalar]
:param hyp: likelihood hyperparameter [scalar]
:param num_quad_points: the number of Gauss-Hermite sigma points to use during quadrature [scalar]
:return:
exp_log_lik: the expected log likelihood, E[log p(yₙ|fₙ)] [scalar]
dE: first derivative of E[log p(yₙ|fₙ)] w.r.t. mₙ [scalar]
d2E: second derivative of E[log p(yₙ|fₙ)] w.r.t. mₙ [scalar]
"""
x, w = hermgauss(num_quad_points) # Gauss-Hermite sigma points and weights
w = w / np.sqrt(pi) # scale weights by 1/√π
sigma_points = np.sqrt(2) * np.sqrt(v) * x + m # scale locations according to cavity dist.
# pre-compute wᵢ log p(yₙ|xᵢ√(2vₙ) + mₙ)
weighted_log_likelihood_eval = w * self.evaluate_log_likelihood(y, sigma_points, hyp)
# Compute expected log likelihood via quadrature:
# E[log p(yₙ|fₙ)] = ∫ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ log p(yₙ|xᵢ√(2vₙ) + mₙ)
exp_log_lik = np.sum(
weighted_log_likelihood_eval
)
# Compute first derivative via quadrature:
# dE[log p(yₙ|fₙ)]/dmₙ = ∫ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|xᵢ√(2vₙ) + mₙ)
dE = np.sum(
(sigma_points - m) / v
* weighted_log_likelihood_eval
)
# Compute second derivative via quadrature:
# d²E[log p(yₙ|fₙ)]/dmₙ² = ∫ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] log p(yₙ|xᵢ√(2vₙ) + mₙ)
d2E = np.sum(
(0.5 * (v ** -2) * (sigma_points - m) ** 2 - 0.5 * v ** -1)
* weighted_log_likelihood_eval
)
return exp_log_lik, dE, d2E | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
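As a quick sanity check (not from the repository), Gauss-Hermite quadrature of the expected log-likelihood can be compared against the closed form for a Gaussian observation model, E[log 𝓝(y|f,σ²)] = log 𝓝(y|m,σ²) − v/(2σ²); all numbers below are assumed test values.

import numpy as np
from numpy.polynomial.hermite import hermgauss

y, m, v, noise = 0.7, 0.2, 0.5, 0.3  # observation, posterior mean/variance, noise variance

x, w = hermgauss(20)
w = w / np.sqrt(np.pi)
f = np.sqrt(2.0 * v) * x + m
log_lik = -0.5 * np.log(2 * np.pi * noise) - 0.5 * (y - f) ** 2 / noise
quad = np.sum(w * log_lik)

closed_form = (-0.5 * np.log(2 * np.pi * noise)
               - 0.5 * (y - m) ** 2 / noise
               - 0.5 * v / noise)
print(quad, closed_form)  # should agree closely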
def variational_expectation(self, y, m, v, hyp=None):
"""
If no custom variational expectation method is provided, we use Gauss-Hermite quadrature.
"""
return self.variational_expectation_quadrature(y, m, v, hyp) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def __init__(self, hyp):
"""
:param hyp: The observation noise variance, σ²
"""
super().__init__(hyp=hyp)
if self.hyp is None:
print('using default likelihood parameter since none was supplied')
self.hyp = 0.1
self.name = 'Gaussian' | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def evaluate_likelihood(self, y, f, hyp=None):
"""
Evaluate the Gaussian function 𝓝(yₙ|fₙ,σ²).
Can be used to evaluate Q quadrature points.
:param y: observed data yₙ [scalar]
:param f: mean, i.e. the latent function value fₙ [Q, 1]
:param hyp: likelihood variance σ² [scalar]
:return:
𝓝(yₙ|fₙ,σ²), where σ² is the observation noise [Q, 1]
"""
hyp = softplus(self.hyp) if hyp is None else hyp
return (2 * pi * hyp) ** -0.5 * np.exp(-0.5 * (y - f) ** 2 / hyp) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def evaluate_log_likelihood(self, y, f, hyp=None):
"""
Evaluate the log-Gaussian function log𝓝(yₙ|fₙ,σ²).
Can be used to evaluate Q quadrature points.
:param y: observed data yₙ [scalar]
:param f: mean, i.e. the latent function value fₙ [Q, 1]
:param hyp: likelihood variance σ² [scalar]
:return:
log𝓝(yₙ|fₙ,σ²), where σ² is the observation noise [Q, 1]
"""
hyp = softplus(self.hyp) if hyp is None else hyp
return -0.5 * np.log(2 * pi * hyp) - 0.5 * (y - f) ** 2 / hyp | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def conditional_moments(self, f, hyp=None):
"""
The first two conditional moments of a Gaussian are the mean and variance:
E[y|f] = f
Var[y|f] = σ²
"""
hyp = softplus(self.hyp) if hyp is None else hyp
return f, hyp | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def moment_match(self, y, m, v, hyp=None, power=1.0):
"""
Closed form Gaussian moment matching.
Calculates the log partition function of the EP tilted distribution:
logZₙ = log ∫ 𝓝ᵃ(yₙ|fₙ,σ²) 𝓝(fₙ|mₙ,vₙ) dfₙ = log E[𝓝ᵃ(yₙ|fₙ,σ²)]
and its derivatives w.r.t. mₙ, which are required for moment matching.
:param y: observed data (yₙ) [scalar]
:param m: cavity mean (mₙ) [scalar]
:param v: cavity variance (vₙ) [scalar]
:param hyp: observation noise variance (σ²) [scalar]
:param power: EP power / fraction (a) - this is never required for the Gaussian likelihood [scalar]
:return:
lZ: the log partition function, logZₙ [scalar]
dlZ: first derivative of logZₙ w.r.t. mₙ (if derivatives=True) [scalar]
d2lZ: second derivative of logZₙ w.r.t. mₙ (if derivatives=True) [scalar]
"""
hyp = softplus(self.hyp) if hyp is None else hyp
return gaussian_moment_match(y, m, v, hyp) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
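For the Gaussian likelihood the log partition function has the well-known closed form logZₙ = log 𝓝(yₙ|mₙ, vₙ+σ²). A rough sketch of what a gaussian_moment_match helper along these lines computes (the repository's actual helper may differ in detail):

import numpy as np

def gaussian_moment_match_sketch(y, m, v, noise_var):
    # log Z = log N(y | m, v + sigma^2) and its first two derivatives w.r.t. m
    s = v + noise_var
    lZ = -0.5 * np.log(2 * np.pi * s) - 0.5 * (y - m) ** 2 / s
    dlZ = (y - m) / s   # d logZ / dm
    d2lZ = -1.0 / s     # d^2 logZ / dm^2
    return lZ, dlZ, d2lZ

print(gaussian_moment_match_sketch(0.4, 0.1, 0.2, 0.1))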
def __init__(self, hyp):
"""
:param hyp: None. This likelihood model has no hyperparameters.
"""
super().__init__(hyp=hyp)
self.name = 'Probit' | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def link_fn(latent_mean):
return erfc(-latent_mean / np.sqrt(2.0)) - 1.0 | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def eval(self, mu, var):
"""
ported from GPML toolbox - not used.
"""
lp, _, _ = self.moment_match(1, mu, var)
p = np.exp(lp)
ymu = 2 * p - 1
yvar = 4 * p * (1 - p)
return lp, ymu, yvar | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def evaluate_likelihood(self, y, f, hyp=None):
"""
Evaluate the Gaussian CDF likelihood model,
Φ(yₙfₙ) = (1 + erf(yₙfₙ / √2)) / 2
for erf(z) = (2/√π) ∫ exp(-x²) dx, where the integral is over [0, z]
Can be used to evaluate Q quadrature points when performing moment matching.
:param y: observed data yₙ ϵ {-1, +1} [scalar]
:param f: latent function value fₙ [Q, 1]
:param hyp: dummy input, Probit has no hyperparameters
:return:
Φ(yₙfₙ) [Q, 1]
"""
return (1.0 + erf(y * f / np.sqrt(2.0))) / 2.0 # Φ(z) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def evaluate_log_likelihood(self, y, f, hyp=None):
"""
Evaluate the Gaussian CDF log-likelihood,
log Φ(yₙfₙ) = log[(1 + erf(yₙfₙ / √2)) / 2]
for erf(z) = (2/√π) ∫ exp(-x²) dx, where the integral is over [0, z].
Can be used to evaluate Q quadrature points when performing moment matching.
:param y: observed data yₙ ϵ {-1, +1} [scalar]
:param f: latent function value fₙ [Q, 1]
:param hyp: dummy input, Probit has no hyperparameters
:return:
log Φ(yₙfₙ) [Q, 1]
"""
return np.log(1.0 + erf(y * f / np.sqrt(2.0)) + 1e-10) - np.log(2) # logΦ(z) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def conditional_moments(self, f, hyp=None):
"""
The first two conditional moments of a Probit likelihood are:
E[yₙ|fₙ] = Φ(fₙ)
Var[yₙ|fₙ] = Φ(fₙ) (1 - Φ(fₙ))
where Φ(fₙ) = (1 + erf(fₙ / √2)) / 2
"""
# TODO: not working
# phi = (1.0 + erf(f / np.sqrt(2.0))) / 2.0
# phi = self.link_fn(f)
# phi = erfc(f / np.sqrt(2.0)) - 1.0
phi = self.evaluate_likelihood(1.0, f)
return phi, phi * (1.0 - phi) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def moment_match(self, y, m, v, hyp=None, power=1.0):
"""
Probit likelihood moment matching.
Calculates the log partition function of the EP tilted distribution:
logZₙ = log ∫ Φᵃ(yₙfₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
and its derivatives w.r.t. mₙ, which are required for moment matching.
If the EP fraction a = 1, we get
= log Φ(yₙzₙ), where zₙ = mₙ / √(1 + vₙ) [see Rasmussen & Williams p74]
otherwise we must use quadrature to compute the log partition and its derivatives.
Note: we enforce yₙ ϵ {-1, +1}.
:param y: observed data (yₙ) [scalar]
:param m: cavity mean (mₙ) [scalar]
:param v: cavity variance (vₙ) [scalar]
:param hyp: dummy variable (Probit has no hyperparameters)
:param power: EP power / fraction (a) [scalar]
:return:
lZ: the log partition function, logZₙ [scalar]
dlZ: first derivative of logZₙ w.r.t. mₙ (if derivatives=True) [scalar]
d2lZ: second derivative of logZₙ w.r.t. mₙ (if derivatives=True) [scalar]
"""
y = np.sign(y) # only allow values of +/-1
# y[np.where(y == 0)] = -1 # set zeros to -1
y = np.sign(y - 0.01) # set zeros to -1
if power == 1: # if a = 1, we can calculate the moments in closed form
z = m / np.sqrt(1.0 + v)
z = z * y # zₙ = yₙmₙ / √(1 + vₙ)
# logZₙ = log ∫ Φ(yₙfₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# = log Φ(yₙmₙ/√(1 + vₙ)) [see Rasmussen & Williams p74]
lZ, dlp = logphi(z)
# dlogZₙ/dmₙ = yₙ dlogΦ(zₙ)/dmₙ / √(1 + vₙ)
dlZ = y * dlp / np.sqrt(1.0 + v) # first derivative w.r.t mₙ
# d²logZₙ/dmₙ² = -dlogΦ(zₙ)/dmₙ (zₙ + dlogΦ(zₙ)/dmₙ) / √(1 + vₙ)
d2lZ = -dlp * (z + dlp) / (1.0 + v) # second derivative w.r.t mₙ
site_mean = m - dlZ / d2lZ # approx. likelihood (site) mean (see Rasmussen & Williams p75)
site_var = - (v + 1 / d2lZ) # approx. likelihood (site) variance
return lZ, site_mean, site_var
else:
# if a is not 1, we can calculate the moments via quadrature
return self.moment_match_quadrature(y, m, v, None, power) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
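A standalone numerical check of the closed-form branch above, using scipy in place of the repository's logphi helper: logZₙ = log Φ(yₙmₙ/√(1+vₙ)) should agree with Gauss-Hermite quadrature of ∫ Φ(yₙfₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ (test values are assumptions).

import numpy as np
from numpy.polynomial.hermite import hermgauss
from scipy.stats import norm

y, m, v = 1.0, 0.4, 1.5  # y in {-1, +1}, cavity mean and variance

lZ_closed = norm.logcdf(y * m / np.sqrt(1.0 + v))  # log Phi(y m / sqrt(1 + v))

x, w = hermgauss(30)
w = w / np.sqrt(np.pi)
f = np.sqrt(2.0 * v) * x + m
lZ_quad = np.log(np.sum(w * norm.cdf(y * f)))

print(lZ_closed, lZ_quad)  # should agree closely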
def __init__(self, hyp=None, link='exp'):
"""
:param hyp: None. This likelihood model has no hyperparameters
:param link: link function, either 'exp' or 'logistic'
"""
super().__init__(hyp=hyp)
if link == 'exp':
self.link_fn = lambda mu: np.exp(mu)
elif link == 'logistic':
self.link_fn = lambda mu: np.log(1.0 + np.exp(mu))
else:
raise NotImplementedError('link function not implemented')
self.name = 'Poisson' | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def evaluate_likelihood(self, y, f, hyp=None):
"""
Evaluate the Poisson likelihood:
p(yₙ|fₙ) = Poisson(fₙ) = μʸ exp(-μ) / yₙ!
for μ = g(fₙ), where g() is the link function (exponential or logistic).
We use the gamma function to evaluate yₙ! = gamma(yₙ + 1).
Can be used to evaluate Q quadrature points when performing moment matching.
:param y: observed data (yₙ) [scalar]
:param f: latent function value (fₙ) [Q, 1]
:param hyp: dummy variable (Poisson has no hyperparameters)
:return:
Poisson(fₙ) = μʸ exp(-μ) / yₙ! [Q, 1]
"""
mu = self.link_fn(f)
return mu**y * np.exp(-mu) / np.exp(gammaln(y + 1)) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def evaluate_log_likelihood(self, y, f, hyp=None):
"""
Evaluate the Poisson log-likelihood:
log p(yₙ|fₙ) = log Poisson(fₙ) = log(μʸ exp(-μ) / yₙ!)
for μ = g(fₙ), where g() is the link function (exponential or logistic).
We use the gamma function to evaluate yₙ! = gamma(yₙ + 1).
Can be used to evaluate Q quadrature points when performing moment matching.
:param y: observed data (yₙ) [scalar]
:param f: latent function value (fₙ) [Q, 1]
:param hyp: dummy variable (Poisson has no hyperparameters)
:return:
log Poisson(fₙ) = log(μʸ exp(-μ) / yₙ!) [Q, 1]
"""
mu = self.link_fn(f)
return y * np.log(mu) - mu - gammaln(y + 1) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
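A quick check (assumed values; scipy used only for comparison) that the gammaln-based expression above matches scipy's Poisson log-pmf under the exponential link:

import numpy as np
from scipy.special import gammaln
from scipy.stats import poisson

y, f = 3.0, 0.9          # assumed count and latent value
mu = np.exp(f)           # exponential link
log_lik = y * np.log(mu) - mu - gammaln(y + 1)
print(log_lik, poisson.logpmf(3, mu))  # should match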
def conditional_moments(self, f, hyp=None):
"""
The first two conditional moments of a Poisson distribution are equal to the intensity:
E[yₙ|fₙ] = link(fₙ)
Var[yₙ|fₙ] = link(fₙ)
"""
return self.link_fn(f), self.link_fn(f) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def __init__(self, hyp=None, p1=0.5, p2=0.5):
"""
:param hyp: None
"""
self.p1 = p1
self.p2 = p2
super().__init__(hyp=hyp)
self.name = 'Beta' | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def evaluate_likelihood(self, y, f, hyp=None):
return beta() | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def __init__(self, hyp=None, omega=0.8, var1=0.3, var2=0.5):
"""
:param hyp: None
"""
self.omega = omega
self.var1 = var1
self.var2 = var2
super().__init__(hyp=hyp)
self.name = 'sum of Gaussians' | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def evaluate_likelihood(self, y, f, hyp=None):
return (npdf(y, f+self.omega, self.var1) + npdf(y, f-self.omega, self.var2)) / 2. | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def evaluate_log_likelihood(self, y, f, hyp=None):
return np.log(self.evaluate_likelihood(y, f, hyp)) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def conditional_moments(self, f, hyp=None):
return f, (self.var1 + self.var2) / 2 | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def __init__(self, hyp, rho=1.2, p=0.2):
"""
:param hyp: the noise variance σ² [scalar]
"""
self.rho = rho
self.p = p
super().__init__(hyp=hyp)
if self.hyp is None:
print('using default likelihood parameter since none was supplied')
self.hyp = 0.1
self.name = 'Threshold' | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def link_fn(self, latent_mean):
return (1 - self.rho) * latent_mean + self.rho * threshold_func(latent_mean, self.p) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def evaluate_likelihood(self, y, f, hyp=None):
hyp = self.hyp if hyp is None else hyp
return npdf(y, f, hyp) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def evaluate_log_likelihood(self, y, f, hyp=None):
hyp = self.hyp if hyp is None else hyp
return log_npdf(y, f, hyp) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def conditional_moments(self, f, hyp=None):
hyp = self.hyp if hyp is None else hyp
lik_expectation = self.link_fn(f)
return lik_expectation, hyp | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def log_npdf(x, m, v):
return -(x - m) ** 2 / (2 * v) - 0.5 * np.log(2 * pi * v) | AaltoML/kalman-jax | [
85,
12,
85,
2,
1585896736
] |
def parse(nt, model, encoding='utf-8', disjoint=None, only_rel=None, exclude_rel=None):
'''
nt - string or file-like object with NTriples to parse
model - Versa model into which to parse the data
encoding - character encoding for NTriples (default UTF-8)
disjoint - if not None a list or set of link tuples against which parsed links
should be compared, and omitted if matching.
only_rel - if not None a collection of link relations limiting the parsed
NTriples statements to only be added to the model if the
predicate matches one in only_rel
exclude_rel - if not None a collection of link relations limiting
the parsed NTriples statements to be skipped if the predicate
matches one in exclude_rel | uogbuji/versa | [
10,
5,
10,
4,
1394141786
] |
def _add(o, r, t, a=None):
'''
Conditionally add a statement to model, if not a duplicate
'''
a = a or {}
parts = (o, r, t, tuple(a.items()))
if (parts in added_links) or (parts in disjoint):
return False
model.add(o, r, t, a)
added_links.add((o, r, t, tuple(a.items())))
return True | uogbuji/versa | [
10,
5,
10,
4,
1394141786
] |
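The duplicate-suppression idea in _add (key each link by its full tuple, skip it if already added or listed in the disjoint set) can be sketched in isolation; the model object and link shapes below are stand-ins, not Versa's actual API.

added_links = set()
disjoint = {('s1', 'rel', 'o1', ())}  # links that must not be re-added

def add_once(model, o, r, t, attrs=None):
    attrs = attrs or {}
    parts = (o, r, t, tuple(attrs.items()))
    if parts in added_links or parts in disjoint:
        return False
    model.append(parts)       # stand-in for model.add(o, r, t, attrs)
    added_links.add(parts)
    return True

model = []
print(add_once(model, 's1', 'rel', 'o2'))  # True  - new link
print(add_once(model, 's1', 'rel', 'o2'))  # False - duplicate
print(add_once(model, 's1', 'rel', 'o1'))  # False - in the disjoint set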
def parse_iter(nt_fp, model_fact=newmodel):
raise NotImplementedError | uogbuji/versa | [
10,
5,
10,
4,
1394141786
] |
def write(models, out=None, base=None):
'''
models - one or more input Versa models from which output is generated.
'''
assert out is not None #Output stream required
if not isinstance(models, list): models = [models]
for m in models:
for link in m.match():
s, p, o = link[:3]
#Skip docheader statements
if s == (base or '') + '@docheader': continue
if p in RESOURCE_MAPPING: p = RESOURCE_MAPPING[p]
if o in RESOURCE_MAPPING: o = RESOURCE_MAPPING[o] | uogbuji/versa | [
10,
5,
10,
4,
1394141786
] |
def setUp(self):
u = get_user_model()
u.objects.create_superuser('admin', '[email protected]', self.PW)
self.user = u.objects.create_user('user', '[email protected]', self.PW)
self.user2 = u.objects.create_user('user1', '[email protected]', self.PW) | pstrinkle/drf-coupons | [
1,
2,
1,
2,
1483745217
] |
def describe_location(location, locations):
if location.can_describe:
final_location = locations.get(location.pk)
if final_location is not None:
location = final_location
result = location.serialize(include_type=True, detailed=False, simple_geometry=True)
if hasattr(location, 'serialize_position'):
result.update(location.serialize_position())
return result | c3nav/c3nav | [
137,
31,
137,
17,
1461327231
] |
def __init__(self, router, origin, destination, path_nodes, options,
origin_addition, destination_addition, origin_xyz, destination_xyz):
self.router = router
self.origin = origin
self.destination = destination
self.path_nodes = path_nodes
self.options = options
self.origin_addition = origin_addition
self.destination_addition = destination_addition
self.origin_xyz = origin_xyz
self.destination_xyz = destination_xyz | c3nav/c3nav | [
137,
31,
137,
17,
1461327231
] |
def __init__(self, route, node, edge, last_item):
self.route = route
self.node = node
self.edge = edge
self.last_item = last_item
self.descriptions = [] | c3nav/c3nav | [
137,
31,
137,
17,
1461327231
] |
def waytype(self):
if self.edge and self.edge.waytype:
return self.route.router.waytypes[self.edge.waytype] | c3nav/c3nav | [
137,
31,
137,
17,
1461327231
] |
def router_waytype(self):
if self.edge:
return self.route.router.waytypes[self.edge.waytype] | c3nav/c3nav | [
137,
31,
137,
17,
1461327231
] |
def space(self):
return self.route.router.spaces[self.node.space] | c3nav/c3nav | [
137,
31,
137,
17,
1461327231
] |
def level(self):
return self.route.router.levels[self.space.level_id] | c3nav/c3nav | [
137,
31,
137,
17,
1461327231
] |
def new_space(self):
return not self.last_item or self.space.pk != self.last_item.space.pk | c3nav/c3nav | [
137,
31,
137,
17,
1461327231
] |
def new_level(self):
return not self.last_item or self.level.pk != self.last_item.level.pk | c3nav/c3nav | [
137,
31,
137,
17,
1461327231
] |
def onEndSpeaking(text):
mouth.setmouth(90,120)
jaw.moveTo(95)
sleep(.5)
mouth.setmouth(110,120) | MyRobotLab/pyrobotlab | [
62,
140,
62,
5,
1413898155
] |
def setUp(self):
super(JoinTest, self).setUp()
self.form_data = {'nick': 'johndoe',
'first_name': 'John',
'last_name': 'Doe',
'email': '[email protected]',
'password': 'good*password',
'confirm': 'good*password',
'hide': '1',
#'invite': ''
} | tallstreet/jaikuenginepatch | [
14,
3,
14,
1,
1238305655
] |
def assert_join_validation_error(self, response, content):
self.assertContains(response, content)
self.assertTemplateUsed(response, 'join.html')
self.assertTemplateUsed(response, 'form_error.html') | tallstreet/jaikuenginepatch | [
14,
3,
14,
1,
1238305655
] |
def test_join_with_valid_data(self):
r = self.client.post('/join', self.form_data)
r = self.assertRedirectsPrefix(r, '/welcome') | tallstreet/jaikuenginepatch | [
14,
3,
14,
1,
1238305655
] |
def test_join_with_used_email(self):
self.form_data['email'] = '[email protected]'
r = self.client.post('/join', self.form_data)
self.assert_join_validation_error(r, 'already associated') | tallstreet/jaikuenginepatch | [
14,
3,
14,
1,
1238305655
] |
def test_join_with_invalid_nick(self):
self.form_data['nick'] = 'a'
r = self.client.post('/join', self.form_data)
self.assert_join_validation_error(r, 'Invalid nick') | tallstreet/jaikuenginepatch | [
14,
3,
14,
1,
1238305655
] |
def test_join_with_banned_nick(self):
self.form_data['nick'] = 'json'
r = self.client.post('/join', self.form_data)
self.assert_join_validation_error(r, 'not allowed') | tallstreet/jaikuenginepatch | [
14,
3,
14,
1,
1238305655
] |
def test_join_with_used_nick_case_insensitive(self):
self.form_data['nick'] = 'Popular'
r = self.client.post('/join', self.form_data)
self.assert_join_validation_error(r, 'already in use') | tallstreet/jaikuenginepatch | [
14,
3,
14,
1,
1238305655
] |
def setUp(self):
super(WelcomeTest, self).setUp()
self.login('girlfriend') | tallstreet/jaikuenginepatch | [
14,
3,
14,
1,
1238305655
] |
def test_photo_view(self):
r = self.client.get('/welcome/1')
self.assertContains(r, 'Your photo')
self.assertTemplateUsed(r, 'welcome_photo.html') | tallstreet/jaikuenginepatch | [
14,
3,
14,
1,
1238305655
] |
def test_mobile_activation_view(self):
r = self.client.get('/welcome/2')
self.assertContains(r, 'SIGN IN')
self.assertTemplateUsed(r, 'welcome_mobile.html') | tallstreet/jaikuenginepatch | [
14,
3,
14,
1,
1238305655
] |
def route(unrouted):
"""
Route an UnroutedNotification to the appropriate repositories
The function will extract all the metadata and match data from any binary content associated with
the notification, and in combination with match data taken from the notification metadata itself will
determine if there is a RepositoryConfig whose criteria it matches.
If there is a match to one or more of the criteria, MatchProvenance objects will be created for
each matching repository, and persisted for later inspection.
If one or more repositories are matched, a RoutedNotification will be created and enhanced with any
metadata extracted from the associated package (if present), then persisted.
If no repositories match, a FailedNotification will be created and enhanced with any
metadata extracted from the associated package (if present), then persisted.
:param unrouted: an UnroutedNotification object
:return: True if the notification was routed to a repository, False if there were no matches
"""
app.logger.debug(u"Routing - Notification:{y}".format(y=unrouted.id))
# first get the packaging system to load and retrieve all the metadata
# and match data from the content file (if it exists)
try:
metadata, pmd = packages.PackageManager.extract(unrouted.id, unrouted.packaging_format)
except packages.PackageException as e:
app.logger.debug(u"Routing - Notification:{y} failed with error '{x}'".format(y=unrouted.id, x=e.message))
raise RoutingException(e.message)
# extract the match data from the notification and combine it with the match data from the package
match_data = unrouted.match_data()
if pmd is not None:
match_data.merge(pmd)
# iterate through all the repository configs, collecting match provenance and
# id information
# FIXME: at the moment this puts all the provenance in memory and then writes it all
# in one go later. Probably that's OK, but it will depend on the number of fields the
# repository matches and the number of repositories as to how big this gets.
match_provenance = []
match_ids = []
try:
for rc in models.RepositoryConfig.scroll(page_size=10, keepalive="1m"):
prov = models.MatchProvenance()
prov.repository = rc.repository
prov.notification = unrouted.id
app.logger.debug(u"Routing - Notification:{y} matching against Repository:{x}".format(y=unrouted.id, x=rc.repository))
match(match_data, rc, prov)
if len(prov.provenance) > 0:
match_provenance.append(prov)
match_ids.append(rc.repository)
app.logger.debug(u"Routing - Notification:{y} successfully matched Repository:{x}".format(y=unrouted.id, x=rc.repository))
else:
app.logger.debug(u"Routing - Notification:{y} did not match Repository:{x}".format(y=unrouted.id, x=rc.repository))
except esprit.tasks.ScrollException as e:
app.logger.error(u"Routing - Notification:{y} failed with error '{x}'".format(y=unrouted.id, x=e.message))
raise RoutingException(e.message)
app.logger.debug(u"Routing - Notification:{y} matched to {x} repositories".format(y=unrouted.id, x=len(match_ids)))
# write all the match provenance out to the index (could be an empty list)
for p in match_provenance:
p.save()
app.logger.debug(u"Routing - Provenance:{z} written for Notification:{y} for match to Repisitory:{x}".format(x=p.repository, y=unrouted.id, z=p.id))
# if there are matches then the routing is successful, and we want to finalise the
# notification for the routed index and its content for download
if len(match_ids) > 0:
# repackage the content that came with the unrouted notification (if necessary) into
# the formats required by the repositories for which there was a match
pack_links = repackage(unrouted, match_ids)
# update the record with the information, and then
# write it to the index
routed = unrouted.make_routed()
for pl in pack_links:
routed.add_link(pl.get("url"), pl.get("type"), pl.get("format"), pl.get("access"), pl.get("packaging"))
routed.repositories = match_ids
routed.analysis_date = dates.now()
if metadata is not None:
enhance(routed, metadata)
links(routed)
routed.save()
app.logger.debug(u"Routing - Notification:{y} successfully routed".format(y=unrouted.id))
return True
else:
# log the failure
app.logger.error(u"Routing - Notification:{y} was not routed".format(y=unrouted.id))
# if config says so, convert the unrouted notification to a failed notification, enhance and save
# for later diagnosis
if app.config.get("KEEP_FAILED_NOTIFICATIONS", False):
failed = unrouted.make_failed()
failed.analysis_date = dates.now()
if metadata is not None:
enhance(failed, metadata)
failed.save()
app.logger.debug(u"Routing - Notification:{y} as stored as a Failed Notification".format(y=unrouted.id))
return False
# Note that we don't delete the unrouted notification here - that's for the caller to decide | JiscPER/jper | [
1,
4,
1,
14,
1430814969
] |
def enhance(routed, metadata):
"""
Enhance the routed notification with the extracted metadata
:param routed: a RoutedNotification whose metadata is to be enhanced
:param metadata: a NotificationMetadata object
:return:
"""
# some of the fields are easy - we just want to accept the existing
# value if it is set, otherwise take the value from the other metadata
accept_existing = [
"title", "version", "publisher", "source_name", "type",
"language", "publication_date", "date_accepted", "date_submitted",
"license"
]
for ae in accept_existing:
if getattr(routed, ae) is None and getattr(metadata, ae) is not None:
setattr(routed, ae, getattr(metadata, ae))
# add any new identifiers to the source identifiers
mis = metadata.source_identifiers
for id in mis:
# the API prevents us from adding duplicates, so just add them all and let the model handle it
routed.add_source_identifier(id.get("type"), id.get("id"))
# add any new identifiers
ids = metadata.identifiers
for id in ids:
routed.add_identifier(id.get("id"), id.get("type"))
# add any new authors, using a slightly complex merge strategy:
# 1. If both authors have identifiers and one matches, they are equivalent and missing name/affiliation/identifiers should be added
# 2. If one does not have identifiers, match by name.
# 3. If name matches, add any missing affiliation/identifiers
mas = metadata.authors
ras = routed.authors
for ma in mas:
merged = False
# first run through all the existing authors, and see if any of them merge
for ra in ras:
merged = _merge_entities(ra, ma, "name", other_properties=["affiliation"])
# if one merges, don't continue
if merged:
break
# if we didn't get a merge, add the author from the metadata
if not merged:
routed.add_author(ma)
# merge project entities in with the same rule set as above
mps = metadata.projects
rps = routed.projects
for mp in mps:
merged = False
# first run through all the existing projects, and see if any of them merge
for rp in rps:
merged = _merge_entities(rp, mp, "name", other_properties=["grant_number"])
# if one merges, don't continue
if merged:
break
# if we didn't get a merge, add the project from the metadata
if not merged:
routed.add_project(mp)
# add any new subjects
for s in metadata.subjects:
routed.add_subject(s) | JiscPER/jper | [
1,
4,
1,
14,
1430814969
] |
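The "keep the existing value, otherwise copy it from the extracted metadata" rule at the top of enhance() is a simple getattr/setattr loop; a stripped-down sketch with plain objects (not the actual notification models):

class Obj:
    def __init__(self, **kw):
        self.__dict__.update(kw)

routed = Obj(title=None, publisher="Existing Press", language=None)
metadata = Obj(title="Extracted Title", publisher="Other Press", language="en")

for field in ["title", "publisher", "language"]:
    if getattr(routed, field) is None and getattr(metadata, field) is not None:
        setattr(routed, field, getattr(metadata, field))

print(routed.title, routed.publisher, routed.language)
# Extracted Title Existing Press en  -> existing values win, gaps are filled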
def repackage(unrouted, repo_ids):
"""
Repackage any binary content associated with the notification for consumption by
the repositories identified by the list of repo_ids.
Note that this takes an unrouted notification, because of the point in the routing workflow at
which it is invoked, although in reality you could also pass it any of the other fully fledged
notification objects such as RoutedNotification
This function will check each account associated with the repository id for the package formats
that they will accept for deposit. For each format, we look for a route to convert from
the source format that the provider gave us for the notification, and then issue a package convert
request via the PackageManager to the best possible format for the repository.
For each successful conversion the notification receives a new link attribute containing
identification information for the converted package.
:param unrouted: notification object
:param repo_ids: list of repository account identifiers
:return: a list of the format conversions that were carried out
"""
# if there's no package format, there's no repackaging to be done
if unrouted.packaging_format is None:
return []
pm = packages.PackageFactory.converter(unrouted.packaging_format)
conversions = []
for rid in repo_ids:
acc = models.Account.pull(rid)
if acc is None:
# realistically this shouldn't happen, but if it does just carry on
app.logger.warn(u"Repackaging - no account with id {x}; carrying on regardless".format(x=rid))
continue
for pack in acc.packaging:
# if it's already in the conversion list, job done
if pack in conversions:
break
# otherwise, if the package manager can convert it, also job done
if pm.convertible(pack):
conversions.append(pack)
break
if len(conversions) == 0:
return []
# at this point we have a de-duplicated list of all formats that we need to convert
# the package to, that the package is capable of converting itself into
#
# this pulls everything from remote storage, runs the conversion, and then synchronises
# back to remote storage
done = packages.PackageManager.convert(unrouted.id, unrouted.packaging_format, conversions)
links = []
for d in done:
with app.test_request_context():
burl = app.config.get("BASE_URL")
if burl.endswith("/"):
burl = burl[:-1]
url = burl + url_for("webapi.retrieve_content", notification_id=unrouted.id, filename=d[2])
links.append({
"type": "package",
"format" : "application/zip",
"access" : "router",
"url" : url,
"packaging" : d[0]
})
return links | JiscPER/jper | [
1,
4,
1,
14,
1430814969
] |
def domain_url(domain, url):
"""
normalise the domain: strip prefixes and URL paths. If either ends with the other, it is a match
:param domain: domain string
:param url: any url
:return: provenance message string if match, False if not
"""
# keep a copy of these for the provenance reporting
od = domain
ou = url
# strip the common possible prefixes
prefixes = ["http://", "https://"]
for p in prefixes:
if domain.startswith(p):
domain = domain[len(p):]
if url.startswith(p):
url = url[len(p):]
# strip everything after a path separator
domain = domain.split("/")[0]
url = url.split("/")[0]
# now do the standard normalisation
domain = _normalise(domain)
url = _normalise(url)
if domain.endswith(url) or url.endswith(domain):
return u"Domain matched URL: '{d}' and '{u}' have the same root domains".format(d=od, u=ou)
return False | JiscPER/jper | [
1,
4,
1,
14,
1430814969
] |
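A rough, self-contained sketch of the matching rule described above (strip scheme prefixes, drop the path, compare root domains); reducing _normalise to lower-casing is an assumption about the repository's helper.

def domains_match(domain, url):
    # strip scheme prefixes, drop any path, then compare root domains
    for prefix in ("http://", "https://"):
        if domain.startswith(prefix):
            domain = domain[len(prefix):]
        if url.startswith(prefix):
            url = url[len(prefix):]
    domain = domain.split("/")[0].lower()
    url = url.split("/")[0].lower()
    return domain.endswith(url) or url.endswith(domain)

print(domains_match("https://www.example.ac.uk", "http://example.ac.uk/repo/item/1"))  # True
print(domains_match("example.ac.uk", "other.ac.uk"))                                   # False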
def author_match(author_obj_1, author_obj_2):
"""
Match two author objects against each other
:param author_obj_1: first author object
:param author_obj_2: second author object
:return: provenance message string if match, False if not
"""
t1 = author_obj_1.get("type", "")
i1 = _normalise(author_obj_1.get("id", ""))
t2 = author_obj_2.get("type", "")
i2 = _normalise(author_obj_2.get("id", ""))
if t1 == t2 and i1 == i2:
return u"Author ids matched: {t1} '{i1}' is the same as {t2} '{i2}'".format(t1=t1, i1=author_obj_1.get("id", ""), t2=t2, i2=author_obj_2.get("id", ""))
return False | JiscPER/jper | [
1,
4,
1,
14,
1430814969
] |
def postcode_match(pc1, pc2):
"""
Normalise postcodes: strip whitespace and lowercase, then exact match required
:param pc1: first postcode
:param pc2: second postcode
:return: provenance message string if match, False if not
"""
# first do the usual normalisation
npc1 = _normalise(pc1)
npc2 = _normalise(pc2)
# then go the final step and remove all the spaces
npc1 = npc1.replace(" ", "")
npc2 = npc2.replace(" ", "")
if npc1 == npc2:
return u"Postcodes matched: '{a}' is the same as '{b}'".format(a=pc1, b=pc2)
return False | JiscPER/jper | [
1,
4,
1,
14,
1430814969
] |
def exact(s1, s2):
"""
normalised s1 must be identical to normalised s2
:param s1: first string
:param s2: second string
:return: provenance message string if match, False if not
"""
# keep a copy of these for the provenance reporting
os1 = s1
os2 = s2
# normalise the strings
s1 = _normalise(s1)
s2 = _normalise(s2)
if s1 == s2:
return u"'{a}' is an exact match with '{b}'".format(a=os1, b=os2)
return False | JiscPER/jper | [
1,
4,
1,
14,
1430814969
] |
def __init__(self, course_id=None, partner_id=None, group_id=None,
export_type=None, anonymity_level=None,
statement_of_purpose=None, schema_names=None,
interval=None, ignore_existing=None, **kwargs):
self._course_id = course_id
if partner_id is not None:
self._partner_id = int(partner_id)
else:
self._partner_id = partner_id
self._group_id = group_id
self._export_type = export_type
self._anonymity_level = anonymity_level
self._statement_of_purpose = statement_of_purpose
self._schema_names = schema_names
self._interval = interval
self._ignore_existing = ignore_existing | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def from_args(cls, **kwargs):
"""
Create an ExportRequest object using the parameters required. Performs
course_id/partner_id inference if possible.
:param kwargs:
:return export_request: ExportRequest
"""
if kwargs.get('course_slug') and not kwargs.get('course_id'):
kwargs['course_id'] = utils.lookup_course_id_by_slug(
kwargs['course_slug'])
elif kwargs.get('partner_short_name') and not kwargs.get('partner_id'):
kwargs['partner_id'] = utils.lookup_partner_id_by_short_name(
kwargs['partner_short_name'])
if kwargs.get('user_id_hashing'):
if kwargs['user_id_hashing'] == 'linked':
kwargs['anonymity_level'] = ANONYMITY_LEVEL_COORDINATOR
elif kwargs['user_id_hashing'] == 'isolated':
kwargs['anonymity_level'] = ANONYMITY_LEVEL_ISOLATED
return cls(**kwargs) | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def from_json(cls, json_request):
"""
Deserialize ExportRequest from json object.
:param json_request:
:return export_request: ExportRequest
"""
kwargs = {}
request_scope = json_request['scope']
request_scope_context = request_scope['typeName']
if request_scope_context == 'courseContext':
kwargs['course_id'] = request_scope['definition']['courseId']
elif request_scope_context == 'partnerContext':
kwargs['partner_id'] = \
request_scope['definition']['partnerId']['maestroId']
elif request_scope_context == 'groupContext':
kwargs['group_id'] = request_scope['definition']['groupId']
if json_request.get('interval'):
kwargs['interval'] = [
json_request['interval']['start'],
json_request['interval']['end']
]
return cls(
export_type=json_request.get('exportType'),
anonymity_level=json_request.get('anonymityLevel'),
statement_of_purpose=json_request.get('statementOfPurpose'),
schema_names=json_request.get('schemaNames'),
ignore_existing=json_request.get('ignoreExisting'),
**kwargs) | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
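A hypothetical payload illustrating the shape from_json expects, with field names inferred from the parsing code above and obvious placeholder values (the real API response may carry more fields); this assumes the ExportRequest class above is importable.

sample = {
    "scope": {
        "typeName": "courseContext",
        "definition": {"courseId": "<course-id>"},
    },
    "exportType": "<export-type>",          # placeholder values, not real API constants
    "anonymityLevel": "<anonymity-level>",
    "statementOfPurpose": "research into learner outcomes",
    "schemaNames": ["demographics", "users"],
    "interval": {"start": "2016-01-01", "end": "2016-06-01"},
}

request = ExportRequest.from_json(sample)
print(request.course_id, request.interval)  # <course-id> ['2016-01-01', '2016-06-01']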
def course_id(self):
return self._course_id | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def partner_id(self):
return self._partner_id | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def export_type(self):
return self._export_type | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def export_type_display(self):
if self._export_type == EXPORT_TYPE_GRADEBOOK:
return 'GRADEBOOK'
elif self._export_type == EXPORT_TYPE_CLICKSTREAM:
return 'CLICKSTREAM'
elif self._export_type == EXPORT_TYPE_TABLES:
return 'TABLES'
else:
return self._export_type | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def anonymity_level(self):
return self._anonymity_level | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def formatted_anonymity_level(self):
if self.anonymity_level == ANONYMITY_LEVEL_COORDINATOR:
return 'Linked'
elif self.anonymity_level == ANONYMITY_LEVEL_ISOLATED:
return 'Isolated'
else:
return 'Unknown' | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def statement_of_purpose(self):
return self._statement_of_purpose | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def interval(self):
return self._interval | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def ignore_existing(self):
return self._ignore_existing | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def scope_context(self):
"""
Context for this ExportRequest, assume that only one identifier for
partner/course/group is defined.
"""
if self._course_id:
return 'COURSE'
elif self._partner_id:
return 'PARTNER'
elif self._group_id:
return 'GROUP'
else:
return None | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def scope_id(self):
"""
Identifier for the scope, assume that only one of course/partner/group
is defined for a valid request.
:return scope_id:
"""
return self._course_id or self._partner_id or self._group_id | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def scope_name(self):
"""
Human-readable name for this scope context: course slugs for courses,
partner short names for partners, but only group ids for groups (the
groups API is not open).
:return:
"""
if self._course_id:
return utils.lookup_course_slug_by_id(self._course_id)
elif self._partner_id:
return utils.lookup_partner_short_name_by_id(self._partner_id)
elif self._group_id:
return self._group_id
else:
return 'UNKNOWN' | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def schema_names(self):
return self._schema_names | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def schema_names_display(self):
"""
Display only property for schemas names.
:return schemas:
"""
if self._schema_names:
if set(self._schema_names) == set(SCHEMA_NAMES):
return 'all'
else:
return ','.join(self._schema_names)
else:
return None | coursera/courseraresearchexports | [
19,
11,
19,
12,
1472053751
] |
def _get_variable_value(variable):
return BuiltIn().replace_variables('${%s}' % variable) | ox-it/talks.ox | [
5,
5,
5,
58,
1400079086
] |
def __init__(self, path):
self.path = path | ox-it/talks.ox | [
5,
5,
5,
58,
1400079086
] |
def url(self):
return "%s%s" % (_get_variable_value('HOST'), self.path) | ox-it/talks.ox | [
5,
5,
5,
58,
1400079086
] |
def of(klass, model_instance):
if isinstance(model_instance, basestring):
model_instance = _get_variable_value(model_instance)
return klass(model_instance.get_absolute_url()) | ox-it/talks.ox | [
5,
5,
5,
58,
1400079086
] |
def __init__(self, locator):
self.locator = locator | ox-it/talks.ox | [
5,
5,
5,
58,
1400079086
] |
def is_css(self):
return self.locator.startswith('css=') | ox-it/talks.ox | [
5,
5,
5,
58,
1400079086
] |
def is_xpath(self):
return self.locator.startswith('//') | ox-it/talks.ox | [
5,
5,
5,
58,
1400079086
] |
def in_(self, other):
other = _get_variable_value(other)
assert self.is_css == other.is_css, "Both locators must be of same type"
if self.is_css:
return Element(other.locator + " " + self.locator[len('css='):])
elif self.is_xpath:
return Element(other.locator + self.locator) # FIXME might fail for advanced xpath | ox-it/talks.ox | [
5,
5,
5,
58,
1400079086
] |
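A small sketch of the locator-composition idea behind in_(), independent of the Robot Framework variable lookup; the Element class here is a simplified stand-in that only handles css= locators.

class Element(object):
    def __init__(self, locator):
        self.locator = locator

    @property
    def is_css(self):
        return self.locator.startswith('css=')

    def in_(self, other):
        # nest a css child locator inside a css parent locator
        assert self.is_css and other.is_css, "both locators must be css= for this sketch"
        return Element(other.locator + " " + self.locator[len('css='):])

row = Element('css=tr.result')
table = Element('css=table#talks')
print(row.in_(table).locator)  # css=table#talks tr.result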
def mock_pipeline_service_create():
with mock.patch.object(
pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
) as mock_create_training_pipeline:
mock_create_training_pipeline.return_value = gca_training_pipeline.TrainingPipeline(
name=_TEST_PIPELINE_RESOURCE_NAME,
state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
)
yield mock_create_training_pipeline | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
def mock_pipeline_service_get():
with mock.patch.object(
pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
) as mock_get_training_pipeline:
mock_get_training_pipeline.return_value = gca_training_pipeline.TrainingPipeline(
name=_TEST_PIPELINE_RESOURCE_NAME,
state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
training_task_metadata={
"evaluatedDataItemsBigqueryUri": _TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI
},
)
yield mock_get_training_pipeline | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
def mock_pipeline_service_create_and_get_with_fail():
with mock.patch.object(
pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
) as mock_create_training_pipeline:
mock_create_training_pipeline.return_value = gca_training_pipeline.TrainingPipeline(
name=_TEST_PIPELINE_RESOURCE_NAME,
state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
)
with mock.patch.object(
pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
) as mock_get_training_pipeline:
mock_get_training_pipeline.return_value = gca_training_pipeline.TrainingPipeline(
name=_TEST_PIPELINE_RESOURCE_NAME,
state=gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
)
yield mock_create_training_pipeline, mock_get_training_pipeline | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
def mock_model_service_get():
with mock.patch.object(
model_service_client.ModelServiceClient, "get_model"
) as mock_get_model:
mock_get_model.return_value = gca_model.Model()
yield mock_get_model | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
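These generator functions read like pytest fixtures built on mock.patch.object (the @pytest.fixture decorators were presumably stripped in this extract). A minimal standalone example of the same pattern:

from unittest import mock

import pytest


class Client:
    def get_model(self, name):
        raise RuntimeError("would hit the network")


@pytest.fixture
def mock_get_model():
    with mock.patch.object(Client, "get_model") as patched:
        patched.return_value = {"name": "stub-model"}
        yield patched  # the patch is undone when the fixture finalises


def test_uses_stub(mock_get_model):
    assert Client().get_model("anything") == {"name": "stub-model"}
    mock_get_model.assert_called_once_with("anything")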
def mock_dataset_time_series():
ds = mock.MagicMock(datasets.TimeSeriesDataset)
ds.name = _TEST_DATASET_NAME
ds._latest_future = None
ds._exception = None
ds._gca_resource = gca_dataset.Dataset(
display_name=_TEST_DATASET_DISPLAY_NAME,
metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TIMESERIES,
labels={},
name=_TEST_DATASET_NAME,
metadata={},
)
return ds | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
def mock_dataset_nontimeseries():
ds = mock.MagicMock(datasets.ImageDataset)
ds.name = _TEST_DATASET_NAME
ds._latest_future = None
ds._exception = None
ds._gca_resource = gca_dataset.Dataset(
display_name=_TEST_DATASET_DISPLAY_NAME,
metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTIMESERIES,
labels={},
name=_TEST_DATASET_NAME,
metadata={},
)
return ds | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
def setup_method(self):
importlib.reload(initializer)
importlib.reload(aiplatform) | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |
def test_run_call_pipeline_service_create(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_time_series,
mock_model_service_get,
sync, | googleapis/python-aiplatform | [
306,
205,
306,
52,
1600875819
] |