file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k)
---|---|---|---|
metropolis.py
|
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import numpy.random as nr
import scipy.linalg
import theano
import pymc3 as pm
from pymc3.distributions import draw_values
from pymc3.step_methods.arraystep import (
ArrayStep,
ArrayStepShared,
Competence,
PopulationArrayStepShared,
metrop_select,
)
from pymc3.theanof import floatX
__all__ = [
"Metropolis",
"DEMetropolis",
"DEMetropolisZ",
"BinaryMetropolis",
"BinaryGibbsMetropolis",
"CategoricalGibbsMetropolis",
"NormalProposal",
"CauchyProposal",
"LaplaceProposal",
"PoissonProposal",
"MultivariateNormalProposal",
]
# Available proposal distributions for Metropolis
class Proposal:
def __init__(self, s):
self.s = s
class NormalProposal(Proposal):
def __call__(self):
return nr.normal(scale=self.s)
class UniformProposal(Proposal):
def __call__(self):
return nr.uniform(low=-self.s, high=self.s, size=len(self.s))
class CauchyProposal(Proposal):
def __call__(self):
return nr.standard_cauchy(size=np.size(self.s)) * self.s
class LaplaceProposal(Proposal):
def __call__(self):
size = np.size(self.s)
return (nr.standard_exponential(size=size) - nr.standard_exponential(size=size)) * self.s
class PoissonProposal(Proposal):
def __call__(self):
return nr.poisson(lam=self.s, size=np.size(self.s)) - self.s
class MultivariateNormalProposal(Proposal):
def __init__(self, s):
n, m = s.shape
if n != m:
raise ValueError("Covariance matrix is not symmetric.")
self.n = n
self.chol = scipy.linalg.cholesky(s, lower=True)
def __call__(self, num_draws=None):
if num_draws is not None:
b = np.random.randn(self.n, num_draws)
return np.dot(self.chol, b).T
else:
b = np.random.randn(self.n)
return np.dot(self.chol, b)
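# Illustrative sketch (not part of the original module): a proposal object is
# parameterized with a scale `s` and, when called, returns zero-mean deviates
# that the step method multiplies by its current `scaling`, e.g.:
#   prop = NormalProposal(np.ones(3))
#   delta = prop() * 0.5            # one 3-dimensional proposal increment
#   mv = MultivariateNormalProposal(np.eye(3))
#   draws = mv(num_draws=10)        # shape (10, 3), correlated via the Cholesky factor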
class Metropolis(ArrayStepShared):
"""
Metropolis-Hastings sampling step
Parameters
----------
vars: list
List of variables for sampler
S: standard deviation or covariance matrix
Some measure of variance to parameterize proposal distribution
proposal_dist: function
Function that returns zero-mean deviates when parameterized with
S (and n). Defaults to normal.
scaling: scalar or array
Initial scale factor for proposal. Defaults to 1.
tune: bool
Flag for tuning. Defaults to True.
tune_interval: int
The frequency of tuning. Defaults to 100 iterations.
model: PyMC Model
Optional model for sampling step. Defaults to None (taken from context).
mode: string or `Mode` instance.
compilation mode passed to Theano functions
"""
name = "metropolis"
default_blocked = False
generates_stats = True
stats_dtypes = [
{
"accept": np.float64,
"accepted": np.bool,
"tune": np.bool,
"scaling": np.float64,
}
]
def __init__(
self,
vars=None,
S=None,
proposal_dist=None,
scaling=1.0,
tune=True,
tune_interval=100,
model=None,
mode=None,
**kwargs
):
model = pm.modelcontext(model)
if vars is None:
vars = model.vars
vars = pm.inputvars(vars)
if S is None:
S = np.ones(sum(v.dsize for v in vars))
if proposal_dist is not None:
self.proposal_dist = proposal_dist(S)
elif S.ndim == 1:
self.proposal_dist = NormalProposal(S)
elif S.ndim == 2:
self.proposal_dist = MultivariateNormalProposal(S)
else:
raise ValueError("Invalid rank for variance: %s" % S.ndim)
self.scaling = np.atleast_1d(scaling).astype("d")
self.tune = tune
self.tune_interval = tune_interval
self.steps_until_tune = tune_interval
self.accepted = 0
# Determine type of variables
self.discrete = np.concatenate(
[[v.dtype in pm.discrete_types] * (v.dsize or 1) for v in vars]
)
self.any_discrete = self.discrete.any()
self.all_discrete = self.discrete.all()
# remember initial settings before tuning so they can be reset
self._untuned_settings = dict(
scaling=self.scaling, steps_until_tune=tune_interval, accepted=self.accepted
)
self.mode = mode
shared = pm.make_shared_replacements(vars, model)
self.delta_logp = delta_logp(model.logpt, vars, shared)
super().__init__(vars, shared)
def reset_tuning(self):
"""Resets the tuned sampler parameters to their initial values."""
for attr, initial_value in self._untuned_settings.items():
setattr(self, attr, initial_value)
return
def astep(self, q0):
if not self.steps_until_tune and self.tune:
# Tune scaling parameter
self.scaling = tune(self.scaling, self.accepted / float(self.tune_interval))
# Reset counter
self.steps_until_tune = self.tune_interval
self.accepted = 0
delta = self.proposal_dist() * self.scaling
if self.any_discrete:
if self.all_discrete:
delta = np.round(delta, 0).astype("int64")
q0 = q0.astype("int64")
q = (q0 + delta).astype("int64")
else:
delta[self.discrete] = np.round(delta[self.discrete], 0)
q = q0 + delta
else:
q = floatX(q0 + delta)
accept = self.delta_logp(q, q0)
q_new, accepted = metrop_select(accept, q, q0)
self.accepted += accepted
self.steps_until_tune -= 1
stats = {
"tune": self.tune,
"scaling": self.scaling,
"accept": np.exp(accept),
"accepted": accepted,
}
return q_new, [stats]
@staticmethod
def competence(var, has_grad):
return Competence.COMPATIBLE
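# Minimal usage sketch for the Metropolis step above (assumes PyMC3's standard
# sampling API; the model and variable names are illustrative):
#   with pm.Model():
#       x = pm.Normal("x", mu=0.0, sigma=1.0)
#       step = Metropolis(tune_interval=100)
#       trace = pm.sample(2000, step=step, chains=2)
# With no `vars` given, every model variable is assigned to the step and
# proposals default to NormalProposal with unit scale.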
def tune(scale, acc_rate):
"""
Tunes the scaling parameter for the proposal distribution
according to the acceptance rate over the last tune_interval:
Rate Variance adaptation
---- -------------------
<0.001 x 0.1
<0.05 x 0.5
<0.2 x 0.9
>0.5 x 1.1
>0.75 x 2
>0.95 x 10
"""
if acc_rate < 0.001:
# reduce by 90 percent
return scale * 0.1
elif acc_rate < 0.05:
# reduce by 50 percent
return scale * 0.5
elif acc_rate < 0.2:
# reduce by ten percent
return scale * 0.9
elif acc_rate > 0.95:
# increase by factor of ten
|
elif acc_rate > 0.75:
# increase by double
return scale * 2.0
elif acc_rate > 0.5:
# increase by ten percent
return scale * 1.1
return scale
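# Worked example of the tuning rule above (illustrative): an acceptance rate of
# 0.04 over the last interval halves the scale, while a rate of 0.96 multiplies
# it by ten:
#   tune(np.array([1.0]), 0.04)   # -> array([0.5])
#   tune(np.array([1.0]), 0.96)   # -> array([10.0])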
class BinaryMetropolis(ArrayStep):
"""Metropolis-Hastings optimized for binary variables
Parameters
----------
vars: list
List of variables for sampler
scaling: scalar or array
Initial scale factor for proposal. Defaults to 1.
tune: bool
Flag for tuning. Defaults to True.
tune_interval: int
The frequency of tuning. Defaults to 100 iterations.
model: PyMC Model
Optional model for sampling step. Defaults to None (taken from context).
"""
name = "binary_metropolis"
generates_stats = True
stats_dtypes = [
{
"accept": np.float64,
"tune": np.bool,
"p_jump": np.float64,
}
]
def __init__(self, vars, scaling=1.0, tune=True, tune_interval=100, model=None):
model = pm.modelcontext(model)
self.scaling = scaling
self.tune = tune
self.tune_interval = tune_interval
self.steps_until_tune = tune_interval
self.accepted = 0
if not all([v.dtype in pm.discrete_types for v in vars]):
raise ValueError("All variables must be Bernoulli for BinaryMetropolis")
super().__init__(vars, [model.fastlogp])
def astep(self, q0, logp):
# Convert adaptive_scale_factor to a jump probability
p_jump = 1.0 - 0.5 ** self.scaling
rand_array = nr.random(q0.shape)
q = np.copy(q0)
# Locations where switches occur, according to p_jump
switch_locs = rand_array < p_jump
q[switch_locs] = True - q[switch_locs]
accept = logp(q) - logp(q0)
q_new, accepted = metrop_select(accept, q, q0)
self.accepted += accepted
stats = {
"tune": self.tune,
"accept": np.exp(accept),
"p_jump": p_jump,
}
return q_new, [stats]
@staticmethod
def competence(var):
"""
BinaryMetropolis is only suitable for binary (bool)
and Categorical variables with k=2.
"""
distribution = getattr(var.distribution, "parent_dist", var.distribution)
if isinstance(distribution, pm.Bernoulli) or (var.dtype in pm.bool_types):
return Competence.COMPATIBLE
elif isinstance(distribution, pm.Categorical) and (distribution.k == 2):
return Competence.COMPATIBLE
return Competence.INCOMPATIBLE
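# Sketch of the scaling-to-jump-probability mapping used in BinaryMetropolis.astep
# above (illustrative arithmetic): p_jump = 1 - 0.5**scaling, so scaling=1.0
# flips each selected coordinate with probability 0.5, and tuning the scaling
# down makes flips rarer:
#   1.0 - 0.5 ** 1.0    # 0.5
#   1.0 - 0.5 ** 0.25   # ~0.16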
class BinaryGibbsMetropolis(ArrayStep):
"""A Metropolis-within-Gibbs step method optimized for binary variables
Parameters
----------
vars: list
List of variables for sampler
order: list or 'random'
List of integers indicating the Gibbs update order
e.g., [0, 2, 1, ...]. Default is random
transit_p: float
The diagonal of the transition kernel. A value > .5 gives anticorrelated proposals,
which results in more efficient antithetical sampling.
model: PyMC Model
Optional model for sampling step. Defaults to None (taken from context).
"""
name = "binary_gibbs_metropolis"
def __init__(self, vars, order="random", transit_p=0.8, model=None):
model = pm.modelcontext(model)
# transition probabilities
self.transit_p = transit_p
self.dim = sum(v.dsize for v in vars)
if order == "random":
self.shuffle_dims = True
self.order = list(range(self.dim))
else:
if sorted(order) != list(range(self.dim)):
raise ValueError("Argument 'order' has to be a permutation")
self.shuffle_dims = False
self.order = order
if not all([v.dtype in pm.discrete_types for v in vars]):
raise ValueError("All variables must be binary for BinaryGibbsMetropolis")
super().__init__(vars, [model.fastlogp])
def astep(self, q0, logp):
order = self.order
if self.shuffle_dims:
nr.shuffle(order)
q = np.copy(q0)
logp_curr = logp(q)
for idx in order:
# No need to do metropolis update if the same value is proposed,
# as you will get the same value regardless of whether it is accepted or rejected
if nr.rand() < self.transit_p:
curr_val, q[idx] = q[idx], True - q[idx]
logp_prop = logp(q)
q[idx], accepted = metrop_select(logp_prop - logp_curr, q[idx], curr_val)
if accepted:
logp_curr = logp_prop
return q
@staticmethod
def competence(var):
"""
BinaryGibbsMetropolis is only suitable for Bernoulli
and Categorical variables with k=2.
"""
distribution = getattr(var.distribution, "parent_dist", var.distribution)
if isinstance(distribution, pm.Bernoulli) or (var.dtype in pm.bool_types):
return Competence.IDEAL
elif isinstance(distribution, pm.Categorical) and (distribution.k == 2):
return Competence.IDEAL
return Competence.INCOMPATIBLE
class CategoricalGibbsMetropolis(ArrayStep):
"""A Metropolis-within-Gibbs step method optimized for categorical variables.
This step method works for Bernoulli variables as well, but it is not
optimized for them the way BinaryGibbsMetropolis is. The step method supports
two types of proposals: A uniform proposal and a proportional proposal,
which was introduced by Liu in his 1996 technical report
"Metropolized Gibbs Sampler: An Improvement".
"""
name = "categorical_gibbs_metropolis"
def __init__(self, vars, proposal="uniform", order="random", model=None):
model = pm.modelcontext(model)
vars = pm.inputvars(vars)
dimcats = []
# The above variable is a list of pairs (aggregate dimension, number
# of categories). For example, if vars = [x, y] with x being a 2-D
# variable with M categories and y being a 3-D variable with N
# categories, we will have dimcats = [(0, M), (1, M), (2, N), (3, N), (4, N)].
for v in vars:
distr = getattr(v.distribution, "parent_dist", v.distribution)
if isinstance(distr, pm.Categorical):
k = draw_values([distr.k])[0]
elif isinstance(distr, pm.Bernoulli) or (v.dtype in pm.bool_types):
k = 2
else:
raise ValueError(
"All variables must be categorical or binary" + "for CategoricalGibbsMetropolis"
)
start = len(dimcats)
dimcats += [(dim, k) for dim in range(start, start + v.dsize)]
if order == "random":
self.shuffle_dims = True
self.dimcats = dimcats
else:
if sorted(order) != list(range(len(dimcats))):
raise ValueError("Argument 'order' has to be a permutation")
self.shuffle_dims = False
self.dimcats = [dimcats[j] for j in order]
if proposal == "uniform":
self.astep = self.astep_unif
elif proposal == "proportional":
# Use the optimized "Metropolized Gibbs Sampler" described in Liu96.
self.astep = self.astep_prop
else:
raise ValueError("Argument 'proposal' should either be 'uniform' or 'proportional'")
super().__init__(vars, [model.fastlogp])
def astep_unif(self, q0, logp):
dimcats = self.dimcats
if self.shuffle_dims:
nr.shuffle(dimcats)
q = np.copy(q0)
logp_curr = logp(q)
for dim, k in dimcats:
curr_val, q[dim] = q[dim], sample_except(k, q[dim])
logp_prop = logp(q)
q[dim], accepted = metrop_select(logp_prop - logp_curr, q[dim], curr_val)
if accepted:
logp_curr = logp_prop
return q
def astep_prop(self, q0, logp):
dimcats = self.dimcats
if self.shuffle_dims:
nr.shuffle(dimcats)
q = np.copy(q0)
logp_curr = logp(q)
for dim, k in dimcats:
logp_curr = self.metropolis_proportional(q, logp, logp_curr, dim, k)
return q
def metropolis_proportional(self, q, logp, logp_curr, dim, k):
given_cat = int(q[dim])
log_probs = np.zeros(k)
log_probs[given_cat] = logp_curr
candidates = list(range(k))
for candidate_cat in candidates:
if candidate_cat != given_cat:
q[dim] = candidate_cat
log_probs[candidate_cat] = logp(q)
probs = softmax(log_probs)
prob_curr, probs[given_cat] = probs[given_cat], 0.0
probs /= 1.0 - prob_curr
proposed_cat = nr.choice(candidates, p=probs)
accept_ratio = (1.0 - prob_curr) / (1.0 - probs[proposed_cat])
if not np.isfinite(accept_ratio) or nr.uniform() >= accept_ratio:
q[dim] = given_cat
return logp_curr
q[dim] = proposed_cat
return log_probs[proposed_cat]
@staticmethod
def competence(var):
"""
CategoricalGibbsMetropolis is only suitable for Bernoulli and
Categorical variables.
"""
distribution = getattr(var.distribution, "parent_dist", var.distribution)
if isinstance(distribution, pm.Categorical):
if distribution.k > 2:
return Competence.IDEAL
return Competence.COMPATIBLE
elif isinstance(distribution, pm.Bernoulli) or (var.dtype in pm.bool_types):
return Competence.COMPATIBLE
return Competence.INCOMPATIBLE
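# Sketch of the proportional ("Metropolized Gibbs", Liu 1996) proposal used in
# metropolis_proportional above (illustrative, standalone numbers):
#   log_probs = np.array([-1.0, -2.0, -0.5])   # conditional log-probs of the k categories
#   probs = softmax(log_probs)                 # normalised probabilities
#   # the current category is excluded and the rest renormalised before drawing,
#   # and the move is accepted with ratio (1 - p_current) / (1 - p_proposed).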
class DEMetropolis(PopulationArrayStepShared):
"""
Differential Evolution Metropolis sampling step.
Parameters
----------
lamb: float
Lambda parameter of the DE proposal mechanism. Defaults to 2.38 / sqrt(2 * ndim)
vars: list
List of variables for sampler
S: standard deviation or covariance matrix
Some measure of variance to parameterize proposal distribution
proposal_dist: function
Function that returns zero-mean deviates when parameterized with
S (and n). Defaults to Uniform(-S,+S).
scaling: scalar or array
Initial scale factor for epsilon. Defaults to 0.001
tune: str
Which hyperparameter to tune. Defaults to None, but can also be 'scaling' or 'lambda'.
tune_interval: int
The frequency of tuning. Defaults to 100 iterations.
model: PyMC Model
Optional model for sampling step. Defaults to None (taken from context).
mode: string or `Mode` instance.
compilation mode passed to Theano functions
References
----------
.. [Braak2006] Cajo C.F. ter Braak (2006).
A Markov Chain Monte Carlo version of the genetic algorithm
Differential Evolution: easy Bayesian computing for real parameter spaces.
Statistics and Computing
`link <https://doi.org/10.1007/s11222-006-8769-1>`__
"""
name = "DEMetropolis"
default_blocked = True
generates_stats = True
stats_dtypes = [
{
"accept": np.float64,
"accepted": np.bool,
"tune": np.bool,
"scaling": np.float64,
"lambda": np.float64,
}
]
def __init__(
self,
vars=None,
S=None,
proposal_dist=None,
lamb=None,
scaling=0.001,
tune=None,
tune_interval=100,
model=None,
mode=None,
**kwargs
):
model = pm.modelcontext(model)
if vars is None:
vars = model.cont_vars
vars = pm.inputvars(vars)
if S is None:
S = np.ones(model.ndim)
if proposal_dist is not None:
self.proposal_dist = proposal_dist(S)
else:
self.proposal_dist = UniformProposal(S)
self.scaling = np.atleast_1d(scaling).astype("d")
if lamb is None:
# default to the optimal lambda for normally distributed targets
lamb = 2.38 / np.sqrt(2 * model.ndim)
self.lamb = float(lamb)
if tune not in {None, "scaling", "lambda"}:
raise ValueError('The parameter "tune" must be one of {None, scaling, lambda}')
self.tune = tune
self.tune_interval = tune_interval
self.steps_until_tune = tune_interval
self.accepted = 0
self.mode = mode
shared = pm.make_shared_replacements(vars, model)
self.delta_logp = delta_logp(model.logpt, vars, shared)
super().__init__(vars, shared)
def astep(self, q0):
if not self.steps_until_tune and self.tune:
if self.tune == "scaling":
self.scaling = tune(self.scaling, self.accepted / float(self.tune_interval))
elif self.tune == "lambda":
self.lamb = tune(self.lamb, self.accepted / float(self.tune_interval))
# Reset counter
self.steps_until_tune = self.tune_interval
self.accepted = 0
epsilon = self.proposal_dist() * self.scaling
# differential evolution proposal
# select two other chains
ir1, ir2 = np.random.choice(self.other_chains, 2, replace=False)
r1 = self.bij.map(self.population[ir1])
r2 = self.bij.map(self.population[ir2])
# propose a jump
q = floatX(q0 + self.lamb * (r1 - r2) + epsilon)
accept = self.delta_logp(q, q0)
q_new, accepted = metrop_select(accept, q, q0)
self.accepted += accepted
self.steps_until_tune -= 1
stats = {
"tune": self.tune,
"scaling": self.scaling,
"lambda": self.lamb,
"accept": np.exp(accept),
"accepted": accepted,
}
return q_new, [stats]
@staticmethod
def competence(var, has_grad):
if var.dtype in pm.discrete_types:
return Competence.INCOMPATIBLE
return Competence.COMPATIBLE
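# The DE proposal in DEMetropolis.astep above combines the states of two
# randomly chosen other chains with a small noise term (illustrative, shapes only):
#   q_new = q0 + lamb * (r1 - r2) + epsilon
# where lamb defaults to 2.38 / sqrt(2 * ndim) and epsilon is drawn from
# UniformProposal(S) scaled by `scaling` (0.001 by default).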
class DEMetropolisZ(ArrayStepShared):
"""
Adaptive Differential Evolution Metropolis sampling step that uses the past to inform jumps.
Parameters
----------
lamb: float
Lambda parameter of the DE proposal mechanism. Defaults to 2.38 / sqrt(2 * ndim)
vars: list
List of variables for sampler
S: standard deviation or covariance matrix
Some measure of variance to parameterize proposal distribution
proposal_dist: function
Function that returns zero-mean deviates when parameterized with
S (and n). Defaults to Uniform(-S,+S).
scaling: scalar or array
Initial scale factor for epsilon. Defaults to 0.001
tune: str
Which hyperparameter to tune. Defaults to 'lambda', but can also be 'scaling' or None.
tune_interval: int
The frequency of tuning. Defaults to 100 iterations.
tune_drop_fraction: float
Fraction of tuning steps that will be removed from the sampler's history when the tuning ends.
Defaults to 0.9 - keeping the last 10% of tuning steps for good mixing while removing 90% of
potentially unconverged tuning positions.
model: PyMC Model
Optional model for sampling step. Defaults to None (taken from context).
mode: string or `Mode` instance.
compilation mode passed to Theano functions
References
----------
.. [terBraak2008] Cajo C.F. ter Braak and Jasper A. Vrugt (2008).
Differential Evolution Markov Chain with snooker updater and fewer chains.
Statistics and Computing
`link <https://doi.org/10.1007/s11222-008-9104-9>`__
"""
name = "DEMetropolisZ"
default_blocked = True
generates_stats = True
stats_dtypes = [
{
"accept": np.float64,
"accepted": np.bool,
"tune": np.bool,
"scaling": np.float64,
"lambda": np.float64,
}
]
def __init__(
self,
vars=None,
S=None,
proposal_dist=None,
lamb=None,
scaling=0.001,
tune="lambda",
tune_interval=100,
tune_drop_fraction: float = 0.9,
model=None,
mode=None,
**kwargs
):
model = pm.modelcontext(model)
if vars is None:
vars = model.cont_vars
vars = pm.inputvars(vars)
if S is None:
S = np.ones(model.ndim)
if proposal_dist is not None:
self.proposal_dist = proposal_dist(S)
else:
self.proposal_dist = UniformProposal(S)
self.scaling = np.atleast_1d(scaling).astype("d")
if lamb is None:
# default to the optimal lambda for normally distributed targets
lamb = 2.38 / np.sqrt(2 * model.ndim)
self.lamb = float(lamb)
if tune not in {None, "scaling", "lambda"}:
raise ValueError('The parameter "tune" must be one of {None, scaling, lambda}')
self.tune = True
self.tune_target = tune
self.tune_interval = tune_interval
self.tune_drop_fraction = tune_drop_fraction
self.steps_until_tune = tune_interval
self.accepted = 0
# cache local history for the Z-proposals
self._history = []
# remember initial settings before tuning so they can be reset
self._untuned_settings = dict(
scaling=self.scaling,
lamb=self.lamb,
steps_until_tune=tune_interval,
accepted=self.accepted,
)
self.mode = mode
shared = pm.make_shared_replacements(vars, model)
self.delta_logp = delta_logp(model.logpt, vars, shared)
super().__init__(vars, shared)
def reset_tuning(self):
"""Resets the tuned sampler parameters and history to their initial values."""
# history can't be reset via the _untuned_settings dict because it's a list
self._history = []
for attr, initial_value in self._untuned_settings.items():
setattr(self, attr, initial_value)
return
def astep(self, q0):
# same tuning scheme as DEMetropolis
if not self.steps_until_tune and self.tune:
if self.tune_target == "scaling":
self.scaling = tune(self.scaling, self.accepted / float(self.tune_interval))
elif self.tune_target == "lambda":
self.lamb = tune(self.lamb, self.accepted / float(self.tune_interval))
# Reset counter
self.steps_until_tune = self.tune_interval
self.accepted = 0
epsilon = self.proposal_dist() * self.scaling
it = len(self._history)
# use the DE-MCMC-Z proposal scheme as soon as the history has 2 entries
if it > 1:
# differential evolution proposal
# select two other chains
iz1 = np.random.randint(it)
iz2 = np.random.randint(it)
while iz2 == iz1:
iz2 = np.random.randint(it)
z1 = self._history[iz1]
z2 = self._history[iz2]
# propose a jump
q = floatX(q0 + self.lamb * (z1 - z2) + epsilon)
else:
# propose just with noise in the first 2 iterations
q = floatX(q0 + epsilon)
accept = self.delta_logp(q, q0)
q_new, accepted = metrop_select(accept, q, q0)
self.accepted += accepted
self._history.append(q_new)
self.steps_until_tune -= 1
stats = {
"tune": self.tune,
"scaling": self.scaling,
"lambda": self.lamb,
"accept": np.exp(accept),
"accepted": accepted,
}
return q_new, [stats]
def stop_tuning(self):
"""At the end of the tuning phase, this method removes the first x% of the history
so future proposals are not informed by unconverged tuning iterations.
"""
it = len(self._history)
n_drop = int(self.tune_drop_fraction * it)
self._history = self._history[n_drop:]
return super().stop_tuning()
@staticmethod
def competence(var, has_grad):
if var.dtype in pm.discrete_types:
return Competence.INCOMPATIBLE
return Competence.COMPATIBLE
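# Illustrative arithmetic for tune_drop_fraction in DEMetropolisZ.stop_tuning
# above: with 1000 tuning iterations in the history and the default drop
# fraction of 0.9, the first 900 entries are discarded, so later Z-proposals
# only draw difference vectors from the final 100 (plus subsequent) positions.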
def sample_except(limit, excluded):
candidate = nr.choice(limit - 1)
if candidate >= excluded:
candidate += 1
return candidate
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / np.sum(e_x, axis=0)
def delta_logp(logp, vars, shared):
[logp0], inarray0 = pm.join_nonshared_inputs([logp], vars, shared)
tensor_type = inarray0.type
inarray1 = tensor_type("inarray1")
logp1 = pm.CallableTensor(logp0)(inarray1)
f = theano.function([inarray1, inarray0], logp1 - logp0)
f.trust_input = True
return f
|
return scale * 10.0
|
run.go
|
// Run a test
package main
import (
"bytes"
"fmt"
"go/build"
"io"
"log"
"os"
"os/exec"
"path"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/clive2000/rclone/fs"
"github.com/clive2000/rclone/fstest/testserver"
)
// Control concurrency per backend if required
var (
oneOnlyMu sync.Mutex
oneOnly = map[string]*sync.Mutex{}
)
// Run holds info about a running test
//
// A run just runs one command line, but it can be run multiple times
// if retries are needed.
type Run struct {
// Config
Remote string // name of the test remote
Backend string // name of the backend
Path string // path to the source directory
FastList bool // add -fast-list to tests
Short bool // add -short
NoRetries bool // don't retry if set
OneOnly bool // only run test for this backend at once
NoBinary bool // set to not build a binary
SizeLimit int64 // maximum test file size
Ignore map[string]struct{}
// Internals
CmdLine []string
CmdString string
Try int
err error
output []byte
FailedTests []string
RunFlag string
LogDir string // directory to place the logs
TrialName string // name/log file name of current trial
TrialNames []string // list of all the trials
}
// Runs records multiple Run objects
type Runs []*Run
// Sort interface
func (rs Runs) Len() int { return len(rs) }
func (rs Runs) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }
func (rs Runs) Less(i, j int) bool {
a, b := rs[i], rs[j]
if a.Backend < b.Backend {
return true
} else if a.Backend > b.Backend {
return false
}
if a.Remote < b.Remote {
return true
} else if a.Remote > b.Remote {
return false
}
if a.Path < b.Path {
return true
} else if a.Path > b.Path {
return false
}
if !a.FastList && b.FastList {
return true
} else if a.FastList && !b.FastList {
return false
}
return false
}
// dumpOutput prints the error output
func (r *Run) dumpOutput() {
log.Println("------------------------------------------------------------")
log.Printf("---- %q ----", r.CmdString)
log.Println(string(r.output))
log.Println("------------------------------------------------------------")
}
// testsToRegexp converts a slice of test names into a regexp which matches them.
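// For example (illustrative): testsToRegexp([]string{"TestA/Sub1", "TestA/Sub2", "TestB"})
// returns "^(TestA|TestB)$/^(Sub1|Sub2)$", which "go test -run" then matches
// level by level against subtests.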
func testsToRegexp(tests []string) string {
var split []map[string]struct{}
// Make a slice with maps of the used parts at each level
for _, test := range tests {
for i, name := range strings.Split(test, "/") {
if i >= len(split) {
split = append(split, make(map[string]struct{}))
}
split[i][name] = struct{}{}
}
}
var out []string
for _, level := range split {
var testsInLevel = []string{}
for name := range level {
testsInLevel = append(testsInLevel, name)
}
sort.Strings(testsInLevel)
if len(testsInLevel) > 1 {
out = append(out, "^("+strings.Join(testsInLevel, "|")+")$")
} else {
out = append(out, "^"+testsInLevel[0]+"$")
}
}
return strings.Join(out, "/")
}
var failRe = regexp.MustCompile(`(?m)^\s*--- FAIL: (Test.*?) \(`)
// findFailures looks for all the tests which failed
func (r *Run) findFailures() {
oldFailedTests := r.FailedTests
r.FailedTests = nil
excludeParents := map[string]struct{}{}
ignored := 0
for _, matches := range failRe.FindAllSubmatch(r.output, -1) {
failedTest := string(matches[1])
// Skip any ignored failures
if _, found := r.Ignore[failedTest]; found {
ignored++
} else {
r.FailedTests = append(r.FailedTests, failedTest)
}
// Find all the parents of this test
parts := strings.Split(failedTest, "/")
for i := len(parts) - 1; i >= 1; i-- {
excludeParents[strings.Join(parts[:i], "/")] = struct{}{}
}
}
// Exclude the parents
var newTests = r.FailedTests[:0]
for _, failedTest := range r.FailedTests {
|
}
}
r.FailedTests = newTests
if len(r.FailedTests) == 0 && ignored > 0 {
log.Printf("%q - Found %d ignored errors only - marking as good", r.CmdString, ignored)
r.err = nil
r.dumpOutput()
return
}
if len(r.FailedTests) != 0 {
r.RunFlag = testsToRegexp(r.FailedTests)
} else {
r.RunFlag = ""
}
if r.passed() && len(r.FailedTests) != 0 {
log.Printf("%q - Expecting no errors but got: %v", r.CmdString, r.FailedTests)
r.dumpOutput()
} else if !r.passed() && len(r.FailedTests) == 0 {
log.Printf("%q - Expecting errors but got none: %v", r.CmdString, r.FailedTests)
r.dumpOutput()
r.FailedTests = oldFailedTests
}
}
// nextCmdLine returns the next command line
func (r *Run) nextCmdLine() []string {
CmdLine := r.CmdLine
if r.RunFlag != "" {
CmdLine = append(CmdLine, "-test.run", r.RunFlag)
}
return CmdLine
}
// trial runs a single test
func (r *Run) trial() {
CmdLine := r.nextCmdLine()
CmdString := toShell(CmdLine)
msg := fmt.Sprintf("%q - Starting (try %d/%d)", CmdString, r.Try, *maxTries)
log.Println(msg)
logName := path.Join(r.LogDir, r.TrialName)
out, err := os.Create(logName)
if err != nil {
log.Fatalf("Couldn't create log file: %v", err)
}
defer func() {
err := out.Close()
if err != nil {
log.Fatalf("Failed to close log file: %v", err)
}
}()
_, _ = fmt.Fprintln(out, msg)
// Early exit if --dry-run
if *dryRun {
log.Printf("Not executing as --dry-run: %v", CmdLine)
_, _ = fmt.Fprintln(out, "--dry-run is set - not running")
return
}
// Start the test server if required
finish, err := testserver.Start(r.Remote)
if err != nil {
log.Printf("%s: Failed to start test server: %v", r.Remote, err)
_, _ = fmt.Fprintf(out, "%s: Failed to start test server: %v\n", r.Remote, err)
r.err = err
return
}
defer finish()
// Internal buffer
var b bytes.Buffer
multiOut := io.MultiWriter(out, &b)
cmd := exec.Command(CmdLine[0], CmdLine[1:]...)
cmd.Stderr = multiOut
cmd.Stdout = multiOut
cmd.Dir = r.Path
start := time.Now()
r.err = cmd.Run()
r.output = b.Bytes()
duration := time.Since(start)
r.findFailures()
if r.passed() {
msg = fmt.Sprintf("%q - Finished OK in %v (try %d/%d)", CmdString, duration, r.Try, *maxTries)
} else {
msg = fmt.Sprintf("%q - Finished ERROR in %v (try %d/%d): %v: Failed %v", CmdString, duration, r.Try, *maxTries, r.err, r.FailedTests)
}
log.Println(msg)
_, _ = fmt.Fprintln(out, msg)
}
// passed returns true if the test passed
func (r *Run) passed() bool {
return r.err == nil
}
// GOPATH returns the current GOPATH
func GOPATH() string {
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = build.Default.GOPATH
}
return gopath
}
// BinaryName turns a package name into a binary name
func (r *Run) BinaryName() string {
binary := path.Base(r.Path) + ".test"
if runtime.GOOS == "windows" {
binary += ".exe"
}
return binary
}
// BinaryPath turns a package name into a binary path
func (r *Run) BinaryPath() string {
return path.Join(r.Path, r.BinaryName())
}
// PackagePath returns the path to the package
func (r *Run) PackagePath() string {
return path.Join(GOPATH(), "src", r.Path)
}
// MakeTestBinary makes the binary we will run
func (r *Run) MakeTestBinary() {
binary := r.BinaryPath()
binaryName := r.BinaryName()
log.Printf("%s: Making test binary %q", r.Path, binaryName)
CmdLine := []string{"go", "test", "-c"}
if *dryRun {
log.Printf("Not executing: %v", CmdLine)
return
}
cmd := exec.Command(CmdLine[0], CmdLine[1:]...)
cmd.Dir = r.Path
err := cmd.Run()
if err != nil {
log.Fatalf("Failed to make test binary: %v", err)
}
if _, err := os.Stat(binary); err != nil {
log.Fatalf("Couldn't find test binary %q", binary)
}
}
// RemoveTestBinary removes the binary made in makeTestBinary
func (r *Run) RemoveTestBinary() {
if *dryRun {
return
}
binary := r.BinaryPath()
err := os.Remove(binary) // Delete the binary when finished
if err != nil {
log.Printf("Error removing test binary %q: %v", binary, err)
}
}
// Name returns the run name as a file name friendly string
func (r *Run) Name() string {
ns := []string{
r.Backend,
strings.Replace(r.Path, "/", ".", -1),
r.Remote,
}
if r.FastList {
ns = append(ns, "fastlist")
}
ns = append(ns, fmt.Sprintf("%d", r.Try))
s := strings.Join(ns, "-")
s = strings.Replace(s, ":", "", -1)
return s
}
// Init the Run
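// As a rough illustration (values are hypothetical), a binary-based run ends
// up with a command line such as
//   ./local.test -test.v -test.timeout 1h0m0s -remote TestLocal: -fast-list
// while NoBinary switches the prefix so the same flags are passed to
// "go test" as -v, -timeout, etc.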
func (r *Run) Init() {
prefix := "-test."
if r.NoBinary {
prefix = "-"
r.CmdLine = []string{"go", "test"}
} else {
r.CmdLine = []string{"./" + r.BinaryName()}
}
r.CmdLine = append(r.CmdLine, prefix+"v", prefix+"timeout", timeout.String(), "-remote", r.Remote)
if *listRetries > 0 {
r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(*listRetries))
}
r.Try = 1
if *verbose {
r.CmdLine = append(r.CmdLine, "-verbose")
fs.Config.LogLevel = fs.LogLevelDebug
}
if *runOnly != "" {
r.CmdLine = append(r.CmdLine, prefix+"run", *runOnly)
}
if r.FastList {
r.CmdLine = append(r.CmdLine, "-fast-list")
}
if r.Short {
r.CmdLine = append(r.CmdLine, "-short")
}
if r.SizeLimit > 0 {
r.CmdLine = append(r.CmdLine, "-size-limit", strconv.FormatInt(r.SizeLimit, 10))
}
r.CmdString = toShell(r.CmdLine)
}
// Logs returns all the log names
func (r *Run) Logs() []string {
return r.TrialNames
}
// FailedTestsCSV returns the failed tests as a comma separated string, limiting the number shown
func (r *Run) FailedTestsCSV() string {
const maxTests = 5
ts := r.FailedTests
if len(ts) > maxTests {
ts = ts[:maxTests:maxTests]
ts = append(ts, fmt.Sprintf("… (%d more)", len(r.FailedTests)-maxTests))
}
return strings.Join(ts, ", ")
}
// Run runs all the trials for this test
func (r *Run) Run(LogDir string, result chan<- *Run) {
if r.OneOnly {
oneOnlyMu.Lock()
mu := oneOnly[r.Backend]
if mu == nil {
mu = new(sync.Mutex)
oneOnly[r.Backend] = mu
}
oneOnlyMu.Unlock()
mu.Lock()
defer mu.Unlock()
}
r.Init()
r.LogDir = LogDir
for r.Try = 1; r.Try <= *maxTries; r.Try++ {
r.TrialName = r.Name() + ".txt"
r.TrialNames = append(r.TrialNames, r.TrialName)
log.Printf("Starting run with log %q", r.TrialName)
r.trial()
if r.passed() || r.NoRetries {
break
}
}
if !r.passed() {
r.dumpOutput()
}
result <- r
}
|
if _, excluded := excludeParents[failedTest]; !excluded {
newTests = append(newTests, failedTest)
|
forms.py
|
from __future__ import absolute_import, unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from .fields import TemplateField
class DocumentTemplateSandboxForm(forms.Form):
result = forms.CharField(
help_text=_('Resulting text from the evaluated template.'),
label=_('Result'), required=False, widget=forms.widgets.Textarea(
attrs={'readonly': 'readonly', 'rows': 5}
)
)
def
|
(self, *args, **kwargs):
self.model = kwargs.pop('model')
self.model_variable = kwargs.pop('model_variable')
super(DocumentTemplateSandboxForm, self).__init__(*args, **kwargs)
self.fields['template'] = TemplateField(
initial_help_text=_('The template string to be evaluated.'),
label=_('Template'), model=self.model,
model_variable=self.model_variable, required=True
)
self.order_fields(field_order=('template', 'result'))
|
__init__
|
main.go
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// [START cloudscheduler_generated_scheduler_apiv1_CloudSchedulerClient_ListJobs]
package main
import (
"context"
scheduler "cloud.google.com/go/scheduler/apiv1"
"google.golang.org/api/iterator"
schedulerpb "google.golang.org/genproto/googleapis/cloud/scheduler/v1"
|
func main() {
// import schedulerpb "google.golang.org/genproto/googleapis/cloud/scheduler/v1"
// import "google.golang.org/api/iterator"
ctx := context.Background()
c, err := scheduler.NewCloudSchedulerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &schedulerpb.ListJobsRequest{
// TODO: Fill request struct fields.
}
it := c.ListJobs(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
}
// [END cloudscheduler_generated_scheduler_apiv1_CloudSchedulerClient_ListJobs]
|
)
|
download.py
|
# ===============================================================================
# Author: Xianyuan Liu, [email protected]
# Raivo Koot, [email protected]
# Haiping Lu, [email protected] or [email protected]
# ===============================================================================
"""Data downloading and compressed data extraction functions, Based on
https://github.com/pytorch/vision/blob/master/torchvision/datasets/utils.py
https://github.com/pytorch/pytorch/blob/master/torch/hub.py
"""
import logging
import os
from pathlib import Path
from torch.hub import download_url_to_file
from torchvision.datasets.utils import download_and_extract_archive, download_file_from_google_drive, extract_archive
|
def download_file_by_url(url, output_directory, output_file_name, file_format=None):
"""Download file/compressed file by url.
Args:
url (string): URL of the object to download
output_directory (string, optional): Full path where the object will be saved.
Absolute path recommended. Relative path also works.
output_file_name (string, optional): File name the object will be saved as
file_format (string, optional): File format
For compressed files, supports ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]
Example: (Grab the raw link from GitHub. Note the "raw" in the URL.)
>>> url = "https://github.com/pykale/data/raw/main/videos/video_test_data/ADL/annotations/labels_train_test/adl_P_04_train.pkl"
>>> download_file_by_url(url, "data", "a.pkl", "pkl")
>>> url = "https://github.com/pykale/data/raw/main/videos/video_test_data.zip"
>>> download_file_by_url(url, "data", "video_test_data.zip", "zip")
"""
output_directory = Path(output_directory).absolute()
file = Path(output_directory).joinpath(output_file_name)
if os.path.exists(file):
logging.info("Skipping Download and Extraction")
return
if not os.path.exists(output_directory):
os.makedirs(output_directory)
if file_format in ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]:
logging.info("Downloading and extracting {}.".format(output_file_name))
download_and_extract_archive(url=url, download_root=output_directory, filename=output_file_name)
logging.info("Datasets downloaded and extracted in {}".format(file))
else:
logging.info("Downloading {}.".format(output_file_name))
download_url_to_file(url, file)
logging.info("Datasets downloaded in {}".format(file))
def download_file_gdrive(id, output_directory, output_file_name, file_format=None):
"""Download file/compressed file by Google Drive id.
Args:
id (string): Google Drive file id of the object to download
output_directory (string, optional): Full path where the object will be saved.
Absolute path recommended. Relative path also works.
output_file_name (string, optional): File name the object will be saved as
file_format (string, optional): File format
For compressed files, supports ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]
Example:
>>> gdrive_id = "1U4D23R8u8MJX9KVKb92bZZX-tbpKWtga"
>>> download_file_gdrive(gdrive_id, "data", "demo_datasets.zip", "zip")
>>> gdrive_id = "1SV7fmAnWj-6AU9X5BGOrvGMoh2Gu9Nih"
>>> download_file_gdrive(gdrive_id, "data", "dummy_data.csv", "csv")
"""
output_directory = Path(output_directory).absolute()
file = Path(output_directory).joinpath(output_file_name)
if os.path.exists(file):
logging.info("Skipping Download and Extraction")
return
os.makedirs(output_directory, exist_ok=True)
logging.info("Downloading {}.".format(output_file_name))
download_file_from_google_drive(id, output_directory, output_file_name)
if file_format is not None and file_format in ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]:
logging.info("Extracting {}.".format(output_file_name))
extract_archive(file.as_posix())
logging.info("Datasets downloaded and extracted in {}".format(file))
else:
logging.info("Datasets downloaded in {}".format(file))
| |
window.rs
|
/*
* Copyright (2021) by Marcel Lambert.
* This project is dual licensed under either MIT or Apache-2.0.
*/
use crate::mock::traits::MockChildOf;
use crate::utils::children::{ChildOf, ChildrenList};
use crate::utils::outlet::{ChildrenOutlet, MenuOutlet, Outlet, OutletHolder};
use crate::widgets::{Widget, Window, WindowOutlet};
use crate::PlatingResult;
//TODO: fully automate
#[derive(Debug, Clone, Hash, Default)]
pub struct MockWindowOutlet<MAIN, MENU>
where
MAIN: ChildrenList,
MENU: ChildrenList,
{
pub children: MAIN,
pub menu: MENU,
}
impl<MAIN, MENU> From<WindowOutlet<MAIN, MENU>> for MockWindowOutlet<MAIN, MENU>
where
MAIN: ChildrenList,
MENU: ChildrenList,
{
fn from(app: WindowOutlet<MAIN, MENU>) -> Self {
Self {
children: app.children,
menu: app.menu,
}
}
}
impl<MAIN, MENU> Outlet<ChildrenOutlet> for MockWindowOutlet<MAIN, MENU>
where
MAIN: ChildrenList,
MENU: ChildrenList,
{
type Children = MAIN;
fn get(&self) -> &Self::Children {
&self.children
}
fn get_mut(&mut self) -> &mut Self::Children {
&mut self.children
}
}
impl<MAIN, MENU> Outlet<MenuOutlet> for MockWindowOutlet<MAIN, MENU>
where
MAIN: ChildrenList,
MENU: ChildrenList,
{
type Children = MENU;
fn get(&self) -> &Self::Children {
&self.menu
}
fn get_mut(&mut self) -> &mut Self::Children {
&mut self.menu
}
}
impl<MAIN, MENU> OutletHolder for MockWindowOutlet<MAIN, MENU>
where
MAIN: ChildrenList,
MENU: ChildrenList,
{
}
/*
impl<CHILDREN, MENU> Native<Cocoa> for MockWindowOutlet<MAIN_CHILDREN, MENU_CHILDREN>
where
MAIN_CHILDREN: ChildrenList,
MENU_CHILDREN: ChildrenList,
{ }*/
//TODO: end fully automate
pub struct MockWindowWidget<OUTLET>
where
OUTLET: OutletHolder + Outlet<MenuOutlet> + Outlet<ChildrenOutlet>,
{
outlet: OUTLET,
_private: crate::Private, // Creation is limited to our constructors
}
pub trait MockWindow<OUTLET>
where
OUTLET: OutletHolder + Outlet<MenuOutlet> + Outlet<ChildrenOutlet>,
Self: Sized,
{
fn new(outlet: OUTLET) -> PlatingResult<Self>
where
<OUTLET as Outlet<MenuOutlet>>::Children: MockChildOf<Self, MenuOutlet>,
<OUTLET as Outlet<ChildrenOutlet>>::Children: MockChildOf<Self, ChildrenOutlet>;
}
impl<OUTLET> MockWindow<OUTLET> for MockWindowWidget<OUTLET>
where
OUTLET: OutletHolder + Outlet<MenuOutlet> + Outlet<ChildrenOutlet>,
{
fn new(outlet: OUTLET) -> PlatingResult<Self>
where
<OUTLET as Outlet<MenuOutlet>>::Children: MockChildOf<Self, MenuOutlet>,
<OUTLET as Outlet<ChildrenOutlet>>::Children: MockChildOf<Self, ChildrenOutlet>,
{
let result = Self {
outlet,
_private: crate::Private {},
};
<OUTLET as Outlet<MenuOutlet>>::get(&result.outlet).connect(&result);
<OUTLET as Outlet<ChildrenOutlet>>::get(&result.outlet).connect(&result);
Ok(result)
}
}
impl<OUTLET> Widget<OUTLET> for MockWindowWidget<OUTLET>
where
OUTLET: OutletHolder + Outlet<MenuOutlet> + Outlet<ChildrenOutlet>,
{
fn outlet(&self) -> &OUTLET {
|
}
}
impl<OUTLET> Window<OUTLET> for MockWindowWidget<OUTLET>
where
OUTLET: OutletHolder + Outlet<MenuOutlet> + Outlet<ChildrenOutlet>,
Self: Sized,
{
fn new(outlet: OUTLET) -> PlatingResult<Self>
where
<OUTLET as Outlet<MenuOutlet>>::Children: ChildOf<Self, MenuOutlet>,
<OUTLET as Outlet<ChildrenOutlet>>::Children: ChildOf<Self, ChildrenOutlet>,
{
<Self as MockWindow<OUTLET>>::new(outlet)
}
}
|
&self.outlet
|
resolver.py
|
import json
import getpass
import shortuuid # type: ignore
from datetime import datetime
from functools import lru_cache
from collections import defaultdict
from typing import Any, Dict, Generator, Generic, List, Optional, Set, Tuple, Union
from followthemoney.types import registry
from nomenklatura.entity import CE
from nomenklatura.judgement import Judgement
from nomenklatura.util import PathLike, is_qid
StrIdent = Union[str, "Identifier"]
Pair = Tuple["Identifier", "Identifier"]
class ResolverLogicError(Exception):
pass
class Identifier(object):
PREFIX = "NK-"
__slots__ = ("id", "canonical", "weight")
def __init__(self, id: str):
self.id = id
self.weight: int = 1
if self.id.startswith(self.PREFIX):
self.weight = 2
elif is_qid(id):
self.weight = 3
self.canonical = self.weight > 1
def __eq__(self, other: Any) -> bool:
return str(self) == str(other)
def __lt__(self, other: Any) -> bool:
return (self.weight, self.id) < (other.weight, other.id)
def __str__(self) -> str:
return self.id
def __hash__(self) -> int:
return hash(self.id)
def __len__(self) -> int:
return len(self.id)
def __repr__(self) -> str:
return f"<I({self.id})>"
@classmethod
def get(cls, id: StrIdent) -> "Identifier":
if isinstance(id, str):
return cls(id)
return id
@classmethod
def pair(cls, left_id: StrIdent, right_id: StrIdent) -> Pair:
left = cls.get(left_id)
right = cls.get(right_id)
if left == right:
raise ResolverLogicError()
return (max(left, right), min(left, right))
@classmethod
def make(cls, value: Optional[str] = None) -> "Identifier":
key = value or shortuuid.uuid()
return cls.get(f"{cls.PREFIX}{key}")
class Edge(object):
__slots__ = ("key", "source", "target", "judgement", "score", "user", "timestamp")
def __init__(
self,
left_id: StrIdent,
right_id: StrIdent,
judgement: Judgement = Judgement.NO_JUDGEMENT,
score: Optional[float] = None,
user: Optional[str] = None,
timestamp: Optional[str] = None,
):
self.key = Identifier.pair(left_id, right_id)
self.target, self.source = self.key
self.judgement = judgement
self.score = score
self.user = user
self.timestamp = timestamp
def other(self, cur: Identifier) -> Identifier:
if cur == self.target:
return self.source
return self.target
def to_line(self) -> str:
row = [
self.target.id,
self.source.id,
self.judgement.value,
self.score,
self.user,
self.timestamp,
]
return json.dumps(row) + "\n"
def __str__(self) -> str:
return self.to_line()
def __hash__(self) -> int:
return hash(self.key)
def __eq__(self, other: Any) -> bool:
return hash(self) == hash(other)
def __lt__(self, other: Any) -> bool:
return bool(self.key < other.key)
def __repr__(self) -> str:
return f"<E({self.target.id}, {self.source.id}, {self.judgement.value})>"
@classmethod
def from_line(cls, line: str) -> "Edge":
data = json.loads(line)
return cls(
data[0],
data[1],
judgement=Judgement(data[2]),
score=data[3],
user=data[4],
|
class Resolver(Generic[CE]):
UNDECIDED = (Judgement.NO_JUDGEMENT, Judgement.UNSURE)
def __init__(self, path: Optional[PathLike] = None) -> None:
self.path = path
self.edges: Dict[Pair, Edge] = {}
self.nodes: Dict[Identifier, Set[Edge]] = defaultdict(set)
def get_edge(self, left_id: StrIdent, right_id: StrIdent) -> Optional[Edge]:
key = Identifier.pair(left_id, right_id)
return self.edges.get(key)
def _traverse(self, node: Identifier, seen: Set[Identifier]) -> Set[Identifier]:
connected = set([node])
if node in seen:
return connected
seen.add(node)
for edge in self.nodes.get(node, []):
if edge.judgement == Judgement.POSITIVE:
other = edge.other(node)
rec = self._traverse(other, seen)
connected.update(rec)
return connected
@lru_cache(maxsize=None)
def connected(self, node: Identifier) -> Set[Identifier]:
return self._traverse(node, set())
def get_canonical(self, entity_id: StrIdent) -> str:
"""Return the canonical identifier for the given entity ID."""
node = Identifier.get(entity_id)
best = max(self.connected(node))
if best.canonical:
return best.id
return node.id
def canonicals(self) -> Generator[Identifier, None, None]:
"""Return all the canonical cluster identifiers."""
for node in self.nodes.keys():
if not node.canonical:
continue
canonical = self.get_canonical(node)
if canonical == node.id:
yield node
def get_referents(
self, canonical_id: StrIdent, canonicals: bool = True
) -> Set[str]:
"""Get all the non-canonical entity identifiers which refer to a given
canonical identifier."""
node = Identifier.get(canonical_id)
referents: Set[str] = set()
for connected in self.connected(node):
if not canonicals and connected.canonical:
continue
if connected == node:
continue
referents.add(connected.id)
return referents
def get_resolved_edge(
self, left_id: StrIdent, right_id: StrIdent
) -> Optional[Edge]:
(left, right) = Identifier.pair(left_id, right_id)
left_connected = self.connected(left)
right_connected = self.connected(right)
for e in left_connected:
for o in right_connected:
edge = self.edges.get(Identifier.pair(e, o))
if edge is None:
continue
return edge
return None
def get_judgement(self, entity_id: StrIdent, other_id: StrIdent) -> Judgement:
"""Get the existing decision between two entities with dedupe factored in."""
entity = Identifier.get(entity_id)
other = Identifier.get(other_id)
if entity == other:
return Judgement.POSITIVE
if is_qid(entity.id) and is_qid(other.id):
return Judgement.NEGATIVE
entity_connected = self.connected(entity)
if other in entity_connected:
return Judgement.POSITIVE
other_connected = self.connected(other)
for e in entity_connected:
for o in other_connected:
edge = self.edges.get(Identifier.pair(e, o))
if edge is None:
continue
if edge.judgement == Judgement.NEGATIVE:
return edge.judgement
return Judgement.NO_JUDGEMENT
def check_candidate(self, left: StrIdent, right: StrIdent) -> bool:
"""Check if the two IDs could be merged, i.e. if there's no existing
judgement."""
judgement = self.get_judgement(left, right)
return judgement == Judgement.NO_JUDGEMENT
def _get_suggested(self) -> List[Edge]:
"""Get all NO_JUDGEMENT edges in descending order of score."""
edges_all = self.edges.values()
candidates = (e for e in edges_all if e.judgement == Judgement.NO_JUDGEMENT)
cmp = lambda x: x.score or -1.0
return sorted(candidates, key=cmp, reverse=True)
def get_candidates(
self, limit: int = 100
) -> Generator[Tuple[str, str, Optional[float]], None, None]:
returned = 0
for edge in self._get_suggested():
if not self.check_candidate(edge.source, edge.target):
continue
yield edge.target.id, edge.source.id, edge.score
returned += 1
if returned >= limit:
break
def suggest(
self, left_id: StrIdent, right_id: StrIdent, score: float
) -> Identifier:
"""Make a NO_JUDGEMENT link between two identifiers to suggest that a user
should make a decision about whether they are the same or not."""
edge = self.get_edge(left_id, right_id)
if edge is not None:
if edge.judgement in self.UNDECIDED:
edge.score = score
return edge.target
return self.decide(left_id, right_id, Judgement.NO_JUDGEMENT, score=score)
def decide(
self,
left_id: StrIdent,
right_id: StrIdent,
judgement: Judgement,
user: Optional[str] = None,
score: Optional[float] = None,
) -> Identifier:
edge = self.get_edge(left_id, right_id)
if edge is None:
edge = Edge(left_id, right_id, judgement=judgement)
# Canonicalise positive matches, i.e. make both identifiers refer to a
# canonical identifier, instead of making a direct link.
if judgement == Judgement.POSITIVE:
connected = set(self.connected(edge.target))
connected.update(self.connected(edge.source))
target = max(connected)
if not target.canonical:
canonical = Identifier.make()
self._remove(edge)
self.decide(edge.source, canonical, judgement=judgement, user=user)
self.decide(edge.target, canonical, judgement=judgement, user=user)
return canonical
edge.judgement = judgement
edge.timestamp = datetime.utcnow().isoformat()[:16]
edge.user = user or getpass.getuser()
edge.score = score or edge.score
self._register(edge)
return edge.target
def _register(self, edge: Edge) -> None:
if edge.judgement != Judgement.NO_JUDGEMENT:
edge.score = None
self.edges[edge.key] = edge
self.nodes[edge.source].add(edge)
self.nodes[edge.target].add(edge)
self.connected.cache_clear()
def _remove(self, edge: Edge) -> None:
"""Remove an edge from the graph."""
self.edges.pop(edge.key, None)
for node in (edge.source, edge.target):
if node in self.nodes:
self.nodes[node].discard(edge)
def explode(self, node_id: StrIdent) -> Set[str]:
"""Dissolve all edges linked to the cluster to which the node belongs.
This is the hard way to make sure we re-do context once we realise
there's been a mistake."""
node = Identifier.get(node_id)
affected: Set[str] = set()
for part in self.connected(node):
affected.add(str(part))
edges = self.nodes.get(part)
if edges is None:
continue
for edge in list(edges):
if edge.judgement != Judgement.NO_JUDGEMENT:
self._remove(edge)
self.connected.cache_clear()
return affected
def prune(self, keep: int = 0) -> None:
"""Remove suggested (i.e. NO_JUDGEMENT) edges, keep only the n with the
highest score. This also checks if a transitive judgement has been
established in the mean time and removes those candidates."""
kept = 0
for edge in self._get_suggested():
judgement = self.get_judgement(edge.source, edge.target)
if judgement != Judgement.NO_JUDGEMENT:
self._remove(edge)
if kept >= keep:
self._remove(edge)
kept += 1
self.connected.cache_clear()
def apply(self, proxy: CE) -> CE:
"""Replace all entity references in a given proxy with their canonical
identifiers. This is essentially the harmonisation post de-dupe."""
canonical_id = self.get_canonical(proxy.id)
if canonical_id != proxy.id:
proxy.referents = self.get_referents(canonical_id)
proxy.id = canonical_id
for prop in proxy.iterprops():
if prop.type != registry.entity:
continue
for value in proxy.pop(prop):
canonical = self.get_canonical(value)
proxy.unsafe_add(prop, canonical, cleaned=True)
return proxy
def save(self) -> None:
"""Store the resolver adjacency list to a plain text JSON list."""
if self.path is None:
raise RuntimeError("Resolver has no path")
edges = sorted(self.edges.values())
with open(self.path, "w") as fh:
for edge in edges:
fh.write(edge.to_line())
@classmethod
def load(cls, path: PathLike) -> "Resolver[CE]":
resolver = cls(path=path)
if not path.exists():
return resolver
with open(path, "r") as fh:
while True:
line = fh.readline()
if not line:
break
edge = Edge.from_line(line)
resolver._register(edge)
return resolver
def __repr__(self) -> str:
path = self.path.name if self.path is not None else ":memory:"
return f"<Resolver({path!r}, {len(self.edges)})>"
|
timestamp=data[5],
)
|
selftest.rs
|
//! This module handles selftesting of the kernel. A selftest can be either a unit test or an
//! integration test.
//! The kernel uses the serial communication interface to transmit the results of the selftests to
//! another machine.
//use crate::device::serial;
use core::any::type_name;
/// Boolean value telling whether selftesting is running.
static mut RUNNING: bool = false;
/// This module contains utilities to manipulate QEMU for testing.
#[cfg(config_debug_qemu)]
pub mod qemu {
use crate::io;
/// The port used to trigger QEMU emulator exit with the given exit code.
const EXIT_PORT: u16 = 0xf4;
/// QEMU exit code for success.
pub const SUCCESS: u32 = 0x10;
/// QEMU exit code for failure.
pub const FAILURE: u32 = 0x11;
/// Exits QEMU with the given status.
pub fn exit(status: u32) -> ! {
unsafe {
io::outl(EXIT_PORT, status);
}
crate::halt();
}
}
/// Trait for any testable feature.
pub trait Testable {
/// Function called to run the corresponding test.
fn run(&self);
}
impl<T> Testable for T where T: Fn() {
// TODO Use a special format on serial to be parsed by host?
fn run(&self) {
//let serial_guard = serial::get(serial::COM1).lock();
let name = type_name::<T>();
crate::print!("test {} ... ", name);
self();
//let status = "ok"; // TODO On panic, retrieve message and print on serial
//if let Some(s) = serial {
// // TODO Add an additional message on fail
// s.write(b"{\"name\": \"");
// s.write(name.as_bytes());
// s.write(b"\", \"status\": \"");
// s.write(status.as_bytes());
// s.write(b"\"}\n");
//}
crate::println!("ok");
}
}
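// Illustrative example: any zero-argument function or closure satisfies
// `Testable`, so a kernel unit test can be written as a plain `fn` and handed
// to the runner below as a `&dyn Testable`:
//
//     fn simple_arithmetic() {
//         assert_eq!(2 + 2, 4);
//     }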
/// The test runner for the kernel. This function runs every test for the kernel and halts the
/// kernel or exits the emulator if possible.
pub fn
|
(tests: &[&dyn Testable]) {
crate::println!("Running {} tests", tests.len());
unsafe { // Safe because the function is called by only one thread
RUNNING = true;
}
for test in tests {
test.run();
}
unsafe { // Safe because the function is called by only one thread
RUNNING = false;
}
crate::println!("No more tests to run");
#[cfg(config_debug_qemu)]
qemu::exit(qemu::SUCCESS);
#[cfg(not(config_debug_qemu))]
crate::halt();
}
/// Tells whether selftesting is running.
pub fn is_running() -> bool {
unsafe { // Safe because the function is called by only one thread
RUNNING
}
}
|
runner
|
input.rs
|
use glutin;
use std::collections::HashSet;
pub use glutin::MouseButton;
pub use glutin::VirtualKeyCode;
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Mouse {
pub at: (i32, i32), // could make this optional for off screen? might be a stupid idea.
pub down: HashSet<glutin::MouseButton>,
pub pushed: HashSet<glutin::MouseButton>,
pub released: HashSet<glutin::MouseButton>,
pub mouse_wheel_delta: i32, // we multiply the float delta by 100 and round it
}
impl Mouse {
pub fn left_pushed(&self) -> bool {
self.pushed.contains(&glutin::MouseButton::Left)
}
pub fn left_down(&self) -> bool {
self.down.contains(&glutin::MouseButton::Left)
}
pub fn left_released(&self) -> bool {
self.released.contains(&glutin::MouseButton::Left)
}
pub fn right_pushed(&self) -> bool {
self.pushed.contains(&glutin::MouseButton::Right)
}
pub fn right_down(&self) -> bool {
self.down.contains(&glutin::MouseButton::Right)
}
pub fn right_released(&self) -> bool {
self.released.contains(&glutin::MouseButton::Right)
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Keys {
pub down: HashSet<glutin::VirtualKeyCode>,
pub pushed: HashSet<glutin::VirtualKeyCode>,
pub released: HashSet<glutin::VirtualKeyCode>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Input{
pub mouse:Mouse,
pub keys:Keys,
pub close:bool,
}
pub fn is_close_event(event: &glutin::Event) -> bool {
use glutin::Event;
use glutin::WindowEvent;
use glutin::KeyboardInput;
// glutin::WindowEvent::KeyboardInput {}
match event {
&Event::WindowEvent { event: WindowEvent::Closed , .. } => true,
&Event::WindowEvent {
event: WindowEvent::KeyboardInput {
input: KeyboardInput { virtual_keycode: Some(glutin::VirtualKeyCode::Escape), ..}
, .. },
..} => true,
_ => false,
}
}
pub fn produce(input:&Input, events: &Vec<glutin::Event>) -> Input {
let mut next_input = input.clone();
next_input.keys.pushed.clear();
next_input.keys.released.clear();
next_input.mouse.pushed.clear();
next_input.mouse.released.clear();
next_input.mouse.mouse_wheel_delta = 0;
for event in events {
if is_close_event(&event) {
next_input.close = true;
}
use glutin::{Event, WindowEvent, KeyboardInput, ElementState};
match event {
&Event::WindowEvent { ref event, .. } => {
match event {
&WindowEvent::Resized(_, _) => {},
&WindowEvent::KeyboardInput { input: KeyboardInput { virtual_keycode: Some(key_code), state, .. }, .. } => {
match state {
ElementState::Pressed => {
let was_down = next_input.keys.down.contains(&key_code);
next_input.keys.down.insert(key_code);
if !was_down {
next_input.keys.pushed.insert(key_code);
}
},
ElementState::Released => {
let was_down = next_input.keys.down.contains(&key_code);
next_input.keys.down.remove(&key_code);
// only record a release for keys that were actually down (mirrors the mouse handling below)
if was_down {
next_input.keys.released.insert(key_code);
}
},
}
},
&WindowEvent::MouseInput { state, button, .. } => {
match state {
ElementState::Pressed =>
|
,
ElementState::Released => {
let was_down = next_input.mouse.down.contains(&button);
next_input.mouse.down.remove(&button);
if was_down {
next_input.mouse.released.insert(button);
}
},
}
},
&WindowEvent::MouseWheel { delta, .. } => {
println!("mouse delta -> {:?}", delta);
// next_input.mouse.mouse_wheel_delta += (delta * 100.0) as i32;
},
&WindowEvent::MouseMoved { position: (x, y), .. } => {
next_input.mouse.at = (x as i32, y as i32);
}
_ => (),
}
},
_ => (),
}
}
next_input
}
impl Input {
pub fn default() -> Input {
Input{
mouse: Mouse {
at: (0, 0),
down: HashSet::new(),
pushed: HashSet::new(),
released: HashSet::new(),
mouse_wheel_delta: 0,
},
keys: Keys {
down: HashSet::new(),
pushed: HashSet::new(),
released: HashSet::new(),
},
close: false,
}
}
}
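// A sketch of the intended per-frame flow (the event-collection plumbing below is an
// assumption about the caller and not part of this module):
//
//     let mut input = Input::default();
//     loop {
//         let mut events = Vec::new();
//         events_loop.poll_events(|e| events.push(e));
//         input = produce(&input, &events);
//         if input.close { break; }
//     }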
|
{
let was_down = next_input.mouse.down.contains(&button);
next_input.mouse.down.insert(button);
if !was_down {
next_input.mouse.pushed.insert(button);
}
}
|
main.go
|
package main
import "github.com/rmanzoku/mackerel-plugin-resque/lib"
func
|
() {
mpresque.Do()
}
|
main
|
label script.py
|
from __future__ import division
from tkinter import *
from tkinter import messagebox as tkMessageBox
from PIL import Image, ImageTk
import os
import glob
import random
w0 = 1 # original image width
h0 = 1 # original image height
# colors for the bboxes
COLORS = ['red','blue','yellow','pink','cyan','green','black']
#image size
SIZE = 256,256
# target image size after scaling
DEST_SIZE = 500,500
class LabelTool():
def __init__(self,master):
#set up the main frame
self.parent = master
self.parent.title('LabelTool')
self.frame = Frame(self.parent)
self.frame.pack(fill=BOTH,expand=1)
self.parent.resizable(width=TRUE,height=TRUE)
#initialize global state
self.imageDir = ''
self.imageList = []
self.egDir = ''
self.egList = []
self.outDir =''
self.cur = 0
self.total = 0
self.category =0
self.imagename=''
self.labelfilename=''
self.tkimg = None
# initialize mouse state
self.STATE={}
self.STATE['click']=0
self.STATE['x'],self.STATE['y']=0,0
#reference to bbox
self.bboxIdList = []
self.bboxId = None
self.bboxList = []
self.hl=None
self.vl=None
# ----------------- GUI stuff ---------------------
# dir entry & load
self.label = Label(self.frame,text='Image Dir:')
self.label.grid(row=0,column=0,sticky=E)
self.entry=Entry(self.frame)
self.entry.grid(row=0, column=1, sticky=W + E)
self.ldBtn = Button(self.frame, text="Load", command=self.loadDir)
self.ldBtn.grid(row=0, column=2, sticky=W + E)
# main panel for labeling
self.mainPanel = Canvas(self.frame, cursor='tcross')
self.mainPanel.bind("<Button-1>", self.mouseClick)
self.mainPanel.bind("<Motion>", self.mouseMove)
self.parent.bind("<Escape>", self.cancelBBox) # press <Espace> to cancel current bbox
self.parent.bind("s", self.cancelBBox)
self.parent.bind("a", self.prevImage) # press 'a' to go backforward
self.parent.bind("d", self.nextImage) # press 'd' to go forward
self.mainPanel.grid(row=1, column=1, rowspan=4, sticky=W + N)
# showing bbox info & delete bbox
self.lb1 = Label(self.frame, text='Bounding boxes:')
self.lb1.grid(row=1, column=2, sticky=W + N)
self.listbox = Listbox(self.frame, width=28, height=12)
self.listbox.grid(row=2, column=2, sticky=N)
self.btnDel = Button(self.frame, text='Delete', command=self.delBBox)
self.btnDel.grid(row=3, column=2, sticky=W + E + N)
self.btnClear = Button(self.frame, text='ClearAll', command=self.clearBBox)
self.btnClear.grid(row=4, column=2, sticky=W + E + N)
# control panel for image navigation
self.ctrPanel = Frame(self.frame)
self.ctrPanel.grid(row=5, column=1, columnspan=2, sticky=W + E)
self.prevBtn = Button(self.ctrPanel, text='<< Prev', width=10, command=self.prevImage)
self.prevBtn.pack(side=LEFT, padx=5, pady=3)
self.nextBtn = Button(self.ctrPanel, text='Next >>', width=10, command=self.nextImage)
self.nextBtn.pack(side=LEFT, padx=5, pady=3)
self.progLabel = Label(self.ctrPanel, text="Progress: / ")
self.progLabel.pack(side=LEFT, padx=5)
self.tmpLabel = Label(self.ctrPanel, text="Go to Image No.")
self.tmpLabel.pack(side=LEFT, padx=5)
self.idxEntry = Entry(self.ctrPanel, width=5)
self.idxEntry.pack(side=LEFT)
self.goBtn = Button(self.ctrPanel, text='Go', command=self.gotoImage)
self.goBtn.pack(side=LEFT)
# example panel for illustration
self.egPanel = Frame(self.frame, border=10)
self.egPanel.grid(row=1, column=0, rowspan=5, sticky=N)
self.tmpLabel2 = Label(self.egPanel, text="Examples:")
self.tmpLabel2.pack(side=TOP, pady=5)
self.egLabels = []
for i in range(3):
self.egLabels.append(Label(self.egPanel))
self.egLabels[-1].pack(side=TOP)
# display mouse position
self.disp = Label(self.ctrPanel, text='')
self.disp.pack(side=RIGHT)
self.frame.columnconfigure(1, weight=1)
self.frame.rowconfigure(4, weight=1)
def loadDir(self,dbg=False):
if not dbg:
s = self.entry.get()
self.parent.focus()
self.category=int(s)
else:
s = r'D:\Data store file\labelGUI'
print('self.category =%d' % (self.category))
self.imageDir = os.path.join(r'./images', '%03d' % (self.category))
print(self.imageDir)
self.imageList = glob.glob(os.path.join(self.imageDir, '*.jpg'))
if len(self.imageList) == 0:
print('No .jpg images found in the specified dir!')
return
else:
print('num=%d' % (len(self.imageList)))
# default to the 1st image in the collection
self.cur = 1
self.total = len(self.imageList)
# set up output dir
self.outDir = os.path.join(r'./labels', '%03d' % (self.category))
if not os.path.exists(self.outDir):
os.mkdir(self.outDir)
# load example bboxes
self.egDir = os.path.join(r'./Examples', '%03d' % (self.category))
# if not os.path.exists(self.egDir):
# return
filelist = glob.glob(os.path.join(self.egDir, '*.jpg'))
self.tmp = []
self.egList = []
random.shuffle(filelist)
for (i, f) in enumerate(filelist):
if i == 3:
break
im = Image.open(f)
r = min(SIZE[0] / im.size[0], SIZE[1] / im.size[1])
new_size = int(r * im.size[0]), int(r * im.size[1])
self.tmp.append(im.resize(new_size, Image.ANTIALIAS))
self.egList.append(ImageTk.PhotoImage(self.tmp[-1]))
self.egLabels[i].config(image=self.egList[-1], width=SIZE[0], height=SIZE[1])
self.loadImage()
print('%d images loaded from %s' % (self.total, s))
def loadImage(self):
# load image
imagepath = self.imageList[self.cur - 1]
pil_image = Image.open(imagepath)
global w0,h0
w0,h0=pil_image.size
# resize to the target size
pil_image = pil_image.resize((DEST_SIZE[0], DEST_SIZE[1]), Image.ANTIALIAS)
# pil_image = imgresize(w, h, w_box, h_box, pil_image)
self.img = pil_image
self.tkimg = ImageTk.PhotoImage(pil_image)
self.mainPanel.config(width=max(self.tkimg.width(), 400), height=max(self.tkimg.height(), 400))
self.mainPanel.create_image(0, 0, image=self.tkimg, anchor=NW)
self.progLabel.config(text="%04d/%04d" % (self.cur, self.total))
# load labels
self.clearBBox()
self.imagename = os.path.split(imagepath)[-1].split('.')[0]
labelname = self.imagename + '.txt'
self.labelfilename = os.path.join(self.outDir, labelname)
bbox_cnt = 0
if os.path.exists(self.labelfilename):
with open(self.labelfilename) as f:
for (i, line) in enumerate(f):
if i == 0:
bbox_cnt = int(line.strip())
continue
print(line)
tmp = [(t.strip()) for t in line.split()]
print("********************")
print(DEST_SIZE)
# tmp = (0.1, 0.3, 0.5, 0.5)
print("tmp[0,1,2,3]===%.2f, %.2f, %.2f, %.2f" % (float(tmp[0]), float(tmp[1]), float(tmp[2]), float(tmp[3])))
# print("%.2f,%.2f,%.2f,%.2f" % (tmp[0], tmp[1], tmp[2], tmp[3]))
print("********************")
# tx = (10, 20, 30, 40)
# self.bboxList.append(tuple(tx))
self.bboxList.append(tuple(tmp))
tmp[0] = float(tmp[0])
tmp[1] = float(tmp[1])
tmp[2] = float(tmp[2])
tmp[3] = float(tmp[3])
tx0 = int(tmp[0] * DEST_SIZE[0])
ty0 = int(tmp[1] * DEST_SIZE[1])
tx1 = int(tmp[2] * DEST_SIZE[0])
ty1 = int(tmp[3] * DEST_SIZE[1])
print("tx0, ty0, tx1, ty1")
print(tx0, ty0, tx1, ty1)
tmpId = self.mainPanel.create_rectangle(tx0, ty0, tx1, ty1, \
width=2, \
outline=COLORS[(len(self.bboxList) - 1) % len(COLORS)])
self.bboxIdList.append(tmpId)
self.listbox.insert(END, '(%.2f,%.2f)-(%.2f,%.2f)' % (tmp[0], tmp[1], tmp[2], tmp[3]))
# self.listbox.insert(END, '(%d, %d) -> (%d, %d)' %(tmp[0], tmp[1], tmp[2], tmp[3]))
self.listbox.itemconfig(len(self.bboxIdList) - 1, fg=COLORS[(len(self.bboxIdList) - 1) % len(COLORS)])
def saveImage(self):
# print "-----1--self.bboxList---------"
print(self.bboxList)
# print "-----2--self.bboxList---------"
with open(self.labelfilename, 'w') as f:
f.write('%d\n' % len(self.bboxList))
for bbox in self.bboxList:
f.write(' '.join(map(str, bbox)) + '\n')
print('Image No. %d saved' % (self.cur))
def mouseClick(self, event):
if self.STATE['click'] == 0:
self.STATE['x'], self.STATE['y'] = event.x, event.y
else:
x1, x2 = min(self.STATE['x'], event.x), max(self.STATE['x'], event.x)
y1, y2 = min(self.STATE['y'], event.y), max(self.STATE['y'], event.y)
x1, x2 = x1 / DEST_SIZE[0], x2 / DEST_SIZE[0];
y1, y2 = y1 / DEST_SIZE[1], y2 / DEST_SIZE[1];
self.bboxList.append((x1, y1, x2, y2))
self.bboxIdList.append(self.bboxId)
self.bboxId = None
self.listbox.insert(END, '(%.2f, %.2f)-(%.2f, %.2f)' % (x1, y1, x2, y2))
self.listbox.itemconfig(len(self.bboxIdList) - 1, fg=COLORS[(len(self.bboxIdList) - 1) % len(COLORS)])
self.STATE['click'] = 1 - self.STATE['click']
def mouseMove(self, event):
self.disp.config(text='x: %.2f, y: %.2f' % (event.x / DEST_SIZE[0], event.y / DEST_SIZE[1]))
if self.tkimg:
if self.hl:
self.mainPanel.delete(self.hl)
self.hl = self.mainPanel.create_line(0, event.y, self.tkimg.width(), event.y, width=2)
if self.vl:
self.mainPanel.delete(self.vl)
self.vl = self.mainPanel.create_line(event.x, 0, event.x, self.tkimg.height(), width=2)
if 1 == self.STATE['click']:
if self.bboxId:
self.mainPanel.delete(self.bboxId)
self.bboxId = self.mainPanel.create_rectangle(self.STATE['x'], self.STATE['y'], \
event.x, event.y, \
width=2, \
outline=COLORS[len(self.bboxList) % len(COLORS)])
def cancelBBox(self, event):
if 1 == self.STATE['click']:
if self.bboxId:
self.mainPanel.delete(self.bboxId)
self.bboxId = None
self.STATE['click'] = 0
def delBBox(self):
sel = self.listbox.curselection()
if len(sel) != 1:
return
idx = int(sel[0])
self.mainPanel.delete(self.bboxIdList[idx])
self.bboxIdList.pop(idx)
self.bboxList.pop(idx)
self.listbox.delete(idx)
def clearBBox(self):
for idx in range(len(self.bbo
|
:
self.mainPanel.delete(self.bboxIdList[idx])
self.listbox.delete(0, len(self.bboxList))
self.bboxIdList = []
self.bboxList = []
def prevImage(self, event=None):
self.saveImage()
if self.cur > 1:
self.cur -= 1
self.loadImage()
def nextImage(self, event=None):
self.saveImage()
if self.cur < self.total:
self.cur += 1
self.loadImage()
def gotoImage(self):
idx = int(self.idxEntry.get())
if 1 <= idx and idx <= self.total:
self.saveImage()
self.cur = idx
self.loadImage()
## def setImage(self, imagepath = r'test2.png'):
## self.img = Image.open(imagepath)
## self.tkimg = ImageTk.PhotoImage(self.img)
## self.mainPanel.config(width = self.tkimg.width())
## self.mainPanel.config(height = self.tkimg.height())
## self.mainPanel.create_image(0, 0, image = self.tkimg, anchor=NW)
def imgresize(w, h, w_box, h_box, pil_image):
'''
resize a pil_image object so it will fit into
a box of size w_box times h_box, but retain aspect ratio
'''
f1 = 1.0 * w_box / w # 1.0 forces float division in Python2
f2 = 1.0 * h_box / h
factor = min([f1, f2])
# print(f1, f2, factor) # test
# use best down-sizing filter
width = int(w * factor)
height = int(h * factor)
return pil_image.resize((width, height), Image.ANTIALIAS)
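# A quick worked example of the scaling above (purely illustrative): fitting a
# 1000x500 image into a 500x500 box gives f1 = 0.5, f2 = 1.0, so factor = 0.5 and
# the image is resized to 500x250, preserving the 2:1 aspect ratio.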
if __name__ == '__main__':
root = Tk()
tool = LabelTool(root)
root.mainloop()
|
xIdList))
|
blur_filter.rs
|
use crate::add_field_accessors;
use crate::avm1::error::Error;
use crate::avm1::{Object, ScriptObject, TObject, Value};
use crate::impl_custom_object_without_set;
use gc_arena::{Collect, GcCell, MutationContext};
use crate::avm1::activation::Activation;
use std::fmt;
/// A BlurFilter
#[derive(Clone, Copy, Collect)]
#[collect(no_drop)]
pub struct BlurFilterObject<'gc>(GcCell<'gc, BlurFilterData<'gc>>);
#[derive(Clone, Collect)]
#[collect(no_drop)]
pub struct BlurFilterData<'gc> {
/// The underlying script object.
base: ScriptObject<'gc>,
|
impl fmt::Debug for BlurFilterObject<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let this = self.0.read();
f.debug_struct("BlurFilter")
.field("blurX", &this.blur_x)
.field("blurY", &this.blur_y)
.field("quality", &this.quality)
.finish()
}
}
impl<'gc> BlurFilterObject<'gc> {
add_field_accessors!(
[set_blur_x, blur_x, blur_x, f64],
[set_blur_y, blur_y, blur_y, f64],
[set_quality, quality, quality, i32],
);
pub fn empty_object(gc_context: MutationContext<'gc, '_>, proto: Option<Object<'gc>>) -> Self {
BlurFilterObject(GcCell::allocate(
gc_context,
BlurFilterData {
base: ScriptObject::object(gc_context, proto),
blur_x: 4.0,
blur_y: 4.0,
quality: 1,
},
))
}
}
impl<'gc> TObject<'gc> for BlurFilterObject<'gc> {
impl_custom_object_without_set!(base);
fn set(
&self,
name: &str,
value: Value<'gc>,
activation: &mut Activation<'_, 'gc, '_>,
) -> Result<(), Error<'gc>> {
let base = self.0.read().base;
base.internal_set(
name,
value,
activation,
(*self).into(),
Some(activation.context.avm1.prototypes.blur_filter),
)
}
fn as_blur_filter_object(&self) -> Option<BlurFilterObject<'gc>> {
Some(*self)
}
fn create_bare_object(
&self,
activation: &mut Activation<'_, 'gc, '_>,
this: Object<'gc>,
) -> Result<Object<'gc>, Error<'gc>> {
Ok(BlurFilterObject::empty_object(activation.context.gc_context, Some(this)).into())
}
}
|
blur_x: f64,
blur_y: f64,
quality: i32,
}
|
widgets_table.rs
|
use tui::{
backend::TestBackend,
buffer::Buffer,
layout::Constraint,
style::{Color, Modifier, Style},
text::{Span, Spans},
widgets::{Block, Borders, Cell, Row, Table, TableState},
Terminal,
};
#[test]
fn widgets_table_column_spacing_can_be_changed() {
let test_case = |column_spacing, expected| {
let backend = TestBackend::new(30, 10);
let mut terminal = Terminal::new(backend).unwrap();
terminal
.draw(|f| {
let size = f.size();
let table = Table::new(vec![
Row::new(vec!["Row11", "Row12", "Row13"]),
Row::new(vec!["Row21", "Row22", "Row23"]),
Row::new(vec!["Row31", "Row32", "Row33"]),
Row::new(vec!["Row41", "Row42", "Row43"]),
])
.header(Row::new(vec!["Head1", "Head2", "Head3"]).bottom_margin(1))
.block(Block::default().borders(Borders::ALL))
.widths(&[
Constraint::Length(5),
Constraint::Length(5),
Constraint::Length(5),
])
.column_spacing(column_spacing);
f.render_widget(table, size);
})
.unwrap();
terminal.backend().assert_buffer(&expected);
};
// no space between columns
test_case(
0,
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1Head2Head3 │",
"│ │",
"│Row11Row12Row13 │",
"│Row21Row22Row23 │",
"│Row31Row32Row33 │",
"│Row41Row42Row43 │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// one space between columns
test_case(
1,
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1 Head2 Head3 │",
"│ │",
"│Row11 Row12 Row13 │",
"│Row21 Row22 Row23 │",
"│Row31 Row32 Row33 │",
"│Row41 Row42 Row43 │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// enough space to just not hide the third column
test_case(
6,
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1 Head2 Head3 │",
"│ │",
"│Row11 Row12 Row13 │",
"│Row21 Row22 Row23 │",
"│Row31 Row32 Row33 │",
"│Row41 Row42 Row43 │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// enough space to hide part of the third column
test_case(
7,
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1 Head2 Head│",
"│ │",
"│Row11 Row12 Row1│",
"│Row21 Row22 Row2│",
"│Row31 Row32 Row3│",
"│Row41 Row42 Row4│",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
}
#[test]
fn widgets_table_columns_widths_can_use_fixed_length_constraints() {
let test_case = |widths, expected| {
let backend = TestBackend::new(30, 10);
let mut terminal = Terminal::new(backend).unwrap();
terminal
.draw(|f| {
let size = f.size();
let table = Table::new(vec![
Row::new(vec!["Row11", "Row12", "Row13"]),
Row::new(vec!["Row21", "Row22", "Row23"]),
Row::new(vec!["Row31", "Row32", "Row33"]),
Row::new(vec!["Row41", "Row42", "Row43"]),
])
.header(Row::new(vec!["Head1", "Head2", "Head3"]).bottom_margin(1))
.block(Block::default().borders(Borders::ALL))
.widths(widths);
f.render_widget(table, size);
})
.unwrap();
terminal.backend().assert_buffer(&expected);
|
// columns of zero width show nothing
test_case(
&[
Constraint::Length(0),
Constraint::Length(0),
Constraint::Length(0),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// columns of 1 width trim
test_case(
&[
Constraint::Length(1),
Constraint::Length(1),
Constraint::Length(1),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│H H H │",
"│ │",
"│R R R │",
"│R R R │",
"│R R R │",
"│R R R │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// columns of large width just before pushing a column off
test_case(
&[
Constraint::Length(8),
Constraint::Length(8),
Constraint::Length(8),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1 Head2 Head3 │",
"│ │",
"│Row11 Row12 Row13 │",
"│Row21 Row22 Row23 │",
"│Row31 Row32 Row33 │",
"│Row41 Row42 Row43 │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
}
#[test]
fn widgets_table_columns_widths_can_use_percentage_constraints() {
let test_case = |widths, expected| {
let backend = TestBackend::new(30, 10);
let mut terminal = Terminal::new(backend).unwrap();
terminal
.draw(|f| {
let size = f.size();
let table = Table::new(vec![
Row::new(vec!["Row11", "Row12", "Row13"]),
Row::new(vec!["Row21", "Row22", "Row23"]),
Row::new(vec!["Row31", "Row32", "Row33"]),
Row::new(vec!["Row41", "Row42", "Row43"]),
])
.header(Row::new(vec!["Head1", "Head2", "Head3"]).bottom_margin(1))
.block(Block::default().borders(Borders::ALL))
.widths(widths)
.column_spacing(0);
f.render_widget(table, size);
})
.unwrap();
terminal.backend().assert_buffer(&expected);
};
// columns of zero width show nothing
test_case(
&[
Constraint::Percentage(0),
Constraint::Percentage(0),
Constraint::Percentage(0),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// columns that are not wide enough trim the data
test_case(
&[
Constraint::Percentage(11),
Constraint::Percentage(11),
Constraint::Percentage(11),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│HeaHeaHea │",
"│ │",
"│RowRowRow │",
"│RowRowRow │",
"│RowRowRow │",
"│RowRowRow │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// columns of large width just before pushing a column off
test_case(
&[
Constraint::Percentage(33),
Constraint::Percentage(33),
Constraint::Percentage(33),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1 Head2 Head3 │",
"│ │",
"│Row11 Row12 Row13 │",
"│Row21 Row22 Row23 │",
"│Row31 Row32 Row33 │",
"│Row41 Row42 Row43 │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// percentages summing to 100 should give equal widths
test_case(
&[Constraint::Percentage(50), Constraint::Percentage(50)],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1 Head2 │",
"│ │",
"│Row11 Row12 │",
"│Row21 Row22 │",
"│Row31 Row32 │",
"│Row41 Row42 │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
}
#[test]
fn widgets_table_columns_widths_can_use_mixed_constraints() {
let test_case = |widths, expected| {
let backend = TestBackend::new(30, 10);
let mut terminal = Terminal::new(backend).unwrap();
terminal
.draw(|f| {
let size = f.size();
let table = Table::new(vec![
Row::new(vec!["Row11", "Row12", "Row13"]),
Row::new(vec!["Row21", "Row22", "Row23"]),
Row::new(vec!["Row31", "Row32", "Row33"]),
Row::new(vec!["Row41", "Row42", "Row43"]),
])
.header(Row::new(vec!["Head1", "Head2", "Head3"]).bottom_margin(1))
.block(Block::default().borders(Borders::ALL))
.widths(widths);
f.render_widget(table, size);
})
.unwrap();
terminal.backend().assert_buffer(&expected);
};
// columns of zero width show nothing
test_case(
&[
Constraint::Percentage(0),
Constraint::Length(0),
Constraint::Percentage(0),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// columns that are not wide enough trim the data
test_case(
&[
Constraint::Percentage(11),
Constraint::Length(20),
Constraint::Percentage(11),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Hea Head2 Hea│",
"│ │",
"│Row Row12 Row│",
"│Row Row22 Row│",
"│Row Row32 Row│",
"│Row Row42 Row│",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// columns of large width just before pushing a column off
test_case(
&[
Constraint::Percentage(33),
Constraint::Length(10),
Constraint::Percentage(33),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1 Head2 Head3 │",
"│ │",
"│Row11 Row12 Row13 │",
"│Row21 Row22 Row23 │",
"│Row31 Row32 Row33 │",
"│Row41 Row42 Row43 │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// columns of large size (>100% total) hide the last column
test_case(
&[
Constraint::Percentage(60),
Constraint::Length(10),
Constraint::Percentage(60),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1 Head2 │",
"│ │",
"│Row11 Row12 │",
"│Row21 Row22 │",
"│Row31 Row32 │",
"│Row41 Row42 │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
}
#[test]
fn widgets_table_columns_widths_can_use_ratio_constraints() {
let test_case = |widths, expected| {
let backend = TestBackend::new(30, 10);
let mut terminal = Terminal::new(backend).unwrap();
terminal
.draw(|f| {
let size = f.size();
let table = Table::new(vec![
Row::new(vec!["Row11", "Row12", "Row13"]),
Row::new(vec!["Row21", "Row22", "Row23"]),
Row::new(vec!["Row31", "Row32", "Row33"]),
Row::new(vec!["Row41", "Row42", "Row43"]),
])
.header(Row::new(vec!["Head1", "Head2", "Head3"]).bottom_margin(1))
.block(Block::default().borders(Borders::ALL))
.widths(widths)
.column_spacing(0);
f.render_widget(table, size);
})
.unwrap();
terminal.backend().assert_buffer(&expected);
};
// columns of zero width show nothing
test_case(
&[
Constraint::Ratio(0, 1),
Constraint::Ratio(0, 1),
Constraint::Ratio(0, 1),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// columns that are not wide enough trim the data
test_case(
&[
Constraint::Ratio(1, 9),
Constraint::Ratio(1, 9),
Constraint::Ratio(1, 9),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│HeaHeaHea │",
"│ │",
"│RowRowRow │",
"│RowRowRow │",
"│RowRowRow │",
"│RowRowRow │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// columns of large width just before pushing a column off
test_case(
&[
Constraint::Ratio(1, 3),
Constraint::Ratio(1, 3),
Constraint::Ratio(1, 3),
],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1 Head2 Head3 │",
"│ │",
"│Row11 Row12 Row13 │",
"│Row21 Row22 Row23 │",
"│Row31 Row32 Row33 │",
"│Row41 Row42 Row43 │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
// ratios summing to 1 should give equal widths
test_case(
&[Constraint::Ratio(1, 2), Constraint::Ratio(1, 2)],
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1 Head2 │",
"│ │",
"│Row11 Row12 │",
"│Row21 Row22 │",
"│Row31 Row32 │",
"│Row41 Row42 │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
}
#[test]
fn widgets_table_can_have_rows_with_multi_lines() {
let test_case = |state: &mut TableState, expected: Buffer| {
let backend = TestBackend::new(30, 8);
let mut terminal = Terminal::new(backend).unwrap();
terminal
.draw(|f| {
let size = f.size();
let table = Table::new(vec![
Row::new(vec!["Row11", "Row12", "Row13"]),
Row::new(vec!["Row21", "Row22", "Row23"]).height(2),
Row::new(vec!["Row31", "Row32", "Row33"]),
Row::new(vec!["Row41", "Row42", "Row43"]).height(2),
])
.header(Row::new(vec!["Head1", "Head2", "Head3"]).bottom_margin(1))
.block(Block::default().borders(Borders::ALL))
.highlight_symbol(">> ")
.widths(&[
Constraint::Length(5),
Constraint::Length(5),
Constraint::Length(5),
])
.column_spacing(1);
f.render_stateful_widget(table, size, state);
})
.unwrap();
terminal.backend().assert_buffer(&expected);
};
let mut state = TableState::default();
// no selection
test_case(
&mut state,
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│Head1 Head2 Head3 │",
"│ │",
"│Row11 Row12 Row13 │",
"│Row21 Row22 Row23 │",
"│ │",
"│Row31 Row32 Row33 │",
"└────────────────────────────┘",
]),
);
// select first
state.select(Some(0));
test_case(
&mut state,
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│ Head1 Head2 Head3 │",
"│ │",
"│>> Row11 Row12 Row13 │",
"│ Row21 Row22 Row23 │",
"│ │",
"│ Row31 Row32 Row33 │",
"└────────────────────────────┘",
]),
);
// select second (we don't partially show the 4th row)
state.select(Some(1));
test_case(
&mut state,
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│ Head1 Head2 Head3 │",
"│ │",
"│ Row11 Row12 Row13 │",
"│>> Row21 Row22 Row23 │",
"│ │",
"│ Row31 Row32 Row33 │",
"└────────────────────────────┘",
]),
);
// select 4th (we don't partially show the 1st row)
state.select(Some(3));
test_case(
&mut state,
Buffer::with_lines(vec![
"┌────────────────────────────┐",
"│ Head1 Head2 Head3 │",
"│ │",
"│ Row31 Row32 Row33 │",
"│>> Row41 Row42 Row43 │",
"│ │",
"│ │",
"└────────────────────────────┘",
]),
);
}
#[test]
fn widgets_table_can_have_elements_styled_individually() {
let backend = TestBackend::new(30, 4);
let mut terminal = Terminal::new(backend).unwrap();
let mut state = TableState::default();
state.select(Some(0));
terminal
.draw(|f| {
let size = f.size();
let table = Table::new(vec![
Row::new(vec!["Row11", "Row12", "Row13"]).style(Style::default().fg(Color::Green)),
Row::new(vec![
Cell::from("Row21"),
Cell::from("Row22").style(Style::default().fg(Color::Yellow)),
Cell::from(Spans::from(vec![
Span::raw("Row"),
Span::styled("23", Style::default().fg(Color::Blue)),
]))
.style(Style::default().fg(Color::Red)),
])
.style(Style::default().fg(Color::LightGreen)),
])
.header(Row::new(vec!["Head1", "Head2", "Head3"]).bottom_margin(1))
.block(Block::default().borders(Borders::LEFT | Borders::RIGHT))
.highlight_symbol(">> ")
.highlight_style(Style::default().add_modifier(Modifier::BOLD))
.widths(&[
Constraint::Length(6),
Constraint::Length(6),
Constraint::Length(6),
])
.column_spacing(1);
f.render_stateful_widget(table, size, &mut state);
})
.unwrap();
let mut expected = Buffer::with_lines(vec![
"│ Head1 Head2 Head3 │",
"│ │",
"│>> Row11 Row12 Row13 │",
"│ Row21 Row22 Row23 │",
]);
// First row = row color + highlight style
for col in 1..=28 {
expected.get_mut(col, 2).set_style(
Style::default()
.fg(Color::Green)
.add_modifier(Modifier::BOLD),
);
}
// Second row:
// 1. row color
for col in 1..=28 {
expected
.get_mut(col, 3)
.set_style(Style::default().fg(Color::LightGreen));
}
// 2. cell color
for col in 11..=16 {
expected
.get_mut(col, 3)
.set_style(Style::default().fg(Color::Yellow));
}
for col in 18..=23 {
expected
.get_mut(col, 3)
.set_style(Style::default().fg(Color::Red));
}
// 3. text color
for col in 21..=22 {
expected
.get_mut(col, 3)
.set_style(Style::default().fg(Color::Blue));
}
terminal.backend().assert_buffer(&expected);
}
#[test]
fn widgets_table_should_render_even_if_empty() {
let backend = TestBackend::new(30, 4);
let mut terminal = Terminal::new(backend).unwrap();
terminal
.draw(|f| {
let size = f.size();
let table = Table::new(vec![])
.header(Row::new(vec!["Head1", "Head2", "Head3"]))
.block(Block::default().borders(Borders::LEFT | Borders::RIGHT))
.widths(&[
Constraint::Length(6),
Constraint::Length(6),
Constraint::Length(6),
])
.column_spacing(1);
f.render_widget(table, size);
})
.unwrap();
let expected = Buffer::with_lines(vec![
"│Head1 Head2 Head3 │",
"│ │",
"│ │",
"│ │",
]);
terminal.backend().assert_buffer(&expected);
}
|
};
|
prometheus.py
|
from server import app
from flask import Response, request
from prometheus_client import generate_latest, Counter
from functools import wraps
# route to display configured Prometheus metrics
# note that you will need to set up custom metric observers for your app
@app.route('/metrics')
def prometheus_metrics():
MIMETYPE = 'text/plain; version=0.0.4; charset=utf-8'
return Response(generate_latest(), mimetype=MIMETYPE)
|
# @prometheus.track_requests
# def example():
# pass
route_counter = Counter('requests_for_routes', 'Number of requests for specified routes', ['method', 'endpoint'])
def track_requests(route):
@wraps(route)
def wrapper(*args, **kwargs):
route_labels = {
"method": request.method,
"endpoint": str(request.path)
}
route_counter.labels(**route_labels).inc()
return route(*args, **kwargs)
return wrapper
|
# creates a Prometheus Counter to track requests for specified routes
# usage:
# @app.route('/example')
|
service_test.go
|
// Copyright 2022 The CubeFS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
package mqproxy
import (
"context"
"net/http/httptest"
"sync"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"github.com/cubefs/blobstore/api/clustermgr"
"github.com/cubefs/blobstore/api/mqproxy"
"github.com/cubefs/blobstore/common/kafka"
"github.com/cubefs/blobstore/common/rpc"
|
var (
ctx = context.Background()
mqproxyServer *httptest.Server
once sync.Once
)
func runMockService(s *Service) string {
once.Do(func() {
mqproxyServer = httptest.NewServer(NewHandler(s))
})
return mqproxyServer.URL
}
func newMockService(t *testing.T) *Service {
ctr := gomock.NewController(t)
register := NewMockRegister(ctr)
register.EXPECT().Register(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, info c.RegisterInfo) error {
return nil
})
blobDeleteMgr := NewMockBlobDeleteHandler(ctr)
blobDeleteMgr.EXPECT().SendDeleteMsg(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, info *mqproxy.DeleteArgs) error {
if len(info.Blobs) > 1 {
return errors.New("fake send delete message failed")
}
return nil
},
)
shardRepairMgr := NewMockShardRepairHandler(ctr)
shardRepairMgr.EXPECT().SendShardRepairMsg(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, info *mqproxy.ShardRepairArgs) error {
if info.Vid == 100 {
return errors.New("fake send shard repair message failed")
}
return nil
})
return &Service{
clusterMgrClient: register,
blobDeleteMgr: blobDeleteMgr,
shardRepairMgr: shardRepairMgr,
Config: Config{
ClusterID: 1,
},
}
}
func newClient() rpc.Client {
return rpc.NewClient(&rpc.Config{})
}
func TestService(t *testing.T) {
runMockService(newMockService(t))
cli := newClient()
deleteCases := []struct {
args mqproxy.DeleteArgs
code int
}{
{
args: mqproxy.DeleteArgs{
ClusterID: 1,
Blobs: []mqproxy.BlobDelete{{Bid: 0, Vid: 0}},
},
code: 200,
},
{
args: mqproxy.DeleteArgs{
ClusterID: 2,
Blobs: []mqproxy.BlobDelete{{Bid: 0, Vid: 0}},
},
code: 706,
},
{
args: mqproxy.DeleteArgs{
ClusterID: 1,
Blobs: []mqproxy.BlobDelete{{Bid: 0, Vid: 0}, {Bid: 1, Vid: 1}},
},
code: 500,
},
}
for _, tc := range deleteCases {
err := cli.PostWith(ctx, mqproxyServer.URL+"/deletemsg", nil, tc.args)
require.Equal(t, tc.code, rpc.DetectStatusCode(err))
}
shardRepairCases := []struct {
args mqproxy.ShardRepairArgs
code int
}{
{
args: mqproxy.ShardRepairArgs{
ClusterID: 1,
Bid: 1,
Vid: 1,
BadIdxes: nil,
Reason: "",
},
code: 200,
},
{
args: mqproxy.ShardRepairArgs{
ClusterID: 2,
Bid: 1,
Vid: 1,
BadIdxes: nil,
Reason: "",
},
code: 706,
},
{
args: mqproxy.ShardRepairArgs{
ClusterID: 1,
Bid: 1,
Vid: 100,
BadIdxes: nil,
Reason: "",
},
code: 500,
},
}
for _, tc := range shardRepairCases {
err := cli.PostWith(ctx, mqproxyServer.URL+"/repairmsg", nil, tc.args)
require.Equal(t, tc.code, rpc.DetectStatusCode(err))
}
}
func TestConfigFix(t *testing.T) {
testCases := []struct {
cfg *Config
err error
}{
{cfg: &Config{}, err: ErrIllegalTopic},
{cfg: &Config{MQ: MQConfig{BlobDeleteTopic: "test"}}, err: ErrIllegalTopic},
{cfg: &Config{MQ: MQConfig{BlobDeleteTopic: "test", ShardRepairTopic: "test1"}}, err: ErrIllegalTopic},
{cfg: &Config{MQ: MQConfig{BlobDeleteTopic: "test", ShardRepairTopic: "test", ShardRepairPriorityTopic: "test3"}}, err: ErrIllegalTopic},
{cfg: &Config{MQ: MQConfig{BlobDeleteTopic: "test", ShardRepairTopic: "test1", ShardRepairPriorityTopic: "test"}}, err: ErrIllegalTopic},
{cfg: &Config{MQ: MQConfig{BlobDeleteTopic: "test", ShardRepairTopic: "test1", ShardRepairPriorityTopic: "test3"}}, err: nil},
}
for _, tc := range testCases {
err := tc.cfg.checkAndFix()
require.Equal(t, true, errors.Is(err, tc.err))
tc.cfg.shardRepairCfg()
tc.cfg.blobDeleteCfg()
}
}
func TestRegister(t *testing.T) {
service := newMockService(t)
err := service.register()
require.NoError(t, err)
}
func TestNewMqService(t *testing.T) {
seedBroker, leader := NewBrokersWith2Responses(t)
defer seedBroker.Close()
defer leader.Close()
testCases := []struct {
cfg Config
}{
{
cfg: Config{},
},
// TODO: wait for cm to change rpc
{
cfg: Config{
MQ: MQConfig{
BlobDeleteTopic: "test1",
ShardRepairTopic: "test2",
ShardRepairPriorityTopic: "test3",
MsgSender: kafka.ProducerCfg{
BrokerList: []string{seedBroker.Addr()},
TimeoutMs: 1,
},
},
Clustermgr: clustermgr.Config{
LbConfig: rpc.LbConfig{Hosts: []string{"http://127.0.0.1:9321"}},
},
},
},
{
cfg: Config{
MQ: MQConfig{
BlobDeleteTopic: "test1",
ShardRepairTopic: "test2",
ShardRepairPriorityTopic: "test3",
},
Clustermgr: clustermgr.Config{
LbConfig: rpc.LbConfig{Hosts: []string{"http://127.0.0.1:9321"}},
},
},
},
}
for _, tc := range testCases {
_, err := NewService(tc.cfg)
require.Error(t, err)
}
}
|
c "github.com/cubefs/blobstore/mqproxy/client"
"github.com/cubefs/blobstore/util/errors"
)
|
exprs2.go
|
/*
Copyright 2018 Simon Schmidt
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package query
import "gopkg.in/src-d/go-mysql-server.v0/sql"
import "strings"
type Lowest []sql.Expression
var _ sql.Expression = (Lowest)(nil)
func(e Lowest) Resolved() bool { return true }
func(e Lowest) String() string {
s := make([]string,len(e))
for i,ee := range e { s[i] = ee.String() }
return "lowest("+strings.Join(s,", ")+")"
}
func(e Lowest) Type() sql.Type {
if len(e)==0 { return sql.Null }
return e[0].Type()
}
func(e Lowest) IsNullable() bool {
if len(e)==0 { return true }
return e[0].IsNullable()
}
func(e Lowest) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
if len(e)==0 { return nil,nil }
tp := e[0].Type()
ref,err := e[0].Eval(ctx,row)
if err!=nil { return nil,err }
for _,ee := range e[1:] {
oth,err := ee.Eval(ctx,row)
if err!=nil { return nil,err }
cmp,err := tp.Compare(ref,oth)
if err!=nil { return nil,err }
if cmp>0 {
ref = oth
}
}
return ref,nil
}
func(e Lowest) TransformUp(tf sql.TransformExprFunc) (_ sql.Expression, err error) {
ne := make(Lowest,len(e))
for i,ee := range e {
ne[i],err = tf(ee)
if err!=nil { return }
}
return tf(ne)
}
func(e Lowest) Children() []sql.Expression { return e }
func NewLowest(exprs ...sql.Expression) (sql.Expression, error)
|
type Highest []sql.Expression
var _ sql.Expression = (Highest)(nil)
func(e Highest) Resolved() bool { return true }
func(e Highest) String() string {
s := make([]string,len(e))
for i,ee := range e { s[i] = ee.String() }
return "highest("+strings.Join(s,", ")+")"
}
func(e Highest) Type() sql.Type {
if len(e)==0 { return sql.Null }
return e[0].Type()
}
func(e Highest) IsNullable() bool {
if len(e)==0 { return true }
return e[0].IsNullable()
}
func(e Highest) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
if len(e)==0 { return nil,nil }
tp := e[0].Type()
ref,err := e[0].Eval(ctx,row)
if err!=nil { return nil,err }
for _,ee := range e[1:] {
oth,err := ee.Eval(ctx,row)
if err!=nil { return nil,err }
cmp,err := tp.Compare(ref,oth)
if err!=nil { return nil,err }
if cmp<0 {
ref = oth
}
}
return ref,nil
}
func(e Highest) TransformUp(tf sql.TransformExprFunc) (_ sql.Expression, err error) {
ne := make(Highest,len(e))
for i,ee := range e {
ne[i],err = tf(ee)
if err!=nil { return }
}
return tf(ne)
}
func(e Highest) Children() []sql.Expression { return e }
func NewHighest(exprs ...sql.Expression) (sql.Expression, error) { return Highest(exprs),nil }
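// Example usage (a sketch; how these expressions get registered with the engine's
// function catalog is an assumption about the host application, not shown in this file):
//
//   SELECT lowest(price_a, price_b, price_c), highest(price_a, price_b, price_c) FROM offers;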
|
{ return Lowest(exprs),nil }
|
Training JS #21 Methods of String object--trim() and the string template.js
|
/*
This is the last lesson about the string object; we will learn two techniques used to format strings.
The first is a simple method: trim(). Usage is very simple:
stringObject.trim()
Its role is very simple too: remove the whitespace at the edge of the string. example:
var str=" abc ";
console.log(str.trim());
str="\n\n\n\nabc\t\t\t"
console.log(str.trim());
//output:
abc
abc
Various non-visible characters such as space and tab are called whitespace. For more detailed information, please refer to: whitespace
trim() only removes whitespace at the edges of the string; whitespace in the middle of the string will not be removed.
var str=" a b c ";
console.log(str.trim());
//output:
a b c
Next we will learn a new member of ES6: the string template. Look at the following code:
var s1="My name is John.";
var s2='My name is John.';
var s3=`My name is John.`;
Are their values equal? Yes, they are equal. Do you see the difference between s2 and s3? Bingo! Single quotes ' and backticks ` are different. Of course, this is not the biggest difference between them. Using double quotes " or single quotes ', we get a fixed string value. Using the backtick `, we define a string template.
We can use ${variable} to insert a variable into a string template, like this:
var a=1,b=2;
console.log(`${a} + ${b} = ${a+b}`);
//output:
1 + 2 = 3
Or we can write this:
function plus(a,b){
console.log(`${a} + ${b} = ${a+b}`);
}
plus(1,2);
plus(3,4);
//output:
1 + 2 = 3
3 + 4 = 7
Isn't it interesting? There are more interesting things. When a string template appears directly after a function name, it is passed to that function as a parameter (a tagged template). Look at the following example:
function repeatIt(s){
console.log(`${s} ${s} ${s}`);
}
repeatIt `a`;
repeatIt `ab`;
//output:
a a a
ab ab ab
OK, the lesson is over. Let's do some tasks.
Task
Code in the function fiveLine, which accepts 1 parameter: s. s is a string.
Please return a string of 5 lines (the newline symbol is \n). The first line has one s; the second line has two s; and so on; the fifth line has five s.
Note1: Both sides of the parameter s may contain some whitespace; please remove it before using s.
Note2: Using a string template can make your job easier.
Example:
fiveLine(" a") should return "a\naa\naaa\naaaa\naaaaa"
a
aa
aaa
aaaa
aaaaa <---The effect when you console.log it
|
fiveLine(" xy ")
should return "xy\nxyxy\nxyxyxy\nxyxyxyxy\nxyxyxyxyxy"
xy
xyxy
xyxyxy
xyxyxyxy
xyxyxyxyxy <---The effect when you console.log it
*/
function fiveLine(s) {
const sTrim = s.trim();
let res = "";
for (let i = 1; i <= 5; i++) {
if (i === 5) {
res += `${sTrim.repeat(i)}`;
break;
}
res += `${sTrim.repeat(i)}\n`;
}
return res;
}
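// A minimal usage check for the solution above (assuming a console is available):
console.log(fiveLine(" a")); // prints a, aa, aaa, aaaa, aaaaa on five lines
console.log(fiveLine(" xy ") === "xy\nxyxy\nxyxyxy\nxyxyxyxy\nxyxyxyxyxy"); // true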
| |
options.go
|
// Copyright (c) 2018-2019, AT&T Intellectual Property.
// All rights reserved.
//
// SPDX-License-Identifier: MPL-2.0
package union
type unionOptions struct {
auth Auther
includeDefaults bool
hideSecrets bool
forceShowSecrets bool
}
type UnionOption func(*unionOptions)
func Authorizer(auth Auther) UnionOption {
return func(opts *unionOptions) {
opts.auth = auth
}
}
func IncludeDefaults(opts *unionOptions) {
opts.includeDefaults = true
}
func
|
(opts *unionOptions) {
opts.hideSecrets = true
}
// ForceShowSecrets forces secrets to not be filtered, even if the usual secret
// filtering logic would suggest they should be filtered.
func ForceShowSecrets(opts *unionOptions) {
opts.forceShowSecrets = true
}
func (opts *unionOptions) shouldHideSecrets(path []string) bool {
// The interaction between the Authorizer, HideSecrets, and ForceShowSecrets
// can be a bit confusing, so here's a table that should help:
//
// | HideSecrets | Auther.AuthReadSecrets | ForceShowSecrets | Shown? |
// |-------------|------------------------|------------------|---------|
// | Show | Hide | - | Hide |
// | Hide | Hide | - | Hide |
// | Show | - | - | Show |
// | Hide | - | - | Hide |
// | Show | Hide | Show | Show |
// | Hide | Hide | Show | Show |
// | Show | - | Show | Show |
// | Hide | - | Show | Show |
hideSecrets := opts.hideSecrets || !authorize(opts.auth, path, "secrets")
if opts.forceShowSecrets {
hideSecrets = false
}
return hideSecrets
}
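// An illustrative caller-side sketch of the table above (hypothetical usage; this package's
// callers are not shown here): with both HideSecrets and ForceShowSecrets applied, secrets
// are shown regardless of what the Auther would decide.
//
//   opts := &unionOptions{}
//   HideSecrets(opts)
//   ForceShowSecrets(opts)
//   _ = opts.shouldHideSecrets([]string{"interfaces"}) // false: secrets are shown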
|
HideSecrets
|
matching_function.pb.go
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.13.0
// source: google/ads/googleads/v2/common/matching_function.proto
package common
import (
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
enums "google.golang.org/genproto/googleapis/ads/googleads/v2/enums"
_ "google.golang.org/genproto/googleapis/api/annotations"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Matching function associated with a
// CustomerFeed, CampaignFeed, or AdGroupFeed. The matching function is used
// to filter the set of feed items selected.
type MatchingFunction struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// String representation of the Function.
//
// Examples:
//
// 1. IDENTITY(true) or IDENTITY(false). All or no feed items served.
// 2. EQUALS(CONTEXT.DEVICE,"Mobile")
// 3. IN(FEED_ITEM_ID,{1000001,1000002,1000003})
// 4. CONTAINS_ANY(FeedAttribute[12345678,0],{"Mars cruise","Venus cruise"})
// 5. AND(IN(FEED_ITEM_ID,{10001,10002}),EQUALS(CONTEXT.DEVICE,"Mobile"))
//
// For more details, visit
// https://developers.google.com/adwords/api/docs/guides/feed-matching-functions
//
// Note that because multiple strings may represent the same underlying
// function (whitespace and single versus double quotation marks, for
// example), the value returned may not be identical to the string sent in a
// mutate request.
FunctionString *wrapperspb.StringValue `protobuf:"bytes,1,opt,name=function_string,json=functionString,proto3" json:"function_string,omitempty"`
// Operator for a function.
Operator enums.MatchingFunctionOperatorEnum_MatchingFunctionOperator `protobuf:"varint,4,opt,name=operator,proto3,enum=google.ads.googleads.v2.enums.MatchingFunctionOperatorEnum_MatchingFunctionOperator" json:"operator,omitempty"`
// The operands on the left hand side of the equation. This is also the
// operand to be used for single operand expressions such as NOT.
LeftOperands []*Operand `protobuf:"bytes,2,rep,name=left_operands,json=leftOperands,proto3" json:"left_operands,omitempty"`
// The operands on the right hand side of the equation.
RightOperands []*Operand `protobuf:"bytes,3,rep,name=right_operands,json=rightOperands,proto3" json:"right_operands,omitempty"`
}
func (x *MatchingFunction) Reset() {
*x = MatchingFunction{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *MatchingFunction) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MatchingFunction) ProtoMessage() {}
func (x *MatchingFunction) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use MatchingFunction.ProtoReflect.Descriptor instead.
func (*MatchingFunction) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v2_common_matching_function_proto_rawDescGZIP(), []int{0}
}
func (x *MatchingFunction) GetFunctionString() *wrapperspb.StringValue {
if x != nil {
return x.FunctionString
}
return nil
}
func (x *MatchingFunction) GetOperator() enums.MatchingFunctionOperatorEnum_MatchingFunctionOperator {
if x != nil {
return x.Operator
}
return enums.MatchingFunctionOperatorEnum_UNSPECIFIED
}
func (x *MatchingFunction) GetLeftOperands() []*Operand {
if x != nil {
return x.LeftOperands
}
return nil
}
func (x *MatchingFunction) GetRightOperands() []*Operand {
if x != nil {
return x.RightOperands
}
return nil
}
// An operand in a matching function.
type Operand struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Different operands that can be used in a matching function. Required.
//
// Types that are assignable to FunctionArgumentOperand:
// *Operand_ConstantOperand_
// *Operand_FeedAttributeOperand_
// *Operand_FunctionOperand_
// *Operand_RequestContextOperand_
FunctionArgumentOperand isOperand_FunctionArgumentOperand `protobuf_oneof:"function_argument_operand"`
}
func (x *Operand) Reset() {
*x = Operand{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Operand) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Operand) ProtoMessage() {}
func (x *Operand) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Operand.ProtoReflect.Descriptor instead.
func (*Operand) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v2_common_matching_function_proto_rawDescGZIP(), []int{1}
}
func (m *Operand) GetFunctionArgumentOperand() isOperand_FunctionArgumentOperand {
if m != nil {
return m.FunctionArgumentOperand
}
return nil
}
func (x *Operand) GetConstantOperand() *Operand_ConstantOperand {
if x, ok := x.GetFunctionArgumentOperand().(*Operand_ConstantOperand_); ok {
return x.ConstantOperand
}
return nil
}
func (x *Operand) GetFeedAttributeOperand() *Operand_FeedAttributeOperand {
if x, ok := x.GetFunctionArgumentOperand().(*Operand_FeedAttributeOperand_); ok {
return x.FeedAttributeOperand
}
return nil
}
func (x *Operand) GetFunctionOperand() *Operand_FunctionOperand {
if x, ok := x.GetFunctionArgumentOperand().(*Operand_FunctionOperand_); ok {
return x.FunctionOperand
}
return nil
}
func (x *Operand) GetRequestContextOperand() *Operand_RequestContextOperand {
if x, ok := x.GetFunctionArgumentOperand().(*Operand_RequestContextOperand_); ok {
return x.RequestContextOperand
}
return nil
}
type isOperand_FunctionArgumentOperand interface {
isOperand_FunctionArgumentOperand()
}
type Operand_ConstantOperand_ struct {
// A constant operand in a matching function.
ConstantOperand *Operand_ConstantOperand `protobuf:"bytes,1,opt,name=constant_operand,json=constantOperand,proto3,oneof"`
}
type Operand_FeedAttributeOperand_ struct {
// This operand specifies a feed attribute in feed.
FeedAttributeOperand *Operand_FeedAttributeOperand `protobuf:"bytes,2,opt,name=feed_attribute_operand,json=feedAttributeOperand,proto3,oneof"`
}
type Operand_FunctionOperand_ struct {
// A function operand in a matching function.
// Used to represent nested functions.
FunctionOperand *Operand_FunctionOperand `protobuf:"bytes,3,opt,name=function_operand,json=functionOperand,proto3,oneof"`
}
type Operand_RequestContextOperand_ struct {
// An operand in a function referring to a value in the request context.
RequestContextOperand *Operand_RequestContextOperand `protobuf:"bytes,4,opt,name=request_context_operand,json=requestContextOperand,proto3,oneof"`
}
func (*Operand_ConstantOperand_) isOperand_FunctionArgumentOperand() {}
func (*Operand_FeedAttributeOperand_) isOperand_FunctionArgumentOperand() {}
func (*Operand_FunctionOperand_) isOperand_FunctionArgumentOperand() {}
func (*Operand_RequestContextOperand_) isOperand_FunctionArgumentOperand() {}
// A constant operand in a matching function.
type Operand_ConstantOperand struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Constant operand values. Required.
//
// Types that are assignable to ConstantOperandValue:
// *Operand_ConstantOperand_StringValue
// *Operand_ConstantOperand_LongValue
// *Operand_ConstantOperand_BooleanValue
// *Operand_ConstantOperand_DoubleValue
ConstantOperandValue isOperand_ConstantOperand_ConstantOperandValue `protobuf_oneof:"constant_operand_value"`
}
func (x *Operand_ConstantOperand) Reset() {
*x = Operand_ConstantOperand{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Operand_ConstantOperand) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Operand_ConstantOperand) ProtoMessage() {}
func (x *Operand_ConstantOperand) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Operand_ConstantOperand.ProtoReflect.Descriptor instead.
func (*Operand_ConstantOperand) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v2_common_matching_function_proto_rawDescGZIP(), []int{1, 0}
}
func (m *Operand_ConstantOperand) GetConstantOperandValue() isOperand_ConstantOperand_ConstantOperandValue {
if m != nil {
return m.ConstantOperandValue
}
return nil
}
func (x *Operand_ConstantOperand) GetStringValue() *wrapperspb.StringValue {
if x, ok := x.GetConstantOperandValue().(*Operand_ConstantOperand_StringValue); ok {
return x.StringValue
}
return nil
}
func (x *Operand_ConstantOperand) GetLongValue() *wrapperspb.Int64Value {
if x, ok := x.GetConstantOperandValue().(*Operand_ConstantOperand_LongValue); ok {
return x.LongValue
}
return nil
}
func (x *Operand_ConstantOperand) GetBooleanValue() *wrapperspb.BoolValue {
if x, ok := x.GetConstantOperandValue().(*Operand_ConstantOperand_BooleanValue); ok {
return x.BooleanValue
}
return nil
}
func (x *Operand_ConstantOperand) GetDoubleValue() *wrapperspb.DoubleValue {
if x, ok := x.GetConstantOperandValue().(*Operand_ConstantOperand_DoubleValue); ok {
return x.DoubleValue
}
return nil
}
type isOperand_ConstantOperand_ConstantOperandValue interface {
isOperand_ConstantOperand_ConstantOperandValue()
}
type Operand_ConstantOperand_StringValue struct {
// String value of the operand if it is a string type.
StringValue *wrapperspb.StringValue `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
}
type Operand_ConstantOperand_LongValue struct {
// Int64 value of the operand if it is a int64 type.
LongValue *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=long_value,json=longValue,proto3,oneof"`
}
type Operand_ConstantOperand_BooleanValue struct {
// Boolean value of the operand if it is a boolean type.
BooleanValue *wrapperspb.BoolValue `protobuf:"bytes,3,opt,name=boolean_value,json=booleanValue,proto3,oneof"`
}
type Operand_ConstantOperand_DoubleValue struct {
// Double value of the operand if it is a double type.
DoubleValue *wrapperspb.DoubleValue `protobuf:"bytes,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
}
func (*Operand_ConstantOperand_StringValue) isOperand_ConstantOperand_ConstantOperandValue() {}
func (*Operand_ConstantOperand_LongValue) isOperand_ConstantOperand_ConstantOperandValue() {}
func (*Operand_ConstantOperand_BooleanValue) isOperand_ConstantOperand_ConstantOperandValue() {}
func (*Operand_ConstantOperand_DoubleValue) isOperand_ConstantOperand_ConstantOperandValue() {}
// A feed attribute operand in a matching function.
// Used to represent a feed attribute in feed.
type Operand_FeedAttributeOperand struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The associated feed. Required.
FeedId *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=feed_id,json=feedId,proto3" json:"feed_id,omitempty"`
// Id of the referenced feed attribute. Required.
FeedAttributeId *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=feed_attribute_id,json=feedAttributeId,proto3" json:"feed_attribute_id,omitempty"`
}
func (x *Operand_FeedAttributeOperand) Reset() {
*x = Operand_FeedAttributeOperand{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Operand_FeedAttributeOperand) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Operand_FeedAttributeOperand) ProtoMessage() {}
func (x *Operand_FeedAttributeOperand) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Operand_FeedAttributeOperand.ProtoReflect.Descriptor instead.
func (*Operand_FeedAttributeOperand) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v2_common_matching_function_proto_rawDescGZIP(), []int{1, 1}
}
func (x *Operand_FeedAttributeOperand) GetFeedId() *wrapperspb.Int64Value {
if x != nil {
return x.FeedId
}
return nil
}
func (x *Operand_FeedAttributeOperand) GetFeedAttributeId() *wrapperspb.Int64Value {
if x != nil {
return x.FeedAttributeId
}
return nil
}
// A function operand in a matching function.
// Used to represent nested functions.
type Operand_FunctionOperand struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The matching function held in this operand.
MatchingFunction *MatchingFunction `protobuf:"bytes,1,opt,name=matching_function,json=matchingFunction,proto3" json:"matching_function,omitempty"`
}
func (x *Operand_FunctionOperand) Reset() {
*x = Operand_FunctionOperand{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Operand_FunctionOperand) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Operand_FunctionOperand) ProtoMessage() {}
func (x *Operand_FunctionOperand) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Operand_FunctionOperand.ProtoReflect.Descriptor instead.
func (*Operand_FunctionOperand) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v2_common_matching_function_proto_rawDescGZIP(), []int{1, 2}
}
func (x *Operand_FunctionOperand) GetMatchingFunction() *MatchingFunction {
if x != nil {
return x.MatchingFunction
}
return nil
}
// An operand in a function referring to a value in the request context.
type Operand_RequestContextOperand struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Type of value to be referred in the request context.
ContextType enums.MatchingFunctionContextTypeEnum_MatchingFunctionContextType `protobuf:"varint,1,opt,name=context_type,json=contextType,proto3,enum=google.ads.googleads.v2.enums.MatchingFunctionContextTypeEnum_MatchingFunctionContextType" json:"context_type,omitempty"`
}
func (x *Operand_RequestContextOperand) Reset() {
*x = Operand_RequestContextOperand{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Operand_RequestContextOperand) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Operand_RequestContextOperand) ProtoMessage() {}
func (x *Operand_RequestContextOperand) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Operand_RequestContextOperand.ProtoReflect.Descriptor instead.
func (*Operand_RequestContextOperand) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v2_common_matching_function_proto_rawDescGZIP(), []int{1, 3}
}
func (x *Operand_RequestContextOperand) GetContextType() enums.MatchingFunctionContextTypeEnum_MatchingFunctionContextType {
if x != nil {
return x.ContextType
}
return enums.MatchingFunctionContextTypeEnum_UNSPECIFIED
}
var File_google_ads_googleads_v2_common_matching_function_proto protoreflect.FileDescriptor
var file_google_ads_googleads_v2_common_matching_function_proto_rawDesc = []byte{
0x0a, 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76,
0x32, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x42, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76,
0x32, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67,
0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78,
0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
0x64, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x6d, 0x61, 0x74, 0x63,
0x68, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x70,
0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72,
0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe9, 0x02, 0x0a, 0x10, 0x4d,
0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x45, 0x0a, 0x0f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x69,
0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e,
0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x70, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x54, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e,
0x76, 0x32, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e,
0x67, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f,
0x72, 0x45, 0x6e, 0x75, 0x6d, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x46, 0x75,
0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x08,
0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x6c, 0x65, 0x66, 0x74,
0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x52, 0x0c, 0x6c, 0x65, 0x66, 0x74, 0x4f, 0x70,
0x65, 0x72, 0x61, 0x6e, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x0e, 0x72, 0x69, 0x67, 0x68, 0x74, 0x5f,
0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x52, 0x0d, 0x72, 0x69, 0x67, 0x68, 0x74, 0x4f, 0x70,
0x65, 0x72, 0x61, 0x6e, 0x64, 0x73, 0x22, 0xb9, 0x09, 0x0a, 0x07, 0x4f, 0x70, 0x65, 0x72, 0x61,
0x6e, 0x64, 0x12, 0x64, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6f,
0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x64, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x4f, 0x70,
0x65, 0x72, 0x61, 0x6e, 0x64, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4f, 0x70,
0x65, 0x72, 0x61, 0x6e, 0x64, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e,
0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x74, 0x0a, 0x16, 0x66, 0x65, 0x65, 0x64,
0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61,
0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e,
0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e,
0x64, 0x2e, 0x46, 0x65, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x4f,
0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x48, 0x00, 0x52, 0x14, 0x66, 0x65, 0x65, 0x64, 0x41, 0x74,
0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x64,
0x0a, 0x10, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61,
0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e,
0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e,
0x64, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e,
0x64, 0x48, 0x00, 0x52, 0x0f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65,
0x72, 0x61, 0x6e, 0x64, 0x12, 0x77, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f,
0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18,
0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x32, 0x2e,
0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x2e, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4f, 0x70, 0x65,
0x72, 0x61, 0x6e, 0x64, 0x48, 0x00, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43,
0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x1a, 0xb2, 0x02,
0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e,
0x64, 0x12, 0x41, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56,
0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0a, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x6f, 0x6e, 0x67, 0x56, 0x61, 0x6c,
0x75, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x5f, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f,
0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75,
0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x18, 0x0a, 0x16, 0x63, 0x6f, 0x6e, 0x73,
0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x5f, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x14, 0x46, 0x65, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69,
0x62, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x34, 0x0a, 0x07, 0x66,
0x65, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49,
0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x66, 0x65, 0x65, 0x64, 0x49,
0x64, 0x12, 0x47, 0x0a, 0x11, 0x66, 0x65, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62,
0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49,
0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x66, 0x65, 0x65, 0x64, 0x41,
0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x49, 0x64, 0x1a, 0x70, 0x0a, 0x0f, 0x46, 0x75,
0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x5d, 0x0a,
0x11, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e,
0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69,
0x6e, 0x67, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x6d, 0x61, 0x74, 0x63,
0x68, 0x69, 0x6e, 0x67, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x96, 0x01, 0x0a,
0x15, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4f,
0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x7d, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78,
0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5a, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x64, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x4d, 0x61, 0x74,
0x63, 0x68, 0x69, 0x6e, 0x67, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
0x74, 0x65, 0x78, 0x74, 0x54, 0x79, 0x70, 0x65, 0x45, 0x6e, 0x75, 0x6d, 0x2e, 0x4d, 0x61, 0x74,
0x63, 0x68, 0x69, 0x6e, 0x67, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
0x74, 0x65, 0x78, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78,
0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x1b, 0x0a, 0x19, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61,
0x6e, 0x64, 0x42, 0xf0, 0x01, 0x0a, 0x22, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e,
0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x42, 0x15, 0x4d, 0x61, 0x74, 0x63, 0x68,
0x69, 0x6e, 0x67, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f,
0x50, 0x01, 0x5a, 0x44, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
0x6e, 0x3b, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0xa2, 0x02, 0x03, 0x47, 0x41, 0x41, 0xaa, 0x02,
0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x41, 0x64, 0x73, 0x2e, 0x56, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0xca,
0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x73, 0x5c, 0x47, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x5c, 0x56, 0x32, 0x5c, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x73, 0x3a, 0x3a,
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x56, 0x32, 0x3a, 0x3a, 0x43,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_google_ads_googleads_v2_common_matching_function_proto_rawDescOnce sync.Once
file_google_ads_googleads_v2_common_matching_function_proto_rawDescData = file_google_ads_googleads_v2_common_matching_function_proto_rawDesc
)
func
|
() []byte {
file_google_ads_googleads_v2_common_matching_function_proto_rawDescOnce.Do(func() {
file_google_ads_googleads_v2_common_matching_function_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_ads_googleads_v2_common_matching_function_proto_rawDescData)
})
return file_google_ads_googleads_v2_common_matching_function_proto_rawDescData
}
var file_google_ads_googleads_v2_common_matching_function_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_google_ads_googleads_v2_common_matching_function_proto_goTypes = []interface{}{
(*MatchingFunction)(nil), // 0: google.ads.googleads.v2.common.MatchingFunction
(*Operand)(nil), // 1: google.ads.googleads.v2.common.Operand
(*Operand_ConstantOperand)(nil), // 2: google.ads.googleads.v2.common.Operand.ConstantOperand
(*Operand_FeedAttributeOperand)(nil), // 3: google.ads.googleads.v2.common.Operand.FeedAttributeOperand
(*Operand_FunctionOperand)(nil), // 4: google.ads.googleads.v2.common.Operand.FunctionOperand
(*Operand_RequestContextOperand)(nil), // 5: google.ads.googleads.v2.common.Operand.RequestContextOperand
(*wrapperspb.StringValue)(nil), // 6: google.protobuf.StringValue
(enums.MatchingFunctionOperatorEnum_MatchingFunctionOperator)(0), // 7: google.ads.googleads.v2.enums.MatchingFunctionOperatorEnum.MatchingFunctionOperator
(*wrapperspb.Int64Value)(nil), // 8: google.protobuf.Int64Value
(*wrapperspb.BoolValue)(nil), // 9: google.protobuf.BoolValue
(*wrapperspb.DoubleValue)(nil), // 10: google.protobuf.DoubleValue
(enums.MatchingFunctionContextTypeEnum_MatchingFunctionContextType)(0), // 11: google.ads.googleads.v2.enums.MatchingFunctionContextTypeEnum.MatchingFunctionContextType
}
var file_google_ads_googleads_v2_common_matching_function_proto_depIdxs = []int32{
6, // 0: google.ads.googleads.v2.common.MatchingFunction.function_string:type_name -> google.protobuf.StringValue
7, // 1: google.ads.googleads.v2.common.MatchingFunction.operator:type_name -> google.ads.googleads.v2.enums.MatchingFunctionOperatorEnum.MatchingFunctionOperator
1, // 2: google.ads.googleads.v2.common.MatchingFunction.left_operands:type_name -> google.ads.googleads.v2.common.Operand
1, // 3: google.ads.googleads.v2.common.MatchingFunction.right_operands:type_name -> google.ads.googleads.v2.common.Operand
2, // 4: google.ads.googleads.v2.common.Operand.constant_operand:type_name -> google.ads.googleads.v2.common.Operand.ConstantOperand
3, // 5: google.ads.googleads.v2.common.Operand.feed_attribute_operand:type_name -> google.ads.googleads.v2.common.Operand.FeedAttributeOperand
4, // 6: google.ads.googleads.v2.common.Operand.function_operand:type_name -> google.ads.googleads.v2.common.Operand.FunctionOperand
5, // 7: google.ads.googleads.v2.common.Operand.request_context_operand:type_name -> google.ads.googleads.v2.common.Operand.RequestContextOperand
6, // 8: google.ads.googleads.v2.common.Operand.ConstantOperand.string_value:type_name -> google.protobuf.StringValue
8, // 9: google.ads.googleads.v2.common.Operand.ConstantOperand.long_value:type_name -> google.protobuf.Int64Value
9, // 10: google.ads.googleads.v2.common.Operand.ConstantOperand.boolean_value:type_name -> google.protobuf.BoolValue
10, // 11: google.ads.googleads.v2.common.Operand.ConstantOperand.double_value:type_name -> google.protobuf.DoubleValue
8, // 12: google.ads.googleads.v2.common.Operand.FeedAttributeOperand.feed_id:type_name -> google.protobuf.Int64Value
8, // 13: google.ads.googleads.v2.common.Operand.FeedAttributeOperand.feed_attribute_id:type_name -> google.protobuf.Int64Value
0, // 14: google.ads.googleads.v2.common.Operand.FunctionOperand.matching_function:type_name -> google.ads.googleads.v2.common.MatchingFunction
11, // 15: google.ads.googleads.v2.common.Operand.RequestContextOperand.context_type:type_name -> google.ads.googleads.v2.enums.MatchingFunctionContextTypeEnum.MatchingFunctionContextType
16, // [16:16] is the sub-list for method output_type
16, // [16:16] is the sub-list for method input_type
16, // [16:16] is the sub-list for extension type_name
16, // [16:16] is the sub-list for extension extendee
0, // [0:16] is the sub-list for field type_name
}
func init() { file_google_ads_googleads_v2_common_matching_function_proto_init() }
func file_google_ads_googleads_v2_common_matching_function_proto_init() {
if File_google_ads_googleads_v2_common_matching_function_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MatchingFunction); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Operand); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Operand_ConstantOperand); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Operand_FeedAttributeOperand); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Operand_FunctionOperand); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Operand_RequestContextOperand); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[1].OneofWrappers = []interface{}{
(*Operand_ConstantOperand_)(nil),
(*Operand_FeedAttributeOperand_)(nil),
(*Operand_FunctionOperand_)(nil),
(*Operand_RequestContextOperand_)(nil),
}
file_google_ads_googleads_v2_common_matching_function_proto_msgTypes[2].OneofWrappers = []interface{}{
(*Operand_ConstantOperand_StringValue)(nil),
(*Operand_ConstantOperand_LongValue)(nil),
(*Operand_ConstantOperand_BooleanValue)(nil),
(*Operand_ConstantOperand_DoubleValue)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_ads_googleads_v2_common_matching_function_proto_rawDesc,
NumEnums: 0,
NumMessages: 6,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_google_ads_googleads_v2_common_matching_function_proto_goTypes,
DependencyIndexes: file_google_ads_googleads_v2_common_matching_function_proto_depIdxs,
MessageInfos: file_google_ads_googleads_v2_common_matching_function_proto_msgTypes,
}.Build()
File_google_ads_googleads_v2_common_matching_function_proto = out.File
file_google_ads_googleads_v2_common_matching_function_proto_rawDesc = nil
file_google_ads_googleads_v2_common_matching_function_proto_goTypes = nil
file_google_ads_googleads_v2_common_matching_function_proto_depIdxs = nil
}
|
file_google_ads_googleads_v2_common_matching_function_proto_rawDescGZIP
|
DRLAgentsTournament_ROUND1_3.py
|
import random
import time
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.env import sc2_env, run_loop
from pysc2.lib import actions, features, units
from absl import app
from baseline.sc2.agent.DRLAgentWithVanillaDQN import TerranRLAgentWithRawActsAndRawObs
from s10073.sc2.agent.DRLAgentWithVanillaDQN_phil import ProtossRLAgentWithRawActsAndRawObs as Agent10073
from s09287.ProtossDQN import ProtossRLAgentWithRawActsAndRawObs as Agent09287
from s09360.sc2.agent.DRLAgentWithVanillaDQN import TerranRLAgentWithRawActsAndRawObs as Agent09360
from s10472.sc2.agent.RLAgent import ZergAgent as Agent10472
from s10336.sc2.agent.DRLAgentWithVanillaDQN import TerranRLAgentWithRawActsAndRawObs as Agent10336
from s10071.sc2.agent.DRLAgentWithVDQN_mod_final import TerranRLAgentWithRawActsAndRawObs as Agent10071
from s10395.sc2.agent.protoss_DRLAgentWithVanillaDQN import ProtossRLAgentWithRawActsAndRawObs as Agent10395
from s10274.sc2.agent.DRLAgentWithDuelingDQN import TerranRLAgentWithRawActsAndRawObs as Agent10274
from s05026.dqn_with_fixedtarget_my import TerranRLAgentWithRawActsAndRawObs as Agent05026
def
|
(unused_argv):
agent_baseline = TerranRLAgentWithRawActsAndRawObs()
T_09360 = Agent09360() # sc2_env.Race.terran, "09360 조용준"
Z_10472 = Agent10472() # sc2_env.Race.zerg, "10472 오수은"
T_05026 = Agent05026() # sc2_env.Race.terran, "05026 박상원"
P_09287 = Agent09287() # sc2_env.Race.protoss, "09287 서대웅"
T_10336 = Agent10336() # sc2_env.Race.terran, "10336 김명환"
T_10071 = Agent10071() # sc2_env.Race.terran, "10071 오동훈"
P_10395 = Agent10395() # sc2_env.Race.protoss, "10395 이현호"
P_10073 = Agent10073() # sc2_env.Race.protoss, "10073 오필훈"
T_10274 = Agent10274() # sc2_env.Race.terran, "10274 최지은"
try:
with sc2_env.SC2Env(
map_name="Simple64",
# players=[sc2_env.Agent(sc2_env.Race.terran, "09360 조용준"),
# sc2_env.Agent(sc2_env.Race.terran, "05026 박상원")],
# players=[sc2_env.Agent(sc2_env.Race.protoss, "09287 서대웅"),
# sc2_env.Agent(sc2_env.Race.terran, "10336 김명환")],
players=[sc2_env.Agent(sc2_env.Race.terran, "10071 오동훈"),
sc2_env.Agent(sc2_env.Race.protoss, "10395 이현호")],
# players=[sc2_env.Agent(sc2_env.Race.protoss, "10073 오필훈"),
# sc2_env.Agent(sc2_env.Race.terran, "10274 최지은")],
agent_interface_format=features.AgentInterfaceFormat(
action_space=actions.ActionSpace.RAW,
use_feature_units=True,
feature_dimensions=features.Dimensions(screen=32, minimap=32),
use_raw_units=True,
use_raw_actions=True,
raw_resolution=64,
),
step_mul=8,
disable_fog=True,
visualize=False
) as env:
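            # Play a single episode between the two selected agents and save the replay afterwards.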
run_loop.run_loop([T_10071, P_10395], env, max_episodes=1)
env.save_replay("DRLAgentsTournament_ROUND1")
except KeyboardInterrupt:
pass
# def main(unused_argv):
# agent = TerranRLAgentWithRawActsAndRawObs()
# try:
# with sc2_env.SC2Env(
# map_name="Simple64",
# players=[sc2_env.Agent(sc2_env.Race.terran),
# sc2_env.Bot(sc2_env.Race.terran,
# sc2_env.Difficulty.very_easy)],
# agent_interface_format=features.AgentInterfaceFormat(
# action_space=actions.ActionSpace.RAW,
# use_raw_units=True,
# raw_resolution=64,
# ),
# step_mul=8,
# disable_fog=True,
# ) as env:
# agent.setup(env.observation_spec(), env.action_spec())
#
# timesteps = env.reset()
# agent.reset()
#
# while True:
# step_actions = [agent.step(timesteps[0])]
# if timesteps[0].last():
# break
# timesteps = env.step(step_actions)
# except KeyboardInterrupt:
# pass
# def main(unused_argv):
# agent = TerranRLAgentWithRawActsAndRawObs()
# try:
# while True:
# with sc2_env.SC2Env(
# map_name="Simple64",
# players=[sc2_env.Agent(sc2_env.Race.terran),
# sc2_env.Bot(sc2_env.Race.terran,
# sc2_env.Difficulty.very_easy)],
# agent_interface_format=features.AgentInterfaceFormat(
# action_space=actions.ActionSpace.RAW,
# use_raw_units=True,
# raw_resolution=64,
# ),
# step_mul=8,
# disable_fog=True,
# game_steps_per_episode=0,
# visualize=False) as env:
#
# agent.setup(env.observation_spec(), env.action_spec())
#
# timesteps = env.reset()
# agent.reset()
#
# while True:
# step_actions = [agent.step(timesteps[0])]
# if timesteps[0].last():
# break
# timesteps = env.step(step_actions)
#
# except KeyboardInterrupt:
# pass
# def main(unused_argv):
# agent1 = TerranRLAgentWithRawActsAndRawObs()
# try:
# with sc2_env.SC2Env(
# map_name="Simple64",
# players=[sc2_env.Agent(sc2_env.Race.terran),
# sc2_env.Bot(sc2_env.Race.terran,
# sc2_env.Difficulty.very_easy)],
# agent_interface_format=features.AgentInterfaceFormat(
# action_space=actions.ActionSpace.RAW,
# use_raw_units=True,
# raw_resolution=64,
# ),
# step_mul=8,
# disable_fog=True,
# visualize=False
# ) as env:
# run_loop.run_loop([agent1], env, max_episodes=1)
# except KeyboardInterrupt:
# pass
#
# def main(unused_argv):
# agent1 = ProtossRLAgentWithRawActsAndRawObs()
# try:
# with sc2_env.SC2Env(
# map_name="Simple64",
# players=[sc2_env.Agent(sc2_env.Race.protoss),
# sc2_env.Bot(sc2_env.Race.terran,
# sc2_env.Difficulty.very_easy)],
# agent_interface_format=features.AgentInterfaceFormat(
# action_space=actions.ActionSpace.RAW,
# use_raw_units=True,
# raw_resolution=64,
# ),
# step_mul=8,
# disable_fog=True,
# visualize=False
# ) as env:
# run_loop.run_loop([agent1], env, max_episodes=1)
# env.save_replay("DRLAgentsTournamentTest")
# except KeyboardInterrupt:
# pass
if __name__ == "__main__":
app.run(main)
|
main
|
solution.go
|
package main
import "fmt"
/**
* Forward declaration of isBadVersion API.
* @param version your guess about first bad version
* @return true if current version is bad
* false if current version is good
* func isBadVersion(version int) bool;
*/
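// search returns the first bad version in the half-open interval [start, end)
// by recursive binary search: if the midpoint is good, the answer lies to the
// right; if the version just before the midpoint is also bad, it lies to the left.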
func search(start, end int) int {
	half := (end-start)/2 + start
if !isBadVersion(half) {
return search(half+1, end)
}
if isBadVersion(half - 1) {
return search(start, half)
}
return half
}
func isBadVersion(v int) bool {
switch v {
case 8, 9, 10:
return true
default:
return false
}
}
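// firstBadVersion delegates to search over the half-open range [1, n+1),
// so version n itself can be returned as the first bad version.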
func firstBadVersion(n int) int {
return search(1, n+1)
}
func main()
|
{
fmt.Println(firstBadVersion(10))
}
|
|
test_zs_upgd_1.3_latest_on_cos7.py
|
'''
@author: MengLai
'''
import os
import tempfile
import uuid
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.operations.scenario_operations as scen_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
vm_inv = None
def test():
|
#Will be called only if exception happens in test().
def error_cleanup():
global vm_inv
os.system('rm -f %s' % tmp_file)
if vm_inv:
test_stub.destroy_vm_scenario(vm_inv.uuid)
test_lib.lib_error_cleanup(test_obj_dict)
|
global vm_inv
test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
image_name = os.environ.get('imageName_i_c7_z_1.3')
iso_path = os.environ.get('iso_path')
zstack_latest_version = os.environ.get('zstackLatestVersion')
zstack_latest_path = os.environ.get('zstackLatestInstaller')
vm_name = os.environ.get('vmName')
upgrade_script_path = os.environ.get('upgradeScript')
vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
vm_ip = vm_inv.vmNics[0].ip
test_lib.lib_wait_target_up(vm_ip, 22)
test_stub.make_ssh_no_password(vm_ip, tmp_file)
test_util.test_logger('Update MN IP')
test_stub.update_mn_ip(vm_ip, vm_ip, tmp_file)
test_stub.reset_rabbitmq_for_13(vm_ip, tmp_file)
test_stub.start_mn(vm_ip, tmp_file)
test_stub.check_installation(vm_ip, tmp_file)
test_util.test_logger('Upgrade zstack to latest')
test_stub.update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path)
test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file)
test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
test_stub.start_mn(vm_ip, tmp_file)
test_stub.check_installation(vm_ip, tmp_file)
os.system('rm -f %s' % tmp_file)
test_stub.destroy_vm_scenario(vm_inv.uuid)
test_util.test_pass('ZStack upgrade Test Success')
|
inverted_double_pendulum.rs
|
use crate::{
physics::{self, Particle, Spring},
Model, Solid,
};
use cgmath::{prelude::*, Vector2, Vector3};
#[derive(Clone)]
pub struct IDPWorld {
pub base_pos: Vector2<f32>,
pub base_vel: Vector2<f32>,
pub mid_pos: Vector3<f32>,
pub mid_vel: Vector3<f32>,
pub top_pos: Vector3<f32>,
pub top_vel: Vector3<f32>,
}
pub struct IDPSignals {
pub base_accel: Vector2<f32>,
}
pub struct InvertedDoublePendulum;
const NODE_RADIUS: f32 = 0.15;
impl Model for InvertedDoublePendulum {
type World = IDPWorld;
type Signals = IDPSignals;
fn new_world() -> Self::World {
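        // Small random offset so the pendulum never starts exactly at the
        // unstable upright equilibrium.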
let disturbance = || {
Vector3::new(
fastrand::f32() / 20.0,
fastrand::f32() / 20.0,
fastrand::f32() / 20.0,
)
};
Self::World {
base_pos: Zero::zero(),
base_vel: Zero::zero(),
mid_pos: (Vector3::unit_z() + disturbance()).normalize(),
mid_vel: Vector3::zero(),
top_pos: Vector3::unit_z() * 2.0 + disturbance(),
top_vel: Vector3::zero(),
}
}
fn new_signals() -> Self::Signals {
Self::Signals {
base_accel: Zero::zero(),
}
}
|
let particles = [
Particle::new(w.base_pos.extend(0.0), w.base_vel.extend(0.0), NODE_RADIUS),
Particle::new(w.mid_pos, w.mid_vel, NODE_RADIUS),
Particle::new(w.top_pos, w.top_vel, NODE_RADIUS),
];
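        // Advance all three particles one time step with the RK4 integrator,
        // using idp_accels (below) to compute per-particle accelerations.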
let new = physics::time_step_with_rk4(&particles, signals, idp_accels);
w.base_pos = new[0].pos.truncate();
w.base_vel = new[0].vel.truncate();
w.mid_pos = new[1].pos;
w.mid_vel = new[1].vel;
w.top_pos = new[2].pos;
w.top_vel = new[2].vel;
fn idp_accels(particles: &[Particle], signals: &IDPSignals) -> Vec<Vector3<f32>> {
const GRAVITY_ACCEL: f32 = 0.3;
if let [base, mid, top] = particles {
vec![
// Base
signals.base_accel.extend(0.0),
// Mid
mid.accel_from_spring_to(top, Spring::UNIT_ROD)
+ mid.accel_from_spring_to(base, Spring::UNIT_ROD)
+ mid.accel_from_collision_with(top)
+ mid.accel_from_collision_with(base)
- Vector3::unit_z() * GRAVITY_ACCEL,
// Top
top.accel_from_spring_to(mid, Spring::UNIT_ROD)
+ top.accel_from_collision_with(mid)
+ top.accel_from_collision_with(base)
- Vector3::unit_z() * GRAVITY_ACCEL,
]
} else {
unreachable!()
}
}
}
fn get_solids(world: &Self::World) -> Vec<Solid> {
const CONTROL_COLOR: Vector3<f32> = Vector3::new(0.0, 0.5, 0.3);
const NODE_COLOR: Vector3<f32> = Vector3::new(0.5, 0.2, 0.3);
const ROD_COLOR: Vector3<f32> = Vector3::new(0.0, 0.3, 0.6);
const ROD_RADIUS: f32 = 0.1;
vec![
Solid::new_sphere(world.base_pos.extend(0.0), NODE_RADIUS, CONTROL_COLOR),
Solid::new_sphere(world.mid_pos, NODE_RADIUS, NODE_COLOR),
Solid::new_sphere(world.top_pos, NODE_RADIUS, NODE_COLOR),
Solid::new_cylinder(
world.base_pos.extend(0.0),
world.mid_pos,
ROD_RADIUS,
ROD_COLOR,
),
Solid::new_cylinder(world.mid_pos, world.top_pos, ROD_RADIUS, ROD_COLOR),
]
}
}
|
fn update(w: &mut Self::World, signals: &Self::Signals) {
|
ExampleExportAccountQR.ts
|
/**
* Copyright 2019 NEM
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {
NetworkType,
} from 'nem2-sdk';
// internal dependencies
import {
AccountQR,
QRCodeSettings,
QRCodeType,
} from '../index';
import {Example} from './Example';
class
|
extends Example {
/**
* The `execute()` method should run the underlying
* example business flow.
*
* This example uses an encryption password value of `password`
* and following account details
*
* Public Key: 9741183860ED711BD986A464004DB9A6D26B25F4CBB51F3B0FF1B220510B86B0
* Private Key: 749F1FF1972CD465CAB74566FF0AA021F846FBE3916ABB6A6C1373E962C76331
*
     * @return {Promise<number>}
*/
public async execute(): Promise<number> {
// Arrange
const accountInfo = {
v: 3,
type: QRCodeType.ExportAccount,
network_id: NetworkType.MIJIN_TEST,
chain_id: '9F1979BEBA29C47E59B40393ABB516801A353CFC0C18BC241FEDE41939C907E7',
data: {
ciphertext: '56d310848ee93d0794eb1f64a5195778ded2q7IxvtPbO+sA7jZZyhpu/khbaNdx1pzuoGoPJRw1A4aBsWPlex3y/gy5da8WjF0i4d+/D0B5ESy+zX5P+AoFAw3EFi3UVBdnav4rnqg=',
salt: '42c8615bc6b2bc88cd239f08a5a17cc62bb0ebaece53f3e458a1cd67cd0888bc'
}
};
// create QR Code with JSON content and password
const accountQR = AccountQR.fromJSON(
JSON.stringify(accountInfo),
'password'
);
console.log("AccountQR JSON: ", accountQR.toJSON());
console.log("AccountQR BASE64: ", await accountQR.toBase64().toPromise());
console.log("AccountQR OBJECT: ", await accountQR.toString(new QRCodeSettings('M', 100)).toPromise());
console.log("");
return this.resolve(0);
}
}
export {ExampleExportAccountQR};
|
ExampleExportAccountQR
|
convenience.py
|
# -*- coding: utf-8 -*-
# author: Adrian Rosebrock
# website: http://www.pyimagesearch.com
# import the necessary packages
import numpy as np
import cv2
import sys
# import any special Python 2.7 packages
if sys.version_info.major == 2:
from urllib import urlopen
# import any special Python 3 packages
elif sys.version_info.major == 3:
from urllib.request import urlopen
def translate(image, x, y):
# define the translation matrix and perform the translation
M = np.float32([[1, 0, x], [0, 1, y]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
# return the translated image
return shifted
def rotate(image, angle, center=None, scale=1.0):
# grab the dimensions of the image
(h, w) = image.shape[:2]
# if the center is None, initialize it as the center of
# the image
if center is None:
center = (w // 2, h // 2)
# perform the rotation
M = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, M, (w, h))
# return the rotated image
return rotated
def rotate_bound(image, angle):
# grab the dimensions of the image and then determine the
# center
(h, w) = image.shape[:2]
(cX, cY) = (w / 2, h / 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH))
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
# initialize the dimensions of the image to be resized and
# grab the image size
dim = None
(h, w) = image.shape[:2]
# if both the width and height are None, then return the
# original image
if width is None and height is None:
return image
# check to see if the width is None
if width is None:
# calculate the ratio of the height and construct the
# dimensions
r = height / float(h)
dim = (int(w * r), height)
# otherwise, the height is None
else:
# calculate the ratio of the width and construct the
# dimensions
r = width / float(w)
dim = (width, int(h * r))
# resize the image
resized = cv2.resize(image, dim, interpolation=inter)
# return the resized image
return resized
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
# determine the area (i.e. total number of pixels in the image),
# initialize the output skeletonized image, and construct the
# morphological structuring element
area = image.shape[0] * image.shape[1]
skeleton = np.zeros(image.shape, dtype="uint8")
elem = cv2.getStructuringElement(structuring, size)
# keep looping until the erosions remove all pixels from the
# image
while True:
# erode and dilate the image using the structuring element
eroded = cv2.erode(image, elem)
temp = cv2.dilate(eroded, elem)
# subtract the temporary image from the original, eroded
# image, then take the bitwise 'or' between the skeleton
# and the temporary image
temp = cv2.subtract(image, temp)
skeleton = cv2.bitwise_or(skeleton, temp)
image = eroded.copy()
# if there are no more 'white' pixels in the image, then
# break from the loop
if area == area - cv2.countNonZero(image):
break
# return the skeletonized image
return skeleton
def opencv2matplotlib(image):
# OpenCV represents images in BGR order; however, Matplotlib
# expects the image in RGB order, so simply convert from BGR
# to RGB and return
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
def url_to_image(url, readFlag=cv2.IMREAD_COLOR):
# download the image, convert it to a NumPy array, and then read
# it into OpenCV format
resp = urlopen(url)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, readFlag)
# return the image
return image
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def grab_contours(cnts):
	# if the length of the contours tuple returned by cv2.findContours
# is '2' then we are using either OpenCV v2.4, v4-beta, or
# v4-official
if len(cnts) == 2:
cnts = cnts[0]
# if the length of the contours tuple is '3' then we are using
# either OpenCV v3, v4-pre, or v4-alpha
elif len(cnts) == 3:
cnts = cnts[1]
# otherwise OpenCV has changed their cv2.findContours return
# signature yet again and I have no idea WTH is going on
else:
|
# return the actual contours array
return cnts
def is_cv2(or_better=False):
# grab the OpenCV major version number
major = get_opencv_major_version()
# check to see if we are using *at least* OpenCV 2
if or_better:
return major >= 2
# otherwise we want to check for *strictly* OpenCV 2
return major == 2
def is_cv3(or_better=False):
# grab the OpenCV major version number
major = get_opencv_major_version()
# check to see if we are using *at least* OpenCV 3
if or_better:
return major >= 3
# otherwise we want to check for *strictly* OpenCV 3
return major == 3
def is_cv4(or_better=False):
# grab the OpenCV major version number
major = get_opencv_major_version()
# check to see if we are using *at least* OpenCV 4
if or_better:
return major >= 4
# otherwise we want to check for *strictly* OpenCV 4
return major == 4
def get_opencv_major_version(lib=None):
# if the supplied library is None, import OpenCV
if lib is None:
import cv2 as lib
# return the major version number
return int(lib.__version__.split(".")[0])
def check_opencv_version(major, lib=None):
# this function may be removed in a future release as we now
	# use the get_opencv_major_version function to obtain the current OpenCV
# version and then perform the actual version check *within* the
# respective function
import warnings
message = """
The check_opencv_version function is deprecated and may be
removed in a future release. Use at your own risk.
"""
warnings.warn(message, DeprecationWarning, stacklevel=2)
# if the supplied library is None, import OpenCV
if lib is None:
import cv2 as lib
# return whether or not the current OpenCV version matches the
# major version number
return lib.__version__.startswith(major)
def build_montages(image_list, image_shape, montage_shape):
"""
---------------------------------------------------------------------------------------------
author: Kyle Hounslow
---------------------------------------------------------------------------------------------
Converts a list of single images into a list of 'montage' images of specified rows and columns.
    A new montage image is started once the rows and columns of the current montage image are filled.
    Empty space in incomplete montage images is filled with black pixels.
---------------------------------------------------------------------------------------------
:param image_list: python list of input images
:param image_shape: tuple, size each image will be resized to for display (width, height)
:param montage_shape: tuple, shape of image montage (width, height)
:return: list of montage images in numpy array format
---------------------------------------------------------------------------------------------
example usage:
# load single image
img = cv2.imread('lena.jpg')
# duplicate image 25 times
num_imgs = 25
img_list = []
for i in xrange(num_imgs):
img_list.append(img)
# convert image list into a montage of 256x256 images tiled in a 5x5 montage
montages = make_montages_of_images(img_list, (256, 256), (5, 5))
# iterate through montages and display
for montage in montages:
cv2.imshow('montage image', montage)
cv2.waitKey(0)
----------------------------------------------------------------------------------------------
"""
if len(image_shape) != 2:
		raise Exception('image shape must be list or tuple of length 2 (width, height)')
if len(montage_shape) != 2:
		raise Exception('montage shape must be list or tuple of length 2 (width, height)')
image_montages = []
# start with black canvas to draw images onto
montage_image = np.zeros(shape=(image_shape[1] * (montage_shape[1]), image_shape[0] * montage_shape[0], 3),
dtype=np.uint8)
cursor_pos = [0, 0]
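	# cursor_pos tracks the (x, y) pixel offset of the next tile on the current montage canvas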
start_new_img = False
for img in image_list:
if type(img).__module__ != np.__name__:
raise Exception('input of type {} is not a valid numpy array'.format(type(img)))
start_new_img = False
img = cv2.resize(img, image_shape)
# draw image to black canvas
montage_image[cursor_pos[1]:cursor_pos[1] + image_shape[1], cursor_pos[0]:cursor_pos[0] + image_shape[0]] = img
cursor_pos[0] += image_shape[0] # increment cursor x position
if cursor_pos[0] >= montage_shape[0] * image_shape[0]:
cursor_pos[1] += image_shape[1] # increment cursor y position
cursor_pos[0] = 0
if cursor_pos[1] >= montage_shape[1] * image_shape[1]:
cursor_pos = [0, 0]
image_montages.append(montage_image)
# reset black canvas
montage_image = np.zeros(shape=(image_shape[1] * (montage_shape[1]), image_shape[0] * montage_shape[0], 3),
dtype=np.uint8)
start_new_img = True
if start_new_img is False:
image_montages.append(montage_image) # add unfinished montage
return image_montages
def adjust_brightness_contrast(image, brightness=0., contrast=0.):
"""
Adjust the brightness and/or contrast of an image
	:param image: OpenCV BGR image
	:param brightness: Float, brightness adjustment with 0 meaning no change
	:param contrast: Float, contrast adjustment with 0 meaning no change
"""
beta = 0
# See the OpenCV docs for more info on the `beta` parameter to addWeighted
# https://docs.opencv.org/3.4.2/d2/de8/group__core__array.html#gafafb2513349db3bcff51f54ee5592a19
return cv2.addWeighted(image,
1 + float(contrast) / 100.,
image,
beta,
float(brightness))
|
raise Exception(("Contours tuple must have length 2 or 3, "
"otherwise OpenCV changed their cv2.findContours return "
"signature yet again. Refer to OpenCV's documentation "
"in that case"))
|
storage.go
|
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/logging"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
)
// String constants for instrumentation.
const (
namespace = "prometheus"
subsystem = "remote_storage"
remoteName = "remote_name"
endpoint = "url"
)
type ReadyScrapeManager interface {
Get() (*scrape.Manager, error)
}
// startTimeCallback is a callback func that returns the oldest timestamp stored in a storage.
type startTimeCallback func() (int64, error)
// Storage represents all the remote read and write endpoints. It implements
// storage.Storage.
type Storage struct {
logger log.Logger
mtx sync.Mutex
Write *WriteStorage
// For reads.
queryables []storage.SampleAndChunkQueryable
localStartTimeCallback startTimeCallback
}
// NewStorage returns a remote.Storage.
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *Storage {
if l == nil {
l = log.NewNopLogger()
}
s := &Storage{
logger: logging.Dedupe(l, 1*time.Minute),
localStartTimeCallback: stCallback,
}
s.Write = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm)
return s
}
// ApplyConfig updates the state as the new config requires.
func (s *Storage) ApplyConfig(conf *config.Config) error {
s.mtx.Lock()
defer s.mtx.Unlock()
if err := s.Write.ApplyConfig(conf); err != nil {
return err
}
// Update read clients
readHashes := make(map[string]struct{})
queryables := make([]storage.SampleAndChunkQueryable, 0, len(conf.RemoteReadConfigs))
for _, rrConf := range conf.RemoteReadConfigs {
hash, err := toHash(rrConf)
if err != nil {
return err
}
// Don't allow duplicate remote read configs.
if _, ok := readHashes[hash]; ok {
return fmt.Errorf("duplicate remote read configs are not allowed, found duplicate for URL: %s", rrConf.URL)
}
readHashes[hash] = struct{}{}
		// Set the client name to the config hash if the user has not set
		// a name in their remote read config so we can still differentiate
		// between read clients that have the same remote read endpoint.
name := hash[:6]
if rrConf.Name != "" {
name = rrConf.Name
}
c, err := NewReadClient(name, &ClientConfig{
URL: rrConf.URL,
Timeout: rrConf.RemoteTimeout,
HTTPClientConfig: rrConf.HTTPClientConfig,
})
if err != nil {
return err
}
queryables = append(queryables, NewSampleAndChunkQueryableClient(
c,
conf.GlobalConfig.ExternalLabels,
labelsToEqualityMatchers(rrConf.RequiredMatchers),
rrConf.ReadRecent,
s.localStartTimeCallback,
))
}
s.queryables = queryables
return nil
}
// StartTime implements the Storage interface.
func (s *Storage) StartTime() (int64, error) {
return int64(model.Latest), nil
}
// Querier returns a storage.MergeQuerier combining the remote client queriers
// of each configured remote read endpoint.
// The returned querier will never return an error, as all queryables are assumed to be best effort.
// Additionally, all returned queriers ensure that their Select's SeriesSets have ready data after the first `Next` invocation.
// This is because Prometheus (fanout and secondary queries) cannot, by design, handle a stream failing halfway through.
func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
s.mtx.Lock()
queryables := s.queryables
s.mtx.Unlock()
queriers := make([]storage.Querier, 0, len(queryables))
for _, queryable := range queryables {
q, err := queryable.Querier(ctx, mint, maxt)
if err != nil {
return nil, err
}
queriers = append(queriers, q)
}
return storage.NewMergeQuerier(nil, queriers, storage.ChainedSeriesMerge), nil
}
// ChunkQuerier returns a storage.MergeQuerier combining the remote client queriers
// of each configured remote read endpoint.
func (s *Storage) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
s.mtx.Lock()
queryables := s.queryables
s.mtx.Unlock()
queriers := make([]storage.ChunkQuerier, 0, len(queryables))
for _, queryable := range queryables {
q, err := queryable.ChunkQuerier(ctx, mint, maxt)
if err != nil {
return nil, err
}
queriers = append(queriers, q)
}
return storage.NewMergeChunkQuerier(nil, queriers, storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)), nil
}
// Appender implements storage.Storage.
func (s *Storage) Appender(ctx context.Context) storage.Appender {
return s.Write.Appender(ctx)
}
// Close the background processing of the storage queues.
func (s *Storage) Close() error {
s.mtx.Lock()
defer s.mtx.Unlock()
return s.Write.Close()
}
func labelsToEqualityMatchers(ls model.LabelSet) []*labels.Matcher {
ms := make([]*labels.Matcher, 0, len(ls))
for k, v := range ls {
ms = append(ms, &labels.Matcher{
Type: labels.MatchEqual,
Name: string(k),
|
}
return ms
}
// Used for hashing configs and diff'ing hashes in ApplyConfig.
func toHash(data interface{}) (string, error) {
bytes, err := yaml.Marshal(data)
if err != nil {
return "", err
}
hash := md5.Sum(bytes)
return hex.EncodeToString(hash[:]), nil
}
|
Value: string(v),
})
|
user.entity.ts
|
import { Column, Entity, PrimaryGeneratedColumn } from 'typeorm';
@Entity('users')
export class User {
@PrimaryGeneratedColumn()
id: string;
@Column({ unique: true })
email: string;
|
@Column({ nullable: true })
displayName: string;
}
|
@Column({ unique: true })
username: string;
|
utils.rs
|
use std::env;
use std::fmt;
use std::fs::{self, File};
use std::path::{Path, PathBuf};
use rustc_serialize::{Encodable, Encoder};
use url::Url;
use git2::{self, ObjectType};
use core::GitReference;
use util::{CargoResult, ChainError, human, ToUrl, internal, Config, network};
#[derive(PartialEq, Clone, Debug)]
pub struct GitRevision(git2::Oid);
impl fmt::Display for GitRevision {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
/// GitRemote represents a remote repository. It gets cloned into a local
/// GitDatabase.
#[derive(PartialEq,Clone,Debug)]
pub struct GitRemote {
url: Url,
}
#[derive(PartialEq,Clone,RustcEncodable)]
struct EncodableGitRemote {
url: String,
}
impl Encodable for GitRemote {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitRemote {
url: self.url.to_string()
}.encode(s)
}
}
/// GitDatabase is a local clone of a remote repository's database. Multiple
/// GitCheckouts can be cloned from this GitDatabase.
pub struct GitDatabase {
remote: GitRemote,
path: PathBuf,
repo: git2::Repository,
}
#[derive(RustcEncodable)]
pub struct EncodableGitDatabase {
remote: GitRemote,
path: String,
}
impl Encodable for GitDatabase {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitDatabase {
remote: self.remote.clone(),
path: self.path.display().to_string()
}.encode(s)
}
}
/// GitCheckout is a local checkout of a particular revision. Calling
/// `clone_into` with a reference will resolve the reference into a revision,
/// and return a CargoError if no revision for that reference was found.
pub struct GitCheckout<'a> {
database: &'a GitDatabase,
location: PathBuf,
revision: GitRevision,
repo: git2::Repository,
}
#[derive(RustcEncodable)]
pub struct EncodableGitCheckout {
database: EncodableGitDatabase,
location: String,
revision: String,
}
impl<'a> Encodable for GitCheckout<'a> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitCheckout {
location: self.location.display().to_string(),
revision: self.revision.to_string(),
database: EncodableGitDatabase {
remote: self.database.remote.clone(),
path: self.database.path.display().to_string(),
},
}.encode(s)
}
}
// Implementations
impl GitRemote {
pub fn new(url: &Url) -> GitRemote {
GitRemote { url: url.clone() }
}
pub fn url(&self) -> &Url {
&self.url
}
pub fn rev_for(&self, path: &Path, reference: &GitReference)
-> CargoResult<GitRevision> {
let db = try!(self.db_at(path));
db.rev_for(reference)
}
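    /// Opens the existing database at `into` and fetches updates from the
    /// remote, falling back to a fresh bare clone if it cannot be opened.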
pub fn checkout(&self, into: &Path, cargo_config: &Config) -> CargoResult<GitDatabase> {
let repo = match git2::Repository::open(into) {
Ok(repo) => {
try!(self.fetch_into(&repo, &cargo_config).chain_error(|| {
human(format!("failed to fetch into {}", into.display()))
}));
repo
}
Err(..) => {
try!(self.clone_into(into, &cargo_config).chain_error(|| {
human(format!("failed to clone into: {}", into.display()))
}))
}
};
Ok(GitDatabase {
remote: self.clone(),
path: into.to_path_buf(),
repo: repo,
})
}
pub fn db_at(&self, db_path: &Path) -> CargoResult<GitDatabase> {
let repo = try!(git2::Repository::open(db_path));
Ok(GitDatabase {
remote: self.clone(),
path: db_path.to_path_buf(),
repo: repo,
})
}
fn fetch_into(&self, dst: &git2::Repository, cargo_config: &Config) -> CargoResult<()> {
// Create a local anonymous remote in the repository to fetch the url
let url = self.url.to_string();
let refspec = "refs/heads/*:refs/heads/*";
fetch(dst, &url, refspec, &cargo_config)
}
fn clone_into(&self, dst: &Path, cargo_config: &Config) -> CargoResult<git2::Repository> {
let url = self.url.to_string();
if fs::metadata(&dst).is_ok() {
try!(fs::remove_dir_all(dst));
}
try!(fs::create_dir_all(dst));
let repo = try!(git2::Repository::init_bare(dst));
try!(fetch(&repo, &url, "refs/heads/*:refs/heads/*", &cargo_config));
Ok(repo)
}
}
impl GitDatabase {
fn path(&self) -> &Path {
&self.path
}
pub fn copy_to(&self, rev: GitRevision, dest: &Path, cargo_config: &Config)
-> CargoResult<GitCheckout> {
let checkout = match git2::Repository::open(dest) {
Ok(repo) => {
let checkout = GitCheckout::new(dest, self, rev, repo);
if !checkout.is_fresh() {
try!(checkout.fetch(&cargo_config));
try!(checkout.reset());
assert!(checkout.is_fresh());
}
checkout
}
Err(..) => try!(GitCheckout::clone_into(dest, self, rev)),
};
try!(checkout.update_submodules(&cargo_config).chain_error(|| {
internal("failed to update submodules")
}));
Ok(checkout)
}
pub fn rev_for(&self, reference: &GitReference) -> CargoResult<GitRevision> {
let id = match *reference {
GitReference::Tag(ref s) => {
try!((|| {
let refname = format!("refs/tags/{}", s);
let id = try!(self.repo.refname_to_id(&refname));
let obj = try!(self.repo.find_object(id, None));
let obj = try!(obj.peel(ObjectType::Commit));
Ok(obj.id())
}).chain_error(|| {
human(format!("failed to find tag `{}`", s))
}))
}
GitReference::Branch(ref s) => {
try!((|| {
let b = try!(self.repo.find_branch(s, git2::BranchType::Local));
b.get().target().chain_error(|| {
human(format!("branch `{}` did not have a target", s))
})
}).chain_error(|| {
human(format!("failed to find branch `{}`", s))
}))
}
GitReference::Rev(ref s) => {
let obj = try!(self.repo.revparse_single(s));
obj.id()
}
};
Ok(GitRevision(id))
}
pub fn has_ref(&self, reference: &str) -> CargoResult<()> {
try!(self.repo.revparse_single(reference));
Ok(())
}
}
impl<'a> GitCheckout<'a> {
fn new(path: &Path, database: &'a GitDatabase, revision: GitRevision,
repo: git2::Repository)
-> GitCheckout<'a>
{
GitCheckout {
location: path.to_path_buf(),
database: database,
revision: revision,
repo: repo,
}
}
fn clone_into(into: &Path, database: &'a GitDatabase,
revision: GitRevision)
-> CargoResult<GitCheckout<'a>>
{
let repo = try!(GitCheckout::clone_repo(database.path(), into));
let checkout = GitCheckout::new(into, database, revision, repo);
try!(checkout.reset());
Ok(checkout)
}
fn clone_repo(source: &Path, into: &Path) -> CargoResult<git2::Repository> {
let dirname = into.parent().unwrap();
try!(fs::create_dir_all(&dirname).chain_error(|| {
human(format!("Couldn't mkdir {}", dirname.display()))
}));
if fs::metadata(&into).is_ok() {
try!(fs::remove_dir_all(into).chain_error(|| {
human(format!("Couldn't rmdir {}", into.display()))
}));
}
let url = try!(source.to_url());
let url = url.to_string();
let repo = try!(git2::Repository::clone(&url, into).chain_error(|| {
internal(format!("failed to clone {} into {}", source.display(),
into.display()))
}));
Ok(repo)
}
fn is_fresh(&self) -> bool {
match self.repo.revparse_single("HEAD") {
Ok(ref head) if head.id() == self.revision.0 => {
// See comments in reset() for why we check this
fs::metadata(self.location.join(".cargo-ok")).is_ok()
}
_ => false,
}
}
fn fetch(&self, cargo_config: &Config) -> CargoResult<()> {
info!("fetch {}", self.repo.path().display());
let url = try!(self.database.path.to_url());
let url = url.to_string();
let refspec = "refs/heads/*:refs/heads/*";
try!(fetch(&self.repo, &url, refspec, &cargo_config));
Ok(())
}
fn reset(&self) -> CargoResult<()> {
// If we're interrupted while performing this reset (e.g. we die because
// of a signal) Cargo needs to be sure to try to check out this repo
// again on the next go-round.
//
// To enable this we have a dummy file in our checkout, .cargo-ok, which
// if present means that the repo has been successfully reset and is
// ready to go. Hence if we start to do a reset, we make sure this file
// *doesn't* exist, and then once we're done we create the file.
let ok_file = self.location.join(".cargo-ok");
let _ = fs::remove_file(&ok_file);
info!("reset {} to {}", self.repo.path().display(), self.revision);
let object = try!(self.repo.find_object(self.revision.0, None));
try!(self.repo.reset(&object, git2::ResetType::Hard, None));
try!(File::create(ok_file));
Ok(())
}
fn update_submodules(&self, cargo_config: &Config) -> CargoResult<()> {
return update_submodules(&self.repo, &cargo_config);
fn
|
(repo: &git2::Repository, cargo_config: &Config) -> CargoResult<()> {
info!("update submodules for: {:?}", repo.workdir().unwrap());
for mut child in try!(repo.submodules()).into_iter() {
try!(child.init(false));
let url = try!(child.url().chain_error(|| {
internal("non-utf8 url for submodule")
}));
// A submodule which is listed in .gitmodules but not actually
// checked out will not have a head id, so we should ignore it.
let head = match child.head_id() {
Some(head) => head,
None => continue,
};
// If the submodule hasn't been checked out yet, we need to
// clone it. If it has been checked out and the head is the same
// as the submodule's head, then we can bail out and go to the
// next submodule.
let head_and_repo = child.open().and_then(|repo| {
let target = try!(repo.head()).target();
Ok((target, repo))
});
let repo = match head_and_repo {
Ok((head, repo)) => {
if child.head_id() == head {
continue
}
repo
}
Err(..) => {
let path = repo.workdir().unwrap().join(child.path());
let _ = fs::remove_dir_all(&path);
try!(git2::Repository::clone(url, &path))
}
};
// Fetch data from origin and reset to the head commit
let refspec = "refs/heads/*:refs/heads/*";
try!(fetch(&repo, url, refspec, &cargo_config).chain_error(|| {
internal(format!("failed to fetch submodule `{}` from {}",
child.name().unwrap_or(""), url))
}));
let obj = try!(repo.find_object(head, None));
try!(repo.reset(&obj, git2::ResetType::Hard, None));
try!(update_submodules(&repo, &cargo_config));
}
Ok(())
}
}
}
/// Prepare the authentication callbacks for cloning a git repository.
///
/// The main purpose of this function is to construct the "authentication
/// callback" which is used to clone a repository. This callback will attempt to
/// find the right authentication on the system (without user input) and will
/// guide libgit2 in doing so.
///
/// The callback is provided `allowed` types of credentials, and we try to do as
/// much as possible based on that:
///
/// * Prioritize SSH keys from the local ssh agent as they're likely the most
/// reliable. The username here is prioritized from the credential
/// callback, then from whatever is configured in git itself, and finally
/// we fall back to the generic user of `git`.
///
/// * If a username/password is allowed, then we fall back to git2-rs's
/// implementation of the credential helper. This is what is configured
/// with `credential.helper` in git, and is the interface for the OSX
/// keychain, for example.
///
/// * After the above two have failed, we make a best-effort attempt to
///   return *something*.
///
/// If any form of authentication fails, libgit2 will repeatedly ask us for
/// credentials until we give it a reason to not do so. To ensure we don't
/// just sit here looping forever we keep track of authentications we've
/// attempted and we don't try the same ones again.
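///
/// A minimal usage sketch (illustrative only; it mirrors how `fetch` below
/// drives this helper, and the `Ok(())` stands in for the real transport work):
///
/// ```ignore
/// with_authentication(url, &try!(repo.config()), |creds| {
///     let mut cb = git2::RemoteCallbacks::new();
///     cb.credentials(creds);
///     // ... hand `cb` to a fetch/clone operation here ...
///     Ok(())
/// })
/// ```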
fn with_authentication<T, F>(url: &str, cfg: &git2::Config, mut f: F)
-> CargoResult<T>
where F: FnMut(&mut git2::Credentials) -> CargoResult<T>
{
let mut cred_helper = git2::CredentialHelper::new(url);
cred_helper.config(cfg);
let mut ssh_username_requested = false;
let mut cred_helper_bad = None;
let mut ssh_agent_attempts = Vec::new();
let mut any_attempts = false;
let mut tried_sshkey = false;
let mut res = f(&mut |url, username, allowed| {
any_attempts = true;
// libgit2's "USERNAME" authentication actually means that it's just
// asking us for a username to keep going. This is currently only really
// used for SSH authentication and isn't really an authentication type.
// The logic currently looks like:
//
// let user = ...;
// if (user.is_null())
// user = callback(USERNAME, null, ...);
//
// callback(SSH_KEY, user, ...)
//
// So if we're being called here then we know that (a) we're using ssh
// authentication and (b) no username was specified in the URL that
// we're trying to clone. We need to guess an appropriate username here,
// but that may involve a few attempts. Unfortunately we can't switch
// usernames during one authentication session with libgit2, so to
// handle this we bail out of this authentication session after setting
// the flag `ssh_username_requested`, and then we handle this below.
if allowed.contains(git2::USERNAME) {
debug_assert!(username.is_none());
ssh_username_requested = true;
return Err(git2::Error::from_str("gonna try usernames later"))
}
// An "SSH_KEY" authentication indicates that we need some sort of SSH
// authentication. This can currently either come from the ssh-agent
// process or from a raw in-memory SSH key. Cargo only supports using
// ssh-agent currently.
//
// If we get called with this then the only way that should be possible
// is if a username is specified in the URL itself (e.g. `username` is
// Some), hence the unwrap() here. We try custom usernames down below.
if allowed.contains(git2::SSH_KEY) && !tried_sshkey {
// If ssh-agent authentication fails, libgit2 will keep
// calling this callback asking for other authentication
// methods to try. Make sure we only try ssh-agent once,
// to avoid looping forever.
tried_sshkey = true;
let username = username.unwrap();
debug_assert!(!ssh_username_requested);
ssh_agent_attempts.push(username.to_string());
return git2::Cred::ssh_key_from_agent(&username)
}
// Sometimes libgit2 will ask for a username/password in plaintext. This
// is where Cargo would have an interactive prompt if we supported it,
// but we currently don't! Right now the only way we support fetching a
// plaintext password is through the `credential.helper` support, so
// fetch that here.
if allowed.contains(git2::USER_PASS_PLAINTEXT) {
let r = git2::Cred::credential_helper(cfg, url, username);
cred_helper_bad = Some(r.is_err());
return r
}
// I'm... not sure what the DEFAULT kind of authentication is, but seems
// easy to support?
if allowed.contains(git2::DEFAULT) {
return git2::Cred::default()
}
// Whelp, we tried our best
Err(git2::Error::from_str("no authentication available"))
});
// Ok, so if it looks like we're going to be doing ssh authentication, we
// want to try a few different usernames as one wasn't specified in the URL
// for us to use. In order, we'll try:
//
// * A credential helper's username for this URL, if available.
// * This account's username.
// * "git"
//
// We have to restart the authentication session each time (due to
// constraints in libssh2 I guess? maybe this is inherent to ssh?), so we
// call our callback, `f`, in a loop here.
if ssh_username_requested {
debug_assert!(res.is_err());
let mut attempts = Vec::new();
attempts.push("git".to_string());
if let Ok(s) = env::var("USER").or_else(|_| env::var("USERNAME")) {
attempts.push(s);
}
if let Some(ref s) = cred_helper.username {
attempts.push(s.clone());
}
while let Some(s) = attempts.pop() {
// We should get `USERNAME` first, where we just return our attempt,
// and then after that we should get `SSH_KEY`. If the first attempt
// fails we'll get called again, but we don't have another option so
// we bail out.
let mut attempts = 0;
res = f(&mut |_url, username, allowed| {
if allowed.contains(git2::USERNAME) {
return git2::Cred::username(&s);
}
if allowed.contains(git2::SSH_KEY) {
debug_assert_eq!(Some(&s[..]), username);
attempts += 1;
if attempts == 1 {
ssh_agent_attempts.push(s.to_string());
return git2::Cred::ssh_key_from_agent(&s)
}
}
Err(git2::Error::from_str("no authentication available"))
});
// If we made two attempts then that means:
//
// 1. A username was requested, we returned `s`.
// 2. An ssh key was requested, we returned to look up `s` in the
// ssh agent.
// 3. For whatever reason that lookup failed, so we were asked again
// for another mode of authentication.
//
// Essentially, if `attempts == 2` then in theory the only error was
// that this username failed to authenticate (e.g. no other network
// errors happened). Otherwise something else is funny so we bail
// out.
if attempts != 2 {
break
}
}
}
if res.is_ok() || !any_attempts {
return res.map_err(From::from)
}
// In the case of an authentication failure (where we tried something) then
// we try to give a more helpful error message about precisely what we
// tried.
res.chain_error(|| {
let mut msg = "failed to authenticate when downloading \
repository".to_string();
if ssh_agent_attempts.len() > 0 {
let names = ssh_agent_attempts.iter()
.map(|s| format!("`{}`", s))
.collect::<Vec<_>>()
.join(", ");
msg.push_str(&format!("\nattempted ssh-agent authentication, but \
none of the usernames {} succeeded", names));
}
if let Some(failed_cred_helper) = cred_helper_bad {
if failed_cred_helper {
msg.push_str("\nattempted to find username/password via \
git's `credential.helper` support, but failed");
} else {
msg.push_str("\nattempted to find username/password via \
`credential.helper`, but maybe the found \
credentials were incorrect");
}
}
human(msg)
})
}
pub fn fetch(repo: &git2::Repository,
url: &str,
refspec: &str,
config: &Config) -> CargoResult<()> {
if !config.network_allowed() {
bail!("attempting to update a git repository, but --frozen \
was specified")
}
with_authentication(url, &try!(repo.config()), |f| {
let mut cb = git2::RemoteCallbacks::new();
cb.credentials(f);
// Create a local anonymous remote in the repository to fetch the url
let mut remote = try!(repo.remote_anonymous(&url));
let mut opts = git2::FetchOptions::new();
opts.remote_callbacks(cb)
.download_tags(git2::AutotagOption::All);
try!(network::with_retry(config, ||{
remote.fetch(&[refspec], Some(&mut opts), None)
}));
Ok(())
})
}
|
update_submodules
|
header.js
|
import { Selection } from './../../../3rdparty/walkontable/src';
/**
 * Creates a new instance of Selection, responsible for highlighting row and column headers. This type of selection
* can occur multiple times.
*
* @return {Selection}
*/
function createHighlight({ headerClassName, rowClassName, columnClassName }) {
const s = new Selection({
className: 'highlight',
highlightHeaderClassName: headerClassName,
highlightRowClassName: rowClassName,
highlightColumnClassName: columnClassName,
});
return s;
}
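
// Illustrative usage only (the concrete class names below are assumptions,
// not something this module defines):
//
//   const highlight = createHighlight({
//     headerClassName: 'ht__highlight',
//     rowClassName: 'ht__active_highlight',
//     columnClassName: 'ht__active_highlight',
//   });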
|
export default createHighlight;
|
|
main.go
|
package main
import (
"fmt"
"net/http"
)
type dollars float32
func (d dollars) String() string { return fmt.Sprintf("$%.2f", d) }
type database map[string]dollars
func (db database) ServeHTTP(w http.ResponseWriter, req *http.Request) {
switch req.URL.Path {
case "/list":
for item, price := range db {
fmt.Fprintf(w, "%s: %s\n", item, price)
}
case "/price":
item := req.URL.Query().Get("item")
price, ok := db[item]
if !ok {
w.WriteHeader(http.StatusNotFound)
fmt.Fprintf(w, "no such item: %q\n", item)
return
}
fmt.Fprintf(w, "%s\n", price)
default:
w.WriteHeader(http.StatusNotFound)
fmt.Fprintf(w, "no such page: %s\n", req.URL)
}
}
func
|
() {
db := database{"shoes": 50, "socks": 5}
http.ListenAndServe("localhost:8000", db)
}
|
main
|
target_graph.rs
|
// Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::iter::Iterator;
use std::str::FromStr;
use crate::hab_core::package::PackageTarget;
use crate::package_graph::PackageGraph;
use crate::protocol::originsrv;
pub struct TargetGraphStats {
pub target: PackageTarget,
pub node_count: usize,
pub edge_count: usize,
}
#[derive(Default)]
pub struct TargetGraph {
graphs: HashMap<PackageTarget, PackageGraph>,
}
impl TargetGraph {
pub fn new() -> Self
|
pub fn graph(&self, target_str: &str) -> Option<&PackageGraph> {
match PackageTarget::from_str(target_str) {
Ok(target) => self.graphs.get(&target),
Err(err) => {
error!(
"Invalid target specified for TargetGraph: {}! Err: {}",
target_str, err
);
None
}
}
}
pub fn graph_mut(&mut self, target_str: &str) -> Option<&mut PackageGraph> {
match PackageTarget::from_str(target_str) {
Ok(target) => self.graphs.get_mut(&target),
Err(err) => {
error!(
"Invalid target specified for TargetGraph: {}! Err: {}",
target_str, err
);
None
}
}
}
pub fn build<T>(&mut self, packages: T) -> Vec<TargetGraphStats>
where
T: Iterator<Item = originsrv::OriginPackage>,
{
for p in packages {
if let Some(ref mut graph) = self.graph_mut(p.get_target()) {
graph.extend(&p);
}
}
let mut target_stats = Vec::new();
for (target, graph) in self.graphs.iter() {
let stats = graph.stats();
let ts = TargetGraphStats {
target: *target,
node_count: stats.node_count,
edge_count: stats.edge_count,
};
target_stats.push(ts);
}
target_stats
}
}
|
{
let mut graphs = HashMap::new();
// We only support the following targets currently
for target_str in &["x86_64-linux", "x86_64-linux-kernel2", "x86_64-windows"] {
graphs.insert(
PackageTarget::from_str(target_str).unwrap(),
PackageGraph::new(),
);
}
TargetGraph { graphs }
}
|
event.rs
|
use criterion::{criterion_group, Criterion};
use serde_json::{json, Value};
use vector::{
event::{self, Event, LogEvent},
transforms::{
json_parser::{JsonParser, JsonParserConfig},
Transform,
},
};
fn benchmark_event(c: &mut Criterion) {
c.bench_function("unflatten baseline", |b| {
b.iter_with_setup(
|| {
let mut e = Event::new_empty_log().into_log();
e.insert("key1", "value1");
e.insert("key2", "value2");
e.insert("key3", "value3");
e
},
|e| e.unflatten(),
)
});
c.bench_function("unflatten single-level", |b| {
b.iter_with_setup(
|| {
create_event(json!({
"key1": "value1",
"key2": "value2",
"key3": "value3"
}))
},
|e| e.unflatten(),
)
});
c.bench_function("unflatten nested-keys", |b| {
b.iter_with_setup(
|| {
create_event(json!({
"key1": {
"nested1": {
"nested2": "value1",
"nested3": "value4"
}
},
"key3": "value3"
}))
},
|e| e.unflatten(),
)
});
c.bench_function("unflatten array", |b| {
b.iter_with_setup(
|| {
create_event(json!({
"key1": {
"nested1": [
"value1",
"value2"
]
},
}))
},
|e| e.unflatten(),
)
});
}
fn create_event(json: Value) -> LogEvent
|
criterion_group!(event, benchmark_event);
|
{
let s = serde_json::to_string(&json).unwrap();
let mut event = Event::new_empty_log();
event
.as_mut_log()
.insert(event::log_schema().message_key().clone(), s);
let mut parser = JsonParser::from(JsonParserConfig::default());
parser.transform(event).unwrap().into_log()
}
|
test_java.py
|
import pytest
from thedarn.rules.java import match, get_new_command
from thedarn.types import Command
@pytest.mark.parametrize('command', [
Command('java foo.java', ''),
Command('java bar.java', '')])
|
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('java foo.java', ''), 'java foo'),
(Command('java bar.java', ''), 'java bar')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
| |
localfile.rs
|
// Copyright 2019 Alibaba Cloud. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::mem::ManuallyDrop;
use std::os::linux::fs::MetadataExt;
use std::os::unix::io::{AsRawFd, RawFd};
use log::{info, warn};
use virtio_bindings::bindings::virtio_blk::{VIRTIO_BLK_S_IOERR, VIRTIO_BLK_S_OK};
use super::{IoDataDesc, IoEngine, Ufile};
pub struct LocalFile<E> {
pub(crate) file: ManuallyDrop<File>,
no_drop: bool,
capacity: u64,
io_engine: E,
}
impl<E> LocalFile<E> {
/// Creates a LocalFile instance.
pub fn new(mut file: File, no_drop: bool, io_engine: E) -> io::Result<Self> {
let capacity = file.seek(SeekFrom::End(0))?;
Ok(Self {
file: ManuallyDrop::new(file),
no_drop,
capacity,
io_engine,
})
}
}
// Implement our own Drop for LocalFile, as we don't want to close LocalFile.file if no_drop is
// enabled.
impl<E> Drop for LocalFile<E> {
fn drop(&mut self) {
if self.no_drop {
info!("LocalFile: no_drop is enabled, don't close file on drop");
} else {
// Close the raw fd directly.
let fd = self.file.as_raw_fd();
if let Err(e) = nix::unistd::close(fd) {
warn!("LocalFile: failed to close disk file: {:?}", e);
}
}
}
}
impl<E> Read for LocalFile<E> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.file.read(buf)
}
}
impl<E> Write for LocalFile<E> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.file.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.file.flush()
}
}
impl<E> Seek for LocalFile<E> {
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.file.seek(pos)
}
}
impl<E: IoEngine + Send> Ufile for LocalFile<E> {
fn
|
(&self) -> u64 {
self.capacity
}
fn get_max_size(&self) -> u32 {
        // Set the max size to 1M to avoid interfering with the rate limiter.
0x100000
}
fn get_device_id(&self) -> io::Result<String> {
let blk_metadata = self.file.metadata()?;
// This is how kvmtool does it.
Ok(format!(
"{}{}{}",
blk_metadata.st_dev(),
blk_metadata.st_rdev(),
blk_metadata.st_ino()
))
}
fn get_data_evt_fd(&self) -> RawFd {
self.io_engine.event_fd().as_raw_fd()
}
fn io_read_submit(
&mut self,
offset: i64,
iovecs: &mut Vec<IoDataDesc>,
user_data: u16,
) -> io::Result<usize> {
self.io_engine.readv(offset, iovecs, user_data as u64)
}
fn io_write_submit(
&mut self,
offset: i64,
iovecs: &mut Vec<IoDataDesc>,
user_data: u16,
) -> io::Result<usize> {
self.io_engine.writev(offset, iovecs, user_data as u64)
}
fn io_complete(&mut self) -> io::Result<Vec<(u16, u32)>> {
Ok(self
.io_engine
.complete()?
.iter()
.map(|(user_data, res)| {
(
*user_data as u16,
if *res >= 0 {
VIRTIO_BLK_S_OK
} else {
VIRTIO_BLK_S_IOERR
},
)
})
.collect())
}
}
|
get_capacity
|
response.rs
|
use serde::Deserialize;
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Response<T> {
pub message: String,
pub status: Option<String>,
pub data: Option<T>,
pub error: Option<String>,
pub status_code: u32,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RateGetResponse {
pub message: String,
pub status: Option<String>,
pub rate: f64,
pub error: Option<String>,
pub status_code: u32,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PaymentSendData {
pub reference: String,
pub business_name: String,
pub business_email: String,
pub business_logo: String,
pub customer_name: String,
pub customer_email: String,
pub address: String,
pub crypto_amount: f64,
pub fiat_amount: f64,
pub fee_in_crypto: f64,
pub currency: String,
pub coin: String,
pub accept_partial_payment: bool,
pub network: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PaymentConfirmData {
pub id: String,
pub reference: String,
pub sender_address: Option<String>,
pub recipient_address: String,
pub actual_amount: f64,
pub amount_paid: Option<String>,
pub amount_paid_fiat: Option<String>,
pub fiat_amount: f64,
pub amount_recieved: Option<String>,
pub amount_recieved_fiat: Option<String>,
|
pub hash: Option<String>,
pub block_number: Option<String>,
#[serde(rename = "type")]
pub kind: Option<String>,
pub accept_partial_payment: bool,
pub status: String,
pub network: String,
pub blockchain: String,
pub fee_in_crypto: f64,
pub customer: PaymentConfirmCustomerData,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PaymentConfirmCustomerData {
pub customer_name: String,
pub customer_email: String,
pub customer_phone: Option<String>,
pub id: String,
pub network: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TransferSendData {
pub id: String,
pub transaction_hash: String,
pub wallet_address: String,
pub amount: f64,
pub coin: String,
pub created_at: String,
pub updated_at: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AcceptedCoinData {
pub id: String,
pub name: String,
pub symbol: String,
pub logo: String,
pub address: String,
pub network: String,
pub blockchain: String,
pub status: String,
pub created_at: String,
pub updated_at: String,
}
|
pub coin: String,
pub currency: String,
|
Synopsis.go
|
package animediff
// Synopsis describes differing synopsis.
type Synopsis struct {
SynopsisA string
SynopsisB string
NumericHash uint64
}
// TypeName returns the diff type.
func (diff *Synopsis) TypeName() string {
return "Synopsis"
}
// Explanation returns the description.
func (diff *Synopsis) Explanation() string {
return "Synopsis is shorter"
}
// DetailsA shows the details for the first anime.
func (diff *Synopsis) DetailsA() string {
return diff.SynopsisA
}
// DetailsB shows the details for the second anime.
func (diff *Synopsis) DetailsB() string {
return diff.SynopsisB
}
// Hash returns the hash for the suggested value (from anime B).
func (diff *Synopsis) Hash() uint64 {
|
return diff.NumericHash
}
|
|
TeleportMesh.js
|
import { Clock, Mesh, Vector2, LatheBufferGeometry, Color, ShaderMaterial } from './three/three.module.js';
class TeleportMesh extends Mesh{
constructor(){
super();
this.clock = new Clock();
let points = [];
const baseRadius = 0.25;
const baseHeight = 0.1;
const height = 2;
const poleRadius = 0.05;
        const topRadius = 0.1;
const baseSegments = 8;
const topSegments = 16;
let theta = Math.PI/baseSegments;
let pt;
let offset;
let yOffset;
points.push( new Vector2(0, 0) );
for(let i=0; i<baseSegments; i++){
offset = (i<baseSegments/2) ? Math.PI * 1.5 : -Math.PI/2;
pt = new Vector2( Math.cos( theta * i + offset ) * baseHeight/2 + baseRadius, Math.sin( theta * i + offset ) * baseHeight/2 + baseHeight/2);
points.push(pt);
}
offset = Math.PI * 1.0;
for(let i=baseSegments/2; i>=0; i--){
pt = new Vector2( Math.cos( theta * i + offset ) * baseHeight/2 + poleRadius + baseHeight/2, Math.sin( theta * i + offset ) * baseHeight/2 + baseHeight + baseHeight/2);
points.push(pt);
}
|
for(let i=0; i<topSegments; i++){
offset = (i<topSegments/2) ? Math.PI * 1.5 : -Math.PI/2;
pt = new Vector2( Math.cos( theta * i + offset ) * topRadius + poleRadius, Math.sin( theta * i + offset ) * topRadius + height - topRadius);
points.push(pt);
}
points.push( new Vector2(0, height) );
//points.forEach( pt => console.log( `${pt.x.toFixed(3)}, ${pt.y.toFixed(3)}`));
this.geometry = new LatheBufferGeometry( points, 16 );
this.material = new ShaderMaterial( {
uniforms: {
uDisplayHeight: { value: 0 },
uColor: { value: new Color( 0xFFDD00 )},
uSelected: { value: false },
uActive: { value: false }
},
vertexShader:`
varying vec3 vPosition;
varying vec3 vWorldNormal;
varying mat4 vModelMatrix;
void main(){
vPosition = position;
vWorldNormal = normalize((modelMatrix * vec4(normal, 0.0)).xyz);
vModelMatrix = modelMatrix;
gl_Position = projectionMatrix * modelViewMatrix * vec4( vPosition, 1.0 );
}`,
fragmentShader:`
varying vec3 vPosition;
varying vec3 vWorldNormal;
varying mat4 vModelMatrix;
uniform float uDisplayHeight;
uniform vec3 uColor;
uniform bool uSelected;
uniform bool uActive;
void main(){
vec3 worldPosition = ( vModelMatrix * vec4( vPosition, 1.0 )).xyz;
vec3 viewVector = normalize(cameraPosition - worldPosition);
float glow = max(0.0, 1.0 - clamp(dot(vWorldNormal, viewVector), 0.0, 1.0));
float alpha = (1.0 - smoothstep( uDisplayHeight - 0.2, uDisplayHeight, vPosition.y )) * glow;
if (uSelected && uActive){
alpha = clamp(alpha + 0.5, 0.0, 1.0);
}
gl_FragColor = vec4( uColor, alpha );
}`,
transparent: true
} );
points = [];
points.push( new Vector2( baseRadius, 0 ));
points.push( new Vector2( baseRadius, height ));
const geometry = new LatheBufferGeometry( points );
const material = new ShaderMaterial({
visible: false
});
const mesh = new Mesh( geometry, material );
this.add( mesh );
this.height = height;
}
fadeIn( time ){
this.visible = true;
this.startTime = this.clock.getElapsedTime();
this.duration = time;
this.material.uniforms.uDisplayHeight.value = 0;
this.material.uniforms.uActive.value = false;
this.state = 'fadeIn';
}
fadeOut( time ){
this.startTime = this.clock.getElapsedTime();
this.duration = time;
this.material.uniforms.uActive.value = false;
this.state = 'fadeOut';
}
set selected( value ){
this.material.uniforms.uSelected.value = value;
}
get selected(){
return this.material.uniforms.uSelected.value;
}
update(){
let elapsedTime = this.clock.getElapsedTime() - this.startTime;
let delta;
switch(this.state){
case 'fadeIn':
delta = elapsedTime/this.duration;
if (delta>1.0){
delta = 1.0;
this.state = 'active';
this.material.uniforms.uActive.value = true;
}
this.selected = false;
this.material.uniforms.uDisplayHeight.value = (this.height + 0.2) * delta;
break;
case 'fadeOut':
delta = elapsedTime/this.duration;
if (delta>1.0){
delta = 1.0;
this.state = 'inactive';
}
this.selected = false;
this.material.uniforms.uDisplayHeight.value = (this.height + 0.2) * (1.0 - delta);
break;
default:
break;
}
//console.log( this.material.uniforms.uDisplayHeight.value.toFixed(2) );
}
}
export { TeleportMesh };
|
theta = Math.PI/topSegments;
|
noxGateway.py
|
import sys
import prctl # Used to set thread name (visible in htop)
import zmq
from time import sleep
from threading import Thread, Event, current_thread
from datetime import datetime
from flask import current_app as app
from . import zmq_socket_config
context = zmq.Context()
""" Configure logger """
import logging
logger = logging.getLogger('alarm.thread')
class ThreadNoxAlarmGateway(Thread):
""" Thread used as a "gateway" between the Flask app and the Alarm process.
Forwards Alarm status from Alarm Process to Flask app
Forwards commands (start/stop alarm) from Flask app to Alarm Process
Use zmq PUB/SUB pattern to communicate with Alarm process.
Use socketio instance (parameter given at init) to communicate with Flask app.
    The thread is started when the first client connects to the web socket.
    Any new client will use the existing thread.
    Why use a thread:
    - Need a while loop to receive status continuously from Alarm Process
    - Only one thread is needed regardless of how many web clients are connected.
    - Commands could be received directly from web server socketio handlers, but
    it is cleaner to centralize all inter-process communication here, commands and status
    (moreover, this thread is initialized with an instance of flask socketio, allowing
    it to communicate easily with the web app).
"""
def __init__(self, socketio):
|
def run(self):
""" Start the Gateway thread and run infinite loop
Forwards Alarm status from Alarm Process to Flask app
Forwards commands (start/stop alarm) from Flask app to Alarm Process
"""
prctl.set_name("NoxGateway") # set thread name visible in htop
logger.info('Init thread (delay %ss) %s' %(self.cycle_delay, str(current_thread().ident)))
while (True):
self.forward_command_from_web_to_alarm()
self.forward_status_from_alarm_to_web()
self.forward_request_status_from_web_to_alarm()
sleep(self.cycle_delay)
def forward_status_from_alarm_to_web(self):
""" Forward to web app the status received from Alarm Process.
Receive status using zmq SUB socket.
Forward to web client using socketio instance.
"""
try:
payload = self.SUB_STATE.recv_string(flags=zmq.NOBLOCK)
topic, message = payload.split()
if (topic == zmq_socket_config.TOPIC_STATE):
                logger.debug('Noxalarm gateway forwarding state %s' %(message))
self.socketio.emit('noxalarmstate', {'state': message}, namespace='/noxalarm')
elif (topic == zmq_socket_config.TOPIC_EVENT):
                logger.debug('Noxalarm gateway forwarding event %s' %(message))
date = datetime.now().strftime("%d/%m %H:%M")
self.socketio.emit('noxalarmevent', {'alarm_event': message, 'scope': 'nox', 'date': date, 'user': '-'},
namespace='/noxalarm')
# No command received, do nothing
except zmq.error.Again:
pass
def forward_command_from_web_to_alarm(self):
""" Forward to Alarm Process the commands received from web app.
If a command is triggered from web app, a flag is set.
        If the flag is set, this function forwards the command to Alarm Process, then resets the flag to None.
Command forwarded using zmq PUB socket.
The Alarm process will call its private methods to start/stop alarm (set Unipi IO)
"""
if self.command_alarm is not None:
if self.command_alarm is True:
self.command_alarm = None
self.PUB_COMMAND.send_string(zmq_socket_config.TOPIC_REQUEST + " " + zmq_socket_config.COMMAND_START)
                logger.debug('Noxalarm gateway forwarding command Start')
if self.command_alarm is False:
self.command_alarm = None
self.PUB_COMMAND.send_string(zmq_socket_config.TOPIC_REQUEST + " " + zmq_socket_config.COMMAND_STOP)
                logger.debug('Noxalarm gateway forwarding command Stop')
def forward_request_status_from_web_to_alarm(self):
""" Forward to Alarm Process a request to update the display.
If a new web client connects, a flag is set.
        If the flag is set, this function forwards the "status update" request to Alarm Process,
        then resets the flag to None.
The request is forwarded using zmq PUB socket.
The Alarm process will call its private methods to send the status
"""
# if self.event_request_status.is_set():
# self.event_request_status.clear()
if self.event_request_status is True:
self.event_request_status = None
self.PUB_COMMAND.send_string(zmq_socket_config.TOPIC_REQUEST + " " + zmq_socket_config.STATUS_UPDATE)
logger.debug('Noxalarm gateway forward request status update')
|
        self.socketio = socketio # Instance of socketio so that the thread interacts with the Flask web websocket
self.cycle_delay = 1 # cycle delay for execution of the thread while loop
self.command_alarm = None # Flag to receive commands from websocket to thread (to alarm machine)
# Flag to receive "status update" request from web app to thread (to alarm machine)
# self.event_request_status = Event()
self.event_request_status = None
# Create a zmq PUB server to send message to the Alarm Process zmq client
# using socket PUB_COMMAND to send commands start/stop to the Alarm Process
self.PUB_COMMAND = context.socket(zmq.PUB)
self.PUB_COMMAND.bind("tcp://*:%s" % zmq_socket_config.port_socket_noxalarm_command)
# Connect a zmq SUB client connected to the Alarm Process zmq server
# using the Socket SUB_STATE to receive status/event from Alarm Process
self.SUB_STATE = context.socket(zmq.SUB)
self.SUB_STATE.connect ("tcp://localhost:%s" % zmq_socket_config.port_socket_noxalarm_state)
self.SUB_STATE.setsockopt_string(zmq.SUBSCRIBE, zmq_socket_config.TOPIC_EVENT)
self.SUB_STATE.setsockopt_string(zmq.SUBSCRIBE, zmq_socket_config.TOPIC_STATE)
        # Call the super class __init__ method (the super class is Thread)
super(ThreadNoxAlarmGateway, self).__init__()
|
Gholami.py
|
# *** References ***
# Gholami & Mohammadi, A Novel Combination of Bees and Firefly Algorithm to Optimize Continuous Problems
# Türker Tuncer, LDW-SCSA: Logistic Dynamic Weight based Sine Cosine Search Algorithm for Numerical Functions Optimization
# https://arxiv.org/ftp/arxiv/papers/1809/1809.03055.pdf
# Hartmut Pohlheim, Examples of Objective Functions
# http://www.geatbx.com/download/GEATbx_ObjFunExpl_v38.pdf
# Wikipedia, Test functions for optimization
# https://en.wikipedia.org/wiki/Test_functions_for_optimization
import numpy as np
from .Base_Coster import Base_Coster
class F1(Base_Coster):
"""
Function F1 from Gholami & Mohammadi FA-BA Hybrid paper
De Jong / Sphere (ND) cost function; optimum @ (0,...
"""
@staticmethod
def extents(Ndim):
return -5.12 * np.ones(Ndim), 5.12 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = sum(np.power(self.XA, 2))
def meso(self):
None
class F2(Base_Coster):
"""
Function F2 from Gholami & Mohammadi FA-BA Hybrid paper
Schwefel 2.22 (ND) cost function; optimum @ (0,...
"""
@staticmethod
def extents(Ndim):
return -10 * np.ones(Ndim), 10 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = sum(np.abs(self.XA)) + np.prod(np.abs(self.XA))
def meso(self):
None
class F3(Base_Coster):
"""
Function F3 from Gholami & Mohammadi FA-BA Hybrid paper
Schwefel 1.2 - Rotated hyper-ellipsoid (ND) cost function; optimum @ (0,...
"""
@staticmethod
def extents(Ndim):
return -65.536 * np.ones(Ndim), 65.536 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = 0
for i in range(0, len(self.XA)):
self.cost = self.cost + (sum(self.XA[0:(i+1)]))**2
def meso(self):
None
class F4(Base_Coster):
"""
Function F4 from Gholami & Mohammadi FA-BA Hybrid paper
Schwefel 2.21 (ND) cost function; optimum @ (0,...
"""
@staticmethod
def extents(Ndim):
return -100 * np.ones(Ndim), 100 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = max(np.abs(self.XA))
def meso(self):
None
class F5(Base_Coster):
"""
Function F5 from Gholami & Mohammadi FA-BA Hybrid paper
    Rosenbrock (ND) cost function; optimum @ (1,...
"""
@staticmethod
def extents(Ndim):
return -2.048 * np.ones(Ndim), 2.048 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = sum(100 * np.power(self.XA[1:len(self.XA)] - np.power(self.XA[0:(len(self.XA)-1)], 2), 2) + np.power(1 - self.XA[0:(len(self.XA)-1)], 2))
def meso(self):
None
class F6(Base_Coster):
"""
Function F6 from Gholami & Mohammadi FA-BA Hybrid paper
Step (ND) cost function; optimum @ (-0.5,...
"""
@staticmethod
def extents(Ndim):
return -100 * np.ones(Ndim), 100 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = sum(np.floor(np.power(self.XA + 0.5, 2)))
def meso(self):
None
class F7(Base_Coster):
"""
Function F7 from Gholami & Mohammadi FA-BA Hybrid paper
Noise (ND) cost function; optimum @ (0,...
"""
@staticmethod
def extents(Ndim):
return -1.28 * np.ones(Ndim), 1.28 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = sum(np.power(self.XA, 4) * np.asarray(range(1, 1 + len(self.XA)))) + np.random.rand(1)
def meso(self):
None
class F8(Base_Coster):
"""
Function F8 from Gholami & Mohammadi FA-BA Hybrid paper
Schwefel (ND) cost function
"""
@staticmethod
def extents(Ndim):
return -500 * np.ones(Ndim), 500 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = -sum(self.XA * np.sin(np.sqrt(abs(self.XA))))
def meso(self):
None
class F9(Base_Coster):
"""
Function F9 from Gholami & Mohammadi FA-BA Hybrid paper
Rastrigin (ND) cost function; optimum @ (0,...
"""
@staticmethod
def extents(Ndim):
return -5.12 * np.ones(Ndim), 5.12 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
def evaluate_cost(self):
self.cost = sum(np.power(self.XA, 2) - 10 * np.cos(2 * np.pi * self.XA) + 10)
def meso(self):
None
class F10(Base_Coster):
"""
Function F10 from Gholami & Mohammadi FA-BA Hybrid paper
Ackley (ND) cost function; optimum @ (0,...
"""
@staticmethod
def extents(Ndim):
return -32.768 * np.ones(Ndim), 32.768 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
@staticmethod
def rms(X):
return np.sqrt(X.dot(X) / len(X))
def evaluate_cost(self):
self.cost = np.exp(1) + 20 * (1 - np.exp(-F10.rms(self.XA) / 5)) - np.exp(sum(np.cos(2 * np.pi * self.XA)) / len(self.XA))
def meso(self):
None
class F11(Base_Coster):
"""
Function F11 from Gholami & Mohammadi FA-BA Hybrid paper
Griewangk (ND) cost function; optimum @ (0,...
"""
@staticmethod
def extents(Ndim):
return -600 * np.ones(Ndim), 600 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
r
|
def evaluate_cost(self):
self.cost = sum(np.power(self.XA, 2)) / 4000 - np.prod(np.cos(np.power(self.XA, 2) / np.power(range(1, 1+len(self.XA)), 0.5))) + 1
def meso(self):
None
class F12(Base_Coster):
"""
Function F12 from Gholami & Mohammadi FA-BA Hybrid paper
Generalised Penalised 1 (ND) cost function; optimum @ (0,...
"""
@staticmethod
def extents(Ndim):
return -50 * np.ones(Ndim), 50 * np.ones(Ndim)
def __init__(self, base_optimiser):
Base_Coster.__init__(self, base_optimiser)
def map_to_solution_space(self, X):
return X
@staticmethod
def u(xi, a, k, m):
if xi > a:
v = k * (xi - a)**m
elif xi < -a:
v = k * (-xi - a)**m
else:
v = 0
return v
def evaluate_cost(self):
y = 1 + (self.XA + 1) / 4
c = 0
for i in range(0, len(self.XA)):
c = c + F12.u(self.XA[i], 10, 100, 4)
self.cost = sum(np.power(y[0:(len(self.XA)-1)] - 1, 2) * (1 + 10 * np.power(np.sin(np.pi * y[1:len(self.XA)]), 2)))
self.cost = (self.cost + 10 * np.sin(np.pi * y[0]) + (y[len(self.XA)-1] - 1)**2) * np.pi / len(self.XA) + c
def meso(self):
None
def Gholami_TestFunction_Extents(number, Ndim=30):
minima = None
maxima = None
if number == 1:
minima, maxima = F1.extents(Ndim)
if number == 2:
minima, maxima = F2.extents(Ndim)
if number == 3:
minima, maxima = F3.extents(Ndim)
if number == 4:
minima, maxima = F4.extents(Ndim)
if number == 5:
minima, maxima = F5.extents(Ndim)
if number == 6:
minima, maxima = F6.extents(Ndim)
if number == 7:
minima, maxima = F7.extents(Ndim)
if number == 8:
minima, maxima = F8.extents(Ndim)
if number == 9:
minima, maxima = F9.extents(Ndim)
if number == 10:
minima, maxima = F10.extents(Ndim)
if number == 11:
minima, maxima = F11.extents(Ndim)
if number == 12:
minima, maxima = F12.extents(Ndim)
return minima, maxima
def Gholami_TestFunction_Coster(number, base_optimiser):
coster = None
if number == 1:
coster = F1(base_optimiser)
if number == 2:
coster = F2(base_optimiser)
if number == 3:
coster = F3(base_optimiser)
if number == 4:
coster = F4(base_optimiser)
if number == 5:
coster = F5(base_optimiser)
if number == 6:
coster = F6(base_optimiser)
if number == 7:
coster = F7(base_optimiser)
if number == 8:
coster = F8(base_optimiser)
if number == 9:
coster = F9(base_optimiser)
if number == 10:
coster = F10(base_optimiser)
if number == 11:
coster = F11(base_optimiser)
if number == 12:
coster = F12(base_optimiser)
return coster
|
eturn X
|
TaobaoItemSkuGetRequest.go
|
package product
import (
"net/url"
"github.com/bububa/opentaobao/model"
)
/*
Get SKU APIRequest
taobao.item.sku.get
Gets the SKU data corresponding to sku_id.
The SKU identified by sku_id must belong to the seller identified by the given nick.
<br/><strong><a href="https://console.open.taobao.com/dingWeb.htm?from=itemapi" target="_blank">Click to view more item API documentation</a></strong>
*/
type TaobaoItemSkuGetRequest struct {
model.Params
    // List of fields to return. Possible values: all fields of the Sku struct; separate multiple fields with ",".
fields string
    // The SKU id. It can be obtained via taobao.item.seller.get.
skuId int64
    // The numeric IID of the item (one of num_iid and nick must be provided; num_iid is recommended). When the numeric item id is passed, the returned result includes cspu (product specification info on the SKU).
numIid int64
}
func NewTaobaoItemSkuGetRequest() *TaobaoItemSkuGetRequest{
return &TaobaoItemSkuGetRequest{
Params: model.NewParams(),
}
}
func (r TaobaoItemSkuGetRequest) GetApiMethodName() string {
return "taobao.item.sku.get"
}
f
|
uest) GetApiParams() url.Values {
params := url.Values{}
for k, v := range r.GetRawParams() {
params.Set(k, v.String())
}
return params
}
func (r *TaobaoItemSkuGetRequest) SetFields(fields string) error {
r.fields = fields
r.Set("fields", fields)
return nil
}
func (r TaobaoItemSkuGetRequest) GetFields() string {
return r.fields
}
func (r *TaobaoItemSkuGetRequest) SetSkuId(skuId int64) error {
r.skuId = skuId
r.Set("sku_id", skuId)
return nil
}
func (r TaobaoItemSkuGetRequest) GetSkuId() int64 {
return r.skuId
}
func (r *TaobaoItemSkuGetRequest) SetNumIid(numIid int64) error {
r.numIid = numIid
r.Set("num_iid", numIid)
return nil
}
func (r TaobaoItemSkuGetRequest) GetNumIid() int64 {
return r.numIid
}
|
unc (r TaobaoItemSkuGetReq
|
github.go
|
package util
import (
"fmt"
"io"
"net/http"
"os"
"github.com/Optum/dce-cli/configs"
"github.com/Optum/dce-cli/internal/constants"
observ "github.com/Optum/dce-cli/internal/observation"
)
type GithubUtil struct {
Config *configs.Root
Observation *observ.ObservationContainer
}
func (u *GithubUtil) DownloadGithubReleaseAsset(assetName string, releaseName string) error {
// There is an open issue on being able to get different versions. That
// would go here...
assetDownloadURL := fmt.Sprintf(constants.GithubAssetDownloadURLFormat, releaseName, assetName)
req, err := http.NewRequest("GET", assetDownloadURL, nil)
if err != nil {
log.Fatalf("error: %v", err)
}
|
}
defer resp.Body.Close()
out, err := os.Create(assetName)
if err != nil {
return err
}
// #nosec
defer out.Close()
_, err = io.Copy(out, resp.Body)
if err != nil {
return err
}
return nil
}
|
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
|
orchestration.go
|
package mcis
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"github.com/cloud-barista/poc-cicd-tumblebug/src/core/common"
)
// Status for mcis automation
const (
// AutoStatusReady is const for "Ready" status.
AutoStatusReady string = "Ready"
// AutoStatusChecking is const for "Checking" status.
AutoStatusChecking string = "Checking"
// AutoStatusDetected is const for "Detected" status.
AutoStatusDetected string = "Detected"
// AutoStatusOperating is const for "Operating" status.
AutoStatusOperating string = "Operating"
// AutoStatusStabilizing is const for "Stabilizing" status.
AutoStatusStabilizing string = "Stabilizing"
// AutoStatusTimeout is const for "Timeout" status.
AutoStatusTimeout string = "Timeout"
// AutoStatusError is const for "Failed" status.
AutoStatusError string = "Failed"
// AutoStatusSuspended is const for "Suspended" status.
AutoStatusSuspended string = "Suspended"
)
// Action for mcis automation
const (
// AutoActionScaleOut is const for "ScaleOut" action.
AutoActionScaleOut string = "ScaleOut"
// AutoActionScaleIn is const for "ScaleIn" action.
AutoActionScaleIn string = "ScaleIn"
)
// AutoCondition is struct for MCIS auto-control condition.
type AutoCondition struct {
Metric string `json:"metric"`
Operator string `json:"operator"` // <, <=, >, >=, ...
Operand string `json:"operand"` // 10, 70, 80, 98, ...
EvaluationPeriod string `json:"evaluationPeriod"` // evaluationPeriod
EvaluationValue []string `json:"evaluationValue"`
//InitTime string `json:"initTime"` // to check start of duration
//Duration string `json:"duration"` // duration for checking
}
// AutoAction is struct for MCIS auto-control action.
type AutoAction struct {
ActionType string `json:"actionType"`
Vm TbVmInfo `json:"vm"`
PostCommand McisCmdReq `json:"postCommand"`
PlacementAlgo string `json:"placementAlgo"`
}
// Policy is struct for MCIS auto-control Policy request that includes AutoCondition, AutoAction, Status.
type Policy struct {
AutoCondition AutoCondition `json:"autoCondition"`
AutoAction AutoAction `json:"autoAction"`
Status string `json:"status"`
}
// McisPolicyInfo is struct for MCIS auto-control Policy object.
type McisPolicyInfo struct {
Name string `json:"Name"` //MCIS Name (for request)
Id string `json:"Id"` //MCIS Id (generated ID by the Name)
Policy []Policy `json:"policy"`
ActionLog string `json:"actionLog"`
Description string `json:"description"`
}
// OrchestrationController is responsible for executing MCIS automation policy.
// OrchestrationController will be periodically invoked by a time.NewTicker in main.go.
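// An illustrative wiring sketch (assumed; the actual ticker lives in main.go,
// and autoControlInterval is a placeholder name):
//
//	ticker := time.NewTicker(autoControlInterval)
//	for range ticker.C {
//		mcis.OrchestrationController()
//	}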
func OrchestrationController() {
nsList, err := common.ListNsId()
if err != nil {
common.CBLog.Error(err)
err = fmt.Errorf("an error occurred while getting namespaces' list: " + err.Error())
return
}
//fmt.Println("")
for _, nsId := range nsList {
mcisPolicyList := ListMcisPolicyId(nsId)
for _, m := range mcisPolicyList {
fmt.Println("NS[" + nsId + "]" + "McisPolicy[" + m + "]")
}
for _, v := range mcisPolicyList {
key := common.GenMcisPolicyKey(nsId, v, "")
//fmt.Println(key)
keyValue, _ := common.CBStore.Get(key)
if keyValue == nil {
//mapA := map[string]string{"message": "Cannot find " + key}
//return c.JSON(http.StatusOK, &mapA)
fmt.Println("keyValue is nil")
}
//fmt.Println("<" + keyValue.Key + "> \n" + keyValue.Value)
mcisPolicyTmp := McisPolicyInfo{}
json.Unmarshal([]byte(keyValue.Value), &mcisPolicyTmp)
/* FYI
const AutoStatusReady string = "Ready"
const AutoStatusChecking string = "Checking"
const AutoStatusHappened string = "Happened"
const AutoStatusOperating string = "Operating"
const AutoStatusTimeout string = "Timeout"
const AutoStatusError string = "Error"
const AutoStatusSuspend string = "Suspend"
*/
for policyIndex := range mcisPolicyTmp.Policy {
fmt.Println("\n[MCIS-Policy-StateMachine]")
common.PrintJsonPretty(mcisPolicyTmp.Policy[policyIndex])
switch {
case mcisPolicyTmp.Policy[policyIndex].Status == AutoStatusReady:
fmt.Println("- PolicyStatus[" + AutoStatusReady + "],[" + v + "]")
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusChecking
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
fmt.Println("[Check MCIS Policy] " + mcisPolicyTmp.Id)
check, _ := CheckMcis(nsId, mcisPolicyTmp.Id)
fmt.Println("[Check existence of MCIS] " + mcisPolicyTmp.Id)
//keyValueMcis, _ := common.CBStore.Get(common.GenMcisKey(nsId, mcisPolicyTmp.Id, ""))
if !check {
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusError
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
fmt.Println("[MCIS is not exist] " + mcisPolicyTmp.Id)
break
					} else { // need to enhance: loop over each policy and evaluate its metric
//Checking (measuring)
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusChecking
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
fmt.Println("- PolicyStatus[" + mcisPolicyTmp.Policy[policyIndex].Status + "],[" + v + "]")
fmt.Println("[MCIS is exist] " + mcisPolicyTmp.Id)
content, err := GetMonitoringData(nsId, mcisPolicyTmp.Id, mcisPolicyTmp.Policy[policyIndex].AutoCondition.Metric)
if err != nil {
common.CBLog.Error(err)
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusError
break
}
//common.PrintJsonPretty(content)
//Statistic
sumMcis := 0.0
for _, monData := range content.McisMonitoring {
//fmt.Println("[monData.Value: ] " + monData.Value)
monDataValue, _ := strconv.ParseFloat(monData.Value, 64)
sumMcis += monDataValue
}
averMcis := (sumMcis / float64(len(content.McisMonitoring)))
fmt.Printf("[monData.Value] AverMcis: %f, SumMcis: %f \n", averMcis, sumMcis)
evaluationPeriod, _ := strconv.Atoi(mcisPolicyTmp.Policy[policyIndex].AutoCondition.EvaluationPeriod)
evaluationValue := mcisPolicyTmp.Policy[policyIndex].AutoCondition.EvaluationValue
						evaluationValue = append([]string{fmt.Sprintf("%f", averMcis)}, evaluationValue...) // prepend the current average value
mcisPolicyTmp.Policy[policyIndex].AutoCondition.EvaluationValue = evaluationValue
sum := 0.0
aver := -0.1
						// accumulate previous evaluation values
fmt.Printf("[Evaluation History]\n")
for evi, evv := range evaluationValue {
evvFloat, _ := strconv.ParseFloat(evv, 64)
sum += evvFloat
fmt.Printf("[%v] %f ", evi, evvFloat)
							// stop once evaluationPeriod values have been consumed
if evi >= evaluationPeriod-1 {
break
}
}
						// average over evaluationPeriod (skip if there is not enough data for the period)
if evaluationPeriod != 0 && len(evaluationValue) >= evaluationPeriod {
aver = sum / float64(evaluationPeriod)
}
fmt.Printf("\n[Evaluation] Aver: %f, Period: %v \n", aver, evaluationPeriod)
//Detecting
operator := mcisPolicyTmp.Policy[policyIndex].AutoCondition.Operator
operand, _ := strconv.ParseFloat(mcisPolicyTmp.Policy[policyIndex].AutoCondition.Operand, 64)
if evaluationPeriod == 0 {
fmt.Println("[Checking] Not available evaluationPeriod ")
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusError
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
break
}
// not enough evaluationPeriod
if aver == -0.1 {
fmt.Println("[Checking] Not enough evaluationPeriod ")
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusReady
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
break
}
switch {
case operator == ">=":
if aver >= operand {
fmt.Printf("[Detected] Aver: %f >= Operand: %f \n", aver, operand)
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusDetected
} else {
fmt.Printf("[Not Detected] Aver: %f >= Operand: %f \n", aver, operand)
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusReady
}
case operator == ">":
if aver > operand {
fmt.Printf("[Detected] Aver: %f > Operand: %f \n", aver, operand)
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusDetected
} else {
fmt.Printf("[Not Detected] Aver: %f > Operand: %f \n", aver, operand)
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusReady
}
case operator == "<=":
if aver <= operand {
fmt.Printf("[Detected] Aver: %f <= Operand: %f \n", aver, operand)
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusDetected
} else {
fmt.Printf("[Not Detected] Aver: %f <= Operand: %f \n", aver, operand)
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusReady
}
case operator == "<":
if aver < operand {
fmt.Printf("[Detected] Aver: %f < Operand: %f \n", aver, operand)
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusDetected
} else {
fmt.Printf("[Not Detected] Aver: %f < Operand: %f \n", aver, operand)
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusReady
}
default:
fmt.Println("[Checking] Not available operator " + operator)
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusError
}
}
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
fmt.Println("- PolicyStatus[" + mcisPolicyTmp.Policy[policyIndex].Status + "],[" + v + "]")
case mcisPolicyTmp.Policy[policyIndex].Status == AutoStatusChecking:
fmt.Println("- PolicyStatus[" + mcisPolicyTmp.Policy[policyIndex].Status + "],[" + v + "]")
//mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusDetected
case mcisPolicyTmp.Policy[policyIndex].Status == AutoStatusDetected:
fmt.Println("- PolicyStatus[" + mcisPolicyTmp.Policy[policyIndex].Status + "],[" + v + "]")
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusOperating
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
fmt.Println("- PolicyStatus[" + mcisPolicyTmp.Policy[policyIndex].Status + "],[" + v + "]")
//Action
/*
// Actions for mcis automation
const AutoActionScaleOut string = "ScaleOut"
const AutoActionScaleIn string = "ScaleIn"
*/
autoAction := mcisPolicyTmp.Policy[policyIndex].AutoAction
fmt.Println("[autoAction] " + autoAction.ActionType)
switch {
case autoAction.ActionType == AutoActionScaleOut:
autoAction.Vm.Label = labelAutoGen
// append UUID to given vm name to avoid duplicated vm ID.
autoAction.Vm.Name = autoAction.Vm.Name + "-" + common.GenUuid()
//vmReqTmp := autoAction.Vm
if autoAction.PlacementAlgo == "random" {
fmt.Println("[autoAction.PlacementAlgo] " + autoAction.PlacementAlgo)
var vmTmpErr error
autoAction.Vm, vmTmpErr = GetVmTemplate(nsId, mcisPolicyTmp.Id, autoAction.PlacementAlgo)
if vmTmpErr != nil {
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusError
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
}
autoAction.Vm.Name = autoAction.Vm.Name + "-Random"
autoAction.Vm.Label = labelAutoGen
}
common.PrintJsonPretty(autoAction.Vm)
fmt.Println("[Action] " + autoAction.ActionType)
// ScaleOut MCIS according to the VM requirement.
fmt.Println("[Generating VM]")
result, vmCreateErr := CorePostMcisVm(nsId, mcisPolicyTmp.Id, &autoAction.Vm)
if vmCreateErr != nil {
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusError
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
}
common.PrintJsonPretty(*result)
nullMcisCmdReq := McisCmdReq{}
if autoAction.PostCommand != nullMcisCmdReq {
fmt.Println("[Post Command to VM] " + autoAction.PostCommand.Command)
_, cmdErr := CorePostCmdMcisVm(nsId, mcisPolicyTmp.Id, autoAction.Vm.Name, &autoAction.PostCommand)
if cmdErr != nil {
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusError
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
}
}
case autoAction.ActionType == AutoActionScaleIn:
fmt.Println("[Action] " + autoAction.ActionType)
// ScaleIn MCIS.
fmt.Println("[Removing VM]")
vmList, vmListErr := GetVmListByLabel(nsId, mcisPolicyTmp.Id, labelAutoGen)
if vmListErr != nil {
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusError
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
}
if len(vmList) != 0 {
removeTargetVm := vmList[len(vmList)-1]
fmt.Println("[Removing VM ID] " + removeTargetVm)
delVmErr := DelMcisVm(nsId, mcisPolicyTmp.Id, removeTargetVm, "")
if delVmErr != nil {
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusError
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
}
}
default:
}
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusStabilizing
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
fmt.Println("- PolicyStatus[" + mcisPolicyTmp.Policy[policyIndex].Status + "],[" + v + "]")
case mcisPolicyTmp.Policy[policyIndex].Status == AutoStatusStabilizing:
fmt.Println("- PolicyStatus[" + mcisPolicyTmp.Policy[policyIndex].Status + "],[" + v + "]")
					//initialize evaluation history so that the controller does not act too early.
					//This stabilizes the MCIS by clearing previously collected measurements,
					//which will trigger the "[Checking] Not enough evaluationPeriod" path.
mcisPolicyTmp.Policy[policyIndex].AutoCondition.EvaluationValue = nil
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusReady
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
case mcisPolicyTmp.Policy[policyIndex].Status == AutoStatusOperating:
fmt.Println("- PolicyStatus[" + mcisPolicyTmp.Policy[policyIndex].Status + "],[" + v + "]")
//mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusReady
//UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
case mcisPolicyTmp.Policy[policyIndex].Status == AutoStatusTimeout:
fmt.Println("- PolicyStatus[" + mcisPolicyTmp.Policy[policyIndex].Status + "],[" + v + "]")
case mcisPolicyTmp.Policy[policyIndex].Status == AutoStatusError:
fmt.Println("- PolicyStatus[" + mcisPolicyTmp.Policy[policyIndex].Status + "],[" + v + "]")
mcisPolicyTmp.Policy[policyIndex].Status = AutoStatusReady
UpdateMcisPolicyInfo(nsId, mcisPolicyTmp)
case mcisPolicyTmp.Policy[policyIndex].Status == AutoStatusSuspended:
fmt.Println("- PolicyStatus[" + mcisPolicyTmp.Policy[policyIndex].Status + "],[" + v + "]")
default:
}
}
}
}
}
// UpdateMcisPolicyInfo updates McisPolicyInfo object in DB.
func UpdateMcisPolicyInfo(nsId string, mcisPolicyInfoData McisPolicyInfo) {
key := common.GenMcisPolicyKey(nsId, mcisPolicyInfoData.Id, "")
val, _ := json.Marshal(mcisPolicyInfoData)
err := common.CBStore.Put(string(key), string(val))
if err != nil && !strings.Contains(err.Error(), common.CbStoreKeyNotFoundErrorString) {
common.CBLog.Error(err)
}
//fmt.Println("===========================")
//vmkeyValue, _ := common.CBStore.Get(string(key))
//fmt.Println("<" + vmkeyValue.Key + "> \n" + vmkeyValue.Value)
//fmt.Println("===========================")
}
// CreateMcisPolicy creates a McisPolicyInfo object in the DB according to the user's requirements.
func CreateMcisPolicy(nsId string, mcisId string, u *McisPolicyInfo) (McisPolicyInfo, error) {
err := common.CheckString(nsId)
if err != nil {
temp := McisPolicyInfo{}
common.CBLog.Error(err)
return temp, err
}
err = common.CheckString(mcisId)
if err != nil {
temp := McisPolicyInfo{}
common.CBLog.Error(err)
return temp, err
}
check, _ := CheckMcisPolicy(nsId, mcisId)
u.Name = mcisId
u.Id = mcisId
//u.Status = AutoStatusReady
if check {
temp := McisPolicyInfo{}
err := fmt.Errorf("The MCIS Policy Obj " + u.Name + " already exists.")
return temp, err
}
for policyIndex := range u.Policy {
u.Policy[policyIndex].Status = AutoStatusReady
}
content := *u
// cb-store
fmt.Println("=========================== PUT CreateMcisPolicy")
Key := common.GenMcisPolicyKey(nsId, content.Id, "")
Val, _ := json.Marshal(content)
//fmt.Println("Key: ", Key)
//fmt.Println("Val: ", Val)
err = common.CBStore.Put(string(Key), string(Val))
if err != nil {
common.CBLog.Error(err)
return content, err
}
keyValue, _ := common.CBStore.Get(string(Key))
fmt.Println("<KEY>\n" + keyValue.Key + "\n<VAL>\n" + keyValue.Value)
fmt.Println("===========================")
return content, nil
}
// GetMcisPolicyObject returns McisPolicyInfo object.
func GetMcisPolicyObject(nsId string, mcisId string) (McisPolicyInfo, error) {
fmt.Println("[GetMcisPolicyObject]" + mcisId)
err := common.CheckString(nsId)
if err != nil {
temp := McisPolicyInfo{}
common.CBLog.Error(err)
return temp, err
}
err = common.CheckString(mcisId)
if err != nil {
temp := McisPolicyInfo{}
common.CBLog.Error(err)
return temp, err
}
key := common.GenMcisPolicyKey(nsId, mcisId, "")
fmt.Println("Key: ", key)
keyValue, err := common.CBStore.Get(key)
if err != nil {
common.CBLog.Error(err)
return McisPolicyInfo{}, err
}
if keyValue == nil {
return McisPolicyInfo{}, err
}
fmt.Println("<KEY>\n" + keyValue.Key + "\n<VAL>\n" + keyValue.Value)
mcisPolicyTmp := McisPolicyInfo{}
json.Unmarshal([]byte(keyValue.Value), &mcisPolicyTmp)
return mcisPolicyTmp, nil
}
// GetAllMcisPolicyObject returns all McisPolicyInfo objects.
func GetAllMcisPolicyObject(nsId string) ([]McisPolicyInfo, error) {
err := common.CheckString(nsId)
if err != nil {
common.CBLog.Error(err)
return nil, err
}
Mcis := []McisPolicyInfo{}
mcisList := ListMcisPolicyId(nsId)
for _, v := range mcisList {
key := common.GenMcisPolicyKey(nsId, v, "")
keyValue, _ := common.CBStore.Get(key)
if keyValue == nil {
return nil, fmt.Errorf("Cannot find " + key)
}
mcisTmp := McisPolicyInfo{}
json.Unmarshal([]byte(keyValue.Value), &mcisTmp)
Mcis = append(Mcis, mcisTmp)
}
return Mcis, nil
}
// ListMcisPolicyId returns a list of Ids for all McisPolicyInfo objects.
func ListMcisPolicyId(nsId string) []string {
err := common.CheckString(nsId)
if err != nil {
common.CBLog.Error(err)
return nil
}
//fmt.Println("[Get MCIS Policy ID list]")
key := "/ns/" + nsId + "/policy/mcis"
keyValue, _ := common.CBStore.GetList(key, true)
var mcisList []string
for _, v := range keyValue {
if !strings.Contains(v.Key, "vm") {
mcisList = append(mcisList, strings.TrimPrefix(v.Key, "/ns/"+nsId+"/policy/mcis/"))
}
}
return mcisList
}
// DelMcisPolicy deletes McisPolicyInfo object by mcisId.
func DelMcisPolicy(nsId string, mcisId string) error {
err := common.CheckString(nsId)
if err != nil {
common.CBLog.Error(err)
return err
}
err = common.CheckString(mcisId)
if err != nil {
common.CBLog.Error(err)
return err
}
check, _ := CheckMcisPolicy(nsId, mcisId)
if !check {
err := fmt.Errorf("The mcis Policy" + mcisId + " does not exist.")
return err
}
fmt.Println("[Delete MCIS Policy] " + mcisId)
key := common.GenMcisPolicyKey(nsId, mcisId, "")
fmt.Println(key)
// delete mcis Policy info
err = common.CBStore.Delete(key)
if err != nil {
common.CBLog.Error(err)
return err
}
return nil
}
// DelAllMcisPolicy deletes all McisPolicyInfo objects.
func DelAllMcisPolicy(nsId string) (string, error) {
err := common.CheckString(nsId)
if err != nil {
common.CBLog.Error(err)
return "", err
}
mcisList := ListMcisPolicyId(nsId)
if len(mcisList) == 0 {
return "No MCIS Policy to delete", nil
}
for _, v := range mcisList {
err := DelMcisPolicy(nsId, v)
if err != nil {
common.CBLog.Error(err)
return "", fmt.Errorf("Failed to delete All MCIS Policies")
}
}
return "All MCIS Policies has been deleted", nil
}
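// Illustrative usage sketch (not part of this package's API; the namespace and
// MCIS ids below are placeholders and error handling is elided):
//
//	policy, _ := GetMcisPolicyObject("ns01", "mcis01")
//	fmt.Println(policy.Id)
//	_ = DelMcisPolicy("ns01", "mcis01")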
connectFactory.js
/* @flow */
import objectReduce from 'fast-loops/lib/objectReduce'
import objectEach from 'fast-loops/lib/objectEach'
import { combineMultiRules } from 'fela-tools'
import shallowCompare from 'react-addons-shallow-compare'
import generateDisplayName from './generateDisplayName'
import hoistStatics from './hoistStatics'
export type ConnectConfig = {
pure?: boolean,
}
const defaultConfig: ConnectConfig = {
pure: true,
}
export default function connectFactory(
BaseComponent: any,
createElement: Function,
withTheme: Function,
contextTypes?: Object
): Function {
return function connect(
rules: Object | Function,
config: ConnectConfig = {}
): Function {
const connectConfig = {
...defaultConfig,
...config,
}
return (component: any): any => {
class EnhancedComponent extends BaseComponent {
static displayName = generateDisplayName(component)
static _isFelaComponent = true
shouldComponentUpdate(nextProps, nextState) {
if (connectConfig.pure) {
return shallowCompare(this, nextProps, nextState)
}
return true
}
render() {
const { renderer } = this.context
const { extend, _felaTheme, _felaRules, ...otherProps } = this.props
const allRules = [rules]
if (_felaRules) {
allRules.push(_felaRules)
}
if (extend) {
allRules.push(extend)
}
const combinedRules = combineMultiRules(...allRules)
const preparedRules = combinedRules(
{
...otherProps,
theme: _felaTheme,
},
renderer
)
// improve developer experience with monolithic renderer
if (
process.env.NODE_ENV !== 'production' &&
renderer.prettySelectors
) {
const componentName =
typeof component === 'string'
? component
: component.displayName || component.name || ''
objectEach(preparedRules, (rule, name) => {
const displayName = rule.name ? rule.name : 'FelaComponent'
rule.selectorPrefix = `${displayName}_${componentName}_${name}_`
})
}
if (component._isFelaComponent) {
return createElement(component, {
_felaRules: combinedRules,
...otherProps,
})
}
const styles = objectReduce(
preparedRules,
(styleMap, rule, name) => {
styleMap[name] = renderer.renderRule(rule, {
...otherProps,
theme: _felaTheme,
})
return styleMap
},
{}
)
const boundRules = objectReduce(
preparedRules,
(ruleMap, rule, name) => {
ruleMap[name] = props =>
rule(
{
theme: _felaTheme,
...props,
},
renderer
)
return ruleMap
},
{}
)
return createElement(component, {
...otherProps,
styles,
rules: boundRules,
})
}
}
if (contextTypes) {
EnhancedComponent.contextTypes = contextTypes
}
const themedComponent = withTheme(EnhancedComponent, '_felaTheme')
return hoistStatics(themedComponent, component)
}
}
}
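// Illustrative usage sketch (assumes a `connect` produced by this factory and a
// hypothetical `Button` component; the rule shape follows the props/theme signature
// used in render() above):
//
//   const rules = ({ theme }) => ({ button: { color: theme.primary } })
//   export default connect(rules, { pure: true })(Button)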
main.js
import Vue from 'vue';
import { PopupManager } from 'element-ui/src/utils/popup';
import { isVNode } from 'element-ui/src/utils/vdom';
let MessageConstructor = Vue.extend(require('./main.vue'));
let instance;
let instances = [];
let seed = 1;
var Message = function(options) {
if (Vue.prototype.$isServer) return;
options = options || {};
if (typeof options === 'string') {
options = {
message: options
};
}
let userOnClose = options.onClose;
let id = 'message_' + seed++;
options.onClose = function() {
Message.close(id, userOnClose);
};
instance = new MessageConstructor({
  data: options
});
instance.id = id;
if (isVNode(instance.message)) {
instance.$slots.default = [instance.message];
instance.message = null;
}
instance.vm = instance.$mount();
document.body.appendChild(instance.vm.$el);
instance.vm.visible = true;
instance.dom = instance.vm.$el;
instance.dom.style.zIndex = PopupManager.nextZIndex();
instances.push(instance);
return instance.vm;
};
['success', 'warning', 'info', 'error'].forEach(type => {
Message[type] = options => {
if (typeof options === 'string') {
options = {
message: options
};
}
options.type = type;
return Message(options);
};
});
Message.close = function(id, userOnClose) {
for (let i = 0, len = instances.length; i < len; i++) {
if (id === instances[i].id) {
if (typeof userOnClose === 'function') {
userOnClose(instances[i]);
}
instances.splice(i, 1);
break;
}
}
};
Message.closeAll = function() {
for (let i = instances.length - 1; i >= 0; i--) {
instances[i].close();
}
};
export default Message;
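// Illustrative usage sketch (message text below is a placeholder):
//
//   Message({ message: 'Saved successfully', type: 'success' })
//   Message.error('Something went wrong')
//   Message.closeAll()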
imageconfig.go
package operator
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
corev1informers "k8s.io/client-go/informers/core/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
configapi "github.com/openshift/api/config/v1"
operatorv1 "github.com/openshift/api/operator/v1"
configset "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
routev1informers "github.com/openshift/client-go/route/informers/externalversions/route/v1"
routev1lister "github.com/openshift/client-go/route/listers/route/v1"
"github.com/openshift/library-go/pkg/operator/v1helpers"
"github.com/openshift/cluster-image-registry-operator/pkg/defaults"
"github.com/openshift/cluster-image-registry-operator/pkg/resource"
)
// ImageConfigController controls image.config.openshift.io/cluster.
//
// Watches for changes on image registry routes and services, updating
// the resource status appropriately.
type ImageConfigController struct {
configClient configset.ConfigV1Interface
operatorClient v1helpers.OperatorClient
routeLister routev1lister.RouteNamespaceLister
serviceLister corev1listers.ServiceNamespaceLister
cachesToSync []cache.InformerSynced
queue workqueue.RateLimitingInterface
}
func NewImageConfigController(
configClient configset.ConfigV1Interface,
operatorClient v1helpers.OperatorClient,
routeInformer routev1informers.RouteInformer,
serviceInformer corev1informers.ServiceInformer,
) *ImageConfigController {
icc := &ImageConfigController{
configClient: configClient,
operatorClient: operatorClient,
routeLister: routeInformer.Lister().Routes(defaults.ImageRegistryOperatorNamespace),
serviceLister: serviceInformer.Lister().Services(defaults.ImageRegistryOperatorNamespace),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ImageConfigController"),
}
serviceInformer.Informer().AddEventHandler(icc.eventHandler())
icc.cachesToSync = append(icc.cachesToSync, serviceInformer.Informer().HasSynced)
routeInformer.Informer().AddEventHandler(icc.eventHandler())
icc.cachesToSync = append(icc.cachesToSync, routeInformer.Informer().HasSynced)
return icc
}
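// Illustrative wiring sketch (clients, informers, and the stop channel are assumed
// to be constructed by the operator's startup code elsewhere):
//
//	icc := NewImageConfigController(configClient, operatorClient, routeInformer, serviceInformer)
//	go icc.Run(stopCh)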
func (icc *ImageConfigController) eventHandler() cache.ResourceEventHandler {
return cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { icc.queue.Add("instance") },
UpdateFunc: func(old, new interface{}) { icc.queue.Add("instance") },
DeleteFunc: func(obj interface{}) { icc.queue.Add("instance") },
}
}
func (icc *ImageConfigController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer icc.queue.ShutDown()
klog.Infof("Starting ImageConfigController")
if !cache.WaitForCacheSync(stopCh, icc.cachesToSync...) {
return
}
go wait.Until(icc.runWorker, time.Second, stopCh)
klog.Infof("Started ImageConfigController")
<-stopCh
klog.Infof("Shutting down ImageConfigController")
}
func (icc *ImageConfigController) runWorker() {
for icc.processNextWorkItem() {
}
}
func (icc *ImageConfigController) processNextWorkItem() bool {
obj, shutdown := icc.queue.Get()
if shutdown {
return false
}
defer icc.queue.Done(obj)
klog.V(1).Infof("get event from workqueue")
if err := icc.sync(); err != nil {
icc.queue.AddRateLimited(workqueueKey)
klog.Errorf("ImageConfigController: unable to sync: %s, requeuing", err)
} else {
icc.queue.Forget(obj)
klog.Infof("ImageConfigController: event from workqueue processed")
}
return true
}
// syncImageStatus keeps image.config.openshift.io/cluster status updated.
func (icc *ImageConfigController) syncImageStatus() error {
cfg, err := icc.configClient.Images().Get(context.TODO(), defaults.ImageConfigName, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
if errors.IsNotFound(err) {
if cfg, err = icc.configClient.Images().Create(
context.TODO(),
&configapi.Image{
ObjectMeta: metav1.ObjectMeta{
Name: defaults.ImageConfigName,
},
},
metav1.CreateOptions{},
); err != nil {
return err
}
}
externalHostnames, err := icc.getRouteHostnames()
if err != nil {
return err
}
internalHostname, err := icc.getServiceHostname()
if err != nil {
return err
}
modified := false
if !reflect.DeepEqual(externalHostnames, cfg.Status.ExternalRegistryHostnames) {
cfg.Status.ExternalRegistryHostnames = externalHostnames
modified = true
}
if cfg.Status.InternalRegistryHostname != internalHostname {
cfg.Status.InternalRegistryHostname = internalHostname
modified = true
}
if modified {
if _, err := icc.configClient.Images().UpdateStatus(context.TODO(), cfg, metav1.UpdateOptions{}); err != nil {
return err
}
}
return nil
}
func (icc *ImageConfigController) sync() error {
err := icc.syncImageStatus()
if err != nil {
_, _, updateError := v1helpers.UpdateStatus(icc.operatorClient, v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{
Type: "ImageConfigControllerDegraded",
Status: operatorv1.ConditionTrue,
Reason: "Error",
Message: err.Error(),
}))
return utilerrors.NewAggregate([]error{err, updateError})
}
_, _, err = v1helpers.UpdateStatus(icc.operatorClient, v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{
Type: "ImageConfigControllerDegraded",
Status: operatorv1.ConditionFalse,
Reason: "AsExpected",
}))
return err
}
// getServiceHostname returns the image registry internal service url if it
// exists, empty string is returned otherwise.
func (icc *ImageConfigController) getServiceHostname() (string, error) {
svc, err := icc.serviceLister.Get(defaults.ServiceName)
if errors.IsNotFound(err) {
return "", nil
}
if err != nil {
return "", err
}
port := ""
if svc.Spec.Ports[0].Port != 443 {
port = fmt.Sprintf(":%d", svc.Spec.Ports[0].Port)
}
return fmt.Sprintf("%s.%s.svc%s", svc.Name, svc.Namespace, port), nil
}
// getRouteHostnames returns all image registry exposed routes.
func (icc *ImageConfigController) getRouteHostnames() ([]string, error) {
var routeNames []string
routes, err := icc.routeLister.List(labels.Everything())
if err != nil {
return nil, err
}
defaultHost := ""
for _, route := range routes {
if !resource.RouteIsCreatedByOperator(route) {
continue
}
for _, ingress := range route.Status.Ingress {
hostname := ingress.Host
if len(hostname) == 0 {
continue
}
defaultHostPrefix := fmt.Sprintf(
"%s-%s",
defaults.RouteName,
defaults.ImageRegistryOperatorNamespace,
)
if strings.HasPrefix(hostname, defaultHostPrefix) {
defaultHost = hostname
continue
}
routeNames = append(routeNames, hostname)
}
}
// ensure a stable order for these values so we don't cause flapping in the
// downstream controllers that watch this array
sort.Strings(routeNames)
// make sure the default route hostname comes first in the list because the
// first entry will be used as the public repository hostname by the cluster
// configuration
if len(defaultHost) > 0 {
routeNames = append([]string{defaultHost}, routeNames...)
}
return routeNames, nil
}
GenerateData.py
# This file will generate a synthetic dataset to predict employee attrition
# Like most datasets it will have a feature vector and a Y label for each instance.
# However, unlike most datasets it will also have an Explanation (E) for each instance, encoded as a non-negative integer.
# This is motivated by the TED framework, but can be used by other explainability algorithms as a metric for explainability
# See the AIES'19 paper by Hind et al for more information on the TED framework.
# See the tutorial notebook TED_Cartesian_test for information about how to use this dataset and the TED framework.
# The comments in this code also provide some insight into how this dataset is generated
import random
from random import choices
import pandas as pd
Any = -99 # This is only applicable in the rule
Low = -1 # These 3, Low, Med, High, can be values in the dataset and are used in the rules
Med = -2
High = -3
Yes = -10 # This is the positive Y label
No = -11 # This is the negative Y label
Random = -12 # This signifies a random choice should be made for the Y label (either Yes or No)
# Features, values, and distribution, details below
featureThresholds = [
# 1 Position: 4(5%), 3(20%), 2(30%), 1(45%)
[4, [0.05, 0.20, 0.30, 0.45]],
# 2 Organization "Org": 3(30%); 2(30%); 1(40%)
[3, [0.30, 0.30, 0.40]],
# 3 Potential "Pot": Yes (50%), No (50%)
[2, [0.50, 0.50]],
# 4 Rating value "Rat": High(15%), Med(80%), Low(5%)
[3, [0.15, 0.80, 0.05]],
# 5 Rating Slope "Slope": High (15%), Med(80%), Low(5%)
[3, [0.15, 0.80, 0.05]],
# 6 Salary Competitiveness "Sal": High (10%); Med(70%); Low(20%)
[3, [0.10, 0.70, 0.20]],
# 7 Tenure Low "TenL" & High Values "TenH": [0..360], 30% in 0..24; 30% in 25..60; 40% in 61..360
[3, [0.30, 0.30, 0.40], [[0, 24], [25, 60], [61, 360]]],
# 8 Position Tenure Low "BTenL" & High Values "BTenH": [0..360], 70% in 0..12; 20% in 13..24; 10% in 25..360
# Position tenure needs to be lower than tenure, ensured in generation code below
[3, [0.70, 0.20, 0.10], [[0, 12], [13, 24], [25, 360]]]
]
# Some convenient population lists
HighMedLowPopulation = [High, Med, Low]
YesNoPopulation = [Yes, No]
Index3Population = [0, 1, 2]
Integer4Population = [4, 3, 2, 1]
Integer3Population = [3, 2, 1]
# Rules used to label a feature vector with a label and an explanation
# Format: features, label, explanation #, Explanation String
RetentionRules = [
#POS ORG Pot RAT Slope SALC TENL H BTEN LH
[Any, 1, Any, High, Any, Low, Any, Any, Any, Any, #0
Yes, 2, "Seeking Higher Salary in Org 1"],
[1, 1, Any, Any, Any, Any, Any, Any, 15, Any, #1
Yes, 3, "Promotion Lag, Org 1, Position 1"],
[2, 1, Any, Any, Any, Any, Any, Any, 15, Any, #2
Yes, 3, "Promotion Lag, Org 1, Position 2"],
[3, 1, Any, Any, Any, Any, Any, Any, 15, Any, #3
Yes, 3, "Promotion Lag, Org 1, Position 3"],
[1, 2, Any, Any, Any, Any, Any, Any, 20, Any, #4
Yes, 4, "Promotion Lag, Org 2, Position 1"],
[2, 2, Any, Any, Any, Any, Any, Any, 20, Any, #5
Yes, 4, "Promotion Lag, Org 2, Position 2"],
[3, 2, Any, Any, Any, Any, Any, Any, 30, Any, #6
Yes, 5, "Promotion Lag, Org 2, Position 3"],
[1, 3, Any, Any, Any, Any, Any, Any, 20, Any, #7
Yes, 6, "Promotion Lag, Org 3, Position 1"],
[2, 3, Any, Any, Any, Any, Any, Any, 30, Any, #8
Yes, 7, "Promotion Lag, Org 3, Position 2"],
[3, 3, Any, Any, Any, Any, Any, Any, 30, Any, #9
Yes, 7, "Promotion Lag, Org 3, Position 3"],
[1, 1, Any, Any, Any, Any, 0, 12, Any, Any, #10
Yes, 8, "New employee, Org 1, Position 1"],
[2, 1, Any, Any, Any, Any, 0, 12, Any, Any, #11
Yes, 8, "New employee, Org 1, Position 2"],
[3, 1, Any, Any, Any, Any, 0, 30, Any, Any, #12
Yes, 9, "New employee, Org 1, Position 3"],
[1, 2, Any, Any, Any, Any, 0, 24, Any, Any, #13
Yes, 10, "New employee, Org 2, Position 1"],
[2, 2, Any, Any, Any, Any, 0, 30, Any, Any, #14
Yes, 11, "New employee, Org 2, Position 2"],
[Any, 1, Any, Low, High, Any, Any, Any, Any, Any, #15
Yes, 13, "Disappointing evaluation, Org 1"],
[Any, 2, Any, Low, High, Any, Any, Any, Any, Any, #16
Yes, 14, "Disappointing evaluation, Org 2"],
[Any, Any, Yes, Med, High, Low, Any, Any, Any, Any, #17
Yes, 15, "Compensation doesn't match evaluations, Med rating"],
[Any, Any, Yes, High, High, Low, Any, Any, Any, Any, #18
Yes, 15, "Compensation doesn't match evaluations, High rating"],
[Any, 1, Yes, Med, High, Med, Any, Any, Any, Any, #19
Yes, 16, "Compensation doesn't match evaluations, Org 1, Med rating"],
[Any, 2, Yes, Med, High, Med, Any, Any, Any, Any, #20
Yes, 16, "Compensation doesn't match evaluations, Org 2, Med rating"],
[Any, 1, Yes, High, High, Med, Any, Any, Any, Any, #21
Yes, 16, "Compensation doesn't match evaluations, Org 1, High rating"],
[Any, 2, Yes, High, High, Med, Any, Any, Any, Any, #22
Yes, 16, "Compensation doesn't match evaluations, Org 2, High rating"],
[Any, 1, Any, Any, Med, Med, 120, 180, Any, Any, #23
Yes, 17, "Mid-career crisis, Org 1"],
[Any, 2, Yes, Any, Any, Med, 130, 190, Any, Any, #24
Yes, 18, "Mid-career crisis, Org 2"]
]
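# Illustrative reading of rule #0 above: any Position, Org 1, any Potential, High rating,
# any slope, Low salary competitiveness, any tenure and position tenure
# ==> label Yes, explanation number 2, "Seeking Higher Salary in Org 1".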
def ruleValToString(val):
""" Convert the value passed into a string """
if val == Any :
return "Any"
elif val == Low :
return "Low"
elif val == Med :
return "Med"
elif val == High :
return "High"
elif val == Yes :
return "Yes"
elif val == No :
return "No"
elif val == Random :
return "Random"
else :
return str(val)
def printFeatureStringHeader() :
""" Print the feature headings """
print(" Feature Headings")
print("[Pos, Org, Pot, Rating, Slope, Salary Competitiveness, Tenure, Position Tenure]")
def featuresToString(featureVector) :
""" Convert a feature vector into is string format"""
val = "["
for i in range(0, 2) : # These features are just ints, Position, Organization
val += str(featureVector[i])
val += " "
for i in range(2, 6) : # show encoding for these: Potential, Rating, Rating Slope, Salary Competitiveness
val += ruleValToString(featureVector[i])
val += " "
for i in range(6, 8) : # These features are just ints: Tenure and Position Tenure
val += str(featureVector[i])
val += " "
val += "]"
return val
def printRule(rule) :
""" Print the passed rule """
print("Rule: ", end='')
for i in rule[0:2]: # ints or Any: Position and Organization
    if i == Any:
        print(ruleValToString(i) + ", ", end='')
    else :
        print(str(i) + ", ", end='')
for i in rule[2:6]: # encoded: Potential, Rating, Rating Slope, Salary Competitiveness
    print(ruleValToString(i) + ", ", end='')
for i in rule[6:10]: # next 4 are ints or Any: Tenure Low, Tenure High, Position Tenure Low, Position Tenure High
    if i == Any :
        print(ruleValToString(i) + ", ", end='')
    else :
        print(str(i) + ", ", end='')
print("==> "+ ruleValToString(rule[10]) + "[" + str(rule[11]) + "] " + str(rule[12]))
def printRules(rules) :
""" print all rules"""
for r in rules:
printRule(r)
########################################################################
def chooseRangeValue(thresholds, rangeList):
""" Generate a random value based on the probability weights (thresholds) and list of ranges passed
Args:
thresholds : list of probabilities for each choice
rangeList: a list of pair lists giving the lower and upper bounds to choose value from
"""
# pick an index 0..2 based on the probability weights
rangeVal = choices(Index3Population, thresholds)
# get the appropriate range given rangeVal
interval = rangeList[rangeVal[0]]
# construct a population list from the result
intervalPopulation = list(range(interval[0], interval[1]))
# construct an equal-probability weights list
numElements = interval[1] - interval[0]
probVal = 1.0 / numElements
probList = [probVal] * numElements
# now choose the value from the population based on the weights
val = choices(intervalPopulation, probList)
return val[0]
def chooseValueAndAppend(instance, population, weights) :
""" Choose a random value from the population using weights list and append it to the passed instance
"""
val = choices(population, weights)
instance.append(val[0])
def generateFeatures(numInstances) :
""" generate the features (X) values for the dataset
Args:
numInstances (int) : number of instances to generate
Returns:
dataset (list of lists) : the dataset with features, but no labels or explanations yet
"""
assert(numInstances > 0)
dataset = []
for i in range(numInstances) :
instance = []
#POS ORG Pot Rating Slope SALC TENL H BTEN LH
chooseValueAndAppend(instance, Integer4Population, featureThresholds[0][1]) # Position
chooseValueAndAppend(instance, Integer3Population, featureThresholds[1][1]) # Org
chooseValueAndAppend(instance, YesNoPopulation, featureThresholds[2][1]) # Potential
chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[3][1]) # Rating
chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[4][1]) # Rating slope
chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[5][1]) # Sal competitiveness
val1 = chooseRangeValue(featureThresholds[6][1], featureThresholds[6][2]) # Tenure
instance.append(val1)
# Position tenure needs to be <= Tenure
val2 = chooseRangeValue(featureThresholds[7][1], featureThresholds[7][2]) # Pos Tenure
if val2 > val1 :
val2 = val1
instance.append(val2)
dataset.append(instance)
return dataset
#####################################################################################################
def match(ruleVal, featureVal) :
""" Check if passed ruleVal matches the featureVal or if ruleVal is Any, which matches everything
"""
# print("Match called: "+ ruleValToString(ruleVal) + " " + ruleValToString(featureVal))
if ruleVal == Any :
return True
return (ruleVal == featureVal)
def intervalMatch(ruleValLower, ruleValUpper, featureVal) :
""" Check to see if featureVal is in the interval defined by [ruleValLower, ruleValUpper)
"""
# Any in lower bound matches all values, (upper bound doesn't matter)
if ruleValLower == Any :
return True
if ruleValLower <= featureVal :
# Any in upper bound means infinity
if featureVal < ruleValUpper or ruleValUpper == Any :
return True
return False
def ruleMatch(rule, featureVector) :
""" Determine if the passed featureVector matches the passed rule
"""
if (False) :
print("ruleMatch called, ", end="")
printRule(rule)
print(" feature vector: " + featuresToString(featureVector) )
for i in range(0, 6) : # loop over first 6 features, 0..5
if not match(rule[i], featureVector[i]) : # if we don't find a feature match, the rule doesn't match
# print("Didn't match feature #", i, ruleValToString(featureVector[i]))
return False
# These features are interval-based, so need a different matching routine
if not intervalMatch(rule[6], rule[7], featureVector[6]) : # rule[6] and rule[7] have the lower and upper bounds of interval
# print("Didn't match feature # 6: ", featureVector[6])
return False
if not intervalMatch(rule[8], rule[9], featureVector[7]) : # rule[8] and rule[9] have the lower and upper bounds of interval
# print("Didn't match feature # 7: ", featureVector[7])
return False
# print("Matched all features")
return True # if we didn't find a non-match by now, we found a match
def findRule(instance, ruleSet) :
""" find the rule(s) that matches the feture vector passed
"""
# print("*Looking for rule match for Feature vector: " + featuresToString(instance))
ruleNumber = 0 # counter to track rule number
ruleMatches = [] # will hold all rule numbers that matched
for rule in ruleSet :
if (ruleMatch(rule, instance)) :
ruleMatches.append(ruleNumber)
counts[ruleNumber] += 1 # update global histogram of rule matches for stats reporting
if (False) :
print(" ruleMatch found at rule #" + str(ruleNumber))
print(" ", end="")
printRule(rule)
ruleNumber += 1
return ruleMatches
def countAnys(rule) :
""" Count the number of Anys in the passed rule. An "Any" is a wildcard that matches all values
"""
count = 0
for feature in RetentionRules[rule] :
if feature == Any :
count += 1
return count
def pickBestRule(ruleList) :
""" Choose the rule with the least number of Any's in it
"""
assert(len(ruleList) > 0)
# print("ruleList: ", ruleList)
minAnys = len(RetentionRules[0]) + 1 # initialize to a value larger than possible # of Anys in a rule
bestRule = -1
for rule in ruleList :
# Count # of Any's in rule # rule
count = countAnys(rule)
if count < minAnys :
minAnys = count
bestRule = rule
assert(bestRule != -1) # We should find a best rule
return bestRule
def addLabelsAndExplanations(dataset, rules) :
""" This function will use a ruleset to add labels (Y) and explanations/rules (E) to a passed dataset
Arg:
dataset (list of lists) : a list of feature vectors (list)
rules (list of lists) : a list of rules
"""
noMatches = 0 # Counters to record how often there are no (Yes) matches, 1 (Yes) match, and multiple (Yes) matches
multiMatches = 0
oneMatches = 0
for instance in dataset :
ruleMatches = findRule(instance, rules)
if len(ruleMatches) == 0 : # We didn't match a (Yes) rule, so this is a No situation
rule = NoRiskRuleNum
label = No
noMatches +=1
elif len(ruleMatches) > 1 : # Matched multiple Yes rules, need to pick one
rule = pickBestRule(ruleMatches)
assert(rule >= 0 and rule < len(rules)) # Ensure rule number is valid
label = Yes
multiMatches += 1
else : # Found 1 Yes rule match, it's the winner
rule = ruleMatches[0]
label = Yes
oneMatches += 1
assert(rule >= 0 and rule < len(rules)) # Ensure rule number is valid
# print("Label: " + ruleValToString(label) + ", Rule: " + ruleValToString(rule))
instance.append(label)
instance.append(rule) # add the label and explanation (rule #) to the featureVector
if (True) :
print("\nRule matching statistics: ")
totalYes = oneMatches + multiMatches
total = oneMatches + multiMatches + noMatches
print(" Yes Labels: {}/{} ({:.2f}%)".format(totalYes, total, totalYes/total*100))
print(" Matched 1 Yes rule: {}/{} ({:.2f}%)".format(oneMatches, totalYes, oneMatches/totalYes*100))
print(" Matched multiple Yes rules: {}/{} ({:.2f}%)".format(multiMatches, totalYes, multiMatches/totalYes*100))
print(" No Laels: {}/{} ({:.2f}%)".format(noMatches, total, noMatches/total*100))
def printRuleUsage(counts, total) :
print("\nHistogram of rule usage:")
ruleNum = 0
for num in counts :
print(" Rule {} was used {} times, {:.2f}%".format(ruleNum, num, num/total*100))
ruleNum += 1
numRetentionRules = len(RetentionRules)
counts = [0]*numRetentionRules
NoRiskRuleNum = numRetentionRules # the No Risk (to leave) rule number is one more than the last rule index [0..]
random.seed(1)
# printFeatureStringHeader()
numInstances = 10000
dataset = generateFeatures(numInstances)
addLabelsAndExplanations(dataset, RetentionRules)
printRuleUsage(counts, numInstances)
# insert TED headers
NumFeatures = len(featureThresholds)
header = list(range(NumFeatures))
header.append("Y")
header.append("E")
dataset.insert(0, header)
# write to csv file
my_df = pd.DataFrame(dataset)
my_df.to_csv('Retention.csv', index=False, header=False)
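# Illustrative sketch for reading the generated file back (not part of the generation
# pipeline; column names follow the TED headers inserted above):
#
#   df = pd.read_csv('Retention.csv')
#   X = df.iloc[:, 0:8]
#   Y = df['Y']
#   E = df['E']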
tests3.rs
// tests3.rs
// This test isn't testing our function -- make it do that in such a way that
// the test passes. Then write a second test that tests whether we get the result
// we expect to get when we call `is_even(5)`.
// Execute `rustlings hint tests3` for hints :)
pub fn is_even(num: i32) -> bool {
num % 2 == 0
}
#[cfg(test)]
mod tests {
    use super::*;
#[test]
fn is_true_when_even() {
assert!(is_even(8));
}
#[test]
fn is_false_when_odd() {
assert!(!is_even(5));
}
}
mainThreadWorkspace.ts
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { isPromiseCanceledError } from 'vs/base/common/errors';
import URI from 'vs/base/common/uri';
import { ISearchService, QueryType, ISearchQuery, ISearchProgressItem, ISearchComplete } from 'vs/platform/search/common/search';
import { IWorkspaceContextService, WorkbenchState } from 'vs/platform/workspace/common/workspace';
import { ITextFileService } from 'vs/workbench/services/textfile/common/textfiles';
import { TPromise, PPromise } from 'vs/base/common/winjs.base';
import { MainThreadWorkspaceShape, ExtHostWorkspaceShape, ExtHostContext, MainContext, IExtHostContext } from '../node/extHost.protocol';
import { IFileService } from 'vs/platform/files/common/files';
import { IDisposable, dispose, combinedDisposable } from 'vs/base/common/lifecycle';
import { RemoteFileService } from 'vs/workbench/services/files/electron-browser/remoteFileService';
import { Emitter } from 'vs/base/common/event';
import { extHostNamedCustomer } from 'vs/workbench/api/electron-browser/extHostCustomers';
import { IExperimentService } from 'vs/platform/telemetry/common/experiments';
@extHostNamedCustomer(MainContext.MainThreadWorkspace)
export class MainThreadWorkspace implements MainThreadWorkspaceShape {
private readonly _toDispose: IDisposable[] = [];
private readonly _activeSearches: { [id: number]: TPromise<URI[]> } = Object.create(null);
private readonly _proxy: ExtHostWorkspaceShape;
constructor(
extHostContext: IExtHostContext,
@ISearchService private readonly _searchService: ISearchService,
@IWorkspaceContextService private readonly _contextService: IWorkspaceContextService,
@ITextFileService private readonly _textFileService: ITextFileService,
@IExperimentService private experimentService: IExperimentService,
@IFileService private readonly _fileService: IFileService
) {
this._proxy = extHostContext.get(ExtHostContext.ExtHostWorkspace);
this._contextService.onDidChangeWorkbenchState(this._onDidChangeWorkspaceState, this, this._toDispose);
}
dispose(): void {
dispose(this._toDispose);
for (let requestId in this._activeSearches) {
const search = this._activeSearches[requestId];
search.cancel();
}
}
// --- workspace ---
private _onDidChangeWorkspaceState(): void {
this._proxy.$acceptWorkspaceData(this._contextService.getWorkbenchState() === WorkbenchState.EMPTY ? null : this._contextService.getWorkspace());
}
// --- search ---
$startSearch(include: string, exclude: string, maxResults: number, requestId: number): Thenable<URI[]> {
const workspace = this._contextService.getWorkspace();
if (!workspace.folders.length) {
return undefined;
}
const query: ISearchQuery = {
folderQueries: workspace.folders.map(folder => ({ folder: folder.uri })),
type: QueryType.File,
maxResults,
includePattern: { [include]: true },
excludePattern: { [exclude]: true },
useRipgrep: this.experimentService.getExperiments().ripgrepQuickSearch
};
this._searchService.extendQuery(query);
const search = this._searchService.search(query).then(result => {
return result.results.map(m => m.resource);
}, err => {
if (!isPromiseCanceledError(err)) {
return TPromise.wrapError(err);
}
return undefined;
});
this._activeSearches[requestId] = search;
const onDone = () => delete this._activeSearches[requestId];
search.done(onDone, onDone);
return search;
}
$cancelSearch(requestId: number): Thenable<boolean> {
const search = this._activeSearches[requestId];
if (search) {
delete this._activeSearches[requestId];
search.cancel();
return TPromise.as(true);
}
return undefined;
}
// --- save & edit resources ---
$saveAll(includeUntitled?: boolean): Thenable<boolean> {
return this._textFileService.saveAll(includeUntitled).then(result => {
return result.results.every(each => each.success === true);
});
}
// --- EXPERIMENT: workspace provider
private _idPool: number = 0;
private readonly _provider = new Map<number, [IDisposable, Emitter<URI>]>();
private readonly _searchSessions = new Map<number, { resolve: (result: ISearchComplete) => void, reject: Function, progress: (item: ISearchProgressItem) => void, matches: URI[] }>();
$registerFileSystemProvider(handle: number, authority: string): void {
if (!(this._fileService instanceof RemoteFileService)) {
throw new Error();
}
const emitter = new Emitter<URI>();
const provider = {
onDidChange: emitter.event,
resolve: (resource: URI) => {
return this._proxy.$resolveFile(handle, resource);
},
update: (resource: URI, value: string) => {
return this._proxy.$storeFile(handle, resource, value);
}
};
const searchProvider = {
search: (query: ISearchQuery) => {
if (query.type !== QueryType.File) {
return undefined;
}
const session = ++this._idPool;
return new PPromise<any, any>((resolve, reject, progress) => {
this._searchSessions.set(session, { resolve, reject, progress, matches: [] });
this._proxy.$startSearch(handle, session, query.filePattern);
}, () => {
this._proxy.$cancelSearch(handle, session);
});
}
};
const registrations = combinedDisposable([
this._fileService.registerProvider(authority, provider),
this._searchService.registerSearchResultProvider(searchProvider),
]);
this._provider.set(handle, [registrations, emitter]);
}
$unregisterFileSystemProvider(handle: number): void {
if (this._provider.has(handle)) {
dispose(this._provider.get(handle)[0]);
this._provider.delete(handle);
}
}
$onFileSystemChange(handle: number, resource: URI) {
const [, emitter] = this._provider.get(handle);
emitter.fire(resource);
};
$updateSearchSession(session: number, data: URI): void {
if (this._searchSessions.has(session)) {
this._searchSessions.get(session).progress({ resource: data });
this._searchSessions.get(session).matches.push(data);
}
}
$finishSearchSession(session: number, err?: any): void {
if (this._searchSessions.has(session)) {
const { matches, resolve, reject } = this._searchSessions.get(session);
this._searchSessions.delete(session);
if (err) {
reject(err);
} else {
resolve({
limitHit: false,
stats: undefined,
results: matches.map(resource => ({ resource }))
});
}
}
}
}
operations.rs
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub mod role_assignment_schedules {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
scope: &str,
role_assignment_schedule_name: &str,
) -> std::result::Result<RoleAssignmentSchedule, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleAssignmentSchedules/{}",
operation_config.base_path(),
scope,
role_assignment_schedule_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleAssignmentSchedule =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
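// Illustrative call sketch (the `operation_config` value and the scope / schedule name
// strings are placeholders; OperationConfig construction is assumed to happen elsewhere):
//
//     let schedule = role_assignment_schedules::get(
//         &operation_config,
//         "subscriptions/00000000-0000-0000-0000-000000000000",
//         "example-schedule",
//     )
//     .await?;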
pub async fn list_for_scope(
operation_config: &crate::OperationConfig,
scope: &str,
filter: Option<&str>,
) -> std::result::Result<RoleAssignmentScheduleListResult, list_for_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleAssignmentSchedules",
operation_config.base_path(),
scope
);
let mut url = url::Url::parse(url_str).context(list_for_scope::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_for_scope::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_for_scope::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(list_for_scope::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleAssignmentScheduleListResult =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
list_for_scope::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_for_scope {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod role_assignment_schedule_instances {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn list_for_scope(
operation_config: &crate::OperationConfig,
scope: &str,
filter: Option<&str>,
) -> std::result::Result<RoleAssignmentScheduleInstanceListResult, list_for_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleAssignmentScheduleInstances",
operation_config.base_path(),
scope
);
let mut url = url::Url::parse(url_str).context(list_for_scope::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_for_scope::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_for_scope::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(list_for_scope::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleAssignmentScheduleInstanceListResult =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
list_for_scope::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_for_scope {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
scope: &str,
role_assignment_schedule_instance_name: &str,
) -> std::result::Result<RoleAssignmentScheduleInstance, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleAssignmentScheduleInstances/{}",
operation_config.base_path(),
scope,
role_assignment_schedule_instance_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleAssignmentScheduleInstance =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod role_assignment_schedule_requests {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
scope: &str,
role_assignment_schedule_request_name: &str,
) -> std::result::Result<RoleAssignmentScheduleRequest, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleAssignmentScheduleRequests/{}",
operation_config.base_path(),
scope,
role_assignment_schedule_request_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleAssignmentScheduleRequest =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
scope: &str,
role_assignment_schedule_request_name: &str,
parameters: &RoleAssignmentScheduleRequest,
) -> std::result::Result<RoleAssignmentScheduleRequest, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleAssignmentScheduleRequests/{}",
operation_config.base_path(),
scope,
role_assignment_schedule_request_name
);
let mut url = url::Url::parse(url_str).context(create::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).context(create::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(create::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: RoleAssignmentScheduleRequest =
serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?;
create::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn list_for_scope(
operation_config: &crate::OperationConfig,
scope: &str,
filter: Option<&str>,
) -> std::result::Result<RoleAssignmentScheduleRequestListResult, list_for_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleAssignmentScheduleRequests",
operation_config.base_path(),
scope
);
let mut url = url::Url::parse(url_str).context(list_for_scope::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_for_scope::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_for_scope::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(list_for_scope::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleAssignmentScheduleRequestListResult =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
list_for_scope::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_for_scope {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn cancel(
operation_config: &crate::OperationConfig,
scope: &str,
role_assignment_schedule_request_name: &str,
) -> std::result::Result<(), cancel::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleAssignmentScheduleRequests/{}/cancel",
operation_config.base_path(),
scope,
role_assignment_schedule_request_name
);
let mut url = url::Url::parse(url_str).context(cancel::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(cancel::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(cancel::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(cancel::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(cancel::DeserializeError { body: rsp_body.clone() })?;
cancel::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod cancel {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
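// Illustrative call site (a sketch, not generated code): assuming an
// `OperationConfig` built elsewhere in this crate and a hypothetical pending
// request named "example-request" at a placeholder subscription scope,
// cancelling it might look roughly like:
//
//     role_assignment_schedule_requests::cancel(
//         &operation_config,
//         "subscriptions/00000000-0000-0000-0000-000000000000",
//         "example-request",
//     )
//     .await?;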
pub mod role_eligibility_schedules {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
scope: &str,
role_eligibility_schedule_name: &str,
) -> std::result::Result<RoleEligibilitySchedule, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleEligibilitySchedules/{}",
operation_config.base_path(),
scope,
role_eligibility_schedule_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleEligibilitySchedule =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn list_for_scope(
operation_config: &crate::OperationConfig,
scope: &str,
filter: Option<&str>,
) -> std::result::Result<RoleEligibilityScheduleListResult, list_for_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleEligibilitySchedules",
operation_config.base_path(),
scope
);
let mut url = url::Url::parse(url_str).context(list_for_scope::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_for_scope::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_for_scope::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(list_for_scope::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleEligibilityScheduleListResult =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
list_for_scope::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_for_scope {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
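// Illustrative call site (a sketch, not generated code): listing eligibility
// schedules for a placeholder subscription scope, with an optional $filter,
// might look roughly like:
//
//     let schedules = role_eligibility_schedules::list_for_scope(
//         &operation_config,
//         "subscriptions/00000000-0000-0000-0000-000000000000",
//         Some("asTarget()"),
//     )
//     .await?;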
pub mod role_eligibility_schedule_instances {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn list_for_scope(
operation_config: &crate::OperationConfig,
scope: &str,
filter: Option<&str>,
) -> std::result::Result<RoleEligibilityScheduleInstanceListResult, list_for_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleEligibilityScheduleInstances",
operation_config.base_path(),
scope
);
let mut url = url::Url::parse(url_str).context(list_for_scope::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_for_scope::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_for_scope::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(list_for_scope::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleEligibilityScheduleInstanceListResult =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
list_for_scope::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_for_scope {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
scope: &str,
role_eligibility_schedule_instance_name: &str,
) -> std::result::Result<RoleEligibilityScheduleInstance, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleEligibilityScheduleInstances/{}",
operation_config.base_path(),
scope,
role_eligibility_schedule_instance_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleEligibilityScheduleInstance =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod role_eligibility_schedule_requests {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
scope: &str,
role_eligibility_schedule_request_name: &str,
) -> std::result::Result<RoleEligibilityScheduleRequest, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleEligibilityScheduleRequests/{}",
operation_config.base_path(),
scope,
role_eligibility_schedule_request_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleEligibilityScheduleRequest =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
scope: &str,
role_eligibility_schedule_request_name: &str,
parameters: &RoleEligibilityScheduleRequest,
) -> std::result::Result<RoleEligibilityScheduleRequest, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleEligibilityScheduleRequests/{}",
operation_config.base_path(),
scope,
role_eligibility_schedule_request_name
);
let mut url = url::Url::parse(url_str).context(create::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).context(create::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(create::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: RoleEligibilityScheduleRequest =
serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?;
create::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn list_for_scope(
operation_config: &crate::OperationConfig,
scope: &str,
filter: Option<&str>,
) -> std::result::Result<RoleEligibilityScheduleRequestListResult, list_for_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleEligibilityScheduleRequests",
operation_config.base_path(),
scope
);
let mut url = url::Url::parse(url_str).context(list_for_scope::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_for_scope::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_for_scope::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(list_for_scope::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK =>
|
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
list_for_scope::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_for_scope {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn cancel(
operation_config: &crate::OperationConfig,
scope: &str,
role_eligibility_schedule_request_name: &str,
) -> std::result::Result<(), cancel::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleEligibilityScheduleRequests/{}/cancel",
operation_config.base_path(),
scope,
role_eligibility_schedule_request_name
);
let mut url = url::Url::parse(url_str).context(cancel::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(cancel::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(cancel::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(cancel::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(cancel::DeserializeError { body: rsp_body.clone() })?;
cancel::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod cancel {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod role_management_policies {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
scope: &str,
role_management_policy_name: &str,
) -> std::result::Result<RoleManagementPolicy, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleManagementPolicies/{}",
operation_config.base_path(),
scope,
role_management_policy_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleManagementPolicy =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
scope: &str,
role_management_policy_name: &str,
parameters: &RoleManagementPolicy,
) -> std::result::Result<RoleManagementPolicy, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleManagementPolicies/{}",
operation_config.base_path(),
scope,
role_management_policy_name
);
let mut url = url::Url::parse(url_str).context(create::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).context(create::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(create::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: RoleManagementPolicy =
serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?;
create::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
scope: &str,
role_management_policy_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleManagementPolicies/{}",
operation_config.base_path(),
scope,
role_management_policy_name
);
let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn list_for_scope(
operation_config: &crate::OperationConfig,
scope: &str,
) -> std::result::Result<RoleManagementPolicyListResult, list_for_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleManagementPolicies",
operation_config.base_path(),
scope
);
let mut url = url::Url::parse(url_str).context(list_for_scope::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_for_scope::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_for_scope::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(list_for_scope::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleManagementPolicyListResult =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
list_for_scope::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_for_scope {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
pub mod role_management_policy_assignments {
use crate::models::*;
use snafu::{ResultExt, Snafu};
pub async fn get(
operation_config: &crate::OperationConfig,
scope: &str,
role_management_policy_assignment_name: &str,
) -> std::result::Result<RoleManagementPolicyAssignment, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleManagementPolicyAssignments/{}",
operation_config.base_path(),
scope,
role_management_policy_assignment_name
);
let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(get::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(get::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleManagementPolicyAssignment =
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
scope: &str,
role_management_policy_assignment_name: &str,
parameters: &RoleManagementPolicyAssignment,
) -> std::result::Result<RoleManagementPolicyAssignment, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleManagementPolicyAssignments/{}",
operation_config.base_path(),
scope,
role_management_policy_assignment_name
);
let mut url = url::Url::parse(url_str).context(create::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(create::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = azure_core::to_json(parameters).context(create::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(create::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(create::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: RoleManagementPolicyAssignment =
serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?;
create::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
scope: &str,
role_management_policy_assignment_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleManagementPolicyAssignments/{}",
operation_config.base_path(),
scope,
role_management_policy_assignment_name
);
let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
pub async fn list_for_scope(
operation_config: &crate::OperationConfig,
scope: &str,
) -> std::result::Result<RoleManagementPolicyAssignmentListResult, list_for_scope::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Authorization/roleManagementPolicyAssignments",
operation_config.base_path(),
scope
);
let mut url = url::Url::parse(url_str).context(list_for_scope::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.context(list_for_scope::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).context(list_for_scope::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.context(list_for_scope::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: RoleManagementPolicyAssignmentListResult =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: CloudError =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
list_for_scope::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_for_scope {
use crate::{models, models::*};
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
ParseUrlError {
source: url::ParseError,
},
BuildRequestError {
source: http::Error,
},
ExecuteRequestError {
source: Box<dyn std::error::Error + Sync + Send>,
},
SerializeError {
source: Box<dyn std::error::Error + Sync + Send>,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
|
{
let rsp_body = rsp.body();
let rsp_value: RoleEligibilityScheduleRequestListResult =
serde_json::from_slice(rsp_body).context(list_for_scope::DeserializeError { body: rsp_body.clone() })?;
Ok(rsp_value)
}
|
test_objectpropertyaxioms.py
|
import unittest
from tests.utils.base import TestBase
class ObjectPropertyAxiomsTestCase(TestBase):
@unittest.expectedFailure
def test_something(self):
self.assertEqual(True, False)
|
if __name__ == '__main__':
unittest.main()
| |
random.rs
|
use core::ops::{Div};
use num_traits::Float;
use rand::Rng;
use rand_distr::Distribution;
use super::*;
pub use rand_distr::StandardNormal;
/// Distribution that only guarantees to produce an element whose norm is greater than epsilon.
pub struct NonZero;
/// Distribution that provides points uniformly distributed on the N-dimensional sphere,
/// where N is the number of dimensions of a specified hypercomplex number.
pub struct Unit;
impl<T, U> Distribution<Construct<T, U>> for StandardNormal where StandardNormal: Distribution<U> {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Construct<T, U> {
Construct::new(rng.sample(Self), rng.sample(Self))
}
}
impl<T: Float, U: NormSqr<Output=T> + Clone> Distribution<Construct<T, U>> for NonZero where StandardNormal: Distribution<Construct<T, U>> {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Construct<T, U> {
loop {
let x = rng.sample(&StandardNormal);
if x.clone().norm() > T::epsilon() {
break x;
}
}
}
}
impl<T: Float, U: NormSqr<Output=T> + Div<T, Output=U> + Clone> Distribution<Construct<T, U>> for Unit where NonZero: Distribution<Construct<T, U>> {
fn
|
<R: Rng + ?Sized>(&self, rng: &mut R) -> Construct<T, U> {
rng.sample(&NonZero).normalize()
}
}
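// Illustrative usage (a sketch, not part of the original file): assuming a
// concrete `Construct<f64, f64>` satisfies the bounds above, drawing a point
// on the unit sphere might look roughly like:
//
//     let mut rng = rand::thread_rng();
//     let u: Construct<f64, f64> = rng.sample(&Unit); // u.norm() is ~1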
|
sample
|
parsers.py
|
import logging
import re
from PyCRC.CRC16 import CRC16
from dsmr_parser.objects import MBusObject, CosemObject
from dsmr_parser.exceptions import ParseError, ParseContentError, InvalidChecksumError, NoChecksumError
logger = logging.getLogger(__name__)
class TelegramParser(object):
def __init__(self, telegram_specification, apply_checksum_validation=True):
"""
:param telegram_specification: determines how the telegram is parsed
:param apply_checksum_validation: validate checksum if applicable for
telegram DSMR version (v4 and up).
:type telegram_specification: dict
"""
self.telegram_specification = telegram_specification
self.apply_checksum_validation = apply_checksum_validation
def parse(self, telegram_data):
"""
Parse telegram from string to dict.
The telegram str type makes python 2.x integration easier.
:param str telegram_data: full telegram from start ('/') to checksum
('!ABCD') including line endings in between the telegram's lines
:rtype: dict
:returns: Shortened example:
{
..
r'\d-\d:96\.1\.1.+?\r\n': <CosemObject>, # EQUIPMENT_IDENTIFIER
r'\d-\d:1\.8\.1.+?\r\n': <CosemObject>, # ELECTRICITY_USED_TARIFF_1
r'\d-\d:24\.3\.0.+?\r\n.+?\r\n': <MBusObject>, # GAS_METER_READING
..
}
:raises ParseError:
:raises InvalidChecksumError:
"""
if self.apply_checksum_validation \
and self.telegram_specification['checksum_support']:
self.validate_checksum(telegram_data)
telegram = {}
for signature, parser in self.telegram_specification['objects'].items():
match = re.search(signature, telegram_data, re.DOTALL)
# Some signatures are optional and may not be present,
# so only parse lines that match
if match:
telegram[signature] = parser.parse(match.group(0))
return telegram
@staticmethod
def validate_checksum(telegram):
"""
:param str telegram:
:raises ParseError:
:raises InvalidChecksumError:
"""
# Extract the part for which the checksum applies.
checksum_contents = re.search(r'\/.+\!', telegram, re.DOTALL)
# Extract the hexadecimal checksum value itself.
# The line ending '\r\n' for the checksum line can be ignored.
checksum_hex = re.search(r'((?<=\!)[0-9A-Z]{4})+', telegram)
if not checksum_contents:
raise ParseContentError(
'Failed to perform CRC validation because the telegram is '
'incomplete: The content value is missing.'
)
elif checksum_contents and not checksum_hex:
raise NoChecksumError(
'Failed to perform CRC validation because the telegram is '
'incomplete: The CRC is missing.'
)
calculated_crc = CRC16().calculate(checksum_contents.group(0))
expected_crc = int(checksum_hex.group(0), base=16)
if calculated_crc != expected_crc:
raise InvalidChecksumError(
"Invalid telegram. The CRC checksum '{}' does not match the "
"expected '{}'".format(
calculated_crc,
expected_crc
)
)
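# Illustrative usage (a sketch, not part of the original module): assuming a
# `telegram_specification` dict such as the ones bundled with dsmr_parser and a
# raw telegram string `telegram_data` read from the meter, parsing might look
# roughly like:
#
#     parser = TelegramParser(telegram_specification)
#     telegram = parser.parse(telegram_data)
#     for signature, dsmr_object in telegram.items():
#         print(signature, dsmr_object)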
class DSMRObjectParser(object):
"""
Parses an object (can also be seen as a 'line') from a telegram.
"""
def
|
(self, *value_formats):
self.value_formats = value_formats
def _parse(self, line):
# Match value groups, but exclude the parentheses
pattern = re.compile(r'((?<=\()[0-9a-zA-Z\.\*]{0,}(?=\)))+')
values = re.findall(pattern, line)
# Convert empty value groups to None for clarity.
values = [None if value == '' else value for value in values]
if not values or len(values) != len(self.value_formats):
raise ParseError("Invalid '%s' line for '%s'", line, self)
return [self.value_formats[i].parse(value)
for i, value in enumerate(values)]
class MBusParser(DSMRObjectParser):
"""
Gas meter value parser.
These are lines with a timestamp and gas meter value.
Line format:
'ID (TST) (Mv1*U1)'
1 2 3 4
1) OBIS Reduced ID-code
2) Time Stamp (TST) of capture time of measurement value
3) Measurement value 1 (most recent entry of buffer attribute without unit)
4) Unit of measurement values (Unit of capture objects attribute)
"""
def parse(self, line):
return MBusObject(self._parse(line))
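# For illustration only (hypothetical example, not from the original source): a
# gas meter line such as '0-1:24.2.1(150117180000W)(00672.150*m3)' follows the
# 'ID (TST) (Mv1*U1)' shape described above and would be wrapped in an MBusObject.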
class CosemParser(DSMRObjectParser):
"""
Cosem object parser.
These are data objects with a single value that optionally have a unit of
measurement.
Line format:
ID (Mv*U)
1 23 45
1) OBIS Reduced ID-code
2) Separator "(", ASCII 28h
3) COSEM object attribute value
4) Unit of measurement values (Unit of capture objects attribute) - only if
applicable
5) Separator ")", ASCII 29h
"""
def parse(self, line):
return CosemObject(self._parse(line))
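# For illustration only (hypothetical example): a line such as
# '1-0:1.8.1(000581.161*kWh)' has the ID (Mv*U) shape described above; with a
# float coerce_type it would parse to a value of 581.161 with unit 'kWh'.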
class ProfileGenericParser(DSMRObjectParser):
"""
Power failure log parser.
These are data objects with multiple repeating groups of values.
Line format:
ID (z) (ID1) (TST) (Bv1*U1) (TST) (Bvz*Uz)
1 2 3 4 5 6 7 8 9
1) OBIS Reduced ID-code
2) Number of values z (max 10).
3) Identifications of buffer values (OBIS Reduced ID codes of capture objects attribute)
4) Time Stamp (TST) of power failure end time
5) Buffer value 1 (most recent entry of buffer attribute without unit)
6) Unit of buffer values (Unit of capture objects attribute)
7) Time Stamp (TST) of power failure end time
8) Buffer value 2 (oldest entry of buffer attribute without unit)
9) Unit of buffer values (Unit of capture objects attribute)
"""
def parse(self, line):
raise NotImplementedError()
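# For illustration only (hypothetical example): a power failure event log line
# such as '1-0:99.97.0(2)(0-0:96.7.19)(101208152415W)(0000000240*s)(101208151004W)(0000000301*s)'
# follows the repeating (TST)(Bv*U) groups described above; parsing it is left
# unimplemented here.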
class ValueParser(object):
"""
Parses a single value from DSMRObject's.
Example with coerce_type being int:
(002*A) becomes {'value': 2, 'unit': 'A'}
Example with coerce_type being str:
(42) becomes {'value': '42', 'unit': None}
"""
def __init__(self, coerce_type):
self.coerce_type = coerce_type
def parse(self, value):
unit_of_measurement = None
if value and '*' in value:
value, unit_of_measurement = value.split('*')
# A value group is not required to have a value, and then coercing does
# not apply.
value = self.coerce_type(value) if value is not None else value
return {
'value': value,
'unit': unit_of_measurement
}
|
__init__
|
plateSearchModal.tsx
|
import * as React from "react";
import Modal, { XButton } from "../index";
import lang from "../../../language.json";
import Vehicle from "../../../interfaces/Vehicle";
import State from "../../../interfaces/State";
import AlertMessage from "../../alert-message";
import { searchPlate } from "../../../lib/actions/officer";
import { connect } from "react-redux";
import { Item, Span } from "../../../pages/citizen/citizen-info";
export interface Search extends Vehicle {
type: "plate";
}
interface Props {
search: Search;
searchPlate: (plate: string) => void;
}
const PlateSearchModal: React.FC<Props> = ({ search, searchPlate }) => {
const [plate, setPlate] = React.useState("");
const btnRef = React.createRef<HTMLButtonElement>();
function onSubmit(e: React.FormEvent) {
e.preventDefault();
searchPlate(plate);
}
return (
<Modal size="lg" id="plateSearchModal">
<div className="modal-header">
<h5 className="modal-title">{lang.global.plate_search}</h5>
<XButton ref={btnRef}></XButton>
</div>
<form onSubmit={onSubmit}>
<div className="modal-body">
<div className="mb-3">
<label className="form-label" htmlFor="plate">
{window.lang.officers.plate_or_vin}
</label>
<input
type="search"
className="form-control bg-secondary border-secondary text-light"
|
</div>
{search !== null && search?.type === "plate" ? (
search?.plate ? (
<div className="mt-3">
<Item id="plate">
<Span>{lang.global.plate}: </Span>
{search.plate.toUpperCase()}
</Item>
<Item id="vehicle">
<Span>{lang.global.vehicle}: </Span>
{search.vehicle}
</Item>
<Item id="owner">
<Span>{lang.record.owner}: </Span>
{search.owner}
</Item>
<Item id="vin_number">
<Span>{lang.record.vin_number}: </Span>
{search.vin_number}
</Item>
<Item id="color">
<Span>{lang.global.color}: </Span>
{search.color}
</Item>
<Item id="in_status">
<Span>{lang.citizen.vehicle.status}: </Span>
{search.in_status}
</Item>
<Item id="company">
<Span>{lang.citizen.weapon.company}: </Span>
{search.company}
</Item>
</div>
) : (
<AlertMessage message={{ msg: lang.record.no_plate, type: "warning" }} />
)
) : null}
</div>
<div className="modal-footer">
<button type="button" className="btn btn-secondary" data-bs-dismiss="modal">
{lang.global.close}
</button>
<button type="submit" disabled={plate === ""} className="btn btn-primary">
{lang.global.search}
</button>
</div>
</form>
</Modal>
);
};
const mapToProps = (state: State) => ({
search: state.officers.search,
});
export default connect(mapToProps, { searchPlate })(PlateSearchModal);
|
id="plate"
onChange={(e) => setPlate(e.target.value)}
/>
|
main.go
|
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"log"
"net/http"
"time"
// Injection related imports.
kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
"knative.dev/pkg/profiling"
"go.opencensus.io/stats/view"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/configmap"
"knative.dev/pkg/logging"
"knative.dev/pkg/logging/logkey"
"knative.dev/pkg/metrics"
"knative.dev/pkg/signals"
"knative.dev/pkg/system"
"knative.dev/pkg/version"
"knative.dev/pkg/webhook"
autoscalingv1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1"
net "knative.dev/serving/pkg/apis/networking/v1alpha1"
v1 "knative.dev/serving/pkg/apis/serving/v1"
"knative.dev/serving/pkg/apis/serving/v1alpha1"
"knative.dev/serving/pkg/apis/serving/v1beta1"
"knative.dev/serving/pkg/deployment"
"knative.dev/serving/pkg/gc"
"knative.dev/serving/pkg/network"
// config validation constructors
tracingconfig "knative.dev/pkg/tracing/config"
defaultconfig "knative.dev/serving/pkg/apis/config"
"knative.dev/serving/pkg/autoscaler"
metricsconfig "knative.dev/serving/pkg/metrics"
certconfig "knative.dev/serving/pkg/reconciler/certificate/config"
istioconfig "knative.dev/serving/pkg/reconciler/ingress/config"
domainconfig "knative.dev/serving/pkg/reconciler/route/config"
)
const (
component = "webhook"
)
var (
masterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
)
func main()
|
{
flag.Parse()
// Set up signals so we handle the first shutdown signal gracefully.
ctx := signals.NewContext()
// Report stats on Go memory usage every 30 seconds.
msp := metrics.NewMemStatsAll()
msp.Start(ctx, 30*time.Second)
if err := view.Register(msp.DefaultViews()...); err != nil {
log.Fatalf("Error exporting go memstats view: %v", err)
}
cfg, err := sharedmain.GetConfig(*masterURL, *kubeconfig)
if err != nil {
log.Fatal("Failed to get cluster config:", err)
}
log.Printf("Registering %d clients", len(injection.Default.GetClients()))
log.Printf("Registering %d informer factories", len(injection.Default.GetInformerFactories()))
log.Printf("Registering %d informers", len(injection.Default.GetInformers()))
ctx, _ = injection.Default.SetupInformers(ctx, cfg)
kubeClient := kubeclient.Get(ctx)
config, err := sharedmain.GetLoggingConfig(ctx)
if err != nil {
log.Fatal("Error loading/parsing logging configuration:", err)
}
logger, atomicLevel := logging.NewLoggerFromConfig(config, component)
defer logger.Sync()
logger = logger.With(zap.String(logkey.ControllerType, component))
if err := version.CheckMinimumVersion(kubeClient.Discovery()); err != nil {
logger.Fatalw("Version check failed", zap.Error(err))
}
profilingHandler := profiling.NewHandler(logger, false)
// Watch the logging config map and dynamically update logging levels.
configMapWatcher := configmap.NewInformedWatcher(kubeClient, system.Namespace())
// Watch the observability config map and dynamically update request logs.
configMapWatcher.Watch(logging.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, component))
// Watch the observability config map
configMapWatcher.Watch(metrics.ConfigMapName(),
metrics.UpdateExporterFromConfigMap(component, logger),
profilingHandler.UpdateFromConfigMap)
store := defaultconfig.NewStore(logger.Named("config-store"))
store.WatchConfigs(configMapWatcher)
if err = configMapWatcher.Start(ctx.Done()); err != nil {
logger.Fatalw("Failed to start the ConfigMap watcher", zap.Error(err))
}
options := webhook.ControllerOptions{
ServiceName: "webhook",
Namespace: system.Namespace(),
Port: 8443,
SecretName: "webhook-certs",
// Leave this resource name unprefixed for compatibility with <0.9
// TODO(mattmoor): This can be changed after 0.10, once the lifecycle of
// this object is not managed by OwnerReferences.
ResourceMutatingWebhookName: "webhook.serving.knative.dev",
ResourceAdmissionControllerPath: "/",
ConfigValidationWebhookName: "config.webhook.serving.knative.dev",
ConfigValidationControllerPath: "/config-validation",
}
resourceHandlers := map[schema.GroupVersionKind]webhook.GenericCRD{
v1alpha1.SchemeGroupVersion.WithKind("Revision"): &v1alpha1.Revision{},
v1alpha1.SchemeGroupVersion.WithKind("Configuration"): &v1alpha1.Configuration{},
v1alpha1.SchemeGroupVersion.WithKind("Route"): &v1alpha1.Route{},
v1alpha1.SchemeGroupVersion.WithKind("Service"): &v1alpha1.Service{},
v1beta1.SchemeGroupVersion.WithKind("Revision"): &v1beta1.Revision{},
v1beta1.SchemeGroupVersion.WithKind("Configuration"): &v1beta1.Configuration{},
v1beta1.SchemeGroupVersion.WithKind("Route"): &v1beta1.Route{},
v1beta1.SchemeGroupVersion.WithKind("Service"): &v1beta1.Service{},
v1.SchemeGroupVersion.WithKind("Revision"): &v1.Revision{},
v1.SchemeGroupVersion.WithKind("Configuration"): &v1.Configuration{},
v1.SchemeGroupVersion.WithKind("Route"): &v1.Route{},
v1.SchemeGroupVersion.WithKind("Service"): &v1.Service{},
autoscalingv1alpha1.SchemeGroupVersion.WithKind("PodAutoscaler"): &autoscalingv1alpha1.PodAutoscaler{},
autoscalingv1alpha1.SchemeGroupVersion.WithKind("Metric"): &autoscalingv1alpha1.Metric{},
net.SchemeGroupVersion.WithKind("Certificate"): &net.Certificate{},
net.SchemeGroupVersion.WithKind("Ingress"): &net.Ingress{},
net.SchemeGroupVersion.WithKind("ServerlessService"): &net.ServerlessService{},
}
configHandlers := configmap.Constructors{
tracingconfig.ConfigName: tracingconfig.NewTracingConfigFromConfigMap,
autoscaler.ConfigName: autoscaler.NewConfigFromConfigMap,
certconfig.CertManagerConfigName: certconfig.NewCertManagerConfigFromConfigMap,
gc.ConfigName: gc.NewConfigFromConfigMapFunc(ctx),
network.ConfigName: network.NewConfigFromConfigMap,
istioconfig.IstioConfigName: istioconfig.NewIstioFromConfigMap,
deployment.ConfigName: deployment.NewConfigFromConfigMap,
metrics.ConfigMapName(): metricsconfig.NewObservabilityConfigFromConfigMap,
logging.ConfigMapName(): logging.NewConfigFromConfigMap,
domainconfig.DomainConfigName: domainconfig.NewDomainFromConfigMap,
defaultconfig.DefaultsConfigName: defaultconfig.NewDefaultsConfigFromConfigMap,
}
configValidationController := webhook.NewConfigValidationController(configHandlers, options)
resourceAdmissionController := webhook.NewResourceAdmissionController(resourceHandlers, options, true)
admissionControllers := map[string]webhook.AdmissionController{
options.ResourceAdmissionControllerPath: resourceAdmissionController,
options.ConfigValidationControllerPath: configValidationController,
}
// Decorate contexts with the current state of the config.
ctxFunc := func(ctx context.Context) context.Context {
return v1.WithUpgradeViaDefaulting(store.ToContext(ctx))
}
controller, err := webhook.New(kubeClient, options, admissionControllers, logger, ctxFunc)
if err != nil {
logger.Fatalw("Failed to create admission controller", zap.Error(err))
}
profilingServer := profiling.NewServer(profilingHandler)
eg, egCtx := errgroup.WithContext(ctx)
eg.Go(func() error {
return controller.Run(ctx.Done())
})
eg.Go(profilingServer.ListenAndServe)
// This will block until either a signal arrives or one of the grouped functions
// returns an error.
<-egCtx.Done()
profilingServer.Shutdown(context.Background())
// Don't forward ErrServerClosed as that indicates we're already shutting down.
if err := eg.Wait(); err != nil && err != http.ErrServerClosed {
logger.Errorw("Error while running server", zap.Error(err))
}
}
|
|
index.ts
|
export * from './right-arrow';
|
export * from './logo';
export * from './left-arrow';
|
|
identity_schema.rs
|
/*
* Ory Kratos API
*
* Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administrative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests.
*
* The version of the OpenAPI document: v0.8.0-alpha.2
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IdentitySchema {
/// The ID of the Identity JSON Schema
#[serde(rename = "id", skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
/// The actual Identity JSON Schema
#[serde(rename = "schema", skip_serializing_if = "Option::is_none")]
pub schema: Option<serde_json::Value>,
}
|
impl IdentitySchema {
pub fn new() -> IdentitySchema {
IdentitySchema {
id: None,
schema: None,
}
}
}
| |
GeMS_DMUtoDocx_Arc10.py
|
"""
Translates DMU table in NCGMP09-style geodatabase into a fully formatted
Microsoft Word .docx file.
Assumes formatting and style names in USGS Pubs template MapManuscript_v1-0_04-11.dotx
Arguments
Input geodatabase
Output workspace
Output filename (if it doesn't end in .docx, .docx will be appended)
UseMapUnitForUnitLabl (Boolean, either 'true' or 'false')
"""
import sys, os.path, arcpy
from GeMS_utilityFunctions import *
from docxModified import *
versionString = 'GeMS_DMUtoDocx_Arc10.py, version of 2 September 2017'
addMsgAndPrint( versionString )
debug = False
debug2 = False
tab = '<tab></tab>'
emDash = u"\u2014"
startTags = []
endTags = []
tags = ['b','i','g','ul','sup','sub','tab']
for tag in tags:
startTags.append('<'+tag+'>')
endTags.append('</'+tag+'>')
def isNotBlank(thing):
if thing != '' and thing is not None:
return True
else:
return False
def isKnownStyle(pStyle):
if pStyle == 'DMUHeadnote' or pStyle.find('DMU-Heading') > -1 or pStyle.find('DMUUnit') > -1:
return True
else:
return False
def notNullText(txt):
if txt == '#null' or txt == None or txt == '#Null' or txt == '#' or txt == '' or len(txt.split()) == 0:
return False
else:
return True
gdb = sys.argv[1]
outfl = sys.argv[3]
if outfl.lower()[-5:] != '.docx':
outfl = outfl+'.docx'
outDMUdocx = os.path.join(sys.argv[2],outfl)
if sys.argv[4] == 'true':
useMapUnitForUnitLabl = True
else:
|
if sys.argv[5] == 'true': # LMU only
notLMU = False
else:
notLMU = True
arcpy.env.workspace = gdb
relationships = relationshiplist()
document = newdocument()
docbody = document.xpath('/w:document/w:body', namespaces=nsprefixes)[0]
if notLMU:
docbody.append(paragraph('Description of Map Units','DMU-Heading1'))
else:
docbody.append(paragraph('List of Map Units','DMU-Heading1'))
lastParaWasHeading = True
"""
DMU has many rows
Each row has content for 1 or more paragraphs. 1st paragraph has style 'row.ParagraphStyle'
2nd and subsequent paragraphs have style 'DMUParagraph'
Each paragraph is composed of one or more runs, each of which _may_ include
markup tags
We sort DMU on HierarchyKey and then step through the rows, constructing rowtext w/ markup
according to row.paragraphStyle.
We then divide the newly-built rowtext into paragraphs.
For each paragraph, we split it into runs, translate the markup tags into run-level
formatting codes, and append the finished paragraph to the document body.
"""
addMsgAndPrint('Getting DMU rows and creating output paragraphs')
dmuRows = arcpy.SearchCursor('DescriptionOfMapUnits',"","","",'HierarchyKey')
for row in dmuRows:
rowText = ''
if isNotBlank(row.HierarchyKey) and isKnownStyle(row.ParagraphStyle):
addMsgAndPrint(' '+ str(row.HierarchyKey)+' '+str(row.ParagraphStyle))
#if row.ParagraphStyle == 'DMUHeadnote':
# rowText = '['+row.Description+']'
if row.ParagraphStyle.find('DMU-Heading') > -1: # is a heading
rowText = row.Name
paraStyle = row.ParagraphStyle
if notNullText(row.Description): # heading has headnote. Append heading to docbody and make new row
addMsgAndPrint('Description='+row.Description)
docbody.append(paragraph(rowText,row.ParagraphStyle))
rowText = row.Description
paraStyle = 'DMUHeadnote'
elif row.ParagraphStyle.find('DMUUnit') > -1: # is a unit
if not useMapUnitForUnitLabl and notNullText(row.Label):
rowText = '<ul>'+row.Label+'</ul>'
elif useMapUnitForUnitLabl and notNullText(row.MapUnit):
rowText = '<ul>'+row.MapUnit+'</ul>'
rowText = rowText + tab
if row.ParagraphStyle[-1:] in ('4','5'):
rowText = rowText + tab # add second tab for DMUUnit4 and DMUUnit5
if isNotBlank(row.Name):
rowText = rowText + '<b>'+row.Name+'</b>'
if isNotBlank(row.Age):
rowText = rowText + '<b> ('+row.Age+')</b>'
if isNotBlank(row.Description) and notLMU:
rowText = rowText + emDash + row.Description
paraStyle = row.ParagraphStyle
else: # Unrecognized paragraph style
addMsgAndPrint('Do not recognize paragraph style '+row.ParagraphStyle)
## divide into paragraphs and build list of [paraText, paraStyle]
if debug: addMsgAndPrint(' dividing into paragraphs')
paras = []
if paraStyle == 'DMUUnit1' and lastParaWasHeading:
paraStyle = 'DMUUnit11stafterheading'
if rowText.find('<br>') > 0:
print ' got multiple paragraphs!'
while rowText.find('<br>') > 0:
paras.append([rowText.partition('<br>')[0],paraStyle])
rowText = rowText.partition('<br>')[2]
paraStyle = 'DMUParagraph'
paras.append([rowText,paraStyle])
if paraStyle.find('Head') > -1:
lastParaWasHeading = True
else:
lastParaWasHeading = False
if debug: addMsgAndPrint(' finding formatting')
# for each paragraph:
for pgraph in paras:
para = pgraph[0]; paraStyle = pgraph[1]
runs = []
while len(para) > 0:
## Look for initial unformatted text chunk
firstPos = len(para)
for tag in startTags:
pos = para.find(tag)
if pos > -1 and pos < firstPos:
firstPos = pos
if firstPos > 0:
runs.append([[],para[0:firstPos]])
newPara = para[firstPos:]
para = newPara
elif firstPos == len(para):
runs.append([[],para])
para = ''
## or may be no initial unformatted chunk (firstpos = 0), in which case do nothing
## Then pull succeeding chunks
runTags = []
isTag = True
# trim starting tags (and append them to runTags)
while len(para) > 0 and isTag == True:
isTag = False
for tag in startTags:
if para.find(tag) == 0:
runTags.append(tag.replace('<','').replace('>',''))
newPara = para[len(tag):]
para = newPara
isTag = True
# find first endTag
endPos = len(para)
for tag in endTags:
tagPos = para.find(tag)
if tagPos > -1 and tagPos < endPos:
endPos = tagPos
runs.append([runTags,para[0:endPos]])
newPara = para[endPos:]
para = newPara
# strip end tags
isTag = True
while len(para) > 0 and isTag:
isTag = False
for tag in endTags:
if para.find(tag) == 0:
isTag = True
newPara = para[len(tag):]
para = newPara
pText = []
for run in runs:
#if debug: addMsgAndPrint(str(run))
text = run[1]
if text != '':
tags = ''
if 'b' in run[0]:
tags = tags+'b'
if 'i' in run[0]:
tags = tags+'i'
if 'g' in run[0]:
tags = tags+'g'
if 'ul' in run[0]:
tags = tags+'l'
if 'sup' in run[0]:
tags = tags+'p'
if 'sub' in run[0]:
tags = tags+'d'
pText.append([text,tags])
elif 'tab' in run[0]:
# if this run is a tab, ignore any other tags and set tags to 'tab'
tags = 'tab'
pText.append(['',tags])
docbody.append(paragraph(pText,paraStyle))
addMsgAndPrint(' finished appending paragraphs')
addMsgAndPrint('Setting core properties')
coreprops = coreproperties(title='DMU for '+gdb,subject='',creator=versionString,keywords=['python','NCGMP09','Word'])
appprops = appproperties()
contenttypes = contenttypes()
websettings = websettings()
wordrelationships = wordrelationships(relationships)
# Save our document
addMsgAndPrint('Saving to file '+outDMUdocx)
savedocx(document,coreprops,appprops,contenttypes,websettings,wordrelationships,outDMUdocx)
|
useMapUnitForUnitLabl = False
|
remove.js
|
'use strict'
var path = require('path')
var fs = require('graceful-fs')
var rimraf = require('rimraf')
var asyncMap = require('slide').asyncMap
var mkdirp = require('gentle-fs').mkdir
var npm = require('../../npm.js')
var andIgnoreErrors = require('../and-ignore-errors.js')
var move = require('../../utils/move.js')
var isInside = require('path-is-inside')
var vacuum = require('fs-vacuum')
// This is weird because we want to remove the module but not its node_modules folder.
// Handling this case means we don't have to worry about the order of operations.
module.exports = function (staging, pkg, log, next) {
log.silly('remove', pkg.path)
if (pkg.target) {
removeLink(pkg, next)
} else {
removeDir(pkg, log, next)
}
}
function
|
(pkg, next) {
var base = isInside(pkg.path, npm.prefix) ? npm.prefix : pkg.path
rimraf(pkg.path, (err) => {
if (err) return next(err)
vacuum(pkg.path, {base: base}, next)
})
}
function removeDir (pkg, log, next) {
var modpath = path.join(path.dirname(pkg.path), '.' + path.basename(pkg.path) + '.MODULES')
move(path.join(pkg.path, 'node_modules'), modpath).then(unbuildPackage, unbuildPackage)
function unbuildPackage (moveEr) {
rimraf(pkg.path, moveEr ? andRemoveEmptyParents(pkg.path) : moveModulesBack)
}
function andRemoveEmptyParents (path) {
return function (er) {
if (er) return next(er)
removeEmptyParents(pkg.path)
}
}
function moveModulesBack () {
fs.readdir(modpath, makeTarget)
}
function makeTarget (readdirEr, files) {
if (readdirEr) return cleanup()
if (!files.length) return cleanup()
mkdirp(path.join(pkg.path, 'node_modules'), function (mkdirEr) { moveModules(mkdirEr, files) })
}
function moveModules (mkdirEr, files) {
if (mkdirEr) return next(mkdirEr)
asyncMap(files, function (file, done) {
var from = path.join(modpath, file)
var to = path.join(pkg.path, 'node_modules', file)
// we ignore errors here, because they can legitimately happen, for instance,
// bundled modules will be in both node_modules folders
move(from, to).then(andIgnoreErrors(done), andIgnoreErrors(done))
}, cleanup)
}
function cleanup () {
rimraf(modpath, afterCleanup)
}
function afterCleanup (rimrafEr) {
if (rimrafEr) log.warn('remove', rimrafEr)
removeEmptyParents(path.resolve(pkg.path, '..'))
}
function removeEmptyParents (pkgdir) {
fs.rmdir(pkgdir, function (er) {
// FIXME: Make sure windows does what we want here
if (er && er.code !== 'ENOENT') return next()
removeEmptyParents(path.resolve(pkgdir, '..'))
})
}
}
|
removeLink
|
authService.js
|
import Cookies from 'universal-cookie'
const cookies = new Cookies()
import { CookieDomain } from '../config.js'
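// When a CookieDomain is configured, scope cookies to that domain; otherwise leave the
// domain unset so the cookie applies to the current host only.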
let cookieConfig = {}
if(CookieDomain !== ''){
cookieConfig = { domain: CookieDomain } //path:'/',maxAge:365*24*60*60
}
export function saveCookie(name,value) {
cookies.set(name, value, cookieConfig)
//window.localStorage.setItem(name,value)
}
export function getCookie(name) {
return cookies.get(name)
//return window.localStorage.getItem(name)
}
export function
|
(name) {
cookies.remove(name, cookieConfig)
//window.localStorage.removeItem(name)
}
export function signOut() {
cookies.remove('token', cookieConfig)
//window.localStorage.removeItem('token')
}
export function isLogin() {
return !!cookies.get('token')
//return !!window.localStorage.getItem('token')
}
|
removeCookie
|
image_decoding_measurement.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class ImageDecodingMeasurementPage(page_module.Page):
|
class ImageDecodingMeasurementPageSet(page_set_module.PageSet):
""" A directed benchmark of image decoding performance """
def __init__(self):
super(ImageDecodingMeasurementPageSet, self).__init__()
self.image_decoding_measurement_limit_results_to_min_iterations = True
urls_list = [
'file://../../../chrome/test/data/image_decoding/image_decoding.html?gif',
'file://../../../chrome/test/data/image_decoding/image_decoding.html?jpg',
'file://../../../chrome/test/data/image_decoding/image_decoding.html?png',
'file://../../../chrome/test/data/image_decoding/image_decoding.html?webp'
]
for url in urls_list:
self.AddPage(ImageDecodingMeasurementPage(url, self))
|
def __init__(self, url, page_set):
super(ImageDecodingMeasurementPage, self).__init__(url=url,
page_set=page_set)
self.image_decoding_measurement_limit_results_to_min_iterations = True
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.ExecuteJavaScript('runBenchmark();')
action_runner.WaitForJavaScriptCondition('isDone')
|
push_test.go
|
package e2e
import (
"fmt"
"strings"
"testing"
"github.com/minio/minio-go/v6"
)
const (
// copied from main
defaultChartsContentType = "application/gzip"
)
func TestPush(t *testing.T) {
t.Log("Test basic push action")
name := "test-push"
dir := "charts"
setupRepo(t, name, dir)
defer teardownRepo(t, name)
key := dir + "/foo-1.2.3.tgz"
// set a cleanup in beforehand
defer removeObject(t, name, key)
cmd, stdout, stderr := command(fmt.Sprintf("helm s3 push testdata/foo-1.2.3.tgz %s", name))
if err := cmd.Run(); err != nil {
t.Errorf("Unexpected error: %v", err)
}
if stdout.String() != "" {
t.Errorf("Expected stdout to be empty, but got %q", stdout.String())
}
if stderr.String() != "" {
t.Errorf("Expected stderr to be empty, but got %q", stderr.String())
}
// Check that chart was actually pushed
obj, err := mc.StatObject(name, key, minio.StatObjectOptions{})
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if obj.Key != key {
t.Errorf("Expected key to be %q but got %q", key, obj.Key)
}
}
func TestPushWithContentTypeDefault(t *testing.T) {
contentType := defaultChartsContentType
t.Logf("Test basic push action with default Content-Type '%s'", contentType)
name := "test-push"
dir := "charts"
setupRepo(t, name, dir)
defer teardownRepo(t, name)
key := dir + "/foo-1.2.3.tgz"
// set a cleanup in beforehand
defer removeObject(t, name, key)
cmd, stdout, stderr := command(fmt.Sprintf("helm s3 push testdata/foo-1.2.3.tgz %s", name))
if err := cmd.Run(); err != nil {
t.Errorf("Unexpected error: %v", err)
}
if stdout.String() != "" {
t.Errorf("Expected stdout to be empty, but got %q", stdout.String())
}
if stderr.String() != "" {
t.Errorf("Expected stderr to be empty, but got %q", stderr.String())
}
assertContentType(t, contentType, name, key)
}
func TestPushWithContentTypeCustom(t *testing.T) {
contentType := fmt.Sprintf("%s-test", defaultChartsContentType)
t.Logf("Test basic push action with --content-type='%s'", contentType)
name := "test-push"
dir := "charts"
setupRepo(t, name, dir)
defer teardownRepo(t, name)
key := dir + "/foo-1.2.3.tgz"
// set a cleanup in beforehand
defer removeObject(t, name, key)
cmd, stdout, stderr := command(fmt.Sprintf("helm s3 push --content-type=%s testdata/foo-1.2.3.tgz %s", contentType, name))
if err := cmd.Run(); err != nil {
t.Errorf("Unexpected error: %v", err)
}
if stdout.String() != "" {
t.Errorf("Expected stdout to be empty, but got %q", stdout.String())
}
if stderr.String() != "" {
t.Errorf("Expected stderr to be empty, but got %q", stderr.String())
}
assertContentType(t, contentType, name, key)
}
func TestPushDryRun(t *testing.T) {
t.Log("Test push action with --dry-run flag")
name := "test-push-dry-run"
dir := "charts"
setupRepo(t, name, dir)
defer teardownRepo(t, name)
cmd, stdout, stderr := command(fmt.Sprintf("helm s3 push testdata/foo-1.2.3.tgz %s --dry-run", name))
if err := cmd.Run(); err != nil {
t.Errorf("Unexpected error: %v", err)
}
if stdout.String() != "" {
t.Errorf("Expected stdout to be empty, but got %q", stdout.String())
}
if stderr.String() != "" {
t.Errorf("Expected stderr to be empty, but got %q", stderr.String())
}
// Check that actually nothing got pushed
_, err := mc.StatObject(name, dir+"/foo-1.2.3.tgz", minio.StatObjectOptions{})
if minio.ToErrorResponse(err).Code != "NoSuchKey" {
t.Fatalf("Expected chart not to be pushed")
}
}
func TestPushIgnoreIfExists(t *testing.T) {
t.Log("Test push action with --ignore-if-exists flag")
name := "test-push-ignore-if-exists"
dir := "charts"
setupRepo(t, name, dir)
defer teardownRepo(t, name)
key := dir + "/foo-1.2.3.tgz"
// set a cleanup in beforehand
defer removeObject(t, name, key)
// first, push a chart
cmd, stdout, stderr := command(fmt.Sprintf("helm s3 push testdata/foo-1.2.3.tgz %s", name))
if err := cmd.Run(); err != nil {
t.Errorf("Unexpected error: %v", err)
}
if stdout.String() != "" {
t.Errorf("Expected stdout to be empty, but got %q", stdout.String())
}
if stderr.String() != "" {
t.Errorf("Expected stderr to be empty, but got %q", stderr.String())
}
// check that chart was actually pushed and remember last modification time
obj, err := mc.StatObject(name, key, minio.StatObjectOptions{})
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if obj.Key != key {
t.Errorf("Expected key to be %q but got %q", key, obj.Key)
}
lastModified := obj.LastModified
// push a chart again with --ignore-if-exists
cmd, stdout, stderr = command(fmt.Sprintf("helm s3 push testdata/foo-1.2.3.tgz %s --ignore-if-exists", name))
if err := cmd.Run(); err != nil {
t.Errorf("Unexpected error: %v", err)
}
if stdout.String() != "" {
t.Errorf("Expected stdout to be empty, but got %q", stdout.String())
}
if stderr.String() != "" {
t.Errorf("Expected stderr to be empty, but got %q", stderr.String())
}
// sanity check that chart was not overwritten
obj, err = mc.StatObject(name, key, minio.StatObjectOptions{})
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !obj.LastModified.Equal(lastModified) {
t.Errorf("Expected chart not to be modified")
}
}
func TestPushForceAndIgnoreIfExists(t *testing.T)
|
func assertContentType(t *testing.T, contentType, name, key string) {
t.Helper()
obj, err := mc.StatObject(name, key, minio.StatObjectOptions{})
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if obj.Key != key {
t.Errorf("Expected key to be %q but got %q", key, obj.Key)
}
if obj.ContentType != contentType {
t.Errorf("Expected ContentType to be %q but got %q", contentType, obj.ContentType)
}
}
func removeObject(t *testing.T, name, key string) {
t.Helper()
if err := mc.RemoveObject(name, key); err != nil {
t.Errorf("Unexpected error: %v", err)
}
}
|
{
t.Log("Test push action with both --force and --ignore-if-exists flags")
name := "test-push-force-and-ignore-if-exists"
dir := "charts"
setupRepo(t, name, dir)
defer teardownRepo(t, name)
cmd, stdout, stderr := command(fmt.Sprintf("helm s3 push testdata/foo-1.2.3.tgz %s --force --ignore-if-exists", name))
if err := cmd.Run(); err == nil {
t.Errorf("Expected error")
}
if stdout.String() != "" {
t.Errorf("Expected stdout to be empty, but got %q", stdout.String())
}
expectedErrorMessage := "The --force and --ignore-if-exists flags are mutually exclusive and cannot be specified together."
if !strings.HasPrefix(stderr.String(), expectedErrorMessage) {
t.Errorf("Expected stderr to begin with %q, but got %q", expectedErrorMessage, stderr.String())
}
}
|
model_train_eval.py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple speech recognition to spot a limited number of keywords.
It is based on tensorflow/examples/speech_commands
This is a self-contained example script that will train a very basic audio
recognition model in TensorFlow. It downloads the necessary training data and
runs with reasonable defaults to train within a few hours, even when using only a CPU.
It is intended as an introduction to using neural networks for audio
recognition, and is not a full speech recognition system. This network uses a
keyword detection style to spot discrete words from a small vocabulary,
consisting of
"yes", "no", "up", "down", "left", "right", "on", "off", "stop", and "go".
To run the training process, use:
bazel run model_train_eval.py
This will write out checkpoints to /tmp/speech_commands_train/, and will
download over 1GB of open source training data, so you'll need enough free space
and a good internet connection. The default data is a collection of thousands of
one-second .wav files, each containing one spoken word. This data set is
collected from https://aiyprojects.withgoogle.com/open_speech_recording, please
consider contributing to help improve this and other models!
As training progresses, it will print out its accuracy metrics, which should
rise above 90% by the end. Once it's complete, it will produce
Keras, SavedModel, TFLite and graphdef representations.
If you want to train on your own data, you'll need to create .wavs with your
recordings, all at a consistent length, and then arrange them into subfolders
organized by label. For example, here's a possible file structure:
data >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
other>
audio_4.wav
audio_5.wav
You'll also need to tell the script what labels to look for, using the
`--wanted_words` argument. In this case, 'up,down' might be what you want, and
the audio in the 'other' folder would be used to train an 'unknown' category.
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train --
--data_dir /data --wanted_words up,down
The above script will automatically split the data into training, validation, and testing sets.
If you prefer to split the data on your own, then you should set flag
"--split_data 0" and prepare folders with structure:
data >
training >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
validation >
up >
audio_6.wav
audio_7.wav
down >
audio_8.wav
audio_9.wav
testing >
up >
audio_12.wav
audio_13.wav
down >
audio_14.wav
audio_15.wav
_background_noise_ >
audio_18.wav
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train --
--data_dir /data --wanted_words up,down --split_data 0
"""
import json
import os
import sys
from absl import logging
import tensorflow.compat.v1 as tf
from kws_streaming.layers import modes
import kws_streaming.models.att_mh_rnn as att_mh_rnn
import kws_streaming.models.att_rnn as att_rnn
import kws_streaming.models.cnn as cnn
import kws_streaming.models.crnn as crnn
import kws_streaming.models.dnn as dnn
import kws_streaming.models.dnn_raw as dnn_raw
import kws_streaming.models.ds_cnn as ds_cnn
import kws_streaming.models.ds_tc_resnet as ds_tc_resnet
import kws_streaming.models.gru as gru
import kws_streaming.models.inception as inception
import kws_streaming.models.inception_resnet as inception_resnet
import kws_streaming.models.lstm as lstm
import kws_streaming.models.mobilenet as mobilenet
import kws_streaming.models.mobilenet_v2 as mobilenet_v2
import kws_streaming.models.svdf as svdf
import kws_streaming.models.svdf_resnet as svdf_resnet
import kws_streaming.models.tc_resnet as tc_resnet
from kws_streaming.models.utils import parse
import kws_streaming.models.xception as xception
from kws_streaming.train import base_parser
from kws_streaming.train import model_flags
from kws_streaming.train import train
import kws_streaming.train.test as test
FLAGS = None
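# main() applies model_flags to the parsed arguments, trains the model when requested,
# then exports SavedModel/TFLite representations and runs accuracy evaluation in
# non-streaming and (for streamable models) streaming modes.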
def main(_):
# Update flags
|
if __name__ == '__main__':
# parser for training/testing data and speech feature flags
parser = base_parser.base_parser()
# sub parser for model settings
subparsers = parser.add_subparsers(dest='model_name', help='NN model name')
# DNN model settings
parser_dnn = subparsers.add_parser('dnn')
dnn.model_parameters(parser_dnn)
# DNN raw model settings
parser_dnn_raw = subparsers.add_parser('dnn_raw')
dnn_raw.model_parameters(parser_dnn_raw)
# LSTM model settings
parser_lstm = subparsers.add_parser('lstm')
lstm.model_parameters(parser_lstm)
# GRU model settings
parser_gru = subparsers.add_parser('gru')
gru.model_parameters(parser_gru)
# SVDF model settings
parser_svdf = subparsers.add_parser('svdf')
svdf.model_parameters(parser_svdf)
# CNN model settings
parser_cnn = subparsers.add_parser('cnn')
cnn.model_parameters(parser_cnn)
# CRNN model settings
parser_crnn = subparsers.add_parser('crnn')
crnn.model_parameters(parser_crnn)
# ATT MH RNN model settings
parser_att_mh_rnn = subparsers.add_parser('att_mh_rnn')
att_mh_rnn.model_parameters(parser_att_mh_rnn)
# ATT RNN model settings
parser_att_rnn = subparsers.add_parser('att_rnn')
att_rnn.model_parameters(parser_att_rnn)
# DS_CNN model settings
parser_ds_cnn = subparsers.add_parser('ds_cnn')
ds_cnn.model_parameters(parser_ds_cnn)
# TC Resnet model settings
parser_tc_resnet = subparsers.add_parser('tc_resnet')
tc_resnet.model_parameters(parser_tc_resnet)
# Mobilenet model settings
parser_mobilenet = subparsers.add_parser('mobilenet')
mobilenet.model_parameters(parser_mobilenet)
# Mobilenet V2 model settings
parser_mobilenet_v2 = subparsers.add_parser('mobilenet_v2')
mobilenet_v2.model_parameters(parser_mobilenet_v2)
# xception model settings
parser_xception = subparsers.add_parser('xception')
xception.model_parameters(parser_xception)
# inception model settings
parser_inception = subparsers.add_parser('inception')
inception.model_parameters(parser_inception)
# inception resnet model settings
parser_inception_resnet = subparsers.add_parser('inception_resnet')
inception_resnet.model_parameters(parser_inception_resnet)
# svdf resnet model settings
parser_svdf_resnet = subparsers.add_parser('svdf_resnet')
svdf_resnet.model_parameters(parser_svdf_resnet)
# ds_tc_resnet model settings
parser_ds_tc_resnet = subparsers.add_parser('ds_tc_resnet')
ds_tc_resnet.model_parameters(parser_ds_tc_resnet)
FLAGS, unparsed = parser.parse_known_args()
if unparsed and tuple(unparsed) != ('--alsologtostderr',):
raise ValueError('Unknown argument: {}'.format(unparsed))
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
flags = model_flags.update_flags(FLAGS)
if flags.train:
# Create model folders where logs and model will be stored
os.makedirs(flags.train_dir)
os.mkdir(flags.summaries_dir)
# Model training
train.train(flags)
else:
if not os.path.isdir(flags.train_dir):
raise ValueError('model is not trained; set "--train 1" and retrain it')
# write all flags settings into json
with open(os.path.join(flags.train_dir, 'flags.json'), 'wt') as f:
json.dump(flags.__dict__, f)
# convert to SavedModel
test.convert_model_saved(flags, 'non_stream',
modes.Modes.NON_STREAM_INFERENCE)
try:
test.convert_model_saved(flags, 'stream_state_internal',
modes.Modes.STREAM_INTERNAL_STATE_INFERENCE)
except (ValueError, IndexError) as e:
logging.info('FAILED to run TF streaming: %s', e)
logging.info('run TF non streaming model accuracy evaluation')
# with TF
folder_name = 'tf'
test.tf_non_stream_model_accuracy(flags, folder_name)
# with TF.
# We can apply non stream model on stream data, by running inference
# every 200ms (for example), so that total latency will be similar with
# streaming model which is executed every 20ms.
# To measure the impact of sampling on model accuracy,
# we introduce time_shift_ms during accuracy evaluation.
# Convert milliseconds to samples:
time_shift_samples = int(
(flags.time_shift_ms * flags.sample_rate) / model_flags.MS_PER_SECOND)
test.tf_non_stream_model_accuracy(
flags,
folder_name,
time_shift_samples,
accuracy_name='tf_non_stream_model_sampling_stream_accuracy.txt')
name2opt = {
'': None,
'quantize_opt_for_size_': [tf.lite.Optimize.OPTIMIZE_FOR_SIZE],
}
for opt_name, optimizations in name2opt.items():
if (opt_name and flags.feature_type == 'mfcc_tf' and
flags.preprocess == 'raw'):
logging.info('feature type mfcc_tf needs quantization aware training '
'for quantization - it is not implemented')
continue
folder_name = opt_name + 'tflite_non_stream'
file_name = 'non_stream.tflite'
mode = modes.Modes.NON_STREAM_INFERENCE
test.convert_model_tflite(flags, folder_name, mode, file_name,
optimizations=optimizations)
test.tflite_non_stream_model_accuracy(flags, folder_name, file_name)
# these models are using bi-rnn, so they are non streamable by default
# also models using striding or pooling are not supported for streaming now
non_streamable_models = {'att_mh_rnn', 'att_rnn', 'tc_resnet'}
model_is_streamable = True
if flags.model_name in non_streamable_models:
model_is_streamable = False
# below models can use striding in time dimension,
# but this is currently unsupported
elif flags.model_name == 'cnn':
for strides in parse(flags.cnn_strides):
if strides[0] > 1:
model_is_streamable = False
break
elif flags.model_name == 'ds_cnn':
if parse(flags.cnn1_strides)[0] > 1:
model_is_streamable = False
for strides in parse(flags.dw2_strides):
if strides[0] > 1:
model_is_streamable = False
break
# if model can be streamed, then run conversion/evaluation in streaming mode
if model_is_streamable:
# ---------------- TF streaming model accuracy evaluation ----------------
# Streaming model with external state evaluation using TF with state reset
if not opt_name:
logging.info('run TF evaluation only, without optimization/quantization')
try:
folder_name = 'tf'
test.tf_stream_state_external_model_accuracy(
flags,
folder_name,
accuracy_name='stream_state_external_model_accuracy_sub_set_reset1.txt',
reset_state=True) # with state reset between test sequences
# Streaming (with external state) evaluation using TF no state reset
test.tf_stream_state_external_model_accuracy(
flags,
folder_name,
accuracy_name='stream_state_external_model_accuracy_sub_set_reset0.txt',
reset_state=False) # without state reset
# Streaming (with internal state) evaluation using TF no state reset
test.tf_stream_state_internal_model_accuracy(flags, folder_name)
except (ValueError, IndexError) as e:
logging.info('FAILED to run TF streaming: %s', e)
logging.info('run TFlite streaming model accuracy evaluation')
try:
# convert model to TFlite
folder_name = opt_name + 'tflite_stream_state_external'
file_name = 'stream_state_external.tflite'
mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
test.convert_model_tflite(flags, folder_name, mode, file_name,
optimizations=optimizations)
# Streaming model accuracy evaluation with TFLite with state reset
test.tflite_stream_state_external_model_accuracy(
flags,
folder_name,
file_name,
accuracy_name='tflite_stream_state_external_model_accuracy_reset1.txt',
reset_state=True)
# Streaming model accuracy evaluation with TFLite without state reset
test.tflite_stream_state_external_model_accuracy(
flags,
folder_name,
file_name,
accuracy_name='tflite_stream_state_external_model_accuracy_reset0.txt',
reset_state=False)
except (ValueError, IndexError) as e:
logging.info('FAILED to run TFLite streaming: %s', e)
|
yaml.go
|
package yaml
import (
"github.com/danieltaub96/git-faker/object"
util "github.com/danieltaub96/git-faker/util/file"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
"io/ioutil"
"sync"
)
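// ReadDataFile reads the YAML file at the given path and unmarshals it into a DataFile.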
func ReadDataFile(name string) (*object.DataFile, error) {
dataFile := &object.DataFile{}
data, err := ioutil.ReadFile(name)
if err != nil {
log.Errorf("Error while reading data file, maybe not exists")
return nil, err
}
if err := yaml.Unmarshal(data, &dataFile); err != nil {
log.Errorf("Error while parsing data file")
return nil, err
}
log.Infof("Data loaded from file seccufully")
return dataFile, nil
}
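// ConfigDataFile loads the data file from dataFilePath when it exists, falling back to
// defaults for the whole file or for any missing messages, emails, or names.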
func ConfigDataFile(dataFilePath string) *object.DataFile {
var dataFile = &object.DataFile{}
var fileErr error
if util.IsFileExists(dataFilePath) {
log.Infof("File %s exists, starting reading...\n", util.AbsPath(dataFilePath))
dataFile, fileErr = ReadDataFile(dataFilePath)
if fileErr != nil {
log.Infoln("Loading default data to git-faker")
dataFile.LoadDefaults()
} else {
if len(dataFile.Messages) == 0 {
log.Infoln("Loading default messages to git-faker")
dataFile.SetDefaultMessages()
}
if len(dataFile.Emails) == 0 {
log.Infoln("Loading default emails to git-faker")
dataFile.SetDefaultEmails()
}
if len(dataFile.Names) == 0 {
log.Infoln("Loading default names to git-faker")
dataFile.SetDefaultNames()
}
}
} else {
log.Infoln("Loading default data to git-faker")
dataFile.LoadDefaults()
}
log.Infof("git-faker data is: %s\n", dataFile)
return dataFile
}
var (
dataFileOnce sync.Once
dataFile *object.DataFile
)
func InitDataFile(dataFilePath string)
|
func GetDataFile() *object.DataFile {
return dataFile
}
|
{
dataFileOnce.Do(func() {
log.Infoln("Starting load data for git-faker")
dataFile = ConfigDataFile(dataFilePath)
})
}
|
test_contextutil.py
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import pstats
import shutil
import signal
import sys
import unittest
import uuid
import zipfile
from builtins import next, object, range, str
from contextlib import contextmanager
import mock
from pants.util.contextutil import (HardSystemExit, InvalidZipPath, Timer, environment_as,
exception_logging, hard_exit_handler, hermetic_environment_as,
maybe_profiled, open_zip, pushd, signal_handler_as, stdio_as,
temporary_dir, temporary_file)
from pants.util.process_handler import subprocess
PATCH_OPTS = dict(autospec=True, spec_set=True)
class ContextutilTest(unittest.TestCase):
def test_empty_environment(self):
with environment_as():
pass
def test_override_single_variable(self):
with temporary_file() as output:
# test that the override takes place
with environment_as(HORK='BORK'):
subprocess.Popen([sys.executable, '-c', 'import os; print(os.environ["HORK"])'],
stdout=output).wait()
output.seek(0)
self.assertEquals('BORK\n', output.read())
# test that the variable is cleared
with temporary_file() as new_output:
subprocess.Popen([sys.executable, '-c', 'import os; print("HORK" in os.environ)'],
stdout=new_output).wait()
new_output.seek(0)
self.assertEquals('False\n', new_output.read())
def test_environment_negation(self):
with temporary_file() as output:
with environment_as(HORK='BORK'):
with environment_as(HORK=None):
# test that the variable is cleared
subprocess.Popen([sys.executable, '-c', 'import os; print("HORK" in os.environ)'],
stdout=output).wait()
output.seek(0)
self.assertEquals('False\n', output.read())
def test_hermetic_environment(self):
self.assertIn('USER', os.environ)
with hermetic_environment_as(**{}):
self.assertNotIn('USER', os.environ)
def test_hermetic_environment_subprocesses(self):
self.assertIn('USER', os.environ)
with hermetic_environment_as(**dict(AAA='333')):
output = subprocess.check_output('env', shell=True)
self.assertNotIn('USER=', output)
self.assertIn('AAA', os.environ)
self.assertEquals(os.environ['AAA'], '333')
self.assertIn('USER', os.environ)
self.assertNotIn('AAA', os.environ)
def test_hermetic_environment_unicode(self):
UNICODE_CHAR = '¡'
ENCODED_CHAR = UNICODE_CHAR.encode('utf-8')
with environment_as(**dict(XXX=UNICODE_CHAR)):
self.assertEquals(os.environ['XXX'], ENCODED_CHAR)
with hermetic_environment_as(**dict(AAA=UNICODE_CHAR)):
self.assertIn('AAA', os.environ)
self.assertEquals(os.environ['AAA'], ENCODED_CHAR)
self.assertEquals(os.environ['XXX'], ENCODED_CHAR)
def test_simple_pushd(self):
pre_cwd = os.getcwd()
with temporary_dir() as tempdir:
with pushd(tempdir) as path:
self.assertEquals(tempdir, path)
self.assertEquals(os.path.realpath(tempdir), os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
def test_nested_pushd(self):
pre_cwd = os.getcwd()
with temporary_dir() as tempdir1:
with pushd(tempdir1):
self.assertEquals(os.path.realpath(tempdir1), os.getcwd())
with temporary_dir(root_dir=tempdir1) as tempdir2:
with pushd(tempdir2):
self.assertEquals(os.path.realpath(tempdir2), os.getcwd())
self.assertEquals(os.path.realpath(tempdir1), os.getcwd())
self.assertEquals(os.path.realpath(tempdir1), os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
def test_temporary_file_no_args(self):
with temporary_file() as fp:
self.assertTrue(os.path.exists(fp.name), 'Temporary file should exist within the context.')
self.assertFalse(os.path.exists(fp.name),
                 'Temporary file should not exist outside of the context.')
def test_temporary_file_without_cleanup(self):
with temporary_file(cleanup=False) as fp:
self.assertTrue(os.path.exists(fp.name), 'Temporary file should exist within the context.')
self.assertTrue(os.path.exists(fp.name),
'Temporary file should exist outside of context if cleanup=False.')
os.unlink(fp.name)
def t
|
self):
with temporary_dir() as path:
with temporary_file(root_dir=path) as f:
self.assertTrue(os.path.realpath(f.name).startswith(os.path.realpath(path)),
'file should be created in root_dir if specified.')
def test_temporary_dir_no_args(self):
with temporary_dir() as path:
self.assertTrue(os.path.exists(path), 'Temporary dir should exist within the context.')
self.assertTrue(os.path.isdir(path), 'Temporary dir should be a dir and not a file.')
self.assertFalse(os.path.exists(path), 'Temporary dir should not exist outside of the context.')
def test_temporary_dir_without_cleanup(self):
with temporary_dir(cleanup=False) as path:
self.assertTrue(os.path.exists(path), 'Temporary dir should exist within the context.')
self.assertTrue(os.path.exists(path),
'Temporary dir should exist outside of context if cleanup=False.')
shutil.rmtree(path)
def test_temporary_dir_with_root_dir(self):
with temporary_dir() as path1:
with temporary_dir(root_dir=path1) as path2:
self.assertTrue(os.path.realpath(path2).startswith(os.path.realpath(path1)),
'Nested temporary dir should be created within outer dir.')
def test_timer(self):
class FakeClock(object):
def __init__(self):
self._time = 0.0
def time(self):
ret = self._time
self._time += 0.0001 # Force a little time to elapse.
return ret
def sleep(self, duration):
self._time += duration
clock = FakeClock()
# Note: to test with the real system clock, use this instead:
# import time
# clock = time
with Timer(clock=clock) as t:
self.assertLess(t.start, clock.time())
self.assertGreater(t.elapsed, 0)
clock.sleep(0.1)
self.assertGreater(t.elapsed, 0.1)
clock.sleep(0.1)
self.assertTrue(t.finish is None)
self.assertGreater(t.elapsed, 0.2)
self.assertLess(t.finish, clock.time())
def test_open_zipDefault(self):
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w') as zf:
self.assertTrue(zf._allowZip64)
def test_open_zipTrue(self):
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w', allowZip64=True) as zf:
self.assertTrue(zf._allowZip64)
def test_open_zipFalse(self):
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w', allowZip64=False) as zf:
self.assertFalse(zf._allowZip64)
def test_open_zip_raises_exception_on_falsey_paths(self):
falsey = (None, '', False)
for invalid in falsey:
with self.assertRaises(InvalidZipPath):
next(open_zip(invalid).gen)
def test_open_zip_returns_realpath_on_badzipfile(self):
# In case of file corruption, deleting a Pants-constructed symlink would not resolve the error.
with temporary_file() as not_zip:
with temporary_dir() as tempdir:
file_symlink = os.path.join(tempdir, 'foo')
os.symlink(not_zip.name, file_symlink)
self.assertEquals(os.path.realpath(file_symlink), os.path.realpath(not_zip.name))
with self.assertRaisesRegexp(zipfile.BadZipfile, r'{}'.format(not_zip.name)):
next(open_zip(file_symlink).gen)
@contextmanager
def _stdio_as_tempfiles(self):
"""Harness to replace `sys.std*` with tempfiles.
Validates that all files are read/written/flushed correctly, and acts as a
contextmanager to allow for recursive tests.
"""
# Prefix contents written within this instance with a unique string to differentiate
# them from other instances.
uuid_str = str(uuid.uuid4())
def u(string):
return '{}#{}'.format(uuid_str, string)
stdin_data = u('stdio')
stdout_data = u('stdout')
stderr_data = u('stderr')
with temporary_file() as tmp_stdin,\
temporary_file() as tmp_stdout,\
temporary_file() as tmp_stderr:
print(stdin_data, file=tmp_stdin)
tmp_stdin.seek(0)
# Read prepared content from stdin, and write content to stdout/stderr.
with stdio_as(stdout_fd=tmp_stdout.fileno(),
stderr_fd=tmp_stderr.fileno(),
stdin_fd=tmp_stdin.fileno()):
self.assertEquals(sys.stdin.fileno(), 0)
self.assertEquals(sys.stdout.fileno(), 1)
self.assertEquals(sys.stderr.fileno(), 2)
self.assertEquals(stdin_data, sys.stdin.read().strip())
print(stdout_data, file=sys.stdout)
yield
print(stderr_data, file=sys.stderr)
tmp_stdout.seek(0)
tmp_stderr.seek(0)
self.assertEquals(stdout_data, tmp_stdout.read().strip())
self.assertEquals(stderr_data, tmp_stderr.read().strip())
def test_stdio_as(self):
self.assertTrue(sys.stderr.fileno() > 2,
"Expected a pseudofile as stderr, got: {}".format(sys.stderr))
old_stdout, old_stderr, old_stdin = sys.stdout, sys.stderr, sys.stdin
# The first level tests that when `sys.std*` are file-likes (in particular, the ones set up in
# pytest's harness) rather than actual files, we stash and restore them properly.
with self._stdio_as_tempfiles():
# The second level stashes the first level's actual file objects and then re-opens them.
with self._stdio_as_tempfiles():
pass
# Validate that after the second level completes, the first level still sees valid
# fds on `sys.std*`.
self.assertEquals(sys.stdin.fileno(), 0)
self.assertEquals(sys.stdout.fileno(), 1)
self.assertEquals(sys.stderr.fileno(), 2)
self.assertEquals(sys.stdout, old_stdout)
self.assertEquals(sys.stderr, old_stderr)
self.assertEquals(sys.stdin, old_stdin)
def test_stdio_as_dev_null(self):
# Capture output to tempfiles.
with self._stdio_as_tempfiles():
# Read/write from/to `/dev/null`, which will be validated by the harness as not
# affecting the tempfiles.
with stdio_as(stdout_fd=-1, stderr_fd=-1, stdin_fd=-1):
self.assertEquals(b'', sys.stdin.read())
print('garbage', file=sys.stdout)
print('garbage', file=sys.stderr)
def test_signal_handler_as(self):
mock_initial_handler = 1
mock_new_handler = 2
with mock.patch('signal.signal', **PATCH_OPTS) as mock_signal:
mock_signal.return_value = mock_initial_handler
try:
with signal_handler_as(signal.SIGUSR2, mock_new_handler):
raise NotImplementedError('blah')
except NotImplementedError:
pass
self.assertEquals(mock_signal.call_count, 2)
mock_signal.assert_has_calls([
mock.call(signal.SIGUSR2, mock_new_handler),
mock.call(signal.SIGUSR2, mock_initial_handler)
])
def test_permissions(self):
with temporary_file(permissions=0o700) as f:
self.assertEquals(0o700, os.stat(f.name)[0] & 0o777)
with temporary_dir(permissions=0o644) as path:
self.assertEquals(0o644, os.stat(path)[0] & 0o777)
def test_exception_logging(self):
fake_logger = mock.Mock()
with self.assertRaises(AssertionError):
with exception_logging(fake_logger, 'error!'):
assert True is False
fake_logger.exception.assert_called_once_with('error!')
def test_maybe_profiled(self):
with temporary_dir() as td:
profile_path = os.path.join(td, 'profile.prof')
with maybe_profiled(profile_path):
for _ in range(5):
print('test')
# Ensure the profile data was written.
self.assertTrue(os.path.exists(profile_path))
# Ensure the profile data is valid.
pstats.Stats(profile_path).print_stats()
def test_hard_exit_handler(self):
with mock.patch('os._exit', **PATCH_OPTS) as mock_exit:
with hard_exit_handler():
raise HardSystemExit()
mock_exit.assert_called_once_with(0)
|
est_temporary_file_within_other_dir(
|
lib.rs
|
//! Terminal model
use serde_derive::*;
use failure::Error;
use std::ops::{Deref, DerefMut, Range};
use std::str;
pub mod input;
pub use crate::input::*;
pub use termwiz::cell::{self, *};
pub use termwiz::surface::line::*;
pub mod screen;
pub use crate::screen::*;
pub mod selection;
use crate::selection::{SelectionCoordinate, SelectionRange};
use termwiz::hyperlink::Hyperlink;
pub mod terminal;
pub use crate::terminal::*;
pub mod terminalstate;
pub use crate::terminalstate::*;
/// Represents the index into screen.lines. Index 0 is the top of
/// the scrollback (if any). The index of the top of the visible screen
/// depends on the terminal dimensions and the scrollback size.
pub type PhysRowIndex = usize;
/// Represents an index into the visible portion of the screen.
/// Value 0 is the first visible row. `VisibleRowIndex` needs to be
/// resolved into a `PhysRowIndex` to obtain an actual row. It is not
/// valid to have a negative `VisibleRowIndex` value so this type logically
/// should be unsigned, however, having a different sign is helpful to
/// have the compiler catch accidental arithmetic performed between
/// `PhysRowIndex` and `VisibleRowIndex`. We could define our own type with
/// its own `Add` and `Sub` operators, but then we'd not be able to iterate
/// over `Ranges` of these types without also laboriously implementing an
/// iterator `Skip` trait that is currently only in unstable rust.
pub type VisibleRowIndex = i64;
/// Like `VisibleRowIndex` above, but can index backwards into scrollback.
/// This is deliberately a differently sized signed type to catch
/// accidentally blending together the wrong types of indices.
/// This is explicitly 32-bit rather than 64-bit as it seems unreasonable
/// to want to scroll back or select more than ~2billion lines of scrollback.
pub type ScrollbackOrVisibleRowIndex = i32;
/// Returns true if r1 intersects r2
pub fn
|
<T: Ord + Copy>(r1: Range<T>, r2: Range<T>) -> bool {
use std::cmp::{max, min};
let start = max(r1.start, r2.start);
let end = min(r1.end, r2.end);
end > start
}
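// Illustrative examples (not part of the original source): 0..5 and 3..10 intersect,
// while 0..3 and 3..6 do not, because only a non-empty overlap of the half-open ranges counts.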
/// Position allows referring to an absolute visible row number
/// or a position relative to some existing row number (typically
/// where the cursor is located). Both of the cases are represented
/// as signed numbers so that the math and error checking for out
/// of range values can be deferred to the point where we execute
/// the request.
#[derive(Debug)]
pub enum Position {
Absolute(VisibleRowIndex),
Relative(i64),
}
/// Describes the location of the cursor in the visible portion
/// of the screen.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Deserialize, Serialize)]
pub struct CursorPosition {
pub x: usize,
pub y: VisibleRowIndex,
}
pub mod color;
#[cfg(test)]
mod test;
/// The response given when queried for device attributes.
/// This particular string says "we are a VT102".
/// TODO: Consider VT220 extended response which can advertise
/// certain feature sets.
pub const DEVICE_IDENT: &[u8] = b"\x1b[?6c";
pub const CSI: &[u8] = b"\x1b[";
pub const OSC: &[u8] = b"\x1b]";
pub const ST: &[u8] = b"\x1b\\";
pub const DCS: &[u8] = b"\x1bP";
|
intersects_range
|
download_and_extract_archive.py
|
#!/usr/bin/env python
# Copyright (c) YugaByte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
"""
Downloads and extracts an archive with pre-built third-party dependencies.
"""
# This script should not use any non-standard modules and should run with Python 2 and Python 3.
# It could be run before the main Python interpreter we'll be using for most of our scripts is
# even installed.
import os
import sys
import re
import logging
import socket
import random
import atexit
import subprocess
import argparse
import tempfile
import time
import getpass
import platform
import fcntl
import errno
g_verbose = False
EXPECTED_ARCHIVE_EXTENSION = '.tar.gz'
CHECKSUM_EXTENSION = '.sha256'
def remove_ignore_errors(file_path):
file_path = os.path.abspath(file_path)
if os.path.isfile(file_path):
try:
os.remove(file_path)
except Exception as e:
logging.warning("Error removing %s: %s, ignoring", file_path, e)
def run_cmd(args):
if g_verbose:
logging.info("Running command: %s", args)
try:
subprocess.check_call(args)
except: # noqa
logging.error("Error trying to run command: %s", args)
raise
def validate_sha256sum(checksum_str):
|
def read_file_and_strip(file_path):
with open(file_path) as f:
return f.read().strip()
def compute_sha256sum(file_path):
cmd_line = None
if sys.platform.startswith('linux'):
cmd_line = ['sha256sum', file_path]
elif sys.platform.startswith('darwin'):
cmd_line = ['shasum', '--algorithm', '256', file_path]
else:
raise ValueError("Don't know how to compute SHA256 checksum on platform %s" % sys.platform)
checksum_str = subprocess.check_output(cmd_line).strip().split()[0].decode('utf-8')
validate_sha256sum(checksum_str)
return checksum_str
def verify_sha256sum(checksum_file_path, data_file_path):
if not os.path.exists(checksum_file_path):
raise IOError("Checksum file does not exist: %s" % checksum_file_path)
if not os.path.exists(data_file_path):
raise IOError("Data file does not exist: %s", data_file_path)
if not checksum_file_path.endswith(CHECKSUM_EXTENSION):
raise ValueError("Checksum file path must end with '%s', got: %s" % (
CHECKSUM_EXTENSION, checksum_file_path))
# Guard against someone passing in the actual data file instead of the checksum file.
checksum_file_size = os.stat(checksum_file_path).st_size
if checksum_file_size > 4096:
raise IOError("Checksum file size is too big: %d bytes (file path: %s)" % (
checksum_file_size, checksum_file_path))
expected_checksum = read_file_and_strip(checksum_file_path).split()[0]
actual_checksum = compute_sha256sum(data_file_path)
if actual_checksum == expected_checksum:
return True
err_msg = "Invalid checksum for file %s: got %s, expected %s" % (
data_file_path, actual_checksum, expected_checksum)
logging.warning(err_msg)
return False
def download_url(url, dest_path):
start_time_sec = time.time()
logging.info("Downloading %s to %s", url, dest_path)
dest_dir = os.path.dirname(dest_path)
if not os.path.isdir(dest_dir):
raise IOError("Destination directory %s does not exist" % dest_dir)
run_cmd(['curl', '-LsS', url, '-o', dest_path])
if not os.path.exists(dest_path):
raise IOError("Failed to download %s: file %s does not exist" % (url, dest_path))
elapsed_sec = time.time() - start_time_sec
logging.info("Downloaded %s to %s in %.1fs" % (url, dest_path, elapsed_sec))
def move_file(src_path, dest_path):
if g_verbose:
logging.info("Trying to move file %s to %s", src_path, dest_path)
if not os.path.exists(src_path):
raise IOError("Does not exist: %s" % src_path)
if not os.path.isfile(src_path):
raise IOError("Not a file: %s" % src_path)
if os.path.isdir(dest_path):
raise IOError("Destination path can't be a directory: %s" % dest_path)
if os.path.exists(dest_path):
logging.warning("Destination path already exists: %s, moving %s there anyway" % (
dest_path, src_path))
dest_parent_dir = os.path.dirname(dest_path)
if not os.path.isdir(dest_parent_dir):
raise IOError("Destination directory %s does not exist" % dest_parent_dir)
os.rename(src_path, dest_path)
def check_dir_exists_and_is_writable(dir_path, description):
if not os.path.isdir(dir_path):
raise IOError("%s directory %s does not exist" % (description, dir_path))
if not os.access(dir_path, os.W_OK):
raise IOError("%s directory %s is not writable by current user (%s)" % (
description, dir_path, getpass.getuser()))
# From https://github.com/ianlini/mkdir-p/blob/master/mkdir_p/mkdir_p.py
def mkdir_p(path, mode=0o777):
try:
os.makedirs(path, mode=mode)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def exists_or_is_link(dest):
"""
A file could be a link to a non-existent directory, or to a directory owned by a different
user in a directory with sticky bit set. In such cases os.path.exists might return false, but
islink will return true.
"""
return os.path.exists(dest) or os.path.islink(dest)
def download_and_extract(url, dest_dir_parent, local_cache_dir, nfs_cache_dir):
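"""
Downloads the archive and its .sha256 checksum (reusing the local or NFS cache when a
verified copy is already present), extracts it into a temporary directory, and moves the
extracted directory into dest_dir_parent. Linuxbrew archives get special symlink handling.
"""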
tar_gz_name = os.path.basename(url)
checksum_file_name = tar_gz_name + CHECKSUM_EXTENSION
install_dir_name = tar_gz_name[:-len(EXPECTED_ARCHIVE_EXTENSION)]
dest_dir = os.path.join(dest_dir_parent, install_dir_name)
if os.path.isdir(dest_dir):
logging.info("Directory %s already exists, no need to install." % dest_dir)
return
if not os.path.isdir(local_cache_dir):
logging.info("Directory %s does not exist, trying to create", local_cache_dir)
try:
mkdir_p(local_cache_dir)
except Exception as ex:
logging.info("Failed creating directory '%s': %s", local_cache_dir, ex)
check_dir_exists_and_is_writable(local_cache_dir, "Local cache")
if not url.endswith(EXPECTED_ARCHIVE_EXTENSION):
raise ValueError("Archive download URL is expected to end with %s, got: %s" % (
EXPECTED_ARCHIVE_EXTENSION, url))
if os.path.isdir(dest_dir):
logging.info("Directory %s already exists, someone must have created it concurrently.",
dest_dir)
return
start_time_sec = time.time()
logging.info("Installing %s into directory %s", url, dest_dir)
tmp_dir_prefix = os.path.abspath(os.path.join(dest_dir_parent, install_dir_name + '.tmp.'))
mkdir_p(dest_dir_parent)
tmp_dir = tempfile.mkdtemp(prefix=tmp_dir_prefix)
def cleanup():
if os.path.isdir(tmp_dir):
run_cmd(['rm', '-rf', tmp_dir])
atexit.register(cleanup)
for cache_dir in [local_cache_dir, nfs_cache_dir]:
cached_tar_gz_path = os.path.join(cache_dir, tar_gz_name)
cached_checksum_path = cached_tar_gz_path + CHECKSUM_EXTENSION
tar_gz_path = None
if os.path.exists(cached_tar_gz_path) and os.path.exists(cached_checksum_path):
logging.info("Verifying the checksum of %s", cached_tar_gz_path)
if verify_sha256sum(cached_checksum_path, cached_tar_gz_path):
tar_gz_path = os.path.join(cache_dir, tar_gz_name)
break
else:
remove_ignore_errors(cached_tar_gz_path)
remove_ignore_errors(cached_checksum_path)
if tar_gz_path is None:
tmp_tar_gz_path = os.path.join(tmp_dir, tar_gz_name)
tmp_checksum_path = os.path.join(tmp_dir, checksum_file_name)
download_url(url + CHECKSUM_EXTENSION, tmp_checksum_path)
download_url(url, tmp_tar_gz_path)
if not verify_sha256sum(tmp_checksum_path, tmp_tar_gz_path):
raise ValueError("Checksum verification failed for the download of %s" % url)
file_names = [tar_gz_name, checksum_file_name]
for file_name in file_names:
move_file(os.path.join(tmp_dir, file_name),
os.path.join(local_cache_dir, file_name))
tar_gz_path = os.path.join(local_cache_dir, tar_gz_name)
nfs_tar_gz_path = os.path.join(nfs_cache_dir, tar_gz_name)
nfs_checksum_file_path = os.path.join(nfs_cache_dir, checksum_file_name)
if (os.path.isdir(nfs_cache_dir) and
os.access(nfs_cache_dir, os.W_OK) and
(not os.path.exists(nfs_tar_gz_path) or
not os.path.exists(nfs_checksum_file_path))):
for file_name in file_names:
run_cmd(['cp',
os.path.join(local_cache_dir, file_name),
os.path.join(nfs_cache_dir, file_name)])
logging.info("Extracting %s in %s", tar_gz_path, tmp_dir)
run_cmd(['tar', 'xf', tar_gz_path, '-C', tmp_dir])
tmp_extracted_dir = os.path.join(tmp_dir, install_dir_name)
if not os.path.exists(tmp_extracted_dir):
raise IOError(
"Extracted '%s' in '%s' but a directory named '%s' did not appear" % (
tar_gz_path, tmp_dir, tmp_extracted_dir))
if exists_or_is_link(dest_dir):
logging.info("Looks like %s was created concurrently", dest_dir)
return
if install_dir_name.startswith('linuxbrew'):
orig_brew_home_file = os.path.join(tmp_extracted_dir, 'ORIG_BREW_HOME')
if not os.path.exists(orig_brew_home_file):
raise IOError("File '%s' not found after extracting '%s'" % (
orig_brew_home_file, tar_gz_name))
orig_brew_home = read_file_and_strip(orig_brew_home_file)
if not orig_brew_home.startswith(dest_dir):
raise ValueError(
"Original Homebrew/Linuxbrew install home directory is '%s'"
" but we are trying to install it in '%s', and that is not a prefix of"
" the former." % (orig_brew_home, dest_dir))
already_installed_msg = (
"'%s' already exists, cannot move '%s' to it. Someone else must have "
"installed it concurrently. This is OK." % (
orig_brew_home, dest_dir))
def create_brew_symlink_if_needed():
brew_link_src = os.path.basename(orig_brew_home)
# dest_dir will now be a symlink pointing to brew_link_src. We are NOT creating a
# symlink inside dest_dir.
if not exists_or_is_link(dest_dir):
logging.info("Creating a symlink '%s' -> '%s'", dest_dir, brew_link_src)
try:
os.symlink(brew_link_src, dest_dir)
except OSError as os_error:
if os_error.errno == errno.EEXIST:
if exists_or_is_link(dest_dir):
logging.info(
"Symlink '%s' was created concurrently. This is probably OK.",
dest_dir)
else:
err_msg = (
"Failed creating symlink '%s' -> '%s' with error: %s, but the "
"symlink does not actually exist!" % (
dest_dir, brew_link_src, os_error))
logging.error(err_msg)
raise IOError(err_msg)
else:
logging.error("Unexpected error when creating symlink '%s' -> '%s': %s",
dest_dir, brew_link_src, os_error)
raise os_error
assert exists_or_is_link(dest_dir)
if not os.path.islink(dest_dir):
# A defensive sanity check.
err_msg = "%s exists but is not a symbolic link" % dest_dir
logging.error(err_msg)
raise IOError(err_msg)
else:
actual_link_src = os.readlink(dest_dir)
if actual_link_src != brew_link_src:
err_msg = "Symlink %s is not pointing to %s but instead points to %s" % (
dest_dir, brew_link_src, actual_link_src)
logging.error(err_msg)
raise IOError(err_msg)
if os.path.exists(orig_brew_home):
logging.info(already_installed_msg)
create_brew_symlink_if_needed()
return
        logging.info("Moving '%s' to '%s'", tmp_extracted_dir, orig_brew_home)
try:
os.rename(tmp_extracted_dir, orig_brew_home)
        except OSError as io_error:
            # A defensive sanity check in case locking is not working properly.
            if io_error.errno == errno.ENOTEMPTY:
                # For whatever reason, this is what we get when the destination directory
                # already exists.
                logging.info(already_installed_msg)
                create_brew_symlink_if_needed()
                return
            raise
create_brew_symlink_if_needed()
else:
if g_verbose:
logging.info("Moving %s to %s", tmp_extracted_dir, dest_dir)
os.rename(tmp_extracted_dir, dest_dir)
logging.info("Installation of %s took %.1f sec", dest_dir, time.time() - start_time_sec)
def main():
# Created files/directories should be writable by the group.
os.umask(2)
logging.basicConfig(
level=logging.INFO,
format="%(filename)s:%(lineno)d " + socket.gethostname() + " pid " + str(os.getpid()) +
" %(asctime)s %(levelname)s: %(message)s")
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--url', help='URL to download. Must end with .tar.gz.', required=True)
parser.add_argument(
'--dest-dir-parent', help='Parent directory in which to extract the archive',
required=True)
parser.add_argument(
'--local-cache-dir',
default='/opt/yb-build/download_cache',
help='Download cache on the local disk')
parser.add_argument(
'--nfs-cache-dir',
default='/Volumes/n/jenkins/download_cache',
help='Download cache on NFS')
parser.add_argument('--verbose', action='store_true', help='Verbose logging')
args = parser.parse_args()
if args.verbose or os.getenv('YB_VERBOSE') == '1':
global g_verbose
g_verbose = True
download_and_extract(
url=args.url,
dest_dir_parent=args.dest_dir_parent,
local_cache_dir=args.local_cache_dir,
nfs_cache_dir=args.nfs_cache_dir)
if __name__ == '__main__':
main()
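# Example invocation (the file name and URL below are placeholders, not taken from this
# script; --local-cache-dir and --nfs-cache-dir fall back to the defaults declared in
# main() when omitted):
#
#   python download_and_extract_archive.py \
#       --url https://example.com/archives/linuxbrew-20200101.tar.gz \
#       --dest-dir-parent /opt/yb-build/brew \
#       --verbose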
|
if not re.match(r'^[0-9a-f]{64}$', checksum_str):
        raise ValueError(
            "Invalid SHA256 checksum: '%s', expected 64 hex characters" % checksum_str)
|
oven.rs
|
use crate::execution::{ExecutionResult, ExecutionX86, ExecutionX86Arch};
use crate::params::Parameters;
use crate::result::Result;
use crate::stack::Stack;
use memflow::prelude::v1::*;
pub trait Oven<'a> {
fn set_stack(&mut self, stack: Stack) -> Result<()>;
fn set_params(&mut self, params: Parameters<'a>) -> Result<()>;
fn set_entry_point(&mut self, entry_point: Address) -> Result<()>;
fn reflow<'b>(&'b mut self) -> Result<ExecutionResult<'b>>;
}
pub trait OvenBuilder<'a>: Oven<'a> {
fn stack(&mut self, stack: Stack) -> Result<&mut Self> {
Oven::set_stack(self, stack)?;
Ok(self)
}
fn params(&mut self, params: Parameters<'a>) -> Result<&mut Self> {
Oven::set_params(self, params)?;
Ok(self)
}
fn entry_point(&mut self, entry_point: Address) -> Result<&mut Self> {
Oven::set_entry_point(self, entry_point)?;
Ok(self)
}
}
impl<'a, T: Oven<'a> + ?Sized> OvenBuilder<'a> for T {}
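// A rough sketch of how the blanket builder impl above is meant to be chained. The values
// are hypothetical and `Stack::default()` / `Parameters::default()` are assumed constructors,
// so this is illustrative rather than a verified snippet of the crate's API:
//
//     let mut oven = new_oven(&mut process)?;
//     let result = oven
//         .stack(Stack::default())?
//         .params(Parameters::default())?
//         .entry_point(Address::from(0x1000u64))?
//         .reflow()?;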
pub fn new_oven<'a, P: 'a + Process + VirtualMemory>(
process: &'a mut P,
) -> Result<Box<dyn Oven<'a> + 'a>> {
let arch = process.info().proc_arch;
new_oven_with_arch(process, arch)
}
pub fn new_oven_with_arch<'a, V: 'a + VirtualMemory>(
mem: &'a mut V,
arch: ArchitectureIdent,
) -> Result<Box<dyn Oven<'a> + 'a>> {
match arch {
ArchitectureIdent::X86(32, _) => x86_oven(ExecutionX86Arch::X8632, mem),
ArchitectureIdent::X86(64, _) => x86_oven(ExecutionX86Arch::X8664, mem),
ArchitectureIdent::X86(_, _) => unreachable!("invalid x86 bit width"),
ArchitectureIdent::AArch64(_) => Err("AArch64 is not supported yet".into()),
ArchitectureIdent::Unknown => Err("Unknown process architecture".into()),
}
}
fn x86_oven<'a, V: 'a + VirtualMemory>(
arch: ExecutionX86Arch,
mem: &'a mut V,
) -> Result<Box<dyn Oven<'a> + 'a>> {
let execution = ExecutionX86::<V>::new(arch, mem)?;
Ok(Box::new(execution))
}
|
index.js
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
var _exportNames = {
CommonActions: true,
BaseRouter: true,
StackRouter: true,
StackActions: true,
StackActionHelpers: true,
StackActionType: true,
StackRouterOptions: true,
StackNavigationState: true,
TabRouter: true,
TabActions: true,
TabActionHelpers: true,
TabActionType: true,
TabRouterOptions: true,
TabNavigationState: true,
DrawerRouter: true,
DrawerActions: true,
DrawerActionHelpers: true,
DrawerActionType: true,
DrawerRouterOptions: true,
DrawerNavigationState: true
};
Object.defineProperty(exports, "BaseRouter", {
enumerable: true,
get: function get() {
return _BaseRouter.default;
}
});
Object.defineProperty(exports, "StackRouter", {
enumerable: true,
get: function get() {
return _StackRouter.default;
}
});
Object.defineProperty(exports, "StackActions", {
enumerable: true,
get: function get() {
return _StackRouter.StackActions;
}
});
Object.defineProperty(exports, "StackActionHelpers", {
enumerable: true,
get: function get() {
return _StackRouter.StackActionHelpers;
}
});
Object.defineProperty(exports, "StackActionType", {
enumerable: true,
get: function get() {
return _StackRouter.StackActionType;
}
});
Object.defineProperty(exports, "StackRouterOptions", {
enumerable: true,
get: function get() {
return _StackRouter.StackRouterOptions;
}
});
Object.defineProperty(exports, "StackNavigationState", {
enumerable: true,
get: function get() {
return _StackRouter.StackNavigationState;
}
});
Object.defineProperty(exports, "TabRouter", {
enumerable: true,
get: function get() {
return _TabRouter.default;
}
});
Object.defineProperty(exports, "TabActions", {
enumerable: true,
get: function get() {
return _TabRouter.TabActions;
}
});
Object.defineProperty(exports, "TabActionHelpers", {
enumerable: true,
get: function get() {
return _TabRouter.TabActionHelpers;
}
});
Object.defineProperty(exports, "TabActionType", {
enumerable: true,
get: function get() {
return _TabRouter.TabActionType;
}
});
Object.defineProperty(exports, "TabRouterOptions", {
enumerable: true,
get: function get() {
return _TabRouter.TabRouterOptions;
}
});
Object.defineProperty(exports, "TabNavigationState", {
enumerable: true,
get: function get() {
return _TabRouter.TabNavigationState;
}
});
Object.defineProperty(exports, "DrawerRouter", {
enumerable: true,
get: function get() {
return _DrawerRouter.default;
}
});
Object.defineProperty(exports, "DrawerActions", {
enumerable: true,
get: function get() {
return _DrawerRouter.DrawerActions;
}
});
Object.defineProperty(exports, "DrawerActionHelpers", {
enumerable: true,
get: function get() {
return _DrawerRouter.DrawerActionHelpers;
}
});
Object.defineProperty(exports, "DrawerActionType", {
enumerable: true,
get: function get() {
return _DrawerRouter.DrawerActionType;
}
});
Object.defineProperty(exports, "DrawerRouterOptions", {
enumerable: true,
get: function get() {
return _DrawerRouter.DrawerRouterOptions;
}
});
Object.defineProperty(exports, "DrawerNavigationState", {
enumerable: true,
get: function get() {
return _DrawerRouter.DrawerNavigationState;
}
});
exports.CommonActions = void 0;
var CommonActions = _interopRequireWildcard(require("./CommonActions"));
exports.CommonActions = CommonActions;
var _BaseRouter = _interopRequireDefault(require("./BaseRouter"));
var _StackRouter = _interopRequireWildcard(require("./StackRouter"));
var _TabRouter = _interopRequireWildcard(require("./TabRouter"));
var _DrawerRouter = _interopRequireWildcard(require("./DrawerRouter"));
var _types = require("./types");
Object.keys(_types).forEach(function (key) {
if (key === "default" || key === "__esModule") return;
if (Object.prototype.hasOwnProperty.call(_exportNames, key)) return;
Object.defineProperty(exports, key, {
enumerable: true,
get: function get() {
return _types[key];
}
});
});
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _getRequireWildcardCache() { if (typeof WeakMap !== "function") return null; var cache = new WeakMap(); _getRequireWildcardCache = function _getRequireWildcardCache() { return cache; }; return cache; }
function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } if (obj === null || typeof obj !== "object" && typeof obj !== "function") { return { default: obj }; } var cache = _getRequireWildcardCache(); if (cache && cache.has(obj)) { return cache.get(obj); } var newObj = {}; var hasPropertyDescriptor = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = hasPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : null; if (desc && (desc.get || desc.set)) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } newObj.default = obj; if (cache) { cache.set(obj, newObj); } return newObj; }
//# sourceMappingURL=index.js.map
| |
map.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::cmp::Ordering;
use core::fmt::Debug;
use core::hash::{Hash, Hasher};
use core::iter::{FromIterator, Peekable, FusedIterator};
use core::marker::PhantomData;
use core::ops::Index;
use core::{fmt, intrinsics, mem, ptr};
use borrow::Borrow;
use Bound::{Excluded, Included, Unbounded};
use range::RangeArgument;
use super::node::{self, Handle, NodeRef, marker};
use super::search;
use super::node::InsertResult::*;
use super::node::ForceResult::*;
use super::search::SearchResult::*;
use self::UnderflowResult::*;
use self::Entry::*;
/// A map based on a B-Tree.
///
/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing
/// the amount of work performed in a search. In theory, a binary search tree (BST) is the optimal
/// choice for a sorted map, as a perfectly balanced BST performs the theoretical minimum amount of
/// comparisons necessary to find an element (log<sub>2</sub>n). However, in practice the way this
/// is done is *very* inefficient for modern computer architectures. In particular, every element
/// is stored in its own individually heap-allocated node. This means that every single insertion
/// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these
/// are both notably expensive things to do in practice, we are forced to at very least reconsider
/// the BST strategy.
///
/// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing
/// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in
/// searches. However, this does mean that searches will have to do *more* comparisons on average.
/// The precise number of comparisons depends on the node search strategy used. For optimal cache
/// efficiency, one could search the nodes linearly. For optimal comparisons, one could search
/// the node using binary search. As a compromise, one could also perform a linear search
/// that initially only checks every i<sup>th</sup> element for some choice of i.
///
/// Currently, our implementation simply performs naive linear search. This provides excellent
/// performance on *small* nodes of elements which are cheap to compare. However in the future we
/// would like to further explore choosing the optimal search strategy based on the choice of B,
/// and possibly other factors. Using linear search, searching for a random element is expected
/// to take O(B log<sub>B</sub>n) comparisons, which is generally worse than a BST. In practice,
/// however, performance is excellent.
///
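/// As a rough illustration of that bound (assuming the implementation's `B` of about 6):
/// with a million entries a lookup descends roughly log<sub>6</sub>(10<sup>6</sup>) ≈ 8
/// nodes and performs a few dozen cheap, cache-friendly comparisons, while a perfectly
/// balanced BST performs about 20 comparisons but is likely to take a cache miss on most
/// of them.
///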
/// It is a logic error for a key to be modified in such a way that the key's ordering relative to
/// any other key, as determined by the [`Ord`] trait, changes while it is in the map. This is
/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
///
/// [`Ord`]: ../../std/cmp/trait.Ord.html
/// [`Cell`]: ../../std/cell/struct.Cell.html
/// [`RefCell`]: ../../std/cell/struct.RefCell.html
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// // type inference lets us omit an explicit type signature (which
/// // would be `BTreeMap<&str, &str>` in this example).
/// let mut movie_reviews = BTreeMap::new();
///
/// // review some movies.
/// movie_reviews.insert("Office Space", "Deals with real issues in the workplace.");
/// movie_reviews.insert("Pulp Fiction", "Masterpiece.");
/// movie_reviews.insert("The Godfather", "Very enjoyable.");
/// movie_reviews.insert("The Blues Brothers", "Eye lyked it alot.");
///
/// // check for a specific one.
/// if !movie_reviews.contains_key("Les Misérables") {
/// println!("We've got {} reviews, but Les Misérables ain't one.",
/// movie_reviews.len());
/// }
///
/// // oops, this review has a lot of spelling mistakes, let's delete it.
/// movie_reviews.remove("The Blues Brothers");
///
/// // look up the values associated with some keys.
/// let to_find = ["Up!", "Office Space"];
/// for movie in &to_find {
///     match movie_reviews.get(movie) {
///        Some(review) => println!("{}: {}", movie, review),
///        None => println!("{} is unreviewed.", movie)
/// }
/// }
///
/// // iterate over everything.
/// for (movie, review) in &movie_reviews {
/// println!("{}: \"{}\"", movie, review);
/// }
/// ```
///
/// `BTreeMap` also implements an [`Entry API`](#method.entry), which allows
/// for more complex methods of getting, setting, updating and removing keys and
/// their values:
///
/// ```
/// use std::collections::BTreeMap;
///
/// // type inference lets us omit an explicit type signature (which
/// // would be `BTreeMap<&str, u8>` in this example).
/// let mut player_stats = BTreeMap::new();
///
/// fn random_stat_buff() -> u8 {
/// // could actually return some random value here - let's just return
/// // some fixed value for now
/// 42
/// }
///
/// // insert a key only if it doesn't already exist
/// player_stats.entry("health").or_insert(100);
///
/// // insert a key using a function that provides a new value only if it
/// // doesn't already exist
/// player_stats.entry("defence").or_insert_with(random_stat_buff);
///
/// // update a key, guarding against the key possibly not being set
/// let stat = player_stats.entry("attack").or_insert(100);
/// *stat += random_stat_buff();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct BTreeMap<K, V> {
root: node::Root<K, V>,
length: usize,
}
#[stable(feature = "btree_drop", since = "1.7.0")]
unsafe impl<#[may_dangle] K, #[may_dangle] V> Drop for BTreeMap<K, V> {
fn drop(&mut self) {
unsafe {
drop(ptr::read(self).into_iter());
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
fn clone(&self) -> BTreeMap<K, V> {
fn clone_subtree<K: Clone, V: Clone>(node: node::NodeRef<marker::Immut,
K,
V,
marker::LeafOrInternal>)
-> BTreeMap<K, V> {
match node.force() {
Leaf(leaf) => {
let mut out_tree = BTreeMap {
root: node::Root::new_leaf(),
length: 0,
};
{
let mut out_node = match out_tree.root.as_mut().force() {
Leaf(leaf) => leaf,
Internal(_) => unreachable!(),
};
let mut in_edge = leaf.first_edge();
while let Ok(kv) = in_edge.right_kv() {
let (k, v) = kv.into_kv();
in_edge = kv.right_edge();
out_node.push(k.clone(), v.clone());
out_tree.length += 1;
}
}
out_tree
}
Internal(internal) => {
let mut out_tree = clone_subtree(internal.first_edge().descend());
{
let mut out_node = out_tree.root.push_level();
let mut in_edge = internal.first_edge();
while let Ok(kv) = in_edge.right_kv() {
let (k, v) = kv.into_kv();
in_edge = kv.right_edge();
let k = (*k).clone();
let v = (*v).clone();
let subtree = clone_subtree(in_edge.descend());
// We can't destructure subtree directly
// because BTreeMap implements Drop
let (subroot, sublength) = unsafe {
let root = ptr::read(&subtree.root);
let length = subtree.length;
mem::forget(subtree);
(root, length)
};
out_node.push(k, v, subroot);
out_tree.length += 1 + sublength;
}
}
out_tree
}
}
}
clone_subtree(self.root.as_ref())
}
}
impl<K, Q: ?Sized> super::Recover<Q> for BTreeMap<K, ()>
where K: Borrow<Q> + Ord,
Q: Ord
{
type Key = K;
fn get(&self, key: &Q) -> Option<&K> {
match search::search_tree(self.root.as_ref(), key) {
Found(handle) => Some(handle.into_kv().0),
GoDown(_) => None,
}
}
fn take(&mut self, key: &Q) -> Option<K> {
match search::search_tree(self.root.as_mut(), key) {
Found(handle) => {
Some(OccupiedEntry {
handle,
length: &mut self.length,
_marker: PhantomData,
}
.remove_kv()
.0)
}
GoDown(_) => None,
}
}
fn replace(&mut self, key: K) -> Option<K> {
match search::search_tree::<marker::Mut, K, (), K>(self.root.as_mut(), &key) {
Found(handle) => Some(mem::replace(handle.into_kv_mut().0, key)),
GoDown(handle) => {
VacantEntry {
key,
handle,
length: &mut self.length,
_marker: PhantomData,
}
.insert(());
None
}
}
}
}
/// An iterator over the entries of a `BTreeMap`.
///
/// This `struct` is created by the [`iter`] method on [`BTreeMap`]. See its
/// documentation for more.
///
/// [`iter`]: struct.BTreeMap.html#method.iter
/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, K: 'a, V: 'a> {
range: Range<'a, K, V>,
length: usize,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for Iter<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list().entries(self.clone()).finish()
}
}
/// A mutable iterator over the entries of a `BTreeMap`.
///
/// This `struct` is created by the [`iter_mut`] method on [`BTreeMap`]. See its
/// documentation for more.
///
/// [`iter_mut`]: struct.BTreeMap.html#method.iter_mut
/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct IterMut<'a, K: 'a, V: 'a> {
range: RangeMut<'a, K, V>,
length: usize,
}
/// An owning iterator over the entries of a `BTreeMap`.
///
/// This `struct` is created by the [`into_iter`] method on [`BTreeMap`][`BTreeMap`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.BTreeMap.html#method.into_iter
/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<K, V> {
front: Handle<NodeRef<marker::Owned, K, V, marker::Leaf>, marker::Edge>,
back: Handle<NodeRef<marker::Owned, K, V, marker::Leaf>, marker::Edge>,
length: usize,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoIter<K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let range = Range {
front: self.front.reborrow(),
back: self.back.reborrow(),
};
f.debug_list().entries(range).finish()
}
}
/// An iterator over the keys of a `BTreeMap`.
///
/// This `struct` is created by the [`keys`] method on [`BTreeMap`]. See its
/// documentation for more.
///
/// [`keys`]: struct.BTreeMap.html#method.keys
/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Keys<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, K: 'a + fmt::Debug, V: 'a> fmt::Debug for Keys<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list().entries(self.clone()).finish()
}
}
/// An iterator over the values of a `BTreeMap`.
///
/// This `struct` is created by the [`values`] method on [`BTreeMap`]. See its
/// documentation for more.
///
/// [`values`]: struct.BTreeMap.html#method.values
/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Values<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, K: 'a, V: 'a + fmt::Debug> fmt::Debug for Values<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list().entries(self.clone()).finish()
}
}
/// A mutable iterator over the values of a `BTreeMap`.
///
/// This `struct` is created by the [`values_mut`] method on [`BTreeMap`]. See its
/// documentation for more.
///
/// [`values_mut`]: struct.BTreeMap.html#method.values_mut
/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "map_values_mut", since = "1.10.0")]
#[derive(Debug)]
pub struct ValuesMut<'a, K: 'a, V: 'a> {
inner: IterMut<'a, K, V>,
}
/// An iterator over a sub-range of entries in a `BTreeMap`.
///
/// This `struct` is created by the [`range`] method on [`BTreeMap`]. See its
/// documentation for more.
///
/// [`range`]: struct.BTreeMap.html#method.range
/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "btree_range", since = "1.17.0")]
pub struct Range<'a, K: 'a, V: 'a> {
front: Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge>,
back: Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for Range<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list().entries(self.clone()).finish()
}
}
/// A mutable iterator over a sub-range of entries in a `BTreeMap`.
///
/// This `struct` is created by the [`range_mut`] method on [`BTreeMap`]. See its
/// documentation for more.
///
/// [`range_mut`]: struct.BTreeMap.html#method.range_mut
/// [`BTreeMap`]: struct.BTreeMap.html
#[stable(feature = "btree_range", since = "1.17.0")]
pub struct RangeMut<'a, K: 'a, V: 'a> {
front: Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>,
back: Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>,
// Be invariant in `K` and `V`
_marker: PhantomData<&'a mut (K, V)>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for RangeMut<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let range = Range {
front: self.front.reborrow(),
back: self.back.reborrow(),
};
f.debug_list().entries(range).finish()
}
}
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This `enum` is constructed from the [`entry`] method on [`BTreeMap`].
///
/// [`BTreeMap`]: struct.BTreeMap.html
/// [`entry`]: struct.BTreeMap.html#method.entry
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Entry<'a, K: 'a, V: 'a> {
/// A vacant entry.
#[stable(feature = "rust1", since = "1.0.0")]
Vacant(#[stable(feature = "rust1", since = "1.0.0")]
VacantEntry<'a, K, V>),
/// An occupied entry.
#[stable(feature = "rust1", since = "1.0.0")]
Occupied(#[stable(feature = "rust1", since = "1.0.0")]
OccupiedEntry<'a, K, V>),
}
#[stable(feature= "debug_btree_map", since = "1.12.0")]
impl<'a, K: 'a + Debug + Ord, V: 'a + Debug> Debug for Entry<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Vacant(ref v) => f.debug_tuple("Entry")
.field(v)
.finish(),
Occupied(ref o) => f.debug_tuple("Entry")
.field(o)
.finish(),
}
}
}
/// A view into a vacant entry in a `BTreeMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VacantEntry<'a, K: 'a, V: 'a> {
key: K,
handle: Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>,
length: &'a mut usize,
// Be invariant in `K` and `V`
_marker: PhantomData<&'a mut (K, V)>,
}
#[stable(feature= "debug_btree_map", since = "1.12.0")]
impl<'a, K: 'a + Debug + Ord, V: 'a> Debug for VacantEntry<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("VacantEntry")
.field(self.key())
.finish()
}
}
/// A view into an occupied entry in a `BTreeMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct OccupiedEntry<'a, K: 'a, V: 'a> {
handle: Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV>,
length: &'a mut usize,
    // Be invariant in `K` and `V`
    _marker: PhantomData<&'a mut (K, V)>,
}
#[stable(feature= "debug_btree_map", since = "1.12.0")]
impl<'a, K: 'a + Debug + Ord, V: 'a + Debug> Debug for OccupiedEntry<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("OccupiedEntry")
.field("key", self.key())
.field("value", self.get())
.finish()
}
}
// An iterator for merging two sorted sequences into one
struct MergeIter<K, V, I: Iterator<Item = (K, V)>> {
left: Peekable<I>,
right: Peekable<I>,
}
impl<K: Ord, V> BTreeMap<K, V> {
/// Makes a new empty BTreeMap with a reasonable choice for B.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
///
/// // entries can now be inserted into the empty map
/// map.insert(1, "a");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new() -> BTreeMap<K, V> {
BTreeMap {
root: node::Root::new_leaf(),
length: 0,
}
}
/// Clears the map, removing all values.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut a = BTreeMap::new();
/// a.insert(1, "a");
/// a.clear();
/// assert!(a.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn clear(&mut self) {
// FIXME(gereeter) .clear() allocates
*self = BTreeMap::new();
}
/// Returns a reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but the ordering
/// on the borrowed form *must* match the ordering on the key type.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
/// map.insert(1, "a");
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get<Q: ?Sized>(&self, key: &Q) -> Option<&V>
where K: Borrow<Q>,
Q: Ord
{
match search::search_tree(self.root.as_ref(), key) {
Found(handle) => Some(handle.into_kv().1),
GoDown(_) => None,
}
}
/// Returns `true` if the map contains a value for the specified key.
///
/// The key may be any borrowed form of the map's key type, but the ordering
/// on the borrowed form *must* match the ordering on the key type.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
/// map.insert(1, "a");
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn contains_key<Q: ?Sized>(&self, key: &Q) -> bool
where K: Borrow<Q>,
Q: Ord
{
self.get(key).is_some()
}
/// Returns a mutable reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but the ordering
/// on the borrowed form *must* match the ordering on the key type.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
/// map.insert(1, "a");
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
// See `get` for implementation notes, this is basically a copy-paste with mut's added
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut<Q: ?Sized>(&mut self, key: &Q) -> Option<&mut V>
where K: Borrow<Q>,
Q: Ord
{
match search::search_tree(self.root.as_mut(), key) {
Found(handle) => Some(handle.into_kv_mut().1),
GoDown(_) => None,
}
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical. See the [module-level
/// documentation] for more.
///
/// [module-level documentation]: index.html#insert-and-complex-keys
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
/// assert_eq!(map.insert(37, "a"), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b");
/// assert_eq!(map.insert(37, "c"), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
match self.entry(key) {
Occupied(mut entry) => Some(entry.insert(value)),
Vacant(entry) => {
entry.insert(value);
None
}
}
}
/// Removes a key from the map, returning the value at the key if the key
/// was previously in the map.
///
/// The key may be any borrowed form of the map's key type, but the ordering
/// on the borrowed form *must* match the ordering on the key type.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
/// map.insert(1, "a");
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
where K: Borrow<Q>,
Q: Ord
{
match search::search_tree(self.root.as_mut(), key) {
Found(handle) => {
Some(OccupiedEntry {
handle,
length: &mut self.length,
_marker: PhantomData,
}
.remove())
}
GoDown(_) => None,
}
}
/// Moves all elements from `other` into `Self`, leaving `other` empty.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut a = BTreeMap::new();
/// a.insert(1, "a");
/// a.insert(2, "b");
/// a.insert(3, "c");
///
/// let mut b = BTreeMap::new();
/// b.insert(3, "d");
/// b.insert(4, "e");
/// b.insert(5, "f");
///
/// a.append(&mut b);
///
/// assert_eq!(a.len(), 5);
/// assert_eq!(b.len(), 0);
///
/// assert_eq!(a[&1], "a");
/// assert_eq!(a[&2], "b");
/// assert_eq!(a[&3], "d");
/// assert_eq!(a[&4], "e");
/// assert_eq!(a[&5], "f");
/// ```
#[stable(feature = "btree_append", since = "1.11.0")]
pub fn append(&mut self, other: &mut Self) {
// Do we have to append anything at all?
if other.len() == 0 {
return;
}
// We can just swap `self` and `other` if `self` is empty.
if self.len() == 0 {
mem::swap(self, other);
return;
}
// First, we merge `self` and `other` into a sorted sequence in linear time.
let self_iter = mem::replace(self, BTreeMap::new()).into_iter();
let other_iter = mem::replace(other, BTreeMap::new()).into_iter();
let iter = MergeIter {
left: self_iter.peekable(),
right: other_iter.peekable(),
};
// Second, we build a tree from the sorted sequence in linear time.
self.from_sorted_iter(iter);
self.fix_right_edge();
}
/// Constructs a double-ended iterator over a sub-range of elements in the map.
/// The simplest way is to use the range syntax `min..max`, thus `range(min..max)` will
/// yield elements from min (inclusive) to max (exclusive).
/// The range may also be entered as `(Bound<T>, Bound<T>)`, so for example
/// `range((Excluded(4), Included(10)))` will yield a left-exclusive, right-inclusive
/// range from 4 to 10.
///
/// # Panics
///
/// Panics if range `start > end`.
/// Panics if range `start == end` and both bounds are `Excluded`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::Bound::Included;
///
/// let mut map = BTreeMap::new();
/// map.insert(3, "a");
/// map.insert(5, "b");
/// map.insert(8, "c");
/// for (&key, &value) in map.range((Included(&4), Included(&8))) {
/// println!("{}: {}", key, value);
/// }
/// assert_eq!(Some((&5, &"b")), map.range(4..).next());
/// ```
#[stable(feature = "btree_range", since = "1.17.0")]
pub fn range<T: ?Sized, R>(&self, range: R) -> Range<K, V>
where T: Ord, K: Borrow<T>, R: RangeArgument<T>
{
let root1 = self.root.as_ref();
let root2 = self.root.as_ref();
let (f, b) = range_search(root1, root2, range);
Range { front: f, back: b}
}
/// Constructs a mutable double-ended iterator over a sub-range of elements in the map.
/// The simplest way is to use the range syntax `min..max`, thus `range(min..max)` will
/// yield elements from min (inclusive) to max (exclusive).
/// The range may also be entered as `(Bound<T>, Bound<T>)`, so for example
/// `range((Excluded(4), Included(10)))` will yield a left-exclusive, right-inclusive
/// range from 4 to 10.
///
/// # Panics
///
/// Panics if range `start > end`.
/// Panics if range `start == end` and both bounds are `Excluded`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, i32> = ["Alice", "Bob", "Carol", "Cheryl"].iter()
/// .map(|&s| (s, 0))
/// .collect();
/// for (_, balance) in map.range_mut("B".."Cheryl") {
/// *balance += 100;
/// }
/// for (name, balance) in &map {
/// println!("{} => {}", name, balance);
/// }
/// ```
#[stable(feature = "btree_range", since = "1.17.0")]
pub fn range_mut<T: ?Sized, R>(&mut self, range: R) -> RangeMut<K, V>
where T: Ord, K: Borrow<T>, R: RangeArgument<T>
{
let root1 = self.root.as_mut();
let root2 = unsafe { ptr::read(&root1) };
let (f, b) = range_search(root1, root2, range);
RangeMut {
front: f,
back: b,
_marker: PhantomData,
}
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut count: BTreeMap<&str, usize> = BTreeMap::new();
///
/// // count the number of occurrences of letters in the vec
/// for x in vec!["a","b","a","c","a","b"] {
/// *count.entry(x).or_insert(0) += 1;
/// }
///
/// assert_eq!(count["a"], 3);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn entry(&mut self, key: K) -> Entry<K, V> {
match search::search_tree(self.root.as_mut(), &key) {
Found(handle) => {
Occupied(OccupiedEntry {
handle,
length: &mut self.length,
_marker: PhantomData,
})
}
GoDown(handle) => {
Vacant(VacantEntry {
key,
handle,
length: &mut self.length,
_marker: PhantomData,
})
}
}
}
fn from_sorted_iter<I: Iterator<Item = (K, V)>>(&mut self, iter: I) {
let mut cur_node = last_leaf_edge(self.root.as_mut()).into_node();
// Iterate through all key-value pairs, pushing them into nodes at the right level.
for (key, value) in iter {
// Try to push key-value pair into the current leaf node.
if cur_node.len() < node::CAPACITY {
cur_node.push(key, value);
} else {
// No space left, go up and push there.
let mut open_node;
let mut test_node = cur_node.forget_type();
loop {
match test_node.ascend() {
Ok(parent) => {
let parent = parent.into_node();
if parent.len() < node::CAPACITY {
// Found a node with space left, push here.
open_node = parent;
break;
} else {
// Go up again.
test_node = parent.forget_type();
}
}
Err(node) => {
// We are at the top, create a new root node and push there.
open_node = node.into_root_mut().push_level();
break;
}
}
}
// Push key-value pair and new right subtree.
let tree_height = open_node.height() - 1;
let mut right_tree = node::Root::new_leaf();
for _ in 0..tree_height {
right_tree.push_level();
}
open_node.push(key, value, right_tree);
// Go down to the right-most leaf again.
cur_node = last_leaf_edge(open_node.forget_type()).into_node();
}
self.length += 1;
}
}
fn fix_right_edge(&mut self) {
// Handle underfull nodes, start from the top.
let mut cur_node = self.root.as_mut();
while let Internal(internal) = cur_node.force() {
// Check if right-most child is underfull.
let mut last_edge = internal.last_edge();
let right_child_len = last_edge.reborrow().descend().len();
if right_child_len < node::MIN_LEN {
// We need to steal.
let mut last_kv = match last_edge.left_kv() {
Ok(left) => left,
Err(_) => unreachable!(),
};
last_kv.bulk_steal_left(node::MIN_LEN - right_child_len);
last_edge = last_kv.right_edge();
}
// Go further down.
cur_node = last_edge.descend();
}
}
/// Splits the collection into two at the given key. Returns everything after the given key,
/// including the key.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut a = BTreeMap::new();
/// a.insert(1, "a");
/// a.insert(2, "b");
/// a.insert(3, "c");
/// a.insert(17, "d");
/// a.insert(41, "e");
///
/// let b = a.split_off(&3);
///
/// assert_eq!(a.len(), 2);
/// assert_eq!(b.len(), 3);
///
/// assert_eq!(a[&1], "a");
/// assert_eq!(a[&2], "b");
///
/// assert_eq!(b[&3], "c");
/// assert_eq!(b[&17], "d");
/// assert_eq!(b[&41], "e");
/// ```
#[stable(feature = "btree_split_off", since = "1.11.0")]
pub fn split_off<Q: ?Sized + Ord>(&mut self, key: &Q) -> Self
where K: Borrow<Q>
{
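        // The split walks a single root-to-leaf path: at every level the node is cut at the
        // search position for `key` (the key itself goes to the right half) and the suffix is
        // moved into the node at the same level of `right`. Afterwards `self`'s right border
        // and `right`'s left border are repaired, and only the shorter tree has its length
        // recounted; the other length is derived by subtraction from the original total.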
if self.is_empty() {
return Self::new();
}
let total_num = self.len();
let mut right = Self::new();
for _ in 0..(self.root.as_ref().height()) {
right.root.push_level();
}
{
let mut left_node = self.root.as_mut();
let mut right_node = right.root.as_mut();
loop {
let mut split_edge = match search::search_node(left_node, key) {
// key is going to the right tree
Found(handle) => handle.left_edge(),
GoDown(handle) => handle,
};
split_edge.move_suffix(&mut right_node);
match (split_edge.force(), right_node.force()) {
(Internal(edge), Internal(node)) => {
left_node = edge.descend();
right_node = node.first_edge().descend();
}
(Leaf(_), Leaf(_)) => {
break;
}
_ => {
unreachable!();
}
}
}
}
self.fix_right_border();
right.fix_left_border();
if self.root.as_ref().height() < right.root.as_ref().height() {
self.recalc_length();
right.length = total_num - self.len();
} else {
right.recalc_length();
self.length = total_num - right.len();
}
right
}
    /// Recalculates the number of elements, for use when the cached length may be incorrect.
fn recalc_length(&mut self) {
fn dfs<K, V>(node: NodeRef<marker::Immut, K, V, marker::LeafOrInternal>) -> usize {
let mut res = node.len();
if let Internal(node) = node.force() {
let mut edge = node.first_edge();
loop {
res += dfs(edge.reborrow().descend());
match edge.right_kv() {
Ok(right_kv) => {
edge = right_kv.right_edge();
}
Err(_) => {
break;
}
}
}
}
res
}
self.length = dfs(self.root.as_ref());
}
/// Removes empty levels on the top.
fn fix_top(&mut self) {
loop {
{
let node = self.root.as_ref();
if node.height() == 0 || node.len() > 0 {
break;
}
}
self.root.pop_level();
}
}
fn fix_right_border(&mut self) {
self.fix_top();
{
let mut cur_node = self.root.as_mut();
while let Internal(node) = cur_node.force() {
let mut last_kv = node.last_kv();
if last_kv.can_merge() {
cur_node = last_kv.merge().descend();
} else {
let right_len = last_kv.reborrow().right_edge().descend().len();
                    // `MIN_LEN + 1` to avoid readjusting if a merge happens on the next level.
if right_len < node::MIN_LEN + 1 {
last_kv.bulk_steal_left(node::MIN_LEN + 1 - right_len);
}
cur_node = last_kv.right_edge().descend();
}
}
}
self.fix_top();
}
/// The symmetric clone of `fix_right_border`.
fn fix_left_border(&mut self) {
self.fix_top();
{
let mut cur_node = self.root.as_mut();
while let Internal(node) = cur_node.force() {
let mut first_kv = node.first_kv();
if first_kv.can_merge() {
cur_node = first_kv.merge().descend();
} else {
let left_len = first_kv.reborrow().left_edge().descend().len();
if left_len < node::MIN_LEN + 1 {
first_kv.bulk_steal_right(node::MIN_LEN + 1 - left_len);
}
cur_node = first_kv.left_edge().descend();
}
}
}
self.fix_top();
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> IntoIterator for &'a BTreeMap<K, V> {
type Item = (&'a K, &'a V);
type IntoIter = Iter<'a, K, V>;
fn into_iter(self) -> Iter<'a, K, V> {
self.iter()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> {
type Item = (&'a K, &'a V);
fn next(&mut self) -> Option<(&'a K, &'a V)> {
if self.length == 0 {
None
} else {
self.length -= 1;
unsafe { Some(self.range.next_unchecked()) }
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.length, Some(self.length))
}
}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for Iter<'a, K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
if self.length == 0 {
None
} else {
self.length -= 1;
unsafe { Some(self.range.next_back_unchecked()) }
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> ExactSizeIterator for Iter<'a, K, V> {
fn len(&self) -> usize {
self.length
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Iter<'a, K, V> {
fn clone(&self) -> Iter<'a, K, V> {
Iter {
range: self.range.clone(),
length: self.length,
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> IntoIterator for &'a mut BTreeMap<K, V> {
type Item = (&'a K, &'a mut V);
type IntoIter = IterMut<'a, K, V>;
fn into_iter(self) -> IterMut<'a, K, V> {
self.iter_mut()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
if self.length == 0 {
None
} else {
self.length -= 1;
unsafe { Some(self.range.next_unchecked()) }
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.length, Some(self.length))
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> DoubleEndedIterator for IterMut<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
if self.length == 0 {
None
} else {
self.length -= 1;
unsafe { Some(self.range.next_back_unchecked()) }
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> ExactSizeIterator for IterMut<'a, K, V> {
fn len(&self) -> usize {
self.length
}
}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> IntoIterator for BTreeMap<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
fn into_iter(self) -> IntoIter<K, V> {
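        // The map is dismantled by hand: two raw copies of the root are read out, one for the
        // front and one for the back leaf-edge handle, and `mem::forget(self)` prevents
        // `BTreeMap::drop` from also freeing the tree that the returned `IntoIter` now owns.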
let root1 = unsafe { ptr::read(&self.root).into_ref() };
let root2 = unsafe { ptr::read(&self.root).into_ref() };
let len = self.length;
mem::forget(self);
IntoIter {
front: first_leaf_edge(root1),
back: last_leaf_edge(root2),
length: len,
}
}
}
#[stable(feature = "btree_drop", since = "1.7.0")]
impl<K, V> Drop for IntoIter<K, V> {
fn drop(&mut self) {
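        // Drain any remaining pairs first so their destructors run, then walk up from the
        // leftover front leaf and deallocate the chain of now-empty parent nodes.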
for _ in &mut *self {
}
unsafe {
let leaf_node = ptr::read(&self.front).into_node();
if let Some(first_parent) = leaf_node.deallocate_and_ascend() {
let mut cur_node = first_parent.into_node();
while let Some(parent) = cur_node.deallocate_and_ascend() {
cur_node = parent.into_node()
}
}
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (K, V);
fn next(&mut self) -> Option<(K, V)> {
if self.length == 0 {
return None;
} else {
self.length -= 1;
}
let handle = unsafe { ptr::read(&self.front) };
let mut cur_handle = match handle.right_kv() {
Ok(kv) => {
let k = unsafe { ptr::read(kv.reborrow().into_kv().0) };
let v = unsafe { ptr::read(kv.reborrow().into_kv().1) };
self.front = kv.right_edge();
return Some((k, v));
}
Err(last_edge) => unsafe {
unwrap_unchecked(last_edge.into_node().deallocate_and_ascend())
},
};
loop {
match cur_handle.right_kv() {
Ok(kv) => {
let k = unsafe { ptr::read(kv.reborrow().into_kv().0) };
let v = unsafe { ptr::read(kv.reborrow().into_kv().1) };
self.front = first_leaf_edge(kv.right_edge().descend());
return Some((k, v));
}
Err(last_edge) => unsafe {
cur_handle = unwrap_unchecked(last_edge.into_node().deallocate_and_ascend());
},
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.length, Some(self.length))
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> DoubleEndedIterator for IntoIter<K, V> {
fn next_back(&mut self) -> Option<(K, V)> {
if self.length == 0 {
return None;
} else {
self.length -= 1;
}
let handle = unsafe { ptr::read(&self.back) };
let mut cur_handle = match handle.left_kv() {
Ok(kv) => {
let k = unsafe { ptr::read(kv.reborrow().into_kv().0) };
let v = unsafe { ptr::read(kv.reborrow().into_kv().1) };
self.back = kv.left_edge();
return Some((k, v));
}
Err(last_edge) => unsafe {
unwrap_unchecked(last_edge.into_node().deallocate_and_ascend())
},
};
loop {
match cur_handle.left_kv() {
Ok(kv) => {
let k = unsafe { ptr::read(kv.reborrow().into_kv().0) };
let v = unsafe { ptr::read(kv.reborrow().into_kv().1) };
self.back = last_leaf_edge(kv.left_edge().descend());
return Some((k, v));
}
Err(last_edge) => unsafe {
cur_handle = unwrap_unchecked(last_edge.into_node().deallocate_and_ascend());
},
}
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> ExactSizeIterator for IntoIter<K, V> {
fn len(&self) -> usize {
self.length
}
}
#[unstable(feature = "fused", issue = "35602")]
impl<K, V> FusedIterator for IntoIter<K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Keys<'a, K, V> {
type Item = &'a K;
fn next(&mut self) -> Option<&'a K> {
self.inner.next().map(|(k, _)| k)
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> {
fn next_back(&mut self) -> Option<&'a K> {
self.inner.next_back().map(|(k, _)| k)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for Keys<'a, K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Keys<'a, K, V> {
fn clone(&self) -> Keys<'a, K, V> {
Keys { inner: self.inner.clone() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
fn next(&mut self) -> Option<&'a V> {
self.inner.next().map(|(_, v)| v)
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> {
fn next_back(&mut self) -> Option<&'a V> {
self.inner.next_back().map(|(_, v)| v)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for Values<'a, K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Values<'a, K, V> {
fn clone(&self) -> Values<'a, K, V> {
Values { inner: self.inner.clone() }
}
}
#[stable(feature = "btree_range", since = "1.17.0")]
impl<'a, K, V> Iterator for Range<'a, K, V> {
type Item = (&'a K, &'a V);
fn next(&mut self) -> Option<(&'a K, &'a V)> {
if self.front == self.back {
None
} else {
unsafe { Some(self.next_unchecked()) }
}
}
}
#[stable(feature = "map_values_mut", since = "1.10.0")]
impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
type Item = &'a mut V;
fn next(&mut self) -> Option<&'a mut V> {
self.inner.next().map(|(_, v)| v)
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[stable(feature = "map_values_mut", since = "1.10.0")]
impl<'a, K, V> DoubleEndedIterator for ValuesMut<'a, K, V> {
fn next_back(&mut self) -> Option<&'a mut V> {
self.inner.next_back().map(|(_, v)| v)
}
}
#[stable(feature = "map_values_mut", since = "1.10.0")]
impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {}
impl<'a, K, V> Range<'a, K, V> {
unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
let handle = self.front;
let mut cur_handle = match handle.right_kv() {
Ok(kv) => {
let ret = kv.into_kv();
self.front = kv.right_edge();
return ret;
}
Err(last_edge) => {
let next_level = last_edge.into_node().ascend().ok();
unwrap_unchecked(next_level)
}
};
loop {
match cur_handle.right_kv() {
Ok(kv) => {
let ret = kv.into_kv();
self.front = first_leaf_edge(kv.right_edge().descend());
return ret;
}
Err(last_edge) => {
let next_level = last_edge.into_node().ascend().ok();
cur_handle = unwrap_unchecked(next_level);
}
}
}
}
}
#[stable(feature = "btree_range", since = "1.17.0")]
impl<'a, K, V> DoubleEndedIterator for Range<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
if self.front == self.back {
None
} else {
unsafe { Some(self.next_back_unchecked()) }
}
}
}
impl<'a, K, V> Range<'a, K, V> {
unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a V) {
let handle = self.back;
let mut cur_handle = match handle.left_kv() {
Ok(kv) => {
let ret = kv.into_kv();
self.back = kv.left_edge();
return ret;
}
Err(last_edge) => {
let next_level = last_edge.into_node().ascend().ok();
unwrap_unchecked(next_level)
}
};
loop {
match cur_handle.left_kv() {
Ok(kv) => {
let ret = kv.into_kv();
self.back = last_leaf_edge(kv.left_edge().descend());
return ret;
}
Err(last_edge) => {
let next_level = last_edge.into_node().ascend().ok();
cur_handle = unwrap_unchecked(next_level);
}
}
}
}
}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for Range<'a, K, V> {}
#[stable(feature = "btree_range", since = "1.17.0")]
impl<'a, K, V> Clone for Range<'a, K, V> {
fn clone(&self) -> Range<'a, K, V> {
Range {
front: self.front,
back: self.back,
}
}
}
#[stable(feature = "btree_range", since = "1.17.0")]
impl<'a, K, V> Iterator for RangeMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
if self.front == self.back {
None
} else {
unsafe { Some(self.next_unchecked()) }
}
}
}
impl<'a, K, V> RangeMut<'a, K, V> {
unsafe fn next_unchecked(&mut self) -> (&'a K, &'a mut V) {
let handle = ptr::read(&self.front);
let mut cur_handle = match handle.right_kv() {
Ok(kv) => {
let (k, v) = ptr::read(&kv).into_kv_mut();
self.front = kv.right_edge();
return (k, v);
}
Err(last_edge) => {
let next_level = last_edge.into_node().ascend().ok();
unwrap_unchecked(next_level)
}
};
loop {
match cur_handle.right_kv() {
Ok(kv) => {
let (k, v) = ptr::read(&kv).into_kv_mut();
self.front = first_leaf_edge(kv.right_edge().descend());
return (k, v);
}
Err(last_edge) => {
let next_level = last_edge.into_node().ascend().ok();
cur_handle = unwrap_unchecked(next_level);
}
}
}
}
}
#[stable(feature = "btree_range", since = "1.17.0")]
impl<'a, K, V> DoubleEndedIterator for RangeMut<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
if self.front == self.back {
None
} else {
unsafe { Some(self.next_back_unchecked()) }
}
}
}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for RangeMut<'a, K, V> {}
impl<'a, K, V> RangeMut<'a, K, V> {
unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a mut V) {
let handle = ptr::read(&self.back);
let mut cur_handle = match handle.left_kv() {
Ok(kv) => {
let (k, v) = ptr::read(&kv).into_kv_mut();
self.back = kv.left_edge();
return (k, v);
}
Err(last_edge) => {
let next_level = last_edge.into_node().ascend().ok();
unwrap_unchecked(next_level)
}
};
loop {
match cur_handle.left_kv() {
Ok(kv) => {
let (k, v) = ptr::read(&kv).into_kv_mut();
self.back = last_leaf_edge(kv.left_edge().descend());
return (k, v);
}
Err(last_edge) => {
let next_level = last_edge.into_node().ascend().ok();
cur_handle = unwrap_unchecked(next_level);
}
}
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V> FromIterator<(K, V)> for BTreeMap<K, V> {
fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> BTreeMap<K, V> {
let mut map = BTreeMap::new();
map.extend(iter);
map
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V> Extend<(K, V)> for BTreeMap<K, V> {
#[inline]
fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
for (k, v) in iter {
self.insert(k, v);
}
}
}
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, K: Ord + Copy, V: Copy> Extend<(&'a K, &'a V)> for BTreeMap<K, V> {
fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: I) {
self.extend(iter.into_iter().map(|(&key, &value)| (key, value)));
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Hash, V: Hash> Hash for BTreeMap<K, V> {
fn hash<H: Hasher>(&self, state: &mut H) {
for elt in self {
elt.hash(state);
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V> Default for BTreeMap<K, V> {
/// Creates an empty `BTreeMap<K, V>`.
fn default() -> BTreeMap<K, V> {
BTreeMap::new()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: PartialEq, V: PartialEq> PartialEq for BTreeMap<K, V> {
fn eq(&self, other: &BTreeMap<K, V>) -> bool {
self.len() == other.len() && self.iter().zip(other).all(|(a, b)| a == b)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Eq, V: Eq> Eq for BTreeMap<K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: PartialOrd, V: PartialOrd> PartialOrd for BTreeMap<K, V> {
#[inline]
fn partial_cmp(&self, other: &BTreeMap<K, V>) -> Option<Ordering> {
self.iter().partial_cmp(other.iter())
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V: Ord> Ord for BTreeMap<K, V> {
#[inline]
fn cmp(&self, other: &BTreeMap<K, V>) -> Ordering {
self.iter().cmp(other.iter())
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Debug, V: Debug> Debug for BTreeMap<K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_map().entries(self.iter()).finish()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: Ord, Q: ?Sized, V> Index<&'a Q> for BTreeMap<K, V>
where K: Borrow<Q>,
Q: Ord
{
type Output = V;
#[inline]
fn index(&self, key: &Q) -> &V {
self.get(key).expect("no entry found for key")
}
}
fn first_leaf_edge<BorrowType, K, V>
(mut node: NodeRef<BorrowType, K, V, marker::LeafOrInternal>)
-> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
loop {
match node.force() {
Leaf(leaf) => return leaf.first_edge(),
Internal(internal) => {
node = internal.first_edge().descend();
}
}
}
}
fn last_leaf_edge<BorrowType, K, V>
(mut node: NodeRef<BorrowType, K, V, marker::LeafOrInternal>)
-> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
loop {
match node.force() {
Leaf(leaf) => return leaf.last_edge(),
Internal(internal) => {
node = internal.last_edge().descend();
}
}
}
}
fn range_search<BorrowType, K, V, Q: ?Sized, R: RangeArgument<Q>>(
root1: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
root2: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
range: R
)-> (Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>,
Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>)
where Q: Ord, K: Borrow<Q>
{
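    // Reversed (or equal-and-excluded) ranges are rejected up front; after that the two
    // borrowed views of the root are walked down in lock-step, `min_node` tracking the start
    // bound and `max_node` the end bound. While the two paths have not yet diverged, the
    // chosen edge indices are compared so that an inconsistent `Ord` implementation panics
    // instead of producing an invalid range.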
match (range.start(), range.end()) {
(Excluded(s), Excluded(e)) if s==e =>
panic!("range start and end are equal and excluded in BTreeMap"),
(Included(s), Included(e)) |
(Included(s), Excluded(e)) |
(Excluded(s), Included(e)) |
(Excluded(s), Excluded(e)) if s>e =>
panic!("range start is greater than range end in BTreeMap"),
_ => {},
};
let mut min_node = root1;
let mut max_node = root2;
let mut min_found = false;
let mut max_found = false;
let mut diverged = false;
loop {
let min_edge = match (min_found, range.start()) {
(false, Included(key)) => match search::search_linear(&min_node, key) {
(i, true) => { min_found = true; i },
(i, false) => i,
},
(false, Excluded(key)) => match search::search_linear(&min_node, key) {
(i, true) => { min_found = true; i+1 },
(i, false) => i,
},
(_, Unbounded) => 0,
(true, Included(_)) => min_node.keys().len(),
(true, Excluded(_)) => 0,
};
let max_edge = match (max_found, range.end()) {
(false, Included(key)) => match search::search_linear(&max_node, key) {
(i, true) => { max_found = true; i+1 },
(i, false) => i,
},
(false, Excluded(key)) => match search::search_linear(&max_node, key) {
(i, true) => { max_found = true; i },
(i, false) => i,
},
(_, Unbounded) => max_node.keys().len(),
(true, Included(_)) => 0,
(true, Excluded(_)) => max_node.keys().len(),
};
if !diverged {
if max_edge < min_edge { panic!("Ord is ill-defined in BTreeMap range") }
if min_edge != max_edge { diverged = true; }
}
let front = Handle::new_edge(min_node, min_edge);
let back = Handle::new_edge(max_node, max_edge);
match (front.force(), back.force()) {
(Leaf(f), Leaf(b)) => {
return (f, b);
},
(Internal(min_int), Internal(max_int)) => {
min_node = min_int.descend();
max_node = max_int.descend();
},
_ => unreachable!("BTreeMap has different depths"),
};
}
}
#[inline(always)]
unsafe fn unwrap_unchecked<T>(val: Option<T>) -> T {
val.unwrap_or_else(|| {
if cfg!(debug_assertions) {
panic!("'unchecked' unwrap on None in BTreeMap");
} else {
intrinsics::unreachable();
}
})
}
impl<K, V> BTreeMap<K, V> {
/// Gets an iterator over the entries of the map, sorted by key.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
/// map.insert(3, "c");
/// map.insert(2, "b");
/// map.insert(1, "a");
///
/// for (key, value) in map.iter() {
/// println!("{}: {}", key, value);
/// }
///
/// let (first_key, first_value) = map.iter().next().unwrap();
/// assert_eq!((*first_key, *first_value), (1, "a"));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<K, V> {
Iter {
range: Range {
front: first_leaf_edge(self.root.as_ref()),
back: last_leaf_edge(self.root.as_ref()),
},
length: self.length,
}
}
/// Gets a mutable iterator over the entries of the map, sorted by key.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// // add 10 to the value if the key isn't "a"
/// for (key, value) in map.iter_mut() {
/// if key != &"a" {
/// *value += 10;
/// }
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<K, V> {
let root1 = self.root.as_mut();
let root2 = unsafe { ptr::read(&root1) };
IterMut {
range: RangeMut {
front: first_leaf_edge(root1),
back: last_leaf_edge(root2),
_marker: PhantomData,
},
length: self.length,
}
}
/// Gets an iterator over the keys of the map, in sorted order.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut a = BTreeMap::new();
/// a.insert(2, "b");
/// a.insert(1, "a");
///
/// let keys: Vec<_> = a.keys().cloned().collect();
/// assert_eq!(keys, [1, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
Keys { inner: self.iter() }
}
/// Gets an iterator over the values of the map, in order by key.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut a = BTreeMap::new();
/// a.insert(1, "hello");
/// a.insert(2, "goodbye");
///
/// let values: Vec<&str> = a.values().cloned().collect();
/// assert_eq!(values, ["hello", "goodbye"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn values<'a>(&'a self) -> Values<'a, K, V> {
Values { inner: self.iter() }
}
/// Gets a mutable iterator over the values of the map, in order by key.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut a = BTreeMap::new();
/// a.insert(1, String::from("hello"));
/// a.insert(2, String::from("goodbye"));
///
/// for value in a.values_mut() {
/// value.push_str("!");
/// }
///
/// let values: Vec<String> = a.values().cloned().collect();
/// assert_eq!(values, [String::from("hello!"),
/// String::from("goodbye!")]);
/// ```
#[stable(feature = "map_values_mut", since = "1.10.0")]
pub fn values_mut(&mut self) -> ValuesMut<K, V> {
ValuesMut { inner: self.iter_mut() }
}
/// Returns the number of elements in the map.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut a = BTreeMap::new();
/// assert_eq!(a.len(), 0);
/// a.insert(1, "a");
/// assert_eq!(a.len(), 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize {
self.length
}
/// Returns `true` if the map contains no elements.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut a = BTreeMap::new();
/// assert!(a.is_empty());
/// a.insert(1, "a");
/// assert!(!a.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<'a, K: Ord, V> Entry<'a, K, V> {
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn or_insert(self, default: V) -> &'a mut V {
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default),
}
}
/// Ensures a value is in the entry by inserting the result of the default function if empty,
/// and returns a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, String> = BTreeMap::new();
/// let s = "hoho".to_string();
///
/// map.entry("poneyland").or_insert_with(|| s);
///
/// assert_eq!(map["poneyland"], "hoho".to_string());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default()),
}
}
/// Returns a reference to this entry's key.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// assert_eq!(map.entry("poneyland").key(), &"poneyland");
/// ```
#[stable(feature = "map_entry_keys", since = "1.10.0")]
pub fn key(&self) -> &K {
match *self {
Occupied(ref entry) => entry.key(),
Vacant(ref entry) => entry.key(),
}
}
}
impl<'a, K: Ord, V> VacantEntry<'a, K, V> {
/// Gets a reference to the key that would be used when inserting a value
/// through the VacantEntry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// assert_eq!(map.entry("poneyland").key(), &"poneyland");
/// ```
#[stable(feature = "map_entry_keys", since = "1.10.0")]
pub fn key(&self) -> &K {
&self.key
}
/// Take ownership of the key.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
///
/// if let Entry::Vacant(v) = map.entry("poneyland") {
/// v.into_key();
/// }
/// ```
#[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
pub fn into_key(self) -> K {
self.key
}
/// Sets the value of the entry with the `VacantEntry`'s key,
/// and returns a mutable reference to it.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut count: BTreeMap<&str, usize> = BTreeMap::new();
///
/// // count the number of occurrences of letters in the vec
/// for x in vec!["a","b","a","c","a","b"] {
/// *count.entry(x).or_insert(0) += 1;
/// }
///
/// assert_eq!(count["a"], 3);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn insert(self, value: V) -> &'a mut V {
*self.length += 1;
let out_ptr;
let mut ins_k;
let mut ins_v;
let mut ins_edge;
let mut cur_parent = match self.handle.insert(self.key, value) {
(Fit(handle), _) => return handle.into_kv_mut().1,
(Split(left, k, v, right), ptr) => {
ins_k = k;
ins_v = v;
ins_edge = right;
out_ptr = ptr;
left.ascend().map_err(|n| n.into_root_mut())
}
};
loop {
match cur_parent {
Ok(parent) => {
match parent.insert(ins_k, ins_v, ins_edge) {
Fit(_) => return unsafe { &mut *out_ptr },
Split(left, k, v, right) => {
ins_k = k;
ins_v = v;
ins_edge = right;
cur_parent = left.ascend().map_err(|n| n.into_root_mut());
}
}
}
Err(root) => {
root.push_level().push(ins_k, ins_v, ins_edge);
return unsafe { &mut *out_ptr };
}
}
}
}
}
impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> {
/// Gets a reference to the key in the entry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
/// assert_eq!(map.entry("poneyland").key(), &"poneyland");
/// ```
#[stable(feature = "map_entry_keys", since = "1.10.0")]
pub fn key(&self) -> &K {
self.handle.reborrow().into_kv().0
}
/// Take ownership of the key and value from the map.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// // We delete the entry from the map.
/// o.remove_entry();
/// }
///
/// // If now try to get the value, it will panic:
/// // println!("{}", map["poneyland"]);
/// ```
#[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
pub fn remove_entry(self) -> (K, V) {
self.remove_kv()
}
/// Gets a reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// assert_eq!(o.get(), &12);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get(&self) -> &V {
self.handle.reborrow().into_kv().1
}
/// Gets a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let Entry::Occupied(mut o) = map.entry("poneyland") {
/// *o.get_mut() += 10;
/// }
/// assert_eq!(map["poneyland"], 22);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self) -> &mut V {
self.handle.kv_mut().1
}
/// Converts the entry into a mutable reference to its value.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// *o.into_mut() += 10;
/// }
/// assert_eq!(map["poneyland"], 22);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_mut(self) -> &'a mut V {
self.handle.into_kv_mut().1
}
/// Sets the value of the entry with the `OccupiedEntry`'s key,
/// and returns the entry's old value.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(mut o) = map.entry("poneyland") {
/// assert_eq!(o.insert(15), 12);
/// }
/// assert_eq!(map["poneyland"], 15);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn insert(&mut self, value: V) -> V {
mem::replace(self.get_mut(), value)
}
/// Takes the value of the entry out of the map, and returns it.
///
/// # Examples
///
/// ```
/// use std::collections::BTreeMap;
/// use std::collections::btree_map::Entry;
///
/// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// assert_eq!(o.remove(), 12);
/// }
/// // If we try to get "poneyland"'s value, it'll panic:
/// // println!("{}", map["poneyland"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove(self) -> V {
self.remove_kv().1
}
fn remove_kv(self) -> (K, V) {
*self.length -= 1;
let (small_leaf, old_key, old_val) = match self.handle.force() {
Leaf(leaf) => {
let (hole, old_key, old_val) = leaf.remove();
(hole.into_node(), old_key, old_val)
}
Internal(mut internal) => {
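// Removing from an internal node: splice out the in-order successor
// (the leftmost pair of the right subtree) and move it into the slot
// occupied by the key/value pair being removed.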
let key_loc = internal.kv_mut().0 as *mut K;
let val_loc = internal.kv_mut().1 as *mut V;
let to_remove = first_leaf_edge(internal.right_edge().descend()).right_kv().ok();
let to_remove = unsafe { unwrap_unchecked(to_remove) };
let (hole, key, val) = to_remove.remove();
let old_key = unsafe { mem::replace(&mut *key_loc, key) };
let old_val = unsafe { mem::replace(&mut *val_loc, val) };
(hole.into_node(), old_key, old_val)
}
};
// Handle underflow
let mut cur_node = small_leaf.forget_type();
while cur_node.len() < node::CAPACITY / 2 {
match handle_underfull_node(cur_node) {
AtRoot => break,
EmptyParent(_) => unreachable!(),
Merged(parent) => {
if parent.len() == 0 {
// We must be at the root
parent.into_root_mut().pop_level();
break;
} else {
cur_node = parent.forget_type();
}
}
Stole(_) => break,
}
}
(old_key, old_val)
}
}
enum UnderflowResult<'a, K, V> {
AtRoot,
EmptyParent(NodeRef<marker::Mut<'a>, K, V, marker::Internal>),
Merged(NodeRef<marker::Mut<'a>, K, V, marker::Internal>),
Stole(NodeRef<marker::Mut<'a>, K, V, marker::Internal>),
}
fn handle_underfull_node<'a, K, V>(node: NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>)
-> UnderflowResult<'a, K, V> {
let parent = if let Ok(parent) = node.ascend() {
parent
} else {
return AtRoot;
};
let (is_left, mut handle) = match parent.left_kv() {
Ok(left) => (true, left),
Err(parent) => {
match parent.right_kv() {
Ok(right) => (false, right),
Err(parent) => {
return EmptyParent(parent.into_node());
}
}
}
};
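// Prefer merging with the chosen sibling when the combined contents fit in
// one node; otherwise steal a single key/value pair from it through the parent.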
if handle.can_merge() {
Merged(handle.merge().into_node())
} else {
if is_left {
handle.steal_left();
} else {
handle.steal_right();
}
Stole(handle.into_node())
}
}
impl<K: Ord, V, I: Iterator<Item = (K, V)>> Iterator for MergeIter<K, V, I> {
type Item = (K, V);
fn next(&mut self) -> Option<(K, V)> {
let res = match (self.left.peek(), self.right.peek()) {
(Some(&(ref left_key, _)), Some(&(ref right_key, _))) => left_key.cmp(right_key),
(Some(_), None) => Ordering::Less,
(None, Some(_)) => Ordering::Greater,
(None, None) => return None,
};
// Check which element comes first and only advance the corresponding iterator.
// If two keys are equal, take the value from `right`.
match res {
Ordering::Less => self.left.next(),
Ordering::Greater => self.right.next(),
Ordering::Equal => {
self.left.next();
self.right.next()
}
}
}
}
|
// Be invariant in `K` and `V`
_marker: PhantomData<&'a mut (K, V)>,
}
|
adapter_bloodtiesfancom.py
|
# -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team, 2018 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Software: eFiction
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import re
from bs4.element import Tag
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
# py2 vs py3 transition
from ..six import text_type as unicode
from ..six.moves.urllib.error import HTTPError
from .base_adapter import BaseSiteAdapter, makeDate
# By virtue of being recent and requiring both is_adult and user/pass,
# adapter_fanficcastletvnet.py is the best choice for learning to
# write adapters--especially for sites that use the eFiction system.
# Most sites that have ".../viewstory.php?sid=123" in the story URL
# are eFiction.
# For non-eFiction sites, it can be considerably more complex, but
# this is still a good starting point.
# In general an 'adapter' needs to do these six things:
# - 'Register' correctly with the downloader
# - Site Login (if needed)
# - 'Are you adult?' check (if needed--some do one, some the other, some both)
# - Grab the chapter list
# - Grab the story meta-data (some (non-eFiction) adapters have to get it from the author page)
# - Grab the chapter texts
# Search for XXX comments--that's where things are most likely to need changing.
# This function is called by the downloader in all adapter_*.py files
# in this dir to register the adapter class. So it needs to be
# updated to reflect the class below it. That, plus getSiteDomain()
# take care of 'Registering'.
def getClass():
return BloodTiesFansComAdapter # XXX
# Class name has to be unique. Our convention is camel case the
# sitename with Adapter at the end. www is skipped.
class BloodTiesFansComAdapter(BaseSiteAdapter): # XXX
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
self.is_adult=False
# get storyId from url--url validation guarantees query is only sid=1234
self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
# normalized story URL.
# XXX Most sites don't have the /fanfic part. Replace all to remove it usually.
self._setURL('http://' + self.getSiteDomain() + '/fiction/viewstory.php?sid='+self.story.getMetadata('storyId'))
# Each adapter needs to have a unique site abbreviation.
self.story.setMetadata('siteabbrev','btf') # XXX
# The date format will vary from site to site.
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
self.dateformat = "%d %b %Y" # XXX
@staticmethod # must be @staticmethod, don't remove it.
def getSiteDomain():
# The site domain. Does have www here, if it uses it.
return 'bloodties-fans.com' # XXX
@classmethod
def getSiteExampleURLs(cls):
return "http://"+cls.getSiteDomain()+"/fiction/viewstory.php?sid=1234"
def getSiteURLPattern(self):
return re.escape("http://"+self.getSiteDomain()+"/fiction/viewstory.php?sid=")+r"\d+$"
## Login seems to be reasonably standard across eFiction sites.
def needToLoginCheck(self, data):
if 'Registered Users Only' in data \
or 'There is no such account on our website' in data \
or "That password doesn't match the one in our database" in data:
return True
else:
return False
def performLogin(self, url):
params = {}
if self.password:
params['penname'] = self.username
params['password'] = self.password
else:
params['penname'] = self.getConfig("username")
params['password'] = self.getConfig("password")
params['cookiecheck'] = '1'
params['submit'] = 'Submit'
loginUrl = 'http://' + self.getSiteDomain() + '/fiction/user.php?action=login'
logger.debug("Will now login to URL (%s) as (%s)" % (loginUrl,
params['penname']))
d = self._fetchUrl(loginUrl, params)
if "Member Account" not in d : #Member Account
logger.info("Failed to login to URL %s as %s" % (loginUrl,
params['penname']))
raise exceptions.FailedToLogin(url,params['penname'])
return False
else:
return True
## Getting the chapter list and the meta data, plus 'is adult' checking.
def extractChapterUrlsAndMetadata(self):
if self.is_adult or self.getConfig("is_adult"):
# Weirdly, different sites use different warning numbers.
# If the title search below fails, there's a good chance
# you need a different number. print data at that point
# and see what the 'click here to continue' url says.
# Furthermore, there's a couple sites now with more than
# one warning level for different ratings. And they're
# fussy about it. midnightwhispers has three: 4, 2 & 1.
# we'll try 1 first.
addurl = "&ageconsent=ok&warning=4" # XXX
else:
addurl=""
# index=1 makes sure we see the story chapter index. Some
# sites skip that for one-chapter stories.
url = self.url+'&index=1'+addurl
logger.debug("URL: "+url)
try:
data = self._fetchUrl(url)
except HTTPError as e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
# The actual text that is used to announce you need to be an
# adult varies from site to site. Again, print data before
# the title search to troubleshoot.
# Since the warning text can change by warning level, let's
# look for the warning pass url. nfacommunity uses
# &warning= -- actually, so do other sites. Must be an
# eFiction book.
# viewstory.php?sid=561&warning=4
# viewstory.php?sid=561&warning=1
# viewstory.php?sid=561&warning=2
#print data
#m = re.search(r"'viewstory.php\?sid=1882(&amp;warning=4)'",data)
m = re.search(r"'viewstory.php\?sid=\d+((?:&amp;ageconsent=ok)?&amp;warning=\d+)'",data)
if m is not None:
if self.is_adult or self.getConfig("is_adult"):
# We tried the default and still got a warning, so
# let's pull the warning number from the 'continue'
# link and reload data.
addurl = m.group(1)
# correct stupid &amp; error in url.
addurl = addurl.replace("&amp;","&")
url = self.url+'&index=1'+addurl
logger.debug("URL 2nd try: "+url)
try:
data = self._fetchUrl(url)
except HTTPError as e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
else:
raise exceptions.AdultCheckRequired(self.url)
if "Access denied. This story has not been validated by the adminstrators of this site." in data:
raise exceptions.AccessDenied(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.")
# use BeautifulSoup HTML parser to make everything easier to find.
soup = self.make_soup(data)
# print data
# Now go hunting for all the meta data and the chapter list.
## Title
a = soup.find('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$"))
self.story.setMetadata('title',stripHTML(a))
# Find authorid and URL from... author url.
a = soup.find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
self.story.setMetadata('authorId',a['href'].split('=')[1])
self.story.setMetadata('authorUrl','http://'+self.host+'/fiction/'+a['href'])
self.story.setMetadata('author',a.string)
# Find the chapters:
for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+r"&chapter=\d+$")):
# just in case there's tags, like <i> in chapter titles.
self.add_chapter(chapter,'http://'+self.host+'/fiction/'+chapter['href']+addurl)
# eFiction sites don't help us out a lot with their meta data
# formatting, so it's a little ugly.
# utility method
def
|
(d,k):
try:
return d[k]
except:
return ""
listbox = soup.find('div',{'class':'listbox'})
# <strong>Rating:</strong> M<br /> etc
labels = listbox.findAll('strong')
for labelspan in labels:
value = labelspan.nextSibling
label = labelspan.string
if 'Summary' in label:
## Everything until the next strong tag.
svalue = ""
while not isinstance(value,Tag) or value.name != 'strong':
svalue += unicode(value)
value = value.nextSibling
self.setDescription(url,svalue)
#self.story.setMetadata('description',stripHTML(svalue))
if 'Rating' in label:
self.story.setMetadata('rating', value)
if 'Words' in label:
value=re.sub(r"\|",r"",value)
self.story.setMetadata('numWords', value)
if 'Categories' in label:
cats = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
catstext = [cat.string for cat in cats]
for cat in catstext:
self.story.addToList('category',cat.string)
if 'Characters' in label:
chars = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
charstext = [char.string for char in chars]
for char in charstext:
self.story.addToList('characters',char.string)
if 'Completed' in label:
if 'Yes' in value:
self.story.setMetadata('status', 'Completed')
else:
self.story.setMetadata('status', 'In-Progress')
if 'Published' in label:
value=re.sub(r"\|",r"",value)
self.story.setMetadata('datePublished', makeDate(stripHTML(value), self.dateformat))
if 'Updated' in label:
value=re.sub(r"\|",r"",value)
self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat))
# moved outside because they changed *most*, but not *all* labels to <strong>
ships = listbox.findAll('a',href=re.compile(r'browse.php.type=class&(amp;)?type_id=2')) # crappy html: & vs &amp; in url.
shipstext = [ship.string for ship in ships]
for ship in shipstext:
self.story.addToList('ships',ship.string)
genres = listbox.findAll('a',href=re.compile(r'browse.php\?type=class&(amp;)?type_id=1')) # crappy html: & vs &amp; in url.
genrestext = [genre.string for genre in genres]
for genre in genrestext:
self.story.addToList('genre',genre.string)
try:
# Find Series name from series URL.
a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+"))
series_name = a.string
series_url = 'http://'+self.host+'/fiction/'+a['href']
# use BeautifulSoup HTML parser to make everything easier to find.
seriessoup = self.make_soup(self._fetchUrl(series_url))
storyas = seriessoup.findAll('a', href=re.compile(r'^viewstory.php\?sid=\d+$'))
i=1
for a in storyas:
if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')):
self.setSeries(series_name, i)
self.story.setMetadata('seriesUrl',series_url)
break
i+=1
except:
# I find it hard to care if the series parsing fails
pass
# grab the text for an individual chapter.
def getChapterText(self, url):
logger.debug('Getting chapter text from: %s' % url)
soup = self.make_soup(self._fetchUrl(url))
div = soup.find('div', {'id' : 'story'})
if None == div:
raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
return self.utf8FromSoup(url,div)
|
defaultGetattr
|
reconciler_test.go
|
/*
Copyright 2022 TriggerMesh Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azureblobstoragesource
import (
"context"
"fmt"
"net/http"
"testing"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
clientgotesting "k8s.io/client-go/testing"
"knative.dev/eventing/pkg/reconciler/source"
"knative.dev/pkg/apis"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
rt "knative.dev/pkg/reconciler/testing"
"github.com/Azure/azure-sdk-for-go/profiles/latest/eventgrid/mgmt/eventgrid"
"github.com/Azure/azure-sdk-for-go/profiles/latest/eventhub/mgmt/eventhub"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/go-autorest/autorest"
commonv1alpha1 "github.com/triggermesh/triggermesh/pkg/apis/common/v1alpha1"
"github.com/triggermesh/triggermesh/pkg/apis/sources"
"github.com/triggermesh/triggermesh/pkg/apis/sources/v1alpha1"
fakeinjectionclient "github.com/triggermesh/triggermesh/pkg/client/generated/injection/client/fake"
reconcilerv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/injection/reconciler/sources/v1alpha1/azureblobstoragesource"
common "github.com/triggermesh/triggermesh/pkg/reconciler"
. "github.com/triggermesh/triggermesh/pkg/reconciler/testing"
"github.com/triggermesh/triggermesh/pkg/sources/client/azure/storage"
eventtesting "github.com/triggermesh/triggermesh/pkg/testing/event"
)
// adapterCfg is used in every instance of Reconciler defined in reconciler tests.
var adapterCfg = &adapterConfig{
Image: "registry/image:tag",
configs: &source.EmptyVarsGenerator{},
}
func TestReconcileSource(t *testing.T) {
ctor := reconcilerCtor(adapterCfg)
src := newEventSource()
ab := adapterBuilder(adapterCfg)
TestReconcileAdapter(t, ctor, src, ab)
}
// reconcilerCtor returns a Ctor for a source Reconciler.
func reconcilerCtor(cfg *adapterConfig) Ctor {
return func(t *testing.T, ctx context.Context, tr *rt.TableRow, ls *Listers) controller.Reconciler {
esCli := &mockedEventSubscriptionsClient{
eventSubs: getMockEventSubscriptions(tr),
}
ehCli := &mockedEventHubsClient{}
// inject clients into test data so that table tests can perform
// assertions on it
if tr.OtherTestData == nil {
tr.OtherTestData = make(map[string]interface{}, 2)
}
tr.OtherTestData[testEventSubscriptionsClientDataKey] = esCli
tr.OtherTestData[testEventHubsClientDataKey] = ehCli
r := &Reconciler{
cg: staticClientGetter(esCli, ehCli),
adapterCfg: cfg,
}
r.base = NewTestDeploymentReconciler[*v1alpha1.AzureBlobStorageSource](ctx, ls,
ls.GetAzureBlobStorageSourceLister().AzureBlobStorageSources,
)
return reconcilerv1alpha1.NewReconciler(ctx, logging.FromContext(ctx),
fakeinjectionclient.Get(ctx), ls.GetAzureBlobStorageSourceLister(),
controller.GetEventRecorder(ctx), r)
}
}
// newEventSource returns a test source object with a minimal set of pre-filled attributes.
func newEventSource() *v1alpha1.AzureBlobStorageSource {
src := &v1alpha1.AzureBlobStorageSource{
Spec: v1alpha1.AzureBlobStorageSourceSpec{
StorageAccountID: tStorageAccID,
Endpoint: v1alpha1.AzureEventGridSourceEndpoint{
EventHubs: v1alpha1.AzureEventGridSourceDestinationEventHubs{
NamespaceID: tEventHubNamespaceID,
HubName: &tEventHubID.ResourceName,
},
},
Auth: v1alpha1.AzureAuth{
ServicePrincipal: &v1alpha1.AzureServicePrincipal{
TenantID: commonv1alpha1.ValueFromField{
Value: "00000000-0000-0000-0000-000000000000",
},
ClientID: commonv1alpha1.ValueFromField{
Value: "00000000-0000-0000-0000-000000000000",
},
ClientSecret: commonv1alpha1.ValueFromField{
Value: "some_secret",
},
},
},
},
}
// assume finalizer is already set to prevent the generated reconciler
// from generating an extra Patch action
src.Finalizers = []string{sources.AzureBlobStorageSourceResource.String()}
Populate(src)
return src
}
// adapterBuilder returns a slim Reconciler containing only the fields accessed
// by r.BuildAdapter().
func adapterBuilder(cfg *adapterConfig) common.AdapterBuilder[*appsv1.Deployment] {
return &Reconciler{
adapterCfg: cfg,
}
}
// TestReconcileSubscription contains tests specific to the Azure Blob Storage source.
func TestReconcileSubscription(t *testing.T) {
newReconciledAdapter := mustNewReconciledAdapter(t)
newReconciledSource := mustNewReconciledSource(t)
testCases := rt.TableTest{
// Regular lifecycle
{
Name: "Not yet subscribed",
Key: tKey,
Objects: []runtime.Object{
newReconciledSource(),
newReconciledServiceAccount(),
newReconciledRoleBinding(),
newReconciledAdapter(),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: newReconciledSource(subscribed),
}},
WantEvents: []string{
createdEventSubsEvent(),
},
PostConditions: []func(*testing.T, *rt.TableRow){
calledGetEventSubscription(true),
calledCreateUpdateEventSubscription(true),
},
},
{
Name: "Already subscribed and up-to-date",
Key: tKey,
OtherTestData: makeMockEventSubscriptions(true),
Objects: []runtime.Object{
newReconciledSource(subscribed),
newReconciledServiceAccount(),
newReconciledRoleBinding(),
newReconciledAdapter(),
},
PostConditions: []func(*testing.T, *rt.TableRow){
calledGetEventSubscription(true),
calledCreateUpdateEventSubscription(false),
},
},
{
Name: "Already subscribed but outdated",
Key: tKey,
OtherTestData: makeMockEventSubscriptions(false),
Objects: []runtime.Object{
newReconciledSource(subscribed),
newReconciledServiceAccount(),
newReconciledRoleBinding(),
newReconciledAdapter(),
},
WantEvents: []string{
updatedEventSubsEvent(),
},
PostConditions: []func(*testing.T, *rt.TableRow){
calledGetEventSubscription(true),
calledCreateUpdateEventSubscription(true),
},
},
// Finalization
{
Name: "Deletion while subscribed",
Key: tKey,
OtherTestData: makeMockEventSubscriptions(true),
Objects: []runtime.Object{
newReconciledSource(subscribed, deleted),
newReconciledServiceAccount(),
newReconciledRoleBinding(),
newReconciledAdapter(),
},
WantPatches: []clientgotesting.PatchActionImpl{
unsetFinalizerPatch(),
},
WantEvents: []string{
deletedEventSubsEvent(),
finalizedEvent(),
},
PostConditions: []func(*testing.T, *rt.TableRow){
calledGetEventSubscription(false),
calledCreateUpdateEventSubscription(false),
calledDeleteEventSubscription(true),
},
},
{
Name: "Deletion while not subscribed",
Key: tKey,
Objects: []runtime.Object{
newReconciledSource(deleted),
newReconciledServiceAccount(),
newReconciledRoleBinding(),
newReconciledAdapter(),
},
WantPatches: []clientgotesting.PatchActionImpl{
unsetFinalizerPatch(),
},
WantEvents: []string{
skippedDeleteEventSubsEvent(),
finalizedEvent(),
},
PostConditions: []func(*testing.T, *rt.TableRow){
calledGetEventSubscription(false),
calledCreateUpdateEventSubscription(false),
calledDeleteEventSubscription(true),
},
},
}
ctor := reconcilerCtor(adapterCfg)
testCases.Test(t, MakeFactory(ctor))
}
// tNs/tName match the namespace/name set by (reconciler/testing).Populate.
const (
tNs = "testns"
tName = "test"
tKey = tNs + "/" + tName
tEventSubs = "io.triggermesh.azureblobstoragesource." + tNs + "." + tName
)
var (
tSinkURI = &apis.URL{
Scheme: "http",
Host: "default.default.svc.example.com",
Path: "/",
}
tStorageAccID = v1alpha1.AzureResourceID{
SubscriptionID: "00000000-0000-0000-0000-000000000000",
ResourceGroup: "MyGroup",
ResourceProvider: "Microsoft.Storage",
ResourceType: "storageAccounts",
ResourceName: "mystorageaccount",
}
tEventHubNamespaceID = v1alpha1.AzureResourceID{
SubscriptionID: "00000000-0000-0000-0000-000000000000",
ResourceGroup: "MyGroup",
ResourceProvider: "Microsoft.EventHub",
ResourceType: "namespaces",
ResourceName: "MyNamespace",
}
tEventHubID = v1alpha1.AzureResourceID{
SubscriptionID: "00000000-0000-0000-0000-000000000000",
ResourceGroup: "MyGroup",
ResourceProvider: "Microsoft.EventHub",
Namespace: "MyNamespace",
ResourceType: "eventhubs",
ResourceName: "MyEventHub",
}
)
/* Source and receive adapter */
// sourceOption is a functional option for an event source.
type sourceOption func(*v1alpha1.AzureBlobStorageSource)
// newReconciledSource returns a test event source object that is identical to
// what ReconcileKind generates.
func newReconciledSource(opts ...sourceOption) (*v1alpha1.AzureBlobStorageSource, error) {
src := newEventSource()
// assume the sink URI is resolved
src.Spec.Sink.Ref = nil
src.Spec.Sink.URI = tSinkURI
a, err := newReconciledAdapter()
if err != nil {
return nil, err
}
// assume status conditions are already set to True to ensure
// ReconcileKind is a no-op
status := src.GetStatusManager()
status.MarkSink(tSinkURI)
status.PropagateDeploymentAvailability(context.Background(), a, nil)
for _, opt := range opts {
opt(src)
}
return src, nil
}
func mustNewReconciledSource(t *testing.T) func(...sourceOption) *v1alpha1.AzureBlobStorageSource {
return func(opts ...sourceOption) *v1alpha1.AzureBlobStorageSource {
src, err := newReconciledSource(opts...)
require.NoError(t, err)
return src
}
}
// subscribed sets the Subscribed status condition to True and reports the
// resource ID of the destination Event Hub in the source's status.
func subscribed(src *v1alpha1.AzureBlobStorageSource) {
src.Status.MarkSubscribed()
src.Status.EventHubID = &tEventHubID
}
// deleted marks the source as deleted.
func deleted(src *v1alpha1.AzureBlobStorageSource) {
t := metav1.Unix(0, 0)
src.SetDeletionTimestamp(&t)
}
// newReconciledServiceAccount returns a test ServiceAccount object that is
// identical to what ReconcileKind generates.
func newReconciledServiceAccount() *corev1.ServiceAccount {
return NewServiceAccount(newEventSource())()
}
// newReconciledRoleBinding returns a test RoleBinding object that is
// identical to what ReconcileKind generates.
func newReconciledRoleBinding() *rbacv1.RoleBinding {
return NewConfigWatchRoleBinding(newReconciledServiceAccount())()
}
// newReconciledAdapter returns a test receive adapter object that is identical
// to what ReconcileKind generates.
func newReconciledAdapter() (*appsv1.Deployment, error) {
// hack: we need to pass a source which has status.eventHubID already
// set for the deployment to contain an AZURE_HUB_NAME env var with the
// expected value
src := newEventSource()
src.Status.EventHubID = &tEventHubID
adapter, err := adapterBuilder(adapterCfg).BuildAdapter(src, tSinkURI)
if err != nil {
return nil, fmt.Errorf("building adapter object using provided Reconcilable: %w", err)
}
adapter.Status.Conditions = []appsv1.DeploymentCondition{{
Type: appsv1.DeploymentAvailable,
Status: corev1.ConditionTrue,
}}
return adapter, nil
}
func mustNewReconciledAdapter(t *testing.T) func() *appsv1.Deployment {
return func() *appsv1.Deployment {
a, err := newReconciledAdapter()
require.NoError(t, err)
return a
}
}
/* Azure clients */
// staticClientGetter transforms the given client interfaces into a
// ClientGetter.
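// The returned getter ignores the source object and always hands back the same
// mocked clients, which is how the table tests above can observe their calls.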
func staticClientGetter(esCli storage.EventSubscriptionsClient, ehCli storage.EventHubsClient) storage.ClientGetterFunc {
return func(*v1alpha1.AzureBlobStorageSource) (storage.EventSubscriptionsClient, storage.EventHubsClient, error) {
return esCli, ehCli, nil
}
}
type mockedEventSubscriptionsClient struct {
storage.EventSubscriptionsClient
eventSubs mockEventSubscriptions
calledGet bool
calledCreateUpdate bool
calledDelete bool
}
// the fake client expects keys in the format <storage acc id>/<subscription name>
type mockEventSubscriptions map[string]eventgrid.EventSubscription
const testEventSubscriptionsClientDataKey = "esClient"
func (c *mockedEventSubscriptionsClient) Get(ctx context.Context, scope, name string) (eventgrid.EventSubscription, error) {
c.calledGet = true
if len(c.eventSubs) == 0 {
return eventgrid.EventSubscription{}, notFoundAzureErr()
}
sub, ok := c.eventSubs[scope+"/"+name]
if !ok {
return eventgrid.EventSubscription{}, notFoundAzureErr()
}
return sub, nil
}
func (c *mockedEventSubscriptionsClient) CreateOrUpdate(ctx context.Context, scope, name string,
info eventgrid.EventSubscription) (eventgrid.EventSubscriptionsCreateOrUpdateFuture, error) {
c.calledCreateUpdate = true
return eventgrid.EventSubscriptionsCreateOrUpdateFuture{}, nil
}
func (c *mockedEventSubscriptionsClient) Delete(ctx context.Context, scope, name string) (eventgrid.EventSubscriptionsDeleteFuture, error) {
c.calledDelete = true
if len(c.eventSubs) == 0 {
return eventgrid.EventSubscriptionsDeleteFuture{}, notFoundAzureErr()
}
var err error
if _, ok := c.eventSubs[scope+"/"+name]; !ok {
err = notFoundAzureErr()
}
return eventgrid.EventSubscriptionsDeleteFuture{}, err
}
const mockEventSubscriptionsDataKey = "eventSubs"
// makeMockEventSubscriptions returns a mocked list of event subscriptions to
// be used as TableRow data.
func makeMockEventSubscriptions(inSync bool) map[string]interface{} {
sub := newEventSubscription(tEventHubID.String(), newEventSource().GetEventTypes())
sub.ID = to.Ptr("/irrelevant/resource/id")
if !inSync {
// inject arbitrary change to cause comparison to be false
*sub.EventSubscriptionProperties.RetryPolicy.EventTimeToLiveInMinutes++
}
// key format expected by mocked client impl
subKey := tStorageAccID.String() + "/" + tEventSubs
return map[string]interface{}{
mockEventSubscriptionsDataKey: mockEventSubscriptions{
subKey: sub,
},
}
}
// getMockEventSubscriptions gets mocked event subscriptions from the
// TableRow's data.
func getMockEventSubscriptions(tr *rt.TableRow) mockEventSubscriptions {
hubs, ok := tr.OtherTestData[mockEventSubscriptionsDataKey]
if !ok {
return nil
}
return hubs.(mockEventSubscriptions)
}
func calledGetEventSubscription(expectCall bool) func(*testing.T, *rt.TableRow) {
return func(t *testing.T, tr *rt.TableRow) {
cli := tr.OtherTestData[testEventSubscriptionsClientDataKey].(*mockedEventSubscriptionsClient)
if expectCall && !cli.calledGet {
t.Error("Did not call Get() on event subscription")
}
if !expectCall && cli.calledGet
|
}
}
func calledCreateUpdateEventSubscription(expectCall bool) func(*testing.T, *rt.TableRow) {
return func(t *testing.T, tr *rt.TableRow) {
cli := tr.OtherTestData[testEventSubscriptionsClientDataKey].(*mockedEventSubscriptionsClient)
if expectCall && !cli.calledCreateUpdate {
t.Error("Did not call CreateOrUpdate() on event subscription")
}
if !expectCall && cli.calledCreateUpdate {
t.Error("Unexpected call to CreateOrUpdate() on event subscription")
}
}
}
func calledDeleteEventSubscription(expectCall bool) func(*testing.T, *rt.TableRow) {
return func(t *testing.T, tr *rt.TableRow) {
cli := tr.OtherTestData[testEventSubscriptionsClientDataKey].(*mockedEventSubscriptionsClient)
if expectCall && !cli.calledDelete {
t.Error("Did not call Delete() on event subscription")
}
if !expectCall && cli.calledDelete {
t.Error("Unexpected call to Delete() on event subscription")
}
}
}
type mockedEventHubsClient struct {
storage.EventHubsClient
}
const testEventHubsClientDataKey = "ehClient"
func (c *mockedEventHubsClient) Get(ctx context.Context, rg, ns, name string) (eventhub.Model, error) {
return eventhub.Model{}, nil
}
func (c *mockedEventHubsClient) CreateOrUpdate(ctx context.Context, rg, ns, name string, params eventhub.Model) (eventhub.Model, error) {
return eventhub.Model{}, nil
}
func (c *mockedEventHubsClient) Delete(ctx context.Context, rg, ns, name string) (autorest.Response, error) {
return autorest.Response{}, nil
}
func notFoundAzureErr() error {
return autorest.DetailedError{
StatusCode: http.StatusNotFound,
}
}
/* Patches */
func unsetFinalizerPatch() clientgotesting.PatchActionImpl {
return clientgotesting.PatchActionImpl{
Name: tName,
PatchType: types.MergePatchType,
Patch: []byte(`{"metadata":{"finalizers":[],"resourceVersion":""}}`),
}
}
/* Events */
func createdEventSubsEvent() string {
tStorageAccount := tStorageAccID.ResourceName
return eventtesting.Eventf(corev1.EventTypeNormal, ReasonSubscribed,
"Created event subscription %q for storage account %q", tEventSubs, tStorageAccount)
}
func updatedEventSubsEvent() string {
tStorageAccount := tStorageAccID.ResourceName
return eventtesting.Eventf(corev1.EventTypeNormal, ReasonSubscribed,
"Updated event subscription %q for storage account %q", tEventSubs, tStorageAccount)
}
func deletedEventSubsEvent() string {
tStorageAccount := tStorageAccID.ResourceName
return eventtesting.Eventf(corev1.EventTypeNormal, ReasonUnsubscribed,
"Deleted event subscription %q for storage account %q", tEventSubs, tStorageAccount)
}
func skippedDeleteEventSubsEvent() string {
return eventtesting.Eventf(corev1.EventTypeWarning, ReasonUnsubscribed,
"Event subscription not found, skipping deletion")
}
func finalizedEvent() string {
return eventtesting.Eventf(corev1.EventTypeNormal, "FinalizerUpdate", "Updated %q finalizers", tName)
}
|
{
t.Error("Unexpected call to Get() on event subscription")
}
|
plugin.go
|
package mock
import "gitlab.com/browserker/browserk"
type Plugin struct {
NameFn func() string
NameCalled bool
IDFn func() string
IDCalled bool
ConfigFn func() *browserk.PluginConfig
ConfigCalled bool
OptionsFn func() *browserk.PluginOpts
OptionsCalled bool
ReadyFn func(browser browserk.Browser) (bool, error) // ready for injection or whatever, ret true if injected
ReadyCalled bool
OnEventFn func(evt *browserk.PluginEvent)
OnEventCalled bool
}
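// Each method below records that it was called via its *Called flag and then
// delegates to the matching *Fn field, so tests can both stub behaviour and
// assert on invocation.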
func (p *Plugin) Name() string {
p.NameCalled = true
return p.NameFn()
}
func (p *Plugin) ID() string {
p.IDCalled = true
return p.IDFn()
}
func (p *Plugin) Config() *browserk.PluginConfig {
p.ConfigCalled = true
return p.ConfigFn()
}
func (p *Plugin) Options() *browserk.PluginOpts {
p.OptionsCalled = true
return p.OptionsFn()
}
func (p *Plugin) Ready(browser browserk.Browser) (bool, error) {
p.ReadyCalled = true
return p.ReadyFn(browser)
}
func (p *Plugin) OnEvent(evt *browserk.PluginEvent) {
p.OnEventCalled = true
p.OnEventFn(evt)
}
func
|
() *Plugin {
p := &Plugin{}
p.NameFn = func() string {
return "TestPlugin"
}
p.IDFn = func() string {
return "BR-P-9999"
}
p.ConfigFn = func() *browserk.PluginConfig {
return &browserk.PluginConfig{
Class: "",
Plugin: "",
Language: "Go",
ID: 9,
}
}
p.OptionsFn = func() *browserk.PluginOpts {
return &browserk.PluginOpts{
IsolatedRequests: true,
WriteResponses: true,
WriteRequests: true,
WriteJS: true,
ListenResponses: true,
ListenRequests: true,
ListenStorage: true,
ListenCookies: true,
ListenConsole: true,
ListenURL: true,
ListenJS: true,
ExecutionType: browserk.ExecAlways,
Mimes: nil,
Injections: nil,
}
}
p.OnEventFn = func(evt *browserk.PluginEvent) {}
return p
}
|
MakeMockPlugin
|
validation_service_request.py
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class ValidationServiceRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ValidationServiceRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'date_import_ended': 'datetime',
'file_url': 'str'
}
self.attribute_map = {
'date_import_ended': 'dateImportEnded',
'file_url': 'fileUrl'
}
self._date_import_ended = None
self._file_url = None
@property
def date_import_ended(self):
"""
Gets the date_import_ended of this ValidationServiceRequest.
The last day of the data you are importing. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The date_import_ended of this ValidationServiceRequest.
:rtype: datetime
"""
return self._date_import_ended
@date_import_ended.setter
def date_import_ended(self, date_import_ended):
"""
Sets the date_import_ended of this ValidationServiceRequest.
The last day of the data you are importing. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param date_import_ended: The date_import_ended of this ValidationServiceRequest.
:type: datetime
"""
self._date_import_ended = date_import_ended
@property
def file_url(self):
"""
Gets the file_url of this ValidationServiceRequest.
Path to the file in the storage including the file name
:return: The file_url of this ValidationServiceRequest.
:rtype: str
"""
return self._file_url
@file_url.setter
def file_url(self, file_url):
"""
Sets the file_url of this ValidationServiceRequest.
Path to the file in the storage including the file name
:param file_url: The file_url of this ValidationServiceRequest.
:type: str
"""
self._file_url = file_url
def
|
(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
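# Hedged usage sketch (assumes the surrounding generated package is importable;
# the exact JSON shape depends on sanitize_for_serialization):
#   req = ValidationServiceRequest()
#   req.file_url = 'storage/imports/data.csv'   # illustrative path only
#   as_dict = req.to_dict()   # plain dict keyed by the python attribute names
#   as_json = req.to_json()   # JSON string built from the sanitized dict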
|
to_dict
|
retry_test.go
|
// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package retry_test // import "gopkg.in/retry.v1"
import (
"time"
"github.com/juju/testing"
gc "gopkg.in/check.v1"
"github.com/juju/utils/clock"
"gopkg.in/retry.v1"
)
type retrySuite struct{}
var _ = gc.Suite(&retrySuite{})
func (*retrySuite) TestAttemptTiming(c *gc.C) {
testAttempt := retry.Regular{
Total: 0.25e9,
Delay: 0.1e9,
}
want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
t0 := time.Now()
a := testAttempt.Start(nil)
for a.Next() {
got = append(got, time.Now().Sub(t0))
}
got = append(got, time.Now().Sub(t0))
c.Assert(a.Stopped(), gc.Equals, false)
c.Assert(got, gc.HasLen, len(want))
const margin = 0.01e9
for i, got := range got {
lo := want[i] - margin
hi := want[i] + margin
if got < lo || got > hi {
c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds())
}
}
}
func (*retrySuite) TestAttemptNextMore(c *gc.C) {
a := retry.Regular{}.Start(nil)
c.Assert(a.Next(), gc.Equals, true)
c.Assert(a.Next(), gc.Equals, false)
a = retry.Regular{}.Start(nil)
c.Assert(a.Next(), gc.Equals, true)
c.Assert(a.More(), gc.Equals, false)
c.Assert(a.Next(), gc.Equals, false)
a = retry.Regular{Total: 2e8}.Start(nil)
c.Assert(a.Next(), gc.Equals, true)
c.Assert(a.More(), gc.Equals, true)
time.Sleep(2e8)
c.Assert(a.More(), gc.Equals, true)
c.Assert(a.Next(), gc.Equals, true)
c.Assert(a.Next(), gc.Equals, false)
a = retry.Regular{Total: 1e8, Min: 2}.Start(nil)
time.Sleep(1e8)
c.Assert(a.Next(), gc.Equals, true)
c.Assert(a.More(), gc.Equals, true)
c.Assert(a.Next(), gc.Equals, true)
c.Assert(a.More(), gc.Equals, false)
c.Assert(a.Next(), gc.Equals, false)
}
func (*retrySuite) TestAttemptWithStop(c *gc.C) {
stop := make(chan struct{})
close(stop)
done := make(chan struct{})
go func() {
strategy := retry.Regular{
Delay: 5 * time.Second,
Total: 30 * time.Second,
}
a := retry.StartWithCancel(strategy, nil, stop)
for a.Next() {
c.Errorf("unexpected attempt")
}
c.Check(a.Stopped(), gc.Equals, true)
close(done)
}()
assertReceive(c, done, "attempt loop abort")
}
func (*retrySuite) TestAttemptWithLaterStop(c *gc.C) {
clock := testing.NewClock(time.Now())
stop := make(chan struct{})
done := make(chan struct{})
progress := make(chan struct{}, 10)
go func() {
strategy := retry.Regular{
Delay: 5 * time.Second,
Total: 30 * time.Second,
}
a := retry.StartWithCancel(strategy, clock, stop)
for a.Next() {
progress <- struct{}{}
}
c.Check(a.Stopped(), gc.Equals, true)
close(done)
}()
assertReceive(c, progress, "progress")
clock.Advance(5 * time.Second)
assertReceive(c, progress, "progress")
clock.Advance(2 * time.Second)
close(stop)
assertReceive(c, done, "attempt loop abort")
select {
case <-progress:
c.Fatalf("unxpected loop iteration after stop")
default:
}
}
func (*retrySuite) TestAttemptWithMockClock(c *gc.C) {
clock := testing.NewClock(time.Now())
strategy := retry.Regular{
Delay: 5 * time.Second,
Total: 30 * time.Second,
}
progress := make(chan struct{})
done := make(chan struct{})
go func() {
for a := strategy.Start(clock); a.Next(); {
progress <- struct{}{}
}
close(done)
}()
assertReceive(c, progress, "progress first time")
clock.Advance(5 * time.Second)
assertReceive(c, progress, "progress second time")
clock.Advance(5 * time.Second)
assertReceive(c, progress, "progress third time")
clock.Advance(30 * time.Second)
assertReceive(c, progress, "progress fourth time")
assertReceive(c, done, "loop finish")
}
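// strategyTest drives testStrategy below: each entry replays the Next calls
// described in `calls` against `strategy`, and when `terminates` is set the
// final call is expected to report that the strategy has been exhausted.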
type strategyTest struct {
about string
strategy retry.Strategy
calls []nextCall
terminates bool
}
type nextCall struct {
// t holds the time since the timer was started that
// the Next call will be made.
t time.Duration
// delay holds the length of time that a call made at
// time t is expected to sleep for.
sleep time.Duration
}
var strategyTests = []strategyTest{{
about: "regular retry (same params as TestAttemptTiming)",
strategy: retry.Regular{
Total: 0.25e9,
Delay: 0.1e9,
},
calls: []nextCall{
{0, 0},
{0, 0.1e9},
{0.1e9, 0.1e9},
{0.2e9, 0},
},
terminates: true,
}, {
about: "regular retry with calls at different times",
strategy: retry.Regular{
Total: 2.5e9,
Delay: 1e9,
},
calls: []nextCall{
{0.5e9, 0},
{0.5e9, 0.5e9},
{1.1e9, 0.9e9},
{2.2e9, 0},
},
terminates: true,
}, {
about: "regular retry with call after next deadline",
strategy: retry.Regular{
Total: 3.5e9,
Delay: 1e9,
},
calls: []nextCall{
{0.5e9, 0},
// We call Next at well beyond the deadline,
// so we get a zero delay, but subsequent events
// resume pace.
{2e9, 0},
{2.1e9, 0.9e9},
{3e9, 0},
},
terminates: true,
}, {
about: "exponential retry",
strategy: retry.Exponential{
Initial: 1e9,
Factor: 2,
},
calls: []nextCall{
{0, 0},
{0.1e9, 0.9e9},
{1e9, 2e9},
{3e9, 4e9},
{7e9, 8e9},
},
}, {
about: "time-limited exponential retry",
strategy: retry.LimitTime(5e9, retry.Exponential{
Initial: 1e9,
Factor: 2,
}),
calls: []nextCall{
{0, 0},
{0.1e9, 0.9e9},
{1e9, 2e9},
{3e9, 0},
},
terminates: true,
}, {
about: "count-limited exponential retry",
strategy: retry.LimitCount(2, retry.Exponential{
Initial: 1e9,
Factor: 2,
}),
calls: []nextCall{
{0, 0},
{0.1e9, 0.9e9},
{1e9, 0},
},
terminates: true,
}}
func (*retrySuite) TestStrategies(c *gc.C) {
for i, test := range strategyTests {
c.Logf("test %d: %s", i, test.about)
testStrategy(c, test)
}
}
func
|
(c *gc.C, test strategyTest) {
t0 := time.Now()
clk := &mockClock{
now: t0,
}
a := retry.Start(test.strategy, clk)
for i, call := range test.calls {
c.Logf("call %d - %v", i, call.t)
clk.now = t0.Add(call.t)
ok := a.Next()
expectTerminate := test.terminates && i == len(test.calls)-1
c.Assert(ok, gc.Equals, !expectTerminate)
if got, want := clk.now.Sub(t0), call.t+call.sleep; !closeTo(got, want) {
c.Fatalf("incorrect time after Next; got %v want %v", got, want)
}
if ok {
c.Assert(a.Count(), gc.Equals, i+1)
}
}
}
func (*retrySuite) TestGapBetweenMoreAndNext(c *gc.C) {
t0 := time.Now().UTC()
clk := &mockClock{
now: t0,
}
a := (&retry.Regular{
Min: 3,
Delay: time.Second,
}).Start(clk)
c.Assert(a.Next(), gc.Equals, true)
c.Assert(clk.now, gc.Equals, t0)
clk.now = clk.now.Add(500 * time.Millisecond)
// Sanity check that the first iteration sleeps for half a second.
c.Assert(a.More(), gc.Equals, true)
c.Assert(a.Next(), gc.Equals, true)
c.Assert(clk.now.Sub(t0), gc.Equals, t0.Add(time.Second).Sub(t0))
clk.now = clk.now.Add(500 * time.Millisecond)
c.Assert(a.More(), gc.Equals, true)
// Add a delay between calling More and Next.
// Next should wait until the correct time anyway.
clk.now = clk.now.Add(250 * time.Millisecond)
c.Assert(a.More(), gc.Equals, true)
c.Assert(a.Next(), gc.Equals, true)
c.Assert(clk.now.Sub(t0), gc.Equals, t0.Add(2*time.Second).Sub(t0))
}
func (*retrySuite) TestOnlyOneHitOnZeroTotal(c *gc.C) {
t0 := time.Now().UTC()
clk := &mockClock{
now: t0,
}
a := (&retry.Regular{
Total: 0,
Delay: 0,
Min: 0,
}).Start(clk)
// Even if the clock didn't advance we want to have only one hit
c.Check(a.Next(), gc.Equals, true)
c.Check(a.More(), gc.Equals, false)
}
// closeTo reports whether d0 and d1 are close enough
// to one another to cater for inaccuracies of floating point arithmetic.
func closeTo(d0, d1 time.Duration) bool {
const margin = 20 * time.Nanosecond
diff := d1 - d0
if diff < 0 {
diff = -diff
}
return diff < margin
}
type mockClock struct {
clock.Clock
now time.Time
sleep func(d time.Duration)
}
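// After advances the fake clock by d and hands back an already-closed channel,
// so a caller blocked on it resumes immediately while the recorded time still
// reflects the full requested sleep.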
func (c *mockClock) After(d time.Duration) <-chan time.Time {
c.now = c.now.Add(d)
ch := make(chan time.Time)
close(ch)
return ch
}
func (c *mockClock) Now() time.Time {
return c.now
}
func assertReceive(c *gc.C, ch <-chan struct{}, what string) {
select {
case <-ch:
case <-time.After(time.Second):
c.Fatalf("timed out waiting for %s", what)
}
}
|
testStrategy
|
cli.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import inspect
import json
import logging
|
import os
import pkgutil
import warnings
from collections import namedtuple, defaultdict
from importlib import import_module
from pprint import pprint
ArgInfo = namedtuple('ArgInfo', ['name', 'type', 'required', 'default', 'doc'])
def sasctl_command(name, subname=None):
"""Decorator that tags the function as being usable from the command line.
Parameters
----------
name : str
the name of the command that will be shown on the command line.
subname : str
the name of the service that the command will be listed under
Returns
-------
function
Examples
--------
Define a command called 'cmd' not associated with a service
>>> @sasctl_command('cmd')
>>> def func():
...
Define a command called 'cmd' associated with the 'svc' service
>>> @sasctl_command('svc', 'cmd')
>>> def func():
...
Define a command and allow its name and service to be auto-assigned
>>> @sasctl_command
>>> def func():
...
"""
def decorator(func):
if isinstance(name, str):
if isinstance(subname, str):
command_name = subname
service_name = name
else:
command_name = name
service_name = subname
else:
command_name = func.__name__
if any(
command_name.startswith(x)
for x in ['list_', 'update_', 'get_', 'create_', 'delete_']
):
parts = command_name.split('_')
command_name = parts[0]
service_name = parts[-1]
else:
service_name = subname
def parse_args():
"""Retrieve argument metadata from function signature and docstring."""
arg_spec = inspect.getargspec(func)
defaults = list(arg_spec.defaults) if arg_spec.defaults is not None else []
required = [True] * (len(arg_spec.args) - len(defaults)) + [False] * len(
defaults
)
defaults = [None] * (len(arg_spec.args) - len(defaults)) + defaults
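# Pad the defaults so they line up one-to-one with arg_spec.args: positional
# parameters without a default get None here and were already marked required above.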
types = []
help_doc = []
doc = inspect.getdoc(func)
if doc and doc.find('Parameters\n'):
doc_lines = doc[doc.find('Parameters\n') :].splitlines()
doc_lines.pop(0) # First line is "Parameters"
if doc_lines and doc_lines[0].startswith('---'):
doc_lines.pop(
0
) # Discard ----------- line under "Parameters" heading
while doc_lines:
var = doc_lines.pop(0)
if var.startswith('Returns') or var.strip() == '':
break
if ':' in var:
types.append(var.split(':')[-1].strip())
else:
types.append('str')
if doc_lines and doc_lines[0].startswith(' '):
help_doc.append(doc_lines.pop(0).strip())
else:
help_doc.append('')
else:
types = ['str'] * len(arg_spec.args)
help_doc = [None] * len(arg_spec.args)
return [
ArgInfo(n, t, r, d, o)
for n, t, r, d, o in zip(
arg_spec.args, types, required, defaults, help_doc
)
]
func._cli_command = command_name
func._cli_service = service_name
func._cli_arguments = parse_args
return func
if callable(name):
# allow direct decoration without arguments
return decorator(name)
return decorator
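# Illustrative sketch (not part of the original module): how the decorator tags a
# function so that _find_services() can discover it later. The function and names
# below are hypothetical.
# >>> @sasctl_command('models', 'list')
# ... def list_models():
# ...     """List registered models."""
# >>> list_models._cli_service, list_models._cli_command
# ('models', 'list')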
def _find_services(module='sasctl'):
"""Recursively find all functions in all modules that have been decorated as CLI commands."""
m = __import__(module, fromlist=['']) # returns a module
def find_recurse(module, services):
for obj in dir(module):
obj = getattr(module, obj)
source_module = getattr(obj, '__module__', type(obj).__module__)
# Module-level functions that are tagged as commands
if hasattr(obj, '_cli_command') and hasattr(obj, '_cli_service'):
services[obj._cli_service][obj._cli_command] = obj
# Check methods on service classes
elif source_module.startswith('sasctl._services'):
for atr in dir(obj):
atr = getattr(obj, atr)
if hasattr(atr, '_cli_command') and hasattr(atr, '_cli_service'):
services[atr._cli_service][atr._cli_command] = atr
# recurse into submodules
submodules = pkgutil.iter_modules(getattr(module, '__path__', []))
for submodule in submodules:
# ModuleInfo returned py 3.6 has .name
# Tuple of (module_loader, name, ispkg) returned by older versions
submodule_name = getattr(submodule, 'name', submodule[1])
# TODO: Temporary until pzmm fully merged with sasctl
if submodule_name == 'pzmm':
continue
submodule = import_module('.' + submodule_name, package=module.__name__)
# if hasattr(submodule, 'name'):
# # ModuleInfo returned py 3.6
# submodule = import_module('.' + submodule.name, package=module.__name__)
# else:
# # Tuple of (module_loader, name, ispkg) returned by older versions
# submodule = import_module('.' + submodule[1], package=module.__name__)
services = find_recurse(submodule, services)
return services
services = find_recurse(m, defaultdict(dict))
return services
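# Sketch of the mapping returned above (service and command names are illustrative):
# defaultdict(dict, {'folders': {'list': <function>, 'get': <function>},
#                    'models': {'create': <function>}})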
def _get_func_description(func):
    description = getattr(func, '__doc__', '') or ''  # __doc__ may be None
lines = description.split('\n')
if lines:
return lines[0]
def _build_parser(services):
from sasctl import __version__
# TODO: Set command docstring
# Create standard, top-level arguments
parser = argparse.ArgumentParser(
prog='sasctl', description='sasctl interacts with a SAS Viya environment.'
)
parser.add_argument(
'-k', '--insecure', action='store_true', help='skip SSL verification'
)
parser.add_argument(
'-f', '--format', choices=['json'], default='json', help='output format'
)
parser.add_argument('-v', '--verbose', action='count')
parser.add_argument(
'--version', action='version', version='%(prog)s ' + __version__
)
subparsers = parser.add_subparsers(title='service', dest='service')
subparsers.required = True
for service, commands in services.items():
service_parser = subparsers.add_parser(service)
service_subparser = service_parser.add_subparsers(
title='command', dest='command'
)
service_subparser.required = True
# Add the command and arguments for each command
for command in commands:
func = services[service][command]
cmd_parser = service_subparser.add_parser(
command, help=_get_func_description(func)
)
for arg in func._cli_arguments():
if arg.name in ('self', 'cls'):
continue
if arg.required:
cmd_parser.add_argument(arg.name, help=arg.doc)
else:
cmd_parser.add_argument(
'--' + arg.name,
required=arg.required,
default=arg.default,
help=arg.doc,
)
return parser
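# Sketch of the command-line shape produced by the parser above; the available
# service and command names depend on what _find_services() discovered, so the
# example below is illustrative only:
#   sasctl [-k] [-v] <service> <command> <required args> [--optional-arg VALUE]
#   e.g. sasctl -k folders list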
def main(args=None):
"""Main entry point when executed as a command line utility."""
from sasctl import Session, current_session
# Find all services and associated commands
services = _find_services()
parser = _build_parser(services)
args = parser.parse_args(args)
if args.verbose is None or args.verbose == 0:
lvl = logging.WARNING
elif args.verbose == 1:
lvl = logging.INFO
else:
lvl = logging.DEBUG
handler = logging.StreamHandler()
handler.setLevel(lvl)
logging.getLogger('sasctl.core').addHandler(handler)
logging.getLogger('sasctl.core').setLevel(lvl)
warnings.simplefilter('ignore')
func = services[args.service][args.command]
kwargs = vars(args).copy()
# Remove args that shouldn't be passed to the underlying command
for k in ['command', 'service', 'insecure', 'verbose', 'format']:
kwargs.pop(k, None)
username = os.environ.get('SASCTL_USER_NAME')
password = os.environ.get('SASCTL_PASSWORD')
server = os.environ.get('SASCTL_SERVER_NAME')
if server is None:
parser.error(
"Hostname must be specified in the 'SASCTL_SERVER_NAME' environment variable."
)
verify_ssl = not args.insecure
try:
# current_session() should never be set when executing from the
# command line but it allows us to provide a pre-created session
# during testing
with current_session() or Session(
server, username, password, verify_ssl=verify_ssl
):
result = func(**kwargs)
if isinstance(result, list):
pprint([str(x) for x in result])
elif isinstance(result, dict) and args.format == 'json':
print(json.dumps(result, indent=2))
else:
pprint(result)
except RuntimeError as e:
parser.error(e)
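# Hedged usage sketch: main() reads connection details from the environment, so a
# typical invocation (hostname and credentials illustrative) looks like:
#   SASCTL_SERVER_NAME=viya.example.com SASCTL_USER_NAME=user SASCTL_PASSWORD=secret \
#       sasctl folders list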
| |
request_parent_reference.go
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aip0132
import (
"github.com/googleapis/api-linter/lint"
"github.com/googleapis/api-linter/rules/internal/utils"
"github.com/jhump/protoreflect/desc"
)
var requestParentReference = &lint.FieldRule{
Name: lint.NewRuleName(132, "request-parent-reference"),
OnlyIf: func(f *desc.FieldDescriptor) bool {
return isListRequestMessage(f.GetOwner()) && f.GetName() == "parent"
},
LintField: func(f *desc.FieldDescriptor) []lint.Problem {
if ref := utils.GetResourceReference(f); ref == nil
|
return nil
},
}
|
{
return []lint.Problem{{
Message: "List methods: The `parent` field should include a `google.api.resource_reference` annotation.",
Descriptor: f,
}}
}
|
MainPage.py
|
#Author:Azrael
import sys
from PyQt5.QtWidgets import QApplication, QDialog, QStackedWidget,QListWidget,\
QTextEdit,QVBoxLayout,QListWidgetItem
class MainPage(QDialog):
def __init__(self, parent=None):
super(MainPage, self).__init__(parent)
self.initUI()
def
|
(self):
self.setWindowTitle("sa1tFish")
self.setGeometry(200, 200, 800, 400)
self.selectList = QListWidget()
self.Item = QListWidgetItem()
self.selectList.setFlow(QListWidget.LeftToRight)
self.selectList.addItems(["function1","function2","function3"])
self.selectList.setMaximumHeight(40)
self.selectList.setMinimumHeight(20)
self.resultEdit1 = QTextEdit("function1--result1--111",self)
self.resultEdit2 = QTextEdit("function2--result2--222",self)
self.resultEdit3 = QTextEdit("function3--result3--333",self)
self.stack = QStackedWidget()
self.stack.addWidget(self.resultEdit1)
self.stack.addWidget(self.resultEdit2)
self.stack.addWidget(self.resultEdit3)
layout = QVBoxLayout(self)
layout.addWidget(self.selectList)
layout.addWidget(self.stack)
layout.setStretch(0,1)
layout.setStretch(1,20)
self.selectList.currentRowChanged.connect(self.stack.setCurrentIndex)
self.setMinimumHeight(200)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MainPage()
sys.exit(app.exec_())
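# Pattern note (sketch): the QListWidget's currentRowChanged signal is wired to
# QStackedWidget.setCurrentIndex above, so selecting "function2" (row 1) shows
# resultEdit2 without any extra handler code.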
|
initUI
|
_data.py
|
"""Central data class and associated."""
# --- import --------------------------------------------------------------------------------------
import collections
import operator
import functools
import warnings
import numpy as np
import h5py
import scipy
from scipy.interpolate import griddata, interp1d
from .._group import Group
from .. import collection as wt_collection
from .. import exceptions as wt_exceptions
from .. import kit as wt_kit
from .. import units as wt_units
from ._axis import Axis, identifier_to_operator
from ._channel import Channel
from ._constant import Constant
from ._variable import Variable
# --- define --------------------------------------------------------------------------------------
__all__ = ["Data"]
# --- class ---------------------------------------------------------------------------------------
class Data(Group):
"""Multidimensional dataset."""
class_name = "Data"
def __init__(self, *args, **kwargs):
self._axes = []
self._constants = []
Group.__init__(self, *args, **kwargs)
# populate axes, constants from attrs string
for identifier in self.attrs.get("axes", []):
if hasattr(identifier, "decode"):
identifier = identifier.decode()
expression, units = identifier.split("{")
units = units.replace("}", "").strip()
if units == "None":
units = None
# Should not be needed for wt5 >= 1.0.3, kept for opening older wt5 files.
for i in identifier_to_operator.keys():
expression = expression.replace(i, identifier_to_operator[i])
expression = expression.replace(" ", "") # remove all whitespace
axis = Axis(self, expression, units)
self._axes.append(axis)
for identifier in self.attrs.get("constants", []):
if hasattr(identifier, "decode"):
identifier = identifier.decode()
expression, units = identifier.split("{")
units = units.replace("}", "").strip()
if units == "None":
units = None
for i in identifier_to_operator.keys():
expression = expression.replace(i, identifier_to_operator[i])
expression = expression.replace(" ", "") # remove all whitespace
const = Constant(self, expression, units)
self._constants.append(const)
self._current_axis_identities_in_natural_namespace = []
if self.file.mode is not None and self.file.mode != "r":
self._on_constants_updated()
self._on_axes_updated()
# the following are populated if not already recorded
self.channel_names
self.source
self.variable_names
def __repr__(self) -> str:
return "<WrightTools.Data '{0}' {1} at {2}>".format(
self.natural_name, str(self.axis_names), "::".join([self.filepath, self.name])
)
@property
def axes(self) -> tuple:
return tuple(self._axes)
@property
def axis_expressions(self) -> tuple:
"""Axis expressions."""
return tuple(a.expression for a in self._axes)
@property
def axis_names(self) -> tuple:
"""Axis names."""
return tuple(a.natural_name for a in self._axes)
@property
def constants(self) -> tuple:
return tuple(self._constants)
@property
    def constant_expressions(self) -> tuple:
        """Constant expressions."""
return tuple(a.expression for a in self._constants)
@property
    def constant_names(self) -> tuple:
        """Constant names."""
return tuple(a.natural_name for a in self._constants)
@property
def channel_names(self) -> tuple:
"""Channel names."""
if "channel_names" not in self.attrs.keys():
self.attrs["channel_names"] = np.array([], dtype="S")
return tuple(s.decode() for s in self.attrs["channel_names"])
@channel_names.setter
def channel_names(self, value):
"""Set channel names."""
self.attrs["channel_names"] = np.array(value, dtype="S")
@property
def channels(self) -> tuple:
"""Channels."""
return tuple(self[n] for n in self.channel_names)
@property
def datasets(self) -> tuple:
"""Datasets."""
return tuple(v for _, v in self.items() if isinstance(v, h5py.Dataset))
@property
def kind(self):
"""Kind."""
if "kind" not in self.attrs.keys():
self.attrs["kind"] = "None"
value = self.attrs["kind"]
return value if not value == "None" else None
@property
def ndim(self) -> int:
"""Get number of dimensions."""
try:
assert self._ndim is not None
except (AssertionError, AttributeError):
if len(self.variables) == 0:
self._ndim = 0
else:
self._ndim = self.variables[0].ndim
finally:
return self._ndim
@property
def shape(self) -> tuple:
"""Shape."""
try:
assert self._shape is not None
except (AssertionError, AttributeError):
self._shape = wt_kit.joint_shape(*self.variables)
finally:
return self._shape
@property
def size(self) -> int:
"""Size."""
return functools.reduce(operator.mul, self.shape)
@property
def source(self):
"""Source."""
if "source" not in self.attrs.keys():
self.attrs["source"] = "None"
value = self.attrs["source"]
return value if not value == "None" else None
@property
def units(self) -> tuple:
"""All axis units."""
return tuple(a.units for a in self._axes)
@property
def constant_units(self) -> tuple:
"""All constant units."""
return tuple(a.units for a in self._constants)
@property
def variable_names(self) -> tuple:
"""Variable names."""
if "variable_names" not in self.attrs.keys():
self.attrs["variable_names"] = np.array([], dtype="S")
return tuple(s.decode() for s in self.attrs["variable_names"])
@variable_names.setter
def variable_names(self, value):
"""Set variable names."""
self.attrs["variable_names"] = np.array(value, dtype="S")
@property
def variables(self) -> tuple:
"""Variables."""
try:
assert self._variables is not None
except (AssertionError, AttributeError):
self._variables = [self[n] for n in self.variable_names]
finally:
return tuple(self._variables)
@property
def _leaf(self):
return "{0} {1}".format(self.natural_name, self.shape)
def _on_axes_updated(self):
"""Method to run when axes are changed in any way.
Propagates updated axes properly.
"""
# update attrs
self.attrs["axes"] = np.array([a.identity.encode() for a in self._axes], dtype="S")
# remove old attributes
while len(self._current_axis_identities_in_natural_namespace) > 0:
key = self._current_axis_identities_in_natural_namespace.pop(0)
try:
delattr(self, key)
except AttributeError:
pass # already gone
# populate new attributes
for a in self._axes:
key = a.natural_name
setattr(self, key, a)
self._current_axis_identities_in_natural_namespace.append(key)
def _on_constants_updated(self):
"""Method to run when constants are changed in any way.
Propagates updated constants properly.
"""
# update attrs
self.attrs["constants"] = np.array(
[a.identity.encode() for a in self._constants], dtype="S"
)
def _print_branch(self, prefix, depth, verbose):
def print_leaves(prefix, lis, vline=True):
for i, item in enumerate(lis):
if vline:
a = "│ "
else:
a = " "
if i + 1 == len(lis):
b = "└── "
else:
b = "├── "
s = prefix + a + b + "{0}: {1}".format(i, item._leaf)
print(s)
if verbose:
# axes
print(prefix + "├── axes")
print_leaves(prefix, self.axes)
# constants
print(prefix + "├── constants")
print_leaves(prefix, self.constants)
# variables
print(prefix + "├── variables")
print_leaves(prefix, self.variables)
# channels
print(prefix + "└── channels")
print_leaves(prefix, self.channels, vline=False)
else:
# axes
s = "axes: "
s += ", ".join(["{0} ({1})".format(a.expression, a.units) for a in self.axes])
print(prefix + "├── " + s)
# constants
s = "constants: "
s += ", ".join(
["{0} ({1} {2})".format(a.expression, a.value, a.units) for a in self.constants]
)
print(prefix + "├── " + s)
# channels
s = "channels: "
s += ", ".join(self.channel_names)
print(prefix + "└── " + s)
def bring_to_front(self, channel):
"""Bring a specific channel to the zero-indexed position in channels.
All other channels get pushed back but remain in order.
Parameters
----------
channel : int or str
Channel index or name.
"""
channel_index = wt_kit.get_index(self.channel_names, channel)
new = list(self.channel_names)
new.insert(0, new.pop(channel_index))
self.channel_names = new
def chop(self, *args, at={}, parent=None, verbose=True) -> wt_collection.Collection:
"""Divide the dataset into its lower-dimensionality components.
Parameters
----------
axis : str or int (args)
Axes of the returned data objects. Strings refer to the names of
axes in this object, integers refer to their index. Provide multiple
axes to return multidimensional data objects.
at : dict (optional)
Choice of position along an axis. Keys are axis names, values are lists
``[position, input units]``. If exact position does not exist,
the closest valid position is used.
parent : WrightTools Collection instance (optional)
Collection to place the new "chop" collection within. Default is
None (new parent).
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools Collection
Collection of chopped data objects.
Examples
--------
>>> data.axis_names
['d2', 'w1', 'w2']
Get all w1 wigners.
>>> datas = data.chop('d2', 'w1')
>>> len(datas)
51
Get 2D frequency at d2=0 fs.
>>> datas = data.chop('w1', 'w2', at={'d2': [0, 'fs']})
>>> len(datas)
        1
>>> datas[0].axis_names
['w1', 'w2']
>>> datas[0].d2[:]
0.
See Also
--------
collapse
Collapse the dataset along one axis.
split
Split the dataset while maintaining its dimensionality.
"""
from ._axis import operators, operator_to_identifier
# parse args
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, int):
args[i] = self._axes[arg].natural_name
elif isinstance(arg, str):
# same normalization that occurs in the natural_name @property
arg = arg.strip()
for op in operators:
arg = arg.replace(op, operator_to_identifier[op])
args[i] = wt_kit.string2identifier(arg)
# normalize the at keys to the natural name
for k in [ak for ak in at.keys() if type(ak) == str]:
for op in operators:
if op in k:
nk = k.replace(op, operator_to_identifier[op])
at[nk] = at[k]
at.pop(k)
k = nk
# get output collection
out = wt_collection.Collection(name="chop", parent=parent)
# get output shape
kept = args + [ak for ak in at.keys() if type(ak) == str]
kept_axes = [self._axes[self.axis_names.index(a)] for a in kept]
removed_axes = [a for a in self._axes if a not in kept_axes]
removed_shape = wt_kit.joint_shape(*removed_axes)
if removed_shape == ():
removed_shape = (1,) * self.ndim
removed_shape = list(removed_shape)
for i in at.keys():
if type(i) == int:
removed_shape[i] = 1
for ax in kept_axes:
if ax.shape.count(1) == ax.ndim - 1:
removed_shape[ax.shape.index(ax.size)] = 1
removed_shape = tuple(removed_shape)
# iterate
i = 0
for idx in np.ndindex(removed_shape):
idx = np.array(idx, dtype=object)
idx[np.array(removed_shape) == 1] = slice(None)
for axis, point in at.items():
if type(axis) == int:
idx[axis] = point
continue
point, units = point
destination_units = self._axes[self.axis_names.index(axis)].units
point = wt_units.converter(point, units, destination_units)
axis_index = self.axis_names.index(axis)
axis = self._axes[axis_index]
idx_index = np.array(axis.shape) > 1
if np.sum(idx_index) > 1:
raise wt_exceptions.MultidimensionalAxisError("chop", axis.natural_name)
idx_index = list(idx_index).index(True)
idx[idx_index] = np.argmin(np.abs(axis[tuple(idx)] - point))
data = out.create_data(name="chop%03i" % i)
for v in self.variables:
kwargs = {}
kwargs["name"] = v.natural_name
kwargs["values"] = v[idx]
kwargs["units"] = v.units
kwargs["label"] = v.label
kwargs.update(v.attrs)
data.create_variable(**kwargs)
for c in self.channels:
kwargs = {}
kwargs["name"] = c.natural_name
kwargs["values"] = c[idx]
kwargs["units"] = c.units
kwargs["label"] = c.label
kwargs["signed"] = c.signed
kwargs.update(c.attrs)
data.create_channel(**kwargs)
new_axes = [a.expression for a in kept_axes if a.expression not in at.keys()]
new_axis_units = [a.units for a in kept_axes if a.expression not in at.keys()]
data.transform(*new_axes)
for const in self.constant_expressions:
data.create_constant(const, verbose=False)
for ax in self.axis_expressions:
if ax not in new_axes:
data.create_constant(ax, verbose=False)
for j, units in enumerate(new_axis_units):
data.axes[j].convert(units)
i += 1
out.flush()
# return
if verbose:
print("chopped data into %d piece(s)" % len(out), "in", new_axes)
return out
def gradient(self, axis, *, channel=0):
"""
Compute the gradient along one axis.
New channels have names ``<channel name>_<axis name>_gradient``.
Parameters
----------
axis : int or str
The axis to differentiate along.
If given as an integer, the axis in the underlying array is used,
and unitary spacing is assumed.
If given as a string, the axis must exist, and be a 1D array-aligned axis.
(i.e. have a shape with a single value which is not ``1``)
            The axis to differentiate along is inferred from the shape of the axis.
channel : int or str
The channel to differentiate.
Default is the first channel.
"""
# get axis index --------------------------------------------------------------------------
if isinstance(axis, int):
axis_index = axis
elif isinstance(axis, str):
index = self.axis_names.index(axis)
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
                raise wt_exceptions.MultidimensionalAxisError(axis, "gradient")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis '{}' is a single point, cannot compute gradient".format(axis)
)
axis_index = axes[0]
else:
raise wt_exceptions.TypeError("axis: expected {int, str}, got %s" % type(axis))
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channel_names[channel_index]
if self[channel].shape[axis_index] == 1:
raise wt_exceptions.ValueError(
"Channel '{}' has a single point along Axis '{}', cannot compute gradient".format(
channel, axis
)
)
rtype = np.result_type(self[channel].dtype, float)
new = self.create_channel(
"{}_{}_gradient".format(channel, axis),
values=np.empty(self[channel].shape, dtype=rtype),
)
channel = self[channel]
if axis == axis_index:
new[:] = np.gradient(channel[:], axis=axis_index)
else:
new[:] = np.gradient(channel[:], self[axis].points, axis=axis_index)
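    # Illustrative sketch (hypothetical data object with a channel named 'signal'):
    # >>> data.gradient('w1')
    # >>> data.channel_names[-1]
    # 'signal_w1_gradient'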
def moment(self, axis, channel=0, moment=1, *, resultant=None):
"""Take the nth moment the dataset along one axis, adding lower rank channels.
New channels have names ``<channel name>_<axis name>_moment_<moment num>``.
Moment 0 is the integral of the slice.
        Moment 1 is the weighted average or "Center of Mass", normalized by the integral.
        Moment 2 is the variance, the central moment about the center of mass,
            normalized by the integral.
Moments 3+ are central moments about the center of mass, normalized by the integral
and by the standard deviation to the power of the moment.
Moments, especially higher order moments, are susceptible to noise and baseline.
It is recommended when used with real data to use :meth:`WrightTools.data.Channel.clip`
in conjunction with moments to reduce effects of noise.
Parameters
----------
axis : int or str
The axis to take the moment along.
If given as an integer, the axis with that index is used.
If given as a string, the axis with that name is used.
The axis must exist, and be a 1D array-aligned axis.
(i.e. have a shape with a single value which is not ``1``)
The collapsed axis must be monotonic to produce correct results.
The axis to collapse along is inferred from the shape of the axis.
channel : int or str
The channel to take the moment.
If given as an integer, the channel with that index is used.
If given as a string, the channel with that name is used.
The channel must have values along the axis
(i.e. its shape must not be ``1`` in the dimension for which the axis is not ``1``)
Default is 0, the first channel.
moment : int or tuple of int
The moments to take.
One channel will be created for each number given.
Default is 1, the center of mass.
resultant : tuple of int
The resultant shape after the moment operation.
By default, it is intuited by the axis along which the moment is being taken.
This default only works if that axis is 1D, so resultant is required if a
multidimensional axis is passed as the first argument.
The requirement of monotonicity applies on a per pixel basis.
See Also
--------
collapse
Reduce dimensionality by some mathematical operation
clip
Set values above/below a threshold to a particular value
WrightTools.kit.joint_shape
Useful for setting `resultant` kwarg based off of axes not collapsed.
"""
# get axis index --------------------------------------------------------------------------
axis_index = None
if resultant is not None:
for i, (s, r) in enumerate(zip(wt_kit.joint_shape(*self.axes), resultant)):
if s != r and r == 1 and axis_index is None:
axis_index = i
elif s == r:
continue
else:
raise wt_exceptions.ValueError(
f"Invalid resultant shape '{resultant}' for shape {wt_kit.joint_shape(*self.axes)}. "
+ "Consider using `wt.kit.joint_shape` to join non-collapsed axes."
)
index = wt_kit.get_index(self.axis_names, axis)
if axis_index is None:
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "mom
|
wt_exceptions.ValueError(
"Axis {} is a single point, cannot compute moment".format(axis)
)
axis_index = axes[0]
warnings.warn("moment", category=wt_exceptions.EntireDatasetInMemoryWarning)
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channel_names[channel_index]
if self[channel].shape[axis_index] == 1:
raise wt_exceptions.ValueError(
"Channel '{}' has a single point along Axis '{}', cannot compute moment".format(
channel, axis
)
)
new_shape = list(self[channel].shape)
new_shape[axis_index] = 1
channel = self[channel]
axis_inp = axis
axis = self.axes[index]
x = axis[:]
if np.any(np.isnan(x)):
raise wt_exceptions.ValueError("Axis '{}' includes NaN".format(axis_inp))
y = np.nan_to_num(channel[:])
try:
moments = tuple(moment)
except TypeError:
moments = (moment,)
multiplier = 1
if 0 in moments:
# May be possible to optimize, probably doesn't need the sum
# only matters for integral, all others normalize by integral
multiplier = np.sign(
np.sum(np.diff(x, axis=axis_index), axis=axis_index, keepdims=True)
)
for moment in moments:
about = 0
norm = 1
if moment > 0:
norm = np.trapz(y, x, axis=axis_index)
norm = np.array(norm)
norm.shape = new_shape
if moment > 1:
about = np.trapz(x * y, x, axis=axis_index)
about = np.array(about)
about.shape = new_shape
about /= norm
if moment > 2:
sigma = np.trapz((x - about) ** 2 * y, x, axis=axis_index)
sigma = np.array(sigma)
sigma.shape = new_shape
sigma /= norm
sigma **= 0.5
norm *= sigma ** moment
values = np.trapz((x - about) ** moment * y, x, axis=axis_index)
values = np.array(values)
values.shape = new_shape
values /= norm
if moment == 0:
values *= multiplier
self.create_channel(
"{}_{}_{}_{}".format(channel.natural_name, axis_inp, "moment", moment),
values=values,
)
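    # Hedged math sketch of what moment() computes above, with x = axis values and
    # y = channel values; all integrals are taken with np.trapz along the chosen axis:
    #   moment 0: I = trapz(y, x)                     (integral, sign-corrected for axis direction)
    #   moment 1: c = trapz(x * y, x) / I             (center of mass)
    #   moment 2: trapz((x - c)**2 * y, x) / I        (variance)
    #   moment n >= 3: trapz((x - c)**n * y, x) / (I * sigma**n), with sigma = sqrt(variance)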
def collapse(self, axis, method="sum"):
"""Collapse the dataset along one axis, adding lower rank channels.
New channels have names ``<channel name>_<axis name>_<method>``.
Parameters
----------
axis : int or str
The axis to collapse along.
If given as an integer, the axis in the underlying array is used.
If given as a string, the axis must exist, and be a 1D array-aligned axis.
(i.e. have a shape with a single value which is not ``1``)
The axis to collapse along is inferred from the shape of the axis.
method : {'average', 'sum', 'max', 'min'} (optional)
The method of collapsing the given axis. Method may also be list
of methods corresponding to the channels of the object. Default
is sum. NaNs are ignored.
Can also be a list, allowing for different treatment for varied channels.
In this case, None indicates that no change to that channel should occur.
See Also
--------
chop
Divide the dataset into its lower-dimensionality components.
split
Split the dataset while maintaining its dimensionality.
moment
Take the moment along a particular axis
"""
if method in ("int", "integrate"):
warnings.warn(
"integrate method of collapse is deprecated, use moment(moment=0) instead",
wt_exceptions.VisibleDeprecationWarning,
)
for channel in self.channel_names:
try:
self.moment(axis, channel, moment=0)
self.rename_channels(
**{self.channel_names[-1]: f"{channel}_{axis}_{method}"}, verbose=False
)
except wt_exceptions.ValueError:
pass # may have some channels which fail, do so silently
return
# get axis index --------------------------------------------------------------------------
if isinstance(axis, int):
axis_index = axis
elif isinstance(axis, str):
index = self.axis_names.index(axis)
axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
if len(axes) > 1:
raise wt_exceptions.MultidimensionalAxisError(axis, "collapse")
elif len(axes) == 0:
raise wt_exceptions.ValueError(
"Axis {} is a single point, cannot collapse".format(axis)
)
axis_index = axes[0]
else:
raise wt_exceptions.TypeError("axis: expected {int, str}, got %s" % type(axis))
new_shape = list(self.shape)
new_shape[axis_index] = 1
func = {
"sum": np.nansum,
"max": np.nanmax,
"maximum": np.nanmax,
"min": np.nanmin,
"minimum": np.nanmin,
"ave": np.nanmean,
"average": np.nanmean,
"mean": np.nanmean,
}
# methods ---------------------------------------------------------------------------------
if isinstance(method, str):
methods = [method for _ in self.channels]
if isinstance(method, list):
if len(method) == len(self.channels):
methods = method
else:
raise wt_exceptions.ValueError(
"method argument must have same number of elements as there are channels"
)
for m in methods:
if m not in func.keys():
raise wt_exceptions.ValueError("method '{}' not recognized".format(m))
warnings.warn("collapse", category=wt_exceptions.EntireDatasetInMemoryWarning)
# collapse --------------------------------------------------------------------------------
for method, channel in zip(methods, self.channel_names):
if method is None:
continue
if self[channel].shape[axis_index] == 1:
continue # Cannot collapse any further, don't clutter data object
new_shape = list(self[channel].shape)
new_shape[axis_index] = 1
rtype = self[channel].dtype
if method in ["ave", "average", "mean"]:
rtype = np.result_type(self[channel].dtype, float)
new = self.create_channel(
"{}_{}_{}".format(channel, axis, method),
values=np.empty(new_shape, dtype=rtype),
units=self[channel].units,
)
new[:] = func[method](self[channel], axis=axis_index, keepdims=True)
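    # Illustrative sketch (hypothetical 2D data object with channel 'signal' of
    # shape (51, 11)): collapsing along 'w2' with the mean adds a reduced channel.
    # >>> data.collapse('w2', method='mean')
    # >>> data['signal_w2_mean'].shape
    # (51, 1)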
    def convert(self, destination_units, *, convert_variables=False, verbose=True):
        """Convert all compatible axes and constants to given units.
Parameters
----------
destination_units : str
Destination units.
convert_variables : boolean (optional)
Toggle conversion of stored arrays. Default is False
verbose : bool (optional)
Toggle talkback. Default is True.
See Also
--------
Axis.convert
            Convert a single axis object to compatible units. Call on an
axis object in data.axes.
"""
# apply to all compatible axes
for axis in self.axes:
if wt_units.is_valid_conversion(axis.units, destination_units):
orig = axis.units
axis.convert(destination_units, convert_variables=convert_variables)
if verbose:
print(
"axis {} converted from {} to {}".format(
axis.expression, orig, destination_units
)
)
# apply to all compatible constants
for constant in self.constants:
if wt_units.is_valid_conversion(constant.units, destination_units):
orig = constant.units
constant.convert(destination_units, convert_variables=convert_variables)
if verbose:
print(
"constant {} converted from {} to {}".format(
constant.expression, orig, destination_units
)
)
if convert_variables:
for var in self.variables:
if wt_units.is_valid_conversion(var.units, destination_units):
orig = var.units
var.convert(destination_units)
if verbose:
print(
"variable {} converted from {} to {}".format(
var.natural_name, orig, destination_units
)
)
self._on_axes_updated()
self._on_constants_updated()
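    # Illustrative sketch (hypothetical axes in 'nm'): only compatible axes,
    # constants, and (optionally) variables are touched by convert().
    # >>> data.convert('eV')
    # axis w1 converted from nm to eV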
def create_channel(
self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Channel:
"""Append a new channel.
Parameters
----------
name : string
Unique name for this channel.
values : array (optional)
Array. If None, an empty array equaling the data shape is
created. Default is None.
shape : tuple of int
Shape to use. Must broadcast with the full shape.
Only used if `values` is None.
Default is the full shape of self.
units : string (optional)
Channel units. Default is None.
dtype : numpy.dtype (optional)
dtype to use for dataset, default is np.float64.
Only used if `values` is None.
kwargs : dict
Additional keyword arguments passed to Channel instantiation.
Returns
-------
Channel
Created channel.
"""
if name in self.channel_names:
warnings.warn(name, wt_exceptions.ObjectExistsWarning)
return self[name]
elif name in self.variable_names:
raise wt_exceptions.NameNotUniqueError(name)
require_kwargs = {"chunks": True}
if values is None:
if shape is None:
require_kwargs["shape"] = self.shape
else:
require_kwargs["shape"] = shape
if dtype is None:
require_kwargs["dtype"] = np.dtype(np.float64)
else:
require_kwargs["dtype"] = dtype
if require_kwargs["dtype"].kind in "fcmM":
require_kwargs["fillvalue"] = np.nan
else:
require_kwargs["fillvalue"] = 0
else:
require_kwargs["data"] = values
require_kwargs["shape"] = values.shape
require_kwargs["dtype"] = values.dtype
if np.prod(require_kwargs["shape"]) == 1:
require_kwargs["chunks"] = None
# create dataset
dataset_id = self.require_dataset(name=name, **require_kwargs).id
channel = Channel(self, dataset_id, units=units, **kwargs)
# finish
self.attrs["channel_names"] = np.append(self.attrs["channel_names"], name.encode())
return channel
def create_variable(
self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs
) -> Variable:
"""Add new child variable.
Parameters
----------
name : string
Unique identifier.
values : array-like (optional)
            Array to populate variable with. If None, the variable will be filled with NaN.
Default is None.
shape : tuple of int
            Shape to use. Must broadcast with the full shape.
Only used if `values` is None.
Default is the full shape of self.
units : string (optional)
Variable units. Default is None.
dtype : numpy.dtype (optional)
dtype to use for dataset, default is np.float64.
Only used if `values` is None.
kwargs
Additional kwargs to variable instantiation.
Returns
-------
WrightTools Variable
New child variable.
"""
if name in self.variable_names:
warnings.warn(name, wt_exceptions.ObjectExistsWarning)
return self[name]
elif name in self.channel_names:
raise wt_exceptions.NameNotUniqueError(name)
if values is None:
if shape is None:
shape = self.shape
if dtype is None:
dtype = np.dtype(np.float64)
if dtype.kind in "fcmM":
fillvalue = np.nan
else:
fillvalue = 0
else:
shape = values.shape
dtype = values.dtype
fillvalue = None
# create dataset
id = self.require_dataset(
name=name, data=values, shape=shape, dtype=dtype, fillvalue=fillvalue
).id
variable = Variable(self, id, units=units, **kwargs)
# finish
self._variables = None
self.attrs["variable_names"] = np.append(self.attrs["variable_names"], name.encode())
return variable
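    # Hedged workflow sketch (names and arrays hypothetical): variables hold the
    # coordinate arrays, channels hold measured values, and transform() builds axes
    # from variable expressions.
    # >>> data.create_variable('w1', values=w1_points)
    # >>> data.create_channel('signal', values=signal_array)
    # >>> data.transform('w1')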
def get_nadir(self, channel=0) -> tuple:
"""Get the coordinates, in units, of the minimum in a channel.
Parameters
----------
channel : int or str (optional)
Channel. Default is 0.
Returns
-------
generator of numbers
Coordinates in units for each axis.
"""
# get channel
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
        # get indices
idx = channel.argmin()
# finish
return tuple(a[idx] for a in self._axes)
def get_zenith(self, channel=0) -> tuple:
"""Get the coordinates, in units, of the maximum in a channel.
Parameters
----------
channel : int or str (optional)
Channel. Default is 0.
Returns
-------
generator of numbers
Coordinates in units for each axis.
"""
# get channel
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
        # get indices
idx = channel.argmax()
# finish
return tuple(a[idx] for a in self._axes)
def heal(self, channel=0, method="linear", fill_value=np.nan, verbose=True):
"""
Remove nans from channel using interpolation.
Parameters
----------
channel : int or str (optional)
Channel to heal. Default is 0.
method : {'linear', 'nearest', 'cubic'} (optional)
The interpolation method. Note that cubic interpolation is only
possible for 1D and 2D data. See `griddata`__ for more information.
Default is linear.
fill_value : number-like (optional)
The value written to pixels that cannot be filled by interpolation.
Default is nan.
verbose : bool (optional)
Toggle talkback. Default is True.
__ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
.. note:: Healing may take several minutes for large datasets.
Interpolation time goes as nearest, linear, then cubic.
"""
warnings.warn("heal", category=wt_exceptions.EntireDatasetInMemoryWarning)
timer = wt_kit.Timer(verbose=False)
with timer:
# channel
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channel = self.channels[channel_index]
values = self.channels[channel_index][:]
points = [axis[:] for axis in self._axes]
xi = tuple(np.meshgrid(*points, indexing="ij"))
# 'undo' gridding
arr = np.zeros((len(self._axes) + 1, values.size))
for i in range(len(self._axes)):
arr[i] = xi[i].flatten()
arr[-1] = values.flatten()
# remove nans
arr = arr[:, ~np.isnan(arr).any(axis=0)]
# grid data wants tuples
tup = tuple([arr[i] for i in range(len(arr) - 1)])
# grid data
out = griddata(tup, arr[-1], xi, method=method, fill_value=fill_value)
self.channels[channel_index][:] = out
# print
if verbose:
print(
"channel {0} healed in {1} seconds".format(
channel.name, np.around(timer.interval, decimals=3)
)
)
def level(self, channel, axis, npts, *, verbose=True):
"""Subtract the average value of npts at the edge of a given axis.
Parameters
----------
channel : int or str
Channel to level.
axis : int
Axis to level along.
npts : int
Number of points to average for each slice. Positive numbers
            take points at leading indices and negative numbers take points
            at trailing indices.
verbose : bool (optional)
Toggle talkback. Default is True.
"""
warnings.warn("level", category=wt_exceptions.EntireDatasetInMemoryWarning)
channel_index = wt_kit.get_index(self.channel_names, channel)
channel = self.channels[channel_index]
# verify npts not zero
npts = int(npts)
if npts == 0:
raise wt_exceptions.ValueError("npts must not be zero")
# get subtrahend
ss = [slice(None)] * self.ndim
if npts > 0:
ss[axis] = slice(0, npts, None)
else:
ss[axis] = slice(npts, None, None)
        subtrahend = np.nanmean(channel[tuple(ss)], axis=axis)  # tuple indexing required by modern numpy/h5py
if self.ndim > 1:
subtrahend = np.expand_dims(subtrahend, axis=axis)
# level
channel -= subtrahend
# finish
channel._null = 0
if verbose:
print("channel {0} leveled along axis {1}".format(channel.natural_name, axis))
def map_variable(
self, variable, points, input_units="same", *, name=None, parent=None, verbose=True
) -> "Data":
"""Map points of an axis to new points using linear interpolation.
Out-of-bounds points are written nan.
Parameters
----------
variable : string
The variable to map onto.
points : array-like or int
If array, the new points. If int, new points will have the same
limits, with int defining the number of evenly spaced points
between.
input_units : str (optional)
The units of the new points. Default is same, which assumes
the new points have the same units as the axis.
name : string (optional)
The name of the new data object. If None, generated from
natural_name. Default is None.
parent : WrightTools.Collection (optional)
Parent of new data object. If None, data is made at root of a
new temporary file.
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.Data
New data object.
"""
# get variable index
variable_index = wt_kit.get_index(self.variable_names, variable)
variable = self.variables[variable_index]
# get points
if isinstance(points, int):
points = np.linspace(variable.min(), variable.max(), points)
points = np.array(points)
# points dimensionality
if points.ndim < variable.ndim:
for i, d in enumerate(variable.shape):
if d == 1:
points = np.expand_dims(points, axis=i)
# convert points
if input_units == "same":
pass
else:
points = wt_units.converter(points, input_units, variable.units)
# construct new data object
special = ["name", "axes", "constants", "channel_names", "variable_names"]
kwargs = {k: v for k, v in self.attrs.items() if k not in special}
if name is None:
name = "{0}_{1}_mapped".format(self.natural_name, variable.natural_name)
kwargs["name"] = name
kwargs["parent"] = parent
out = Data(**kwargs)
# mapped variable
values = points
out.create_variable(values=values, **variable.attrs)
# orthogonal variables
for v in self.variables:
if wt_kit.orthogonal(v.shape, variable.shape):
out.create_variable(values=v[:], **v.attrs)
out.transform(*self.axis_expressions)
# interpolate
if self.ndim == 1:
def interpolate(dataset, points):
function = scipy.interpolate.interp1d(variable[:], dataset[:], bounds_error=False)
return function(points)
else:
pts = np.array([a.full.flatten() for a in self.axes]).T
out_pts = np.array([a.full.flatten() for a in out.axes]).T
def interpolate(dataset, points):
values = dataset.full.flatten()
function = scipy.interpolate.LinearNDInterpolator(pts, values, rescale=True)
new = function(out_pts)
new.shape = out.shape
return new
for v in self.variables:
if v.natural_name not in out.variable_names:
out.create_variable(values=interpolate(v, points), **v.attrs)
out.variable_names = self.variable_names # enforce old order
out._variables = None # force regeneration of variables @property
for channel in self.channels:
out.create_channel(values=interpolate(channel, points), **channel.attrs)
# finish
if verbose:
print("data mapped from {0} to {1}".format(self.shape, out.shape))
return out
def offset(
self,
points,
offsets,
along,
offset_axis,
units="same",
offset_units="same",
mode="valid",
method="linear",
verbose=True,
):
"""Offset one axis based on another axis' values.
Useful for correcting instrumental artifacts such as zerotune.
Parameters
----------
points : 1D array-like
Points.
offsets : 1D array-like
Offsets.
along : str or int
Axis that points array lies along.
offset_axis : str or int
Axis to offset using offsets.
units : str (optional)
Units of points array.
offset_units : str (optional)
            Units of offsets array.
mode : {'valid', 'full', 'old'} (optional)
Define how far the new axis will extend. Points outside of valid
interpolation range will be written nan.
method : {'linear', 'nearest', 'cubic'} (optional)
The interpolation method. Note that cubic interpolation is only
possible for 1D and 2D data. See `griddata`__ for more information.
Default is linear.
verbose : bool (optional)
Toggle talkback. Default is True.
__ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html
>>> points # an array of w1 points
>>> offsets # an array of d1 corrections
>>> data.offset(points, offsets, 'w1', 'd1')
"""
raise NotImplementedError
# axis ------------------------------------------------------------------------------------
if isinstance(along, int):
axis_index = along
elif isinstance(along, str):
axis_index = self.axis_names.index(along)
else:
raise TypeError("along: expected {int, str}, got %s" % type(along))
axis = self._axes[axis_index]
# values & points -------------------------------------------------------------------------
# get values, points, units
if units == "same":
input_units = axis.units
else:
input_units = units
# check offsets is 1D or 0D
if len(offsets.shape) == 1:
pass
else:
raise RuntimeError("values must be 1D or 0D in offset!")
# check if units is compatible, convert
dictionary = getattr(wt_units, axis.units_kind)
if input_units in dictionary.keys():
pass
else:
raise RuntimeError("units incompatible in offset!")
points = wt_units.converter(points, input_units, axis.units)
# create correction array
function = interp1d(points, offsets, bounds_error=False)
corrections = function(axis[:])
# remove nans
finite_indicies = np.where(np.isfinite(corrections))[0]
left_pad_width = finite_indicies[0]
right_pad_width = len(corrections) - finite_indicies[-1] - 1
corrections = np.pad(
corrections[np.isfinite(corrections)],
(int(left_pad_width), int(right_pad_width)),
mode="edge",
)
# do correction ---------------------------------------------------------------------------
# transpose so axis is last
transpose_order = np.arange(len(self._axes))
transpose_order[axis_index] = len(self._axes) - 1
transpose_order[-1] = axis_index
self.transpose(transpose_order, verbose=False)
# get offset axis index
if isinstance(offset_axis, int):
offset_axis_index = offset_axis
elif isinstance(offset_axis, str):
offset_axis_index = self.axis_names.index(offset_axis)
else:
raise TypeError("offset_axis: expected {int, str}, got %s" % type(offset_axis))
# new points
new_points = [a[:] for a in self._axes]
old_offset_axis_points = self._axes[offset_axis_index][:]
spacing = abs(
(old_offset_axis_points.max() - old_offset_axis_points.min())
/ float(len(old_offset_axis_points))
)
if mode == "old":
new_offset_axis_points = old_offset_axis_points
elif mode == "valid":
_max = old_offset_axis_points.max() + corrections.min()
_min = old_offset_axis_points.min() + corrections.max()
n = int(abs(np.ceil((_max - _min) / spacing)))
new_offset_axis_points = np.linspace(_min, _max, n)
elif mode == "full":
_max = old_offset_axis_points.max() + corrections.max()
_min = old_offset_axis_points.min() + corrections.min()
n = np.ceil((_max - _min) / spacing)
new_offset_axis_points = np.linspace(_min, _max, n)
new_points[offset_axis_index] = new_offset_axis_points
new_xi = tuple(np.meshgrid(*new_points, indexing="ij"))
xi = tuple(np.meshgrid(*[a[:] for a in self._axes], indexing="ij"))
for channel in self.channels:
# 'undo' gridding
arr = np.zeros((len(self._axes) + 1, channel[:].size))
for i in range(len(self._axes)):
arr[i] = xi[i].flatten()
arr[-1] = channel[:].flatten()
# do corrections
corrections = list(corrections)
corrections = corrections * int((len(arr[0]) / len(corrections)))
arr[offset_axis_index] += corrections
# grid data
tup = tuple([arr[i] for i in range(len(arr) - 1)])
# note that rescale is crucial in this operation
out = griddata(tup, arr[-1], new_xi, method=method, fill_value=np.nan, rescale=True)
channel[:] = out
self._axes[offset_axis_index][:] = new_offset_axis_points
# transpose out
self.transpose(transpose_order, verbose=False)
    def print_tree(self, *, verbose=True):
        """Print an ASCII-formatted tree representation of the data contents."""
print("{0} ({1})".format(self.natural_name, self.filepath))
self._print_branch("", depth=0, verbose=verbose)
def prune(self, keep_channels=True, *, verbose=True):
"""Remove unused variables and (optionally) channels from the Data object.
        Unused variables are those that are not included in any axis or constant.
        Unused channels are those not listed in keep_channels; when keep_channels is
        False, only the first channel is kept.
Parameters
----------
keep_channels : boolean or int or str or tuple
If False, removes all but the first channel.
If int or str, removes all but that index/name channel.
If tuple, removes all channels except those in the tuple by index or name.
Default is True: do not delete channels
verbose : boolean
Toggle talkback. Default is True.
"""
for v in self.variables:
for var in wt_kit.flatten_list([ax.variables for ax in self._axes + self._constants]):
if v == var:
break
else:
self.remove_variable(v.natural_name, implied=False, verbose=verbose)
if keep_channels is not True:
try:
if isinstance(keep_channels, str):
raise TypeError
indexes = tuple(keep_channels)
except TypeError:
indexes = (keep_channels,)
for i, ch in enumerate(self.channels):
                if i not in indexes and ch.natural_name not in indexes:
self.remove_channel(ch.natural_name, verbose=verbose)
def remove_channel(self, channel, *, verbose=True):
"""Remove channel from data.
Parameters
----------
channel : int or str
Channel index or name to remove.
verbose : boolean (optional)
Toggle talkback. Default is True.
"""
channel_index = wt_kit.get_index(self.channel_names, channel)
new = list(self.channel_names)
name = new.pop(channel_index)
del self[name]
self.channel_names = new
if verbose:
print("channel {0} removed".format(name))
def remove_variable(self, variable, *, implied=True, verbose=True):
"""Remove variable from data.
Parameters
----------
variable : int or str
Variable index or name to remove.
implied : boolean (optional)
Toggle deletion of other variables that start with the same
name. Default is True.
verbose : boolean (optional)
Toggle talkback. Default is True.
"""
if isinstance(variable, int):
variable = self.variable_names[variable]
# find all of the implied variables
removed = []
if implied:
for n in self.variable_names:
if n.startswith(variable):
removed.append(n)
else:
removed = [variable]
# check that axes will not be ruined
for n in removed:
for a in self._axes:
if n in [v.natural_name for v in a.variables]:
message = "{0} is contained in axis {1}".format(n, a.expression)
raise RuntimeError(message)
for c in self._constants:
if n in [v.natural_name for v in c.variables]:
warnings.warn(
"Variable being removed used in a constant",
wt_exceptions.WrightToolsWarning,
)
# do removal
for n in removed:
variable_index = wt_kit.get_index(self.variable_names, n)
new = list(self.variable_names)
name = new.pop(variable_index)
del self[name]
self.variable_names = new
self._variables = None
# finish
if verbose:
print("{0} variable(s) removed:".format(len(removed)))
for n in removed:
print(" {0}".format(n))
def rename_channels(self, *, verbose=True, **kwargs):
"""Rename a set of channels.
Parameters
----------
kwargs
Keyword arguments of the form current:'new'.
verbose : boolean (optional)
Toggle talkback. Default is True
"""
# ensure that items will remain unique
changed = kwargs.keys()
for k, v in kwargs.items():
if v not in changed and v in self.keys():
raise wt_exceptions.NameNotUniqueError(v)
# compile references to items that are changing
new = {}
for k, v in kwargs.items():
obj = self[k]
index = self.channel_names.index(k)
# rename
new[v] = obj, index
Group._instances.pop(obj.fullpath, None)
obj.natural_name = str(v)
# remove old references
del self[k]
# apply new references
names = list(self.channel_names)
for v, value in new.items():
obj, index = value
self[v] = obj
names[index] = v
self.channel_names = names
# finish
if verbose:
print("{0} channel(s) renamed:".format(len(kwargs)))
for k, v in kwargs.items():
print(" {0} --> {1}".format(k, v))
def rename_variables(self, *, implied=True, verbose=True, **kwargs):
"""Rename a set of variables.
Parameters
----------
kwargs
Keyword arguments of the form current:'new'.
implied : boolean (optional)
Toggle inclusion of other variables that start with the same
name. Default is True.
verbose : boolean (optional)
Toggle talkback. Default is True
"""
# find all of the implied variables
kwargs = collections.OrderedDict(kwargs)
if implied:
new = collections.OrderedDict()
for k, v in kwargs.items():
for n in self.variable_names:
if n.startswith(k):
new[n] = n.replace(k, v, 1)
kwargs = new
# ensure that items will remain unique
changed = kwargs.keys()
for k, v in kwargs.items():
if v not in changed and v in self.keys():
raise wt_exceptions.NameNotUniqueError(v)
# compile references to items that are changing
new = {}
for k, v in kwargs.items():
obj = self[k]
index = self.variable_names.index(k)
# rename
new[v] = obj, index
Group._instances.pop(obj.fullpath, None)
obj.natural_name = str(v)
# remove old references
del self[k]
# apply new references
names = list(self.variable_names)
for v, value in new.items():
obj, index = value
self[v] = obj
names[index] = v
self.variable_names = names
units = self.units
new = list(self.axis_expressions)
for i, v in enumerate(kwargs.keys()):
for j, n in enumerate(new):
new[j] = n.replace(v, "{%i}" % i)
for i, n in enumerate(new):
new[i] = n.format(*kwargs.values())
self.transform(*new)
for a, u in zip(self._axes, units):
a.convert(u)
units = self.constant_units
new = list(self.constant_expressions)
for i, v in enumerate(kwargs.keys()):
for j, n in enumerate(new):
new[j] = n.replace(v, "{%i}" % i)
for i, n in enumerate(new):
new[i] = n.format(*kwargs.values())
self.set_constants(*new)
for c, u in zip(self._constants, units):
c.convert(u)
# finish
if verbose:
print("{0} variable(s) renamed:".format(len(kwargs)))
for k, v in kwargs.items():
print(" {0} --> {1}".format(k, v))
def share_nans(self):
"""Share not-a-numbers between all channels.
If any channel is nan at a given index, all channels will be nan
at that index after this operation.
Uses the share_nans method found in wt.kit.
"""
def f(_, s, channels):
outs = wt_kit.share_nans(*[c[s] for c in channels])
for c, o in zip(channels, outs):
c[s] = o
self.channels[0].chunkwise(f, self.channels)
    def smooth(self, factors, channel=None, verbose=True) -> "Data":
        """Smooth a channel using an n-dimensional Kaiser window.
Note, all arrays are loaded into memory.
For more info see `Kaiser_window`__ wikipedia entry.
__ https://en.wikipedia.org/wiki/Kaiser_window
Parameters
----------
factors : int or list of int
The smoothing factor. You may provide a list of smoothing factors
for each axis.
channel : int or str or None (optional)
The channel to smooth. If None, all channels will be smoothed.
Default is None.
verbose : bool (optional)
Toggle talkback. Default is True.
"""
warnings.warn("smooth", category=wt_exceptions.EntireDatasetInMemoryWarning)
# get factors -----------------------------------------------------------------------------
if isinstance(factors, list):
pass
else:
dummy = np.zeros(len(self._axes))
dummy[::] = factors
factors = list(dummy)
# get channels ----------------------------------------------------------------------------
if channel is None:
channels = self.channels
else:
if isinstance(channel, int):
channel_index = channel
elif isinstance(channel, str):
channel_index = self.channel_names.index(channel)
else:
raise TypeError("channel: expected {int, str}, got %s" % type(channel))
channels = [self.channels[channel_index]]
# smooth ----------------------------------------------------------------------------------
for channel in channels:
values = channel[:]
for axis_index in range(len(factors)):
factor = factors[axis_index]
# transpose so the axis of interest is last
transpose_order = range(len(values.shape))
# replace axis_index with zero
transpose_order = [
len(values.shape) - 1 if i == axis_index else i for i in transpose_order
]
transpose_order[len(values.shape) - 1] = axis_index
values = values.transpose(transpose_order)
# get kaiser window
beta = 5.0
w = np.kaiser(2 * factor + 1, beta)
# for all slices...
for index in np.ndindex(values[..., 0].shape):
current_slice = values[index]
                    temp_slice = np.pad(current_slice, int(factor), mode="edge")
                    values[index] = np.convolve(temp_slice, w / w.sum(), mode="valid")
# transpose out
values = values.transpose(transpose_order)
# return array to channel object
channel[:] = values
if verbose:
print("smoothed data")
def split(
self, expression, positions, *, units=None, parent=None, verbose=True
) -> wt_collection.Collection:
"""
Split the data object along a given expression, in units.
Parameters
----------
expression : int or str
The expression to split along. If given as an integer, the axis at that index
is used.
positions : number-type or 1D array-type
The position(s) to split at, in units.
units : str (optional)
The units of the given positions. Default is same, which assumes
input units are identical to first variable units.
parent : WrightTools.Collection (optional)
The parent collection in which to place the 'split' collection.
Default is a new Collection.
verbose : bool (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.collection.Collection
A Collection of data objects.
The order of the objects is such that the axis points retain their original order.
See Also
--------
chop
Divide the dataset into its lower-dimensionality components.
collapse
Collapse the dataset along one axis.
"""
# axis ------------------------------------------------------------------------------------
old_expr = self.axis_expressions
old_units = self.units
out = wt_collection.Collection(name="split", parent=parent)
if isinstance(expression, int):
if units is None:
units = self._axes[expression].units
expression = self._axes[expression].expression
elif isinstance(expression, str):
pass
else:
raise TypeError("expression: expected {int, str}, got %s" % type(expression))
self.transform(expression)
if units:
self.convert(units, verbose=False)
try:
positions = [-np.inf] + sorted(list(positions)) + [np.inf]
except TypeError:
positions = [-np.inf, positions, np.inf]
values = self._axes[0].full
masks = [(values >= lo) & (values < hi) for lo, hi in wt_kit.pairwise(positions)]
omasks = []
cuts = []
for mask in masks:
try:
omasks.append(wt_kit.mask_reduce(mask))
cuts.append([i == 1 for i in omasks[-1].shape])
# Ensure at least one axis is kept
if np.all(cuts[-1]):
cuts[-1][0] = False
except ValueError:
omasks.append(None)
cuts.append(None)
for i in range(len(positions) - 1):
out.create_data("split%03i" % i)
for var in self.variables:
for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):
if omask is None:
# Zero length split
continue
omask = wt_kit.enforce_mask_shape(omask, var.shape)
omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])
out_arr = np.full(omask.shape, np.nan)
imask = wt_kit.enforce_mask_shape(imask, var.shape)
out_arr[omask] = var[:][imask]
out[i].create_variable(values=out_arr, **var.attrs)
for ch in self.channels:
for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):
if omask is None:
# Zero length split
continue
omask = wt_kit.enforce_mask_shape(omask, ch.shape)
omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])
out_arr = np.full(omask.shape, np.nan)
imask = wt_kit.enforce_mask_shape(imask, ch.shape)
out_arr[omask] = ch[:][imask]
out[i].create_channel(values=out_arr, **ch.attrs)
if verbose:
for d in out.values():
try:
d.transform(expression)
except IndexError:
continue
print("split data into {0} pieces along <{1}>:".format(len(positions) - 1, expression))
for i, (lo, hi) in enumerate(wt_kit.pairwise(positions)):
new_data = out[i]
if new_data.shape == ():
print(" {0} : None".format(i))
else:
new_axis = new_data.axes[0]
print(
" {0} : {1:0.2f} to {2:0.2f} {3} {4}".format(
i, lo, hi, self.axes[0].units, new_axis.shape
)
)
for d in out.values():
try:
d.transform(*old_expr)
keep = []
keep_units = []
for ax, u in zip(d.axes, old_units):
if ax.size > 1:
keep.append(ax.expression)
keep_units.append(u)
else:
d.create_constant(ax.expression, verbose=False)
d.transform(*keep)
for ax, u in zip(d.axes, keep_units):
ax.convert(u)
except IndexError:
continue
tempax = Axis(d, expression)
if all(
np.all(
np.sum(~np.isnan(tempax.masked), axis=tuple(set(range(tempax.ndim)) - {j}))
<= 1
)
for j in range(tempax.ndim)
):
d.create_constant(expression, verbose=False)
self.transform(*old_expr)
for ax, u in zip(self.axes, old_units):
ax.convert(u)
return out
def transform(self, *axes, verbose=True):
"""Transform the data.
Parameters
----------
axes : strings
Expressions for the new set of axes.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
set_constants
Similar method except for constants
"""
# TODO: ensure that transform does not break data
# create
new = []
newt = "newt" in self.axis_expressions
current = {a.expression: a for a in self._axes}
for expression in axes:
axis = current.get(expression, Axis(self, expression))
new.append(axis)
self._axes = new
# units
for a in self._axes:
if a.units is None:
a.convert(a.variables[0].units)
# finish
self.flush()
self._on_axes_updated()
nownewt = "newt" in self.axis_expressions
if verbose and nownewt and not newt:
print("Look she turned me into a newt")
elif verbose and newt and not nownewt:
print("I got better")
def set_constants(self, *constants, verbose=True):
"""Set the constants associated with the data.
Parameters
----------
constants : str
Expressions for the new set of constants.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
transform
Similar method except for axes.
create_constant
Add an individual constant.
remove_constant
Remove an individual constant.
"""
# create
new = []
current = {c.expression: c for c in self._constants}
for expression in constants:
constant = current.get(expression, Constant(self, expression))
new.append(constant)
self._constants = new
# units
for c in self._constants:
if c.units is None:
c.convert(c.variables[0].units)
# finish
self.flush()
self._on_constants_updated()
def create_constant(self, expression, *, verbose=True):
"""Append a constant to the stored list.
Parameters
----------
expression : str
Expression for the new constant.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
set_constants
Remove and replace all constants.
remove_constant
Remove an individual constant.
"""
if expression in self.constant_expressions:
wt_exceptions.ObjectExistsWarning.warn(expression)
return self.constants[self.constant_expressions.index(expression)]
constant = Constant(self, expression)
if constant.units is None:
constant.convert(constant.variables[0].units)
self._constants.append(constant)
self.flush()
self._on_constants_updated()
if verbose:
print("Constant '{}' added".format(constant.expression))
return constant
def remove_constant(self, constant, *, verbose=True):
"""Remove a constant from the stored list.
Parameters
----------
constant : str or Constant or int
            The constant to remove, given as an expression, Constant instance, or index.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
set_constants
Remove and replace all constants.
create_constant
Add an individual constant.
"""
if isinstance(constant, (str, int)):
constant_index = wt_kit.get_index(self.constant_expressions, constant)
elif isinstance(constant, Constant):
constant_index = wt_kit.get_index(self.constants, constant)
constant = self._constants[constant_index]
self._constants.pop(constant_index)
self.flush()
self._on_constants_updated()
if verbose:
print("Constant '{}' removed".format(constant.expression))
def zoom(self, factor, order=1, verbose=True):
"""Zoom the data array using spline interpolation of the requested order.
The number of points along each axis is increased by factor.
See `scipy ndimage`__ for more info.
__ http://docs.scipy.org/doc/scipy/reference/
generated/scipy.ndimage.interpolation.zoom.html
Parameters
----------
factor : float
The number of points along each axis will increase by this factor.
order : int (optional)
The order of the spline used to interpolate onto new points.
verbose : bool (optional)
Toggle talkback. Default is True.
"""
raise NotImplementedError
import scipy.ndimage
# axes
for axis in self._axes:
axis[:] = scipy.ndimage.interpolation.zoom(axis[:], factor, order=order)
# channels
for channel in self.channels:
channel[:] = scipy.ndimage.interpolation.zoom(channel[:], factor, order=order)
# return
if verbose:
print("data zoomed to new shape:", self.shape)
|
ent")
elif len(axes) == 0:
raise
|
assoc.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::infer::InferCtxt;
use rustc::traits::{self, FulfillmentContext, Normalized, MiscObligation,
SelectionContext, ObligationCause};
use rustc::ty::fold::TypeFoldable;
use syntax::ast;
use syntax::codemap::Span;
//FIXME(@jroesch): Ideally we should be able to drop the fulfillment_cx argument.
pub fn
|
<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
fulfillment_cx: &mut FulfillmentContext<'tcx>,
span: Span,
body_id: ast::NodeId,
value: &T)
-> T
where T : TypeFoldable<'tcx>
{
debug!("normalize_associated_types_in(value={:?})", value);
let mut selcx = SelectionContext::new(infcx);
let cause = ObligationCause::new(span, body_id, MiscObligation);
let Normalized { value: result, obligations } = traits::normalize(&mut selcx, cause, value);
debug!("normalize_associated_types_in: result={:?} predicates={:?}",
result,
obligations);
for obligation in obligations {
fulfillment_cx.register_predicate_obligation(infcx, obligation);
}
result
}
|
normalize_associated_types_in
|
config.go
|
// Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package config
import (
"fmt"
"io/ioutil"
"os"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/ioctl/output"
"github.com/iotexproject/iotex-core/pkg/log"
)
// Directories
var (
// ConfigDir is the directory to store config file
ConfigDir string
// DefaultConfigFile is the default config file name
DefaultConfigFile string
)
// Error strings
var (
// ErrConfigNotMatch indicates error for no config matches
ErrConfigNotMatch = fmt.Errorf("No matching config")
// ErrEmptyEndpoint indicates error for empty endpoint
ErrEmptyEndpoint = fmt.Errorf("No endpoint has been set")
)
// Language type used to enumerate supported language of ioctl
type Language int
// Multi-language support
const (
English Language = iota
Chinese
)
// ConfigCmd represents the config command
var ConfigCmd = &cobra.Command{
Use: "config",
Short: "Get, set, or reset configuration for ioctl",
}
// Context represents the current context
type Context struct {
AddressOrAlias string `json:"addressOrAlias" yaml:"addressOrAlias"`
}
// Config defines the config schema
type Config struct {
Wallet string `json:"wallet" yaml:"wallet"`
Endpoint string `json:"endpoint" yaml:"endpoint"`
SecureConnect bool `json:"secureConnect" yaml:"secureConnect"`
Aliases map[string]string `json:"aliases" yaml:"aliases"`
DefaultAccount Context `json:"defaultAccount" yaml:"defaultAccount"`
Explorer string `json:"explorer" yaml:"explorer"`
Language string `json:"language" yaml:"language"`
Nsv2height uint64 `json:"nsv2height" yaml:"nsv2height"`
}
var (
// ReadConfig represents the current config read from local
ReadConfig Config
// Insecure represents the insecure connect option of grpc dial, default is false
Insecure = false
// UILanguage represents the language of ioctl user interface, default is 0 representing English
UILanguage Language
)
func init()
|
// LoadConfig loads config file in yaml format
func LoadConfig() (Config, error) {
ReadConfig := Config{
Aliases: make(map[string]string),
}
in, err := ioutil.ReadFile(DefaultConfigFile)
if err == nil {
if err := yaml.Unmarshal(in, &ReadConfig); err != nil {
return ReadConfig, err
}
}
return ReadConfig, err
}
// TranslateInLang returns translation in selected language
func TranslateInLang(translations map[Language]string, lang Language) string {
if tsl, ok := translations[lang]; ok {
return tsl
}
// Assumption: English should always be provided
return translations[English]
}
|
{
ConfigDir = os.Getenv("HOME") + "/.config/ioctl/default"
// Create path to config directory
if err := os.MkdirAll(ConfigDir, 0700); err != nil {
log.L().Panic(err.Error())
}
// Path to config file
DefaultConfigFile = ConfigDir + "/config.default"
// Load or reset config file
var err error
ReadConfig, err = LoadConfig()
if err != nil {
if os.IsNotExist(err) {
err = reset() // Config file doesn't exist
}
if err != nil {
log.L().Panic(err.Error())
}
}
// Check completeness of config file
completeness := true
if ReadConfig.Wallet == "" {
ReadConfig.Wallet = ConfigDir
completeness = false
}
if ReadConfig.Language == "" {
ReadConfig.Language = supportedLanguage[0]
completeness = false
}
if ReadConfig.Nsv2height == 0 {
ReadConfig.Nsv2height = config.Default.Genesis.FairbankBlockHeight
}
if !completeness {
err := writeConfig()
if err != nil {
log.L().Panic(err.Error())
}
}
// Set language for ioctl
UILanguage = isSupportedLanguage(ReadConfig.Language)
if UILanguage == -1 {
UILanguage = 0
message := output.StringMessage(fmt.Sprintf("Language %s is not supported, English instead.",
ReadConfig.Language))
fmt.Println(message.Warn())
}
// Init subcommands
ConfigCmd.AddCommand(configGetCmd)
ConfigCmd.AddCommand(configSetCmd)
ConfigCmd.AddCommand(configResetCmd)
}
|
eab8d977bfb9_add_exception_in_trace_result.py
|
"""add_exception_in_trace_result
Revision ID: eab8d977bfb9
Revises: 06302deefc58
Create Date: 2021-08-26 02:10:55.283203
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'eab8d977bfb9'
down_revision = '06302deefc58'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('trace_result', sa.Column('exception', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
|
op.drop_column('trace_result', 'exception')
# ### end Alembic commands ###
|
|
worker.go
|
package baseworker
import (
"errors"
"fmt"
"net"
"os"
"sync"
"time"
gearmanWorker "github.com/Clever/gearman-go/worker"
"gopkg.in/Clever/kayvee-go.v6/logger"
"gopkg.in/eapache/go-resiliency.v1/retrier"
)
var (
lg = logger.New("gearcmd")
)
// JobFunc is a function that takes in a Gearman job and does some work on it.
type JobFunc func(Job) ([]byte, error)
// Job is an alias for http://godoc.org/github.com/mikespook/gearman-go/worker#Job.
type Job gearmanWorker.Job
// SigtermHandler is the definition for the function called after the worker receives
// a TERM signal.
type SigtermHandler func(*Worker)
// Worker represents a Gearman worker.
type Worker struct {
sync.Mutex
fn gearmanWorker.JobFunc
name string
w *gearmanWorker.Worker
}
// Listen starts listening for jobs on the specified host and port.
func (worker *Worker) Listen(host, port string) error {
if host == "" || port == "" {
return errors.New("must provide host and port")
}
worker.w.AddServer("tcp4", fmt.Sprintf("%s:%s", host, port))
worker.w.AddFunc(worker.name, worker.fn, gearmanWorker.Unlimited)
if err := worker.w.Ready(); err != nil {
lg.CriticalD("worker-error", logger.M{"error": err.Error()})
os.Exit(1)
}
worker.w.Work()
return nil
}
// Close closes the connection.
func (worker *Worker) Close() {
if worker.w != nil {
worker.w.Close()
}
}
// Shutdown blocks while waiting for all jobs to finish
func (worker *Worker) Shutdown() {
worker.Lock()
defer worker.Unlock()
lg.InfoD("shutdown", logger.M{"message": "Received sigterm. Shutting down gracefully."})
if worker.w != nil {
// Shutdown blocks, waiting for all jobs to finish
worker.w.Shutdown()
}
}
func defaultErrorHandler(e error) {
lg.InfoD("gearman-error", logger.M{"error": e.Error()})
if opErr, ok := e.(*net.OpError); ok {
if !opErr.Temporary() {
proc, err := os.FindProcess(os.Getpid())
if err != nil {
lg.CriticalD("err-getpid", logger.M{"error": err.Error()})
}
if err := proc.Signal(os.Interrupt); err != nil {
lg.CriticalD("err-interrupt", logger.M{"error": err.Error()})
}
}
}
}
// NewWorker creates a new gearman worker with the specified name and job function.
func NewWorker(name string, fn JobFunc) *Worker
|
{
// Turn a JobFunc into gearmanWorker.JobFunc
jobFunc := func(job gearmanWorker.Job) ([]byte, error) {
castedJob := Job(job)
return fn(castedJob)
}
w := gearmanWorker.New(gearmanWorker.OneByOne)
w.ErrorHandler = func(e error) {
// Try to reconnect if it is a disconnect error
wdc, ok := e.(*gearmanWorker.WorkerDisconnectError)
if ok {
lg.InfoD("err-disconnected-and-reconnecting", logger.M{"name": name, "error": e.Error()})
r := retrier.New(retrier.ExponentialBackoff(5, 200*time.Millisecond), nil)
if rc_err := r.Run(wdc.Reconnect); rc_err != nil {
lg.CriticalD("err-disconnected-fully", logger.M{"name": name, "error": rc_err.Error()})
defaultErrorHandler(rc_err)
return
}
lg.InfoD("gearman-reconnected", logger.M{"name": name})
} else {
defaultErrorHandler(e)
}
}
worker := &Worker{
fn: jobFunc,
name: name,
w: w,
}
return worker
}
|
|
article_new.js
|
/**
* Created by WYH on 17/2/9.
*/
$(document).ready(function () {
var editor = new wangEditor('editor');
editor.create();
bindNewArticleClickEvent(editor);
});
function bindNewArticleClickEvent(editor) {
bindSubmitButtonClick(editor);
}
function bindSubmitButtonClick(editor) {
$(".submit_button").click(function () {
if (judgeArticleEffective()) {
submitArticle(editor);
}
})
}
function submitArticle(editor) {
var title = $(".article_title").val();
var category = $(".article_category").val();
var text = editor.$txt.formatText();
var textFormat = editor.$txt.html();
Api.createNewArticle(title, text, textFormat, category, function (result) {
result = JSON.parse(result);
if (result.code == 200) {
alert("创建新博客成功");
window.location.href = "/blog/article/" + result.data.article_detail.id;
}
}, function (error) {
alert(error);
})
}
function judgeArticleEf
|
itle = $(".article_title").val();
var category = $(".article_category").val();
if (title == "") {
        alert('Please enter a title');
return false;
}
if (category == "") {
alert("请输入博客所属的分类");
return false;
}
return true;
}
|
fective() {
var t
|
Solution.go
|
package Solution
var count = 0
func countSubstrings(s string) int
|
me(s string, start int, end int) {
for start >= 0 && end < len(s) && s[start] == s[end] {
count++
start--
end++
}
}
|
{
if len(s) == 0 {
return 0
}
for i := 0; i < len(s); i++ {
		checkPalindrome(s, i, i)   // odd-length palindromes centered at i
		checkPalindrome(s, i, i+1) // even-length palindromes centered between i and i+1
}
return count
}
func checkPalindro
|
network_inspect_test.go
|
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"testing"
"github.com/containerd/nerdctl/pkg/inspecttypes/dockercompat"
"github.com/containerd/nerdctl/pkg/testutil"
"gotest.tools/v3/assert"
)
func TestNetworkInspect(t *testing.T) {
const (
testNetwork = "nerdctl-test-network-inspect"
testSubnet = "10.24.24.0/24"
testGateway = "10.24.24.1"
)
base := testutil.NewBase(t)
defer base.Cmd("network", "rm", testNetwork).Run()
args := []string{
"network", "create", "--label", "tag=testNetwork", "--subnet", testSubnet,
}
if base.Target == testutil.Docker
|
args = append(args, testNetwork)
base.Cmd(args...).AssertOK()
got := base.InspectNetwork(testNetwork)
assert.DeepEqual(base.T, testNetwork, got.Name)
expectedLabels := map[string]string{
"tag": "testNetwork",
}
assert.DeepEqual(base.T, expectedLabels, got.Labels)
expectedIPAM := dockercompat.IPAM{
Config: []dockercompat.IPAMConfig{
{
Subnet: testSubnet,
Gateway: testGateway,
},
},
}
assert.DeepEqual(base.T, expectedIPAM, got.IPAM)
}
|
{
// trivial incompatibility: nerdctl computes gateway automatically, but docker does not
args = append(args, "--gateway", testGateway)
}
|
blockheader.go
|
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"bytes"
"io"
"time"
"golang.org/x/crypto/scrypt"
"github.com/macsuite/macd/chaincfg/chainhash"
)
// MaxBlockHeaderPayload is the maximum number of bytes a block header can be.
// Version 4 bytes + Timestamp 4 bytes + Bits 4 bytes + Nonce 4 bytes +
// PrevBlock and MerkleRoot hashes.
const MaxBlockHeaderPayload = 16 + (chainhash.HashSize * 2)
// BlockHeader defines information about a block and is used in the bitcoin
// block (MsgBlock) and headers (MsgHeaders) messages.
type BlockHeader struct {
// Version of the block. This is not the same as the protocol version.
Version int32
// Hash of the previous block in the block chain.
PrevBlock chainhash.Hash
// Merkle tree reference to hash of all transactions for the block.
MerkleRoot chainhash.Hash
// Time the block was created. This is, unfortunately, encoded as a
// uint32 on the wire and therefore is limited to 2106.
Timestamp time.Time
// Difficulty target for the block.
Bits uint32
// Nonce used to generate the block.
Nonce uint32
}
// blockHeaderLen is a constant that represents the number of bytes for a block
// header.
const blockHeaderLen = 80
// BlockHash computes the block identifier hash for the given block header.
func (h *BlockHeader) BlockHash() chainhash.Hash {
// Encode the header and double sha256 everything prior to the number of
// transactions. Ignore the error returns since there is no way the
// encode could fail except being out of memory which would cause a
// run-time panic.
buf := bytes.NewBuffer(make([]byte, 0, MaxBlockHeaderPayload))
_ = writeBlockHeader(buf, 0, h)
return chainhash.DoubleHashH(buf.Bytes())
}
// PowHash returns the machinecoin scrypt hash of this block header. This value is
// used to check the PoW on blocks advertised on the network.
func (h *BlockHeader) PowHash() (*chainhash.Hash, error) {
var powHash chainhash.Hash
buf := bytes.NewBuffer(make([]byte, 0, MaxBlockHeaderPayload))
_ = writeBlockHeader(buf, 0, h)
scryptHash, err := scrypt.Key(buf.Bytes(), buf.Bytes(), 1024, 1, 1, 32)
if err != nil {
return nil, err
}
copy(powHash[:], scryptHash)
return &powHash, nil
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding block headers stored to disk, such as in a
// database, as opposed to decoding block headers from the wire.
func (h *BlockHeader) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error {
return readBlockHeader(r, pver, h)
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding block headers to be stored to disk, such as in a
// database, as opposed to encoding block headers for the wire.
func (h *BlockHeader) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error {
return writeBlockHeader(w, pver, h)
}
// Deserialize decodes a block header from r into the receiver using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field.
func (h *BlockHeader) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of readBlockHeader.
return readBlockHeader(r, 0, h)
}
// Serialize encodes a block header from r into the receiver using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field.
func (h *BlockHeader) Serialize(w io.Writer) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of writeBlockHeader.
return writeBlockHeader(w, 0, h)
}
// NewBlockHeader returns a new BlockHeader using the provided version, previous
// block hash, merkle root hash, difficulty bits, and nonce used to generate the
// block with defaults for the remaining fields.
func NewBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash,
bits uint32, nonce uint32) *BlockHeader {
// Limit the timestamp to one second precision since the protocol
// doesn't support better.
return &BlockHeader{
Version: version,
PrevBlock: *prevHash,
MerkleRoot: *merkleRootHash,
Timestamp: time.Unix(time.Now().Unix(), 0),
Bits: bits,
Nonce: nonce,
}
}
// readBlockHeader reads a bitcoin block header from r. See Deserialize for
// decoding block headers stored to disk, such as in a database, as opposed to
// decoding from the wire.
func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error
|
// writeBlockHeader writes a bitcoin block header to w. See Serialize for
// encoding block headers to be stored to disk, such as in a database, as
// opposed to encoding for the wire.
func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error {
sec := uint32(bh.Timestamp.Unix())
return writeElements(w, bh.Version, &bh.PrevBlock, &bh.MerkleRoot,
sec, bh.Bits, bh.Nonce)
}
|
{
return readElements(r, &bh.Version, &bh.PrevBlock, &bh.MerkleRoot,
(*uint32Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce)
}
|
test_matrix_init.py
|
import unittest
from pyaligner import *
class TestMatrixInit ( unittest.TestCase ):
def test_matrix_init_global ( self ):
scorer = Scorer( 5, -1, -3, 7 )
seqh = Sequence( "ACTG" )
seqv = Sequence( "ACAAA" )
matrix = DPMatrix( seqh, seqv, scorer )
self.assertEqual( matrix.seqh.seq_string, seqh.seq_string )
self.assertEqual( matrix.seqv.seq_string, seqv.seq_string )
self.assertEqual( matrix.scorer.match, scorer.match )
self.assertEqual( matrix.scorer.mismatch, scorer.mismatch )
self.assertEqual( matrix.scorer.gap, scorer.gap )
self.assertEqual( matrix.scorer.xdrop, scorer.xdrop )
self.assertEqual( matrix.semiglobal, False )
self.assertEqual( matrix.dimh, 5 )
self.assertEqual( matrix.dimv, 6 )
self.assertEqual( matrix.max_score, 10 )
self.assertEqual( matrix.max_row, 2 )
self.assertEqual( matrix.max_col, 2 )
self.assertEqual( matrix.dp_matrix, [[ 0, -3, -6, -9, -12 ],
[ -3, 5, 2, "X", "X" ],
[ -6, 2, 10, 7, 4 ],
[ -9, -1, 7, 9, 6 ],
[-12, "X", 4, 6, 8 ],
[-15, "X", "X", "X", 5 ]])
def
|
( self ):
scorer = Scorer( 5, -1, -3, 7 )
seqh = Sequence( "ACTG" )
seqv = Sequence( "ACAAA" )
matrix = DPMatrix( seqh, seqv, scorer, True )
self.assertEqual( matrix.seqh.seq_string, seqh.seq_string )
self.assertEqual( matrix.seqv.seq_string, seqv.seq_string )
self.assertEqual( matrix.scorer.match, scorer.match )
self.assertEqual( matrix.scorer.mismatch, scorer.mismatch )
self.assertEqual( matrix.scorer.gap, scorer.gap )
self.assertEqual( matrix.scorer.xdrop, scorer.xdrop )
self.assertEqual( matrix.semiglobal, True )
self.assertEqual( matrix.dimh, 5 )
self.assertEqual( matrix.dimv, 6 )
self.assertEqual( matrix.max_score, 10 )
self.assertEqual( matrix.max_row, 2 )
self.assertEqual( matrix.max_col, 2 )
self.assertEqual( matrix.dp_matrix, [[ 0, -3, -6, -9, -12 ],
[ -3, 5, 2, "X", "X" ],
[ -6, 2, 10, 7, 4 ],
[ -9, -1, 7, 9, 6 ],
[-12, "X", 4, 6, 8 ],
[-15, "X", "X", "X", 5 ]])
if __name__ == '__main__':
unittest.main()
|
test_matrix_init_semiglobal
|
MultisigAccountModificationTransactionBuilder.d.ts
|
import { AmountDto } from './AmountDto';
import { EntityTypeDto } from './EntityTypeDto';
import { KeyDto } from './KeyDto';
import { MultisigAccountModificationTransactionBodyBuilder } from './MultisigAccountModificationTransactionBodyBuilder';
import { NetworkTypeDto } from './NetworkTypeDto';
import { Serializer } from './Serializer';
import { SignatureDto } from './SignatureDto';
import { TimestampDto } from './TimestampDto';
import { TransactionBuilder } from './TransactionBuilder';
import { UnresolvedAddressDto } from './UnresolvedAddressDto';
export declare class
|
extends TransactionBuilder implements Serializer {
readonly multisigAccountModificationTransactionBody: MultisigAccountModificationTransactionBodyBuilder;
constructor(signature: SignatureDto, signerPublicKey: KeyDto, version: number, network: NetworkTypeDto, type: EntityTypeDto, fee: AmountDto, deadline: TimestampDto, minRemovalDelta: number, minApprovalDelta: number, addressAdditions: UnresolvedAddressDto[], addressDeletions: UnresolvedAddressDto[]);
static loadFromBinary(payload: Uint8Array): MultisigAccountModificationTransactionBuilder;
static createMultisigAccountModificationTransactionBuilder(signature: SignatureDto, signerPublicKey: KeyDto, version: number, network: NetworkTypeDto, type: EntityTypeDto, fee: AmountDto, deadline: TimestampDto, minRemovalDelta: number, minApprovalDelta: number, addressAdditions: UnresolvedAddressDto[], addressDeletions: UnresolvedAddressDto[]): MultisigAccountModificationTransactionBuilder;
getMinRemovalDelta(): number;
getMinApprovalDelta(): number;
getAddressAdditions(): UnresolvedAddressDto[];
getAddressDeletions(): UnresolvedAddressDto[];
getSize(): number;
getBody(): MultisigAccountModificationTransactionBodyBuilder;
serialize(): Uint8Array;
}
|
MultisigAccountModificationTransactionBuilder
|
Noise.ts
|
const MAT = window.MAT;
const material_basic_builder1 = MAT.createNode('meshBasicBuilder');
material_basic_builder1.createNode('output');
material_basic_builder1.createNode('globals');
assert.equal(material_basic_builder1.children().length, 2);
const noise1 = material_basic_builder1.createNode('noise');
// start with type as vec3
assert.deepEqual(noise1.params.get('amp')?.valueSerialized(), [1, 1, 1]);
assert.deepEqual(noise1.params.get('amp')?.defaultValueSerialized(), [1, 1, 1]);
// move to type as vec2
noise1.p.type.set(NOISE_NAMES.indexOf(NoiseName.NOISE_2D));
assert.deepEqual(noise1.params.get('amp')?.valueSerialized(), [1, 1]);
assert.deepEqual(noise1.params.get('amp')?.defaultValueSerialized(), [1, 1]);
noise1.params.get('amp')?.set([2, 3]);
	// back to vec3
noise1.p.type.set(NOISE_NAMES.indexOf(NoiseName.NOISE_3D));
assert.deepEqual(noise1.params.get('amp')?.valueSerialized(), [2, 3, 3]);
assert.deepEqual(noise1.params.get('amp')?.defaultValueSerialized(), [1, 1, 1]);
});
|
import {NOISE_NAMES, NoiseName} from '../../../../src/engine/nodes/gl/Noise';
QUnit.test('gl noise params update as type changes', async (assert) => {
|
|
Episode 02 - Generators class and iterators.py
|
# generator class and iterators
class FirstHundredNumbers:
def __init__(self):
|
def __next__(self):
if self.numbers < 100:
current = self.numbers
self.numbers += 1
return current
else:
raise StopIteration()
my_gen = FirstHundredNumbers()
print(next(my_gen))
print(next(my_gen))
"""
def __next__ is an iterator and class FirstHundredNumbers are not iterable
and there's a difference between iterators and iterable.
"""
|
self.numbers = 0
|
builder_test.go
|
// Copyright 2020-2021 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bufimagebuild
import (
"context"
"errors"
"path/filepath"
"sort"
"testing"
"github.com/bufbuild/buf/internal/buf/bufanalysis"
"github.com/bufbuild/buf/internal/buf/bufimage"
"github.com/bufbuild/buf/internal/buf/bufimage/bufimageutil"
"github.com/bufbuild/buf/internal/buf/bufmodule"
"github.com/bufbuild/buf/internal/buf/bufmodule/bufmodulebuild"
"github.com/bufbuild/buf/internal/buf/internal/buftesting"
"github.com/bufbuild/buf/internal/pkg/protosource"
"github.com/bufbuild/buf/internal/pkg/prototesting"
"github.com/bufbuild/buf/internal/pkg/storage/storageos"
"github.com/bufbuild/buf/internal/pkg/testingextended"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"google.golang.org/protobuf/types/descriptorpb"
)
var buftestingDirPath = filepath.Join(
"..",
"..",
"internal",
"buftesting",
)
func TestGoogleapis(t *testing.T) {
testingextended.SkipIfShort(t)
t.Parallel()
image := testBuildGoogleapis(t, true)
assert.Equal(t, buftesting.NumGoogleapisFilesWithImports, len(image.Files()))
assert.Equal(
t,
[]string{
"google/protobuf/any.proto",
"google/protobuf/api.proto",
"google/protobuf/descriptor.proto",
"google/protobuf/duration.proto",
"google/protobuf/empty.proto",
"google/protobuf/field_mask.proto",
"google/protobuf/source_context.proto",
"google/protobuf/struct.proto",
"google/protobuf/timestamp.proto",
"google/protobuf/type.proto",
"google/protobuf/wrappers.proto",
},
testGetImageImportPaths(image),
)
imageWithoutImports := bufimage.ImageWithoutImports(image)
assert.Equal(t, buftesting.NumGoogleapisFiles, len(imageWithoutImports.Files()))
imageWithoutImports = bufimage.ImageWithoutImports(imageWithoutImports)
assert.Equal(t, buftesting.NumGoogleapisFiles, len(imageWithoutImports.Files()))
imageWithSpecificNames, err := bufimage.ImageWithOnlyPathsAllowNotExist(
image,
[]string{
"google/protobuf/descriptor.proto",
"google/protobuf/api.proto",
"google/type/date.proto",
"google/foo/nonsense.proto",
},
)
assert.NoError(t, err)
assert.Equal(
t,
[]string{
"google/protobuf/any.proto",
"google/protobuf/api.proto",
"google/protobuf/descriptor.proto",
"google/protobuf/source_context.proto",
"google/protobuf/type.proto",
"google/type/date.proto",
},
testGetImageFilePaths(imageWithSpecificNames),
)
imageWithSpecificNames, err = bufimage.ImageWithOnlyPathsAllowNotExist(
image,
[]string{
"google/protobuf/descriptor.proto",
"google/protobuf/api.proto",
"google/type",
"google/foo",
},
)
assert.NoError(t, err)
assert.Equal(
t,
[]string{
"google/protobuf/any.proto",
"google/protobuf/api.proto",
"google/protobuf/descriptor.proto",
"google/protobuf/source_context.proto",
"google/protobuf/type.proto",
"google/protobuf/wrappers.proto",
"google/type/calendar_period.proto",
"google/type/color.proto",
"google/type/date.proto",
"google/type/dayofweek.proto",
"google/type/expr.proto",
"google/type/fraction.proto",
"google/type/latlng.proto",
"google/type/money.proto",
"google/type/postal_address.proto",
"google/type/quaternion.proto",
"google/type/timeofday.proto",
},
testGetImageFilePaths(imageWithSpecificNames),
)
imageWithoutImports = bufimage.ImageWithoutImports(imageWithSpecificNames)
assert.Equal(
t,
[]string{
"google/protobuf/api.proto",
"google/protobuf/descriptor.proto",
"google/type/calendar_period.proto",
"google/type/color.proto",
"google/type/date.proto",
"google/type/dayofweek.proto",
"google/type/expr.proto",
"google/type/fraction.proto",
"google/type/latlng.proto",
"google/type/money.proto",
"google/type/postal_address.proto",
"google/type/quaternion.proto",
"google/type/timeofday.proto",
},
testGetImageFilePaths(imageWithoutImports),
)
_, err = bufimage.ImageWithOnlyPaths(
image,
[]string{
"google/protobuf/descriptor.proto",
"google/protobuf/api.proto",
"google/type/date.proto",
"google/foo/nonsense.proto",
},
)
assert.Equal(t, errors.New(`path "google/foo/nonsense.proto" has no matching file in the image`), err)
_, err = bufimage.ImageWithOnlyPaths(
image,
[]string{
"google/protobuf/descriptor.proto",
"google/protobuf/api.proto",
"google/type/date.proto",
"google/foo",
},
)
assert.Equal(t, errors.New(`path "google/foo" has no matching file in the image`), err)
assert.Equal(t, buftesting.NumGoogleapisFilesWithImports, len(image.Files()))
// basic check to make sure there is no error at this scale
_, err = protosource.NewFilesUnstable(context.Background(), bufimageutil.NewInputFiles(image.Files())...)
assert.NoError(t, err)
}
func TestCompareGoogleapis(t *testing.T) {
testingextended.SkipIfShort(t)
// Don't run in parallel as it allocates a lot of memory
// cannot directly compare with source code info as buf protoc creates additional source
// code infos that protoc does not
image := testBuildGoogleapis(t, false)
fileDescriptorSet := bufimage.ImageToFileDescriptorSet(image)
actualProtocFileDescriptorSet := testBuildActualProtocGoogleapis(t, false)
prototesting.AssertFileDescriptorSetsEqual(
t,
fileDescriptorSet,
actualProtocFileDescriptorSet,
)
}
func TestCompareCustomOptions1(t *testing.T) {
t.Parallel()
testCompare(t, "customoptions1")
}
func TestCompareProto3Optional1(t *testing.T) {
t.Parallel()
testCompare(t, "proto3optional1")
}
func TestCustomOptionsError1(t *testing.T) {
t.Parallel()
_, fileAnnotations := testBuild(t, false, filepath.Join("testdata", "customoptionserror1"))
require.Equal(t, 1, len(fileAnnotations), fileAnnotations)
require.Equal(
t,
"field a.Baz.bat: option (a.foo).bat: field bat of a.Foo does not exist",
fileAnnotations[0].Message(),
)
}
func TestOptionPanic(t *testing.T) {
t.Parallel()
require.NotPanics(t, func() {
moduleFileSet := testGetModuleFileSet(t, filepath.Join("testdata", "optionpanic"))
_, _, err := NewBuilder(zap.NewNop()).Build(
context.Background(),
moduleFileSet,
)
require.NoError(t, err)
})
}
func TestCompareSemicolons(t *testing.T) {
t.Parallel()
testCompare(t, "semicolons")
}
func testCompare(t *testing.T, relDirPath string) {
dirPath := filepath.Join("testdata", relDirPath)
image, fileAnnotations := testBuild(t, false, dirPath)
require.Equal(t, 0, len(fileAnnotations), fileAnnotations)
image = bufimage.ImageWithoutImports(image)
fileDescriptorSet := bufimage.ImageToFileDescriptorSet(image)
filePaths := buftesting.GetProtocFilePaths(t, dirPath, 0)
actualProtocFileDescriptorSet := buftesting.GetActualProtocFileDescriptorSet(t, false, false, dirPath, filePaths)
prototesting.AssertFileDescriptorSetsEqual(t, fileDescriptorSet, actualProtocFileDescriptorSet)
}
func testBuildGoogleapis(t *testing.T, includeSourceInfo bool) bufimage.Image {
googleapisDirPath := buftesting.GetGoogleapisDirPath(t, buftestingDirPath)
image, fileAnnotations := testBuild(t, includeSourceInfo, googleapisDirPath)
require.Equal(t, 0, len(fileAnnotations), fileAnnotations)
return image
}
func testBuildActualProtocGoogleapis(t *testing.T, includeSourceInfo bool) *descriptorpb.FileDescriptorSet {
googleapisDirPath := buftesting.GetGoogleapisDirPath(t, buftestingDirPath)
filePaths := buftesting.GetProtocFilePaths(t, googleapisDirPath, 0)
fileDescriptorSet := buftesting.GetActualProtocFileDescriptorSet(t, true, includeSourceInfo, googleapisDirPath, filePaths)
assert.Equal(t, buftesting.NumGoogleapisFilesWithImports, len(fileDescriptorSet.GetFile()))
return fileDescriptorSet
}
func testBuild(t *testing.T, includeSourceInfo bool, dirPath string) (bufimage.Image, []bufanalysis.FileAnnotation) {
moduleFileSet := testGetModuleFileSet(t, dirPath)
var options []BuildOption
if !includeSourceInfo {
options = append(options, WithExcludeSourceCodeInfo())
}
image, fileAnnotations, err := NewBuilder(zap.NewNop()).Build(
context.Background(),
moduleFileSet,
options...,
)
require.NoError(t, err)
return image, fileAnnotations
}
func
|
(t *testing.T, dirPath string) bufmodule.ModuleFileSet {
storageosProvider := storageos.NewProvider(storageos.ProviderWithSymlinks())
readWriteBucket, err := storageosProvider.NewReadWriteBucket(
dirPath,
storageos.ReadWriteBucketWithSymlinksIfSupported(),
)
require.NoError(t, err)
config, err := bufmodulebuild.NewConfigV1(bufmodulebuild.ExternalConfigV1{})
require.NoError(t, err)
module, err := bufmodulebuild.NewModuleBucketBuilder(zap.NewNop()).BuildForBucket(
context.Background(),
readWriteBucket,
config,
)
require.NoError(t, err)
moduleFileSet, err := bufmodulebuild.NewModuleFileSetBuilder(
zap.NewNop(),
bufmodule.NewNopModuleReader(),
).Build(
context.Background(),
module,
)
require.NoError(t, err)
return moduleFileSet
}
func testGetImageFilePaths(image bufimage.Image) []string {
var fileNames []string
for _, file := range image.Files() {
fileNames = append(fileNames, file.Path())
}
sort.Strings(fileNames)
return fileNames
}
func testGetImageImportPaths(image bufimage.Image) []string {
var importNames []string
for _, file := range image.Files() {
if file.IsImport() {
importNames = append(importNames, file.Path())
}
}
sort.Strings(importNames)
return importNames
}
|
testGetModuleFileSet
|
app.module.ts
|
import {BrowserModule} from '@angular/platform-browser';
import {NgModule} from '@angular/core';
import {FormsModule} from '@angular/forms'; //Template Driven Forms
import {ReactiveFormsModule} from '@angular/forms'; //Model Driven Forms
import {HttpModule} from '@angular/http';
import {PrettyJsonModule} from "angular2-prettyjson";
import {AppComponent} from './app.component';
import {NavbarComponent} from './components/navbar/navbar.component';
import {ProductsComponent} from './components/products/products.component';
import {DashboardComponent} from './components/dashboard/dashboard.component';
import {LocalApiService} from "./services/local-api.service";
import {ProductFormComponent} from './components/product-form/product-form.component';
import {LocalHelperService} from "./services/local-helper.service";
import {AppRouteModule} from "./app.routes";
import {TestAppComponent} from './components/test-app/test-app.component';
import {TestAppService} from "./components/test-app/test-app.service";
import {ModalModule} from "ngx-bootstrap";
import {ConfirmModalComponent} from './components/bootstrap/confirm-modal/confirm-modal.component';
import {LocalBootstrapService} from "./components/bootstrap/local-bootstrap.service";
import {NotifyComponent} from './components/bootstrap/notify/notify.component';
import {AuthGuard} from "./guards/auth.guard";
import {AuthService} from "./services/auth.service";
import { HomeComponent } from './components/home/home.component';
import { LoginComponent } from './components/login/login.component';
import { BlockPageLoaderComponent } from './components/bootstrap/block-page-loader/block-page-loader.component';
import { AdminFormComponent } from './components/admin-form/admin-form.component';
import { AdminsComponent } from './components/admins/admins.component';
import { OrdersComponent } from './components/orders/orders.component';
@NgModule({
declarations: [
AppComponent,
NavbarComponent,
ProductsComponent,
DashboardComponent,
ProductFormComponent,
TestAppComponent,
ConfirmModalComponent,
NotifyComponent,
HomeComponent,
LoginComponent,
BlockPageLoaderComponent,
AdminFormComponent,
AdminsComponent,
OrdersComponent
],
imports: [
BrowserModule,
FormsModule,
HttpModule,
ReactiveFormsModule,
PrettyJsonModule,
AppRouteModule,
ModalModule.forRoot()
],
providers: [LocalApiService, LocalHelperService, TestAppService, LocalBootstrapService, AuthService, AuthGuard],
bootstrap: [AppComponent]
})
export class
|
{
}
|
AppModule
|
sbc.rs
|
// Copyright (c) 2018 Caleb Boylan
// [This program is licensed under the "MIT License"]
// Please see the file LICENSE in the source
// distribution of this software for license terms.
// SBC A,n
pub fn sbc(val: u8, a: &mut u8, flags: &mut u8)
|
{
let old_a = *a;
let c = (*flags & 0b00010000) >> 4;
    *a = a.wrapping_sub(val).wrapping_sub(c); // wrapping subtraction avoids a panic on underflow in debug builds
*flags = 0b01000000;
if *a == 0 {
*flags |= 0b10000000;
}
if val == 0xFF && c == 1 {
*flags |= 0b00010000;
} else if old_a < *a {
*flags |= 0b00010000;
}
if val & 0x0F == 0x0F && c == 1 {
*flags |= 0b00100000;
} else if old_a & 0x0F < *a & 0x0F {
*flags |= 0b00100000;
}
//else if (old_a & 0x0F) - (val & 0x0F) - c > 0x0F { *flags |= 0b00100000; }
}
|
|
api.xpack.watcher.stats.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// Code generated from specification version 8.2.0: DO NOT EDIT
package esapi
import (
"context"
"net/http"
"strconv"
"strings"
)
func
|
(t Transport) WatcherStats {
return func(o ...func(*WatcherStatsRequest)) (*Response, error) {
var r = WatcherStatsRequest{}
for _, f := range o {
f(&r)
}
return r.Do(r.ctx, t)
}
}
// ----- API Definition -------------------------------------------------------
// WatcherStats - Retrieves the current Watcher metrics.
//
// See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html.
//
type WatcherStats func(o ...func(*WatcherStatsRequest)) (*Response, error)
// WatcherStatsRequest configures the Watcher Stats API request.
//
type WatcherStatsRequest struct {
Metric []string
EmitStacktraces *bool
Pretty bool
Human bool
ErrorTrace bool
FilterPath []string
Header http.Header
ctx context.Context
}
// Do executes the request and returns response or error.
//
func (r WatcherStatsRequest) Do(ctx context.Context, transport Transport) (*Response, error) {
var (
method string
path strings.Builder
params map[string]string
)
method = "GET"
path.Grow(7 + 1 + len("_watcher") + 1 + len("stats") + 1 + len(strings.Join(r.Metric, ",")))
path.WriteString("http://")
path.WriteString("/")
path.WriteString("_watcher")
path.WriteString("/")
path.WriteString("stats")
if len(r.Metric) > 0 {
path.WriteString("/")
path.WriteString(strings.Join(r.Metric, ","))
}
params = make(map[string]string)
if r.EmitStacktraces != nil {
params["emit_stacktraces"] = strconv.FormatBool(*r.EmitStacktraces)
}
if len(r.Metric) > 0 {
params["metric"] = strings.Join(r.Metric, ",")
}
if r.Pretty {
params["pretty"] = "true"
}
if r.Human {
params["human"] = "true"
}
if r.ErrorTrace {
params["error_trace"] = "true"
}
if len(r.FilterPath) > 0 {
params["filter_path"] = strings.Join(r.FilterPath, ",")
}
req, err := newRequest(method, path.String(), nil)
if err != nil {
return nil, err
}
if len(params) > 0 {
q := req.URL.Query()
for k, v := range params {
q.Set(k, v)
}
req.URL.RawQuery = q.Encode()
}
if len(r.Header) > 0 {
if len(req.Header) == 0 {
req.Header = r.Header
} else {
for k, vv := range r.Header {
for _, v := range vv {
req.Header.Add(k, v)
}
}
}
}
if ctx != nil {
req = req.WithContext(ctx)
}
res, err := transport.Perform(req)
if err != nil {
return nil, err
}
response := Response{
StatusCode: res.StatusCode,
Body: res.Body,
Header: res.Header,
}
return &response, nil
}
// WithContext sets the request context.
//
func (f WatcherStats) WithContext(v context.Context) func(*WatcherStatsRequest) {
return func(r *WatcherStatsRequest) {
r.ctx = v
}
}
// WithMetric - controls what additional stat metrics should be included in the response.
//
func (f WatcherStats) WithMetric(v ...string) func(*WatcherStatsRequest) {
return func(r *WatcherStatsRequest) {
r.Metric = v
}
}
// WithEmitStacktraces - emits stack traces of currently running watches.
//
func (f WatcherStats) WithEmitStacktraces(v bool) func(*WatcherStatsRequest) {
return func(r *WatcherStatsRequest) {
r.EmitStacktraces = &v
}
}
// WithPretty makes the response body pretty-printed.
//
func (f WatcherStats) WithPretty() func(*WatcherStatsRequest) {
return func(r *WatcherStatsRequest) {
r.Pretty = true
}
}
// WithHuman makes statistical values human-readable.
//
func (f WatcherStats) WithHuman() func(*WatcherStatsRequest) {
return func(r *WatcherStatsRequest) {
r.Human = true
}
}
// WithErrorTrace includes the stack trace for errors in the response body.
//
func (f WatcherStats) WithErrorTrace() func(*WatcherStatsRequest) {
return func(r *WatcherStatsRequest) {
r.ErrorTrace = true
}
}
// WithFilterPath filters the properties of the response body.
//
func (f WatcherStats) WithFilterPath(v ...string) func(*WatcherStatsRequest) {
return func(r *WatcherStatsRequest) {
r.FilterPath = v
}
}
// WithHeader adds the headers to the HTTP request.
//
func (f WatcherStats) WithHeader(h map[string]string) func(*WatcherStatsRequest) {
return func(r *WatcherStatsRequest) {
if r.Header == nil {
r.Header = make(http.Header)
}
for k, v := range h {
r.Header.Add(k, v)
}
}
}
// WithOpaqueID adds the X-Opaque-Id header to the HTTP request.
//
func (f WatcherStats) WithOpaqueID(s string) func(*WatcherStatsRequest) {
return func(r *WatcherStatsRequest) {
if r.Header == nil {
r.Header = make(http.Header)
}
r.Header.Set("X-Opaque-Id", s)
}
}
|
newWatcherStatsFunc
|
reader.go
|
package routing
import (
"bytes"
"fmt"
"net"
"github.com/qdm12/gluetun/internal/constants"
"github.com/vishvananda/netlink"
)
func (r *routing) DefaultRoute() (defaultInterface string, defaultGateway net.IP, err error) {
routes, err := netlink.RouteList(nil, netlink.FAMILY_ALL)
if err != nil {
return "", nil, fmt.Errorf("cannot list routes: %w", err)
}
for _, route := range routes {
if route.Dst == nil {
defaultGateway = route.Gw
linkIndex := route.LinkIndex
link, err := netlink.LinkByIndex(linkIndex)
if err != nil {
return "", nil, fmt.Errorf("cannot obtain link with index %d for default route: %w", linkIndex, err)
}
attributes := link.Attrs()
defaultInterface = attributes.Name
if r.verbose {
r.logger.Info("default route found: interface %s, gateway %s", defaultInterface, defaultGateway.String())
}
return defaultInterface, defaultGateway, nil
}
}
return "", nil, fmt.Errorf("cannot find default route in %d routes", len(routes))
}
func (r *routing) DefaultIP() (ip net.IP, err error) {
routes, err := netlink.RouteList(nil, netlink.FAMILY_ALL)
if err != nil {
return nil, fmt.Errorf("cannot get default IP address: %w", err)
}
defaultLinkName := ""
for _, route := range routes {
if route.Dst == nil {
linkIndex := route.LinkIndex
link, err := netlink.LinkByIndex(linkIndex)
if err != nil {
return nil, fmt.Errorf("cannot get default IP address: %w", err)
}
defaultLinkName = link.Attrs().Name
}
}
if len(defaultLinkName) == 0 {
return nil, fmt.Errorf("cannot find default link name in %d routes", len(routes))
}
return r.assignedIP(defaultLinkName)
}
func (r *routing) LocalSubnet() (defaultSubnet net.IPNet, err error) {
routes, err := netlink.RouteList(nil, netlink.FAMILY_ALL)
if err != nil {
return defaultSubnet, fmt.Errorf("cannot find local subnet: %w", err)
}
defaultLinkIndex := -1
for _, route := range routes {
if route.Dst == nil {
defaultLinkIndex = route.LinkIndex
break
}
}
if defaultLinkIndex == -1 {
return defaultSubnet, fmt.Errorf("cannot find local subnet: cannot find default link")
}
for _, route := range routes {
if route.Gw != nil || route.LinkIndex != defaultLinkIndex {
continue
}
defaultSubnet = *route.Dst
if r.verbose {
r.logger.Info("local subnet found: %s", defaultSubnet.String())
}
return defaultSubnet, nil
}
return defaultSubnet, fmt.Errorf("cannot find default subnet in %d routes", len(routes))
}
func (r *routing) assignedIP(interfaceName string) (ip net.IP, err error) {
iface, err := net.InterfaceByName(interfaceName)
if err != nil {
return nil, err
}
addresses, err := iface.Addrs()
if err != nil {
return nil, err
}
for _, address := range addresses {
switch value := address.(type) {
case *net.IPAddr:
return value.IP, nil
case *net.IPNet:
return value.IP, nil
}
}
return nil, fmt.Errorf("IP address not found in addresses of interface %s", interfaceName)
}
func (r *routing) VPNDestinationIP() (ip net.IP, err error) {
routes, err := netlink.RouteList(nil, netlink.FAMILY_ALL)
if err != nil {
return nil, fmt.Errorf("cannot find VPN destination IP: %w", err)
}
defaultLinkIndex := -1
for _, route := range routes {
if route.Dst == nil {
defaultLinkIndex = route.LinkIndex
break
}
}
if defaultLinkIndex == -1 {
return nil, fmt.Errorf("cannot find VPN destination IP: cannot find default link")
}
for _, route := range routes {
if route.LinkIndex == defaultLinkIndex &&
route.Dst != nil &&
!IPIsPrivate(route.Dst.IP) &&
bytes.Equal(route.Dst.Mask, net.IPMask{255, 255, 255, 255}) {
return route.Dst.IP, nil
}
}
return nil, fmt.Errorf("cannot find VPN destination IP address from ip routes")
}
func (r *routing) VPNLocalGatewayIP() (ip net.IP, err error) {
routes, err := netlink.RouteList(nil, netlink.FAMILY_ALL)
if err != nil {
return nil, fmt.Errorf("cannot find VPN local gateway IP: %w", err)
}
for _, route := range routes {
link, err := netlink.LinkByIndex(route.LinkIndex)
if err != nil {
return nil, fmt.Errorf("cannot find VPN local gateway IP: %w", err)
}
interfaceName := link.Attrs().Name
if interfaceName == string(constants.TUN) &&
route.Dst != nil &&
route.Dst.IP.Equal(net.IP{0, 0, 0, 0}) {
return route.Gw, nil
}
}
return nil, fmt.Errorf("cannot find VPN local gateway IP address from ip routes")
}
func
|
(ip net.IP) bool {
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
return true
}
privateCIDRBlocks := [8]string{
"127.0.0.0/8", // localhost
"10.0.0.0/8", // 24-bit block
"172.16.0.0/12", // 20-bit block
"192.168.0.0/16", // 16-bit block
"169.254.0.0/16", // link local address
"::1/128", // localhost IPv6
"fc00::/7", // unique local address IPv6
"fe80::/10", // link local address IPv6
}
for i := range privateCIDRBlocks {
_, CIDR, _ := net.ParseCIDR(privateCIDRBlocks[i])
if CIDR.Contains(ip) {
return true
}
}
return false
}
|
IPIsPrivate
|
upgrade.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
|
"github.com/spf13/cobra"
"k8s.io/kops/cmd/kops/util"
)
// upgradeCmd represents the upgrade command
func NewCmdUpgrade(f *util.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "upgrade",
Short: upgradeClusterShort,
}
// create subcommands
cmd.AddCommand(NewCmdUpgradeCluster(f, out))
return cmd
}
|
import (
"io"
|
0003_auto_20190209_1520.py
|
# Generated by Django 2.1.4 on 2019-02-09 15:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
|
dependencies = [
('society_bureau', '0002_sitesettings'),
]
operations = [
migrations.AlterField(
model_name='societybureau',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='society_bureau', to=settings.AUTH_USER_MODEL),
),
]
|
|
topic.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Topic(pulumi.CustomResource):
application_failure_feedback_role_arn: pulumi.Output[str]
"""
IAM role for failure feedback
"""
application_success_feedback_role_arn: pulumi.Output[str]
"""
The IAM role permitted to receive success feedback for this topic
"""
application_success_feedback_sample_rate: pulumi.Output[float]
"""
Percentage of success to sample
"""
arn: pulumi.Output[str]
"""
The ARN of the SNS topic, as a more obvious property (clone of id)
"""
delivery_policy: pulumi.Output[str]
"""
The SNS delivery policy. More on [AWS documentation](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html)
"""
display_name: pulumi.Output[str]
"""
The display name for the SNS topic
"""
http_failure_feedback_role_arn: pulumi.Output[str]
"""
IAM role for failure feedback
"""
http_success_feedback_role_arn: pulumi.Output[str]
"""
The IAM role permitted to receive success feedback for this topic
"""
http_success_feedback_sample_rate: pulumi.Output[float]
"""
Percentage of success to sample
"""
kms_master_key_id: pulumi.Output[str]
"""
The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see [Key Terms](https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html#sse-key-terms)
"""
lambda_failure_feedback_role_arn: pulumi.Output[str]
"""
IAM role for failure feedback
"""
lambda_success_feedback_role_arn: pulumi.Output[str]
"""
The IAM role permitted to receive success feedback for this topic
"""
lambda_success_feedback_sample_rate: pulumi.Output[float]
"""
Percentage of success to sample
"""
name: pulumi.Output[str]
"""
The friendly name for the SNS topic. By default generated by this provider.
"""
name_prefix: pulumi.Output[str]
"""
The friendly name for the SNS topic. Conflicts with `name`.
"""
policy: pulumi.Output[str]
sqs_failure_feedback_role_arn: pulumi.Output[str]
"""
IAM role for failure feedback
"""
sqs_success_feedback_role_arn: pulumi.Output[str]
"""
The IAM role permitted to receive success feedback for this topic
"""
sqs_success_feedback_sample_rate: pulumi.Output[float]
"""
Percentage of success to sample
"""
tags: pulumi.Output[dict]
"""
Key-value mapping of resource tags
"""
def __init__(__self__, resource_name, opts=None, application_failure_feedback_role_arn=None, application_success_feedback_role_arn=None, application_success_feedback_sample_rate=None, delivery_policy=None, display_name=None, http_failure_feedback_role_arn=None, http_success_feedback_role_arn=None, http_success_feedback_sample_rate=None, kms_master_key_id=None, lambda_failure_feedback_role_arn=None, lambda_success_feedback_role_arn=None, lambda_success_feedback_sample_rate=None, name=None, name_prefix=None, policy=None, sqs_failure_feedback_role_arn=None, sqs_success_feedback_role_arn=None, sqs_success_feedback_sample_rate=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an SNS topic resource
## Message Delivery Status Arguments
The `<endpoint>_success_feedback_role_arn` and `<endpoint>_failure_feedback_role_arn` arguments are used to give Amazon SNS write access to use CloudWatch Logs on your behalf. The `<endpoint>_success_feedback_sample_rate` argument is for specifying the sample rate percentage (0-100) of successfully delivered messages. After you configure the `<endpoint>_failure_feedback_role_arn` argument, then all failed message deliveries generate CloudWatch Logs.
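## Example Usage
A minimal sketch (not taken from the provider documentation); the IAM role ARN below is a placeholder that would normally come from an `aws.iam.Role` resource:
```python
import pulumi
import pulumi_aws as aws

feedback_role_arn = "arn:aws:iam::123456789012:role/SNSFeedbackRole"  # placeholder

user_updates = aws.sns.Topic("user-updates",
    display_name="User updates",
    sqs_success_feedback_role_arn=feedback_role_arn,
    sqs_failure_feedback_role_arn=feedback_role_arn,
    sqs_success_feedback_sample_rate=100)

pulumi.export("topic_arn", user_updates.arn)
```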
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_failure_feedback_role_arn: IAM role for failure feedback
:param pulumi.Input[str] application_success_feedback_role_arn: The IAM role permitted to receive success feedback for this topic
:param pulumi.Input[float] application_success_feedback_sample_rate: Percentage of success to sample
:param pulumi.Input[str] delivery_policy: The SNS delivery policy. More on [AWS documentation](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html)
:param pulumi.Input[str] display_name: The display name for the SNS topic
:param pulumi.Input[str] http_failure_feedback_role_arn: IAM role for failure feedback
:param pulumi.Input[str] http_success_feedback_role_arn: The IAM role permitted to receive success feedback for this topic
:param pulumi.Input[float] http_success_feedback_sample_rate: Percentage of success to sample
:param pulumi.Input[str] kms_master_key_id: The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see [Key Terms](https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html#sse-key-terms)
:param pulumi.Input[str] lambda_failure_feedback_role_arn: IAM role for failure feedback
:param pulumi.Input[str] lambda_success_feedback_role_arn: The IAM role permitted to receive success feedback for this topic
:param pulumi.Input[float] lambda_success_feedback_sample_rate: Percentage of success to sample
:param pulumi.Input[str] name: The friendly name for the SNS topic. By default generated by this provider.
:param pulumi.Input[str] name_prefix: The friendly name for the SNS topic. Conflicts with `name`.
:param pulumi.Input[str] sqs_failure_feedback_role_arn: IAM role for failure feedback
:param pulumi.Input[str] sqs_success_feedback_role_arn: The IAM role permitted to receive success feedback for this topic
:param pulumi.Input[float] sqs_success_feedback_sample_rate: Percentage of success to sample
:param pulumi.Input[dict] tags: Key-value mapping of resource tags
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/sns_topic.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['application_failure_feedback_role_arn'] = application_failure_feedback_role_arn
__props__['application_success_feedback_role_arn'] = application_success_feedback_role_arn
__props__['application_success_feedback_sample_rate'] = application_success_feedback_sample_rate
__props__['delivery_policy'] = delivery_policy
__props__['display_name'] = display_name
__props__['http_failure_feedback_role_arn'] = http_failure_feedback_role_arn
__props__['http_success_feedback_role_arn'] = http_success_feedback_role_arn
__props__['http_success_feedback_sample_rate'] = http_success_feedback_sample_rate
__props__['kms_master_key_id'] = kms_master_key_id
__props__['lambda_failure_feedback_role_arn'] = lambda_failure_feedback_role_arn
__props__['lambda_success_feedback_role_arn'] = lambda_success_feedback_role_arn
__props__['lambda_success_feedback_sample_rate'] = lambda_success_feedback_sample_rate
__props__['name'] = name
__props__['name_prefix'] = name_prefix
__props__['policy'] = policy
__props__['sqs_failure_feedback_role_arn'] = sqs_failure_feedback_role_arn
__props__['sqs_success_feedback_role_arn'] = sqs_success_feedback_role_arn
__props__['sqs_success_feedback_sample_rate'] = sqs_success_feedback_sample_rate
__props__['tags'] = tags
__props__['arn'] = None
super(Topic, __self__).__init__(
'aws:sns/topic:Topic',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, application_failure_feedback_role_arn=None, application_success_feedback_role_arn=None, application_success_feedback_sample_rate=None, arn=None, delivery_policy=None, display_name=None, http_failure_feedback_role_arn=None, http_success_feedback_role_arn=None, http_success_feedback_sample_rate=None, kms_master_key_id=None, lambda_failure_feedback_role_arn=None, lambda_success_feedback_role_arn=None, lambda_success_feedback_sample_rate=None, name=None, name_prefix=None, policy=None, sqs_failure_feedback_role_arn=None, sqs_success_feedback_role_arn=None, sqs_success_feedback_sample_rate=None, tags=None):
"""
Get an existing Topic resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
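A minimal sketch (the topic ARN below is a placeholder), assuming the provider module is imported as `pulumi_aws`:
```python
import pulumi_aws as aws

existing = aws.sns.Topic.get("existing-topic",
    "arn:aws:sns:us-east-1:123456789012:user-updates")
```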
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_failure_feedback_role_arn: IAM role for failure feedback
:param pulumi.Input[str] application_success_feedback_role_arn: The IAM role permitted to receive success feedback for this topic
:param pulumi.Input[float] application_success_feedback_sample_rate: Percentage of success to sample
:param pulumi.Input[str] arn: The ARN of the SNS topic, as a more obvious property (clone of id)
:param pulumi.Input[str] delivery_policy: The SNS delivery policy. More on [AWS documentation](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html)
:param pulumi.Input[str] display_name: The display name for the SNS topic
:param pulumi.Input[str] http_failure_feedback_role_arn: IAM role for failure feedback
:param pulumi.Input[str] http_success_feedback_role_arn: The IAM role permitted to receive success feedback for this topic
:param pulumi.Input[float] http_success_feedback_sample_rate: Percentage of success to sample
:param pulumi.Input[str] kms_master_key_id: The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see [Key Terms](https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html#sse-key-terms)
:param pulumi.Input[str] lambda_failure_feedback_role_arn: IAM role for failure feedback
:param pulumi.Input[str] lambda_success_feedback_role_arn: The IAM role permitted to receive success feedback for this topic
:param pulumi.Input[float] lambda_success_feedback_sample_rate: Percentage of success to sample
:param pulumi.Input[str] name: The friendly name for the SNS topic. By default generated by this provider.
:param pulumi.Input[str] name_prefix: The friendly name for the SNS topic. Conflicts with `name`.
:param pulumi.Input[str] sqs_failure_feedback_role_arn: IAM role for failure feedback
:param pulumi.Input[str] sqs_success_feedback_role_arn: The IAM role permitted to receive success feedback for this topic
:param pulumi.Input[float] sqs_success_feedback_sample_rate: Percentage of success to sample
:param pulumi.Input[dict] tags: Key-value mapping of resource tags
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/sns_topic.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["application_failure_feedback_role_arn"] = application_failure_feedback_role_arn
__props__["application_success_feedback_role_arn"] = application_success_feedback_role_arn
__props__["application_success_feedback_sample_rate"] = application_success_feedback_sample_rate
__props__["arn"] = arn
__props__["delivery_policy"] = delivery_policy
__props__["display_name"] = display_name
__props__["http_failure_feedback_role_arn"] = http_failure_feedback_role_arn
__props__["http_success_feedback_role_arn"] = http_success_feedback_role_arn
__props__["http_success_feedback_sample_rate"] = http_success_feedback_sample_rate
__props__["kms_master_key_id"] = kms_master_key_id
__props__["lambda_failure_feedback_role_arn"] = lambda_failure_feedback_role_arn
__props__["lambda_success_feedback_role_arn"] = lambda_success_feedback_role_arn
__props__["lambda_success_feedback_sample_rate"] = lambda_success_feedback_sample_rate
__props__["name"] = name
__props__["name_prefix"] = name_prefix
__props__["policy"] = policy
__props__["sqs_failure_feedback_role_arn"] = sqs_failure_feedback_role_arn
__props__["sqs_success_feedback_role_arn"] = sqs_success_feedback_role_arn
__props__["sqs_success_feedback_sample_rate"] = sqs_success_feedback_sample_rate
__props__["tags"] = tags
return Topic(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
canvasReducer.test.js
|
import {canvas as reducer} from '../../src/reducers/reducers.js';
import createAction from '../../src/actions/ActionFactory.js';
/**
* Builder pattern for canvas state
*
* @param past
* @param present
* @param future
* @returns {{past: Array, present: *, future: Array}}
*/
const buildCanvasState = (past = [], present = null, future = []) => {
return {
past,
present,
future
}
};
describe('Canvas reducer', () => {
const img1 = {image: 1},
img2 = {image: 2},
img3 = {image: 3};
describe('upon DRAW_STROKE', () => {
let action = createAction('DRAW_STROKE', img1);
it('should update the present canvas', () => {
let actual = reducer(buildCanvasState([img2], img3), action);
actual.present.should.deep.equal(img1);
})
it('should update the present canvas when present is null', () => {
let actual = reducer(buildCanvasState(), action);
actual.present.should.deep.equal(img1);
})
it('should add the previous canvas to the past', () => {
let actual = reducer(buildCanvasState([img2], img3), action);
actual.past.should.deep.equal([img2, img3]);
})
it('should reset the future', () => {
let actual = reducer(buildCanvasState([], img2, [img3]), action);
actual.future.should.deep.equal([]);
})
});
describe('upon EXTEND_STROKE', () => {
let action = createAction('EXTEND_STROKE', img1);
it('should update the present canvas', () => {
let actual = reducer(buildCanvasState([img2], img3), action);
actual.present.should.deep.equal(img1);
})
|
it('should not modify the past', () => {
let actual = reducer(buildCanvasState([img2], img3), action);
actual.past.should.deep.equal([img2]);
})
})
describe('upon UNDO', () => {
let action = createAction('UNDO');
it('should put the last past into the present', () => {
let actual = reducer(buildCanvasState([img2], img3), action);
actual.present.should.deep.equal(img2);
})
it('should remove last element from past', () => {
let actual = reducer(buildCanvasState([img1, img2], img3), action);
actual.past.should.deep.equal([img1]);
})
it('should put present into first element of the future', () => {
let actual = reducer(buildCanvasState([img1], img2, [img3]), action);
actual.future.should.deep.equal([img2, img3]);
})
it('should not modify present if past is empty', () => {
let actual = reducer(buildCanvasState([], img2, [img3]), action);
actual.present.should.deep.equal(img2);
})
})
describe('upon REDO', () => {
let action = createAction('REDO');
it('should put the first future into the present', () => {
let actual = reducer(buildCanvasState([], img1, [img2, img3]), action);
actual.present.should.deep.equal(img2);
})
it('should remove first element from future', () => {
let actual = reducer(buildCanvasState([], img1, [img2, img3]), action);
actual.future.should.deep.equal([img3]);
})
it('should put present into last element of the past', () => {
let actual = reducer(buildCanvasState([img1], img2, [img3]), action);
actual.past.should.deep.equal([img1, img2]);
})
it('should not modify present if future is empty', () => {
let actual = reducer(buildCanvasState([img1], img2, []), action);
actual.present.should.deep.equal(img2);
})
})
});
|
events.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide events that represent various changes to Bokeh Documents.
These events are used internally to signal changes to Documents. For
information about user-facing (e.g. UI or tool) events, see the reference
for :ref:`bokeh.events`.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..util.dependencies import import_optional
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
pd = import_optional('pandas')
__all__ = (
'ColumnDataChangedEvent',
'ColumnsStreamedEvent',
'ColumnsPatchedEvent',
'DocumentChangedEvent',
'DocumentPatchedEvent',
'ModelChangedEvent',
'RootAddedEvent',
'RootRemovedEvent',
'SessionCallbackAdded',
'SessionCallbackRemoved',
'TitleChangedEvent',
'MessageSentEvent',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class DocumentChangedEvent(object):
''' Base class for all internal events representing a change to a
Bokeh Document.
'''
def __init__(self, document, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
self.document = document
self.setter = setter
self.callback_invoker = callback_invoker
def combine(self, event):
'''
'''
return False
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._document_changed`` if it exists.
'''
if hasattr(receiver, '_document_changed'):
receiver._document_changed(self)
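# Illustrative sketch (not part of Bokeh): a receiver opts in to event handling
# by defining the corresponding ``_document_*`` hook; ``dispatch`` only invokes
# hooks that actually exist on the receiver.
class _SketchLoggingReceiver:
    def _document_changed(self, event):
        # hook looked up by DocumentChangedEvent.dispatch above
        log.debug("document changed: %r", event)
    def _document_patched(self, event):
        # hook looked up by DocumentPatchedEvent.dispatch below
        log.debug("document patched: %r", event)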
class DocumentPatchedEvent(DocumentChangedEvent):
''' A Base class for events that represent updating Bokeh Models and
their properties.
'''
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._document_patched`` if it exists.
'''
super().dispatch(receiver)
if hasattr(receiver, '_document_patched'):
receiver._document_patched(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
*Sub-classes must implement this method.*
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
raise NotImplementedError()
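# Illustrative sketch (not part of Bokeh's public API): how a caller might drive
# ``generate``. ``references`` and ``buffers`` are "out" parameters filled
# in-place and shared across events; the concrete container types used here are
# an assumption based on how the events in this module mutate them.
def _sketch_generate_patch(events):
    references = set()  # models referenced by the generated patch
    buffers = []        # any extra protocol buffers supplied by the events
    content = [event.generate(references, buffers) for event in events]
    return content, references, buffers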
class MessageSentEvent(DocumentPatchedEvent):
""" """
def __init__(self, document, msg_type, msg_data, setter=None, callback_invoker=None):
super(MessageSentEvent, self).__init__(document, setter, callback_invoker)
self.msg_type = msg_type
self.msg_data = msg_data
def dispatch(self, receiver):
super(MessageSentEvent, self).dispatch(receiver)
if hasattr(receiver, '_document_message_sent'):
receiver._document_message_sent(self)
def generate(self, references, buffers):
return {
'kind' : 'MessageSent',
'msg_type' : self.msg_type,
'msg_data' : self.msg_data,
}
class ModelChangedEvent(DocumentPatchedEvent):
''' A concrete event representing updating an attribute and value of a
specific Bokeh Model.
This is the "standard" way of updating most Bokeh model attributes. For
special casing situations that can optimized (e.g. streaming, etc.), a
``hint`` may be supplied that overrides normal mechanisms.
'''
def __init__(self, document, model, attr, old, new, serializable_new, hint=None, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
model (Model) :
A Model to update
attr (str) :
The name of the attribute to update on the model.
old (object) :
The old value of the attribute
new (object) :
The new value of the attribute
serializable_new (object) :
A serialized (JSON) version of the new value. It may be
``None`` if a hint is supplied.
hint (DocumentPatchedEvent, optional) :
When appropriate, a secondary event may be supplied that
modifies the normal update process. For example, in order
to stream or patch data more efficiently than the standard
update mechanism.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
if setter is None and isinstance(hint, (ColumnsStreamedEvent, ColumnsPatchedEvent)):
setter = hint.setter
super().__init__(document, setter, callback_invoker)
self.model = model
self.attr = attr
self.old = old
self.new = new
self.serializable_new = serializable_new
self.hint = hint
def combine(self, event):
'''
'''
if not isinstance(event, ModelChangedEvent): return False
# If these are not true something weird is going on, maybe updates from
# Python bokeh.client, don't try to combine
if self.setter != event.setter: return False
if self.document != event.document: return False
if self.hint:
return self.hint.combine(event.hint)
if (self.model == event.model) and (self.attr == event.attr):
self.new = event.new
self.serializable_new = event.serializable_new
self.callback_invoker = event.callback_invoker
return True
return False
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._document_model_changed`` if it exists.
'''
super().dispatch(receiver)
if hasattr(receiver, '_document_model_changed'):
receiver._document_model_changed(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
from ..model import collect_models
if self.hint is not None:
return self.hint.generate(references, buffers)
value = self.serializable_new
# the new value is an object that may have
# not-yet-in-the-remote-doc references, and may also
# itself not be in the remote doc yet. the remote may
# already have some of the references, but
# unfortunately we don't have an easy way to know
# unless we were to check BEFORE the attr gets changed
# (we need the old _all_models before setting the
# property). So we have to send all the references the
# remote could need, even though it could be inefficient.
# If it turns out we need to fix this we could probably
# do it by adding some complexity.
value_refs = set(collect_models(value))
# we know we don't want a whole new copy of the obj we're patching
# unless it's also the new value
if self.model != value:
value_refs.discard(self.model)
references.update(value_refs)
return { 'kind' : 'ModelChanged',
'model' : self.model.ref,
'attr' : self.attr,
'new' : value }
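# Illustrative sketch (not part of Bokeh): ``combine`` lets a queue of pending
# events be coalesced, so repeated changes to the same model attribute collapse
# into a single event that keeps only the latest value.
def _sketch_coalesce(events):
    coalesced = []
    for event in events:
        if coalesced and coalesced[-1].combine(event):
            continue  # folded into the previous event, nothing new to append
        coalesced.append(event)
    return coalesced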
class ColumnDataChangedEvent(DocumentPatchedEvent):
''' A concrete event representing efficiently replacing *all*
existing data for a :class:`~bokeh.models.sources.ColumnDataSource`
'''
def __init__(self, document, column_source, cols=None, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
column_source (ColumnDataSource) :
cols (list[str]) :
optional explicit list of column names to update. If None, all
columns will be updated (default: None)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super().__init__(document, setter, callback_invoker)
self.column_source = column_source
self.cols = cols
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._column_data_changed`` if it exists.
'''
super().dispatch(receiver)
if hasattr(receiver, '_column_data_changed'):
receiver._column_data_changed(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'ColumnDataChanged'
'column_source' : <reference to a CDS>
'new' : <new data to replace the existing data in column_source>
'cols' : <specific columns to update>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
from ..util.serialization import transform_column_source_data
data_dict = transform_column_source_data(self.column_source.data, buffers=buffers, cols=self.cols)
return { 'kind' : 'ColumnDataChanged',
'column_source' : self.column_source.ref,
'new' : data_dict,
'cols' : self.cols}
class ColumnsStreamedEvent(DocumentPatchedEvent):
''' A concrete event representing efficiently streaming new data
to a :class:`~bokeh.models.sources.ColumnDataSource`
'''
def __init__(self, document, column_source, data, rollover, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
column_source (ColumnDataSource) :
The data source to stream new data to.
data (dict or DataFrame) :
New data to stream.
If a DataFrame, will be stored as ``{c: df[c] for c in df.columns}``
rollover (int) :
A rollover limit. If the data source columns exceed this
limit, earlier values will be discarded to maintain the
column length under the limit.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super().__init__(document, setter, callback_invoker)
self.column_source = column_source
if pd and isinstance(data, pd.DataFrame):
data = {c: data[c] for c in data.columns}
self.data = data
self.rollover = rollover
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._columns_streamed`` if it exists.
'''
super().dispatch(receiver)
if hasattr(receiver, '_columns_streamed'):
receiver._columns_streamed(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'ColumnsStreamed'
'column_source' : <reference to a CDS>
'data' : <new data to stream to column_source>
'rollover' : <rollover limit>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'ColumnsStreamed',
'column_source' : self.column_source.ref,
'data' : self.data,
'rollover' : self.rollover }
class ColumnsPatchedEvent(DocumentPatchedEvent):
''' A concrete event representing efficiently applying data patches
to a :class:`~bokeh.models.sources.ColumnDataSource`
'''
def __init__(self, document, column_source, patches, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
column_source (ColumnDataSource) :
The data source to apply patches to.
patches (list) :
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super().__init__(document, setter, callback_invoker)
self.column_source = column_source
self.patches = patches
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._columns_patched`` if it exists.
'''
super().dispatch(receiver)
if hasattr(receiver, '_columns_patched'):
receiver._columns_patched(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'ColumnsPatched'
'column_source' : <reference to a CDS>
'patches' : <patches to apply to column_source>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'ColumnsPatched',
'column_source' : self.column_source.ref,
'patches' : self.patches }
class TitleChangedEvent(DocumentPatchedEvent):
''' A concrete event representing a change to the title of a Bokeh
Document.
'''
def __init__(self, document, title, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
title (str) :
The new title to set on the Document
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super().__init__(document, setter, callback_invoker)
self.title = title
def combine(self, event):
'''
'''
if not isinstance(event, TitleChangedEvent): return False
# If these are not true something weird is going on, maybe updates from
# Python bokeh.client, don't try to combine
if self.setter != event.setter: return False
if self.document != event.document: return False
self.title = event.title
self.callback_invoker = event.callback_invoker
return True
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'TitleChanged'
'title' : <new title to set>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'TitleChanged',
'title' : self.title }
class RootAddedEvent(DocumentPatchedEvent):
''' A concrete event representing a change to add a new Model to a
Document's collection of "root" models.
'''
def __init__(self, document, model, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
model (Model) :
The Bokeh Model to add as a Document root.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super().__init__(document, setter, callback_invoker)
self.model = model
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'RootAdded'
'model' : <reference to a Model>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
references.update(self.model.references())
return { 'kind' : 'RootAdded',
'model' : self.model.ref }
class RootRemovedEvent(DocumentPatchedEvent):
''' A concrete event representing a change to remove an existing Model
from a Document's collection of "root" models.
'''
def __init__(self, document, model, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
model (Model) :
The Bokeh Model to remove as a Document root.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super().__init__(document, setter, callback_invoker)
self.model = model
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'RootRemoved'
'model' : <reference to a Model>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'RootRemoved',
'model' : self.model.ref }
class SessionCallbackAdded(DocumentChangedEvent):
''' A concrete event representing a change to add a new callback (e.g.
periodic, timeout, or "next tick") to a Document.
'''
def __init__(self, document, callback):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
callback (SessionCallback) :
The callback to add
'''
super().__init__(document)
self.callback = callback
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_added`` if
it exists.
'''
super().dispatch(receiver)
if hasattr(receiver, '_session_callback_added'):
receiver._session_callback_added(self)
class SessionCallbackRemoved(DocumentChangedEvent):
''' A concrete event representing a change to remove an existing callback
(e.g. periodic, timeout, or "next tick") from a Document.
'''
def __init__(self, document, callback):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
callback (SessionCallback) :
The callback to remove
'''
super().__init__(document)
self.callback = callback
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_removed`` if
it exists.
'''
super().dispatch(receiver)
if hasattr(receiver, '_session_callback_removed'):
receiver._session_callback_removed(self)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|