| Unnamed: 0 | code | docstring |
|---|---|---|
19,700 | def set_meta_refresh_enabled(self, enabled):
warn("Cluster.set_meta_refresh_enabled is deprecated and will be removed in 4.0. Set "
"Cluster.schema_metadata_enabled and Cluster.token_metadata_enabled instead.", DeprecationWarning)
self.schema_metadata_enabled = enabled
self.token_metadata_enabled = enabled | *Deprecated:* set :attr:`~.Cluster.schema_metadata_enabled` and :attr:`~.Cluster.token_metadata_enabled` instead
Sets a flag to enable (True) or disable (False) all metadata refresh queries.
This applies to both schema and node topology.
Disabling this is useful to minimize refreshes during multiple changes.
Meta refresh must be enabled for the driver to become aware of any cluster
topology changes or schema updates. |
19,701 | def pivot_wavelength_ee(bpass):
from scipy.integrate import simps
return np.sqrt(simps(bpass.resp, bpass.wlen) /
simps(bpass.resp / bpass.wlen**2, bpass.wlen)) | Compute pivot wavelength assuming equal-energy convention.
`bpass` should have two properties, `resp` and `wlen`. The units of `wlen`
can be anything, and `resp` need not be normalized in any particular way. |
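A minimal usage sketch for `pivot_wavelength_ee` above, assuming a simple stand-in bandpass object with `wlen` and `resp` arrays (the flat passband and its limits are invented for illustration):

```python
import numpy as np
from scipy.integrate import simps  # same integrator the function uses (renamed simpson in newer SciPy)
from types import SimpleNamespace

# Hypothetical flat-response passband between 4000 and 6000 Angstrom
wlen = np.linspace(4000.0, 6000.0, 500)
bpass = SimpleNamespace(wlen=wlen, resp=np.ones_like(wlen))

pivot = np.sqrt(simps(bpass.resp, bpass.wlen) /
                simps(bpass.resp / bpass.wlen**2, bpass.wlen))
print(pivot)  # ~4899, i.e. sqrt(4000 * 6000) for a flat passband
```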
19,702 | def subdivide(self):
nodes_a, nodes_b, nodes_c, nodes_d = _surface_helpers.subdivide_nodes(
self._nodes, self._degree
)
return (
Surface(nodes_a, self._degree, _copy=False),
Surface(nodes_b, self._degree, _copy=False),
Surface(nodes_c, self._degree, _copy=False),
Surface(nodes_d, self._degree, _copy=False),
) | r"""Split the surface into four sub-surfaces.
Does so by taking the unit triangle (i.e. the domain
of the surface) and splitting it into four sub-triangles
.. image:: ../../images/surface_subdivide1.png
:align: center
Then the surface is re-parameterized via the map to / from the
given sub-triangles and the unit triangle.
For example, when a degree two surface is subdivided:
.. image:: ../../images/surface_subdivide2.png
:align: center
.. doctest:: surface-subdivide
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [-1.0, 0.5, 2.0, 0.25, 2.0, 0.0],
... [ 0.0, 0.5, 0.0, 1.75, 3.0, 4.0],
... ])
>>> surface = bezier.Surface(nodes, degree=2)
>>> _, sub_surface_b, _, _ = surface.subdivide()
>>> sub_surface_b
<Surface (degree=2, dimension=2)>
>>> sub_surface_b.nodes
array([[ 1.5 , 0.6875, -0.125 , 1.1875, 0.4375, 0.5 ],
[ 2.5 , 2.3125, 1.875 , 1.3125, 1.3125, 0.25 ]])
.. testcleanup:: surface-subdivide
import make_images
make_images.surface_subdivide1()
make_images.surface_subdivide2(surface, sub_surface_b)
Returns:
Tuple[Surface, Surface, Surface, Surface]: The lower left, central,
lower right and upper left sub-surfaces (in that order). |
19,703 | def uv_to_color(uv, image):
if image is None or uv is None:
return None
uv = np.asanyarray(uv, dtype=np.float64)
x = (uv[:, 0] * (image.width - 1))
y = ((1 - uv[:, 1]) * (image.height - 1))
x = x.round().astype(np.int64) % image.width
y = y.round().astype(np.int64) % image.height
colors = np.asanyarray(image.convert())[y, x]
assert colors.ndim == 2 and colors.shape[1] == 4
return colors | Get the color in a texture image.
Parameters
-------------
uv : (n, 2) float
UV coordinates on texture image
image : PIL.Image
Texture image
Returns
----------
colors : (n, 4) float
RGBA color at each of the UV coordinates |
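A small, hedged usage sketch for `uv_to_color` above, using a tiny solid-colour texture so the expected output is obvious (the image and UV coordinates are invented):

```python
import numpy as np
from PIL import Image

# 4x4 solid red RGBA texture; sample the bottom-left and top-right corners
image = Image.new("RGBA", (4, 4), color=(255, 0, 0, 255))
uv = np.array([[0.0, 0.0], [1.0, 1.0]])

colors = uv_to_color(uv, image)  # assumes uv_to_color from the row above is in scope
print(colors)                    # [[255   0   0 255], [255   0   0 255]]
```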
19,704 | def coords_to_vec(lon, lat):
phi = np.radians(lon)
theta = (np.pi / 2) - np.radians(lat)
sin_t = np.sin(theta)
cos_t = np.cos(theta)
xVals = sin_t * np.cos(phi)
yVals = sin_t * np.sin(phi)
zVals = cos_t
out = np.vstack((xVals, yVals, zVals)).swapaxes(0, 1)
return out | Converts longitude and latitude coordinates to unit 3-vectors.
Returns an array of shape (n, 3) with v_x[i], v_y[i], v_z[i] holding the directional cosines. |
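A brief check of `coords_to_vec` above (assuming it is in scope): longitudes 0 and 90 degrees on the equator should map to the x and y unit vectors.

```python
import numpy as np

vecs = coords_to_vec(np.array([0.0, 90.0]), np.array([0.0, 0.0]))
print(np.round(vecs, 6))             # [[1. 0. 0.], [0. 1. 0.]]
print(np.linalg.norm(vecs, axis=1))  # [1. 1.] -- unit vectors, as the docstring promises
```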
19,705 | def urls_old(self, protocol=Resource.Protocol.http):
url_patterns = []
for endpoint, resource_class in self._registry.items():
setattr(resource_class, , self.name)
setattr(resource_class, , endpoint)
nested = []
for route in resource_class.nested_routes( % (self.name, endpoint)):
route = route._replace(handler=resource_class.wrap_handler(route.handler, protocol))
nested.append(route)
url_patterns.extend(nested)
url_patterns.append(Route(
path= % (self.name, endpoint),
handler=resource_class.as_list(protocol),
methods=resource_class.route_methods(),
name=.format(self.name, endpoint).replace(, )
))
url_patterns.append(Route(
path= % (self.name, endpoint, resource_class.route_param()),
handler=resource_class.as_detail(protocol),
methods=resource_class.route_methods(),
name=.format(self.name, endpoint).replace(, )
))
return url_patterns | Iterate through all resources registered with this router
and create a list endpoint and a detail endpoint for each one.
Uses the router name as prefix and endpoint name of the resource when registered, to assemble the url pattern.
Uses the constructor-passed url method or class for generating urls |
19,706 | def write_to_buffer(self, buf):
doc = self.to_dict()
if config.rxt_as_yaml:
content = dump_yaml(doc)
else:
content = json.dumps(doc, indent=4, separators=(",", ": "))
buf.write(content) | Save the context to a buffer. |
19,707 | def filter_entities(self, model, context=None):
if model is None:
return
for property_name in self.entities:
prop = self.entities[property_name]
value = self.get(model, property_name)
filtered_value = prop.filter(
value=value,
model=model,
context=context
)
if value != filtered_value:
self.set(model, property_name, filtered_value)
prop.filter_with_schema(
model=value,
context=context
) | Filter entities
Runs filters on entity properties changing them in place.
:param model: object or dict
:param context: object, dict or None
:return: None |
19,708 | def get_common_properties(root):
properties = {}
for elem in root.iterfind():
name = elem.attrib[]
if name == :
properties[] = {: [], : None}
for child in elem.iter():
spec = {}
spec[] = child.find().attrib[]
units = child.find().attrib[]
try:
spec[] = child.find().attrib[]
except KeyError:
warn( + spec[])
pass
if units in [, , ]:
spec[] = [float(child.find().text)]
elif units == :
warn()
spec[] = [float(child.find().text)]
units =
elif units == :
warn()
spec[] = [float(child.find().text) * 1.e-6]
units =
elif units == :
warn()
spec[] = [float(child.find().text) * 1.e-9]
units =
else:
raise KeywordError(
)
properties[][].append(spec)
if properties[][] is None:
properties[][] = units
elif properties[][] != units:
raise KeywordError( + units +
+
properties[][]
)
elif name in datagroup_properties:
field = name.replace(, )
units = elem.attrib[]
if units == :
units =
quantity = 1.0 * unit_registry(units)
try:
quantity.to(property_units[field])
except pint.DimensionalityError:
raise KeywordError( + name)
properties[field] = [.join([elem.find().text, units])]
else:
raise KeywordError( + name + )
return properties | Read common properties from root of ReSpecTh XML file.
Args:
root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file
Returns:
properties (`dict`): Dictionary with common properties |
19,709 | def model_fn(hparams, seed):
rng = random.Random(seed)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(INPUT_SHAPE))
model.add(tf.keras.layers.Reshape(INPUT_SHAPE + (1,)))
conv_filters = 8
for _ in xrange(hparams[HP_CONV_LAYERS]):
model.add(tf.keras.layers.Conv2D(
filters=conv_filters,
kernel_size=hparams[HP_CONV_KERNEL_SIZE],
padding="same",
activation="relu",
))
model.add(tf.keras.layers.MaxPool2D(pool_size=2, padding="same"))
conv_filters *= 2
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT], seed=rng.random()))
dense_neurons = 32
for _ in xrange(hparams[HP_DENSE_LAYERS]):
model.add(tf.keras.layers.Dense(dense_neurons, activation="relu"))
dense_neurons *= 2
model.add(tf.keras.layers.Dense(OUTPUT_CLASSES, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=hparams[HP_OPTIMIZER],
metrics=["accuracy"],
)
return model | Create a Keras model with the given hyperparameters.
Args:
hparams: A dict mapping hyperparameters in `HPARAMS` to values.
seed: A hashable object to be used as a random seed (e.g., to
construct dropout layers in the model).
Returns:
A compiled Keras model. |
19,710 | def model_page(self, request, app_label, model_name, rest_of_url=None):
try:
model = get_model(app_label, model_name)
except LookupError:
model = None
if model is None:
raise http.Http404("App %r, model %r, not found." %
(app_label, model_name))
try:
databrowse_class = self.registry[model]
except KeyError:
raise http.Http404("This model exists but has not been registered "
"with databrowse.")
return databrowse_class(model, self).root(request, rest_of_url) | Handles the model-specific functionality of the databrowse site,
delegating to the appropriate ModelDatabrowse class. |
19,711 | def OnChar(self, event):
key = event.GetKeyCode()
entry = wx.TextCtrl.GetValue(self).strip()
pos = wx.TextCtrl.GetSelection(self)
if key == wx.WXK_RETURN:
if not self.is_valid:
wx.TextCtrl.SetValue(self, self.format % set_float(self.__bound_val))
else:
self.SetValue(entry)
return
if (key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255):
event.Skip()
return
has_minus = '-' in entry
ckey = chr(key)
if ((ckey == '.' and (self.__prec == 0 or '.' in entry)) or
(ckey == '-' and (has_minus or pos[0] != 0)) or
(ckey != '-' and has_minus and pos[0] == 0)):
return
if chr(key) in self.__digits:
event.Skip() | on Character event |
19,712 | def get_docargs(self, args=None, prt=None):
docargs = self.objdoc.get_docargs(args, prt)
self._chk_docopts(docargs)
return docargs | Pare down docopt. Return a minimal dictionary and a set containing runtime arg values. |
19,713 | def clean_virtualenv(self):
self.user_run_script(
script=scripts.get_script_path(),
args=[],
rw_venv=True,
) | Empty our virtualenv so that new (or older) dependencies may be
installed |
19,714 | def get_session(account_info):
s = getattr(CONN_CACHE, % account_info[], None)
if s is not None:
return s
if account_info.get():
s = assumed_session(account_info[], SESSION_NAME)
else:
s = boto3.Session()
setattr(CONN_CACHE, % account_info[], s)
return s | Get a boto3 session, potentially a cross-account STS assumed session;
assumed sessions are automatically refreshed. |
19,715 | def fitSphere(coords):
coords = np.array(coords)
n = len(coords)
A = np.zeros((n, 4))
A[:, :-1] = coords * 2
A[:, 3] = 1
f = np.zeros((n, 1))
x = coords[:, 0]
y = coords[:, 1]
z = coords[:, 2]
f[:, 0] = x * x + y * y + z * z
C, residue, rank, sv = np.linalg.lstsq(A, f)
if rank < 4:
return None
t = (C[0] * C[0]) + (C[1] * C[1]) + (C[2] * C[2]) + C[3]
radius = np.sqrt(t)[0]
center = np.array([C[0][0], C[1][0], C[2][0]])
if len(residue):
residue = np.sqrt(residue[0]) / n
else:
residue = 0
s = vs.Sphere(center, radius, c="r", alpha=1).wire(1)
s.info["radius"] = radius
s.info["center"] = center
s.info["residue"] = residue
return s | Fits a sphere to a set of points.
Extra info is stored in ``actor.info['radius']``, ``actor.info['center']``, ``actor.info['residue']``.
.. hint:: Example: |fitspheres1.py|_
|fitspheres2| |fitspheres2.py|_ |
19,716 | def setup():
l_mitogen = logging.getLogger()
l_mitogen_io = logging.getLogger()
l_ansible_mitogen = logging.getLogger()
for logger in l_mitogen, l_mitogen_io, l_ansible_mitogen:
logger.handlers = [Handler(display.vvv)]
logger.propagate = False
if display.verbosity > 2:
l_ansible_mitogen.setLevel(logging.DEBUG)
l_mitogen.setLevel(logging.DEBUG)
else:
l_mitogen.setLevel(logging.ERROR)
l_ansible_mitogen.setLevel(logging.ERROR)
if display.verbosity > 3:
l_mitogen_io.setLevel(logging.DEBUG) | Install handlers for Mitogen loggers to redirect them into the Ansible
display framework. Ansible installs its own logging framework handlers when
C.DEFAULT_LOG_PATH is set, therefore disable propagation for our handlers. |
19,717 | def export_saved_model(sess, export_dir, tag_set, signatures):
import tensorflow as tf
g = sess.graph
g._unsafe_unfinalize()
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
logging.info("===== signatures: {}".format(signatures))
signature_def_map = {}
for key, sig in signatures.items():
signature_def_map[key] = tf.saved_model.signature_def_utils.build_signature_def(
inputs={name: tf.saved_model.utils.build_tensor_info(tensor) for name, tensor in sig['inputs'].items()},
outputs={name: tf.saved_model.utils.build_tensor_info(tensor) for name, tensor in sig['outputs'].items()},
method_name=sig['method_name'] if 'method_name' in sig else key)
logging.info("===== signature_def_map: {}".format(signature_def_map))
builder.add_meta_graph_and_variables(
sess,
tag_set.split(','),
signature_def_map=signature_def_map,
clear_devices=True)
g.finalize()
builder.save() | Convenience function to export a saved_model using provided arguments
The caller specifies the saved_model signatures in a simplified python dictionary form, as follows::
signatures = {
'signature_def_key': {
'inputs': { 'input_tensor_alias': input_tensor_name },
'outputs': { 'output_tensor_alias': output_tensor_name },
'method_name': 'method'
}
}
And this function will generate the `signature_def_map` and export the saved_model.
Args:
:sess: a tf.Session instance
:export_dir: path to save exported saved_model
:tag_set: string tag_set to identify the exported graph
:signatures: simplified dictionary representation of a TensorFlow signature_def_map
Returns:
A saved_model exported to disk at ``export_dir``. |
19,718 | def worker_wrapper(worker_instance, pid_path):
def exit_handler(*args):
if len(args) > 0:
print("Exit py signal {signal}".format(signal=args[0]))
remove(pid_path)
atexit.register(exit_handler)
signal.signal(signal.SIGINT, exit_handler)
signal.signal(signal.SIGTERM, exit_handler)
worker_instance.work()
exit_handler(2) | A wrapper to start RQ worker as a new process.
:param worker_instance: RQ's worker instance
:param pid_path: A file to check if the worker
is running or not |
19,719 | def require_instance(obj, types=None, name=None, type_name=None, truncate_at=80):
if not isinstance(obj, types):
obj_string = str(obj)
if len(obj_string) > truncate_at:
obj_string = obj_string[:truncate_at - 3] + "..."
if type_name is None:
try:
type_name = "one of " + ", ".join(str(t) for t in types)
except TypeError:
type_name = str(types)
name_string = ("%s: " % name) if name else ""
error_message = "%sexpected %s. Got %s of type %s" % (
name_string, type_name, obj_string, type(obj))
raise TypeError(error_message) | Raise an exception if obj is not an instance of one of the specified types.
Similarly to isinstance, 'types' may be either a single type or a tuple of
types.
If name or type_name is provided, it is used in the exception message.
The object's string representation is also included in the message,
truncated to 'truncate_at' number of characters. |
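A quick usage sketch of `require_instance` above with the corrected message formatting (the argument values are invented):

```python
try:
    require_instance("3.14", types=(int, float), name="threshold")
except TypeError as err:
    # e.g. "threshold: expected one of <class 'int'>, <class 'float'>. Got 3.14 of type <class 'str'>"
    print(err)
```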
19,720 | def hsv_to_rgb(hsv):
h, s, v = hsv
if s == 0:
return (v, v, v)
h /= 60.0
i = math.floor(h)
f = h-i
p = v*(1.0-s)
q = v*(1.0-s*f)
t = v*(1.0-s*(1.0-f))
if i == 0:
return (v, t, p)
elif i == 1:
return (q, v, p)
elif i == 2:
return (p, v, t)
elif i == 3:
return (p, q, v)
elif i == 4:
return (t, p, v)
else:
return (v, p, q) | Converts a tuple of hue, saturation, value to a tuple of red, green, blue.
Hue should be an angle from 0.0 to 359.0. Saturation and value should be a
value from 0.0 to 1.0, where saturation controls the intensity of the hue and
value controls the brightness. |
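A few spot checks for `hsv_to_rgb` above (the function relies on the standard `math` module being imported):

```python
import math  # required by hsv_to_rgb

print(hsv_to_rgb((0.0, 1.0, 1.0)))    # (1.0, 0.0, 0.0) -- pure red
print(hsv_to_rgb((120.0, 1.0, 1.0)))  # (0.0, 1.0, 0.0) -- pure green
print(hsv_to_rgb((240.0, 0.5, 1.0)))  # (0.5, 0.5, 1.0) -- desaturated blue
```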
19,721 | def eigen(X, P, NSIG=None, method='music', threshold=None, NFFT=default_NFFT,
criteria='aic', verbose=False):
if method not in ['music', 'ev']:
raise ValueError("method must be 'music' or 'ev'")
if NSIG != None and threshold != None:
raise ValueError("NSIG and threshold cannot be provided together")
if NSIG is not None:
if NSIG < 0:
raise ValueError()
if NSIG >= P:
raise ValueError("NSIG must be stricly less than IP")
N = len(X)
NP = N - P
assert 2 * NP > P - 1, '2 * (N - P) must be greater than P - 1'
if NP > 100:
NP = 100
FB = np.zeros((2*NP, P), dtype=complex)
Z = np.zeros(NFFT, dtype=complex)
PSD = np.zeros(NFFT)
for I in range(0, NP):
for K in range(0, P):
FB[I, K] = X[I-K+P-1]
FB[I+NP, K] = X[I+K+1].conjugate()
_U, S, V = svd (FB)
V = -V.transpose()
NSIG = _get_signal_space(S, 2*NP,
verbose=verbose, threshold=threshold,
NSIG=NSIG, criteria=criteria)
for I in range(NSIG, P):
Z[0:P] = V[0:P, I]
Z[P:NFFT] = 0
Z = fft(Z, NFFT)
if method == 'music':
PSD = PSD + abs(Z)**2.
elif method == 'ev':
PSD = PSD + abs(Z)**2. / S[I]
PSD = 1./PSD
nby2 = int(NFFT/2)
newpsd = np.append(PSD[nby2:0:-1], PSD[nby2*2-1:nby2-1:-1])
return newpsd, S | r"""Pseudo spectrum using eigenvector method (EV or Music)
This function computes either the Music or EigenValue (EV) noise
subspace frequency estimator.
First, an autocorrelation matrix of order `P` is computed from
the data. Second, this matrix is separated into vector subspaces,
one a signal subspace and the other a noise
subspace using a SVD method to obtain the eigen values and vectors.
From the eigen values :math:`\lambda_i`, and eigen vectors :math:`v_k`,
the **pseudo spectrum** (see note below) is computed as follows:
.. math:: P_{ev}(f) = \frac{1}{e^H(f)\left(\sum\limits_{k=M+1}^{p} \frac{1}{\lambda_k}v_kv_k^H\right)e(f)}
The separation of the noise and signal subspaces requires expertise
of the signal. However, AIC and MDL criteria may be used to automatically
perform this task.
You still need to provide the parameter `P` to indicate the maximum number
of eigen values to be computed. The criteria will just select a subset
to estimate the pseudo spectrum (see :func:`~spectrum.criteria.aic_eigen`
and :func:`~spectrum.criteria.mdl_eigen` for details.
.. note:: **pseudo spectrum**. func:`eigen` does not compute a PSD estimate.
Indeed, the method does not preserve the measured process power.
:param X: Array data samples
:param int P: maximum number of eigen values to compute. NSIG (if
specified) must therefore be less than P.
:param str method: 'music' or 'ev'.
:param int NSIG: If specified, the signal sub space uses NSIG eigen values.
:param float threshold: If specified, the signal sub space is made of the
eigen values larger than :math:`\rm{threshold} \times \lambda_{min}`,
where :math:`\lambda_{min}` is the minimum eigen values.
:param int NFFT: total length of the final data sets (padded with zero
if needed; default is 4096)
:return:
* PSD: Array of real frequency estimator values (two sided for
complex data and one sided for real data)
* S, the eigen values
.. plot::
:width: 80%
:include-source:
from spectrum import eigen, marple_data
from pylab import plot, log10, linspace, legend, axis
psd, ev = eigen(marple_data, 15, NSIG=11)
f = linspace(-0.5, 0.5, len(psd))
plot(f, 10 * log10(psd/max(psd)), label='User defined')
psd, ev = eigen(marple_data, 15, threshold=2)
plot(f, 10 * log10(psd/max(psd)), label='threshold method (100)')
psd, ev = eigen(marple_data, 15)
plot(f, 10 * log10(psd/max(psd)), label='AIC method (8)')
legend()
axis([-0.5, 0.5, -120, 0])
.. seealso::
:func:`pev`,
:func:`pmusic`,
:func:`~spectrum.criteria.aic_eigen`
:References: [Marple]_, Chap 13
.. todo:: for developers:
* what should be the second argument of the criteria N, N-P, P...?
* what should be the max value of NP |
19,722 | def needs_refresh(self, source):
now = time.time()
if source._tok in self:
s0 = self[source._tok]
if self[source._tok].metadata.get(, None):
then = s0.metadata[]
if s0.metadata[] < then - now:
return True
return False
return True | Has the (persisted) source expired in the store
Will return True if the source is not in the store at all, if its
TTL is set to None, or if more seconds have passed than the TTL. |
19,723 | def build(self, corpus, state_size):
model = {}
for run in corpus:
items = ([ BEGIN ] * state_size) + run + [ END ]
for i in range(len(run) + 1):
state = tuple(items[i:i+state_size])
follow = items[i+state_size]
if state not in model:
model[state] = {}
if follow not in model[state]:
model[state][follow] = 0
model[state][follow] += 1
return model | Build a Python representation of the Markov model. Returns a dict
of dicts where the keys of the outer dict represent all possible states,
and point to the inner dicts. The inner dicts represent all possibilities
for the "next" item in the chain, along with the count of times it
appears. |
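A minimal walk-through of `build` above on a two-sentence corpus with `state_size=1`. The `BEGIN`/`END` sentinel values shown are assumptions (markovify uses similar markers), and since the method body never touches `self`, `None` is passed for it:

```python
BEGIN, END = "___BEGIN__", "___END__"  # assumed sentinel tokens, shown for reference only

corpus = [["the", "cat", "sat"], ["the", "dog", "sat"]]
model = build(None, corpus, state_size=1)

print(model[("the",)])  # {'cat': 1, 'dog': 1}
print(model[("sat",)])  # {'___END__': 2}
```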
19,724 | def identify_triggers(
cfg,
sources,
sinks,
lattice,
nosec_lines
):
assignment_nodes = filter_cfg_nodes(cfg, AssignmentNode)
tainted_nodes = filter_cfg_nodes(cfg, TaintedNode)
tainted_trigger_nodes = [
TriggerNode(
Source(),
cfg_node=node
) for node in tainted_nodes
]
sources_in_file = find_triggers(assignment_nodes, sources, nosec_lines)
sources_in_file.extend(tainted_trigger_nodes)
find_secondary_sources(assignment_nodes, sources_in_file, lattice)
sinks_in_file = find_triggers(cfg.nodes, sinks, nosec_lines)
sanitiser_node_dict = build_sanitiser_node_dict(cfg, sinks_in_file)
return Triggers(sources_in_file, sinks_in_file, sanitiser_node_dict) | Identify sources, sinks and sanitisers in a CFG.
Args:
cfg(CFG): CFG to find sources, sinks and sanitisers in.
sources(tuple): list of sources, a source is a (source, sanitiser) tuple.
sinks(tuple): list of sinks, a sink is a (sink, sanitiser) tuple.
nosec_lines(set): lines with # nosec whitelisting
Returns:
Triggers tuple with sink and source nodes and a sanitiser node dict. |
19,725 | def est_propensity_s(self, lin_B=None, C_lin=1, C_qua=2.71):
lin_basic = parse_lin_terms(self.raw_data[], lin_B)
self.propensity = PropensitySelect(self.raw_data, lin_basic,
C_lin, C_qua)
self.raw_data._dict[] = self.propensity[]
self._post_pscore_init() | Estimates the propensity score with covariates selected using
the algorithm suggested by [1]_.
The propensity score is the conditional probability of
receiving the treatment given the observed covariates.
Estimation is done via a logistic regression.
The covariate selection algorithm is based on a sequence
of likelihood ratio tests.
Parameters
----------
lin_B: list, optional
Column numbers (zero-based) of variables of
the original covariate matrix X to include
linearly. Defaults to empty list, meaning
every column of X is subjected to the
selection algorithm.
C_lin: scalar, optional
Critical value used in likelihood ratio tests
to decide whether candidate linear terms should
be included. Defaults to 1 as in [1]_.
C_qua: scalar, optional
Critical value used in likelihood ratio tests
to decide whether candidate quadratic terms
should be included. Defaults to 2.71 as in
[1]_.
References
----------
.. [1] Imbens, G. & Rubin, D. (2015). Causal Inference in
Statistics, Social, and Biomedical Sciences: An
Introduction. |
19,726 | def combined_download(accounts, days=60):
client = Client(institution=None)
out_file = StringIO()
out_file.write(client.header())
out_file.write("<OFX>")
for a in accounts:
ofx = a.download(days=days).read()
stripped = ofx.partition("<OFX>")[2].partition("</OFX>")[0]
out_file.write(stripped)
out_file.write("</OFX>")
out_file.seek(0)
return out_file | Download OFX files and combine them into one
It expects an 'accounts' list of ofxclient.Account objects
as well as an optional 'days' specifier which defaults to 60 |
19,727 | def set_ioloop(self, ioloop=None):
self._server.set_ioloop(ioloop)
self.ioloop = self._server.ioloop | Set the tornado IOLoop to use.
Sets the tornado.ioloop.IOLoop instance to use, defaulting to
IOLoop.current(). If set_ioloop() is never called the IOLoop is
started in a new thread, and will be stopped if self.stop() is called.
Notes
-----
Must be called before start() is called. |
19,728 | def add_samples(self, samples: Iterable[Sample]) -> None:
for sample in samples:
self.add_sample(sample) | Add samples in an iterable to this :class:`SampleSheet`. |
19,729 | def save_state(self):
for key, check_box in list(self.boolean_settings.items()):
self.save_boolean_setting(key, check_box)
for key, line_edit in list(self.text_settings.items()):
self.save_text_setting(key, line_edit)
set_setting(
, self.leNorthArrowPath.text(), self.settings)
set_setting(
,
self.organisation_logo_path_line_edit.text(),
self.settings)
set_setting(
,
self.leReportTemplatePath.text(),
self.settings)
set_setting(
,
self.txtDisclaimer.toPlainText(),
self.settings)
set_setting(
,
self.leUserDirectoryPath.text(),
self.settings)
index = self.earthquake_function.currentIndex()
value = self.earthquake_function.itemData(index)
set_setting(, value, qsettings=self.settings)
currency_index = self.currency_combo_box.currentIndex()
currency_key = self.currency_combo_box.itemData(currency_index)
set_setting(, currency_key, qsettings=self.settings)
self.save_default_values()
self.save_population_parameters() | Store the options into the user's stored session info. |
19,730 | def send_exception(self, code, exc_info=None, headers=None):
"send an error response including a backtrace to the client"
if headers is None:
headers = {}
if not exc_info:
exc_info = sys.exc_info()
self.send_error_msg(code,
traceback.format_exception(*exc_info),
headers) | send an error response including a backtrace to the client |
19,731 | def intersection(self,other):
if self.everything:
if other.everything:
return DiscreteSet()
else:
return DiscreteSet(other.elements)
else:
if other.everything:
return DiscreteSet(self.elements)
else:
return DiscreteSet(self.elements.intersection(other.elements)) | Return a new DiscreteSet with the intersection of the two sets, i.e.
all elements that are in both self and other.
:param DiscreteSet other: Set to intersect with
:rtype: DiscreteSet |
19,732 | def generate(self, dir_pattern, file_pattern, action_ch='g', recursively=False, force=False):
directories = [os.path.abspath(directory) for directory in glob.glob(os.path.join(self.__root_directory, dir_pattern)) if os.path.isdir(directory)]
extensions = self.__extensions.keys()
for directory in directories:
for b, f in find_files(directory, file_pattern, recursively=recursively):
file_basename, file_ext = os.path.splitext(f)
if file_ext in extensions:
rel_path = os.path.relpath(os.path.join(b,f), self.__root_directory)
rel_basename, rel_filename = os.path.split(rel_path)
rel_filename_without_ext, rel_ext = os.path.splitext(rel_filename)
in_file_name = os.path.join(b, f)
generator_action_container = self.__actions.retrieve_element_or_default(rel_basename, None)
action = None
if generator_action_container is not None:
action = generator_action_container.get_compatible_generator_action(f)
if action is None:
action = self.__default_action
if action:
if action_ch == :
print("Process file with function :" % (rel_path, action.action_function_name()))
for filename_end, context in action.run():
out_file_name = os.path.join(b, rel_filename_without_ext + filename_end + self.__extensions[file_ext])
if action_ch == 'g':
self.__generate_file(template_filename=in_file_name, context=context, generated_filename=out_file_name, force=force)
elif action_ch == 'c':
try:
os.remove(out_file_name)
self.log_info("Removed file %s" % out_file_name)
except OSError:
pass
elif action_ch == 'd':
print(" -> %s" % os.path.join(rel_basename, rel_filename_without_ext + filename_end + self.__extensions[file_ext])) | Main method to generate (source code) files from templates.
See documentation about the directory and file patterns and their possible combinations.
Args:
dir_pattern: ``glob`` pattern taken from the root directory. **Only** used for directories.
file_pattern: ``fnmatch`` pattern taken from all matching directories. **Only** used for files.
action (char): Denote action to be taken. Can be:
- g: Generate all files that match both directory and file patterns. This is the default behavior.
- d: Same as `g` but without doing anything, i.e. dry run.
- c: Same as `g` but erasing the generated files instead, i.e. clean.
recursively: Do we do the actions in the sub-directories? Note that in this case **only** the file pattern applies as **all**
the subdirectories are visited.
force (boolean): Do we force the generation or not? |
19,733 | def _parse_authors(html_chunk):
authors = html_chunk.match(
["div", {"class": "comment"}],
"h3",
"a",
)
if not authors:
return []
authors = map(
lambda x: Author(
x.getContent().strip(),
normalize_url(BASE_URL, x.params.get("href", None))
),
authors
)
return filter(lambda x: x.name.strip(), authors) | Parse authors of the book.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
list: List of :class:`structures.Author` objects. Blank if no author \
found. |
19,734 | def build_from_queue(cls, input_queue, replay_size, batch_size):
return cls(
lambda: input_queue.dequeue_many(batch_size),
replay_size,
batch_size=batch_size) | Builds a `ReplayableQueue` that draws from a regular `input_queue`.
Args:
input_queue: The queue to draw from.
replay_size: The size of the replay buffer.
batch_size: The size of each batch.
Returns:
A ReplayableQueue. |
19,735 | def search_index_advanced(self, index, query):
request = self.session
url = % (self.host, self.port, index)
if self.params:
content = dict(query=query, **self.params)
else:
content = dict(query=query)
if self.verbose:
print content
response = request.post(url,content)
return response | Advanced search query against an entire index
> query = ElasticQuery().query_string(query='imchi')
> search = ElasticSearch() |
19,736 | def readPopulations(inputFileName, requiredPopulation):
populations = {}
requiredPopulation = set(requiredPopulation)
with open(inputFileName, "r") as inputFile:
for line in inputFile:
row = line.rstrip("\r\n").split("\t")
famID = row[0]
indID = row[1]
pop = row[2]
if (famID, indID) in populations:
if pop != populations[(famID, indID)]:
msg = ("{} {}: sample has multiple population ({} and "
"{})".format(famID, indID, pop,
populations[(famID, indID)]))
raise ProgramError(msg)
if pop in requiredPopulation:
populations[(famID, indID)] = pop
popMissing = requiredPopulation - set(populations.values())
if len(popMissing) != 0:
msg = "Population that were asked for doesn't exists in " \
"population file: %s" % str(popMissing)
raise ProgramError(msg)
return populations | Reads a population file.
:param inputFileName: the name of the population file.
:param requiredPopulation: the required population.
:type inputFileName: str
:type requiredPopulation: list
:returns: a :py:class:`dict` containing the population of each sample. |
19,737 | def reprcall(name, args=(), kwargs=(), keywords=, sep=,
argfilter=repr):
if keywords:
keywords = (( if (args or kwargs) else ) +
+ keywords)
argfilter = argfilter or repr
return "{name}({args}{sep}{kwargs}{keywords})".format(
name=name, args=reprargs(args, filter=argfilter),
sep=(args and kwargs) and sep or "",
kwargs=reprkwargs(kwargs, sep), keywords=keywords or ) | Format a function call for display. |
19,738 | def get_account_invitation(self, account_id, invitation_id, **kwargs):
kwargs[] = True
if kwargs.get('asynchronous'):
return self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs)
else:
(data) = self.get_account_invitation_with_http_info(account_id, invitation_id, **kwargs)
return data | Details of a user invitation. # noqa: E501
An endpoint for retrieving the details of an active user invitation sent for a new or an existing user to join the account. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{account-id}/user-invitations/{invitation-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_invitation(account_id, invitation_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str invitation_id: The ID of the invitation to be retrieved. (required)
:return: UserInvitationResp
If the method is called asynchronously,
returns the request thread. |
19,739 | def _to_array(value):
if isinstance(value, (tuple, list)):
return array(value)
elif isinstance(value, (float, int)):
return np.float64(value)
else:
return value | As a convenience, turn Python lists and tuples into NumPy arrays. |
19,740 | def _set_igmp_snooping_state(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=igmp_snooping_state.igmp_snooping_state, is_container=, presence=False, yang_name="igmp-snooping-state", rest_name="igmp-snooping-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__igmp_snooping_state = t
if hasattr(self, ):
self._set() | Setter method for igmp_snooping_state, mapped from YANG variable /igmp_snooping_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_igmp_snooping_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_igmp_snooping_state() directly.
YANG Description: IGMP Snooping Root MO |
19,741 | def kitchen_delete(backend, kitchen):
click.secho( % (get_datetime(), kitchen), fg=)
master = 'master'
if kitchen.lower() != master.lower():
check_and_print(DKCloudCommandRunner.delete_kitchen(backend.dki, kitchen))
else:
raise click.ClickException( % master) | Provide the name of the kitchen to delete |
19,742 | def ToPhotlam(self, wave, flux, **kwargs):
if hasattr(flux, 'copy'):
return flux.copy()
else:
return flux | Convert to ``photlam``.
Since there is no real conversion necessary, this returns
a copy of input flux (if array) or just the input (if scalar).
An input array is copied to avoid modifying the input
in subsequent **pysynphot** processing.
Parameters
----------
wave, flux : number or array_like
Wavelength (not used) and flux values to be used for conversion.
kwargs : dict
Extra keywords (not used).
Returns
-------
result : number or array_like
Converted values. |
19,743 | def from_rest(model, props):
req = goldman.sess.req
_from_rest_blank(model, props)
_from_rest_hide(model, props)
_from_rest_ignore(model, props)
_from_rest_lower(model, props)
if req.is_posting:
_from_rest_on_create(model, props)
elif req.is_patching:
_from_rest_on_update(model, props)
model.merge(props, validate=True)
if req.is_patching:
_from_rest_reject_update(model) | Map the REST data onto the model
Additionally, perform the following tasks:
* set all blank strings to None where needed
* purge all fields not allowed as incoming data
* purge all unknown fields from the incoming data
* lowercase certain fields that need it
* merge new data with existing & validate
* mutate the existing model
* abort on validation errors
* coerce all the values |
19,744 | def function(self, x, y, amp, alpha, beta, center_x, center_y):
x_shift = x - center_x
y_shift = y - center_y
return amp * (1. + (x_shift**2+y_shift**2)/alpha**2)**(-beta) | returns Moffat profile |
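A short, hedged evaluation sketch for the Moffat profile above, computed directly from the same formula on a small grid (the parameter values are arbitrary):

```python
import numpy as np

amp, alpha, beta = 2.0, 0.5, 1.5
x, y = np.meshgrid(np.linspace(-1.0, 1.0, 3), np.linspace(-1.0, 1.0, 3))
profile = amp * (1. + (x**2 + y**2) / alpha**2) ** (-beta)

print(profile[1, 1])  # 2.0 at the centre; the profile falls off with radius
```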
19,745 | def add_input_stream(self, datastream, data_type, options, data):
url = self.get_add_input_data_url(datastream, options)
form_data = {
: {
: (
Utils.random_string(10)+( if data_type == else ),
data,
,
{: }
)
}
}
response = self.http.upstream(url, form_data)
return response | To add data stream to a Datastream
:param datastream: string
:param data_type: string
:param options: dict
:param data: Stream |
19,746 | def __check_mapping(self, landmarks):
sc_udiff = numpy.asarray(self.__sc_umaxs)[1:] - numpy.asarray(self.__sc_umins)[:-1]
l_diff = numpy.asarray(landmarks)[1:] - numpy.asarray(landmarks)[:-1]
return numpy.all(sc_udiff > numpy.asarray(l_diff)) | Checks whether the image, from which the supplied landmarks were extracted, can
be transformed to the learned standard intensity space without loss of
information. |
19,747 | def libvlc_media_set_user_data(p_md, p_new_user_data):
f = _Cfunctions.get('libvlc_media_set_user_data', None) or \
_Cfunction('libvlc_media_set_user_data', ((1,), (1,),), None,
None, Media, ctypes.c_void_p)
return f(p_md, p_new_user_data) | Sets media descriptor's user_data. user_data is specialized data
accessed by the host application, VLC.framework uses it as a pointer to
an native object that references a L{Media} pointer.
@param p_md: media descriptor object.
@param p_new_user_data: pointer to user data. |
19,748 | def remove_dataset_from_collection(dataset_id, collection_id, **kwargs):
collection_i = _get_collection(collection_id)
collection_item = _get_collection_item(collection_id, dataset_id)
if collection_item is None:
raise HydraError("Dataset %s is not in collection %s.",
dataset_id,
collection_id)
db.DBSession.delete(collection_item)
db.DBSession.flush()
db.DBSession.expunge_all()
return | Remove a single dataset from a dataset collection. |
19,749 | def address(cls, address, bits = None):
if bits is None:
address_size = cls.address_size
bits = win32.bits
else:
address_size = bits / 4
if address < 0:
address = ((2 ** bits) - 1) ^ ~address
return ( % address_size) % address | @type address: int
@param address: Memory address.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text output. |
19,750 | def open_readable(self, name):
assert compat.is_native(name)
out = SpooledTemporaryFile(max_size=self.MAX_SPOOL_MEM, mode="w+b")
self.ftp.retrbinary(
"RETR {}".format(name), out.write, FtpTarget.DEFAULT_BLOCKSIZE
)
out.seek(0)
return out | Open cur_dir/name for reading.
Note: we read everything into a buffer that supports .read().
Args:
name (str): file name, located in self.curdir
Returns:
file-like (must support read() method) |
19,751 | def post_event_access_code(self, id, access_code_id, **data):
return self.post("/events/{0}/access_codes/{1}/".format(id,access_code_id), data=data) | POST /events/:id/access_codes/:access_code_id/
Updates an access code; returns the result as a :format:`access_code` as the
key ``access_code``. |
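A quick check of the corrected path template (the IDs are placeholders):

```python
print("/events/{0}/access_codes/{1}/".format("12345", "ABCDE"))
# /events/12345/access_codes/ABCDE/
```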
19,752 | def _udf_cell(args, js):
variable_name = args['module']
if not variable_name:
raise Exception()
spec_pattern = r
spec_part_pattern = r
specs = re.findall(spec_pattern, js)
if len(specs) < 2:
raise Exception(
)
inputs = []
input_spec_parts = re.findall(spec_part_pattern, specs[0], flags=re.IGNORECASE)
if len(input_spec_parts) % 2 != 0:
raise Exception(
)
for n, t in zip(input_spec_parts[0::2], input_spec_parts[1::2]):
inputs.append((n, t))
outputs = []
output_spec_parts = re.findall(spec_part_pattern, specs[1], flags=re.IGNORECASE)
if len(output_spec_parts) % 2 != 0:
raise Exception(
)
for n, t in zip(output_spec_parts[0::2], output_spec_parts[1::2]):
outputs.append((n, t))
import_pattern = r
imports = re.findall(import_pattern, js)
split_pattern = r
parts = re.match(split_pattern, js, re.MULTILINE | re.DOTALL)
support_code =
if parts:
support_code = (parts.group(1) + parts.group(3)).strip()
if len(support_code):
js = parts.group(2)
udf = datalab.bigquery.UDF(inputs, outputs, variable_name, js, support_code, imports)
datalab.utils.commands.notebook_environment()[variable_name] = udf | Implements the bigquery_udf cell magic for ipython notebooks.
The supported syntax is:
%%bigquery udf --module <var>
<js function>
Args:
args: the optional arguments following '%%bigquery udf'.
js: the UDF declaration (inputs and outputs) and implementation in javascript.
Returns:
The results of executing the UDF converted to a dataframe if no variable
was specified. None otherwise. |
19,753 | def load_json_file(json_file):
with io.open(json_file, encoding='utf-8') as data_file:
try:
json_content = json.load(data_file)
except exceptions.JSONDecodeError:
err_msg = u"JSONDecodeError: JSON file format error: {}".format(json_file)
logger.log_error(err_msg)
raise exceptions.FileFormatError(err_msg)
_check_format(json_file, json_content)
return json_content | load json file and check file content format |
19,754 | def select_all(self, table, limit=MAX_ROWS_PER_QUERY, execute=True):
num_rows = self.count_rows(table)
if num_rows > limit:
return self._select_batched(table, '*', num_rows, limit, execute=execute)
else:
return self.select(table, '*', execute=execute) | Query all rows and columns from a table. |
19,755 | def intervalSum(self, a, b):
return self.prefixSum(b) - self.prefixSum(a-1) | :param int a b: with 1 <= a <= b
:returns: t[a] + ... + t[b] |
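A self-contained stand-in showing the same prefix-sum identity that `intervalSum` above relies on (the `prefixSum` backing here is a plain cumulative-sum list, not the original class):

```python
class PrefixSums:
    """1-indexed prefix sums: prefixSum(i) == t[1] + ... + t[i]."""
    def __init__(self, values):
        self._acc = [0]
        for v in values:
            self._acc.append(self._acc[-1] + v)

    def prefixSum(self, i):
        return self._acc[max(i, 0)]

    def intervalSum(self, a, b):  # same formula as the row above
        return self.prefixSum(b) - self.prefixSum(a - 1)

t = PrefixSums([5, 1, 4, 2])
print(t.intervalSum(2, 4))  # 1 + 4 + 2 = 7
```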
19,756 | def _build(self, leaves):
new, odd = [], None
if len(leaves) % 2 == 1:
odd = leaves.pop(-1)
for i in range(0, len(leaves), 2):
newnode = Node(leaves[i].val + leaves[i + 1].val)
newnode.l, newnode.r = leaves[i], leaves[i + 1]
leaves[i].side, leaves[i + 1].side, leaves[i].p, leaves[i + 1].p = 'L', 'R', newnode, newnode
leaves[i].sib, leaves[i + 1].sib = leaves[i + 1], leaves[i]
new.append(newnode)
if odd:
new.append(odd)
return new | Private helper function to create the next aggregation level and put all references in place. |
19,757 | def pypackable(name, pytype, format):
size, items = _formatinfo(format)
return type(Packable)(name, (pytype, Packable), {
: format,
: size,
: items,
}) | Create a "mix-in" class with a python type and a
Packable with the given struct format |
19,758 | def get_python_json(scala_json):
def convert_node(node):
if node.__class__.__name__ in (,
):
values_raw = get_python_dict(node.values())
values = {}
for k, v in values_raw.items():
values[k] = convert_node(v)
return values
elif node.__class__.__name__.startswith() or \
node.__class__.__name__ == \
:
values_raw = get_python_dict(node)
values = {}
for k, v in values_raw.items():
values[k] = convert_node(v)
return values
elif node.__class__.__name__ == :
entries_raw = get_python_list(node.values())
entries = []
for entry in entries_raw:
entries.append(convert_node(entry))
return entries
elif node.__class__.__name__ == :
entries_raw = get_python_list(node)
entries = []
for entry in entries_raw:
entries.append(convert_node(entry))
return entries
elif node.__class__.__name__ == :
return node.intValue()
elif node.__class__.__name__ == :
return None
elif node.__class__.__name__ == :
return []
elif isinstance(node, (str, int, float)):
return node
else:
logger.error( %
node.__class__.__name__)
return node.__class__.__name__
python_json = convert_node(scala_json)
return python_json | Return a JSON dict from a org.json4s.JsonAST |
19,759 | def acoustic_similarity_mapping(path_mapping, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True):
num_cores = int((3 * cpu_count()) / 4)
segments = set()
for x in path_mapping:
segments.update(x)
if multiprocessing:
cache = generate_cache_mp(segments, analysis_function, num_cores, call_back, stop_check)
asim = calculate_distances_mp(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
else:
cache = generate_cache_th(segments, analysis_function, num_cores, call_back, stop_check)
asim = calculate_distances_th(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
return asim | Takes in an explicit mapping of full paths to .wav files to have
acoustic similarity computed.
Parameters
----------
path_mapping : iterable of iterables
Explicit mapping of full paths of .wav files, in the form of a
list of tuples to be compared.
Returns
-------
dict
Returns a list of tuples corresponding to the `path_mapping` input,
with a new final element in the tuple being the similarity/distance
score for that mapping. |
19,760 | def export_table(datatable_code, **kwargs):
if 'authtoken' in kwargs:
raise InvalidRequestError(Message.ERROR_AUTHTOKEN_NOT_SUPPORTED)
ApiKeyUtil.init_api_key_from_args(kwargs)
filename = kwargs.pop(, )
return Datatable(datatable_code).download_file(filename, **kwargs) | Downloads an entire table as a zip file.
:param str datatable_code: The datatable code to download, such as MER/F1
:param str filename: The filename for the download. \
If not specified, will download to the current working directory
:param str api_key: Most databases require api_key for bulk download |
19,761 | def data_to_df(self, sysbase=False):
p_dict_comp = self.data_to_dict(sysbase=sysbase)
self._check_pd()
self.param_df = pd.DataFrame(data=p_dict_comp).set_index()
return self.param_df | Return a pandas.DataFrame of device parameters.
:param sysbase: save per unit values in system base |
19,762 | def to_char(token):
if ord(token) in _range(9216, 9229 + 1):
token = _unichr(ord(token) - 9216)
return token | Transforms the ASCII control character symbols to their real char.
Note: If the token is not an ASCII control character symbol, just
return the token.
Keyword arguments:
token -- the token to transform |
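A tiny usage sketch for `to_char` above; `_range` and `_unichr` are assumed to be the module's Python 2/3 compatibility aliases for `range` and `chr`:

```python
print(repr(to_char(u"\u2400")))  # '\x00' -- the "symbol for NUL" maps back to the real control char
print(to_char("A"))              # 'A'    -- ordinary tokens pass through unchanged
```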
19,763 | def p_throttling(p):
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling} | throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET |
19,764 | async def _get_response(self, msg):
try:
protocol = await self._get_protocol()
pr = protocol.request(msg)
r = await pr.response
return pr, r
except ConstructionRenderableError as e:
raise ClientError("There was an error with the request.", e)
except RequestTimedOut as e:
await self._reset_protocol(e)
raise RequestTimeout(, e)
except (OSError, socket.gaierror, Error) as e:
await self._reset_protocol(e)
raise ServerError("There was an error with the request.", e)
except asyncio.CancelledError as e:
await self._reset_protocol(e)
raise e | Perform the request, get the response. |
19,765 | def draw_step(self):
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s | iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient ! Use it only for "animation" or debugging purpose. |
19,766 | def json(self):
data = super(CalTRACKHourlyModel, self).json()
data.update(
{
"occupancy_lookup": self.occupancy_lookup.to_json(orient="split"),
"temperature_bins": self.temperature_bins.to_json(orient="split"),
}
)
return data | Return a JSON-serializable representation of this result.
The output of this function can be converted to a serialized string
with :any:`json.dumps`. |
19,767 | def p(self, path):
if path.startswith("/"):
path = path[1:]
p = os.path.join(self.mount_point, path)
logger.debug("path = %s", p)
return p | provide absolute path within the container
:param path: path with container
:return: str |
19,768 | def set_pixel(self,x,y,state):
self.send_cmd("P"+str(x+1)+","+str(y+1)+","+state) | Set pixel at "x,y" to "state" where state can be one of "ON", "OFF"
or "TOGGLE" |
19,769 | def binary_size(self):
return (
1 +
2 +
1 + len(self.name.encode()) +
1 +
1 + len(self.dimensions) +
self.total_bytes +
1 + len(self.desc.encode())
) | Return the number of bytes needed to store this parameter. |
19,770 | def acknowledged_by(self):
if (self.is_acknowledged and
self._proto.acknowledgeInfo.HasField('acknowledgedBy')):
return self._proto.acknowledgeInfo.acknowledgedBy
return None | Username of the acknowledger. |
19,771 | def update_iteration(self):
iteration_config = self.get_iteration_config()
if not iteration_config:
return
experiments_metrics = self.experiment_group.get_experiments_metrics(
experiment_ids=iteration_config.experiment_ids,
metric=self.get_metric_name()
)
experiments_configs = self.experiment_group.get_experiments_declarations(
experiment_ids=iteration_config.experiment_ids
)
iteration_config.experiments_configs = list(experiments_configs)
iteration_config.experiments_metrics = [m for m in experiments_metrics if m[1] is not None]
self._update_config(iteration_config) | Update the last experiment group's iteration with experiment performance. |
19,772 | def apply(self, dir_or_plan=None, input=False, skip_plan=False, no_color=IsFlagged,
**kwargs):
default = kwargs
default['input'] = input
default['no_color'] = no_color
default['auto-approve'] = (skip_plan == True)
option_dict = self._generate_default_options(default)
args = self._generate_default_args(dir_or_plan)
return self.cmd('apply', *args, **option_dict) | refer to https://terraform.io/docs/commands/apply.html
no-color is flagged by default
:param no_color: disable color of stdout
:param input: disable prompt for a missing variable
:param dir_or_plan: folder relative to working folder
:param skip_plan: force apply without plan (default: false)
:param kwargs: same as kwargs in method 'cmd'
:returns return_code, stdout, stderr |
19,773 | def calculate_output(self, variable_name, period):
variable = self.tax_benefit_system.get_variable(variable_name, check_existence = True)
if variable.calculate_output is None:
return self.calculate(variable_name, period)
return variable.calculate_output(self, variable_name, period) | Calculate the value of a variable using the ``calculate_output`` attribute of the variable. |
19,774 | def get_event_q(self, event_name):
self.lock.acquire()
if not event_name in self.event_dict or self.event_dict[
event_name] is None:
self.event_dict[event_name] = queue.Queue()
self.lock.release()
event_queue = self.event_dict[event_name]
return event_queue | Obtain the queue storing events of the specified name.
If no event of this name has been polled, wait for one to.
Returns:
A queue storing all the events of the specified name.
None if timed out.
Raises:
queue.Empty: Raised if the queue does not exist and timeout has
passed. |
19,775 | def build_columns(self, X, verbose=False):
splines = self._terms[0].build_columns(X, verbose=verbose)
for term in self._terms[1:]:
marginal_splines = term.build_columns(X, verbose=verbose)
splines = tensor_product(splines, marginal_splines)
if self.by is not None:
splines *= X[:, self.by][:, np.newaxis]
return sp.sparse.csc_matrix(splines) | construct the model matrix columns for the term
Parameters
----------
X : array-like
Input dataset with n rows
verbose : bool
whether to show warnings
Returns
-------
scipy sparse array with n rows |
19,776 | def retcode_pillar(pillar_name):
groups = __salt__['pillar.get'](pillar_name)
check = {}
data = {}
for group in groups:
commands = groups[group]
for command in commands:
if isinstance(command, dict):
plugin = next(six.iterkeys(command))
args = command[plugin]
else:
plugin = command
args = ''
check.update(retcode(plugin, args, group))
current_value = 0
new_value = int(check[group][])
if group in data:
current_value = int(data[group][])
if (new_value > current_value) or (group not in data):
if group not in data:
data[group] = {}
data[group][] = new_value
return data | Run one or more nagios plugins from pillar data and get the result of cmd.retcode
The pillar have to be in this format::
------
webserver:
Ping_google:
- check_icmp: 8.8.8.8
- check_icmp: google.com
Load:
- check_load: -w 0.8 -c 1
APT:
- check_apt
-------
webserver is the role to check, the next keys are the group and the items
the check with the arguments if needed
You must to group different checks(one o more) and always it will return
the highest value of all the checks
CLI Example:
.. code-block:: bash
salt '*' nagios.retcode webserver |
19,777 | def send_media_file(self, filename):
cache_timeout = self.get_send_file_max_age(filename)
return send_from_directory(self.config[], filename,
cache_timeout=cache_timeout) | Function used to send media files from the media folder to the browser. |
19,778 | def accept(self, addr):
ip_addr = _parse_ip(addr)
if ip_addr is None:
LOG.warn("Cannot add address %r to proxy %s: "
"invalid address" % (addr, self.address))
else:
self.accepted.add(addr) | Add an address to the set of addresses this proxy is permitted
to introduce.
:param addr: The address to add. |
19,779 | def line_line(origins,
directions,
plane_normal=None):
origins, is_2D = stack_3D(origins, return_2D=True)
directions, is_2D = stack_3D(directions, return_2D=True)
directions /= np.linalg.norm(directions,
axis=1).reshape((-1, 1))
if np.sum(np.abs(np.diff(directions,
axis=0))) < tol.zero:
return False, None
q_0, p_0 = origins
v, u = directions
w = p_0 - q_0
if plane_normal is None:
plane_normal = np.cross(u, v)
plane_normal /= np.linalg.norm(plane_normal)
v_perp = np.cross(v, plane_normal)
v_perp /= np.linalg.norm(v_perp)
coplanar = abs(np.dot(plane_normal, w)) < tol.zero
if not coplanar:
return False, None
s_I = (np.dot(-v_perp, w) /
np.dot(v_perp, u))
intersection = p_0 + s_I * u
return True, intersection[:(3 - is_2D)] | Find the intersection between two lines.
Uses terminology from:
http://geomalgorithms.com/a05-_intersect-1.html
line 1: P(s) = p_0 + sU
line 2: Q(t) = q_0 + tV
Parameters
---------
origins: (2, d) float, points on lines (d in [2,3])
directions: (2, d) float, direction vectors
plane_normal: (3, ) float, if not passed computed from cross
Returns
---------
intersects: boolean, whether the lines intersect.
In 2D, false if the lines are parallel
In 3D, false if lines are not coplanar
intersection: if intersects: (d) length point of intersection
else: None |
19,780 | def removeCMSPadding(str, blocksize=AES_blocksize):
try:
pad_len = ord(str[-1])
except TypeError:
pad_len = str[-1]
assert pad_len <= blocksize, 'padding length cannot exceed the block size'
assert pad_len <= len(str), 'padding length cannot exceed the data length'
return str[:-pad_len] | CMS padding: Remove padding with bytes containing the number of padding bytes |
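A minimal sketch of `removeCMSPadding` above: CMS/PKCS#7 padding appends N bytes each holding the value N, so a 12-byte message in a 16-byte block ends with four 0x04 bytes (the example data is invented):

```python
padded = b"secret data!" + bytes([4]) * 4  # 12 data bytes + 4 padding bytes = one 16-byte block
print(removeCMSPadding(padded))            # b'secret data!'
```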
19,781 | def drawRect(self, x1, y1, x2, y2, angle=0):
vertices = [[x1,y1],[x2,y1],[x2,y2],[x1,y2],]
rotatedVertices = rotateMatrix(vertices, (x1+x2)*0.5, (y1+y2)*0.5, angle)
self.drawClosedPath(rotatedVertices) | Draws a rectangle on the current :py:class:`Layer` with the current :py:class:`Brush`.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
:param x1: The X of the top-left corner of the rectangle.
:param y1: The Y of the top-left corner of the rectangle.
:param x2: The X of the bottom-right corner of the rectangle.
:param y2: The Y of the bottom-right corner of the rectangle.
:param angle: An angle (in degrees) of rotation around the center of the rectangle.
:rtype: Nothing. |
19,782 | def drop_network_by_id(self, network_id: int) -> None:
network = self.session.query(Network).get(network_id)
self.drop_network(network) | Drop a network by its database identifier. |
19,783 | def append_copy(self, elem):
return XMLElement(lib.lsl_append_copy(self.e, elem.e)) | Append a copy of the specified element as a child. |
19,784 | def _put(self, item: SQLBaseObject):
if item._dto_type in self._expirations and self._expirations[item._dto_type] == 0:
return
item.updated()
self._session().merge(item) | Puts a item into the database. Updates lastUpdate column |
19,785 | def save_to_file(self, filename, format=None, **kwargs):
if format is None:
format = format_from_extension(filename)
with file(filename, ) as fp:
self.save_to_file_like(fp, format, **kwargs) | Save the object to file given by filename. |
19,786 | def incr(self, name, amount=1):
return self.client.incr(name, amount=amount) | Increment the value stored at the given key; if the key does not exist it is initialised to the default, otherwise it is incremented by the integer amount.
:param name: key
:param amount: amount to increment by (default 1)
:return: the value after incrementing |
19,787 | def divide_prefixes(prefixes: List[str], seed:int=0) -> Tuple[List[str], List[str], List[str]]:
if len(prefixes) < 3:
raise PersephoneException(
"{} cannot be split into 3 groups as it only has {} items".format(prefixes, len(prefixes))
)
Ratios = namedtuple("Ratios", ["train", "valid", "test"])
ratios=Ratios(.90, .05, .05)
train_end = int(ratios.train*len(prefixes))
valid_end = int(train_end + ratios.valid*len(prefixes))
if valid_end == len(prefixes):
valid_end -= 1
if train_end == valid_end:
train_end -= 1
random.seed(seed)
random.shuffle(prefixes)
train_prefixes = prefixes[:train_end]
valid_prefixes = prefixes[train_end:valid_end]
test_prefixes = prefixes[valid_end:]
assert train_prefixes, "Got empty set for training data"
assert valid_prefixes, "Got empty set for validation data"
assert test_prefixes, "Got empty set for testing data"
return train_prefixes, valid_prefixes, test_prefixes | Divide data into training, validation and test subsets |
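A usage sketch for `divide_prefixes` above with 40 invented utterance prefixes; the 90/5/5 ratios give a 36/2/2 split:

```python
prefixes = ["utt{:03d}".format(i) for i in range(40)]
train, valid, test = divide_prefixes(prefixes, seed=0)
print(len(train), len(valid), len(test))  # 36 2 2
```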
19,788 | def show_vcs_output_vcs_nodes_vcs_node_info_node_switch_mac(self, **kwargs):
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
vcs_nodes = ET.SubElement(output, "vcs-nodes")
vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
node_switch_mac = ET.SubElement(vcs_node_info, "node-switch-mac")
node_switch_mac.text = kwargs.pop('node_switch_mac')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
19,789 | def get_octets(self, octets=None, timeout=1.0):
if octets is None:
octets = b''
if not self.socket:
try:
self.connect()
except nfc.llcp.ConnectRefused:
return None
else:
self.release_connection = True
else:
self.release_connection = False
try:
request = struct.pack('>BBLL', 0x10, 0x01, 4 + len(octets),
self.acceptable_length) + octets
if not send_request(self.socket, request, self.send_miu):
return None
response = recv_response(
self.socket, self.acceptable_length, timeout)
if response is not None:
if response[1] != 0x81:
raise SnepError(response[1])
return response[6:]
finally:
if self.release_connection:
self.close() | Get NDEF message octets from a SNEP Server.
.. versionadded:: 0.13
If the client has not yet a data link connection with a SNEP
Server, it temporarily connects to the default SNEP Server,
sends the message octets, disconnects after the server
response, and returns the received message octets. |
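The request built in get_octets is a SNEP GET: a one-byte version, a one-byte opcode, a four-byte big-endian length, then a four-byte acceptable length followed by the NDEF octets. A sketch of that header packing (the '>BBLL' format string is an inference from the SNEP layout, not taken from the library):
import struct
acceptable_length = 1024
ndef_octets = b''
request = struct.pack('>BBLL', 0x10, 0x01, 4 + len(ndef_octets), acceptable_length) + ndef_octets
assert len(request) == 10  # 2 header bytes + 4-byte length + 4-byte acceptable length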
19,790 | def make_pdb(self, alt_states=False, inc_ligands=True):
if any([False if x.id else True for x in self._monomers]):
self.relabel_monomers()
if self.ligands and inc_ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
pdb_str = write_pdb(monomers, self.id, alt_states=alt_states)
return pdb_str | Generates a PDB string for the `Polymer`.
Parameters
----------
alt_states : bool, optional
Include alternate conformations for `Monomers` in PDB.
inc_ligands : bool, optional
Includes `Ligands` in PDB.
Returns
-------
pdb_str : str
String of the pdb for the `Polymer`. Generated using information
from the component `Monomers`. |
19,791 | def _convert_to_config(self):
for k, v in vars(self.parsed_data).iteritems():
exec "self.config.%s = v"%k in locals(), globals() | self.parsed_data->self.config |
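The exec-based copy above is Python 2 specific; a safer sketch of the same parsed-arguments-to-config copy using setattr (the Config class here is a stand-in):
import argparse
class Config(object):
    pass
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true')
parsed = parser.parse_args(['--verbose'])
config = Config()
for k, v in vars(parsed).items():
    setattr(config, k, v)  # equivalent for plain attribute names; dotted names would need extra handling
print(config.verbose)  # True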
19,792 | def on_to_position(self, speed, position, brake=True, block=True):
speed = self._speed_native_units(speed)
self.speed_sp = int(round(speed))
self.position_sp = position
self._set_brake(brake)
self.run_to_abs_pos()
if block:
self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)  # 'running' is the motor state waited on here; literal reconstructed
self.wait_until_not_moving() | Rotate the motor at ``speed`` to ``position``
``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
object, enabling use of other units. |
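A hedged usage sketch with the ev3dev2 motor classes (requires EV3 hardware or an emulator; the port and values are examples):
from ev3dev2.motor import LargeMotor, OUTPUT_A, SpeedPercent
motor = LargeMotor(OUTPUT_A)
# Rotate to absolute tacho position 540 at 40% speed, brake at the target, and block until done.
motor.on_to_position(SpeedPercent(40), 540, brake=True, block=True)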
19,793 | def list_role_binding_for_all_namespaces(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_role_binding_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_role_binding_for_all_namespaces_with_http_info(**kwargs)
return data | list or watch objects of kind RoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_role_binding_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1RoleBindingList
If the method is called asynchronously,
returns the request thread. |
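A hedged usage sketch with the official Kubernetes Python client (assumes a reachable cluster and that this method is exposed on RbacAuthorizationV1Api):
from kubernetes import client, config
config.load_kube_config()
rbac = client.RbacAuthorizationV1Api()
bindings = rbac.list_role_binding_for_all_namespaces(limit=50)
for rb in bindings.items:
    print(rb.metadata.namespace, rb.metadata.name)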
19,794 | def clean_value(self):
result = []
for mdl in self:
result.append(super(ListNode, mdl).clean_value())
return result | Populates json serialization ready data.
This is the method used to serialize and store the object data in to DB
Returns:
List of dicts. |
19,795 | def download_document(self, document: Document, overwrite=True, path=None):
if not path:
path = os.path.join(os.path.expanduser(c["base_path"]), document.path)
if (self.modified(document) and overwrite) or not os.path.exists(join(path, document.title)):
log.info("Downloading %s" % join(path, document.title))
file = self._get( % document.id, stream=True)
os.makedirs(path, exist_ok=True)
with open(join(path, document.title), 'wb') as f:  # binary write mode inferred from copyfileobj on the raw response stream
shutil.copyfileobj(file.raw, f) | Download a document to the given path. if no path is provided the path is constructed frome the base_url + stud.ip path + filename.
If overwrite is set the local version will be overwritten if the file was changed on studip since the last check |
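The download itself is the usual streamed-response pattern; a generic sketch with requests (URL and filename are placeholders):
import os
import shutil
import requests
def download(url, directory, filename):
    response = requests.get(url, stream=True)
    response.raise_for_status()
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, filename), 'wb') as f:
        shutil.copyfileobj(response.raw, f)  # stream the body to disk without loading it all into memory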
19,796 | def before_export(self):
try:
csm_info = self.datastore['csm_info']  # datastore key inferred from the variable name
except KeyError:
csm_info = self.datastore['csm_info'] = self.csm.info
for sm in csm_info.source_models:
for sg in sm.src_groups:
assert sg.eff_ruptures != -1, sg
for key in self.datastore:
self.datastore.set_nbytes(key)
self.datastore.flush() | Set the attributes nbytes |
19,797 | def compressBWTPoolProcess(tup):
inputFN = tup[0]
startIndex = tup[1]
endIndex = tup[2]
tempFN = tup[3]
whereSol = np.add(startIndex+1, np.where(bwt[startIndex:endIndex-1] != bwt[startIndex+1:endIndex])[0])
deltas = np.zeros(dtype='<u8', shape=(whereSol.shape[0]+1,))  # original dtype literal lost; an unsigned integer type is assumed for run lengths
if whereSol.shape[0] == 0:
deltas[0] = endIndex-startIndex
else:
deltas[0] = whereSol[0]-startIndex
deltas[1:-1] = np.subtract(whereSol[1:], whereSol[0:-1])
deltas[-1] = endIndex - whereSol[-1]
size = 0
byteCount = 0
lastCount = 1
while lastCount > 0:
lastCount = np.where(deltas >= 2**(numberBits*byteCount))[0].shape[0]
size += lastCount
byteCount += 1
ret = np.lib.format.open_memmap(tempFN, 'w+', '<u1', (size,))  # mode and dtype literals lost; write mode and a one-byte dtype are assumed
retIndex = 0
c = bwt[startIndex]
startChar = c
delta = deltas[0]
while delta > 0:
ret[retIndex] = ((delta & mask) << letterBits)+c
delta /= numPower
retIndex += 1
for i in xrange(0, whereSol.shape[0]):
c = bwt[whereSol[i]]
delta = deltas[i+1]
while delta > 0:
ret[retIndex] = ((delta & mask) << letterBits)+c
delta /= numPower
retIndex += 1
endChar = c
return (size, startChar, deltas[0], endChar, deltas[-1], tempFN) | During compression, each available process will calculate a subportion of the BWT independently using this
function. This process takes the chunk and rewrites it into a given filename using the technique described
in the compressBWT(...) function header |
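At its core the worker above run-length encodes a symbol array: change points are found with a vectorised comparison and run lengths are the gaps between them. A minimal numpy sketch of that idea:
import numpy as np
bwt = np.array([0, 0, 0, 2, 2, 1, 1, 1, 1], dtype=np.uint8)
boundaries = np.add(1, np.where(bwt[:-1] != bwt[1:])[0])  # indices where the symbol changes: [3, 5]
starts = np.concatenate(([0], boundaries))
ends = np.concatenate((boundaries, [bwt.shape[0]]))
run_symbols = bwt[starts]    # array([0, 2, 1])
run_lengths = ends - starts  # array([3, 2, 4])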
19,798 | def get_foreign_key(self, name):
name = self._normalize_identifier(name)
if not self.has_foreign_key(name):
raise ForeignKeyDoesNotExist(name, self._name)
return self._fk_constraints[name] | Returns the foreign key constraint with the given name.
:param name: The constraint name
:type name: str
:rtype: ForeignKeyConstraint |
19,799 | def style_from_dict(style_dict, include_defaults=True):
assert isinstance(style_dict, Mapping)
if include_defaults:
s2 = {}
s2.update(DEFAULT_STYLE_EXTENSIONS)
s2.update(style_dict)
style_dict = s2
token_to_attrs = {}
for ttype, styledef in sorted(style_dict.items()):
attrs = DEFAULT_ATTRS
if 'noinherit' not in styledef:  # keyword reconstructed; it disables inheriting the parent token's attributes
for i in range(1, len(ttype) + 1):
try:
attrs = token_to_attrs[ttype[:-i]]
except KeyError:
pass
else:
break
for part in styledef.split():
if part == 'noinherit':  # string literals in this chain reconstructed from the pygments/prompt_toolkit style keywords
pass
elif part == 'bold':
attrs = attrs._replace(bold=True)
elif part == 'nobold':
attrs = attrs._replace(bold=False)
elif part == 'italic':
attrs = attrs._replace(italic=True)
elif part == 'noitalic':
attrs = attrs._replace(italic=False)
elif part == 'underline':
attrs = attrs._replace(underline=True)
elif part == 'nounderline':
attrs = attrs._replace(underline=False)
elif part == 'blink':
attrs = attrs._replace(blink=True)
elif part == 'noblink':
attrs = attrs._replace(blink=False)
elif part == 'reverse':
attrs = attrs._replace(reverse=True)
elif part == 'noreverse':
attrs = attrs._replace(reverse=False)
elif part in ('roman', 'sans', 'mono'):
pass
elif part.startswith('border:'):
pass
elif part.startswith('bg:'):
attrs = attrs._replace(bgcolor=_colorformat(part[3:]))
else:
attrs = attrs._replace(color=_colorformat(part))
token_to_attrs[ttype] = attrs
return _StyleFromDict(token_to_attrs) | Create a ``Style`` instance from a dictionary or other mapping.
The dictionary is equivalent to the ``Style.styles`` dictionary from
pygments, with a few additions: it supports 'reverse' and 'blink'.
Usage::
style_from_dict({
Token: '#ff0000 bold underline',
Token.Title: 'blink',
Token.SomethingElse: 'reverse',
})
:param include_defaults: Include the defaults (built-in) styling for
selected text, etc. |
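The parser above accumulates styling into an immutable attributes tuple via namedtuple._replace; a simplified sketch of that mechanism (this Attrs definition is a stand-in, not prompt_toolkit's):
from collections import namedtuple
Attrs = namedtuple('Attrs', ['color', 'bgcolor', 'bold', 'underline'])
DEFAULT_ATTRS = Attrs(color=None, bgcolor=None, bold=False, underline=False)
attrs = DEFAULT_ATTRS
for part in 'bold ff0000'.split():
    if part == 'bold':
        attrs = attrs._replace(bold=True)   # returns a new tuple; the default stays untouched
    else:
        attrs = attrs._replace(color=part)
print(attrs)  # Attrs(color='ff0000', bgcolor=None, bold=True, underline=False)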