Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k) |
---|---|---|
2,600 | def _rebuild_mod_path(orig_path, package_name, module):
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
if isinstance(module.__path__, list):
module.__path__[:] = new_path
else:
module.__path__ = new_path | Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order |
2,601 | def topological_sort(bpmn_graph, nodes_with_classification):
node_param_name = "node"
classification_param_name = "classification"
tmp_nodes_with_classification = copy.deepcopy(nodes_with_classification)
sorted_nodes_with_classification = []
no_incoming_flow_nodes = []
backward_flows = []
while tmp_nodes_with_classification:
for node_with_classification in tmp_nodes_with_classification:
incoming_list = node_with_classification[node_param_name][1][consts.Consts.incoming_flow]
if len(incoming_list) == 0:
no_incoming_flow_nodes.append(node_with_classification)
if len(no_incoming_flow_nodes) > 0:
while len(no_incoming_flow_nodes) > 0:
node_with_classification = no_incoming_flow_nodes.pop()
tmp_nodes_with_classification.remove(node_with_classification)
sorted_nodes_with_classification \
.append(next(tmp_node for tmp_node in nodes_with_classification
if tmp_node[node_param_name][0] == node_with_classification[node_param_name][0]))
outgoing_list = list(node_with_classification[node_param_name][1][consts.Consts.outgoing_flow])
tmp_outgoing_list = list(outgoing_list)
for flow_id in tmp_outgoing_list:
outgoing_list.remove(flow_id)
node_with_classification[node_param_name][1][consts.Consts.outgoing_flow].remove(flow_id)
flow = bpmn_graph.get_flow_by_id(flow_id)
target_id = flow[2][consts.Consts.target_ref]
target = next(tmp_node[node_param_name]
for tmp_node in tmp_nodes_with_classification
if tmp_node[node_param_name][0] == target_id)
target[1][consts.Consts.incoming_flow].remove(flow_id)
else:
for node_with_classification in tmp_nodes_with_classification:
if "Join" in node_with_classification[classification_param_name]:
incoming_list = list(node_with_classification[node_param_name][1][consts.Consts.incoming_flow])
tmp_incoming_list = list(incoming_list)
for flow_id in tmp_incoming_list:
incoming_list.remove(flow_id)
flow = bpmn_graph.get_flow_by_id(flow_id)
source_id = flow[2][consts.Consts.source_ref]
source = next(tmp_node[node_param_name]
for tmp_node in tmp_nodes_with_classification
if tmp_node[node_param_name][0] == source_id)
source[1][consts.Consts.outgoing_flow].remove(flow_id)
target_id = flow[2][consts.Consts.target_ref]
target = next(tmp_node[node_param_name]
for tmp_node in tmp_nodes_with_classification
if tmp_node[node_param_name][0] == target_id)
target[1][consts.Consts.incoming_flow].remove(flow_id)
backward_flows.append(flow)
return sorted_nodes_with_classification, backward_flows | :return: |
2,602 | def add_vcenter(self, **kwargs):
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter",
xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id = ET.SubElement(vcenter, "id")
id.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
url = ET.SubElement(credentials, "url")
url.text = kwargs.pop('url')
username = ET.SubElement(credentials, "username")
username.text = kwargs.pop('username')
password = ET.SubElement(credentials, "password")
password.text = kwargs.pop('password') | Add vCenter on the switch
try:
self._callback(config)
return True
except Exception as error:
logging.error(error)
return False | Add vCenter on the switch
Args:
id(str) : Name of an established vCenter
url (bool) : vCenter URL
username (str): Username of the vCenter
password (str): Password of the vCenter
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None |
2,603 | def configuration_from_uri(cls, persistence_uri):
db_uri, persistence_state_id = cls.parse_persistence_uri(persistence_uri)
engine = create_engine(db_uri)
Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
job = session.query(Job).filter(Job.id == persistence_state_id).first()
configuration = job.configuration
configuration = yaml.safe_load(configuration)
configuration[][] = True
configuration[][] = persistence_state_id
return configuration | Return a configuration object. |
2,604 | def explode(self):
for realm in [tmp_realm for tmp_realm in self if tmp_realm.higher_realms]:
for parent in realm.higher_realms:
higher_realm = self.find_by_name(parent)
if higher_realm:
higher_realm.realm_members.append(realm.get_name())
for realm in self:
for tmp_realm in self:
tmp_realm.rec_tag = False
realm.get_realms_by_explosion(self)
for tmp_realm in self:
del tmp_realm.rec_tag | Explode realms with each realm_members and higher_realms to get all the
realms sub realms.
:return: None |
2,605 | def robust_init(stochclass, tries, *args, **kwds):
stochs = [arg for arg in (list(args) + list(kwds.values()))
if isinstance(arg.__class__, StochasticMeta)]
parents = stochs
for s in stochs:
parents.extend(s.extended_parents)
extended_parents = set(parents)
random_parents = [
p for p in extended_parents if p.rseed is True and hasattr(
p,
'random')]
for i in range(tries):
try:
return stochclass(*args, **kwds)
except ZeroProbability:
exc = sys.exc_info()
for parent in random_parents:
try:
parent.random()
except:
six.reraise(*exc)
six.reraise(*exc) | Robust initialization of a Stochastic.
If the evaluation of the log-probability returns a ZeroProbability
error, due for example to a parent being outside of the support for
this Stochastic, the values of parents are randomly sampled until
a valid log-probability is obtained.
If the log-probability is still not valid after `tries` attempts, the
original ZeroProbability error is raised.
:Parameters:
stochclass : Stochastic, eg. Normal, Uniform, ...
The Stochastic distribution to instantiate.
tries : int
Maximum number of times parents will be sampled.
*args, **kwds
Positional and keyword arguments to declare the Stochastic variable.
:Example:
>>> lower = pymc.Uniform('lower', 0., 2., value=1.5, rseed=True)
>>> pymc.robust_init(pymc.Uniform, 100, 'data', lower=lower, upper=5, value=[1,2,3,4], observed=True) |
2,606 | def download(url, dest):
u = urllib.FancyURLopener()
logger.info("Downloading %s..." % url)
u.retrieve(url, dest)
logger.info("Saved to %s" % dest)
return dest | Platform-agnostic downloader. |
2,607 | def is_valid(self):
if not self:
return True
if self.left and self.data[self.axis] < self.left.data[self.axis]:
return False
if self.right and self.data[self.axis] > self.right.data[self.axis]:
return False
return all(c.is_valid() for c, _ in self.children) or self.is_leaf | Checks recursively if the tree is valid
It is valid if each node splits correctly |
2,608 | def s2m(self):
m = % (IDENT)
self.meta.load(m, % (m), mdict=self.settings.get) | Imports settings to meta |
2,609 | def _get_prepped_model_field(model_obj, field):
field = model_obj._meta.get_field(field)
value = field.get_db_prep_save(getattr(model_obj, field.attname), connection)
return value | Gets the value of a field of a model obj that is prepared for the db. |
2,610 | def _repr_latex_(self):
lines = []
lines.append(r"\begin{align*}")
for lhs, rhs in zip(self.left_hand_side_descriptors, self.right_hand_side):
lines.append(r"\dot{{{0}}} &= {1} \\".format(sympy.latex(lhs.symbol), sympy.latex(rhs)))
lines.append(r"\end{align*}")
return "\n".join(lines) | This is used in IPython notebook it allows us to render the ODEProblem object in LaTeX.
How Cool is this? |
2,611 | def show_dependencies(self, stream=sys.stdout):
def child_iter(node):
return [d.node for d in node.deps]
def text_str(node):
return colored(str(node), color=node.status.color_opts["color"])
for task in self.iflat_tasks():
print(draw_tree(task, child_iter, text_str), file=stream) | Writes to the given stream the ASCII representation of the dependency tree. |
2,612 | def create(cls, name, gateway, network, input_speed=None,
output_speed=None, domain_server_address=None,
provider_name=None, probe_address=None,
standby_mode_period=3600, standby_mode_timeout=30,
active_mode_period=5, active_mode_timeout=1, comment=None):
json = {: name,
: element_resolver(gateway),
: element_resolver(network),
: input_speed,
: output_speed,
: probe_address,
: provider_name,
: comment,
: standby_mode_period,
: standby_mode_timeout,
: active_mode_period,
: active_mode_timeout}
if domain_server_address:
r = RankedDNSAddress([])
r.add(domain_server_address)
json.update(domain_server_address=r.entries)
return ElementCreator(cls, json) | Create a new StaticNetlink to be used as a traffic handler.
:param str name: name of netlink Element
:param gateway_ref: gateway to map this netlink to. This can be an element
or str href.
:type gateway_ref: Router,Engine
:param list ref: network/s associated with this netlink.
:type ref: list(str,Element)
:param int input_speed: input speed in Kbps, used for ratio-based
load-balancing
:param int output_speed: output speed in Kbps, used for ratio-based
load-balancing
:param list domain_server_address: dns addresses for netlink. Engine
DNS can override this field
:type dns_addresses: list(str,Element)
:param str provider_name: optional name to identify provider for this
netlink
:param list probe_address: list of IP addresses to use as probing
addresses to validate connectivity
:type probe_ip_address: list(str)
:param int standby_mode_period: Specifies the probe period when
standby mode is used (in seconds)
:param int standby_mode_timeout: probe timeout in seconds
:param int active_mode_period: Specifies the probe period when active
mode is used (in seconds)
:param int active_mode_timeout: probe timeout in seconds
:raises ElementNotFound: if using type Element parameters that are
not found.
:raises CreateElementFailed: failure to create netlink with reason
:rtype: StaticNetlink
.. note:: To monitor the status of the network links, you must define
at least one probe IP address. |
2,613 | def previous(self):
try:
return self.__class__.objects.for_model(self.content_object,
self.content_type).\
filter(order__gt=self.order).order_by('order')[0]
except IndexError:
return None | Returns previous image for same content_object and None if image
is the first. |
2,614 | def _read_sections(ifile):
if os.path.exists(ifile):
return read_sections(ifile, exclude_ungrouped=True, prt=None) | Read sections_in.txt file, if it exists. |
2,615 | def get_current_live_chat(self):
now = datetime.now()
chat = self.upcoming_live_chat()
if chat and chat.is_in_progress():
return chat
return None | Check if there is a live chat on the go, so that we should take
over the AskMAMA page with the live chat. |
2,616 | def _get_pos_name(code, name='parent', english=True, delimiter=':',
pos_tags=pos_map.POS_MAP):
pos_name = pos_map.get_pos_name(code, name, english, pos_tags=pos_tags)
return delimiter.join(pos_name) if name == 'all' else pos_name | Gets the part of speech name for *code*.
Joins the names together with *delimiter* if *name* is ``'all'``.
See :func:``pynlpir.pos_map.get_pos_name`` for more information. |
2,617 | def parse(s):
stuff = []
rest = s
while True:
front, token, rest = peel_off_esc_code(rest)
if front:
stuff.append(front)
if token:
try:
tok = token_type(token)
if tok:
stuff.extend(tok)
except ValueError:
raise ValueError("Can't parse escape sequence: %r %r %r %r" % (s, repr(front), token, repr(rest)))
if not rest:
break
return stuff | r"""
Returns a list of strings or format dictionaries to describe the strings.
May raise a ValueError if it can't be parsed.
>>> parse(">>> []")
['>>> []']
>>> #parse("\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m\x1b[33m]\x1b[39m\x1b[33m[\x1b[39m") |
2,618 | def set_path(self, file_path):
if not file_path:
self.read_data = self.memory_read
self.write_data = self.memory_write
elif not is_valid(file_path):
self.write_data(file_path, {})
self.path = file_path | Set the path of the database.
Create the file if it does not exist. |
2,619 | def dump_graph(self):
with self.lock:
return {
dot_separated(k): v.dump_graph_entry()
for k, v in self.relations.items()
} | Dump a key-only representation of the schema to a dictionary. Every
known relation is a key with a value of a list of keys it is referenced
by. |
2,620 | def update_slidepos(self):
g = get_root(self).globals
if not g.cpars[]:
self.after(20000, self.update_slidepos)
return
def slide_threaded_update():
try:
(pos_ms, pos_mm, pos_px), msg = g.fpslide.slide.return_position()
self.slide_pos_queue.put((pos_ms, pos_mm, pos_px))
except Exception as err:
t, v, tb = sys.exc_info()
error = traceback.format_exception_only(t, v)[0].strip()
tback = + \
.join(traceback.format_tb(tb))
g.FIFO.put((, error, tback))
t = threading.Thread(target=slide_threaded_update)
t.start()
self.after(20000, self.update_slidepos) | Periodically update the slide position.
Also farmed out to a thread to avoid hanging GUI main thread |
2,621 | def call_audit(func):
def audited_func(*args, **kwargs):
import traceback
stack = traceback.extract_stack()
r = func(*args, **kwargs)
func_name = func.__name__
print("@depth %d, trace %s -> %s(*%r, **%r) => %r" % (
len(stack),
" -> ".join("%s:%d:%s" % x[0:3] for x in stack[-5:-2]),
func_name,
args,
kwargs,
r))
return r
return audited_func | Print a detailed audit of all calls to this function. |
2,622 | def as_format(item, format_str='.2f'):
if isinstance(item, pd.Series):
return item.map(lambda x: format(x, format_str))
elif isinstance(item, pd.DataFrame):
return item.applymap(lambda x: format(x, format_str)) | Map a format string over a pandas object. |
2,623 | def _get_graph(graph, filename):
try:
rendered = graph.rendered_file
except AttributeError:
try:
graph.render(os.path.join(server.tmpdir, filename), format=)
rendered = filename
except OSError:
rendered = None
graph.rendered_file = rendered
return rendered | Retrieve or render a graph. |
2,624 | def mean_subtraction(x, mean, t, base_axis=1, update_running_mean=True):
from .function_bases import mean_subtraction as mean_subtraction_base
return mean_subtraction_base(x, mean, t,
base_axis=base_axis,
update_running_mean=update_running_mean) | r"""
It subtracts the mean of the elements of the input array,
and normalizes it to :math:`0`. Preprocessing arrays with this function has the effect of improving accuracy
in various tasks such as image classification.
At training time, this function is defined as
.. math::
\begin{eqnarray}
\mu &=& \frac{1}{M} \sum x_i \\
y_i &=& x_i - \mu
\end{eqnarray}
At testing time, the mean values used are those that were computed during training by moving average.
Note:
The backward performs an approximated differentiation that takes into account only the latest mini-batch.
Args:
x(~nnabla.Variable): N-D array of input.
mean(~nnabla.Variable): N-D array of running mean (modified during forward execution).
t(~nnabla.Variable): Scalar of num of iteration of running mean (modified during forward execution).
base_axis(int): Base axis of Mean Subtraction operation. Dimensions up to base_axis is treated as sample dimension.
[default=``1``]
update_running_mean(bool): Update running mean during forward execution.
[default=``True``]
Returns:
~nnabla.Variable: N-D array.
See Also:
``nnabla.function_bases.mean_subtraction``. |
2,625 | def _folder_item_assigned_worksheet(self, analysis_brain, item):
if not IAnalysisRequest.providedBy(self.context):
return
analysis_obj = self.get_object(analysis_brain)
worksheet = analysis_obj.getWorksheet()
if not worksheet:
return
title = t(_("Assigned to: ${worksheet_id}",
mapping={'worksheet_id': safe_unicode(worksheet.id)}))
img = get_image(, title=title)
anchor = get_link(worksheet.absolute_url(), img)
self._append_html_element(item, , anchor) | Adds an icon to the item dict if the analysis is assigned to a
worksheet and if the icon is suitable for the current context
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row |
2,626 | def raw_snapshot_data(self, name):
j, _ = self.datacenter.request('GET', self.path + '/snapshots/' +
str(name))
return j | ::
GET /:login/machines/:id/snapshots/:name
:param name: identifier for snapshot
:type name: :py:class:`basestring`
:rtype: :py:class:`dict`
Used internally to get a raw dict of a single machine snapshot. |
2,627 | def for_account_hash(parent, account_hash):
account = AccountProxy(parent)
account.account_hash = account_hash
if account.acquire():
return account
return None | Returns a new AccountProxy that acquires the account with the
given hash, if such an account is known to the account manager.
It is an error if the account manager does not have such an
account. |
2,628 | def launch_ipython_legacy_shell(args):
try:
from IPython.config.loader import Config
except ImportError:
_print("The SolveBio Python shell requires IPython.\n"
"To install, type: ")
return False
try:
globals(), locals())
InteractiveShellEmbed(config=cfg, banner1=banner1, exit_msg=exit_msg)() | Open the SolveBio shell (IPython wrapper) for older IPython versions |
2,629 | def create_secgroup_rule(self, protocol, from_port, to_port,
source, target):
nova = self.nova
def get_id(gname):
sg = nova.security_groups.find(name=gname)
if not sg:
raise BangError("Security group not found, %s" % gname)
return str(sg.id)
kwargs = {
: protocol,
: str(from_port),
: str(to_port),
: get_id(target),
}
if in source:
kwargs[] = source
else:
kwargs[] = get_id(source)
kwargs[] =
nova.security_group_rules.create(**kwargs) | Creates a new server security group rule.
:param str protocol: E.g. ``tcp``, ``icmp``, etc...
:param int from_port: E.g. ``1``
:param int to_port: E.g. ``65535``
:param str source:
:param str target: |
2,630 | def update(self, configuration, debug=None):
if self.config != dict(configuration) and debug != self.debug:
self.config = dict(configuration)
self.debug = debug
self.configure()
return True
return False | Update the internal configuration values, removing debug_only
handlers if debug is False. Returns True if the configuration has
changed from previous configuration values.
:param dict configuration: The logging configuration
:param bool debug: Toggles use of debug_only loggers
:rtype: bool |
2,631 | def touch(args):
from time import ctime
p = OptionParser(touch.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
info, = args
fp = open(info)
for row in fp:
path, atime, mtime = row.split()
atime = float(atime)
mtime = float(mtime)
current_atime, current_mtime = get_times(path)
if int(atime) == int(current_atime) and \
int(mtime) == int(current_mtime):
continue
times = [ctime(x) for x in (current_atime, current_mtime, atime, mtime)]
msg = "{0} : ".format(path)
msg += "({0}, {1}) => ({2}, {3})".format(*times)
print(msg, file=sys.stderr)
os.utime(path, (atime, mtime)) | %prog touch timestamp.info
Recover timestamps for files in the current folder.
CAUTION: you must execute this in the same directory as timestamp(). |
2,632 | def corr_dw_v1(self):
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
old = self.sequences.states.fastaccess_old
new = self.sequences.states.fastaccess_new
idx = der.toy[self.idx_sim]
if (con.maxdw[idx] > 0.) and ((old.w-new.w) > con.maxdw[idx]):
new.w = old.w-con.maxdw[idx]
self.interp_v()
flu.qa = flu.qz+(old.v-new.v)/der.seconds | Adjust the water stage drop to the highest value allowed and correct
the associated fluxes.
Note that method |corr_dw_v1| calls the method `interp_v` of the
respective application model. Hence the requirements of the actual
`interp_v` need to be considered additionally.
Required control parameter:
|MaxDW|
Required derived parameters:
|llake_derived.TOY|
|Seconds|
Required flux sequence:
|QZ|
Updated flux sequence:
|llake_fluxes.QA|
Updated state sequences:
|llake_states.W|
|llake_states.V|
Basic Restriction:
:math:`W_{old} - W_{new} \\leq MaxDW`
Examples:
In preparation for the following examples, define a short simulation
time period with a simulation step size of 12 hours and initialize
the required model object:
>>> from hydpy import pub
>>> pub.timegrids = '2000.01.01', '2000.01.04', '12h'
>>> from hydpy.models.llake import *
>>> parameterstep('1d')
>>> derived.toy.update()
>>> derived.seconds.update()
Select the first half of the second day of January as the simulation
step relevant for the following examples:
>>> model.idx_sim = pub.timegrids.init['2000.01.02']
The following tests are based on method |interp_v_v1| for the
interpolation of the stored water volume based on the corrected
water stage:
>>> model.interp_v = model.interp_v_v1
For the sake of simplicity, the underlying `w`-`v` relationship is
assumed to be linear:
>>> n(2.)
>>> w(0., 1.)
>>> v(0., 1e6)
The maximum drop in water stage for the first half of the second
day of January is set to 0.4 m/d. Note that, due to the difference
between the parameter step size and the simulation step size, the
actual value used for calculation is 0.2 m/12h:
>>> maxdw(_1_1_18=.1,
... _1_2_6=.4,
... _1_2_18=.1)
>>> maxdw
maxdw(toy_1_1_18_0_0=0.1,
toy_1_2_6_0_0=0.4,
toy_1_2_18_0_0=0.1)
>>> from hydpy import round_
>>> round_(maxdw.value[2])
0.2
Define old and new water stages and volumes in agreement with the
given linear relationship:
>>> states.w.old = 1.
>>> states.v.old = 1e6
>>> states.w.new = .9
>>> states.v.new = 9e5
Also define an inflow and an outflow value. Note the that the latter
is set to zero, which is inconsistent with the actual water stage drop
defined above, but done for didactic reasons:
>>> fluxes.qz = 1.
>>> fluxes.qa = 0.
Calling the |corr_dw_v1| method does not change the values of
either of following sequences, as the actual drop (0.1 m/12h) is
smaller than the allowed drop (0.2 m/12h):
>>> model.corr_dw_v1()
>>> states.w
w(0.9)
>>> states.v
v(900000.0)
>>> fluxes.qa
qa(0.0)
Note that the values given above are not recalculated, which can
clearly be seen for the lake outflow, which is still zero.
Through setting the new value of the water stage to 0.6 m, the actual
drop (0.4 m/12h) exceeds the allowed drop (0.2 m/12h). Hence the
water stage is trimmed and the other values are recalculated:
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.8)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963)
Through setting the maximum water stage drop to zero, method
|corr_dw_v1| is effectively disabled. Regardless of the actual
change in water stage, no trimming or recalculating is performed:
>>> maxdw.toy_01_02_06 = 0.
>>> states.w.new = .6
>>> model.corr_dw_v1()
>>> states.w
w(0.6)
>>> states.v
v(800000.0)
>>> fluxes.qa
qa(5.62963) |
2,633 | def delete(name=None, group_id=None, region=None, key=None, keyid=None,
profile=None, vpc_id=None, vpc_name=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
group_id=group_id, region=region, key=key, keyid=keyid,
profile=profile)
if group:
deleted = conn.delete_security_group(group_id=group.id)
if deleted:
log.info('Deleted security group %s with id %s.', group.name, group.id)
return True
else:
msg = 'Failed to delete security group {0}.'.format(name)
log.error(msg)
return False
else:
log.debug('Security group not found.')
return False | Delete a security group.
CLI example::
salt myminion boto_secgroup.delete mysecgroup |
2,634 | def get_assessment_notification_session_for_bank(self, assessment_receiver, bank_id):
if not self.supports_assessment_notification():
raise errors.Unimplemented()
return sessions.ItemNotificationSession(bank_id, runtime=self._runtime, receiver=assessment_receiver) | Gets the ``OsidSession`` associated with the assessment notification service for the given bank.
arg: assessment_receiver
(osid.assessment.AssessmentReceiver): the assessment
receiver interface
arg: bank_id (osid.id.Id): the ``Id`` of the bank
return: (osid.assessment.AssessmentNotificationSession) - ``an
_assessment_notification_session``
raise: NotFound - ``bank_id`` not found
raise: NullArgument - ``assessment_receiver`` or ``bank_id`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_assessment_notification()``
or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_notification()`` and
``supports_visible_federation()`` are ``true``.* |
2,635 | def do_bugin(self, args):
args = args.split()
if _debug: ConsoleCmd._debug("do_bugin %r", args)
if args:
loggerName = args[0]
if loggerName in logging.Logger.manager.loggerDict:
logger = logging.getLogger(loggerName)
else:
logger = None
else:
loggerName = '__root__'
logger = logging.getLogger()
if not logger:
self.stdout.write("not a valid logger name\n")
elif loggerName in self.handlers:
self.stdout.write("%s already has a handler\n" % loggerName)
else:
handler = ConsoleLogHandler(logger)
self.handlers[loggerName] = handler
self.stdout.write("handler to %s added\n" % loggerName)
self.stdout.write("\n") | bugin [ <logger> ] - add a console logging handler to a logger |
2,636 | def SA_tank(D, L, sideA=None, sideB=None, sideA_a=0,
sideB_a=0, sideA_f=None, sideA_k=None, sideB_f=None, sideB_k=None,
full_output=False):
if sideA == 'conical':
sideA_SA = SA_conical_head(D=D, a=sideA_a)
elif sideA == 'ellipsoidal':
sideA_SA = SA_ellipsoidal_head(D=D, a=sideA_a)
elif sideA == 'guppy':
sideA_SA = SA_guppy_head(D=D, a=sideA_a)
elif sideA == 'spherical':
sideA_SA = SA_partial_sphere(D=D, h=sideA_a)
elif sideA == 'torispherical':
sideA_SA = SA_torispheroidal(D=D, fd=sideA_f, fk=sideA_k)
else:
sideA_SA = pi/4*D**2
if sideB == 'conical':
sideB_SA = SA_conical_head(D=D, a=sideB_a)
elif sideB == 'ellipsoidal':
sideB_SA = SA_ellipsoidal_head(D=D, a=sideB_a)
elif sideB == 'guppy':
sideB_SA = SA_guppy_head(D=D, a=sideB_a)
elif sideB == 'spherical':
sideB_SA = SA_partial_sphere(D=D, h=sideB_a)
elif sideB == 'torispherical':
sideB_SA = SA_torispheroidal(D=D, fd=sideB_f, fk=sideB_k)
else:
sideB_SA = pi/4*D**2
lateral_SA = pi*D*L
SA = sideA_SA + sideB_SA + lateral_SA
if full_output:
return SA, (sideA_SA, sideB_SA, lateral_SA)
else:
return SA | r'''Calculates the surface are of a cylindrical tank with optional heads.
In the degenerate case of being provided with only `D` and `L`, provides
the surface area of a cylinder.
Parameters
----------
D : float
Diameter of the cylindrical section of the tank, [m]
L : float
Length of the main cylindrical section of the tank, [m]
sideA : string, optional
The left (or bottom for vertical) head of the tank's type; one of
[None, 'conical', 'ellipsoidal', 'torispherical', 'guppy', 'spherical'].
sideB : string, optional
The right (or top for vertical) head of the tank's type; one of
[None, 'conical', 'ellipsoidal', 'torispherical', 'guppy', 'spherical'].
sideA_a : float, optional
The distance the head as specified by sideA extends down or to the left
from the main cylindrical section, [m]
sideB_a : float, optional
The distance the head as specified by sideB extends up or to the right
from the main cylindrical section, [m]
sideA_f : float, optional
Dish-radius parameter for side A; fD = dish radius [1/m]
sideA_k : float, optional
knuckle-radius parameter for side A; kD = knuckle radius [1/m]
sideB_f : float, optional
Dish-radius parameter for side B; fD = dish radius [1/m]
sideB_k : float, optional
knuckle-radius parameter for side B; kD = knuckle radius [1/m]
Returns
-------
SA : float
Surface area of the tank [m^2]
areas : tuple, only returned if full_output == True
(sideA_SA, sideB_SA, lateral_SA)
Other Parameters
----------------
full_output : bool, optional
Returns a tuple of (sideA_SA, sideB_SA, lateral_SA) if True
Examples
--------
Cylinder, Spheroid, Long Cones, and spheres. All checked.
>>> SA_tank(D=2, L=2)
18.84955592153876
>>> SA_tank(D=1., L=0, sideA='ellipsoidal', sideA_a=2, sideB='ellipsoidal',
... sideB_a=2)
28.480278854014387
>>> SA_tank(D=1., L=5, sideA='conical', sideA_a=2, sideB='conical',
... sideB_a=2)
22.18452243965656
>>> SA_tank(D=1., L=5, sideA='spherical', sideA_a=0.5, sideB='spherical',
... sideB_a=0.5)
18.84955592153876 |
2,637 | def rank_members_in(self, leaderboard_name, members_and_scores):
for member, score in grouper(2, members_and_scores):
self.rank_member_in(leaderboard_name, member, score) | Rank an array of members in the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param members_and_scores [Array] Variable list of members and scores. |
2,638 | def update_context(self, ctx):
assert isinstance(ctx, dict)
ctx[str(self.context_id)] = self.value | updates the query context with this clauses values |
2,639 | def list_nodes_min(call=None):
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called with -f or --function.'
)
ret = {}
for device in get_devices_by_token():
ret[device.hostname] = {'id': device.id, 'state': device.state}
return ret | Return a list of the VMs that are on the provider. Only a list of VM names and
their state is returned. This is the minimum amount of information needed to
check for existing VMs.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt-cloud -f list_nodes_min packet-provider
salt-cloud --function list_nodes_min packet-provider |
2,640 | def remove_all_trips_fully_outside_buffer(db_conn, center_lat, center_lon, buffer_km, update_secondary_data=True):
distance_function_str = add_wgs84_distance_function_to_db(db_conn)
stops_within_buffer_query_sql = "SELECT stop_I FROM stops WHERE CAST(" + distance_function_str + \
"(lat, lon, {lat} , {lon}) AS INT) < {d_m}"\
.format(lat=float(center_lat), lon=float(center_lon), d_m=int(1000*buffer_km))
select_all_trip_Is_where_stop_I_is_within_buffer_sql = "SELECT distinct(trip_I) FROM stop_times WHERE stop_I IN (" + stops_within_buffer_query_sql + ")"
trip_Is_to_remove_sql = "SELECT trip_I FROM trips WHERE trip_I NOT IN ( " + select_all_trip_Is_where_stop_I_is_within_buffer_sql + ")"
trip_Is_to_remove = pandas.read_sql(trip_Is_to_remove_sql, db_conn)["trip_I"].values
trip_Is_to_remove_string = ",".join([str(trip_I) for trip_I in trip_Is_to_remove])
remove_all_trips_fully_outside_buffer_sql = "DELETE FROM trips WHERE trip_I IN (" + trip_Is_to_remove_string + ")"
remove_all_stop_times_where_trip_I_fully_outside_buffer_sql = "DELETE FROM stop_times WHERE trip_I IN (" + trip_Is_to_remove_string + ")"
db_conn.execute(remove_all_trips_fully_outside_buffer_sql)
db_conn.execute(remove_all_stop_times_where_trip_I_fully_outside_buffer_sql)
delete_stops_not_in_stop_times_and_not_as_parent_stop(db_conn)
db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
db_conn.execute(DELETE_DAYS_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_DAY_TRIPS2_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_CALENDAR_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)
db_conn.execute(DELETE_CALENDAR_DATES_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)
db_conn.execute(DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS)
db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)
if update_secondary_data:
update_secondary_data_copies(db_conn) | Not used in the regular filter process for the time being.
Parameters
----------
db_conn: sqlite3.Connection
connection to the GTFS object
center_lat: float
center_lon: float
buffer_km: float |
2,641 | def load_checkpoints(self, checkpointDirs):
self.memo_lookup_table = None
if not checkpointDirs:
return {}
if type(checkpointDirs) is not list:
raise BadCheckpoint("checkpointDirs expects a list of checkpoints")
return self._load_checkpoints(checkpointDirs) | Load checkpoints from the checkpoint files into a dictionary.
The results are used to pre-populate the memoizer's lookup_table
Kwargs:
- checkpointDirs (list) : List of run folder to use as checkpoints
Eg. ['runinfo/001', 'runinfo/002']
Returns:
- dict containing, hashed -> future mappings |
2,642 | def require(*args, **kwargs):
if not args and not kwargs:
return freeze()
requirements = list(args)
extra = ['{}{}'.format(kw, kwargs[kw]) for kw in kwargs]
requirements.extend(extra)
args = [, ]
args.extend(requirements)
pip.main(args) | Install a set of packages using pip
This is designed to be an interface for IPython notebooks that
replicates the requirements.txt pip format. This lets notebooks
specify which versions of packages they need inside the notebook
itself.
This function is the general-purpose interface that lets
the caller specify any version string for any package. |
2,643 | def processResponse(self, arg, replytype, **kw):
if self.debug:
log.msg( %arg, debug=1)
for h in self.handlers:
arg.addCallback(h.processResponse, **kw)
arg.addCallback(self.parseResponse, replytype) | Parameters:
arg -- deferred
replytype -- typecode |
2,644 | def lit_count(self):
lit_value = self.value * len(self)
if not isinstance(self[0], PWMLED):
lit_value = int(lit_value)
return lit_value | The number of LEDs on the bar graph actually lit up. Note that just
like :attr:`value`, this can be negative if the LEDs are lit from last
to first. |
2,645 | def rename(name):
from peltak.extra.gitflow import logic
if name is None:
name = click.prompt()
logic.hotfix.rename(name) | Give the currently developed hotfix a new name. |
2,646 | def decode_ay(ay):
if ay is None:
return ''
elif isinstance(ay, str):
return ay
elif isinstance(ay, bytes):
return ay.decode()
else:
return bytearray(ay).rstrip(bytearray((0,))).decode() | Convert binary blob from DBus queries to strings. |
2,647 | def decode(self, inputs, context, inference=False):
return self.decoder(inputs, context, inference) | Applies the decoder to inputs, given the context from the encoder.
:param inputs: tensor with inputs (batch, seq_len) if 'batch_first'
else (seq_len, batch)
:param context: context from the encoder
:param inference: if True inference mode, if False training mode |
2,648 | def fromstring(cls, s, *args, **kwargs):
s = s.replace("\(", "&lparen;")
s = s.replace("\)", "&rparen;")
s = s.replace("\[", "[")
s = s.replace("\]", "]")
s = s.replace("\{", "&lcurly;")
s = s.replace("\}", "&rcurly;")
p = []
i = 0
for m in re.finditer(r"\[.*?\]|\(.*?\)", s):
p.append(s[i:m.start()])
p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end()
p.append(s[i:])
s = "".join(p)
s = s.replace("][", "] [")
s = s.replace(")(", ") (")
s = s.replace("\|", "⊢")
s = re.sub(r"\s+\|\s+", "|", s)
s = re.sub(r"\s+", " ", s)
s = re.sub(r"\{\s+", "{", s)
s = re.sub(r"\s+\}", "}", s)
s = s.split(" ")
s = [v.replace("&space;"," ") for v in s]
P = cls([], *args, **kwargs)
G, O, i = [], [], 0
for s in s:
constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY))
constraint.index = len(P.sequence)
P.sequence.append(constraint)
while s.startswith("{"):
s = s[1:]
G.append((i, [])); i+=1
O.append([])
for g in G:
g[1].append(constraint)
while s.endswith("}"):
s = s[:-1]
if G: O[G[-1][0]] = G[-1][1]; G.pop()
P.groups = [g for g in O if g]
return P | Returns a new Pattern from the given string.
Constraints are separated by a space.
If a constraint contains a space, it must be wrapped in []. |
2,649 | def subscribe(self, clock_name: str=None, clock_slots: Iterable[str]=None, subscriptions: Dict[str, Any]={}):
for area in subscriptions:
init_full(self, area, subscriptions[area])
subscriptions[area] = {: subscriptions[area]}
if clock_name is not None:
self.clock_name = clock_name
self.clock_slots = clock_slots
subscriptions[clock_name] = {: clock_slots, : 1}
self.setup(puller=True, subscriptions=subscriptions) | Subscribes this Area to the given Areas and optionally given Slots. Must be called before the Area is run.
Args:
clock_name: The name of the Area that is used as synchronizing Clock.
clock_slots: The slots of the Clock relevant to this Area.
subscriptions: A dictionary containing the relevant Areas names as keys and optionally the Slots as values. |
2,650 | def flush(self, preserve=None):
if not os.path.exists(self.path):
return
if preserve is None:
shutil.rmtree(self.path)
return
for mip in self.vol.available_mips:
preserve_mip = self.vol.slices_from_global_coords(preserve)
preserve_mip = Bbox.from_slices(preserve_mip)
mip_path = os.path.join(self.path, self.vol.mip_key(mip))
if not os.path.exists(mip_path):
continue
for filename in os.listdir(mip_path):
bbox = Bbox.from_filename(filename)
if not Bbox.intersects(preserve_mip, bbox):
os.remove(os.path.join(mip_path, filename)) | Delete the cache for this dataset. Optionally preserve
a region. Helpful when working with overlaping volumes.
Warning: the preserve option is not multi-process safe.
You're liable to end up deleting the entire cache.
Optional:
preserve (Bbox: None): Preserve chunks located partially
or entirely within this bounding box.
Return: void |
2,651 | def immediate(self, name, value):
setattr(self, name, value)
self._all.add(name) | Load something immediately |
2,652 | def setconf(self, conf, rscpath, logger=None):
resource = self.pathresource(rscpath=rscpath, logger=logger)
if resource is None:
resource = self.resource()
try:
self._setconf(conf=conf, resource=resource, rscpath=rscpath)
except Exception as ex:
if logger is not None:
msg = .format(rscpath)
full_msg = .format(msg, ex, format_exc())
logger.error(full_msg)
reraise(self.Error, self.Error(msg)) | Set input conf in input path.
:param Configuration conf: conf to write to path.
:param str rscpath: specific resource path to use.
:param Logger logger: used to log info/errors.
:param bool error: raise catched errors.
:raises: ConfDriver.Error in case of error and input error. |
2,653 | def queue_it(queue=g_queue, **put_args):
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
queue.put(func(*args, **kwargs), **put_args)
return wrapper
return func_wrapper | Wrapper. Instead of returning the result of the function, add it to a queue.
.. code: python
import reusables
import queue
my_queue = queue.Queue()
@reusables.queue_it(my_queue)
def func(a):
return a
func(10)
print(my_queue.get())
# 10
:param queue: Queue to add result into |
2,654 | def in_resource(cls, session_type):
if cls.resources is AllSessionTypes:
return True
return session_type in cls.resources | Returns True if the attribute is part of a given session type.
The session_type is a tuple with the interface type and resource_class
:type session_type: (constants.InterfaceType, str)
:rtype: bool |
2,655 | def switch(self, idx, control):
old = None
new = None
if control == :
if self.PQ[idx] == 1:
old =
new =
elif self.vQ[idx] == 1:
old =
new =
elif control == :
if self.PQ[idx] == 1:
old =
new =
elif self.PV[idx] == 1:
old =
new =
elif control == :
if self.PV[idx] == 1:
old =
new =
elif self.vV[idx] == 1:
old =
new =
elif control == :
if self.vQ[idx] == 1:
old =
new =
elif self.vV[idx] == 1:
old =
new =
if old and new:
self.__dict__[old][idx] = 0
self.__dict__[new][idx] = 1 | Switch a single control of <idx> |
2,656 | def RunStateMethod(self, method_name, request=None, responses=None):
if self.rdf_flow.pending_termination:
self.Error(error_message=self.rdf_flow.pending_termination.reason)
return
client_id = self.rdf_flow.client_id
deadline = self.rdf_flow.processing_deadline
if deadline and rdfvalue.RDFDatetime.Now() > deadline:
raise flow.FlowError("Processing time for flow %s on %s expired." %
(self.rdf_flow.flow_id, self.rdf_flow.client_id))
self.rdf_flow.current_state = method_name
if request and responses:
logging.debug("Running %s for flow %s on %s, %d responses.", method_name,
self.rdf_flow.flow_id, client_id, len(responses))
else:
logging.debug("Running %s for flow %s on %s", method_name,
self.rdf_flow.flow_id, client_id)
try:
try:
method = getattr(self, method_name)
except AttributeError:
raise ValueError("Flow %s has no state method %s" %
(self.__class__.__name__, method_name))
responses = flow_responses.Responses.FromResponses(
request=request, responses=responses)
if responses.status is not None:
self.SaveResourceUsage(responses.status)
stats_collector_instance.Get().IncrementCounter("grr_worker_states_run")
if method_name == "Start":
stats_collector_instance.Get().IncrementCounter(
"flow_starts", fields=[self.rdf_flow.flow_class_name])
method()
else:
method(responses)
if self.replies_to_process:
if self.rdf_flow.parent_hunt_id and not self.rdf_flow.parent_flow_id:
self._ProcessRepliesWithHuntOutputPlugins(self.replies_to_process)
else:
self._ProcessRepliesWithFlowOutputPlugins(self.replies_to_process)
self.replies_to_process = []
except Exception as e:
stats_collector_instance.Get().IncrementCounter(
"flow_errors", fields=[self.rdf_flow.flow_class_name])
logging.exception("Flow %s on %s raised %s.", self.rdf_flow.flow_id,
client_id, utils.SmartUnicode(e))
self.Error(
error_message=utils.SmartUnicode(e), backtrace=traceback.format_exc()) | Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of FlowMessages responding to the request. |
2,657 | def enterprise_login_required(view):
@wraps(view)
def wrapper(request, *args, **kwargs):
if 'enterprise_uuid' not in kwargs:
raise Http404
enterprise_uuid = kwargs['enterprise_uuid']
enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)
if not request.user.is_authenticated:
parsed_current_url = urlparse(request.get_full_path())
parsed_query_string = parse_qs(parsed_current_url.query)
parsed_query_string.update({
: enterprise_customer.identity_provider,
FRESH_LOGIN_PARAMETER:
})
next_url = '{current_path}?{query_string}'.format(
current_path=quote(parsed_current_url.path),
query_string=urlencode(parsed_query_string, doseq=True)
)
return redirect(
.format(
login_url=,
params=urlencode(
{'next': next_url}
)
)
)
return view(request, *args, **kwargs)
return wrapper | View decorator for allowing authenticated user with valid enterprise UUID.
This decorator requires enterprise identifier as a parameter
`enterprise_uuid`.
This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to
the decorated view .
If there is no enterprise in database against the kwarg `enterprise_uuid`
or if the user is not authenticated then it will redirect the user to the
enterprise-linked SSO login page.
Usage::
@enterprise_login_required()
def my_view(request, enterprise_uuid):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_login_required)
def get(self, request, enterprise_uuid):
# Some functionality ... |
2,658 | def load_terminfo(terminal_name=None, fallback='vt100'):
terminal_name = os.getenv('TERM')
if not terminal_name:
if not fallback:
raise TerminfoError()
else:
terminal_name = fallback
if os.getenv('TERMINFO'):
terminfo_locations = [os.getenv('TERMINFO')]
else:
terminfo_locations = []
if os.getenv('TERMINFO_DIRS'):
for i in os.getenv('TERMINFO_DIRS').split(':'):
terminfo_locations.append(i or '/usr/share/terminfo')
terminfo_locations += [
os.path.expanduser('~/.terminfo'),
'/etc/terminfo',
'/lib/terminfo',
'/usr/share/terminfo',
]
terminfo_locations = list(OrderedDict.fromkeys(terminfo_locations))
terminfo_path = None
for dirpath in terminfo_locations:
path = os.path.join(dirpath, terminal_name[0], terminal_name)
if os.path.exists(path):
terminfo_path = path
break
if not path:
raise TerminfoError("Couldn%srb<hhhhhhBad magic numberascii|\x00<h<h\x00iso-8859-1'.join(string)
cap = StringCapability(*STRING_CAPABILITIES[idx], value=string)
terminfo.strings[cap.variable] = cap
idx += 1
terminfo._reset_index()
return terminfo | If the environment variable TERM is unset try with `fallback` if not empty.
vt100 is a popular terminal supporting ANSI X3.64. |
2,659 | def get_children(self, node):
try:
index = self.nodes.index(node) + 1
return [self.nodes[index]]
except IndexError:
return [] | Get children. |
2,660 | def create_pipeline(url, auth, json_payload, verify_ssl):
title = json_payload['pipelineConfig']['title']
description = json_payload['pipelineConfig']['description']
params = {'description': description, 'autoGeneratePipelineId': True}
logging.info('Creating pipeline: ' + title)
put_result = requests.put(url + + title, params=params, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
put_result.raise_for_status()
create_json = put_result.json()
logging.debug(create_json)
logging.info()
return create_json | Create a new pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json paylod as a dictionary.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json |
2,661 | def preload(python_data: LdapObject, database: Optional[Database] = None) -> LdapObject:
changes = {}
def preload_item(value: Any) -> Any:
if isinstance(value, NotLoaded):
return value.load(database)
else:
return value
for name in python_data.keys():
value_list = python_data.get_as_list(name)
if isinstance(value_list, NotLoadedObject):
raise RuntimeError(f"{name}: Unexpected NotLoadedObject outside list.")
elif isinstance(value_list, NotLoadedList):
value_list = value_list.load(database)
else:
if any(isinstance(v, NotLoadedList) for v in value_list):
raise RuntimeError(f"{name}: Unexpected NotLoadedList in list.")
elif any(isinstance(v, NotLoadedObject) for v in value_list):
value_list = [preload_item(value) for value in value_list]
else:
value_list = None
if value_list is not None:
changes[name] = value_list
return python_data.merge(changes) | Preload all NotLoaded fields in LdapObject. |
2,662 | def profileUpperLimit(self, delta = 2.71):
a = self.p_2
b = self.p_1
if self.vertex_x < 0:
c = self.p_0 + delta
else:
c = self.p_0 - self.vertex_y + delta
if b**2 - 4. * a * c < 0.:
print()
print(a, b, c)
return 0.
return max((np.sqrt(b**2 - 4. * a * c) - b) / (2. * a), (-1. * np.sqrt(b**2 - 4. * a * c) - b) / (2. * a)) | Compute one-sided upperlimit via profile method. |
2,663 | def get(no_create=False, server=None, port=None, force_uuid=None):
pid = os.getpid()
thread = threading.current_thread()
wdb = Wdb._instances.get((pid, thread))
if not wdb and not no_create:
wdb = object.__new__(Wdb)
Wdb.__init__(wdb, server, port, force_uuid)
wdb.pid = pid
wdb.thread = thread
Wdb._instances[(pid, thread)] = wdb
elif wdb:
if (server is not None and wdb.server != server
or port is not None and wdb.port != port):
log.warn()
else:
wdb.reconnect_if_needed()
return wdb | Get the thread local singleton |
2,664 | def display(self, filename=None):
if filename is None:
filename =
self.save(filename)
open_in_browser(filename) | Displays/opens the doc using the OS's default application. |
2,665 | def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
return pd.concat([
self._validate_input(table, failed_only=failed_only),
self._validate_output(table, failed_only=failed_only),
]).fillna(True) | Return a dataframe of validation results for the appropriate series vs the vector of validators.
Args:
table (pd.DataFrame): A dataframe on which to apply validation logic.
failed_only (bool): If ``True``: return only the indexes that failed to validate. |
2,666 | def assert_gt(left, right, message=None, extra=None):
assert left > right, _assert_fail_message(message, left, right, "<=", extra) | Raises an AssertionError if left_hand <= right_hand. |
2,667 | def enqueue_job(self, job):
self.log.debug(.format(job.id, job.origin))
interval = job.meta.get('interval', None)
repeat = job.meta.get('repeat', None)
cron_string = job.meta.get('cron_string', None)
if repeat:
job.meta['repeat'] = int(repeat) - 1
queue = self.get_queue_for_job(job)
queue.enqueue_job(job)
self.connection.zrem(self.scheduled_jobs_key, job.id)
if interval:
if repeat is not None:
if job.meta['repeat'] == 0:
return
self.connection.zadd(self.scheduled_jobs_key,
{job.id: to_unix(get_next_scheduled_time(cron_string))}) | Move a scheduled job to a queue. In addition, it also does puts the job
back into the scheduler if needed. |
2,668 | def count(self, model_class, conditions=None):
query = 'SELECT count() FROM $table'
if conditions:
query += ' WHERE ' + conditions
query = self._substitute(query, model_class)
r = self._send(query)
return int(r.text) if r.text else 0 | Counts the number of records in the model's table.
- `model_class`: the model to count.
- `conditions`: optional SQL conditions (contents of the WHERE clause). |
2,669 | def delete(ctx, opts, owner_repo_identifier, yes):
owner, repo, identifier = owner_repo_identifier
delete_args = {
"identifier": click.style(identifier, bold=True),
"repository": click.style(repo, bold=True),
}
prompt = (
"delete the %(identifier)s entitlement from the %(repository)s "
"repository" % delete_args
)
if not utils.confirm_operation(prompt, assume_yes=yes):
return
click.secho(
"Deleting %(identifier)s entitlement from the %(repository)s "
"repository ... " % delete_args,
nl=False,
)
context_msg = "Failed to delete the entitlement!"
with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg):
with maybe_spinner(opts):
api.delete_entitlement(owner=owner, repo=repo, identifier=identifier)
click.secho("OK", fg="green") | Delete an entitlement from a repository.
- OWNER/REPO/IDENTIFIER: Specify the OWNER namespace (i.e. user or org),
and the REPO name that has an entitlement identified by IDENTIFIER. All
separated by a slash.
Example: 'your-org/your-repo/abcdef123456'
Full CLI example:
$ cloudsmith ents delete your-org/your-repo/abcdef123456 |
2,670 | def array(
item_processor,
alias=None,
nested=None,
omit_empty=False,
hooks=None
):
processor = _Array(item_processor, alias, nested, omit_empty)
return _processor_wrap_if_hooks(processor, hooks) | Create an array processor that can be used to parse and serialize array data.
XML arrays may be nested within an array element, or they may be embedded
within their parent. A nested array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<nested-array>
<array-item>0</array-item>
<array-item>1</array-item>
</nested-array>
</root-element>
The corresponding embedded array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<array-item>0</array-item>
<array-item>1</array-item>
</root-element>
An array is considered required when its item processor is configured as being
required.
:param item_processor: A declxml processor object for the items of the array.
:param alias: If specified, the name given to the array when read from XML.
If not specified, then the name of the item processor is used instead.
:param nested: If the array is a nested array, then this should be the name of
the element under which all array items are located. If not specified, then
the array is treated as an embedded array. Can also be specified using supported
XPath syntax.
:param omit_empty: If True, then nested arrays will be omitted when serializing if
they are empty. Only valid when nested is specified. Note that an empty array
may only be omitted if it is not itself contained within an array. That is,
for an array of arrays, any empty arrays in the outer array will always be
serialized to prevent information about the original array from being lost
when serializing.
:param hooks: A Hooks object.
:return: A declxml processor object. |
2,671 | def get_or_create_evidence(self, citation: Citation, text: str) -> Evidence:
sha512 = hash_evidence(text=text, type=str(citation.type), reference=str(citation.reference))
if sha512 in self.object_cache_evidence:
evidence = self.object_cache_evidence[sha512]
self.session.add(evidence)
return evidence
evidence = self.get_evidence_by_hash(sha512)
if evidence is not None:
self.object_cache_evidence[sha512] = evidence
return evidence
evidence = Evidence(
text=text,
citation=citation,
sha512=sha512,
)
self.session.add(evidence)
self.object_cache_evidence[sha512] = evidence
return evidence | Create an entry and object for given evidence if it does not exist. |
2,672 | def from_str(cls, timestr, shaked=False):
orig = timestr
if not shaked:
timestr = cls.fix_timezone_separator(timestr)
try:
date = parser.parse(timestr)
except ValueError:
if not shaked:
shaked = False
for shaker in [
cls.fix_mispelled_day,
cls.remove_parenthesis_around_tz,
cls.remove_quotes_around_tz]:
new_timestr = shaker(timestr)
if new_timestr is not None:
timestr = new_timestr
shaked = True
if shaked:
try:
return cls.from_str(timestr, shaked=True)
except ValueError:
pass
msg = u"Unknown string format: {!r}".format(orig)
raise ValueError(msg), None, sys.exc_info()[2]
else:
try:
return cls.from_datetime(date)
except ValueError:
new_str = cls.remove_timezone(orig)
if new_str is not None:
return cls.from_str(new_str)
else:
raise | Use `dateutil` module to parse the give string
:param basestring timestr: string representing a date to parse
:param bool shaked: whether the input parameter been already
cleaned or not. |
2,673 | def npz_to_W_pdf(path=None, regx='w1pre_[0-9]+\.(npz)'):
file_list = load_file_list(path=path, regx=regx)
for f in file_list:
W = load_npz(path, f)[0]
logging.info("%s --> %s" % (f, f.split()[0] + ))
visualize.draw_weights(W, second=10, saveable=True, name=f.split()[0], fig_idx=2012) | r"""Convert the first weight matrix of `.npz` file to `.pdf` by using `tl.visualize.W()`.
Parameters
----------
path : str
A folder path to `npz` files.
regx : str
Regx for the file name.
Examples
---------
Convert the first weight matrix of w1_pre...npz file to w1_pre...pdf.
>>> tl.files.npz_to_W_pdf(path='/Users/.../npz_file/', regx='w1pre_[0-9]+\.(npz)') |
2,674 | def set_backlight(self, backlight):
if self._backlight is not None:
if self._pwm_enabled:
self._pwm.set_duty_cycle(self._backlight, self._pwm_duty_cycle(backlight))
else:
self._gpio.output(self._backlight, self._blpol if backlight else not self._blpol) | Enable or disable the backlight. If PWM is not enabled (default), a
non-zero backlight value will turn on the backlight and a zero value will
turn it off. If PWM is enabled, backlight can be any value from 0.0 to
1.0, with 1.0 being full intensity backlight. |
2,675 | def define_standalone_options(parser, extra_options=None):
c = config.parse_service_config()
parser.add_option(, , action=, dest=,
type=, help=,
default=c.db.host)
parser.add_option(, , action=, dest=,
type=, help=,
default=c.db.name)
parser.add_option(, , action=, dest=,
type=, help=,
default=c.db.port)
parser.add_option(, dest="db_username",
help="username to use for authentication ",
metavar="USER", default=c.db.username)
parser.add_option(, dest="db_password",
help="password to use for authentication ",
metavar="PASSWORD", default=c.db.password)
parser.add_option(, , action=, dest=,
help=,
default=False)
parser.add_option(, action=, dest=,
type=, help=,
default=os.environ.get(, ))
if extra_options:
for option in extra_options:
parser.add_option(option)
return parser | Adds the options specific to the database connection.
Parses the agency configuration files and uses its configuration as the
default values. |
2,676 | def is_citeable(publication_info):
def _item_has_pub_info(item):
return all(
key in item for key in (
'journal_title', 'journal_volume'
)
)
def _item_has_page_or_artid(item):
return any(
key in item for key in (
'page_start', 'artid'
)
)
has_pub_info = any(
_item_has_pub_info(item) for item in publication_info
)
has_page_or_artid = any(
_item_has_page_or_artid(item) for item in publication_info
)
return has_pub_info and has_page_or_artid | Check some fields in order to define if the article is citeable.
:param publication_info: publication_info field
already populated
:type publication_info: list |
2,677 | def add_permission(FunctionName, StatementId, Action, Principal, SourceArn=None,
SourceAccount=None, Qualifier=None,
region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
for key in ('SourceArn', 'SourceAccount', 'Qualifier'):
if locals()[key] is not None:
kwargs[key] = str(locals()[key])
conn.add_permission(FunctionName=FunctionName, StatementId=StatementId,
Action=Action, Principal=str(Principal),
**kwargs)
return {'added': True}
except ClientError as e:
return {'added': False, 'error': __utils__['boto3.get_error'](e)} | Add a permission to a lambda function.
Returns {added: true} if the permission was added and returns
{added: False} if the permission was not added.
CLI Example:
.. code-block:: bash
salt myminion boto_lamba.add_permission my_function my_id "lambda:*" \\
s3.amazonaws.com aws:arn::::bucket-name \\
aws-account-id |
2,678 | def move(self, dest, src):
doc = deepcopy(self.document)
parent, fragment = None, doc
for token in Pointer(src):
parent, fragment = fragment, token.extract(fragment,
bypass_ref=True)
if isinstance(parent, Mapping):
del parent[token]
if isinstance(parent, MutableSequence):
parent.pop(int(token))
return Target(doc).add(dest, fragment) | Move element from sequence, member from mapping.
:param dest: the destination
:type dest: Pointer
:param src: the source
:type src: Pointer
:return: resolved document
:rtype: Target
.. note::
This operation is functionally identical to a "remove" operation on
the "from" location, followed immediately by an "add" operation at
the target location with the value that was just removed.
The "from" location MUST NOT be a proper prefix of the "path"
location; i.e., a location cannot be moved into one of its children |
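A small sketch of the move semantics described above, assuming `Target` wraps a plain document and `Pointer` accepts RFC 6901 paths (both assumptions, since only this method is shown):
target = Target({'foo': {'bar': 'baz'}, 'qux': []})
result = target.move('/qux/0', '/foo/bar')   # destination first, then source
# result.document == {'foo': {}, 'qux': ['baz']}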
2,679 | def order_by(self, key_selector=identity):
if self.closed():
raise ValueError("Attempt to call order_by() on a "
"closed Queryable.")
if not is_callable(key_selector):
raise TypeError("order_by() parameter key_selector={key_selector} "
"is not callable".format(key_selector=repr(key_selector)))
return self._create_ordered(iter(self), -1, key_selector) | Sorts by a key in ascending order.
Introduces a primary sorting order to the sequence. Additional sort
criteria should be specified by subsequent calls to then_by() and
then_by_descending(). Calling order_by() or order_by_descending() on
the results of a call to order_by() will introduce a new primary
ordering which will override any already established ordering.
This method performs a stable sort. The order of two elements with the
same key will be preserved.
Note: This method uses deferred execution.
Args:
key_selector: A unary function which extracts a key from each
element using which the result will be ordered.
Returns:
An OrderedQueryable over the sorted elements.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the key_selector is not callable. |
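An illustrative call, assuming the enclosing Queryable is built from an iterable and exposes to_list() (names assumed from the query-API style of the docstring):
Queryable([3, 1, 2]).order_by().to_list()              # [1, 2, 3]
Queryable(['bb', 'a', 'ccc']).order_by(len).to_list()  # ['a', 'bb', 'ccc']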
2,680 | def stop(self):
logger.info("stopping process")
self.watcher.stop()
os.kill(self.child_pid, signal.SIGTERM) | Stop the process. |
2,681 | def _raiseImageMissing(self, pattern):
event = ImageMissingEvent(self, pattern=pattern, event_type="MISSING")
if self._imageMissingHandler is not None:
self._imageMissingHandler(event)
response = (event._response or self._findFailedResponse)
if response == "ABORT":
raise FindFailed(event)
elif response == "SKIP":
return False
elif response == "RETRY":
return True | Builds an ImageMissing event and triggers the default handler (or the custom handler,
if one has been specified). Returns True if throwing method should retry, False if it
should skip, and throws an exception if it should abort. |
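A sketch of a custom handler that downgrades a missing image to a skip; the setter name is an assumption based on the `_imageMissingHandler` attribute used above:
def skip_on_missing(event):
    event._response = "SKIP"     # the raising code reads event._response to decide the outcome

region.setImageMissingHandler(skip_on_missing)   # hypothetical setter name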
2,682 | def get_labels(self, plt, label_fontsize=10):
if len(self.slab_regions) > 1:
label_in_vac = (self.slab_regions[0][1] + self.slab_regions[1][0])/2
if abs(self.slab_regions[0][0]-self.slab_regions[0][1]) > \
abs(self.slab_regions[1][0]-self.slab_regions[1][1]):
label_in_bulk = self.slab_regions[0][1]/2
else:
label_in_bulk = (self.slab_regions[1][1] + self.slab_regions[1][0]) / 2
else:
label_in_bulk = (self.slab_regions[0][0] + self.slab_regions[0][1])/2
if self.slab_regions[0][0] > 1-self.slab_regions[0][1]:
label_in_vac = self.slab_regions[0][0] / 2
else:
label_in_vac = (1 + self.slab_regions[0][1]) / 2
plt.plot([0, 1], [self.vacuum_locpot]*2, , zorder=-5, linewidth=1)
xy = [label_in_bulk, self.vacuum_locpot+self.ave_locpot*0.05]
plt.annotate(r"$V_{vac}=%.2f$" %(self.vacuum_locpot), xy=xy,
xytext=xy, color=, fontsize=label_fontsize)
plt.plot([0, 1], [self.efermi]*2, ,
zorder=-5, linewidth=3)
xy = [label_in_bulk, self.efermi+self.ave_locpot*0.05]
plt.annotate(r"$E_F=%.2f$" %(self.efermi), xytext=xy,
xy=xy, fontsize=label_fontsize, color=)
plt.plot([0, 1], [self.ave_bulk_p]*2, , linewidth=1., zorder=-1)
xy = [label_in_vac, self.ave_bulk_p + self.ave_locpot * 0.05]
plt.annotate(r"$V^{interior}_{slab}=%.2f$" % (self.ave_bulk_p),
xy=xy, xytext=xy, color=, fontsize=label_fontsize)
plt.plot([label_in_vac]*2, [self.efermi, self.vacuum_locpot],
, zorder=-5, linewidth=2)
xy = [label_in_vac, self.efermi + self.ave_locpot * 0.05]
plt.annotate(r"$\Phi=%.2f$" %(self.work_function),
xy=xy, xytext=xy, fontsize=label_fontsize)
return plt | Handles the optional labelling of the plot with relevant quantities
Args:
plt (plt): Plot of the locpot vs c axis
label_fontsize (float): Fontsize of labels
Returns Labelled plt |
2,683 | def _update_settings(self, dialect):
for parameter in self.csv_params[2:]:
pname, ptype, plabel, phelp = parameter
widget = self._widget_from_p(pname, ptype)
if ptype is types.TupleType:
ptype = types.ObjectType
digest = Digest(acceptable_types=[ptype])
if pname == 'has_header':
if self.has_header is not None:
widget.SetValue(digest(self.has_header))
else:
value = getattr(dialect, pname)
widget.SetValue(digest(value)) | Sets the widget settings to those of the chosen dialect |
2,684 | def upload(self, login, package_name, release, basename, fd, distribution_type,
description='', md5=None, size=None, dependencies=None, attrs=None, channels=('main',), callback=None):
url = % (self.domain, login, package_name, release, quote(basename))
if attrs is None:
attrs = {}
if not isinstance(attrs, dict):
raise TypeError()
payload = dict(distribution_type=distribution_type, description=description, attrs=attrs,
dependencies=dependencies, channels=channels)
data, headers = jencode(payload)
res = self.session.post(url, data=data, headers=headers)
self._check_response(res)
obj = res.json()
s3url = obj[]
s3data = obj[]
if md5 is None:
_hexmd5, b64md5, size = compute_hash(fd, size=size)
elif size is None:
spos = fd.tell()
fd.seek(0, os.SEEK_END)
size = fd.tell() - spos
fd.seek(spos)
s3data[] = size
s3data[] = b64md5
data_stream, headers = stream_multipart(s3data, files={:(basename, fd)},
callback=callback)
request_method = self.session if s3url.startswith(self.domain) else requests
s3res = request_method.post(
s3url, data=data_stream,
verify=self.session.verify, timeout=10 * 60 * 60,
headers=headers
)
if s3res.status_code != 201:
logger.info(s3res.text)
logger.info()
logger.info()
raise errors.BinstarError(, s3res.status_code)
url = % (self.domain, login, package_name, release, quote(basename))
payload = dict(dist_id=obj[])
data, headers = jencode(payload)
res = self.session.post(url, data=data, headers=headers)
self._check_response(res)
return res.json() | Upload a new distribution to a package release.
:param login: the login of the package owner
:param package_name: the name of the package
:param version: the version string of the release
:param basename: the basename of the distribution to download
:param fd: a file like object to upload
:param distribution_type: pypi or conda or ipynb, etc
:param description: (optional) a short description about the file
:param attrs: any extra attributes about the file (eg. build=1, pyversion='2.7', os='osx') |
2,685 | def make_simple(self):
(inner_radius, outer_radius) = self.get_radii()
radius = (inner_radius + outer_radius) / 2
return cadquery.Workplane() \
.circle(radius).extrude(self.length) | Return a cylinder with the thread's average radius & length.
:math:`radius = (inner_radius + outer_radius) / 2` |
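For example, a thread with inner_radius = 2.5 and outer_radius = 3.0 is simplified to a plain cylinder of radius (2.5 + 3.0) / 2 = 2.75 extruded to the same length.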
2,686 | def safe_either(method, dictionary, key1, key2, default_value=None):
value = method(dictionary, key1)
return value if value is not None else method(dictionary, key2, default_value) | A helper-wrapper for the safe_value_2() family. |
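An illustration with a hypothetical `safe_value(dictionary, key, default=None)` getter from the same family:
config = {'db_port_legacy': 5984}
safe_either(safe_value, config, 'db_port', 'db_port_legacy', default_value=5432)
# -> 5984; falls back to the second key, and to 5432 only if both keys are missing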
2,687 | def clear_alarms(alarm):
if _TRAFFICCTL:
cmd = _traffic_ctl('alarm', 'clear', alarm)
else:
cmd = _traffic_line('--clear_alarms', alarm)
return _subprocess(cmd) | Clear (acknowledge) an alarm event. The arguments are “all” for all current
alarms, a specific alarm number (e.g. ‘‘1’‘), or an alarm string identifier
(e.g. ‘’MGMT_ALARM_PROXY_CONFIG_ERROR’‘).
.. code-block:: bash
salt '*' trafficserver.clear_alarms [all | #event | name] |
2,688 | def open(self):
options = copy(self.__options)
compression = None
if self.__scheme is None or self.__format is None:
detected_scheme, detected_format = helpers.detect_scheme_and_format(self.__source)
scheme = self.__scheme or detected_scheme
format = self.__format or detected_format
for type in config.SUPPORTED_COMPRESSION:
if self.__compression == type or detected_format == type:
compression = type
else:
scheme = self.__scheme
format = self.__format
self.__loader = None
if scheme is not None:
loader_class = self.__custom_loaders.get(scheme)
if loader_class is None:
if scheme not in config.LOADERS:
message = 'Scheme "%s" is not supported' % scheme
raise exceptions.SchemeError(message)
loader_path = config.LOADERS[scheme]
if loader_path:
loader_class = helpers.import_attribute(loader_path)
if loader_class is not None:
loader_options = helpers.extract_options(options, loader_class.options)
if compression and 'http_stream' in loader_class.options:
loader_options['http_stream'] = False  # download fully so the archive can be decompressed locally
self.__loader = loader_class(
bytes_sample_size=self.__bytes_sample_size,
**loader_options)
if compression == 'zip' and six.PY3:
source = self.__loader.load(self.__source, mode='b')
with zipfile.ZipFile(source) as archive:
name = archive.namelist()[0]
if 'filename' in options.keys():
name = options['filename']
del options['filename']
with archive.open(name) as file:
source = tempfile.NamedTemporaryFile(suffix='.' + name)
for line in file:
source.write(line)
source.seek(0)
self.__source = source
self.__loader = StreamLoader(bytes_sample_size=self.__bytes_sample_size)
format = self.__format or helpers.detect_scheme_and_format(source.name)[1]
scheme = 'stream'
elif compression == 'gz' and six.PY3:
name = self.__source.replace('.gz', '')
self.__source = gzip.open(self.__loader.load(self.__source, mode='b'))
self.__loader = StreamLoader(bytes_sample_size=self.__bytes_sample_size)
format = self.__format or helpers.detect_scheme_and_format(name)[1]
scheme = 'stream'
elif compression:
message = 'Compression "%s" is not supported'
raise exceptions.TabulatorException(message % compression)
parser_class = self.__custom_parsers.get(format)
if parser_class is None:
if format not in config.PARSERS:
message = 'Format "%s" is not supported' % format
raise exceptions.FormatError(message)
parser_class = helpers.import_attribute(config.PARSERS[format])
parser_options = helpers.extract_options(options, parser_class.options)
self.__parser = parser_class(self.__loader,
force_parse=self.__force_parse,
**parser_options)
if options:
message = 'Not supported option(s) "%s" for scheme "%s" and format "%s"'
message = message % (', '.join(options), scheme, format)
warnings.warn(message, UserWarning)
self.__parser.open(self.__source, encoding=self.__encoding)
self.__extract_sample()
self.__extract_headers()
if not self.__allow_html:
self.__detect_html()
self.__actual_scheme = scheme
self.__actual_format = format
self.__actual_encoding = self.__parser.encoding
return self | Opens the stream for reading. |
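This `open()` reads like part of a tabulator-style Stream class, so a typical call site might look like the following (the class name and keyword arguments are assumptions):
with Stream('data.csv.gz', headers=1) as stream:
    for row in stream.iter():
        print(row)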
2,689 | def FindByIndex(node, index):
result = None
if isinstance(node.children, dict):
result = node.GetChild(index)
if result is None:
children = list(node.children.keys())
child = 0
while child < len(children) and result is None:
key = children[child]
result = FindByIndex(node.GetChild(key), index)
if result is not None:
break
child += 1
else:
child = 0
while child < len(node.children) and result is None:
result = FindByIndex(node.GetChild(child), index)
if result is not None:
break
child += 1
return result | Method which finds child according to index. Applies only to nodes whose children are sorted into a dict,
so if the current node's children are in a list it will recursively search - similarly if the index is not found
in the current node's dictionary indexes.
:param node: current node to search for
:param index: index of child.
:return: |
2,690 | def _get_field(self, field):
if not hasattr(self, "id") or self.id is None:
raise APIResponseError("Cannot query an article without an id")
sq = next(SearchQuery(q="id:{}".format(self.id), fl=field))
if field not in sq._raw:
if field in ["reference", "citation", "metrics", "bibtex"]:
pass
else:
return None
value = sq.__getattribute__(field)
self._raw[field] = value
return value | Queries the api for a single field for the record by `id`.
This method should only be called indirectly by cached properties.
:param field: name of the record field to load |
2,691 | def get_user_permissions(self, username):
path = Client.urls[] % (username,)
conns = self._call(path, 'GET')
return conns | :returns: list of dicts, or an empty list if there are no permissions.
:param string username: User to set permissions for. |
2,692 | def delete_many(self, mongo_collection, filter_doc, mongo_db=None, **kwargs):
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.delete_many(filter_doc, **kwargs) | Deletes one or more documents in a mongo collection.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
:param mongo_collection: The name of the collection to delete from.
:type mongo_collection: str
:param filter_doc: A query that matches the documents to delete.
:type filter_doc: dict
:param mongo_db: The name of the database to use.
Can be omitted; then the database from the connection string is used.
:type mongo_db: str |
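A hedged usage sketch, assuming the surrounding class is a Mongo hook constructed from a connection id (the constructor argument is illustrative):
hook = MongoHook(conn_id='mongo_default')
result = hook.delete_many('events', {'status': 'stale'}, mongo_db='analytics')
print(result.deleted_count)   # pymongo DeleteResult from the underlying call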
2,693 | def _get_argspec(func):
if inspect.isclass(func):
func = func.__init__
if not inspect.isfunction(func):
return [], False
parameters = inspect.signature(func).parameters
args = []
uses_starstar = False
for par in parameters.values():
if (par.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD or
par.kind == inspect.Parameter.KEYWORD_ONLY):
args.append(par.name)
elif par.kind == inspect.Parameter.VAR_KEYWORD:
uses_starstar = True
return args, uses_starstar | Helper function to support both Python versions |
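A quick check of what the helper returns for a function mixing keyword-only and **kwargs parameters:
def sample(a, b, *, c=None, **extra):
    pass

print(_get_argspec(sample))   # (['a', 'b', 'c'], True): **extra sets uses_starstar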
2,694 | def _headline(self, error, i: int) -> str:
msgs = Msg()
if error.errclass == "fatal":
msg = msgs.fatal(i)
elif error.errclass == "warning":
msg = msgs.warning(i)
elif error.errclass == "info":
msg = msgs.info(i)
elif error.errclass == "debug":
msg = msgs.debug(i)
elif error.errclass == "via":
msg = msgs.via(i)
else:
msg = msgs.error(i)
if error.function is not None:
msg += " from " + colors.bold(error.function)
if error.caller is not None:
msg += " called from " + colors.bold(error.caller)
if error.caller_msg is not None:
msg += "\n" + error.caller_msg
if error.function is not None and error.msg is not None:
msg += ": "
else:
msg = msg + " "
if error.errtype is not None:
msg += error.errtype + " : "
if error.msg is not None:
msg += error.msg
return msg | Format the error message's headline |
2,695 | def dataset(self, **kwargs):
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
try:
return NameFactory.dataset_format.format(**kwargs_copy)
except KeyError:
return None | Return a key that specifies the data selection |
2,696 | def subsample(self):
logging.info('Subsampling 1000 reads from the baited FASTQ files')
for _ in range(self.cpus):
threads = Thread(target=self.subsamplethreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
sample[self.analysistype].subsampledfastq = \
os.path.splitext(sample[self.analysistype].baitedfastq)[0] + '_subsampled.fastq'
# command template inferred from the docstring's "1000 reads"; seqtk sample syntax assumed
sample[self.analysistype].seqtkcall = \
'seqtk sample {} 1000 > {}'.format(sample[self.analysistype].baitedfastq,
sample[self.analysistype].subsampledfastq)
self.samplequeue.put(sample)
self.samplequeue.join() | Subsample 1000 reads from the baited files |
2,697 | def _proxy(self):
if self._context is None:
self._context = UsageContext(self._version, sim_sid=self._solution['sim_sid'], )
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: UsageContext for this UsageInstance
:rtype: twilio.rest.preview.wireless.sim.usage.UsageContext |
2,698 | def _handleBulletWidth(bulletText, style, maxWidths):
if bulletText:
if isinstance(bulletText, basestring):
bulletWidth = stringWidth(bulletText, style.bulletFontName, style.bulletFontSize)
else:
bulletWidth = 0
for f in bulletText:
bulletWidth = bulletWidth + stringWidth(f.text, f.fontName, f.fontSize)
bulletRight = style.bulletIndent + bulletWidth + 0.6 * style.bulletFontSize
indent = style.leftIndent + style.firstLineIndent
if bulletRight > indent:
maxWidths[0] -= (bulletRight - indent) | work out bullet width and adjust maxWidths[0] if neccessary |
2,699 | def _poor_convergence(z, r, f, bn, mvec):
check_points = (-0.4 + 0.3j, 0.7 + 0.2j, 0.02 - 0.06j)
diffs = []
ftests = []
for check_point in check_points:
rtest = r * check_point
ztest = z + rtest
ftest = f(ztest)
comp = np.sum(bn * np.power(check_point, mvec))
ftests.append(ftest)
diffs.append(comp - ftest)
max_abs_error = np.max(np.abs(diffs))
max_f_value = np.max(np.abs(ftests))
return max_abs_error > 1e-3 * max_f_value | Test for poor convergence based on three function evaluations.
This test evaluates the function at the three points and returns false if
the relative error is greater than 1e-3. |
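A small self-contained check: for f(z) = exp(z) around z = 0, coefficients bn = r**m / m! reproduce f at the test points to well under the 0.1% tolerance, so the test reports no poor convergence (the setup below is illustrative):
import numpy as np
from math import factorial

f = np.exp
z, r = 0.0, 0.1
mvec = np.arange(8)
bn = np.array([r ** m / factorial(m) for m in mvec])
print(_poor_convergence(z, r, f, bn, mvec))   # False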