def __view_add_actions(self):
"""
Sets the **Components_Manager_Ui_treeView** actions.
"""
self.Components_Manager_Ui_treeView.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.ComponentsManagerUi|Activate Component(s)",
slot=self.__view_activate_components_action__triggered))
self.Components_Manager_Ui_treeView.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.ComponentsManagerUi|Deactivate Component(s)",
slot=self.__view_deactivate_components_action__triggered))
separator_action = QAction(self.Components_Manager_Ui_treeView)
separator_action.setSeparator(True)
self.Components_Manager_Ui_treeView.addAction(separator_action)
self.Components_Manager_Ui_treeView.addAction(self.__engine.actions_manager.register_action(
"Actions|Umbra|Components|factory.ComponentsManagerUi|Reload Component(s)",
slot=self.__view_reload_components_action__triggered))
separator_action = QAction(self.Components_Manager_Ui_treeView)
separator_action.setSeparator(True)
    self.Components_Manager_Ui_treeView.addAction(separator_action)
def name(self):
"""
The name for the window as displayed in the title bar and status bar.
"""
    # Name, explicitly set for the pane.
if self.chosen_name:
return self.chosen_name
else:
# Name from the process running inside the pane.
name = self.process.get_name()
if name:
return os.path.basename(name)
        return ''
def support_scripting(self):
"""
    Returns True if scripting is available. Checks are done in the client
    library (redis-py) AND the redis server. The result is cached, so the
    check is performed only once.
"""
if not hasattr(self, '_support_scripting'):
try:
self._support_scripting = self.redis_version >= (2, 5) \
and hasattr(self.connection, 'register_script')
except:
self._support_scripting = False
    return self._support_scripting
def add_node(self, id, label=None, type='CLASS', meta=None):
"""
Add a new node to the ontology
"""
g = self.get_graph()
if meta is None:
meta={}
    g.add_node(id, label=label, type=type, meta=meta)
def moment1(self):
"""The first time delay weighted statistical moment of the
instantaneous unit hydrograph."""
delays, response = self.delay_response_series
    return statstools.calc_mean_time(delays, response)
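Read as an equation, and assuming that statstools.calc_mean_time computes the response-weighted mean of the delay values (a sketch, not the library's documented formula), the first moment is

    \( m_1 = \dfrac{\sum_k t_k \, q_k}{\sum_k q_k} \)

where \(t_k\) are the delays and \(q_k\) the corresponding ordinates of the instantaneous unit hydrograph.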
def nl_msg_in_handler_debug(msg, arg):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L114."""
ofd = arg or _LOGGER.debug
ofd('-- Debug: Received Message:')
nl_msg_dump(msg, ofd)
    return NL_OK
def assemble_concatenated_meta(concated_meta_dfs, remove_all_metadata_fields):
""" Assemble the concatenated metadata dfs together. For example,
if horizontally concatenating, the concatenated metadata dfs are the
column metadata dfs. Both indices are sorted.
    Args:
        concated_meta_dfs (list of pandas dfs)
        remove_all_metadata_fields (bool): if True, all metadata columns are
            dropped before concatenation
    Returns:
        all_concated_meta_df_sorted (pandas df)
"""
# Concatenate the concated_meta_dfs
if remove_all_metadata_fields:
for df in concated_meta_dfs:
df.drop(df.columns, axis=1, inplace=True)
all_concated_meta_df = pd.concat(concated_meta_dfs, axis=0)
# Sanity check: the number of rows in all_concated_meta_df should correspond
# to the sum of the number of rows in the input dfs
n_rows = all_concated_meta_df.shape[0]
logger.debug("all_concated_meta_df.shape[0]: {}".format(n_rows))
n_rows_cumulative = sum([df.shape[0] for df in concated_meta_dfs])
assert n_rows == n_rows_cumulative
# Sort the index and columns
all_concated_meta_df_sorted = all_concated_meta_df.sort_index(axis=0).sort_index(axis=1)
    return all_concated_meta_df_sorted
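A minimal sketch of what the assembly does, with hypothetical inputs (not from the original test suite; it assumes the function is importable alongside pandas):

import pandas as pd

# two column-metadata dfs left over from a horizontal concatenation
meta_a = pd.DataFrame({"pert_type": ["trt"]}, index=["sample_2"])
meta_b = pd.DataFrame({"pert_type": ["ctl"]}, index=["sample_1"])

combined = assemble_concatenated_meta([meta_a, meta_b],
                                      remove_all_metadata_fields=False)
# combined keeps both rows, with index and columns sorted:
#           pert_type
# sample_1        ctl
# sample_2        trt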
def check_length_of_initial_values(self, init_values):
"""
Ensures that the initial values are of the correct length.
"""
# Figure out how many shape parameters we should have and how many
# index coefficients we should have
num_nests = self.rows_to_nests.shape[1]
num_index_coefs = self.design.shape[1]
assumed_param_dimensions = num_index_coefs + num_nests
if init_values.shape[0] != assumed_param_dimensions:
msg = "The initial values are of the wrong dimension"
msg_1 = "It should be of dimension {}"
msg_2 = "But instead it has dimension {}"
raise ValueError(msg +
msg_1.format(assumed_param_dimensions) +
msg_2.format(init_values.shape[0]))
    return None
def template_class_from_name(name):
""" Return the template class object from agiven name. """
# import the right template module
term = TerminalView()
template_name = name + 'Template'
try:
__import__('projy.templates.' + template_name)
template_mod = sys.modules['projy.templates.' + template_name]
except ImportError:
term.print_error_and_exit("Unable to find {}".format(name))
# import the class from the module
try:
template_class = getattr(template_mod, template_name)
except AttributeError:
term.print_error_and_exit("Unable to create a template {}".format(name))
    return template_class()
def train(*tf_records: "Records to train on"):
"""Train on examples."""
tf.logging.set_verbosity(tf.logging.INFO)
estimator = dual_net.get_estimator()
effective_batch_size = FLAGS.train_batch_size
if FLAGS.use_tpu:
effective_batch_size *= FLAGS.num_tpu_cores
if FLAGS.use_tpu:
if FLAGS.use_bt:
def _input_fn(params):
games = bigtable_input.GameQueue(
FLAGS.cbt_project, FLAGS.cbt_instance, FLAGS.cbt_table)
games_nr = bigtable_input.GameQueue(
FLAGS.cbt_project, FLAGS.cbt_instance, FLAGS.cbt_table + '-nr')
return preprocessing.get_tpu_bt_input_tensors(
games,
games_nr,
params['batch_size'],
number_of_games=FLAGS.window_size,
random_rotation=True)
else:
def _input_fn(params):
return preprocessing.get_tpu_input_tensors(
params['batch_size'],
tf_records,
random_rotation=True)
# Hooks are broken with TPUestimator at the moment.
hooks = []
else:
def _input_fn():
return preprocessing.get_input_tensors(
FLAGS.train_batch_size,
tf_records,
filter_amount=FLAGS.filter_amount,
shuffle_buffer_size=FLAGS.shuffle_buffer_size,
random_rotation=True)
hooks = [UpdateRatioSessionHook(FLAGS.work_dir),
EchoStepCounterHook(output_dir=FLAGS.work_dir)]
steps = FLAGS.steps_to_train
logging.info("Training, steps = %s, batch = %s -> %s examples",
steps or '?', effective_batch_size,
(steps * effective_batch_size) if steps else '?')
if FLAGS.use_bt:
games = bigtable_input.GameQueue(
FLAGS.cbt_project, FLAGS.cbt_instance, FLAGS.cbt_table)
if not games.read_wait_cell():
games.require_fresh_games(20000)
latest_game = games.latest_game_number
index_from = max(latest_game, games.read_wait_cell())
print("== Last game before training:", latest_game, flush=True)
print("== Wait cell:", games.read_wait_cell(), flush=True)
try:
estimator.train(_input_fn, steps=steps, hooks=hooks)
if FLAGS.use_bt:
bigtable_input.set_fresh_watermark(games, index_from,
FLAGS.window_size)
except:
if FLAGS.use_bt:
games.require_fresh_games(0)
        raise
def _set_value(self, new_value):
"""Sets the current value of the parameter, ensuring that it is within the allowed range."""
if self.min_value is not None and new_value < self.min_value:
raise SettingOutOfBounds(
"Trying to set parameter {0} = {1}, which is less than the minimum allowed {2}".format(
self.name, new_value, self.min_value))
if self.max_value is not None and new_value > self.max_value:
raise SettingOutOfBounds(
"Trying to set parameter {0} = {1}, which is more than the maximum allowed {2}".format(
self.name, new_value, self.max_value))
# Issue a warning if there is an auxiliary variable, as the setting does not have any effect
if self.has_auxiliary_variable():
with warnings.catch_warnings():
warnings.simplefilter("always", RuntimeWarning)
warnings.warn("You are trying to assign to a parameter which is either linked or "
"has auxiliary variables. The assignment has no effect.", RuntimeWarning)
# Save the value as a pure floating point to avoid the overhead of the astropy.units machinery when
# not needed
if self._transformation is None:
new_internal_value = new_value
else:
new_internal_value = self._transformation.forward(new_value)
# If the parameter has changed, update its value and call the callbacks if needed
if new_internal_value != self._internal_value:
# Update
self._internal_value = new_internal_value
# Call the callbacks (if any)
for callback in self._callbacks:
try:
callback(self)
except:
                raise NotCallableOrErrorInCall("Could not call callback for parameter %s" % self.name)
def predict(data, training_dir=None, model_name=None, model_version=None, cloud=False):
"""Runs prediction locally or on the cloud.
Args:
data: List of csv strings or a Pandas DataFrame that match the model schema.
training_dir: local path to the trained output folder.
model_name: deployed model name
    model_version: deployed model version
cloud: bool. If False, does local prediction and data and training_dir
must be set. If True, does cloud prediction and data, model_name,
and model_version must be set.
For cloud prediction, the model must be created. This can be done by running
two gcloud commands::
1) gcloud beta ml models create NAME
2) gcloud beta ml versions create VERSION --model NAME --origin gs://BUCKET/training_dir/model
or these datalab commands:
1) import google.datalab as datalab
model = datalab.ml.ModelVersions(MODEL_NAME)
model.deploy(version_name=VERSION, path='gs://BUCKET/training_dir/model')
Note that the model must be on GCS.
Returns:
Pandas DataFrame.
"""
if cloud:
if not model_version or not model_name:
raise ValueError('model_version or model_name is not set')
if training_dir:
raise ValueError('training_dir not needed when cloud is True')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return cloud_predict(model_name, model_version, data)
else:
if not training_dir:
raise ValueError('training_dir is not set')
if model_version or model_name:
raise ValueError('model_name and model_version not needed when cloud is '
'False.')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
            return local_predict(training_dir, data)
def _get_styles(self, style_urls, asset_url_path):
"""
Gets the content of the given list of style URLs and
inlines assets.
"""
styles = []
for style_url in style_urls:
urls_inline = STYLE_ASSET_URLS_INLINE_FORMAT.format(
asset_url_path.rstrip('/'))
asset_content = self._download(style_url)
content = re.sub(urls_inline, self._match_asset, asset_content)
styles.append(content)
    return styles
def pv_absent(name):
'''
Ensure that a Physical Device is not being used by lvm
name
        The device name to remove.
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if not __salt__['lvm.pvdisplay'](name, quiet=True):
ret['comment'] = 'Physical Volume {0} does not exist'.format(name)
elif __opts__['test']:
ret['comment'] = 'Physical Volume {0} is set to be removed'.format(name)
ret['result'] = None
return ret
else:
changes = __salt__['lvm.pvremove'](name)
if __salt__['lvm.pvdisplay'](name, quiet=True):
ret['comment'] = 'Failed to remove Physical Volume {0}'.format(name)
ret['result'] = False
else:
ret['comment'] = 'Removed Physical Volume {0}'.format(name)
ret['changes']['removed'] = changes
    return ret
def make_venv(self, dj_version):
"""Creates a virtual environment for a given Django version.
:param str dj_version:
:rtype: str
:return: path to created virtual env
"""
venv_path = self._get_venv_path(dj_version)
self.logger.info('Creating virtual environment for Django %s ...' % dj_version)
try:
create_venv(venv_path, **VENV_CREATE_KWARGS)
except ValueError:
self.logger.warning('Virtual environment directory already exists. Skipped.')
self.venv_install('django==%s' % dj_version, venv_path)
    return venv_path
def header(self, name, default=None):
"""
Returns the value of the HTTP header identified by `name`.
"""
wsgi_header = "HTTP_{0}".format(name.upper())
try:
return self.env_raw[wsgi_header]
except KeyError:
        return default
def execute(self, request):
"""Execute a request and return a response"""
url = request.uri
if request.parameters:
url += '?' + urlencode(request.parameters)
if request.headers:
headers = dict(self._headers, **request.headers)
else:
headers = self._headers
retry = 0
server = getattr(self._local, "server", None)
while True:
if not server:
self._local.server = server = self._get_server()
try:
parse_result = urlparse(server)
conn = get_pool().connection_from_host(parse_result.hostname,
parse_result.port,
parse_result.scheme)
kwargs = dict(
method=Method._VALUES_TO_NAMES[request.method],
url=parse_result.path + url,
body=request.body,
headers=headers,
timeout=self._timeout,
)
response = conn.urlopen(**kwargs)
return RestResponse(status=response.status,
body=response.data,
headers=response.headers)
except (IOError, urllib3.exceptions.HTTPError) as ex:
self._drop_server(server)
self._local.server = server = None
if retry >= self._max_retries:
logger.error("Client error: bailing out after %d failed retries",
self._max_retries, exc_info=1)
raise NoServerAvailable(ex)
logger.exception("Client error: %d retries left", self._max_retries - retry)
            retry += 1
def convex_hull_image(image):
'''Given a binary image, return an image of the convex hull'''
labels = image.astype(int)
points, counts = convex_hull(labels, np.array([1]))
output = np.zeros(image.shape, int)
for i in range(counts[0]):
inext = (i+1) % counts[0]
draw_line(output, points[i,1:], points[inext,1:],1)
output = fill_labeled_holes(output)
    return output == 1
def set_speed(self, value):
''' set total axes movement speed in mm/second'''
self._combined_speed = float(value)
speed_per_min = int(self._combined_speed * SEC_PER_MIN)
command = GCODES['SET_SPEED'] + str(speed_per_min)
log.debug("set_speed: {}".format(command))
    self._send_command(command)
def dict_to_etree(d, root):
u"""Converts a dict to lxml.etree object.
>>> dict_to_etree({'root': {'#text': 'node_text', '@attr': 'val'}}, etree.Element('root')) # doctest: +ELLIPSIS
<Element root at 0x...>
:param dict d: dict representing the XML tree
:param etree.Element root: XML node which will be assigned the resulting tree
    :returns: The root node with the converted tree attached
    :rtype: etree.Element
"""
def _to_etree(d, node):
if d is None or len(d) == 0:
return
elif isinstance(d, basestring):
node.text = d
elif isinstance(d, dict):
for k, v in d.items():
assert isinstance(k, basestring)
if k.startswith('#'):
assert k == '#text'
assert isinstance(v, basestring)
node.text = v
elif k.startswith('@'):
assert isinstance(v, basestring)
node.set(k[1:], v)
elif isinstance(v, list):
# No matter the child count, their parent will be the same.
sub_elem = etree.SubElement(node, k)
for child_num, e in enumerate(v):
if e is None:
if child_num == 0:
# Found the first occurrence of an empty child,
# skip creating of its XML repr, since it would be
# the same as ``sub_element`` higher up.
continue
# A list with None element means an empty child node
# in its parent, thus, recreating tags we have to go
# up one level.
                            # <node><child/><child/></node>  <=>  {'node': {'child': [None, None]}}
_to_etree(node, k)
else:
# If this isn't first child and it's a complex
# value (dict), we need to check if it's value
# is equivalent to None.
if child_num != 0 and not (isinstance(e, dict) and not all(e.values())):
# At least one child was None, we have to create
# a new parent-node, which will not be empty.
sub_elem = etree.SubElement(node, k)
_to_etree(e, sub_elem)
else:
_to_etree(v, etree.SubElement(node, k))
elif etree.iselement(d):
# Supports the case, when we got an empty child and want to recreate it.
etree.SubElement(d, node)
else:
raise AttributeError('Argument is neither dict nor basestring.')
_to_etree(d, root)
    return root
def main(args=sys.argv[1:]):
"""Extract text from a file.
Commands:
extract - extract text from path
check - make sure all deps are installed
Usage:
fulltext extract [-v] [-f] <path>...
fulltext check [-t]
Options:
-f, --file Open file first.
-t, --title Check deps for title.
-v, --verbose More verbose output.
"""
opt = docopt(main.__doc__.strip(), args, options_first=True)
config_logging(opt['--verbose'])
if opt['check']:
check_backends(opt['--title'])
elif opt['extract']:
handler = fulltext.get
if opt['--file']:
handler = _handle_open
for path in opt['<path>']:
print(handler(path))
else:
# we should never get here
raise ValueError("don't know how to handle cmd") | Extract text from a file.
Commands:
extract - extract text from path
check - make sure all deps are installed
Usage:
fulltext extract [-v] [-f] <path>...
fulltext check [-t]
Options:
-f, --file Open file first.
-t, --title Check deps for title.
-v, --verbose More verbose output. |
def multi_curve_fit(xs, ys, verbose):
"""
fit multiple functions to the x, y data, return the best fit
"""
#functions = {exponential: p0_exponential, reciprocal: p0_reciprocal, single_reciprocal: p0_single_reciprocal}
functions = {
exponential: p0_exponential,
reciprocal: p0_reciprocal,
#single_reciprocal: p0_single_reciprocal,
simple_reciprocal: p0_simple_reciprocal,
simple_2reciprocal: p0_simple_2reciprocal,
simple_4reciprocal: p0_simple_4reciprocal,
simple_5reciprocal: p0_simple_5reciprocal
}
from scipy.optimize import curve_fit
fit_results = {}
best = ['', np.inf]
for function in functions:
try:
weights = get_weights(xs, ys)
popt, pcov = curve_fit(function, xs, ys, functions[function](xs, ys), maxfev=8000, sigma=weights)
pcov = []
m = measure(function, xs, ys, popt, weights)
fit_results.update({function: {'measure': m, 'popt': popt, 'pcov': pcov}})
for f in fit_results:
if fit_results[f]['measure'] <= best[1]:
best = f, fit_results[f]['measure']
if verbose:
print(str(function), m)
except RuntimeError:
print('no fit found for ', function)
    return fit_results[best[0]]['popt'], fit_results[best[0]]['pcov'], best
def get_channel_id(turn_context: TurnContext) -> str:
"""Get the Channel Id from the current Activity on the Turn Context.
Args:
turn_context (TurnContext): The Turn Context to retrieve the Activity's Channel Id from.
Returns:
str: The Channel Id from the Turn Context's Activity.
"""
if turn_context.activity.channel_id is None:
return ""
else:
        return turn_context.activity.channel_id
def _get_desired_pkg(name, desired):
'''
Helper function that retrieves and nicely formats the desired pkg (and
version if specified) so that helpful information can be printed in the
comment for the state.
'''
if not desired[name] or desired[name].startswith(('<', '>', '=')):
oper = ''
else:
oper = '='
return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])
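Tracing the code as written, a couple of hypothetical calls illustrate the formatting (package name and versions are made up):

# version string carries an explicit comparison operator -> no '=' added
_get_desired_pkg('vim', {'vim': '>=8.0'})   # 'vim>=8.0'
# plain version string -> '=' inserted between name and version
_get_desired_pkg('vim', {'vim': '8.0'})     # 'vim=8.0'
# empty version -> just the package name
_get_desired_pkg('vim', {'vim': None})      # 'vim'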
def enrich_relations(rdf, enrich_mappings, use_narrower, use_transitive):
"""Enrich the SKOS relations according to SKOS semantics, including
subproperties of broader and symmetric related properties. If use_narrower
is True, include inverse narrower relations for all broader relations. If
use_narrower is False, instead remove all narrower relations, replacing
them with inverse broader relations. If use_transitive is True, calculate
transitive hierarchical relationships.
(broaderTransitive, and also narrowerTransitive if use_narrower is
True) and include them in the model.
"""
# 1. first enrich mapping relationships (because they affect regular ones)
if enrich_mappings:
infer.skos_symmetric_mappings(rdf)
infer.skos_hierarchical_mappings(rdf, use_narrower)
# 2. then enrich regular relationships
# related <-> related
infer.skos_related(rdf)
# broaderGeneric -> broader + inverse narrowerGeneric
for s, o in rdf.subject_objects(SKOSEXT.broaderGeneric):
rdf.add((s, SKOS.broader, o))
# broaderPartitive -> broader + inverse narrowerPartitive
for s, o in rdf.subject_objects(SKOSEXT.broaderPartitive):
rdf.add((s, SKOS.broader, o))
infer.skos_hierarchical(rdf, use_narrower)
# transitive closure: broaderTransitive and narrowerTransitive
if use_transitive:
infer.skos_transitive(rdf, use_narrower)
else:
# transitive relationships are not wanted, so remove them
for s, o in rdf.subject_objects(SKOS.broaderTransitive):
rdf.remove((s, SKOS.broaderTransitive, o))
for s, o in rdf.subject_objects(SKOS.narrowerTransitive):
rdf.remove((s, SKOS.narrowerTransitive, o))
    infer.skos_topConcept(rdf)
def show_taghistory():
"""Show history of all known repo/tags for image"""
if not nav:
sys.exit(1)
ecode = 0
try:
result = nav.get_taghistory()
if result:
anchore_utils.print_result(config, result)
except:
anchore_print_err("operation failed")
ecode = 1
contexts['anchore_allimages'].clear()
    sys.exit(ecode)
def toFilter(self, property):
""" Convert this range to a Filter with a tests having a given property.
"""
if self.leftedge == self.rightedge and self.leftop is ge and self.rightop is le:
# equivalent to ==
return Filter(style.SelectorAttributeTest(property, '=', self.leftedge))
try:
return Filter(style.SelectorAttributeTest(property, opstr[self.leftop], self.leftedge),
style.SelectorAttributeTest(property, opstr[self.rightop], self.rightedge))
except KeyError:
try:
return Filter(style.SelectorAttributeTest(property, opstr[self.rightop], self.rightedge))
except KeyError:
try:
return Filter(style.SelectorAttributeTest(property, opstr[self.leftop], self.leftedge))
except KeyError:
                return Filter()
def _set_get_vnetwork_hosts(self, v, load=False):
"""
Setter method for get_vnetwork_hosts, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_hosts (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_hosts is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_hosts() directly.
YANG Description: Shows discovered hosts
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name="get-vnetwork-hosts", rest_name="get-vnetwork-hosts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'host-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_vnetwork_hosts must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name="get-vnetwork-hosts", rest_name="get-vnetwork-hosts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'host-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)""",
})
self.__get_vnetwork_hosts = t
if hasattr(self, '_set'):
        self._set()
def get_instruction(self, idx, off=None):
"""
Get a particular instruction by using (default) the index of the address if specified
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
"""
if self.code is not None:
return self.code.get_bc().get_instruction(idx, off)
    return None
def get_params_type(descriptor):
"""
Return the parameters type of a descriptor (e.g (IC)V)
"""
params = descriptor.split(')')[0][1:].split()
if params:
return [param for param in params]
    return []
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
        val = buf.getvalue().replace('<', r'&lt;', 1)
        val = val.replace('>', r'&gt;', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
        return None
def _init_edges_relationships(rel2src2dsts, rel2dst2srcs):
"""Get the directed edges from GO term to GO term using relationships."""
edge_rel2fromto = {}
relationships = set(rel2src2dsts).union(rel2dst2srcs)
for reltype in relationships:
edge_from_to = []
if reltype in rel2src2dsts:
for parent, children in rel2src2dsts[reltype].items():
for child in children:
edge_from_to.append((child, parent))
if reltype in rel2dst2srcs:
for parent, children in rel2dst2srcs[reltype].items():
for child in children:
edge_from_to.append((child, parent))
edge_rel2fromto[reltype] = edge_from_to
    return edge_rel2fromto
def log_calls(function):
'''
Decorator that logs function calls in their self.log
'''
    def wrapper(self, *args, **kwargs):
        self.log.log(group=function.__name__, message='Enter')
        result = function(self, *args, **kwargs)
        self.log.log(group=function.__name__, message='Exit')
        return result
    return wrapper
def line_break(s, length=76):
"""
    Split a string into lines of at most ``length`` characters.
    :param s: the input string
    :param length: maximum number of characters per line
    :return: the string with newline breaks inserted
"""
x = '\n'.join(s[pos:pos + length] for pos in range(0, len(s), length))
    return x
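A small usage sketch (hypothetical input, default length of 76):

line_break('a' * 160)   # three lines of 76, 76 and 8 characters, joined by '\n'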
def run_sex_check(in_prefix, in_type, out_prefix, base_dir, options):
"""Runs step6 (sexcheck).
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``bfile``).
This function calls the :py:mod:`pyGenClean.SexCheck.sex_check` module. The
    required file type for this module is ``bfile``, hence the need to use
    :py:func:`check_input_files` to check whether the input file type is the
    correct one, or to create it if needed.
.. note::
The :py:mod:`pyGenClean.SexCheck.sex_check` module doesn't return
usable output files. Hence, this function returns the input file prefix
and its type.
"""
# Creating the output directory
os.mkdir(out_prefix)
# We know we need a bfile
required_type = "bfile"
check_input_files(in_prefix, in_type, required_type)
# We need to inject the name of the input file and the name of the output
# prefix
script_prefix = os.path.join(out_prefix, "sexcheck")
options += ["--{}".format(required_type), in_prefix,
"--out", script_prefix]
# We run the script
try:
sex_check.main(options)
except sex_check.ProgramError as e:
msg = "sex_check {}".format(e)
raise ProgramError(msg)
# Reading the hetero file on X
hetero = {}
if os.path.isfile(script_prefix + ".chr23_recodeA.raw.hetero"):
with open(script_prefix + ".chr23_recodeA.raw.hetero", "r") as i_file:
header = {
name: i for i, name in
enumerate(createRowFromPlinkSpacedOutput(i_file.readline()))
}
for required_col in ("PED", "ID", "HETERO"):
if required_col not in header:
msg = "{}: no column named {}".format(
script_prefix + ".chr23_recodeA.raw.hetero",
required_col,
)
raise ProgramError(msg)
# Reading the data
for line in i_file:
row = line.rstrip("\r\n").split("\t")
famid = row[header["PED"]]
indid = row[header["ID"]]
# Formatting the hetero value
het = None
try:
het = "{:.4f}".format(float(row[header["HETERO"]]))
except:
het = "N/A"
hetero[(famid, indid)] = het
# Reading the number of no call on Y
nb_no_call = {}
if os.path.isfile(script_prefix + ".chr24_recodeA.raw.noCall"):
with open(script_prefix + ".chr24_recodeA.raw.noCall", "r") as i_file:
header = {
name: i for i, name in
enumerate(createRowFromPlinkSpacedOutput(i_file.readline()))
}
for required_col in ("PED", "ID", "nbGeno", "nbNoCall"):
if required_col not in header:
msg = "{}: no column named {}".format(
script_prefix + ".chr24_recodeA.raw.noCall",
required_col,
)
raise ProgramError(msg)
# Reading the data
for line in i_file:
row = line.rstrip("\r\n").split("\t")
famid = row[header["PED"]]
indid = row[header["ID"]]
# Getting the statistics
nb_geno = row[header["nbGeno"]]
nb_nocall = row[header["nbNoCall"]]
percent = None
try:
percent = "{:.4f}".format(
float(nb_nocall) / float(nb_geno),
)
except:
percent = "N/A"
nb_no_call[(famid, indid)] = percent
    # Reading the problem file to gather statistics. Note that a dataset
    # without problems will only have the header line (and no data)
nb_problems = 0
table = []
nb_no_genetic = 0
nb_discordant = 0
with open(script_prefix + ".list_problem_sex", "r") as i_file:
# Reading the header
header = i_file.readline().rstrip("\r\n").split("\t")
table.append(header)
header = {name: i for i, name in enumerate(header)}
for required_col in ("FID", "IID", "SNPSEX"):
if required_col not in header:
msg = "{}: no column named {}".format(
script_prefix + ".list_problem_sex",
required_col,
)
raise ProgramError(msg)
# Adding the missing column name
table[-1].append("HET")
table[-1].append(r"\%NOCALL")
# Reading the rest of the data
for line in i_file:
nb_problems += 1
# Creating the row
row = line.rstrip("\r\n").split("\t")
# Counting
if row[header["SNPSEX"]] == "0":
nb_no_genetic += 1
else:
nb_discordant += 1
table.append([
latex_template.sanitize_tex(row[header[name]])
for name in ("FID", "IID", "PEDSEX", "SNPSEX", "STATUS", "F")
])
table[-1].append(
hetero.get((row[header["FID"]], row[header["IID"]]), "N/A"),
)
table[-1].append(
nb_no_call.get((row[header["FID"]], row[header["IID"]]), "N/A")
)
# Getting the value for the maleF option
male_f = sex_check.parser.get_default("maleF")
if "--maleF" in options:
male_f = options[options.index("--maleF") + 1]
# Getting the value for the femaleF option
female_f = sex_check.parser.get_default("femaleF")
if "--femaleF" in options:
female_f = options[options.index("--femaleF") + 1]
# We write a LaTeX summary
latex_file = os.path.join(script_prefix + ".summary.tex")
graphics_paths = set()
try:
with open(latex_file, "w") as o_file:
print >>o_file, latex_template.subsection(sex_check.pretty_name)
text = (
"Using $F$ thresholds of {male_f} and {female_f} for males "
"and females respectively, {nb_problems:,d} sample{plural} "
"had gender problem according to Plink.".format(
male_f=male_f,
female_f=female_f,
nb_problems=nb_problems,
plural="s" if nb_problems > 1 else "",
)
)
print >>o_file, latex_template.wrap_lines(text)
# The float template
float_template = latex_template.jinja2_env.get_template(
"float_template.tex",
)
if nb_problems > 0:
# The label and text for the table
table_label = re.sub(
r"[/\\]",
"_",
script_prefix,
) + "_problems"
text = (
r"Table~\ref{" + table_label + "} summarizes the gender "
"problems encountered during the analysis."
)
print >>o_file, latex_template.wrap_lines(text)
# Getting the template
longtable_template = latex_template.jinja2_env.get_template(
"longtable_template.tex",
)
# Rendering
print >>o_file, longtable_template.render(
table_caption="Summarization of the gender problems "
"encountered during Plink's analysis. "
"HET is the heterozygosity rate on the X "
r"chromosome. \%NOCALL is the percentage of "
"no calls on the Y chromosome.",
table_label=table_label,
nb_col=len(table[1]),
col_alignments="llrrlrrrr",
text_size="scriptsize",
header_data=zip(table[0], [1 for i in table[0]]),
tabular_data=sorted(table[1:], key=lambda item: item[1]),
)
# Getting the templates
graphic_template = latex_template.jinja2_env.get_template(
"graphics_template.tex",
)
# If there is a figure, we add it here
if os.path.isfile(script_prefix + ".png"):
# Adding the figure
figure_label = re.sub(r"[/\\]", "_", script_prefix)
text = (
r"Figure~\ref{" + figure_label + r"} shows the $\bar{y}$ "
r"intensities versus the $\bar{x}$ intensities for each "
"samples. Problematic samples are shown using triangles."
)
print >>o_file, latex_template.wrap_lines(text)
# Getting the paths
graphics_path, path = os.path.split(script_prefix + ".png")
graphics_path = os.path.relpath(graphics_path, base_dir)
print >>o_file, float_template.render(
float_type="figure",
float_placement="H",
float_caption="Gender check using Plink. Mean $x$ and $y$ "
"intensities are shown for each sample. "
"Males are shown in blue, and females in "
"red. Triangles show problematic samples "
"(green for males, mauve for females). "
"Unknown gender are shown in gray.",
float_label=figure_label,
float_content=graphic_template.render(
width=r"0.8\textwidth",
path=latex_template.sanitize_fig_name(path),
),
)
# Adding the path where the graphic is
graphics_paths.add(graphics_path)
# If there is a 'sexcheck.LRR_BAF' directory, then there are LRR
# and BAF plots.
if os.path.isdir(script_prefix + ".LRR_BAF"):
figures = glob(
os.path.join(script_prefix + ".LRR_BAF", "*.png"),
)
if len(figures) > 0:
# Getting the sample IDs
sample_ids = [
re.search(
"^baf_lrr_(\S+)_lrr_baf.png$",
os.path.basename(figure),
) for figure in figures
]
sample_ids = [
"unknown sample" if not sample else sample.group(1)
for sample in sample_ids
]
# Sorting according to sample IDs
sorted_indexes = sorted(range(len(figures)),
key=figures.__getitem__)
figures = [figures[i] for i in sorted_indexes]
sample_ids = [sample_ids[i] for i in sorted_indexes]
# Getting the labels
labels = [
re.sub(
r"[/\\]",
"_",
script_prefix + "_baf_lrr_" +
os.path.splitext(sample)[0],
) for sample in sample_ids
]
fig_1 = labels[0]
fig_2 = ""
if len(figures) > 1:
fig_2 = labels[-1]
text = (
"Figure" + ("s" if len(figures) > 1 else "") +
r"~\ref{" + fig_1 + "} " +
(r"to \ref{" + fig_2 + "} " if fig_2 else "") +
"show" + (" " if len(figures) > 1 else "s ") + "the "
"log R ratio and the B allele frequency versus the "
"position on chromosome X and Y for the problematic "
"sample{}.".format("s" if len(figures) > 1 else "")
)
print >>o_file, latex_template.wrap_lines(text)
zipped = zip(figures, sample_ids, labels)
for figure, sample_id, label in zipped:
sample_id = latex_template.sanitize_tex(sample_id)
# Getting the paths
graphics_path, path = os.path.split(figure)
graphics_path = os.path.relpath(graphics_path,
base_dir)
caption = (
"Plots showing the log R ratio and the B allele "
"frequency for chromosome X and Y (on the left "
"and right, respectively) for sample "
"{}.".format(sample_id)
)
print >>o_file, float_template.render(
float_type="figure",
float_placement="H",
float_caption=caption,
float_label=label,
float_content=graphic_template.render(
width=r"\textwidth",
path=latex_template.sanitize_fig_name(path),
),
)
# Adding the path where the graphic is
graphics_paths.add(graphics_path)
except IOError:
msg = "{}: cannot write LaTeX summary".format(latex_file)
raise ProgramError(msg)
# Writing the summary results
with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
print >>o_file, "# {}".format(script_prefix)
print >>o_file, "Number of samples with gender problem"
print >>o_file, " - no genetic gender\t{:,d}".format(nb_no_genetic)
print >>o_file, " - discordant gender\t{:,d}".format(nb_discordant)
print >>o_file, "---"
# We know this step does not produce a new data set, so we return the
# original one
return _StepResult(
next_file=in_prefix,
next_file_type=required_type,
latex_summary=latex_file,
description=sex_check.desc,
long_description=sex_check.long_desc,
graph_path=graphics_paths,
    )
def restore(name=None, **kwargs):
'''
Make sure that the system contains the packages and repos from a
frozen state.
Read the list of packages and repositories from the freeze file,
and compare it with the current list of packages and repos. If
    there is any difference, all the missing packages and repos will
    be installed, and all the extra packages and repos will be
removed.
As this module is build on top of the pkg module, the user can
send extra attributes to the underlying pkg module via kwargs.
This function will call ``pkg.list_repos``, ``pkg.mod_repo``,
``pkg.list_pkgs``, ``pkg.install``, ``pkg.remove`` and
``pkg.del_repo``, and any additional arguments will be passed
through to those functions.
name
Name of the frozen state. Optional.
CLI Example:
.. code-block:: bash
salt '*' freezer.restore
salt '*' freezer.restore root=/chroot
'''
if not status(name):
raise CommandExecutionError('Frozen state not found.')
frozen_pkgs = {}
frozen_repos = {}
for name, content in zip(_paths(name), (frozen_pkgs, frozen_repos)):
with fopen(name) as fp:
content.update(json.load(fp))
# The ordering of removing or adding packages and repos can be
# relevant, as maybe some missing package comes from a repo that
    # is also missing, so it cannot be installed. But it can also happen
# that a missing package comes from a repo that is present, but
# will be removed.
#
    # So the proposed order is:
# - Add missing repos
# - Add missing packages
# - Remove extra packages
# - Remove extra repos
safe_kwargs = clean_kwargs(**kwargs)
# Note that we expect that the information stored in list_XXX
# match with the mod_XXX counterpart. If this is not the case the
# recovery will be partial.
res = {
'pkgs': {'add': [], 'remove': []},
'repos': {'add': [], 'remove': []},
'comment': [],
}
# Add missing repositories
repos = __salt__['pkg.list_repos'](**safe_kwargs)
missing_repos = set(frozen_repos) - set(repos)
for repo in missing_repos:
try:
# In Python 2 we cannot do advance destructuring, so we
# need to create a temporary dictionary that will merge
# all the parameters
_tmp_kwargs = frozen_repos[repo].copy()
_tmp_kwargs.update(safe_kwargs)
__salt__['pkg.mod_repo'](repo, **_tmp_kwargs)
res['repos']['add'].append(repo)
log.info('Added missing repository %s', repo)
except Exception as e:
msg = 'Error adding %s repository: %s'
log.error(msg, repo, e)
res['comment'].append(msg % (repo, e))
# Add missing packages
# NOTE: we can remove the `for` using `pkgs`. This will improve
    # performance, but I want to have a more detailed report of what
    # packages are installed or failed.
pkgs = __salt__['pkg.list_pkgs'](**safe_kwargs)
missing_pkgs = set(frozen_pkgs) - set(pkgs)
for pkg in missing_pkgs:
try:
__salt__['pkg.install'](name=pkg, **safe_kwargs)
res['pkgs']['add'].append(pkg)
log.info('Added missing package %s', pkg)
except Exception as e:
msg = 'Error adding %s package: %s'
log.error(msg, pkg, e)
res['comment'].append(msg % (pkg, e))
# Remove extra packages
pkgs = __salt__['pkg.list_pkgs'](**safe_kwargs)
extra_pkgs = set(pkgs) - set(frozen_pkgs)
for pkg in extra_pkgs:
try:
__salt__['pkg.remove'](name=pkg, **safe_kwargs)
res['pkgs']['remove'].append(pkg)
log.info('Removed extra package %s', pkg)
except Exception as e:
msg = 'Error removing %s package: %s'
log.error(msg, pkg, e)
res['comment'].append(msg % (pkg, e))
# Remove extra repositories
repos = __salt__['pkg.list_repos'](**safe_kwargs)
extra_repos = set(repos) - set(frozen_repos)
for repo in extra_repos:
try:
__salt__['pkg.del_repo'](repo, **safe_kwargs)
res['repos']['remove'].append(repo)
log.info('Removed extra repository %s', repo)
except Exception as e:
msg = 'Error removing %s repository: %s'
log.error(msg, repo, e)
res['comment'].append(msg % (repo, e))
    return res
def onReactionRemoved(
self,
mid=None,
author_id=None,
thread_id=None,
thread_type=None,
ts=None,
msg=None,
):
"""
    Called when the client is listening, and somebody removes a reaction from a message
:param mid: Message ID, that user reacted to
:param author_id: The ID of the person who removed reaction
:param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
:param ts: A timestamp of the action
    :param msg: A full set of the data received
:type thread_type: models.ThreadType
"""
log.info(
"{} removed reaction from {} message in {} ({})".format(
author_id, mid, thread_id, thread_type
)
    )
def mimeData( self, items ):
"""
Returns the mime data for dragging for this instance.
:param items | [<QTableWidgetItem>, ..]
"""
func = self.dataCollector()
if ( func ):
return func(self, items)
    return super(XTableWidget, self).mimeData(items)
def messages(self):
"""
Access the messages
:returns: twilio.rest.messaging.v1.session.message.MessageList
:rtype: twilio.rest.messaging.v1.session.message.MessageList
"""
if self._messages is None:
self._messages = MessageList(self._version, session_sid=self._solution['sid'], )
    return self._messages
def copy(self):
"""Return a copy where the float usage is hard-coded to mimic the
behavior of the real os.stat_result.
"""
stat_result = copy(self)
stat_result.use_float = self.use_float
    return stat_result
def delete_all_objects(self, nms, async_=False):
"""
Deletes all objects from this container.
By default the call will block until all objects have been deleted. By
passing True for the 'async_' parameter, this method will not block, and
instead return an object that can be used to follow the progress of the
deletion. When deletion is complete the bulk deletion object's
'results' attribute will be populated with the information returned
from the API call. In synchronous mode this is the value that is
returned when the call completes. It is a dictionary with the following
keys:
deleted - the number of objects deleted
not_found - the number of objects not found
status - the HTTP return status code. '200 OK' indicates success
errors - a list of any errors returned by the bulk delete call
"""
if nms is None:
nms = self.api.list_object_names(self.name, full_listing=True)
    return self.api.bulk_delete(self.name, nms, async_=async_)
def remove(self, key, value):
"""
Transactional implementation of :func:`MultiMap.remove(key, value)
<hazelcast.proxy.multi_map.MultiMap.remove>`
:param key: (object), the key of the entry to remove.
:param value: (object), the value of the entry to remove.
:return:
"""
check_not_none(key, "key can't be none")
check_not_none(value, "value can't be none")
return self._encode_invoke(transactional_multi_map_remove_entry_codec, key=self._to_data(key),
                               value=self._to_data(value))
def create_build_paths(context: Context):
"""
Creates directories needed for build outputs
"""
paths = [context.app.asset_build_path, context.app.screenshots_build_path, context.app.collected_assets_path]
for path in filter(None, paths):
        os.makedirs(path, exist_ok=True)
def lines2mecab(lines, **kwargs):
''' Use mecab to parse many lines '''
sents = []
for line in lines:
sent = txt2mecab(line, **kwargs)
sents.append(sent)
    return sents
def aggregate(self, block_size):
'''
geo.aggregate(block_size)
Returns copy of raster aggregated to smaller resolution, by adding cells.
'''
raster2 = block_reduce(self.raster, block_size, func=np.ma.sum)
geot = self.geot
geot = (geot[0], block_size[0] * geot[1], geot[2], geot[3], geot[4],
block_size[1] * geot[-1])
return GeoRaster(raster2, geot, nodata_value=self.nodata_value,\
                     projection=self.projection, datatype=self.datatype)
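Assuming block_reduce is skimage.measure.block_reduce and np.ma.sum sums each block (so masked nodata cells are respected), a 4x4 raster aggregated with block_size=(2, 2) collapses into a 2x2 raster of block sums, and the pixel width/height entries of the geotransform are scaled by the block size. A sketch of the reduction step alone, using a plain array and np.sum for brevity:

import numpy as np
from skimage.measure import block_reduce

raster = np.arange(16).reshape(4, 4)
block_reduce(raster, (2, 2), func=np.sum)
# array([[10, 18],
#        [42, 50]])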
def list_port_fwd(zone, permanent=True):
'''
List port forwarding
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' firewalld.list_port_fwd public
'''
ret = []
cmd = '--zone={0} --list-forward-ports'.format(zone)
if permanent:
cmd += ' --permanent'
for i in __firewall_cmd(cmd).splitlines():
(src, proto, dest, addr) = i.split(':')
ret.append(
{'Source port': src.split('=')[1],
'Protocol': proto.split('=')[1],
'Destination port': dest.split('=')[1],
'Destination address': addr.split('=')[1]}
)
    return ret
def _pull_out_unaffected_blocks_rhs(rest, rhs, out_port, in_port):
"""Similar to :func:`_pull_out_unaffected_blocks_lhs` but on the RHS of a
series product self-feedback.
"""
_, block_index = rhs.index_in_block(in_port)
rest = tuple(rest)
bs = rhs.block_structure
(nbefore, nblock, nafter) = (sum(bs[:block_index]),
bs[block_index],
sum(bs[block_index + 1:]))
before, block, after = rhs.get_blocks((nbefore, nblock, nafter))
if before != cid(nbefore) or after != cid(nafter):
outer_rhs = before + cid(nblock - 1) + after
inner_rhs = cid(nbefore) + block + cid(nafter)
return Feedback.create(SeriesProduct.create(*(rest + (inner_rhs,))),
out_port=out_port, in_port=in_port) << outer_rhs
elif block == cid(nblock):
outer_rhs = before + cid(nblock - 1) + after
return Feedback.create(SeriesProduct.create(*rest),
out_port=out_port, in_port=in_port) << outer_rhs
    raise CannotSimplify()
def configure_logging(level):
"""
Configure global log level to given one
:param level: Level (INFO | DEBUG | WARN | ERROR)
:return:
"""
global logging_level
logging_level = logging.ERROR
if "info" == level.lower():
logging_level = logging.INFO
elif "warn" == level.lower():
logging_level = logging.WARNING
elif "debug" == level.lower():
        logging_level = logging.DEBUG
def _get_grammar_errors(self,pos,text,tokens):
"""
Internal function to get the number of grammar errors in given text
pos - part of speech tagged text (list)
text - normal text (list)
tokens - list of lists of tokenized text
"""
word_counts = [max(len(t),1) for t in tokens]
good_pos_tags = []
min_pos_seq=2
max_pos_seq=4
bad_pos_positions=[]
for i in xrange(0, len(text)):
pos_seq = [tag[1] for tag in pos[i]]
pos_ngrams = util_functions.ngrams(pos_seq, min_pos_seq, max_pos_seq)
long_pos_ngrams=[z for z in pos_ngrams if z.count(' ')==(max_pos_seq-1)]
bad_pos_tuples=[[z,z+max_pos_seq] for z in xrange(0,len(long_pos_ngrams)) if long_pos_ngrams[z] not in self._good_pos_ngrams]
bad_pos_tuples.sort(key=operator.itemgetter(1))
to_delete=[]
for m in reversed(xrange(len(bad_pos_tuples)-1)):
start, end = bad_pos_tuples[m]
for j in xrange(m+1, len(bad_pos_tuples)):
lstart, lend = bad_pos_tuples[j]
if lstart >= start and lstart <= end:
bad_pos_tuples[m][1]=bad_pos_tuples[j][1]
to_delete.append(j)
fixed_bad_pos_tuples=[bad_pos_tuples[z] for z in xrange(0,len(bad_pos_tuples)) if z not in to_delete]
bad_pos_positions.append(fixed_bad_pos_tuples)
overlap_ngrams = [z for z in pos_ngrams if z in self._good_pos_ngrams]
if (len(pos_ngrams)-len(overlap_ngrams))>0:
divisor=len(pos_ngrams)/len(pos_seq)
else:
divisor=1
if divisor == 0:
divisor=1
good_grammar_ratio = (len(pos_ngrams)-len(overlap_ngrams))/divisor
good_pos_tags.append(good_grammar_ratio)
    return good_pos_tags,bad_pos_positions
def render_xml_to_string(template, input, params=None):
"""
Transforms ``input`` using ``template``, which should be an xslt.
:param template: an xslt template name.
    :param input: a string that contains xml
:param params: A dictionary containing xslt parameters. Use :func:`~easymode.xslt.prepare_string_param`\
on strings you want to pass in.
:rtype: :class:`unicode`
"""
xsl_path = find_template_path(template)
result = transform(input, str(xsl_path), params)
    return result
def load_locale(locale, icu=False):
"""
Return data of locale
:param locale:
:return:
"""
if locale not in locales:
raise NotImplementedError("The locale '%s' is not supported" % locale)
if locale not in __locale_caches:
mod = __import__(__name__, fromlist=[locale], level=0)
__locale_caches[locale] = getattr(mod, locale)
    return __locale_caches[locale]
def _compute_f3(self, C, mag):
"""
Compute f3 term (eq.6, page 106)
NOTE: In the original manuscript, for the case 5.8 < mag < c1,
the term in the numerator '(mag - 5.8)' is missing, while is
present in the software used for creating the verification tables
"""
if mag <= 5.8:
return C['a5']
elif 5.8 < mag < C['c1']:
return (
C['a5'] +
(C['a6'] - C['a5']) * (mag - 5.8) / (C['c1'] - 5.8)
)
else:
        return C['a6']
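Transcribing the branches above into a single piecewise expression (with M the magnitude):

    \( f_3 = \begin{cases} a_5 & M \le 5.8 \\ a_5 + (a_6 - a_5)\,\dfrac{M - 5.8}{c_1 - 5.8} & 5.8 < M < c_1 \\ a_6 & M \ge c_1 \end{cases} \)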
def size(self):
"""Total number of grid points."""
# Since np.prod(()) == 1.0 we need to handle that by ourselves
return (0 if self.shape == () else
            int(np.prod(self.shape, dtype='int64')))
def domains(request):
"""
A page with number of services and layers faceted on domains.
"""
url = ''
query = '*:*&facet=true&facet.limit=-1&facet.pivot=domain_name,service_id&wt=json&indent=true&rows=0'
if settings.SEARCH_TYPE == 'elasticsearch':
url = '%s/select?q=%s' % (settings.SEARCH_URL, query)
if settings.SEARCH_TYPE == 'solr':
url = '%s/solr/hypermap/select?q=%s' % (settings.SEARCH_URL, query)
LOGGER.debug(url)
response = urllib2.urlopen(url)
data = response.read().replace('\n', '')
# stats
layers_count = Layer.objects.all().count()
services_count = Service.objects.all().count()
template = loader.get_template('aggregator/index.html')
context = RequestContext(request, {
'data': data,
'layers_count': layers_count,
'services_count': services_count,
})
return HttpResponse(template.render(context)) | A page with number of services and layers faceted on domains. |
def data_vector_from_blurred_mapping_matrix_and_data(blurred_mapping_matrix, image_1d, noise_map_1d):
"""Compute the hyper vector *D* from a blurred mapping matrix *f* and the 1D image *d* and 1D noise-map *\sigma* \
(see Warren & Dye 2003).
Parameters
-----------
blurred_mapping_matrix : ndarray
The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
image_1d : ndarray
Flattened 1D array of the observed image the inversion is fitting.
noise_map_1d : ndarray
Flattened 1D array of the noise-map used by the inversion during the fit.
"""
mapping_shape = blurred_mapping_matrix.shape
data_vector = np.zeros(mapping_shape[1])
for image_index in range(mapping_shape[0]):
for pix_index in range(mapping_shape[1]):
data_vector[pix_index] += image_1d[image_index] * \
blurred_mapping_matrix[image_index, pix_index] / (noise_map_1d[image_index] ** 2.0)
return data_vector | Compute the hyper vector *D* from a blurred mapping matrix *f* and the 1D image *d* and 1D noise-map *\sigma* \
(see Warren & Dye 2003).
Parameters
-----------
blurred_mapping_matrix : ndarray
The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
image_1d : ndarray
Flattened 1D array of the observed image the inversion is fitting.
noise_map_1d : ndarray
Flattened 1D array of the noise-map used by the inversion during the fit. |
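The nested loops compute D_j = sum_i d_i f_ij / sigma_i**2; below is a numpy sketch of the equivalent vectorized form, with toy array shapes chosen only for illustration:
import numpy as np

# Toy shapes: 5 image pixels, 3 pixelization pixels (illustrative only).
blurred_mapping_matrix = np.random.rand(5, 3)
image_1d = np.random.rand(5)
noise_map_1d = np.full(5, 0.1)

# Vectorized equivalent of the double loop above: D = f^T (d / sigma^2)
data_vector = blurred_mapping_matrix.T @ (image_1d / noise_map_1d ** 2.0)
print(data_vector.shape)  # (3,)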
def dump(self):
"""Return dictionary with current statistical information"""
data = dict(
# Sessions
sessions_active=self.sess_active,
# Connections
connections_active=self.conn_active,
connections_ps=self.conn_ps.last_average,
# Packets
packets_sent_ps=self.pack_sent_ps.last_average,
packets_recv_ps=self.pack_recv_ps.last_average
)
for k, v in self.sess_transports.items():
data['transp_' + k] = v
return data | Return dictionary with current statistical information |
def DeserializeTX(buffer):
"""
Deserialize the stream into a Transaction object.
Args:
buffer (BytesIO): stream to deserialize the Transaction from.
Returns:
neo.Core.TX.Transaction:
"""
mstream = MemoryStream(buffer)
reader = BinaryReader(mstream)
tx = Transaction.DeserializeFrom(reader)
return tx | Deserialize the stream into a Transaction object.
Args:
buffer (BytesIO): stream to deserialize the Transaction from.
Returns:
neo.Core.TX.Transaction: |
def reserve(self, location=None, force=False, wait_for_up=True, timeout=80):
""" Reserve port and optionally wait for port to come up.
:param location: port location as 'ip/module/port'. If None, the location will be taken from the configuration.
:param force: whether to revoke existing reservation (True) or not (False).
:param wait_for_up: True - wait for port to come up, False - return immediately.
:param timeout: how long (seconds) to wait for port to come up.
"""
if not location or is_local_host(location):
return
hostname, card, port = location.split('/')
chassis = self.root.hw.get_chassis(hostname)
# todo - test if port owned by me.
if force:
chassis.get_card(int(card)).get_port(int(port)).release()
try:
phy_port = chassis.get_card(int(card)).get_port(int(port))
except KeyError as _:
raise TgnError('Physical port {} unreachable'.format(location))
self.set_attributes(commit=True, connectedTo=phy_port.ref)
while self.get_attribute('connectedTo') == '::ixNet::OBJ-null':
time.sleep(1)
if wait_for_up:
self.wait_for_up(timeout) | Reserve port and optionally wait for port to come up.
:param location: port location as 'ip/module/port'. If None, the location will be taken from the configuration.
:param force: whether to revoke existing reservation (True) or not (False).
:param wait_for_up: True - wait for port to come up, False - return immediately.
:param timeout: how long (seconds) to wait for port to come up. |
def enable_contactgroup_svc_notifications(self, contactgroup):
"""Enable service notifications for a contactgroup
Format of the line that triggers function call::
ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>
:param contactgroup: contactgroup to enable
:type contactgroup: alignak.objects.contactgroup.Contactgroup
:return: None
"""
for contact_id in contactgroup.get_contacts():
self.enable_contact_svc_notifications(self.daemon.contacts[contact_id]) | Enable service notifications for a contactgroup
Format of the line that triggers function call::
ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS;<contactgroup_name>
:param contactgroup: contactgroup to enable
:type contactgroup: alignak.objects.contactgroup.Contactgroup
:return: None |
def read_dependencies(filename):
"""Read in the dependencies from the virtualenv requirements file.
"""
dependencies = []
filepath = os.path.join('requirements', filename)
with open(filepath, 'r') as stream:
for line in stream:
package = line.strip().split('#')[0].strip()
if package and package.split(' ')[0] != '-r':
dependencies.append(package)
return dependencies | Read in the dependencies from the virtualenv requirements file. |
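A hypothetical usage sketch, assuming read_dependencies is importable and a requirements/base.txt file exists relative to the working directory:
# requirements/base.txt might contain lines such as:
#   requests>=2.20  # HTTP client
#   -r common.txt
#   six
install_requires = read_dependencies('base.txt')
print(install_requires)  # ['requests>=2.20', 'six'] -- comments and '-r' includes are skipped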
def run(self):
''' run all patterns in the playbook '''
plays = []
matched_tags_all = set()
unmatched_tags_all = set()
# loop through all patterns and run them
self.callbacks.on_start()
for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
play = Play(self, play_ds, play_basedir)
matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
matched_tags_all = matched_tags_all | matched_tags
unmatched_tags_all = unmatched_tags_all | unmatched_tags
# if we have matched_tags, the play must be run.
# if the play contains no tasks, assume we just want to gather facts
if (len(matched_tags) > 0 or len(play.tasks()) == 0):
plays.append(play)
# if the playbook is invoked with --tags that don't exist at all in the playbooks
# then we need to raise an error so that the user can correct the arguments.
unknown_tags = set(self.only_tags) - (matched_tags_all | unmatched_tags_all)
unknown_tags.discard('all')
if len(unknown_tags) > 0:
unmatched_tags_all.discard('all')
msg = 'tag(s) not found in playbook: %s. possible values: %s'
unknown = ','.join(sorted(unknown_tags))
unmatched = ','.join(sorted(unmatched_tags_all))
raise errors.AnsibleError(msg % (unknown, unmatched))
for play in plays:
if not self._run_play(play):
break
# summarize the results
results = {}
for host in self.stats.processed.keys():
results[host] = self.stats.summarize(host)
return results | run all patterns in the playbook |
def write(self, handle):
"""Write metadata to handle."""
handle.write(u"\t".join(self.columns))
handle.write(u"\n")
for row in self.rows:
row.write(handle) | Write metadata to handle. |
def read(self):
"""
Read one character from buffer.
:Returns:
Current character or None if end of buffer is reached
"""
if self._current >= len(self._data):
return None
self._current += 1
return self._data[self._current - 1] | Read one character from buffer.
:Returns:
Current character or None if end of buffer is reached |
def pool_function(args):
"""
A wrapper for being able to launch all the threads.
We will use python-emailahoy library for the verification.
Args:
-----
args: reception of the parameters for getPageWrapper as a tuple.
Returns:
--------
A dictionary representing whether the verification was ended
successfully. The format is as follows:
```
{"platform": "str(domain["value"])", "status": "DONE", "data": aux}
```
"""
is_valid = True
try:
checker = emailahoy.VerifyEmail()
status, message = checker.verify_email_smtp(args, from_host='gmail.com', from_email='[email protected]')
if status == 250:
print("\t[*] Verification of '{}' status: {}. Details:\n\t\t{}".format(general.success(args), general.success("SUCCESS ({})".format(str(status))), message.replace('\n', '\n\t\t')))
is_valid = True
else:
print("\t[*] Verification of '{}' status: {}. Details:\n\t\t{}".format(general.error(args), general.error("FAILED ({})".format(str(status))), message.replace('\n', '\n\t\t')))
is_valid = False
    except Exception as e:
print(general.warning("WARNING. An error was found when performing the search. You can omit this message.\n" + str(e)))
is_valid = False
aux = {}
aux["type"] = "i3visio.profile"
aux["value"] = "Email - " + args
aux["attributes"] = general.expandEntitiesFromEmail(args)
platform = aux["attributes"][2]["value"].title()
aux["attributes"].append({
"type": "i3visio.platform",
"value": platform,
"attributes": []
}
)
if is_valid:
return {"platform": platform, "status": "DONE", "data": aux}
else:
return {"platform": platform, "status": "DONE", "data": {}} | A wrapper for being able to launch all the threads.
We will use python-emailahoy library for the verification.
Args:
-----
args: reception of the parameters for getPageWrapper as a tuple.
Returns:
--------
A dictionary representing whether the verification was ended
successfully. The format is as follows:
```
{"platform": "str(domain["value"])", "status": "DONE", "data": aux}
``` |
def pixel_to_q(self, row: float, column: float):
"""Return the q coordinates of a given pixel.
Inputs:
row: float
the row (vertical) coordinate of the pixel
column: float
the column (horizontal) coordinate of the pixel
Coordinates are 0-based and calculated from the top left corner.
"""
qrow = 4 * np.pi * np.sin(
0.5 * np.arctan(
(row - float(self.header.beamcentery)) *
float(self.header.pixelsizey) /
float(self.header.distance))) / float(self.header.wavelength)
qcol = 4 * np.pi * np.sin(0.5 * np.arctan(
(column - float(self.header.beamcenterx)) *
float(self.header.pixelsizex) /
float(self.header.distance))) / float(self.header.wavelength)
return qrow, qcol | Return the q coordinates of a given pixel.
Inputs:
row: float
the row (vertical) coordinate of the pixel
column: float
the column (horizontal) coordinate of the pixel
Coordinates are 0-based and calculated from the top left corner. |
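Each axis applies q = 4*pi*sin(0.5*arctan((pixel - center) * pixelsize / distance)) / wavelength; a self-contained numpy sketch with placeholder geometry (the numbers are illustrative, not from a real header):
import numpy as np

wavelength = 1.542   # Angstrom (hypothetical)
distance = 1000.0    # mm, sample-to-detector
pixelsize = 0.172    # mm
beamcenter = 256.0   # pixel index of the beam centre on this axis

def pixel_to_q_1d(pixel):
    # scattering vector magnitude along one detector axis
    return 4 * np.pi * np.sin(
        0.5 * np.arctan((pixel - beamcenter) * pixelsize / distance)) / wavelength

print(pixel_to_q_1d(300.0))  # q in 1/Angstrom for a pixel 44 px from the centre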
def collapse(self, msgpos):
"""collapse message at given position"""
MT = self._tree[msgpos]
MT.collapse(MT.root)
self.focus_selected_message() | collapse message at given position |
def _cas_2(self):
''' Longitude overlap (2 images). '''
lonc_left = self._format_lon(self.lonm)
lonc_right = self._format_lon(self.lonM)
latc = self._format_lat(self.latm)
print(lonc_left, lonc_right, self.lonm, self.lonM)
img_name_left = self._format_name_map(lonc_left, latc)
print(img_name_left)
img_left = BinaryTable(img_name_left, self.path_pdsfiles)
X_left, Y_left, Z_left = img_left.extract_grid(self.lonm,
float(
img_left.EASTERNMOST_LONGITUDE),
self.latm,
self.latM)
img_name_right = self._format_name_map(lonc_right, latc)
img_right = BinaryTable(img_name_right, self.path_pdsfiles)
X_right, Y_right, Z_right = img_right.extract_grid(float(img_right.WESTERNMOST_LONGITUDE),
self.lonM,
self.latm,
self.latM)
X_new = np.hstack((X_left, X_right))
Y_new = np.hstack((Y_left, Y_right))
Z_new = np.hstack((Z_left, Z_right))
return X_new, Y_new, Z_new | Longitude overlap (2 images). |
def _make_return_edges(self):
"""
For each returning function, create return edges in self.graph.
:return: None
"""
for func_addr, func in self.functions.items():
if func.returning is False:
continue
# get the node on CFG
if func.startpoint is None:
l.warning('Function %#x does not have a startpoint (yet).', func_addr)
continue
startpoint = self.model.get_any_node(func.startpoint.addr)
if startpoint is None:
# weird...
l.warning('No CFGNode is found for function %#x in _make_return_edges().', func_addr)
continue
endpoints = self._get_return_sources(func)
# get all callers
callers = self.model.get_predecessors(startpoint, jumpkind='Ijk_Call')
# for each caller, since they all end with a call instruction, get the immediate successor
return_targets = itertools.chain.from_iterable(
self.model.get_successors(caller, excluding_fakeret=False, jumpkind='Ijk_FakeRet') for caller in callers
)
return_targets = set(return_targets)
for ep in endpoints:
src = self.model.get_any_node(ep.addr)
for rt in return_targets:
if not src.instruction_addrs:
ins_addr = None
else:
if self.project.arch.branch_delay_slot:
if len(src.instruction_addrs) > 1:
ins_addr = src.instruction_addrs[-2]
else:
l.error('At %s: expecting more than one instruction. Only got one.', src)
ins_addr = None
else:
ins_addr = src.instruction_addrs[-1]
self._graph_add_edge(rt, src, 'Ijk_Ret', ins_addr, DEFAULT_STATEMENT) | For each returning function, create return edges in self.graph.
:return: None |
def create_win_salt_restart_task():
'''
Create a task in Windows task scheduler to enable restarting the salt-minion
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' service.create_win_salt_restart_task()
'''
cmd = 'cmd'
args = '/c ping -n 3 127.0.0.1 && net stop salt-minion && net start ' \
'salt-minion'
return __salt__['task.create_task'](name='restart-salt-minion',
user_name='System',
force=True,
action_type='Execute',
cmd=cmd,
arguments=args,
trigger_type='Once',
start_date='1975-01-01',
start_time='01:00') | Create a task in Windows task scheduler to enable restarting the salt-minion
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' service.create_win_salt_restart_task() |
def dict_table(cls,
d,
order=None,
header=None,
sort_keys=True,
show_none="",
max_width=40):
"""prints a pretty table from an dict of dicts
:param d: A a dict with dicts of the same type.
Each key will be a column
:param order: The order in which the columns are printed.
The order is specified by the key names of the dict.
:param header: The Header of each of the columns
:type header: A list of string
:param sort_keys: Key(s) of the dict to be used for sorting.
        This specifies the column(s) in the table for sorting.
:type sort_keys: string or a tuple of string (for sorting with multiple columns)
:param show_none: prints None if True for None values otherwise ""
:type show_none: bool
:param max_width: maximum width for a cell
:type max_width: int
"""
def _keys():
all_keys = []
for e in d:
keys = d[e].keys()
all_keys.extend(keys)
return list(set(all_keys))
# noinspection PyBroadException
def _get(item, key):
try:
tmp = str(d[item][key])
if tmp == "None":
tmp = show_none
except:
tmp = ' '
return tmp
if d is None or d == {}:
return None
if order is None:
order = _keys()
if header is None and order is not None:
header = order
elif header is None:
header = _keys()
x = PrettyTable(header)
x.max_width = max_width
if sort_keys:
if type(sort_keys) is str:
sorted_list = sorted(d, key=lambda x: d[x][sort_keys])
elif type(sort_keys) == tuple:
sorted_list = sorted(d, key=lambda x: tuple(
[d[x][sort_key] for sort_key in sort_keys]))
else:
sorted_list = d
else:
sorted_list = d
for element in sorted_list:
values = []
for key in order:
values.append(_get(element, key))
x.add_row(values)
x.align = "l"
        return x | prints a pretty table from a dict of dicts
:param d: A dict with dicts of the same type.
Each key will be a column
:param order: The order in which the columns are printed.
The order is specified by the key names of the dict.
:param header: The Header of each of the columns
:type header: A list of string
:param sort_keys: Key(s) of the dict to be used for sorting.
This specifies the column(s) in the table for sorting.
:type sort_keys: string or a tuple of string (for sorting with multiple columns)
:param show_none: prints None if True for None values otherwise ""
:type show_none: bool
:param max_width: maximum width for a cell
:type max_width: int |
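A hypothetical usage sketch; Printer stands in for the enclosing class (its real name is not shown in this excerpt) and prettytable must be installed:
vms = {
    'vm-1': {'name': 'vm-1', 'state': 'running', 'ip': '10.0.0.4'},
    'vm-2': {'name': 'vm-2', 'state': 'stopped', 'ip': None},
}
# Columns follow `order`; None values are rendered as the empty string by default.
table = Printer.dict_table(vms, order=['name', 'state', 'ip'], sort_keys='name')
print(table)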
def edge_betweenness_bin(G):
'''
Edge betweenness centrality is the fraction of all shortest paths in
the network that contain a given edge. Edges with high values of
betweenness centrality participate in a large number of shortest paths.
Parameters
----------
    G : NxN np.ndarray
binary directed/undirected connection matrix
Returns
-------
EBC : NxN np.ndarray
edge betweenness centrality matrix
BC : Nx1 np.ndarray
node betweenness centrality vector
Notes
-----
Betweenness centrality may be normalised to the range [0,1] as
BC/[(N-1)(N-2)], where N is the number of nodes in the network.
'''
n = len(G)
BC = np.zeros((n,)) # vertex betweenness
EBC = np.zeros((n, n)) # edge betweenness
for u in range(n):
D = np.zeros((n,))
D[u] = 1 # distance from u
NP = np.zeros((n,))
NP[u] = 1 # number of paths from u
P = np.zeros((n, n)) # predecessors
Q = np.zeros((n,), dtype=int) # indices
q = n - 1 # order of non-increasing distance
Gu = G.copy()
V = np.array([u])
while V.size:
Gu[:, V] = 0 # remove remaining in-edges
for v in V:
Q[q] = v
q -= 1
W, = np.where(Gu[v, :]) # neighbors of V
for w in W:
if D[w]:
NP[w] += NP[v] # NP(u->w) sum of old and new
P[w, v] = 1 # v is a predecessor
else:
D[w] = 1
NP[w] = NP[v] # NP(u->v) = NP of new path
P[w, v] = 1 # v is a predecessor
V, = np.where(np.any(Gu[V, :], axis=0))
if np.any(np.logical_not(D)): # if some vertices unreachable
Q[:q], = np.where(np.logical_not(D)) # ...these are first in line
DP = np.zeros((n,)) # dependency
for w in Q[:n - 1]:
BC[w] += DP[w]
for v in np.where(P[w, :])[0]:
DPvw = (1 + DP[w]) * NP[v] / NP[w]
DP[v] += DPvw
EBC[v, w] += DPvw
return EBC, BC | Edge betweenness centrality is the fraction of all shortest paths in
the network that contain a given edge. Edges with high values of
betweenness centrality participate in a large number of shortest paths.
Parameters
----------
G : NxN np.ndarray
binary directed/undirected connection matrix
Returns
-------
EBC : NxN np.ndarray
edge betweenness centrality matrix
BC : Nx1 np.ndarray
node betweenness centrality vector
Notes
-----
Betweenness centrality may be normalised to the range [0,1] as
BC/[(N-1)(N-2)], where N is the number of nodes in the network. |
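A small sanity check, assuming edge_betweenness_bin above is importable; on an undirected 3-node path the middle node carries all indirect shortest paths:
import numpy as np

# Undirected 3-node path: 0 - 1 - 2 (symmetric binary adjacency matrix)
G = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]])
EBC, BC = edge_betweenness_bin(G)
print(BC)   # [0. 2. 0.] -- node 1 lies on the 0->2 and 2->0 shortest paths
print(EBC)  # each existing arc lies on 2 of the ordered-pair shortest paths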
def check(self, src_tgt, actual_deps):
"""Check for missing deps.
See docstring for _compute_missing_deps for details.
"""
if self._check_missing_direct_deps or self._check_unnecessary_deps:
missing_file_deps, missing_direct_tgt_deps = \
self._compute_missing_deps(src_tgt, actual_deps)
buildroot = get_buildroot()
def shorten(path): # Make the output easier to read.
if path.startswith(buildroot):
return os.path.relpath(path, buildroot)
return path
def filter_whitelisted(missing_deps):
# Removing any targets that exist in the whitelist from the list of dependency issues.
return [(tgt_pair, evidence) for (tgt_pair, evidence) in missing_deps
if tgt_pair[0].address not in self._target_whitelist]
missing_direct_tgt_deps = filter_whitelisted(missing_direct_tgt_deps)
if self._check_missing_direct_deps and missing_direct_tgt_deps:
log_fn = (self.context.log.error if self._check_missing_direct_deps == 'fatal'
else self.context.log.warn)
for (tgt_pair, evidence) in missing_direct_tgt_deps:
evidence_str = '\n'.join([' {} uses {}'.format(shorten(e[0]), shorten(e[1]))
for e in evidence])
log_fn('Missing direct BUILD dependency {} -> {} because:\n{}'
.format(tgt_pair[0].address.spec, tgt_pair[1].address.spec, evidence_str))
if self._check_missing_direct_deps == 'fatal':
raise TaskError('Missing direct deps.')
if self._check_unnecessary_deps:
log_fn = (self.context.log.error if self._check_unnecessary_deps == 'fatal'
else self.context.log.warn)
had_unused = self._do_check_unnecessary_deps(src_tgt, actual_deps, log_fn)
if had_unused and self._check_unnecessary_deps == 'fatal':
raise TaskError('Unnecessary deps.') | Check for missing deps.
See docstring for _compute_missing_deps for details. |
def returner(ret):
'''
    Send a slack message with the data
'''
_options = _get_options(ret)
channel = _options.get('channel')
username = _options.get('username')
as_user = _options.get('as_user')
api_key = _options.get('api_key')
changes = _options.get('changes')
only_show_failed = _options.get('only_show_failed')
yaml_format = _options.get('yaml_format')
if not channel:
log.error('slack.channel not defined in salt config')
return
if not username:
log.error('slack.username not defined in salt config')
return
if not as_user:
log.error('slack.as_user not defined in salt config')
return
if not api_key:
log.error('slack.api_key not defined in salt config')
return
if only_show_failed and changes:
log.error('cannot define both slack.changes and slack.only_show_failed in salt config')
return
returns = ret.get('return')
if changes is True:
        returns = {key: value for key, value in returns.items() if value['result'] is not True or value['changes']}
if only_show_failed is True:
        returns = {key: value for key, value in returns.items() if value['result'] is not True}
if yaml_format is True:
returns = salt.utils.yaml.safe_dump(returns)
else:
returns = pprint.pformat(returns)
message = ('id: {0}\r\n'
'function: {1}\r\n'
'function args: {2}\r\n'
'jid: {3}\r\n'
'return: {4}\r\n').format(
ret.get('id'),
ret.get('fun'),
ret.get('fun_args'),
ret.get('jid'),
returns)
slack = _post_message(channel,
message,
username,
as_user,
api_key)
    return slack | Send a slack message with the data
async def rows(self, offs, size=None, iden=None):
'''
Yield a number of raw items from the CryoTank starting at a given offset.
Args:
offs (int): The index of the desired datum (starts at 0)
size (int): The max number of items to yield.
Yields:
((indx, bytes)): Index and msgpacked bytes.
'''
if iden is not None:
self.setOffset(iden, offs)
for i, (indx, byts) in enumerate(self._items.rows(offs)):
if size is not None and i >= size:
return
yield indx, byts | Yield a number of raw items from the CryoTank starting at a given offset.
Args:
offs (int): The index of the desired datum (starts at 0)
size (int): The max number of items to yield.
Yields:
((indx, bytes)): Index and msgpacked bytes. |
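A hypothetical consumption sketch, assuming tank is an already-opened instance of the class above; the method is an async generator, so it is iterated with async for inside a coroutine:
import asyncio

async def dump_first(tank, n=10):
    # Iterate the async generator defined above.
    async for indx, byts in tank.rows(offs=0, size=n):
        print(indx, len(byts))

# asyncio.run(dump_first(tank))  # `tank` must be constructed elsewhere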
def sponsor_image_url(sponsor, name):
"""Returns the corresponding url from the sponsors images"""
if sponsor.files.filter(name=name).exists():
# We avoid worrying about multiple matches by always
# returning the first one.
return sponsor.files.filter(name=name).first().item.url
return '' | Returns the corresponding url from the sponsors images |
def get_auth(self):
"""
This method requests an authentication object from the HPE IMC NMS and returns an HTTPDigest Auth Object
:return:
"""
url = self.h_url + self.server + ":" + self.port
auth = requests.auth.HTTPDigestAuth(self.username,self.password)
auth_url = "/imcrs"
f_url = url + auth_url
        try:
            r = requests.get(f_url, auth=auth, headers=headers, verify=False)
        # checks for requests exceptions
        except requests.exceptions.RequestException as e:
            set_imc_creds()
            return ("Error:\n" + str(e) + '\n\nThe IMC server address is invalid. Please try again')
        if r.status_code != 200:  # checks for valid IMC credentials
            set_imc_creds()
            return ("Error:\nYour credentials are invalid. Please try again\n\n")
        return r.status_code | This method requests an authentication object from the HPE IMC NMS and returns an HTTPDigest Auth Object
:return: |
def run_job(self, section_id, session=None):
# type: (Text, Optional[Session]) -> None
"""Run a job as described in the section named ``section_id``.
Raises:
KeyError: when the section could not be found.
"""
if not self.parser.has_section(section_id):
raise KeyError('section not found: {}'.format(section_id))
session = session or Session()
for name, looter_cls in six.iteritems(self._CLS_MAP):
targets = self.get_targets(self._get(section_id, name))
quiet = self._getboolean(
section_id, "quiet", self.args.get("--quiet", False))
if targets:
logger.info("Launching {} job for section {}".format(name, section_id))
for target, directory in six.iteritems(targets):
try:
logger.info("Downloading {} to {}".format(target, directory))
looter = looter_cls(
target,
add_metadata=self._getboolean(section_id, 'add-metadata', False),
get_videos=self._getboolean(section_id, 'get-videos', False),
videos_only=self._getboolean(section_id, 'videos-only', False),
jobs=self._getint(section_id, 'jobs', 16),
template=self._get(section_id, 'template', '{id}'),
dump_json=self._getboolean(section_id, 'dump-json', False),
dump_only=self._getboolean(section_id, 'dump-only', False),
extended_dump=self._getboolean(section_id, 'extended-dump', False),
session=session)
if self.parser.has_option(section_id, 'username'):
looter.logout()
username = self._get(section_id, 'username')
password = self._get(section_id, 'password') or \
getpass.getpass('Password for "{}": '.format(username))
looter.login(username, password)
n = looter.download(
directory,
media_count=self._getint(section_id, 'num-to-dl'),
# FIXME: timeframe=self._get(section_id, 'timeframe'),
new_only=self._getboolean(section_id, 'new', False),
pgpbar_cls=None if quiet else TqdmProgressBar,
dlpbar_cls=None if quiet else TqdmProgressBar)
logger.success("Downloaded %i medias !", n)
except Exception as exception:
logger.error(six.text_type(exception)) | Run a job as described in the section named ``section_id``.
Raises:
KeyError: when the section could not be found. |
def parse_events(content, start=None, end=None, default_span=timedelta(days=7)):
"""
Query the events occurring in a given time range.
:param content: iCal URL/file content as String
:param start: start date for search, default today
:param end: end date for search
:param default_span: default query length (one week)
:return: events as list
"""
if not start:
start = now()
if not end:
end = start + default_span
if not content:
raise ValueError('Content is invalid!')
calendar = Calendar.from_ical(content)
# Find the calendar's timezone info, or use UTC
for c in calendar.walk():
if c.name == 'VTIMEZONE':
cal_tz = gettz(str(c['TZID']))
            break
else:
cal_tz = UTC
start = normalize(start, cal_tz)
end = normalize(end, cal_tz)
found = []
for component in calendar.walk():
if component.name == "VEVENT":
e = create_event(component)
if e.recurring:
# Unfold recurring events according to their rrule
rule = parse_rrule(component, cal_tz)
dur = e.end - e.start
found.extend(e.copy_to(dt) for dt in rule.between(start - dur, end, inc=True))
elif e.end >= start and e.start <= end:
found.append(e)
return found | Query the events occurring in a given time range.
:param content: iCal URL/file content as String
:param start: start date for search, default today
:param end: end date for search
:param default_span: default query length (one week)
:return: events as list |
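A hedged usage sketch, assuming parse_events is importable and a local calendar.ics file is available:
with open('calendar.ics', 'r', encoding='utf-8') as f:
    content = f.read()

# With no start/end given, the query window defaults to [now, now + 7 days].
for event in parse_events(content):
    print(event.start, event.end)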
def send(self, *args, **kwargs):
"""Sends the envelope using a freshly created SMTP connection. *args*
and *kwargs* are passed directly to :py:class:`envelopes.conn.SMTP`
constructor.
Returns a tuple of SMTP object and whatever its send method returns."""
conn = SMTP(*args, **kwargs)
send_result = conn.send(self)
return conn, send_result | Sends the envelope using a freshly created SMTP connection. *args*
and *kwargs* are passed directly to :py:class:`envelopes.conn.SMTP`
constructor.
Returns a tuple of SMTP object and whatever its send method returns. |
def displayText(self, value, blank='', joiner=', '):
"""
Returns the display text for the value associated with
the inputted text. This will result in a comma separated
list of labels for the value, or the blank text provided if
no text is found.
:param value | <variant>
blank | <str>
joiner | <str>
:return <str>
"""
if value is None:
return ''
labels = []
for key, my_value in sorted(self.items(), key=lambda x: x[1]):
if value & my_value:
labels.append(self._labels.get(my_value, text.pretty(key)))
return joiner.join(labels) or blank | Returns the display text for the value associated with
the inputted text. This will result in a comma separated
list of labels for the value, or the blank text provided if
no text is found.
:param value | <variant>
blank | <str>
joiner | <str>
:return <str> |
def _kmp_construct_next(self, pattern):
"""the helper function for KMP-string-searching is to construct the DFA. pattern should be an integer array. return a 2D array representing the DFA for moving the pattern."""
next = [[0 for state in pattern] for input_token in self.ALPHABETA_KMP]
next[pattern[0]][0] = 1
restart_state = 0
for state in range(1, len(pattern)):
for input_token in self.ALPHABETA_KMP:
next[input_token][state] = next[input_token][restart_state]
next[pattern[state]][state] = state + 1
restart_state = next[pattern[state]][restart_state]
        return next | Helper function for KMP string searching: construct the DFA. pattern should be an integer array. Return a 2D array representing the DFA for moving the pattern.
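This is the textbook KMP DFA construction: next[token][state] is the state reached after reading token while in state. A self-contained sketch of the same construction together with the matching loop that would typically consume it (the small integer alphabet and the search driver are assumptions, not part of the class above):
def kmp_next(pattern, alphabet):
    # Build the DFA: nxt[token][state] = next state after reading token in state.
    nxt = [[0] * len(pattern) for _ in alphabet]
    nxt[pattern[0]][0] = 1
    restart = 0
    for state in range(1, len(pattern)):
        for token in alphabet:
            nxt[token][state] = nxt[token][restart]
        nxt[pattern[state]][state] = state + 1
        restart = nxt[pattern[state]][restart]
    return nxt

def kmp_search(text, pattern, alphabet):
    nxt = kmp_next(pattern, alphabet)
    state = 0
    for i, token in enumerate(text):
        state = nxt[token][state]
        if state == len(pattern):
            return i - len(pattern) + 1  # start index of the first match
    return -1

alphabet = range(4)  # token ids 0..3
print(kmp_search([0, 1, 2, 1, 2, 3], [1, 2, 3], alphabet))  # -> 3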
def open_addnew_win(self, *args, **kwargs):
"""Open a new window so the use can choose to add new reftracks
:returns: None
:rtype: None
:raises: NotImplementedError
"""
if self.reftrackadderwin:
self.reftrackadderwin.close()
self.reftrackadderwin = ReftrackAdderWin(self.refobjinter, self.root, parent=self)
self.reftrackadderwin.destroyed.connect(self.addnewwin_destroyed)
        self.reftrackadderwin.show() | Open a new window so the user can choose to add new reftracks
:returns: None
:rtype: None
:raises: NotImplementedError |
def delete_nsg(access_token, subscription_id, resource_group, nsg_name):
'''Delete network security group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
nsg_name (str): Name of the NSG.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/networkSecurityGroups/', nsg_name,
'?api-version=', NETWORK_API])
return do_delete(endpoint, access_token) | Delete network security group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
nsg_name (str): Name of the NSG.
Returns:
HTTP response. |
def load_datafile(self, name, search_path=None, **kwargs):
"""
        find a datafile and load it from codec
"""
if not search_path:
search_path = self.define_dir
self.debug_msg('loading datafile %s from %s' % (name, str(search_path)))
        return codec.load_datafile(name, search_path, **kwargs) | find a datafile and load it from codec
def stopContext(self, context):
"""Clear the database if so configured for this
"""
# Use pymongo directly to drop all collections of created db
if ((self.clear_context['module'] and inspect.ismodule(context)) or
(self.clear_context['class'] and inspect.isclass(context))):
            self.connection.drop_database(self.database_name) | Clear the database if so configured for this context
def copy_ssh_keys_to_host(self, host, password=None, no_add_host=False, known_hosts=DEFAULT_KNOWN_HOSTS):
"""
Copy the SSH keys to the given host.
:param host: the `Host` object to copy the SSH keys to.
:param password: the SSH password for the given host.
:param no_add_host: if the host is not in the known_hosts file, write an error instead of adding it to the
known_hosts.
:param known_hosts: the `known_hosts` file to store the SSH public keys.
:raise paramiko.ssh_exception.AuthenticationException: if SSH authentication error.
:raise paramiko.ssh_exception.SSHException: generic SSH error.
:raise socket.error: if error at the socket level.
"""
client = None
try:
client = paramiko.SSHClient()
if not no_add_host:
client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
if os.path.isfile(known_hosts):
client.load_host_keys(filename=known_hosts)
client.connect(host.hostname, port=host.port, username=host.user, password=password,
key_filename=self.priv_key)
cmd = (r'''mkdir -p ~/.ssh && chmod 700 ~/.ssh && \
k='{0}' && if ! grep -qFx "$k" ~/.ssh/authorized_keys; then echo "$k" >> ~/.ssh/authorized_keys; fi'''
.format(self.pub_key_content))
logger.debug('Run on [%s]: %s', host.hostname, cmd)
client.exec_command(cmd.encode('utf-8'))
finally:
if client:
client.close() | Copy the SSH keys to the given host.
:param host: the `Host` object to copy the SSH keys to.
:param password: the SSH password for the given host.
:param no_add_host: if the host is not in the known_hosts file, write an error instead of adding it to the
known_hosts.
:param known_hosts: the `known_hosts` file to store the SSH public keys.
:raise paramiko.ssh_exception.AuthenticationException: if SSH authentication error.
:raise paramiko.ssh_exception.SSHException: generic SSH error.
:raise socket.error: if error at the socket level. |
def is_admin(self):
"""Is the user a system administrator"""
return self.role == self.roles.administrator.value and self.state == State.approved | Is the user a system administrator |
def add_sma(self,periods=20,column=None,name='',
str=None,**kwargs):
"""
Add Simple Moving Average (SMA) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
column :string
Defines the data column name that contains the
data over which the study will be applied.
Default: 'close'
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot()
"""
if not column:
column=self._d['close']
study={'kind':'sma',
'name':name,
'params':{'periods':periods,'column':column,
'str':str},
'display':utils.merge_dict({'legendgroup':False},kwargs)}
self._add_study(study) | Add Simple Moving Average (SMA) study to QuantFigure.studies
Parameters:
periods : int or list(int)
Number of periods
column :string
Defines the data column name that contains the
data over which the study will be applied.
Default: 'close'
name : string
Name given to the study
str : string
Label factory for studies
The following wildcards can be used:
{name} : Name of the column
{study} : Name of the study
{period} : Period used
Examples:
'study: {study} - period: {period}'
kwargs:
legendgroup : bool
If true, all legend items are grouped into a
single one
All formatting values available on iplot() |
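A hypothetical call, assuming qf is an instance of the QuantFigure-style class this method belongs to and wraps an OHLC DataFrame:
# qf is assumed to exist; the study is stored in qf.studies and rendered when the figure is plotted.
qf.add_sma(periods=[20, 50], column='close',
           str='{name}: {study}({period})', legendgroup=True)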
def dmrs_tikz_dependency(xs, **kwargs):
"""
Return a LaTeX document with each Xmrs in *xs* rendered as DMRSs.
DMRSs use the `tikz-dependency` package for visualization.
"""
def link_label(link):
return '{}/{}'.format(link.rargname or '', link.post)
def label_edge(link):
if link.post == H_POST and link.rargname == RSTR_ROLE:
return 'rstr'
elif link.post == EQ_POST:
return 'eq'
else:
return 'arg'
if isinstance(xs, Xmrs):
xs = [xs]
lines = """\\documentclass{standalone}
\\usepackage{tikz-dependency}
\\usepackage{relsize}
%%%
%%% style for dmrs graph
%%%
\\depstyle{dmrs}{edge unit distance=1.5ex,
label style={above, scale=.9, opacity=0, text opacity=1},
baseline={([yshift=-0.7\\baselineskip]current bounding box.north)}}
%%% set text opacity=0 to hide text, opacity = 0 to hide box
\\depstyle{root}{edge unit distance=3ex, label style={opacity=1}}
\\depstyle{arg}{edge above}
\\depstyle{rstr}{edge below, dotted, label style={text opacity=1}}
\\depstyle{eq}{edge below, label style={text opacity=1}}
\\depstyle{icons}{edge below, dashed}
\\providecommand{\\named}{}
\\renewcommand{\\named}{named}
%%% styles for predicates and roles (from mrs.sty)
\\providecommand{\\spred}{}
\\renewcommand{\\spred}[1]{\\mbox{\\textsf{#1}}}
\\providecommand{\\srl}{}
\\renewcommand{\\srl}[1]{\\mbox{\\textsf{\\smaller #1}}}
%%%
\\begin{document}""".split("\n")
for ix, x in enumerate(xs):
lines.append("%%%\n%%% {}\n%%%".format(ix+1))
lines.append("\\begin{dependency}[dmrs]")
ns = nodes(x)
### predicates
lines.append(" \\begin{deptext}[column sep=10pt]")
for i, n in enumerate(ns):
sep = "\\&" if (i < len(ns) - 1) else "\\\\"
pred = _latex_escape(n.pred.short_form())
pred = "\\named{}" if pred == 'named' else pred
if n.carg is not None:
print(n.carg.strip('"'))
pred += "\\smaller ({})".format(n.carg.strip('"'))
lines.append(" \\spred{{{}}} {} % node {}".format(
pred, sep, i+1))
lines.append(" \\end{deptext}")
nodeidx = {n.nodeid: i+1 for i, n in enumerate(ns)}
### links
for link in links(x):
if link.start == 0:
lines.append(
' \\deproot[root]{{{}}}{{{}}}'.format(
nodeidx[link.end],
'\\srl{TOP}' # _latex_escape('/' + link.post)
)
)
else:
lines.append(' \\depedge[{}]{{{}}}{{{}}}{{\\srl{{{}}}}}'.format(
label_edge(link),
nodeidx[link.start],
nodeidx[link.end],
_latex_escape(link_label(link))
))
### placeholder for icons
lines.append('% \\depedge[icons]{f}{t}{FOCUS}')
lines.append('\\end{dependency}\n')
lines.append('\\end{document}')
return '\n'.join(lines) | Return a LaTeX document with each Xmrs in *xs* rendered as DMRSs.
DMRSs use the `tikz-dependency` package for visualization. |
def update_reflexrules_workflow_state(portal):
"""
Updates Reflex Rules' inactive_state, otherwise they don't have it by
default.
:param portal: Portal object
:return: None
"""
wf_tool = getToolByName(portal, 'portal_workflow')
logger.info("Updating Reflex Rules' 'inactive_state's...")
wf = wf_tool.getWorkflowById("bika_inactive_workflow")
uc = api.get_tool('portal_catalog')
r_rules = uc(portal_type='ReflexRule')
for rr in r_rules:
obj = rr.getObject()
wf.updateRoleMappingsFor(obj)
obj.reindexObject()
logger.info("Reflex Rules' 'inactive_state's were updated.") | Updates Reflex Rules' inactive_state, otherwise they don't have it by
default.
:param portal: Portal object
:return: None |
def callback(self, request, **kwargs):
"""
Called from the Service when the user accept to activate it
:param request: request object
:return: callback url
:rtype: string , path to the template
"""
access_token = Pocket.get_access_token(consumer_key=self.consumer_key, code=request.session['request_token'])
kwargs = {'access_token': access_token}
return super(ServicePocket, self).callback(request, **kwargs) | Called from the Service when the user accept to activate it
:param request: request object
:return: callback url
:rtype: string , path to the template |
def name(self):
'''
Returns the name of the current :py:class:`Detrender` subclass.
'''
if self.cadence == 'lc':
return self.__class__.__name__
else:
return '%s.sc' % self.__class__.__name__ | Returns the name of the current :py:class:`Detrender` subclass. |
def getStates(self):
"""Get all :class:`rtcclient.models.State` objects of this workitem
:return: a :class:`list` contains all the
:class:`rtcclient.models.State` objects
:rtype: list
"""
cust_attr = (self.raw_data.get("rtc_cm:state")
.get("@rdf:resource")
.split("/")[-2])
return self.rtc_obj._get_paged_resources("State",
projectarea_id=self.contextId,
customized_attr=cust_attr,
page_size="50") | Get all :class:`rtcclient.models.State` objects of this workitem
:return: a :class:`list` contains all the
:class:`rtcclient.models.State` objects
:rtype: list |
def parents(self, resources):
""" Split the path in name and get parents """
if self.docname == 'index':
# The root has no parents
return []
parents = []
parent = resources.get(self.parent)
while parent is not None:
parents.append(parent)
parent = resources.get(parent.parent)
return parents | Split the path in name and get parents |
def send_workflow(self):
"""
        Assign a role to the workflow instance and the task invitation.
"""
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
select_role = self.input['form']['select_role']
if wfi.current_actor == self.current.role:
task_invitation.role = RoleModel.objects.get(select_role)
wfi.current_actor = RoleModel.objects.get(select_role)
wfi.save()
task_invitation.save()
[inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if
not inv == task_invitation]
title = _(u"Successful")
msg = _(u"The workflow was assigned to someone else with success.")
else:
title = _(u"Unsuccessful")
msg = _(u"This workflow does not belong to you, you cannot assign it to someone else.")
        self.current.msg_box(title=title, msg=msg) | Assign a role to the workflow instance and the task invitation.
def uriref_matches_iriref(v1: URIRef, v2: Union[str, ShExJ.IRIREF]) -> bool:
""" Compare :py:class:`rdflib.URIRef` value with :py:class:`ShExJ.IRIREF` value """
return str(v1) == str(v2) | Compare :py:class:`rdflib.URIRef` value with :py:class:`ShExJ.IRIREF` value |
async def load_cache(self, archive: bool = False) -> int:
"""
Load caches and archive enough to go offline and be able to generate proof
on all credentials in wallet.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:return: cache load event timestamp (epoch seconds)
"""
LOGGER.debug('HolderProver.load_cache >>> archive: %s', archive)
rv = int(time())
box_ids = json.loads(await self.get_box_ids_json())
for s_id in box_ids['schema_id']:
with SCHEMA_CACHE.lock:
await self.get_schema(s_id)
for cd_id in box_ids['cred_def_id']:
with CRED_DEF_CACHE.lock:
await self.get_cred_def(cd_id)
for rr_id in box_ids['rev_reg_id']:
await self._get_rev_reg_def(rr_id)
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
if revo_cache_entry:
try:
await revo_cache_entry.get_delta_json(self._build_rr_delta_json, rv, rv)
except ClosedPool:
LOGGER.warning(
'Holder-Prover %s is offline from pool %s, cannot update revo cache reg delta for %s to %s',
self.wallet.name,
self.pool.name,
rr_id,
rv)
if archive:
Caches.archive(self.dir_cache)
LOGGER.debug('HolderProver.load_cache <<< %s', rv)
return rv | Load caches and archive enough to go offline and be able to generate proof
on all credentials in wallet.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:return: cache load event timestamp (epoch seconds) |
def _check_fields(self, x, y):
"""
Check x and y fields parameters and initialize
"""
if x is None:
if self.x is None:
self.err(
self._check_fields,
"X field is not set: please specify a parameter")
return
x = self.x
if y is None:
if self.y is None:
self.err(
self._check_fields,
"Y field is not set: please specify a parameter")
return
y = self.y
return x, y | Check x and y fields parameters and initialize |
def validate(self, value, validator):
"""Validates and returns the value.
If the value does not validate against the schema, SchemaValidationError
will be raised.
:param value: A value to validate (usually a dict).
:param validator: An instance of a jsonschema validator class, as
created by Schema.get_validator().
:returns: the passed value.
:raises SchemaValidationError:
:raises Exception:
"""
try:
validator.validate(value)
except Exception as e:
logging.debug(e, exc_info=e)
if isinstance(e, DoctorError):
raise
else:
# Gather all the validation errors
validation_errors = sorted(
validator.iter_errors(value), key=lambda e: e.path)
errors = {}
for error in validation_errors:
try:
key = error.path[0]
except IndexError:
key = '_other'
errors[key] = error.args[0]
raise SchemaValidationError(e.args[0], errors=errors)
return value | Validates and returns the value.
If the value does not validate against the schema, SchemaValidationError
will be raised.
:param value: A value to validate (usually a dict).
:param validator: An instance of a jsonschema validator class, as
created by Schema.get_validator().
:returns: the passed value.
:raises SchemaValidationError:
:raises Exception: |
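The error-gathering branch can be reproduced with jsonschema directly; a minimal sketch (the schema and values below are made up) of how iter_errors maps validation failures to field names:
import jsonschema

schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
    "required": ["name"],
}
validator = jsonschema.Draft7Validator(schema)
value = {"age": "forty"}  # invalid: wrong type for age, required name is missing

errors = {}
for error in sorted(validator.iter_errors(value), key=lambda e: list(e.path)):
    key = error.path[0] if error.path else "_other"
    errors[key] = error.message
print(errors)
# {'_other': "'name' is a required property", 'age': "'forty' is not of type 'integer'"}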
def _get_goroot(self, goids_all, namespace):
"""Get the top GO for the set of goids_all."""
root_goid = self.consts.NAMESPACE2GO[namespace]
if root_goid in goids_all:
return root_goid
root_goids = set()
for goid in goids_all:
goterm = self.gosubdag.go2obj[goid]
if goterm.depth == 0:
root_goids.add(goterm.id)
if len(root_goids) == 1:
return next(iter(root_goids))
raise RuntimeError("UNEXPECTED NUMBER OF ROOTS: {R}".format(R=root_goids)) | Get the top GO for the set of goids_all. |