code (string, 59–3.37k chars) | docstring (string, 8–15.5k chars)
---|---|
def do_watch(self, *args):
tables = []
if not self.engine.cached_descriptions:
self.engine.describe_all()
all_tables = list(self.engine.cached_descriptions)
for arg in args:
candidates = set((t for t in all_tables if fnmatch(t, arg)))
for t in sorted(candidates):
if t not in tables:
tables.append(t)
mon = Monitor(self.engine, tables)
mon.start()
|
Watch Dynamo tables consumed capacity
|
def filter(self, scored_list):
top_n_key = -1 * self.top_n
top_n_list = sorted(scored_list, key=lambda x: x[1])[top_n_key:]
result_list = sorted(top_n_list, key=lambda x: x[0])
return result_list
|
Filtering with top-n ranking.
Args:
scored_list: The list of scoring.
Returns:
The list of filtered result.
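Example (a minimal sketch of the same logic, assuming each element of
scored_list is an (index, score) tuple and top_n is 2):
>>> scored_list = [(0, 0.1), (1, 0.9), (2, 0.5), (3, 0.7)]
>>> sorted(sorted(scored_list, key=lambda x: x[1])[-2:], key=lambda x: x[0])
[(1, 0.9), (3, 0.7)]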
|
def _dbus_get_object(bus_name, object_name):
try:
bus = dbus.SessionBus()
obj = bus.get_object(bus_name, object_name)
return obj
except (NameError, dbus.exceptions.DBusException):
return None
|
Fetches DBUS proxy object given the specified parameters.
`bus_name`
Name of the bus interface.
`object_name`
Object path related to the interface.
Returns object or ``None``.
|
def NumRegressors(npix, pld_order, cross_terms=True):
res = 0
for k in range(1, pld_order + 1):
if cross_terms:
res += comb(npix + k - 1, k)
else:
res += npix
return int(res)
|
Return the number of regressors for `npix` pixels
and PLD order `pld_order`.
:param bool cross_terms: Include pixel cross-terms? Default :py:obj:`True`
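A short worked example of the count, assuming ``comb`` is scipy.special.comb as the code suggests (the pixel count is hypothetical):
>>> from scipy.special import comb
>>> int(comb(9 + 1 - 1, 1) + comb(9 + 2 - 1, 2))  # npix=9, pld_order=2, cross terms
54
>>> 9 * 2  # same inputs without cross terms
18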
|
def match_future_child(self, parent, relation, recursive=False):
match = False
children = self.get_descendants if recursive else self.get_children
for child in children(parent, no_iframe=self.iframe_restrict):
match = self.match_selectors(child, relation)
if match:
break
return match
|
Match future child.
|
def open_connection(ip, username, password, function, args, write=False,
conn_timeout=5, sess_timeout=300, port=22):
output = color('=' * 50 + '\nResults from device: %s\n' % ip, 'yel')
try:
conn = Jaide(ip, username, password, connect_timeout=conn_timeout,
session_timeout=sess_timeout, port=port)
if write is not False:
return write, output + function(conn, *args)
else:
return output + function(conn, *args)
except errors.SSHError:
output += color('Unable to connect to port %s on device: %s\n' %
(str(port), ip), 'red')
except errors.AuthenticationError:
output += color('Authentication failed for device: %s' % ip, 'red')
except AuthenticationException:
output += color('Authentication failed for device: %s' % ip, 'red')
except SSHException as e:
output += color('Error connecting to device: %s\nError: %s' %
(ip, str(e)), 'red')
except socket.timeout:
output += color('Timeout exceeded connecting to device: %s' % ip, 'red')
except socket.gaierror:
output += color('No route to host, or invalid hostname: %s' % ip, 'red')
except socket.error:
output += color('The device refused the connection on port %s, or '
'no route to host.' % port, 'red')
if write is not False:
return write, output
else:
return output
|
Open a Jaide session with the device.
Opens a Jaide session to the device and runs the appropriate function
against it. Arguments for the downstream function are passed through.
@param ip: String of the IP or hostname of the device to connect to.
@type ip: str
@param username: The string username used to connect to the device.
@type username: str
@param password: The string password used to connect to the device.
@type password: str
@param function: The downstream jaide.wrap function we'll be handing
| off the jaide.Jaide() object to execute the command
| once we've established the connection.
@type function: function pointer.
@param args: The arguments that we will hand off to the downstream
| function.
@type args: list
@param write: If set, it would be a tuple that we pass back as part of
| our return statement, so that any callback function
| can know how and where to put the output from the device.
@type write: False or tuple.
@param conn_timeout: Sets the connection timeout value. This is how
| we'll wait when connecting before classifying
| the device unreachable.
@type conn_timeout: int
@param sess_timeout: Sets the session timeout value. A higher value may
| be desired for long running commands, such as
| 'request system snapshot slice alternate'
@type sess_timeout: int
@param port: The port to connect to the device on. Defaults to 22.
@type port: int
@returns: We could return either just a string of the output from the
| device, or a tuple containing the information needed to write
| to a file and the string output from the device.
@rtype: Tuple or str
|
def prior(self):
prior = self.prob * self.selectfrac
for f in self.priorfactors:
prior *= self.priorfactors[f]
return prior
|
Model prior for particular model.
Product of eclipse probability (``self.prob``),
the fraction of scenario that is allowed by the various
constraints (``self.selectfrac``), and all additional
factors in ``self.priorfactors``.
|
def copy_directory_structure(destination_directory, relative_path):
full_path = os.path.join(destination_directory, relative_path)
if os.path.exists(full_path):
return
    os.makedirs(full_path)
|
Create all the intermediate directories required for relative_path to exist within destination_directory.
This assumes that relative_path is a directory located within destination_directory.
Examples:
destination_directory: /tmp/destination
relative_path: test/unit/
will create: /tmp/destination/test/unit
Args:
destination_directory (str): root of the destination directory where the directory structure will be created.
relative_path (str): relative path that will be created within destination_directory
|
def project_op(self, op):
if not self.is_orthonormal():
raise ValueError("project_op only implemented for orthonormal operator bases")
return self.basis_transform.H * qt.operator_to_vector(op).data
|
Project an operator onto the basis.
:param qutip.Qobj op: The operator to project.
:return: The projection coefficients as a numpy array.
:rtype: scipy.sparse.csr_matrix
|
def expected_param_keys(self):
expected_keys = []
    r = re.compile(r'%\(([^\)]+)\)s')
for block in self.keys():
for key in self[block].keys():
s = self[block][key]
if type(s)!=str: continue
md = re.search(r, s)
while md is not None:
k = md.group(1)
if k not in expected_keys:
expected_keys.append(k)
s = s[md.span()[1]:]
md = re.search(r, s)
return expected_keys
|
returns a list of params that this ConfigTemplate expects to receive
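A minimal sketch of the ``%(name)s`` pattern the loop scans for, applied to a hypothetical template value:
>>> import re
>>> r = re.compile(r'%\(([^\)]+)\)s')
>>> r.findall('host=%(hostname)s port=%(port)s user=%(hostname)s')
['hostname', 'port', 'hostname']
The method itself deduplicates while preserving order, so this template would contribute ['hostname', 'port'].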
|
def _extract_archive(archive, verbosity=0, interactive=True, outdir=None,
program=None, format=None, compression=None):
if format is None:
format, compression = get_archive_format(archive)
check_archive_format(format, compression)
program = find_archive_program(format, 'extract', program=program)
check_program_compression(archive, 'extract', program, compression)
get_archive_cmdlist = get_archive_cmdlist_func(program, 'extract', format)
if outdir is None:
outdir = util.tmpdir(dir=".")
do_cleanup_outdir = True
else:
do_cleanup_outdir = False
try:
cmdlist = get_archive_cmdlist(archive, compression, program, verbosity, interactive, outdir)
if cmdlist:
run_archive_cmdlist(cmdlist, verbosity=verbosity)
if do_cleanup_outdir:
target, msg = cleanup_outdir(outdir, archive)
else:
target, msg = outdir, "`%s'" % outdir
if verbosity >= 0:
util.log_info("... %s extracted to %s." % (archive, msg))
return target
finally:
if do_cleanup_outdir:
try:
os.rmdir(outdir)
except OSError:
pass
|
Extract an archive.
@return: output directory if command is 'extract', else None
|
def get_isolated_cpus():
path = sysfs_path('devices/system/cpu/isolated')
isolated = read_first_line(path)
if isolated:
return parse_cpu_list(isolated)
cmdline = read_first_line(proc_path('cmdline'))
if cmdline:
match = re.search(r'\bisolcpus=([^ ]+)', cmdline)
if match:
isolated = match.group(1)
return parse_cpu_list(isolated)
return None
|
Get the list of isolated CPUs.
Return a sorted list of CPU identifiers, or return None if no CPU is
isolated.
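A small sketch of the kernel command-line fallback, using a hypothetical /proc/cmdline value:
>>> import re
>>> cmdline = 'BOOT_IMAGE=/vmlinuz-5.15 root=/dev/sda1 isolcpus=2,3 quiet'
>>> re.search(r'\bisolcpus=([^ ]+)', cmdline).group(1)
'2,3'
The captured string is then handed to parse_cpu_list.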
|
def is_enum_type(type_):
return isinstance(type_, type) and issubclass(type_, tuple(_get_types(Types.ENUM)))
|
Checks if the given type is an enum type.
:param type_: The type to check
:return: True if the type is an enum type, otherwise False
:rtype: bool
|
def add_pii_permissions(self, group, view_only=None):
pii_model_names = [m.split(".")[1] for m in self.pii_models]
if view_only:
permissions = Permission.objects.filter(
(Q(codename__startswith="view") | Q(codename__startswith="display")),
content_type__model__in=pii_model_names,
)
else:
permissions = Permission.objects.filter(
content_type__model__in=pii_model_names
)
for permission in permissions:
group.permissions.add(permission)
for model in self.pii_models:
permissions = Permission.objects.filter(
codename__startswith="view",
content_type__app_label=model.split(".")[0],
content_type__model=f"historical{model.split('.')[1]}",
)
for permission in permissions:
group.permissions.add(permission)
for permission in Permission.objects.filter(
content_type__app_label="edc_registration",
codename__in=[
"add_registeredsubject",
"delete_registeredsubject",
"change_registeredsubject",
],
):
group.permissions.remove(permission)
permission = Permission.objects.get(
content_type__app_label="edc_registration",
codename="view_historicalregisteredsubject",
)
group.permissions.add(permission)
|
Adds PII model permissions.
|
def get_ranked_players():
rankings_page = requests.get(RANKINGS_URL)
root = etree.HTML(rankings_page.text)
player_rows = root.xpath('//div[@id="ranked"]//tr')
for row in player_rows[1:]:
player_row = row.xpath('td[@class!="country"]//text()')
yield _Player(
name=player_row[1],
country=row[1][0].get('title'),
triple_crowns=player_row[3],
monthly_win=player_row[4],
biggest_cash=player_row[5],
plb_score=player_row[6],
biggest_score=player_row[7],
average_score=player_row[8],
previous_rank=player_row[9],
)
|
Get the list of the first 100 ranked players.
|
def log_likelihood(self):
ll = GP.log_likelihood(self)
jacobian = self.warping_function.fgrad_y(self.Y_untransformed)
return ll + np.log(jacobian).sum()
|
Notice we add the jacobian of the warping function here.
|
def _reset_plain(self):
if self._text:
self._blocks.append(BlockText('\n'.join(self._text)))
self._text.clear()
|
Create a BlockText from the captured lines and clear _text.
|
def _set_widths(self, row, proc_group):
width_free = self.style["width_"] - sum(
[sum(self.fields[c].width for c in self.columns),
self.width_separtor])
if width_free < 0:
width_fixed = sum(
[sum(self.fields[c].width for c in self.columns
if c not in self.autowidth_columns),
self.width_separtor])
assert width_fixed > self.style["width_"], "bug in width logic"
raise elements.StyleError(
"Fixed widths specified in style exceed total width")
elif width_free == 0:
lgr.debug("Not checking widths; no free width left")
return False
lgr.debug("Checking width for row %r", row)
adjusted = False
for column in sorted(self.columns, key=lambda c: self.fields[c].width):
if width_free < 1:
lgr.debug("Giving up on checking widths; no free width left")
break
if column in self.autowidth_columns:
field = self.fields[column]
lgr.debug("Checking width of column %r "
"(field width: %d, free width: %d)",
column, field.width, width_free)
if field.pre[proc_group]:
value = field(row[column], keys=[proc_group],
exclude_post=True)
else:
value = row[column]
value = six.text_type(value)
value_width = len(value)
wmax = self.autowidth_columns[column]["max"]
if value_width > field.width:
width_old = field.width
width_available = width_free + field.width
width_new = min(value_width,
wmax or width_available,
width_available)
if width_new > width_old:
adjusted = True
field.width = width_new
lgr.debug("Adjusting width of %r column from %d to %d "
"to accommodate value %r",
column, width_old, field.width, value)
self._truncaters[column].length = field.width
width_free -= field.width - width_old
lgr.debug("Free width is %d after processing column %r",
width_free, column)
return adjusted
|
Update auto-width Fields based on `row`.
Parameters
----------
row : dict
proc_group : {'default', 'override'}
Whether to consider 'default' or 'override' key for pre- and
post-format processors.
Returns
-------
True if any widths required adjustment.
|
def set_trace(self, frame=None):
if hasattr(local, '_pdbpp_completing'):
return
if frame is None:
frame = sys._getframe().f_back
self._via_set_trace_frame = frame
return super(Pdb, self).set_trace(frame)
|
Remember starting frame.
This is used with pytest, which does not use pdb.set_trace().
|
def check_schedule():
all_items = prefetch_schedule_items()
for validator, _type, _msg in SCHEDULE_ITEM_VALIDATORS:
if validator(all_items):
return False
all_slots = prefetch_slots()
for validator, _type, _msg in SLOT_VALIDATORS:
if validator(all_slots):
return False
return True
|
Helper routine to easily test if the schedule is valid
|
def wait_for(func):
@wraps(func)
def wrapped(*args, **kwargs):
timeout = kwargs.pop('timeout', TIMEOUT)
start = None
while True:
try:
return func(*args, **kwargs)
except AssertionError:
if not start:
start = time()
if time() - start < timeout:
sleep(CHECK_EVERY)
continue
else:
raise
return wrapped
|
A decorator to invoke a function, retrying on assertion errors for a
specified time interval.
Adds a kwarg `timeout` to `func` which is a number of seconds to try
for (default 15).
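A hedged usage sketch; queue_is_empty, work_queue and the assertion are hypothetical:
>>> @wait_for
... def queue_is_empty(queue):
...     assert len(queue) == 0
>>> queue_is_empty(work_queue, timeout=30)  # doctest: +SKIP
The wrapped call keeps retrying the assertion every CHECK_EVERY seconds until it passes or 30 seconds elapse.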
|
def unique_filename(**kwargs):
if 'dir' not in kwargs:
path = temp_dir('impacts')
kwargs['dir'] = path
else:
path = temp_dir(kwargs['dir'])
kwargs['dir'] = path
if not os.path.exists(kwargs['dir']):
umask = os.umask(0000)
os.makedirs(kwargs['dir'], 0o777)
os.umask(umask)
handle, filename = mkstemp(**kwargs)
os.close(handle)
try:
os.remove(filename)
except OSError:
pass
return filename
|
Create new filename guaranteed not to exist previously
Use mkstemp to create the file, then remove it and return the name
If dir is specified, the tempfile will be created in the path specified
otherwise the file will be created in a directory following this scheme:
:file:'/tmp/inasafe/<dd-mm-yyyy>/<user>/impacts'
See http://docs.python.org/library/tempfile.html for details.
Example usage:
tempdir = temp_dir(sub_dir='test')
filename = unique_filename(suffix='.foo', dir=tempdir)
print filename
/tmp/inasafe/23-08-2012/timlinux/test/tmpyeO5VR.foo
Or with no preferred subdir, a default subdir of 'impacts' is used:
filename = unique_filename(suffix='.shp')
print filename
/tmp/inasafe/23-08-2012/timlinux/impacts/tmpoOAmOi.shp
|
def list(showgroups):
ecode = 0
try:
result = {}
subscribed = {}
available = {}
unavailable = {}
current_user_data = contexts['anchore_auth']['user_info']
feedmeta = anchore_feeds.load_anchore_feedmeta()
for feed in feedmeta.keys():
if feedmeta[feed]['subscribed']:
subscribed[feed] = {}
subscribed[feed]['description'] = feedmeta[feed]['description']
if showgroups:
subscribed[feed]['groups'] = feedmeta[feed]['groups'].keys()
else:
if current_user_data:
tier = int(current_user_data['tier'])
else:
tier = 0
if int(feedmeta[feed]['access_tier']) > tier:
collection = unavailable
else:
collection = available
collection[feed] = {}
collection[feed]['description'] = feedmeta[feed]['description']
if showgroups and collection == available:
collection[feed]['groups'] = feedmeta[feed]['groups'].keys()
if available:
result['Available'] = available
if subscribed:
result['Subscribed'] = subscribed
if unavailable:
result['Unavailable/Insufficient Access Tier'] = unavailable
anchore_print(result, do_formatting=True)
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode)
|
Show list of Anchore data feeds.
|
def predict_array(self, arr):
precompute = self.precompute
self.precompute = False
pred = super().predict_array(arr)
self.precompute = precompute
return pred
|
This override is necessary because otherwise the learner method accesses the wrong model when it is
called with precompute set to True.
Args:
arr: a numpy array to be used as input to the model for prediction purposes
Returns:
a numpy array containing the predictions from the model
|
def _get_links(self,):
res = ''
if self.links:
for l in self.links:
res += ' links = ' + str(l[0]) + '\n'
if l[0].child_nodes:
for chld in l[0].child_nodes:
res += ' child = ' + str(chld) + '\n'
if l[0].links:
for lnk in l[0].links:
res += ' sublink = ' + str(lnk[0]) + '\n'
else:
res += ' links = None\n'
return res
|
return the list of links of a node
|
def delete_policy(self, pol_id):
if pol_id not in self.policies:
LOG.error("Invalid policy %s", pol_id)
return
del self.policies[pol_id]
self.policy_cnt -= 1
|
Deletes the policy from the local dictionary.
|
async def set_constraints(self, constraints):
app_facade = client.ApplicationFacade.from_connection(self.connection)
log.debug(
'Setting constraints for %s: %s', self.name, constraints)
return await app_facade.SetConstraints(self.name, constraints)
|
Set machine constraints for this application.
:param dict constraints: Dict of machine constraints
|
def cleanup():
for install_dir in linters.INSTALL_DIRS:
try:
shutil.rmtree(install_dir, ignore_errors=True)
except Exception:
print(
"{0}\nFailed to delete {1}".format(
traceback.format_exc(), install_dir
)
)
sys.stdout.flush()
|
Delete standard installation directories.
|
def sponsor_tagged_image(sponsor, tag):
if sponsor.files.filter(tag_name=tag).exists():
return sponsor.files.filter(tag_name=tag).first().tagged_file.item.url
return ''
|
returns the corresponding url from the tagged image list.
|
def attribute(self, tag, name):
if tag in self._tags and name in self._tags[tag]:
return self._tags[tag][name]
|
return attribute by tag and attribute name
|
def get_power(self):
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
|
Get current power.
|
def error_message_and_exit(message, error_result):
if message:
error_message(message)
puts(json.dumps(error_result, indent=2))
sys.exit(1)
|
Prints error messages in blue and the failed task result, then quits.
|
def process_cx_file(file_name, require_grounding=True):
with open(file_name, 'rt') as fh:
json_list = json.load(fh)
return process_cx(json_list, require_grounding=require_grounding)
|
Process a CX JSON file into Statements.
Parameters
----------
file_name : str
Path to file containing CX JSON.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements.
|
def guess_extension(amimetype, normalize=False):
ext = _mimes.guess_extension(amimetype)
if ext and normalize:
ext = {'.asc': '.txt', '.obj': '.bin'}.get(ext, ext)
from invenio.legacy.bibdocfile.api_normalizer import normalize_format
return normalize_format(ext)
return ext
|
Tries to guess extension for a mimetype.
@param amimetype: name of a mimetype
@type amimetype: string
@return: the extension
@rtype: string
|
def make_server(application, conf_dir=None):
if conf_dir:
load_config(conf_dir)
configure_syslog()
log_config()
if options.use_ssl:
ssl_options = ssl_server_options()
server = tornado.httpserver.HTTPServer(
application, ssl_options=ssl_options)
general_logger.info(
'start tornado https server at https://%s:%s'
' with ssl_options: %s', options.ip, options.port, ssl_options)
else:
server = tornado.httpserver.HTTPServer(application)
general_logger.info('start tornado http server at http://{0}:{1}'.format(
options.ip, options.port))
server.bind(options.port, options.ip)
return server
|
Configure the server and return the server instance.
|
def transform_stringlike(self, const):
yield LOAD_CONST(const)
if isinstance(const, bytes):
yield from self.bytes_instrs
elif isinstance(const, str):
yield from self.str_instrs
|
Yield instructions to process a str or bytes constant.
|
def clean_document(document,
sent_tokenize, _treebank_word_tokenize,
tagger,
lemmatizer,
lemmatize,
stopset,
first_cap_re, all_cap_re,
digits_punctuation_whitespace_re,
pos_set):
try:
tokenized_document = fast_word_tokenize(document, sent_tokenize, _treebank_word_tokenize)
except LookupError:
print("Warning: Could not tokenize document. If these warnings are commonplace, there is a problem with the nltk resources.")
lemma_list = list()
lemma_to_keywordbag = defaultdict(lambda: defaultdict(int))
return lemma_list, lemma_to_keywordbag
tokenized_document = [separate_camel_case(token, first_cap_re, all_cap_re).lower() for token in tokenized_document]
tokenized_document = tagger.tag(tokenized_document)
tokenized_document = [token[0] for token in tokenized_document if (token[1] in pos_set)]
tokenized_document_no_punctuation = list()
append_token = tokenized_document_no_punctuation.append
for token in tokenized_document:
new_token = remove_digits_punctuation_whitespace(token, digits_punctuation_whitespace_re)
if not new_token == u'':
append_token(new_token)
tokenized_document_no_stopwords = list()
append_word = tokenized_document_no_stopwords.append
for word in tokenized_document_no_punctuation:
if word not in stopset:
append_word(word)
lemma_to_keywordbag = defaultdict(lambda: defaultdict(int))
final_doc = list()
append_lemma = final_doc.append
for word in tokenized_document_no_stopwords:
lemma = lemmatize(word)
append_lemma(lemma)
lemma_to_keywordbag[lemma][word] += 1
lemma_list = list()
append_word = lemma_list.append
for word in final_doc:
if word not in stopset:
append_word(word)
return lemma_list, lemma_to_keywordbag
|
Extracts a clean bag-of-words from a document.
Inputs: - document: A string containing some text.
Output: - lemma_list: A python list of lemmas or stems.
- lemma_to_keywordbag: A python dictionary that maps stems/lemmas to original topic keywords.
|
def parse_value(cell):
value = cell.value
if isinstance(value, string_types):
value = value.strip()
if isinstance(value, (datetime)):
value = value.isoformat()
return value
|
Extract the value of an Excel cell as text.
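A minimal sketch using a namedtuple as a stand-in for an Excel cell (real cells come from the spreadsheet library in use):
>>> from collections import namedtuple
>>> from datetime import datetime
>>> Cell = namedtuple('Cell', 'value')
>>> parse_value(Cell('  hello  '))
'hello'
>>> parse_value(Cell(datetime(2020, 1, 1)))
'2020-01-01T00:00:00'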
|
def min_distance_internal(self, return_names=False, return_data=False):
ddata = fcl.DistanceData()
if return_data:
ddata = fcl.DistanceData(
fcl.DistanceRequest(enable_nearest_points=True),
fcl.DistanceResult()
)
self._manager.distance(ddata, fcl.defaultDistanceCallback)
distance = ddata.result.min_distance
names, data = None, None
if return_names or return_data:
names = (self._extract_name(ddata.result.o1),
self._extract_name(ddata.result.o2))
data = DistanceData(names, ddata.result)
names = tuple(sorted(names))
if return_names and return_data:
return distance, names, data
elif return_names:
return distance, names
elif return_data:
return distance, data
else:
return distance
|
Get the minimum distance between any pair of objects in the manager.
Parameters
-------------
return_names : bool
If true, a 2-tuple is returned containing the names
of the closest objects.
return_data : bool
If true, a DistanceData object is returned as well
Returns
-----------
distance : float
Min distance between any two managed objects
names : (2,) str
The names of the closest objects
data : DistanceData
Extra data about the distance query
|
def publish(obj, event, event_state, **kwargs):
if len(EVENT_HANDLERS) == 0:
return
if inspect.isclass(obj):
pub_cls = obj
else:
pub_cls = obj.__class__
potential = [x.__name__ for x in inspect.getmro(pub_cls)]
fallbacks = None
callbacks = []
for cls in potential:
event_key = '.'.join([cls, event, event_state])
backup_key = '.'.join([cls, event, states.ANY])
if event_key in EVENT_HANDLERS:
callbacks = EVENT_HANDLERS[event_key]
break
elif fallbacks is None and backup_key in EVENT_HANDLERS:
fallbacks = EVENT_HANDLERS[backup_key]
if fallbacks is not None:
callbacks = fallbacks
for callback in callbacks:
callback(obj, **kwargs)
return
|
Publish an event from an object.
This is a really basic pub-sub event system to allow for tracking progress
on methods externally. It fires the events for the first match it finds in
the object hierarchy, going most specific to least. If no match is found
for the exact event+event_state, the most specific event+ANY is fired
instead.
Multiple callbacks can be bound to the event+event_state if desired. All
will be fired in the order they were registered.
|
def add_initial(initial_input):
stateful = {
'py_name': utils.make_python_identifier('_initial_%s' % initial_input)[0],
'real_name': 'Smooth of %s' % initial_input,
'doc': 'Returns the value taken on during the initialization phase',
'py_expr': 'functions.Initial(lambda: %s)' % (
initial_input),
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
}
return "%s()" % stateful['py_name'], [stateful]
|
Constructs a stateful object for handling vensim's 'Initial' functionality
Parameters
----------
initial_input: basestring
The expression which will be evaluated, and the first value of which returned
Returns
-------
reference: basestring
reference to the Initial object `__call__` method,
which will return the first calculated value of `initial_input`
new_structure: list
list of element construction dictionaries for the builder to assemble
|
def _GetMetric(self, metric_name):
if metric_name in self._counter_metrics:
return self._counter_metrics[metric_name]
elif metric_name in self._event_metrics:
return self._event_metrics[metric_name]
elif metric_name in self._gauge_metrics:
return self._gauge_metrics[metric_name]
else:
raise ValueError("Metric %s is not registered." % metric_name)
|
Fetches the metric object corresponding to the given name.
|
def get_daemons(self, daemon_name=None, daemon_type=None):
if daemon_name is not None:
sections = self._search_sections('daemon.%s' % daemon_name)
if 'daemon.%s' % daemon_name in sections:
return sections['daemon.' + daemon_name]
return {}
if daemon_type is not None:
sections = self._search_sections('daemon.')
for name, daemon in list(sections.items()):
if 'type' not in daemon or not daemon['type'] == daemon_type:
sections.pop(name)
return sections
return self._search_sections('daemon.')
|
Get the daemons configuration parameters
If name is provided, get the configuration for this daemon, else,
If type is provided, get the configuration for all the daemons of this type, else
get the configuration of all the daemons.
:param daemon_name: the searched daemon name
:param daemon_type: the searched daemon type
:return: a dict containing the daemon(s) configuration parameters
|
def insert(self, _values=None, **values):
if not values and not _values:
return True
if not isinstance(_values, list):
if _values is not None:
values.update(_values)
values = [values]
else:
values = _values
for i, value in enumerate(values):
values[i] = OrderedDict(sorted(value.items()))
bindings = []
for record in values:
for value in record.values():
bindings.append(value)
sql = self._grammar.compile_insert(self, values)
bindings = self._clean_bindings(bindings)
return self._connection.insert(sql, bindings)
|
Insert a new record into the database
:param _values: The new record values
:type _values: dict or list
:param values: The new record values as keyword arguments
:type values: dict
:return: The result
:rtype: bool
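A usage sketch, assuming ``builder`` is a query builder instance bound to a users table (hypothetical setup and data):
>>> builder.insert({'email': 'ada@example.com', 'name': 'Ada'})  # doctest: +SKIP
True
>>> builder.insert([{'name': 'Ada'}, {'name': 'Grace'}])  # doctest: +SKIP
True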
|
def _get_hdds(vm_):
_hdds = config.get_cloud_config_value(
'hdds', vm_, __opts__, default=None,
search_global=False
)
hdds = []
for hdd in _hdds:
hdds.append(
Hdd(
size=hdd['size'],
is_main=hdd['is_main']
)
)
return hdds
|
Construct VM hdds from cloud profile config
|
def parse_rdp_assignment(line):
toks = line.strip().split('\t')
seq_id = toks.pop(0)
direction = toks.pop(0)
if ((len(toks) % 3) != 0):
raise ValueError(
"Expected assignments in a repeating series of (rank, name, "
"confidence), received %s" % toks)
assignments = []
itoks = iter(toks)
for taxon, rank, confidence_str in zip(itoks, itoks, itoks):
if not taxon:
continue
assignments.append((taxon.strip('"'), rank, float(confidence_str)))
return seq_id, direction, assignments
|
Returns a list of assigned taxa from an RDP classification line
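A worked example on a hypothetical tab-delimited classification line:
>>> line = 'seq1\t+\t"Bacteria"\tdomain\t1.0\t"Proteobacteria"\tphylum\t0.95'
>>> parse_rdp_assignment(line)
('seq1', '+', [('Bacteria', 'domain', 1.0), ('Proteobacteria', 'phylum', 0.95)])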
|
def __default(self, ast_token):
if self.list_level == 1:
if self.list_entry is None:
self.list_entry = ast_token
elif not isinstance(ast_token, type(self.list_entry)):
self.final_ast_tokens.append(ast_token)
elif self.list_level == 0:
self.final_ast_tokens.append(ast_token)
|
Handle tokens inside the list or outside the list.
|
def _activate_URI(self, selfLinkuri):
uri = urlparse.urlsplit(str(self._meta_data['bigip']._meta_data['uri']))
attribute_reg = self._meta_data.get('attribute_registry', {})
attrs = list(itervalues(attribute_reg))
attrs = self._assign_stats(attrs)
(scheme, domain, path, qarg, frag) = urlparse.urlsplit(selfLinkuri)
path_uri = urlparse.urlunsplit((scheme, uri.netloc, path, '', ''))
if not path_uri.endswith('/'):
path_uri = path_uri + '/'
qargs = urlparse.parse_qs(qarg)
self._meta_data.update({'uri': path_uri,
'creation_uri_qargs': qargs,
'creation_uri_frag': frag,
'allowed_lazy_attributes': attrs})
|
Call this with a selfLink, after it's returned in _create or _load.
Each instance is tightly bound to a particular service URI. When that
service is created by this library, or loaded from the device, the URI
is set to self._meta_data['uri']. This operation can only occur once,
any subsequent attempt to manipulate self._meta_data['uri'] is
probably a mistake.
self.selfLink references a value that is returned as a JSON value from
the device. This value contains "localhost" as the domain or the uri.
"localhost" is only conceivably useful if the client library is run on
the device itself, so it is replaced with the domain this API used to
communicate with the device.
self.selfLink correctly contains a complete uri, that is only _now_
(post create or load) available to self.
Now that the complete URI is available to self, it is now possible to
reference subcollections, as attributes of self!
e.g. a resource with a uri path like:
"/mgmt/tm/ltm/pool/~Common~pool_collection1/members"
The mechanism used to enable this change is to set
the `allowed_lazy_attributes` _meta_data key to hold values of the
`attribute_registry` _meta_data key.
Finally we stash the corrected `uri`, returned hash_fragment, query
args, and of course allowed_lazy_attributes in _meta_data.
:param selfLinkuri: the server provided selfLink (contains localhost)
:raises: URICreationCollision
|
def get_term_proportions(dtm):
unnorm = get_term_frequencies(dtm)
if unnorm.sum() == 0:
raise ValueError('`dtm` does not contain any tokens (is all-zero)')
else:
return unnorm / unnorm.sum()
|
Return the term proportions given the document-term matrix `dtm`
|
def remove_all_gap_columns( self ):
seqs = []
for c in self.components:
try:
seqs.append( list( c.text ) )
except TypeError:
seqs.append( None )
i = 0
text_size = self.text_size
while i < text_size:
all_gap = True
for seq in seqs:
if seq is None: continue
if seq[i] != '-': all_gap = False
if all_gap:
for seq in seqs:
if seq is None: continue
del seq[i]
text_size -= 1
else:
i += 1
for i in range( len( self.components ) ):
if seqs[i] is None: continue
self.components[i].text = ''.join( seqs[i] )
self.text_size = text_size
|
Remove any columns containing only gaps from alignment components,
text of components is modified IN PLACE.
|
def setup_omero_cli(self):
if not self.dir:
raise Exception('No server directory set')
if 'omero.cli' in sys.modules:
raise Exception('omero.cli can only be imported once')
log.debug("Setting up omero CLI")
lib = os.path.join(self.dir, "lib", "python")
if not os.path.exists(lib):
raise Exception("%s does not exist!" % lib)
sys.path.insert(0, lib)
import omero
import omero.cli
log.debug("Using omero CLI from %s", omero.cli.__file__)
self.cli = omero.cli.CLI()
self.cli.loadplugins()
self._omero = omero
|
Imports the omero CLI module so that commands can be run directly.
Note Python does not allow a module to be imported multiple times,
so this will only work with a single omero instance.
This can have several surprising effects, so setup_omero_cli()
must be explicitly called.
|
def is_allowed(func):
@wraps(func)
def _is_allowed(user, *args, **kwargs):
password = kwargs.pop('password', None)
if user.check_password(password):
return func(user, *args, **kwargs)
else:
raise NotAllowedError()
sig = inspect.signature(func)
parms = list(sig.parameters.values())
parms.append(inspect.Parameter('password',
inspect.Parameter.KEYWORD_ONLY,
default=None))
_is_allowed.__signature__ = sig.replace(parameters=parms)
return _is_allowed
|
Check the user password; when it is correct, run the decorated function.
:returns: decorated function
|
def process_response(self, request_id=None):
self.__errors = []
self.__error_reason = None
if 'post_data' in self.__request_data and 'SAMLResponse' in self.__request_data['post_data']:
response = OneLogin_Saml2_Response(self.__settings, self.__request_data['post_data']['SAMLResponse'])
self.__last_response = response.get_xml_document()
if response.is_valid(self.__request_data, request_id):
self.__attributes = response.get_attributes()
self.__nameid = response.get_nameid()
self.__nameid_format = response.get_nameid_format()
self.__session_index = response.get_session_index()
self.__session_expiration = response.get_session_not_on_or_after()
self.__last_message_id = response.get_id()
self.__last_assertion_id = response.get_assertion_id()
self.__last_authn_contexts = response.get_authn_contexts()
self.__authenticated = True
self.__last_assertion_not_on_or_after = response.get_assertion_not_on_or_after()
else:
self.__errors.append('invalid_response')
self.__error_reason = response.get_error()
else:
self.__errors.append('invalid_binding')
raise OneLogin_Saml2_Error(
'SAML Response not found, Only supported HTTP_POST Binding',
OneLogin_Saml2_Error.SAML_RESPONSE_NOT_FOUND
)
|
Process the SAML Response sent by the IdP.
:param request_id: Is an optional argument. Is the ID of the AuthNRequest sent by this SP to the IdP.
:type request_id: string
:raises: OneLogin_Saml2_Error.SAML_RESPONSE_NOT_FOUND, when a POST with a SAMLResponse is not found
|
def sr1(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs):
s = conf.L3socket(promisc=promisc, filter=filter,
nofilter=nofilter, iface=iface)
ans, _ = sndrcv(s, x, *args, **kargs)
s.close()
if len(ans) > 0:
return ans[0][1]
else:
return None
|
Send packets at layer 3 and return only the first answer
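A typical usage sketch (the destination address is a documentation placeholder):
>>> from scapy.all import IP, ICMP
>>> reply = sr1(IP(dst="192.0.2.1")/ICMP(), timeout=2)  # doctest: +SKIP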
|
def send_confirmation():
form_class = _security.send_confirmation_form
if request.is_json:
form = form_class(MultiDict(request.get_json()))
else:
form = form_class()
if form.validate_on_submit():
send_confirmation_instructions(form.user)
if not request.is_json:
do_flash(*get_message('CONFIRMATION_REQUEST',
email=form.user.email))
if request.is_json:
return _render_json(form)
return _security.render_template(
config_value('SEND_CONFIRMATION_TEMPLATE'),
send_confirmation_form=form,
**_ctx('send_confirmation')
)
|
View function which sends confirmation instructions.
|
def data_parent(self) -> Optional["InternalNode"]:
parent = self.parent
while parent:
if isinstance(parent, DataNode):
return parent
parent = parent.parent
|
Return the closest ancestor data node.
|
def export_original_data(self):
def export_field(value):
try:
return value.export_original_data()
except AttributeError:
return value
return [export_field(val) for val in self.__original_data__]
|
Retrieves the original_data
|
def rebalance_replicas(
self,
max_movement_count=None,
max_movement_size=None,
):
movement_count = 0
movement_size = 0
for partition in six.itervalues(self.cluster_topology.partitions):
count, size = self._rebalance_partition_replicas(
partition,
None if not max_movement_count
else max_movement_count - movement_count,
None if not max_movement_size
else max_movement_size - movement_size,
)
movement_count += count
movement_size += size
return movement_count, movement_size
|
Balance replicas across replication-groups.
:param max_movement_count: The maximum number of partitions to move.
:param max_movement_size: The maximum total size of the partitions to move.
:returns: A 2-tuple whose first element is the number of partitions moved
and whose second element is the total size of the partitions moved.
|
def _copy_body_to_tempfile(cls, environ):
try:
length = int(environ.get('CONTENT_LENGTH', 0))
except ValueError:
length = 0
try:
fileobj = tempfile.SpooledTemporaryFile(1024*1024)
except AttributeError:
fileobj = tempfile.TemporaryFile()
if length:
remaining = length
while remaining > 0:
data = environ['wsgi.input'].read(min(remaining, 65536))
if not data:
raise IOError(
"Client disconnected (%s more bytes were expected)"
% remaining
)
fileobj.write(data)
remaining -= len(data)
fileobj.seek(0)
environ['wsgi.input'] = fileobj
return fileobj, length
|
Copy wsgi.input to a tempfile so it can be reused.
|
def electric_succeptibility(l, Ep, epsilonp, rm, n, rho, unfolding, part=0):
epsilonm = epsilonp.conjugate()
rp = np.array([rm[i].transpose().conjugate() for i in range(3)])
if part == 1:
op = cartesian_dot_product(rp, epsilonm[0])
op += cartesian_dot_product(rm, epsilonp[0])
op = -e_num*n/epsilon_0_num/np.abs(Ep[0])*op
elif part == -1:
op = cartesian_dot_product(rm, epsilonp[0])
op += - cartesian_dot_product(rp, epsilonm[0])
op = -1j*e_num*n/epsilon_0_num/np.abs(Ep[0])*op
elif part == 0:
chire = electric_succeptibility(l, Ep, epsilonp, rm,
n, rho, unfolding, +1)
chiim = electric_succeptibility(l, Ep, epsilonp, rm,
n, rho, unfolding, -1)
return chire + 1j*chiim
return np.real(observable(op, rho, unfolding))
|
r"""Return the electric succeptibility for a given field.
INPUT:
- ``l`` - The index labeling the probe field.
- ``Ep`` - A list of the amplitudes of all pump fields.
- ``epsilonp`` - The polarization vector of the probe field.
- ``rm`` - The below-diagonal components of the position operator \
in the cartesian basis:
- ``n`` - The number density of atoms.
- ``rho`` - A density matrix in unfolded format, or a list of such \
density matrices.
- ``unfolding`` - A mapping from matrix element indices to unfolded \
indices.
>>> import numpy as np
>>> from sympy import symbols
>>> from scipy.constants import physical_constants
>>> from fast import vapour_number_density
>>> e_num = physical_constants["elementary charge"][0]
>>> hbar_num = physical_constants["Planck constant over 2 pi"][0]
>>> Ne = 2
>>> Nl = 1
>>> Ep = [-1.0]
>>> epsilonp = np.array([[0, 0, 1.0]])
>>> delta = symbols("delta")
>>> detuning_knob = [delta]
>>> gamma = np.array([[0.0, -1.0], [1.0, 0.0]])
>>> omega_level = np.array([0.0, 100.0])
>>> rm = [np.array([[0.0, 0.0], [1.0, 0.0]])*hbar_num/e_num
... for p in range(3)]
>>> xi = np.array([[[0, 1], [1, 0]]])
>>> theta = phase_transformation(Ne, Nl, rm, xi)
>>> sweep_steady_state = fast_sweep_steady_state(Ep, epsilonp, gamma,
... omega_level, rm, xi,
... theta)
>>> deltas, rho = sweep_steady_state([[-20, 20, 11]])
>>> n = vapour_number_density(273.15+20, "Rb")
>>> unfolding = Unfolding(Ne, True, True, True)
>>> chire = electric_succeptibility(0, Ep, epsilonp, rm, n,
... rho, unfolding)
>>> print(chire)
[ 4.4824e-09-1.1206e-10j 5.5971e-09-1.7491e-10j 7.4459e-09-3.1024e-10j
1.1097e-08-6.9356e-10j 2.1449e-08-2.6811e-09j 0.0000e+00-5.9877e-08j
-2.1449e-08-2.6811e-09j -1.1097e-08-6.9356e-10j -7.4459e-09-3.1024e-10j
-5.5971e-09-1.7491e-10j -4.4824e-09-1.1206e-10j]
|
def get_ip_scope(auth, url, scopeid=None, ):
if scopeid is None:
get_ip_scope_url = "/imcrs/res/access/assignedIpScope"
else:
get_ip_scope_url = "/imcrs/res/access/assignedIpScope/ip?ipScopeId=" + str(scopeid)
f_url = url + get_ip_scope_url
response = requests.get(f_url, auth=auth, headers=HEADERS)
try:
if response.status_code == 200:
ipscopelist = (json.loads(response.text))['assignedIpScope']
if isinstance(ipscopelist, list):
return ipscopelist
elif isinstance(ipscopelist, dict):
return [ipscopelist]
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " get_ip_scope: An Error has occured"
|
Function takes the authentication credentials and base URL and returns all IP address scopes
currently configured on the HPE IMC server. If the optional scopeid parameter is included,
only the desired scope is returned.
:param scopeid: integer of the desired scope id ( optional )
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list of dictionary objects where each element of the list represents one IP scope
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> ip_scope_list = get_ip_scope(auth.creds, auth.url)
>>> assert type(ip_scope_list) is list
>>> assert 'ip' in ip_scope_list[0]
|
def retrieve_diaspora_hcard(handle):
webfinger = retrieve_and_parse_diaspora_webfinger(handle)
document, code, exception = fetch_document(webfinger.get("hcard_url"))
if exception:
return None
return document
|
Retrieve a remote Diaspora hCard document.
:arg handle: Remote handle to retrieve
:return: str (HTML document)
|
def make_tophat_ii (lower, upper):
if not np.isfinite (lower):
raise ValueError ('"lower" argument must be finite number; got %r' % lower)
if not np.isfinite (upper):
raise ValueError ('"upper" argument must be finite number; got %r' % upper)
def range_tophat_ii (x):
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = ((lower <= x1) & (x1 <= upper)).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r
range_tophat_ii.__doc__ = ('Ranged tophat function, left-inclusive and '
'right-inclusive. Returns 1 if %g <= x <= %g, '
'0 otherwise.') % (lower, upper)
return range_tophat_ii
|
Return a ufunc-like tophat function on the defined range, left-inclusive
and right-inclusive. Returns 1 if lower <= x <= upper, 0 otherwise.
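A short worked example of the inclusive endpoints:
>>> import numpy as np
>>> f = make_tophat_ii(0., 1.)
>>> f(np.array([-0.5, 0.0, 0.5, 1.0, 1.5]))
array([0., 1., 1., 1., 0.])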
|
def frigg(branch: str):
assert os.environ.get('FRIGG_BUILD_BRANCH') == branch
assert not os.environ.get('FRIGG_PULL_REQUEST')
|
Performs necessary checks to ensure that the frigg build is one
that should create releases.
:param branch: The branch the environment should be running against.
|
def set_projection(self, val):
knownproj = ["SIN", "ZEA", "TAN", "NCP", "AIT", "ZEA"]
assert val.upper() in knownproj
self._coord["projection"] = val.upper()
|
Set the projection of the given axis in this coordinate.
The known projections are SIN, ZEA, TAN, NCP, and AIT.
|
def save(self):
active_language = get_language()
for (name, value) in self.cleaned_data.items():
if name not in registry:
name, code = name.rsplit('_modeltranslation_', 1)
else:
code = None
setting_obj, created = Setting.objects.get_or_create(name=name)
if settings.USE_MODELTRANSLATION:
if registry[name]["translatable"]:
try:
activate(code)
except:
pass
finally:
setting_obj.value = value
activate(active_language)
else:
for code in OrderedDict(settings.LANGUAGES):
setattr(setting_obj,
build_localized_fieldname('value', code),
value)
else:
setting_obj.value = value
setting_obj.save()
|
Save each of the settings to the DB.
|
def import_variables(self, container, varnames=None):
if varnames is None:
for keyword in self.tkvariables:
setattr(container, keyword, self.tkvariables[keyword])
else:
for keyword in varnames:
if keyword in self.tkvariables:
setattr(container, keyword, self.tkvariables[keyword])
|
Helper method to avoid calling get_variable for every variable.
|
def get_dates_for_project(self, project):
file_re = re.compile(r'^%s_([0-9]{8})\.json$' % project)
all_dates = []
for f in os.listdir(self.cache_path):
if not os.path.isfile(os.path.join(self.cache_path, f)):
continue
m = file_re.match(f)
if m is None:
continue
all_dates.append(datetime.strptime(m.group(1), '%Y%m%d'))
return sorted(all_dates)
|
Return a list of the dates we have in cache for the specified project,
sorted in ascending date order.
:param project: project name
:type project: str
:return: list of datetime.datetime objects
:rtype: list of datetime.datetime
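A small sketch of the filename pattern being matched (project name and file are hypothetical):
>>> import re
>>> file_re = re.compile(r'^%s_([0-9]{8})\.json$' % 'myproject')
>>> file_re.match('myproject_20230115.json').group(1)
'20230115'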
|
def _init_deferred_buffers(self):
self._transfer_list = collections.deque()
self._crnt_cmd = _Command(self._packet_size)
self._commands_to_read = collections.deque()
self._command_response_buf = bytearray()
|
Initialize or reinitialize all the deferred transfer buffers
Calling this method will drop all pending transactions
so use with care.
|
def new_get_angle_diff(v1,v2):
v1 = numpy.array(v1)
v2 = numpy.array(v2)
angle = numpy.arctan2(numpy.linalg.norm(numpy.cross(v1, v2)), numpy.dot(v1, v2))
return math.degrees(angle)
|
Returns the angular difference in degrees between two vectors. May be more precise in certain cases; see SPD.
|
def addHandler(self, handler):
self._handlers.append(handler)
self.inner.addHandler(handler)
|
Sets up a new internal logging handler. For fastlog loggers,
handlers are kept track of in the self._handlers list
|
def get_local_file_dist(self):
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
|
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
|
def _ReadParserPresetsFromFile(self):
self._presets_file = os.path.join(
self._data_location, self._PRESETS_FILE_NAME)
if not os.path.isfile(self._presets_file):
raise errors.BadConfigOption(
'No such parser presets file: {0:s}.'.format(self._presets_file))
try:
parsers_manager.ParsersManager.ReadPresetsFromFile(self._presets_file)
except errors.MalformedPresetError as exception:
raise errors.BadConfigOption(
'Unable to read presets from file with error: {0!s}'.format(
exception))
|
Reads the parser presets from the presets.yaml file.
Raises:
BadConfigOption: if the parser presets file cannot be read.
|
def wait(hotkey=None, suppress=False, trigger_on_release=False):
if hotkey:
lock = _Event()
remove = add_hotkey(hotkey, lambda: lock.set(), suppress=suppress, trigger_on_release=trigger_on_release)
lock.wait()
remove_hotkey(remove)
else:
while True:
_time.sleep(1e6)
|
Blocks the program execution until the given hotkey is pressed or,
if given no parameters, blocks forever.
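A usage sketch mirroring how the module-level wait is typically called (the hotkey string is arbitrary):
>>> wait('ctrl+shift+q')  # doctest: +SKIP
>>> wait()                # doctest: +SKIP
The first call blocks until the hotkey fires; the second blocks forever.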
|
def spkcov(spk, idcode, cover=None):
spk = stypes.stringToCharP(spk)
idcode = ctypes.c_int(idcode)
if cover is None:
cover = stypes.SPICEDOUBLE_CELL(2000)
else:
assert isinstance(cover, stypes.SpiceCell)
assert cover.is_double()
libspice.spkcov_c(spk, idcode, ctypes.byref(cover))
return cover
|
Find the coverage window for a specified ephemeris object in a
specified SPK file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkcov_c.html
:param spk: Name of SPK file.
:type spk: str
:param idcode: ID code of ephemeris object.
:type idcode: int
:param cover: Optional SPICE Window giving coverage in "spk" for "idcode".
:type cover: spiceypy.utils.support_types.SpiceCell
|
def total_charges(self):
selected_charges = Charge.objects \
.filter(invoice=self) \
.charges() \
.exclude(product_code=CARRIED_FORWARD)
return total_amount(selected_charges)
|
Represents the 'goods' acquired in the invoice.
|
def _check_box_toggled(self, widget, data=None):
active = widget.get_active()
arg_name = data
if 'entry' in self.args[arg_name]:
self.args[arg_name]['entry'].set_sensitive(active)
if 'browse_btn' in self.args[arg_name]:
self.args[arg_name]['browse_btn'].set_sensitive(active)
self.path_window.show_all()
|
Enables or disables the associated entry and browse button when the check box is toggled.
|
def group_default_invalidator(self, obj):
user_pks = User.objects.values_list('pk', flat=True)
return [('User', pk, False) for pk in user_pks]
|
Invalidates cached items when the Group changes.
|
def adjustMask(self):
if self.currentMode() == XPopupWidget.Mode.Dialog:
self.clearMask()
return
path = self.borderPath()
bitmap = QBitmap(self.width(), self.height())
bitmap.fill(QColor('white'))
with XPainter(bitmap) as painter:
painter.setRenderHint(XPainter.Antialiasing)
pen = QPen(QColor('black'))
pen.setWidthF(0.75)
painter.setPen(pen)
painter.setBrush(QColor('black'))
painter.drawPath(path)
self.setMask(bitmap)
|
Updates the alpha mask for this popup widget.
|
def unload_plugin(name, category=None):
if category is not None:
_all_plugins[category].pop(name)
else:
for cat in _all_plugins:
if name in _all_plugins[cat]:
_all_plugins[cat].pop(name)
|
remove single plugin
Parameters
----------
name : str
plugin name
category : str
plugin category
Examples
--------
>>> from pprint import pprint
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> errors = load_plugin_classes([DecoderPlugin],category='decoders')
>>> pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> unload_plugin('example','decoders')
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
|