def modifiedaminoacids(df, kind='pie'):
"""
Generate a plot of relative numbers of modified amino acids in source DataFrame.
Plot a pie or bar chart showing the number and percentage of modified amino
acids in the supplied data frame. The amino acids displayed will be
determined from the supplied data/modification type.
:param df: processed DataFrame
    :param kind: `str` type of plot; either "pie", "bar" or "both"
:return: matplotlib ax
"""
colors = ['#6baed6','#c6dbef','#bdbdbd']
total_aas, quants = analysis.modifiedaminoacids(df)
df = pd.DataFrame()
for a, n in quants.items():
df[a] = [n]
df.sort_index(axis=1, inplace=True)
if kind == 'bar' or kind == 'both':
ax1 = df.plot(kind='bar', figsize=(7,7), color=colors)
ax1.set_ylabel('Number of phosphorylated amino acids')
ax1.set_xlabel('Amino acid')
ax1.set_xticks([])
ylim = np.max(df.values)+1000
ax1.set_ylim(0, ylim )
_bartoplabel(ax1, 100*df.values[0], total_aas, ylim )
ax1.set_xlim((-0.3, 0.3))
        if kind == 'bar':
            return ax1
if kind == 'pie' or kind == 'both':
        dfp = df.T
residues = dfp.index.values
dfp.index = ["%.2f%% (%d)" % (100*df[i].values[0]/total_aas, df[i].values[0]) for i in dfp.index.values ]
ax2 = dfp.plot(kind='pie', y=0, colors=colors)
ax2.legend(residues, loc='upper left', bbox_to_anchor=(1.0, 1.0))
ax2.set_ylabel('')
ax2.set_xlabel('')
ax2.figure.set_size_inches(6,6)
for t in ax2.texts:
t.set_fontsize(15)
        if kind == 'pie':
            return ax2
    return ax1, ax2
def assert_angles_allclose(x, y, **kwargs):
"""
Like numpy's assert_allclose, but for angles (in radians).
"""
c2 = (np.sin(x)-np.sin(y))**2 + (np.cos(x)-np.cos(y))**2
diff = np.arccos((2.0 - c2)/2.0) # a = b = 1
    assert np.allclose(diff, 0.0, **kwargs)
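# A brief usage sketch (illustrative, not from the source library): angles
# that differ by a full turn compare as equal.
import numpy as np

assert_angles_allclose(np.array([0.0, np.pi / 2]),
                       np.array([2 * np.pi, np.pi / 2]))  # passes: 0 == 2*pi (mod 2*pi)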
def twoDimensionalScatter(title, title_x, title_y,
x, y,
lim_x = None, lim_y = None,
color = 'b', size = 20, alpha=None):
"""
Create a two-dimensional scatter plot.
    INPUTS
        title, title_x, title_y : plot title and axis labels
        x, y : coordinates of the points to plot
        lim_x, lim_y : optional (min, max) axis limits
        color, size, alpha : passed to matplotlib's scatter; a non-string
            color triggers a colorbar
"""
plt.figure()
plt.scatter(x, y, c=color, s=size, alpha=alpha, edgecolors='none')
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title)
if type(color) is not str:
plt.colorbar()
if lim_x:
plt.xlim(lim_x[0], lim_x[1])
if lim_y:
        plt.ylim(lim_y[0], lim_y[1])
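# A hedged usage sketch (assumes matplotlib and numpy are installed and the
# function above is in scope); a non-string color triggers the colorbar branch.
import numpy as np

x = np.random.randn(200)
y = 2.0 * x + np.random.randn(200)
twoDimensionalScatter('demo scatter', 'x value', 'y value', x, y,
                      color=y, size=15, alpha=0.6)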
def _compute_projection_filters(G, sf, estimated_source):
"""Least-squares projection of estimated source on the subspace spanned by
delayed versions of reference sources, with delays between 0 and
filters_len-1
"""
# epsilon
    eps = np.finfo(float).eps  # np.float was removed in recent NumPy releases
# shapes
(nsampl, nchan) = estimated_source.shape
# handles the case where we are calling this with only one source
# G should be nsrc X nsrc X nchan X nchan X filters_len X filters_len
# and sf should be nsrc X nchan X filters_len
if len(G.shape) == 4:
G = G[None, None, ...]
sf = sf[None, ...]
nsrc = G.shape[0]
filters_len = G.shape[-1]
# zero pad estimates and put chan in first dimension
estimated_source = _zeropad(estimated_source.T, filters_len - 1, axis=1)
# compute its FFT
n_fft = int(2**np.ceil(np.log2(nsampl + filters_len - 1.)))
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# compute the cross-correlations between sources and estimates
D = np.zeros((nsrc, nchan, filters_len, nchan))
for (j, cj, c) in itertools.product(
list(range(nsrc)), list(range(nchan)), list(range(nchan))
):
ssef = sf[j, cj] * np.conj(sef[c])
ssef = np.real(scipy.fftpack.ifft(ssef))
D[j, cj, :, c] = np.hstack((ssef[0], ssef[-1:-filters_len:-1]))
# reshape matrices to build the filters
D = D.reshape(nsrc * nchan * filters_len, nchan)
G = _reshape_G(G)
# Distortion filters
try:
C = np.linalg.solve(G + eps*np.eye(G.shape[0]), D).reshape(
nsrc, nchan, filters_len, nchan
)
    except np.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(
nsrc, nchan, filters_len, nchan
)
# if we asked for one single reference source,
# return just a nchan X filters_len matrix
if nsrc == 1:
C = C[0]
    return C
def field(ctx, text, index, delimiter=' '):
"""
    Reference a field in a string separated by a delimiter
"""
splits = text.split(delimiter)
# remove our delimiters and whitespace
splits = [f for f in splits if f != delimiter and len(f.strip()) > 0]
index = conversions.to_integer(index, ctx)
if index < 1:
raise ValueError('Field index cannot be less than 1')
if index <= len(splits):
return splits[index-1]
else:
        return ''
def work_get(self, wallet, account):
"""
Retrieves work for **account** in **wallet**
.. enable_control required
.. version 8.0 required
:param wallet: Wallet to get account work for
:type wallet: str
:param account: Account to get work for
:type account: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.work_get(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... account="xrb_1111111111111111111111111111111111111111111111111111hifc8npp"
... )
"432e5cf728c90f4f"
"""
wallet = self._process_value(wallet, 'wallet')
account = self._process_value(account, 'account')
payload = {"wallet": wallet, "account": account}
resp = self.call('work_get', payload)
    return resp['work']
def chunks(arr, size):
"""Splits a list into chunks
:param arr: list to split
:type arr: :class:`list`
:param size: number of elements in each chunk
:type size: :class:`int`
:return: generator object
:rtype: :class:`generator`
"""
for i in _range(0, len(arr), size):
        yield arr[i:i+size]
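# Doctest-style sketch of the helper above (assuming `_range` is the module's
# range alias); the final chunk simply holds whatever is left over:
#
# >>> list(chunks([1, 2, 3, 4, 5], 2))
# [[1, 2], [3, 4], [5]]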
def post(self, request, *args, **kwargs):
"""
    Handle the data for posting a quick entry,
and redirect to the admin in case of error or
to the entry's page in case of success.
"""
now = timezone.now()
data = {
'title': request.POST.get('title'),
'slug': slugify(request.POST.get('title')),
'status': DRAFT if 'save_draft' in request.POST else PUBLISHED,
'sites': [Site.objects.get_current().pk],
'authors': [request.user.pk],
'content_template': 'zinnia/_entry_detail.html',
'detail_template': 'entry_detail.html',
'publication_date': now,
'creation_date': now,
'last_update': now,
'content': request.POST.get('content'),
'tags': request.POST.get('tags')}
form = QuickEntryForm(data)
if form.is_valid():
form.instance.content = self.htmlize(form.cleaned_data['content'])
entry = form.save()
return redirect(entry)
data = {'title': smart_str(request.POST.get('title', '')),
'content': smart_str(self.htmlize(
request.POST.get('content', ''))),
'tags': smart_str(request.POST.get('tags', '')),
'slug': slugify(request.POST.get('title', '')),
'authors': request.user.pk,
'sites': Site.objects.get_current().pk}
return redirect('%s?%s' % (reverse('admin:zinnia_entry_add'),
                               urlencode(data)))
def getCmd(snmpEngine, authData, transportTarget, contextData,
*varBinds, **options):
"""Creates a generator to perform one or more SNMP GET queries.
    On each iteration, a new SNMP GET request is sent (:RFC:`1905#section-4.2.1`).
The iterator blocks waiting for response to arrive or error to occur.
Parameters
----------
snmpEngine : :py:class:`~pysnmp.hlapi.SnmpEngine`
Class instance representing SNMP engine.
authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
contextData : :py:class:`~pysnmp.hlapi.ContextData`
Class instance representing SNMP ContextEngineId and ContextName values.
\*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
Yields
------
errorIndication : str
True value indicates SNMP engine error.
errorStatus : str
True value indicates SNMP PDU error.
errorIndex : int
Non-zero value refers to `varBinds[errorIndex-1]`
varBinds : tuple
A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
instances representing MIB variables returned in SNMP response.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Notes
-----
    The `getCmd` generator will be exhausted immediately unless
    a new sequence of `varBinds` is sent back into the running generator
    (supported since Python 2.6).
Examples
--------
>>> from pysnmp.hlapi import *
>>> g = getCmd(SnmpEngine(),
... CommunityData('public'),
... UdpTransportTarget(('demo.snmplabs.com', 161)),
... ContextData(),
... ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))
>>> next(g)
(None, 0, 0, [ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))])
>>>
"""
# noinspection PyShadowingNames
def cbFun(snmpEngine, sendRequestHandle,
errorIndication, errorStatus, errorIndex,
varBinds, cbCtx):
cbCtx['errorIndication'] = errorIndication
cbCtx['errorStatus'] = errorStatus
cbCtx['errorIndex'] = errorIndex
cbCtx['varBinds'] = varBinds
cbCtx = {}
while True:
if varBinds:
cmdgen.getCmd(snmpEngine, authData, transportTarget,
contextData, *varBinds,
cbFun=cbFun, cbCtx=cbCtx,
lookupMib=options.get('lookupMib', True))
snmpEngine.transportDispatcher.runDispatcher()
errorIndication = cbCtx['errorIndication']
errorStatus = cbCtx['errorStatus']
errorIndex = cbCtx['errorIndex']
varBinds = cbCtx['varBinds']
else:
errorIndication = errorStatus = errorIndex = None
varBinds = []
varBinds = (yield errorIndication, errorStatus, errorIndex, varBinds)
if not varBinds:
            break
def wrap_requests(requests_func):
"""Wrap the requests function to trace it."""
def call(url, *args, **kwargs):
blacklist_hostnames = execution_context.get_opencensus_attr(
'blacklist_hostnames')
parsed_url = urlparse(url)
if parsed_url.port is None:
dest_url = parsed_url.hostname
else:
dest_url = '{}:{}'.format(parsed_url.hostname, parsed_url.port)
if utils.disable_tracing_hostname(dest_url, blacklist_hostnames):
return requests_func(url, *args, **kwargs)
_tracer = execution_context.get_opencensus_tracer()
_span = _tracer.start_span()
_span.name = '[requests]{}'.format(requests_func.__name__)
_span.span_kind = span_module.SpanKind.CLIENT
# Add the requests url to attributes
_tracer.add_attribute_to_current_span(HTTP_URL, url)
result = requests_func(url, *args, **kwargs)
# Add the status code to attributes
_tracer.add_attribute_to_current_span(
HTTP_STATUS_CODE, str(result.status_code))
_tracer.end_span()
return result
    return call
def getcols(sheetMatch=None,colMatch="Decay"):
"""find every column in every sheet and put it in a new sheet or book."""
book=BOOK()
if sheetMatch is None:
matchingSheets=book.sheetNames
print('all %d sheets selected '%(len(matchingSheets)))
else:
matchingSheets=[x for x in book.sheetNames if sheetMatch in x]
print('%d of %d sheets selected matching "%s"'%(len(matchingSheets),len(book.sheetNames),sheetMatch))
matchingSheetsWithCol=[]
for sheetName in matchingSheets:
i = book.sheetNames.index(sheetName) # index of that sheet
for j,colName in enumerate(book.sheets[i].colDesc):
if colMatch in colName:
matchingSheetsWithCol.append((sheetName,j))
break
else:
print(" no match in [%s]%s"%(book.bookName,sheetName))
print("%d of %d of those have your column"%(len(matchingSheetsWithCol),len(matchingSheets)))
for item in matchingSheetsWithCol:
        print(item,item[0],item[1])
def asxc(cls, obj):
"""Convert object into Xcfunc."""
if isinstance(obj, cls): return obj
if is_string(obj): return cls.from_name(obj)
    raise TypeError("Don't know how to convert <%s:%s> to Xcfunc" % (type(obj), str(obj)))
def assertTimeZoneIsNotNone(self, dt, msg=None):
'''Fail unless ``dt`` has a non-null ``tzinfo`` attribute.
Parameters
----------
dt : datetime
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
Raises
------
TypeError
If ``dt`` is not a datetime object.
'''
if not isinstance(dt, datetime):
raise TypeError('First argument is not a datetime object')
    self.assertIsNotNone(dt.tzinfo, msg=msg)
def remove_pardir_symbols(path, sep=os.sep, pardir=os.pardir):
"""
    Remove relative path symbols such as '..'
Args:
path (str): A target path string
        sep (str): A string referring to the path delimiter (Default: `os.sep`)
pardir (str): A string to refer parent directory (Default: `os.pardir`)
Returns:
str
"""
bits = path.split(sep)
bits = (x for x in bits if x != pardir)
    return sep.join(bits)
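# Doctest-style sketch; sep and pardir are passed explicitly so the result
# does not depend on the host OS:
#
# >>> remove_pardir_symbols('var/www/../uploads', sep='/', pardir='..')
# 'var/www/uploads'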
def shell(self, name='default', site=None, use_root=0, **kwargs):
"""
Opens a SQL shell to the given database, assuming the configured database
and user supports this feature.
"""
r = self.database_renderer(name=name, site=site)
if int(use_root):
kwargs = dict(
db_user=r.env.db_root_username,
db_password=r.env.db_root_password,
db_host=r.env.db_host,
db_name=r.env.db_name,
)
r.env.update(kwargs)
if not name:
r.env.db_name = ''
    r.run('/bin/bash -i -c "mysql -u {db_user} -p\'{db_password}\' -h {db_host} {db_name}"')
def explode(self, obj):
""" Determine if the object should be exploded. """
if obj in self._done:
return False
result = False
for item in self._explode:
if hasattr(item, '_moId'):
# If it has a _moId it is an instance
if obj._moId == item._moId:
result = True
else:
# If it does not have a _moId it is a template
if obj.__class__.__name__ == item.__name__:
result = True
if result:
self._done.add(obj)
        return result
def extract_command(outputdir, domain_methods, text_domain, keywords,
comment_tags, base_dir, project, version,
msgid_bugs_address):
"""Extracts strings into .pot files
:arg domain: domains to generate strings for or 'all' for all domains
:arg outputdir: output dir for .pot files; usually
locale/templates/LC_MESSAGES/
:arg domain_methods: DOMAIN_METHODS setting
:arg text_domain: TEXT_DOMAIN settings
:arg keywords: KEYWORDS setting
:arg comment_tags: COMMENT_TAGS setting
:arg base_dir: BASE_DIR setting
:arg project: PROJECT setting
:arg version: VERSION setting
:arg msgid_bugs_address: MSGID_BUGS_ADDRESS setting
"""
# Must monkeypatch first to fix i18n extensions stomping issues!
monkeypatch_i18n()
# Create the outputdir if it doesn't exist
outputdir = os.path.abspath(outputdir)
if not os.path.isdir(outputdir):
print('Creating output dir %s ...' % outputdir)
os.makedirs(outputdir)
domains = domain_methods.keys()
def callback(filename, method, options):
if method != 'ignore':
print(' %s' % filename)
# Extract string for each domain
for domain in domains:
print('Extracting all strings in domain %s...' % domain)
methods = domain_methods[domain]
catalog = Catalog(
header_comment='',
project=project,
version=version,
msgid_bugs_address=msgid_bugs_address,
charset='utf-8',
)
extracted = extract_from_dir(
base_dir,
method_map=methods,
options_map=generate_options_map(),
keywords=keywords,
comment_tags=comment_tags,
callback=callback,
)
for filename, lineno, msg, cmts, ctxt in extracted:
catalog.add(msg, None, [(filename, lineno)], auto_comments=cmts,
context=ctxt)
with open(os.path.join(outputdir, '%s.pot' % domain), 'wb') as fp:
write_po(fp, catalog, width=80)
    print('Done')
def create_concept_scheme(rdf, ns, lname=''):
"""Create a skos:ConceptScheme in the model and return it."""
ont = None
if not ns:
# see if there's an owl:Ontology and use that to determine namespace
onts = list(rdf.subjects(RDF.type, OWL.Ontology))
if len(onts) > 1:
onts.sort()
ont = onts[0]
logging.warning(
"Multiple owl:Ontology instances found. "
"Creating concept scheme from %s.", ont)
elif len(onts) == 1:
ont = onts[0]
else:
ont = None
if not ont:
logging.info(
"No skos:ConceptScheme or owl:Ontology found. "
"Using namespace auto-detection for creating concept scheme.")
ns = detect_namespace(rdf)
elif ont.endswith('/') or ont.endswith('#') or ont.endswith(':'):
ns = ont
else:
ns = ont + '/'
NS = Namespace(ns)
cs = NS[lname]
rdf.add((cs, RDF.type, SKOS.ConceptScheme))
if ont is not None:
rdf.remove((ont, RDF.type, OWL.Ontology))
# remove owl:imports declarations
for o in rdf.objects(ont, OWL.imports):
rdf.remove((ont, OWL.imports, o))
# remove protege specific properties
for p, o in rdf.predicate_objects(ont):
prot = URIRef(
'http://protege.stanford.edu/plugins/owl/protege#')
if p.startswith(prot):
rdf.remove((ont, p, o))
# move remaining properties (dc:title etc.) of the owl:Ontology into
# the skos:ConceptScheme
replace_uri(rdf, ont, cs)
    return cs
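# A hedged usage sketch (assumes rdflib is installed and the module's RDF/SKOS
# namespaces are in scope); an explicit namespace skips the ontology detection.
import rdflib

g = rdflib.Graph()
cs = create_concept_scheme(g, 'http://example.org/vocab/', lname='scheme')
# cs is rdflib.URIRef('http://example.org/vocab/scheme') and g now holds the
# triple (cs, RDF.type, SKOS.ConceptScheme)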
def restore(self, bAsync = True):
"""
Unmaximize and unminimize the window.
@see: L{maximize}, L{minimize}
@type bAsync: bool
@param bAsync: Perform the request asynchronously.
    @raise WindowsError: An error occurred while processing this request.
"""
if bAsync:
win32.ShowWindowAsync( self.get_handle(), win32.SW_RESTORE )
else:
        win32.ShowWindow( self.get_handle(), win32.SW_RESTORE )
def data(self, data, part=False, dataset=''):
"""Parse data and update links.
Parameters
----------
data
Data to parse.
part : `bool`, optional
True if data is partial (default: `False`).
dataset : `str`, optional
Dataset key prefix (default: '').
"""
links = self.parser(self.scanner(data, part), part, dataset)
    self.storage.add_links(links)
def get_handler(self, *args, **options):
"""
Returns the static files serving handler wrapping the default handler,
if static files should be served. Otherwise just returns the default
handler.
"""
handler = super(Command, self).get_handler(*args, **options)
insecure_serving = options.get('insecure_serving', False)
if self.should_use_static_handler(options):
return StaticFilesHandler(handler)
    return handler
def delete(self):
"""
If a dynamic version, delete it the standard way and remove it from the
inventory, else delete all dynamic versions.
"""
if self.dynamic_version_of is None:
self._delete_dynamic_versions()
else:
super(DynamicFieldMixin, self).delete()
        self._inventory.srem(self.dynamic_part)
def normalize_feature_objects(feature_objs):
"""Takes an iterable of GeoJSON-like Feature mappings or
an iterable of objects with a geo interface and
normalizes it to the former."""
for obj in feature_objs:
if hasattr(obj, "__geo_interface__") and \
'type' in obj.__geo_interface__.keys() and \
obj.__geo_interface__['type'] == 'Feature':
yield obj.__geo_interface__
elif isinstance(obj, dict) and 'type' in obj and \
obj['type'] == 'Feature':
yield obj
else:
            raise ValueError("Did not recognize object {0} "
                             "as GeoJSON Feature".format(obj))
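# A small usage sketch: a plain GeoJSON-like mapping is passed through
# unchanged, while anything unrecognised raises ValueError.
feature = {"type": "Feature",
           "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
           "properties": {}}
assert list(normalize_feature_objects([feature])) == [feature]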
def calendar(type='holiday', direction='next', last=1, startDate=None, token='', version=''):
'''This call allows you to fetch a number of trade dates or holidays from a given date. For example, if you want the next trading day, you would call /ref-data/us/dates/trade/next/1.
https://iexcloud.io/docs/api/#u-s-exchanges
8am, 9am, 12pm, 1pm UTC daily
Args:
type (string); "holiday" or "trade"
direction (string); "next" or "last"
last (int); number to move in direction
startDate (date); start date for next or last, YYYYMMDD
token (string); Access token
version (string); API version
Returns:
dict: result
'''
if startDate:
startDate = _strOrDate(startDate)
return _getJson('ref-data/us/dates/{type}/{direction}/{last}/{date}'.format(type=type, direction=direction, last=last, date=startDate), token, version)
    return _getJson('ref-data/us/dates/' + type + '/' + direction + '/' + str(last), token, version)
def install_binary_dist(self, members, virtualenv_compatible=True, prefix=None,
python=None, track_installed_files=False):
"""
Install a binary distribution into the given prefix.
:param members: An iterable of tuples with two values each:
1. A :class:`tarfile.TarInfo` object.
2. A file-like object.
:param prefix: The "prefix" under which the requirements should be
installed. This will be a pathname like ``/usr``,
``/usr/local`` or the pathname of a virtual environment.
Defaults to :attr:`.Config.install_prefix`.
:param python: The pathname of the Python executable to use in the shebang
line of all executable Python scripts inside the binary
distribution. Defaults to :attr:`.Config.python_executable`.
:param virtualenv_compatible: Whether to enable workarounds to make the
resulting filenames compatible with
virtual environments (defaults to
:data:`True`).
:param track_installed_files: If this is :data:`True` (not the default for
this method because of backwards
compatibility) pip-accel will create
``installed-files.txt`` as required by
pip to properly uninstall packages.
This method installs a binary distribution created by
:class:`build_binary_dist()` into the given prefix (a directory like
``/usr``, ``/usr/local`` or a virtual environment).
"""
# TODO This is quite slow for modules like Django. Speed it up! Two choices:
# 1. Run the external tar program to unpack the archive. This will
# slightly complicate the fixing up of hashbangs.
# 2. Using links? The plan: We can maintain a "seed" environment under
# $PIP_ACCEL_CACHE and use symbolic and/or hard links to populate other
# places based on the "seed" environment.
module_search_path = set(map(os.path.normpath, sys.path))
prefix = os.path.normpath(prefix or self.config.install_prefix)
python = os.path.normpath(python or self.config.python_executable)
installed_files = []
for member, from_handle in members:
pathname = member.name
if virtualenv_compatible:
# Some binary distributions include C header files (see for example
# the greenlet package) however the subdirectory of include/ in a
# virtual environment is a symbolic link to a subdirectory of
# /usr/include/ so we should never try to install C header files
# inside the directory pointed to by the symbolic link. Instead we
# implement the same workaround that pip uses to avoid this
# problem.
pathname = re.sub('^include/', 'include/site/', pathname)
if self.config.on_debian and '/site-packages/' in pathname:
# On Debian based system wide Python installs the /site-packages/
# directory is not in Python's module search path while
# /dist-packages/ is. We try to be compatible with this.
match = re.match('^(.+?)/site-packages', pathname)
if match:
site_packages = os.path.normpath(os.path.join(prefix, match.group(0)))
dist_packages = os.path.normpath(os.path.join(prefix, match.group(1), 'dist-packages'))
if dist_packages in module_search_path and site_packages not in module_search_path:
pathname = pathname.replace('/site-packages/', '/dist-packages/')
pathname = os.path.join(prefix, pathname)
if track_installed_files:
# Track the installed file's absolute pathname.
installed_files.append(pathname)
directory = os.path.dirname(pathname)
if not os.path.isdir(directory):
logger.debug("Creating directory: %s ..", directory)
makedirs(directory)
logger.debug("Creating file: %s ..", pathname)
with open(pathname, 'wb') as to_handle:
contents = from_handle.read()
if contents.startswith(b'#!/'):
contents = self.fix_hashbang(contents, python)
to_handle.write(contents)
os.chmod(pathname, member.mode)
if track_installed_files:
        self.update_installed_files(installed_files)
def as_dict(self):
"""
    As in :class:`pymatgen.core.Molecule`, except using
    `to_dict_of_dicts` from NetworkX
    to store graph information.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"graphs": json_graph.adjacency_data(self.graph)}
    return d
def _set_defined_policy(self, v, load=False):
"""
Setter method for defined_policy, mapped from YANG variable /rbridge_id/secpolicy/defined_policy (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_defined_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_defined_policy() directly.
YANG Description: Set the defined policy
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=defined_policy.defined_policy, is_container='container', presence=False, yang_name="defined-policy", rest_name="defined-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Defined policy set', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """defined_policy must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=defined_policy.defined_policy, is_container='container', presence=False, yang_name="defined-policy", rest_name="defined-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Defined policy set', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True)""",
})
self.__defined_policy = t
if hasattr(self, '_set'):
        self._set()
def load_datafile(name, search_path, codecs=get_codecs(), **kwargs):
"""
    Find a datafile and load it with a matching codec.
    TODO: only the first match is used.
    kwargs:
        default = if passed, return this value on failure instead of raising
"""
    return munge.load_datafile(name, search_path, codecs, **kwargs)
def stats(self, request, *args, **kwargs):
"""
To get count of alerts per severities - run **GET** request against */api/alerts/stats/*.
This endpoint supports all filters that are available for alerts list (*/api/alerts/*).
Response example:
.. code-block:: javascript
{
"debug": 2,
"error": 1,
"info": 1,
"warning": 1
}
"""
queryset = self.filter_queryset(self.get_queryset())
alerts_severities_count = queryset.values('severity').annotate(count=Count('severity'))
severity_names = dict(models.Alert.SeverityChoices.CHOICES)
# For consistency with all other endpoint we need to return severity names in lower case.
alerts_severities_count = {
severity_names[asc['severity']].lower(): asc['count'] for asc in alerts_severities_count}
for severity_name in severity_names.values():
if severity_name.lower() not in alerts_severities_count:
alerts_severities_count[severity_name.lower()] = 0
    return response.Response(alerts_severities_count, status=status.HTTP_200_OK)
def _get_snmpv3(self, oid):
"""
Try to send an SNMP GET operation using SNMPv3 for the specified OID.
Parameters
----------
oid : str
The SNMP OID that you want to get.
Returns
-------
string : str
The string as part of the value from the OID you are trying to retrieve.
"""
snmp_target = (self.hostname, self.snmp_port)
cmd_gen = cmdgen.CommandGenerator()
(error_detected, error_status, error_index, snmp_data) = cmd_gen.getCmd(
cmdgen.UsmUserData(
self.user,
self.auth_key,
self.encrypt_key,
authProtocol=self.auth_proto,
privProtocol=self.encryp_proto,
),
cmdgen.UdpTransportTarget(snmp_target, timeout=1.5, retries=2),
oid,
lookupNames=True,
lookupValues=True,
)
if not error_detected and snmp_data[0][1]:
return text_type(snmp_data[0][1])
    return ""
def to_frame(self, data, state):
"""
Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes.
"""
# Find the next newline
data_len = data.find(b'\n')
if data_len < 0:
# No line to extract
raise exc.NoFrames()
# Track how much to exclude
frame_len = data_len + 1
# Are we to exclude carriage returns?
if (self.carriage_return and data_len and
data[data_len - 1] == ord(b'\r')):
data_len -= 1
# Extract the frame
frame = six.binary_type(data[:data_len])
del data[:frame_len]
# Return the frame
    return frame
def arch(self):
"""
Return an architecture for this task.
:returns: an arch string (eg "noarch", or "ppc64le"), or None this task
has no architecture associated with it.
"""
if self.method in ('buildArch', 'createdistrepo', 'livecd'):
return self.params[2]
if self.method in ('createrepo', 'runroot'):
return self.params[1]
if self.method == 'createImage':
return self.params[3]
if self.method == 'indirectionimage':
        return self.params[0]['arch']
def filter_params(self, value):
""" return filtering params """
if value is None:
return {}
val_min = value.get('min', None)
val_max = value.get('max', None)
params = {}
if val_min == val_max:
return { self.target: val_min }
key = self.target + "__"
if val_min is not None:
params[key+self.lookup_types[0]] = val_min
if val_max is not None:
params[key+self.lookup_types[1]] = val_max
    return params
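# An illustrative sketch with a hypothetical filter class (names assumed, not
# from the source) whose target is 'price' and lookup_types are ('gte', 'lte'):
class PriceFilter:
    target = 'price'
    lookup_types = ('gte', 'lte')
    filter_params = filter_params  # reuse the function above as a method

PriceFilter().filter_params({'min': 10, 'max': 50})   # {'price__gte': 10, 'price__lte': 50}
PriceFilter().filter_params({'min': 10, 'max': 10})   # {'price': 10}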
def grant_usage_install_privileges(cls, cur, schema_name, roles):
"""
    Grant select/insert/update/delete, execute and sequence usage on all objects in the schema to the given roles.
"""
cur.execute('GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA {0} TO {1};'
'GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA {0} TO {1};'
'GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA {0} TO {1};'
                .format(schema_name, roles))
def parent(self, parent):
"""Setter for the parent state of the state element
:param rafcon.core.states.state.State parent: Parent state or None
"""
if parent is None:
self._parent = None
else:
from rafcon.core.states.state import State
assert isinstance(parent, State)
old_parent = self.parent
self._parent = ref(parent)
valid, message = self._check_validity()
if not valid:
if not old_parent:
self._parent = None
else:
self._parent = ref(old_parent)
class_name = self.__class__.__name__
if global_config.get_config_value("LIBRARY_RECOVERY_MODE") is True:
do_delete_item = True
# In case of just the data type is wrong raise an Exception but keep the data flow
if "not have matching data types" in message:
do_delete_item = False
self._parent = ref(parent)
raise RecoveryModeException("{0} invalid within state \"{1}\" (id {2}): {3}".format(
class_name, parent.name, parent.state_id, message), do_delete_item=do_delete_item)
else:
raise ValueError("{0} invalid within state \"{1}\" (id {2}): {3} {4}".format(
                    class_name, parent.name, parent.state_id, message, self))
def create_columns(self):
"""For each column in file create a TransactionCsvImportColumn"""
reader = self._get_csv_reader()
headings = six.next(reader)
try:
examples = six.next(reader)
except StopIteration:
examples = []
found_fields = set()
for i, value in enumerate(headings):
if i >= 20:
break
infer_field = self.has_headings and value not in found_fields
to_field = (
{
"date": "date",
"amount": "amount",
"description": "description",
"memo": "description",
"notes": "description",
}.get(value.lower(), "")
if infer_field
else ""
)
if to_field:
found_fields.add(to_field)
TransactionCsvImportColumn.objects.update_or_create(
transaction_import=self,
column_number=i + 1,
column_heading=value if self.has_headings else "",
to_field=to_field,
example=examples[i].strip() if examples else "",
            )
def cmdline(argv=sys.argv[1:]):
"""
Script for rebasing a text file
"""
parser = ArgumentParser(
description='Rebase a text from his stop words')
parser.add_argument('language', help='The language used to rebase')
parser.add_argument('source', help='Text file to rebase')
options = parser.parse_args(argv)
factory = StopWordFactory()
language = options.language
stop_words = factory.get_stop_words(language, fail_safe=True)
content = open(options.source, 'rb').read().decode('utf-8')
    print(stop_words.rebase(content))
def extract_datetime_hour(cls, datetime_str):
"""
Tries to extract a `datetime` object from the given string, including only hours.
Raises `DateTimeFormatterException` if the extraction fails.
"""
if not datetime_str:
raise DateTimeFormatterException('datetime_str must a valid string')
try:
return cls._extract_timestamp(datetime_str, cls.DATETIME_HOUR_FORMAT)
except (TypeError, ValueError):
        raise DateTimeFormatterException('Invalid datetime string {}.'.format(datetime_str))
def fromML(vec):
"""
Convert a vector from the new mllib-local representation.
This does NOT copy the data; it copies references.
:param vec: a :py:class:`pyspark.ml.linalg.Vector`
:return: a :py:class:`pyspark.mllib.linalg.Vector`
.. versionadded:: 2.0.0
"""
if isinstance(vec, newlinalg.DenseVector):
return DenseVector(vec.array)
elif isinstance(vec, newlinalg.SparseVector):
return SparseVector(vec.size, vec.indices, vec.values)
else:
        raise TypeError("Unsupported vector type %s" % type(vec))
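# A hedged usage sketch, assuming this is the pyspark.mllib.linalg.Vectors.fromML
# helper and pyspark is installed; the returned vector shares data rather than copying.
from pyspark.ml import linalg as ml_linalg
from pyspark.mllib.linalg import Vectors

ml_vec = ml_linalg.DenseVector([1.0, 2.0, 3.0])
mllib_vec = Vectors.fromML(ml_vec)   # pyspark.mllib.linalg.DenseVector([1.0, 2.0, 3.0])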
def get_resource_by_agent(self, agent_id):
"""Gets the ``Resource`` associated with the given agent.
arg: agent_id (osid.id.Id): ``Id`` of the ``Agent``
return: (osid.resource.Resource) - associated resource
raise: NotFound - ``agent_id`` is not found
raise: NullArgument - ``agent_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
collection = JSONClientValidated('resource',
collection='Resource',
runtime=self._runtime)
result = collection.find_one(
dict({'agentIds': {'$in': [str(agent_id)]}},
**self._view_filter()))
return objects.Resource(
osid_object_map=result,
runtime=self._runtime,
            proxy=self._proxy)
def clear_max_string_length(self):
"""stub"""
if (self.get_max_string_length_metadata().is_read_only() or
self.get_max_string_length_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['maxStringLength'] = \
        self.get_max_string_length_metadata().get_default_cardinal_values()[0]
def update(context, id, export_control, active):
"""update(context, id, export_control, active)
Update a component
>>> dcictl component-update [OPTIONS]
:param string id: ID of the component [required]
:param boolean export-control: Set the component visible for users
:param boolean active: Set the component in the active state
"""
component_info = component.get(context, id=id)
etag = component_info.json()['component']['etag']
result = component.update(context, id=id, etag=etag,
export_control=export_control,
state=utils.active_string(active))
    utils.format_output(result, context.format)
def all_state_variables_read(self):
""" recursive version of variables_read
"""
if self._all_state_variables_read is None:
self._all_state_variables_read = self._explore_functions(
lambda x: x.state_variables_read)
    return self._all_state_variables_read
def sendConnect(self, data):
"""Send a CONNECT command to the broker
:param data: List of other broker main socket URL"""
# Imported dynamically - Not used if only one broker
if self.backend == 'ZMQ':
import zmq
self.context = zmq.Context()
self.socket = self.context.socket(zmq.DEALER)
if sys.version_info < (3,):
self.socket.setsockopt_string(zmq.IDENTITY, unicode('launcher'))
else:
self.socket.setsockopt_string(zmq.IDENTITY, 'launcher')
self.socket.connect(
"tcp://{hostname}:{port}".format(
port=self.brokerPort,
hostname = self.hostname
)
)
self.socket.send_multipart([b"CONNECT",
pickle.dumps(data,
pickle.HIGHEST_PROTOCOL)])
else:
# TODO
        pass
def update_peer(self,
current_name,
new_name, new_url, username, password, peer_type="REPLICATION"):
"""
Update a replication peer.
    @param current_name: The name of the peer to be updated.
@param new_name: The new name for the peer.
@param new_url: The new url for the peer.
@param username: The admin username to use to setup the remote side of the peer connection.
@param password: The password of the admin user.
@param peer_type: Added in v11. The type of the peer. Defaults to 'REPLICATION'.
@return: The updated peer.
@since: API v3
"""
if self._get_resource_root().version < 11:
peer_type = None
peer = ApiCmPeer(self._get_resource_root(),
name=new_name,
url=new_url,
username=username,
password=password,
type=peer_type)
    return self._put("peers/" + current_name, ApiCmPeer, data=peer, api_version=3)
def bake_content(request):
"""Invoke the baking process - trigger post-publication"""
ident_hash = request.matchdict['ident_hash']
try:
id, version = split_ident_hash(ident_hash)
except IdentHashError:
raise httpexceptions.HTTPNotFound()
if not version:
raise httpexceptions.HTTPBadRequest('must specify the version')
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT bool(portal_type = 'Collection'), stateid, module_ident
FROM modules
WHERE ident_hash(uuid, major_version, minor_version) = %s
""", (ident_hash,))
try:
is_binder, stateid, module_ident = cursor.fetchone()
except TypeError:
raise httpexceptions.HTTPNotFound()
if not is_binder:
raise httpexceptions.HTTPBadRequest(
'{} is not a book'.format(ident_hash))
if stateid == 5:
cursor.execute("""\
SELECT pg_notify('post_publication',
'{"module_ident": '||%s||',
"ident_hash": "'||%s||'",
"timestamp": "'||CURRENT_TIMESTAMP||'"}')
""", (module_ident, ident_hash))
else:
cursor.execute("""\
UPDATE modules SET stateid = 5
WHERE ident_hash(uuid, major_version, minor_version) = %s
""", (ident_hash,))
def add_tcp_callback(port, callback, threaded_callback=False):
"""
    Adds a TCP socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``.
"""
    _rpio.add_tcp_callback(port, callback, threaded_callback)
def put(self, locator = None, component = None):
"""
Puts a new reference into this reference map.
    :param locator: a locator to find the reference by.
    :param component: a component reference to be added.
"""
if component == None:
raise Exception("Component cannot be null")
self._lock.acquire()
try:
self._references.append(Reference(locator, component))
finally:
        self._lock.release()
def shift_and_scale(matrix, shift, scale):
""" Shift and scale matrix so its minimum value is placed at `shift` and
its maximum value is scaled to `scale` """
zeroed = matrix - matrix.min()
scaled = (scale - shift) * (zeroed / zeroed.max())
    return scaled + shift
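# A quick numeric sketch (numpy assumed): the matrix minimum maps to `shift`
# and the maximum maps to `scale`.
import numpy as np

m = np.array([[1.0, 2.0], [3.0, 5.0]])
shift_and_scale(m, 0.0, 1.0)
# array([[0.  , 0.25],
#        [0.5 , 1.  ]])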
def strip_rate(self, idx):
"""strip(1 byte) radiotap.datarate
    note that the unit of this field is originally 0.5 Mbps
:idx: int
:return: int
idx
:return: double
rate in terms of Mbps
"""
val, = struct.unpack_from('<B', self._rtap, idx)
rate_unit = float(1) / 2 # Mbps
    return idx + 1, rate_unit * val
def nullspace(A, atol=1e-13, rtol=0):
"""Compute an approximate basis for the nullspace of A.
The algorithm used by this function is based on the singular value
decomposition of `A`.
Parameters
----------
A : numpy.ndarray
A should be at most 2-D. A 1-D array with length k will be treated
as a 2-D with shape (1, k)
atol : float
The absolute tolerance for a zero singular value. Singular values
smaller than `atol` are considered to be zero.
rtol : float
The relative tolerance. Singular values less than rtol*smax are
considered to be zero, where smax is the largest singular value.
If both `atol` and `rtol` are positive, the combined tolerance is the
maximum of the two; that is::
tol = max(atol, rtol * smax)
Singular values smaller than `tol` are considered to be zero.
Returns
-------
numpy.ndarray
If `A` is an array with shape (m, k), then `ns` will be an array
with shape (k, n), where n is the estimated dimension of the
nullspace of `A`. The columns of `ns` are a basis for the
nullspace; each element in numpy.dot(A, ns) will be approximately
zero.
Notes
-----
Taken from the numpy cookbook.
"""
A = np.atleast_2d(A)
u, s, vh = np.linalg.svd(A)
tol = max(atol, rtol * s[0])
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
    return ns
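# A short usage sketch (numpy assumed): a rank-1 matrix with three columns has
# a two-dimensional nullspace, and A.dot(ns) is numerically zero.
import numpy as np

A = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 6.0]])   # rank 1
ns = nullspace(A)
print(ns.shape)                   # (3, 2)
print(np.allclose(A.dot(ns), 0))  # True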
def get_wulff_shape(self, material_id):
"""
Constructs a Wulff shape for a material.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
Returns:
pymatgen.analysis.wulff.WulffShape
"""
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.wulff import WulffShape, hkl_tuple_to_str
structure = self.get_structure_by_material_id(material_id)
surfaces = self.get_surface_data(material_id)["surfaces"]
lattice = (SpacegroupAnalyzer(structure)
.get_conventional_standard_structure().lattice)
miller_energy_map = {}
for surf in surfaces:
miller = tuple(surf["miller_index"])
# Prefer reconstructed surfaces, which have lower surface energies.
if (miller not in miller_energy_map) or surf["is_reconstructed"]:
miller_energy_map[miller] = surf["surface_energy"]
millers, energies = zip(*miller_energy_map.items())
    return WulffShape(lattice, millers, energies)
def mimetype_icon(path, fallback=None):
"""
Tries to create an icon from theme using the file mimetype.
E.g.::
return self.mimetype_icon(
path, fallback=':/icons/text-x-python.png')
:param path: file path for which the icon must be created
:param fallback: fallback icon path (qrc or file system)
:returns: QIcon or None if the file mimetype icon could not be found.
"""
mime = mimetypes.guess_type(path)[0]
if mime:
icon = mime.replace('/', '-')
# if system.WINDOWS:
# return icons.file()
if QtGui.QIcon.hasThemeIcon(icon):
icon = QtGui.QIcon.fromTheme(icon)
if not icon.isNull():
return icon
if fallback:
return QtGui.QIcon(fallback)
    return QtGui.QIcon.fromTheme('text-x-generic')
def unique_filename(**kwargs):
"""Create new filename guaranteed not to exist previously
Use mkstemp to create the file, then remove it and return the name
If dir is specified, the tempfile will be created in the path specified
otherwise the file will be created in a directory following this scheme:
:file:'/tmp/inasafe/<dd-mm-yyyy>/<user>/impacts'
See http://docs.python.org/library/tempfile.html for details.
Example usage:
tempdir = temp_dir(sub_dir='test')
filename = unique_filename(suffix='.foo', dir=tempdir)
print filename
/tmp/inasafe/23-08-2012/timlinux/test/tmpyeO5VR.foo
Or with no preferred subdir, a default subdir of 'impacts' is used:
filename = unique_filename(suffix='.shp')
print filename
/tmp/inasafe/23-08-2012/timlinux/impacts/tmpoOAmOi.shp
"""
if 'dir' not in kwargs:
path = temp_dir('impacts')
kwargs['dir'] = path
else:
path = temp_dir(kwargs['dir'])
kwargs['dir'] = path
if not os.path.exists(kwargs['dir']):
# Ensure that the dir mask won't conflict with the mode
# Umask sets the new mask and returns the old
umask = os.umask(0000)
# Ensure that the dir is world writable by explicitly setting mode
os.makedirs(kwargs['dir'], 0o777)
# Reinstate the old mask for tmp dir
os.umask(umask)
# Now we have the working dir set up go on and return the filename
handle, filename = mkstemp(**kwargs)
# Need to close it using the file handle first for windows!
os.close(handle)
try:
os.remove(filename)
except OSError:
pass
    return filename
def naiveWordAlignment(tg, utteranceTierName, wordTierName, isleDict,
phoneHelperTierName=None,
removeOverlappingSegments=False):
'''
Performs naive alignment for utterances in a textgrid
Naive alignment gives each segment equal duration. Word duration is
determined by the duration of an utterance and the number of phones in
the word.
By 'utterance' I mean a string of words separated by a space bounded
in time eg (0.5, 1.5, "he said he likes ketchup").
phoneHelperTierName - creates a tier that is parallel to the word tier.
However, the labels are the phones for the word,
rather than the word
removeOverlappingSegments - remove any labeled words or phones that
fall under labeled utterances
'''
utteranceTier = tg.tierDict[utteranceTierName]
wordTier = None
if wordTierName in tg.tierNameList:
wordTier = tg.tierDict[wordTierName]
# Load in the word tier, if it exists:
wordEntryList = []
phoneEntryList = []
if wordTier is not None:
if removeOverlappingSegments:
for startT, stopT, _ in utteranceTier.entryList:
wordTier = wordTier.eraseRegion(startT, stopT,
'truncate', False)
wordEntryList = wordTier.entryList
# Do the naive alignment
for startT, stopT, label in utteranceTier.entryList:
wordList = label.split()
# Get the list of phones in each word
superPhoneList = []
numPhones = 0
i = 0
while i < len(wordList):
word = wordList[i]
try:
firstSyllableList = isleDict.lookup(word)[0][0][0]
except isletool.WordNotInISLE:
wordList.pop(i)
continue
phoneList = [phone for syllable in firstSyllableList
for phone in syllable]
superPhoneList.append(phoneList)
numPhones += len(phoneList)
i += 1
# Get the naive alignment for words, if alignment doesn't
# already exist for words
subWordEntryList = []
subPhoneEntryList = []
if wordTier is not None:
subWordEntryList = wordTier.crop(startT, stopT,
"truncated", False).entryList
if len(subWordEntryList) == 0:
wordStartT = startT
phoneDur = (stopT - startT) / float(numPhones)
for i, word in enumerate(wordList):
phoneListTxt = " ".join(superPhoneList[i])
wordEndT = wordStartT + (phoneDur * len(superPhoneList[i]))
subWordEntryList.append((wordStartT, wordEndT, word))
subPhoneEntryList.append((wordStartT, wordEndT, phoneListTxt))
wordStartT = wordEndT
wordEntryList.extend(subWordEntryList)
phoneEntryList.extend(subPhoneEntryList)
# Replace or add the word tier
newWordTier = tgio.IntervalTier(wordTierName,
wordEntryList,
tg.minTimestamp,
tg.maxTimestamp)
if wordTier is not None:
tg.replaceTier(wordTierName, newWordTier)
else:
tg.addTier(newWordTier)
# Add the phone tier
# This is mainly used as an annotation tier
if phoneHelperTierName is not None and len(phoneEntryList) > 0:
        newPhoneTier = tgio.IntervalTier(phoneHelperTierName,
                                         phoneEntryList,
                                         tg.minTimestamp,
                                         tg.maxTimestamp)
if phoneHelperTierName in tg.tierNameList:
tg.replaceTier(phoneHelperTierName, newPhoneTier)
else:
tg.addTier(newPhoneTier)
return tg | Performs naive alignment for utterances in a textgrid
Naive alignment gives each segment equal duration. Word duration is
determined by the duration of an utterance and the number of phones in
the word.
By 'utterance' I mean a string of words separated by a space bounded
in time eg (0.5, 1.5, "he said he likes ketchup").
phoneHelperTierName - creates a tier that is parallel to the word tier.
However, the labels are the phones for the word,
rather than the word
removeOverlappingSegments - remove any labeled words or phones that
fall under labeled utterances |
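A minimal sketch of the even-split arithmetic applied above, with toy numbers standing in for the utterance bounds and the ISLE phone lookup (all values below are hypothetical):
# Hypothetical utterance (0.5 s - 1.5 s) containing two words with 2 and 3 phones.
startT, stopT = 0.5, 1.5
superPhoneList = [["HH", "IY"], ["S", "EH", "D"]]
numPhones = sum(len(phones) for phones in superPhoneList)   # 5 phones in total
phoneDur = (stopT - startT) / float(numPhones)              # 0.2 s per phone
wordStartT = startT
for word, phones in zip(["he", "said"], superPhoneList):
    wordEndT = wordStartT + phoneDur * len(phones)
    print(word, round(wordStartT, 2), round(wordEndT, 2))   # he 0.5 0.9, said 0.9 1.5
    wordStartT = wordEndT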
def configure_logging(logger_name, filename=None):
""" Configure logging and return the named logger and the location of the logging configuration file loaded.
This function expects a Splunk app directory structure::
<app-root>
bin
...
default
...
local
...
This function looks for a logging configuration file at each of these locations, loading the first, if any,
logging configuration file that it finds::
local/{name}.logging.conf
default/{name}.logging.conf
local/logging.conf
default/logging.conf
The current working directory is set to *<app-root>* before the logging configuration file is loaded. Hence, paths
in the logging configuration file are relative to *<app-root>*. The current directory is reset before return.
    You may short circuit the search for a logging configuration file by providing an alternative file location in
    `filename`. Logging configuration files must be in `ConfigParser format`_.
    Arguments:
:param logger_name: Logger name
:type logger_name: bytes, unicode
:param filename: Location of an alternative logging configuration file or `None`.
:type filename: bytes, unicode or NoneType
:returns: The named logger and the location of the logging configuration file loaded.
:rtype: tuple
.. _ConfigParser format: https://docs.python.org/2/library/logging.config.html#configuration-file-format
"""
if filename is None:
if logger_name is None:
probing_paths = [path.join('local', 'logging.conf'), path.join('default', 'logging.conf')]
else:
probing_paths = [
path.join('local', logger_name + '.logging.conf'),
path.join('default', logger_name + '.logging.conf'),
path.join('local', 'logging.conf'),
path.join('default', 'logging.conf')]
for relative_path in probing_paths:
configuration_file = path.join(app_root, relative_path)
if path.exists(configuration_file):
filename = configuration_file
break
elif not path.isabs(filename):
found = False
for conf in 'local', 'default':
configuration_file = path.join(app_root, conf, filename)
if path.exists(configuration_file):
filename = configuration_file
found = True
break
if not found:
raise ValueError('Logging configuration file "{}" not found in local or default directory'.format(filename))
elif not path.exists(filename):
raise ValueError('Logging configuration file "{}" not found'.format(filename))
if filename is not None:
global _current_logging_configuration_file
filename = path.realpath(filename)
if filename != _current_logging_configuration_file:
working_directory = getcwd()
chdir(app_root)
try:
fileConfig(filename, {'SPLUNK_HOME': splunk_home})
finally:
chdir(working_directory)
_current_logging_configuration_file = filename
if len(root.handlers) == 0:
root.addHandler(StreamHandler())
return None if logger_name is None else getLogger(logger_name), filename | Configure logging and return the named logger and the location of the logging configuration file loaded.
This function expects a Splunk app directory structure::
<app-root>
bin
...
default
...
local
...
This function looks for a logging configuration file at each of these locations, loading the first, if any,
logging configuration file that it finds::
local/{name}.logging.conf
default/{name}.logging.conf
local/logging.conf
default/logging.conf
The current working directory is set to *<app-root>* before the logging configuration file is loaded. Hence, paths
in the logging configuration file are relative to *<app-root>*. The current directory is reset before return.
You may short circuit the search for a logging configuration file by providing an alternative file location in
`filename`. Logging configuration files must be in `ConfigParser format`_.
Arguments:
:param logger_name: Logger name
:type logger_name: bytes, unicode
:param filename: Location of an alternative logging configuration file or `None`.
:type filename: bytes, unicode or NoneType
:returns: The named logger and the location of the logging configuration file loaded.
:rtype: tuple
.. _ConfigParser format: https://docs.python.org/2/library/logging.config.html#configuration-file-format |
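A short usage sketch, assuming the Splunk app layout described above and a hypothetical logger name 'my_command':
# Returns the named logger plus the path of whichever logging configuration file was loaded.
logger, logging_configuration = configure_logging('my_command')
logger.info('logging configured from %s', logging_configuration)
# A relative filename is resolved against local/ and then default/ under the app root.
logger, logging_configuration = configure_logging('my_command', filename='my_command.logging.conf')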
def reporter(self, analysistype='genesippr'):
"""
Creates a report of the genesippr results
:param analysistype: The variable to use when accessing attributes in the metadata object
"""
logging.info('Creating {} report'.format(analysistype))
# Create a dictionary to link all the genera with their genes
genusgenes = dict()
# The organism-specific targets are in .tfa files in the target path
targetpath = str()
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
targetpath = sample[analysistype].targetpath
for organismfile in glob(os.path.join(targetpath, '*.tfa')):
organism = os.path.splitext(os.path.basename(organismfile))[0]
# Use BioPython to extract all the gene names from the file
for record in SeqIO.parse(open(organismfile), 'fasta'):
# Append the gene names to the genus-specific list
try:
genusgenes[organism].add(record.id.split('_')[0])
except (KeyError, IndexError):
genusgenes[organism] = set()
genusgenes[organism].add(record.id.split('_')[0])
# Determine from which genera the gene hits were sourced
for sample in self.runmetadata.samples:
# Initialise the list to store the genera
sample[analysistype].targetgenera = list()
if sample.general.bestassemblyfile != 'NA':
for organism in genusgenes:
# Iterate through all the genesippr hits and attribute each gene to the appropriate genus
for gene in sample[analysistype].results:
# If the gene name is in the genes from that organism, add the genus name to the list of
# genera found in the sample
if gene.split('_')[0] in genusgenes[organism]:
if organism not in sample[analysistype].targetgenera:
sample[analysistype].targetgenera.append(organism)
# Create the path in which the reports are stored
make_path(self.reportpath)
# The report will have every gene for all genera in the header
header = 'Strain,Genus,{},\n'.format(','.join(self.genelist))
data = str()
with open(os.path.join(self.reportpath, analysistype + '.csv'), 'w') as report:
for sample in self.runmetadata.samples:
sample[analysistype].report_output = list()
if sample.general.bestassemblyfile != 'NA':
# Add the genus/genera found in the sample
data += '{},{},'.format(sample.name, ';'.join(sample[analysistype].targetgenera))
best_dict = dict()
if sample[analysistype].results:
gene_check = list()
# Find the best match for all the hits
for target, pid in sample[analysistype].results.items():
gene_name = target.split('_')[0]
for gene in self.genelist:
# If the key matches a gene in the list of genes
if gene == gene_name:
# If the percent identity is better, update the dictionary
try:
if float(pid) > best_dict[gene]:
best_dict[gene] = float(pid)
except KeyError:
best_dict[gene] = float(pid)
for gene in self.genelist:
# If the gene was not found in the sample, print an empty cell in the report
try:
best_dict[gene]
except KeyError:
data += ','
# Print the required information for the gene
for name, identity in sample[analysistype].results.items():
if name.split('_')[0] == gene and gene not in gene_check:
data += '{pid}%'.format(pid=best_dict[gene])
try:
if not sample.general.trimmedcorrectedfastqfiles[0].endswith('.fasta'):
data += ' ({avgd} +/- {std}),'\
.format(avgd=sample[analysistype].avgdepth[name],
std=sample[analysistype].standarddev[name])
else:
data += ','
except IndexError:
data += ','
gene_check.append(gene)
# Add the simplified results to the object - used in the assembly pipeline report
sample[analysistype].report_output.append(gene)
# Add a newline after each sample
data += '\n'
# Add a newline if the sample did not have any gene hits
else:
data += '\n'
# Write the header and data to file
report.write(header)
report.write(data) | Creates a report of the genesippr results
:param analysistype: The variable to use when accessing attributes in the metadata object |
def parse_pv(header):
"""
Parses the PV array from an astropy FITS header.
Args:
header: astropy.io.fits.header.Header
The header containing the PV values.
Returns:
cd: 2d array (list(list(float))
[[PV1_0, PV1_1, ... PV1_N], [PV2_0, PV2_1, ... PV2_N]]
Note that N depends on the order of the fit. For example, an
order 3 fit goes up to PV?_10.
"""
order_fit = parse_order_fit(header)
def parse_with_base(i):
key_base = "PV%d_" % i
pvi_x = [header[key_base + "0"]]
def parse_range(lower, upper):
for j in range(lower, upper + 1):
pvi_x.append(header[key_base + str(j)])
if order_fit >= 1:
parse_range(1, 3)
if order_fit >= 2:
parse_range(4, 6)
if order_fit >= 3:
parse_range(7, 10)
return pvi_x
return [parse_with_base(1), parse_with_base(2)] | Parses the PV array from an astropy FITS header.
Args:
header: astropy.io.fits.header.Header
The header containing the PV values.
Returns:
cd: 2d array (list(list(float))
[[PV1_0, PV1_1, ... PV1_N], [PV2_0, PV2_1, ... PV2_N]]
Note that N depends on the order of the fit. For example, an
order 3 fit goes up to PV?_10. |
def download(url):
"""Uses requests to download an URL, maybe from a file"""
session = requests.Session()
session.mount('file://', FileAdapter())
try:
res = session.get(url)
except requests.exceptions.ConnectionError as e:
raise e
res.raise_for_status()
return res | Uses requests to download an URL, maybe from a file |
def get_nets_radb(self, response, is_http=False):
"""
The function for parsing network blocks from ASN origin data.
Args:
response (:obj:`str`): The response from the RADB whois/http
server.
is_http (:obj:`bool`): If the query is RADB HTTP instead of whois,
set to True. Defaults to False.
Returns:
list: A list of network block dictionaries
::
[{
'cidr' (str) - The assigned CIDR
'start' (int) - The index for the start of the parsed
network block
'end' (int) - The index for the end of the parsed network
block
}]
"""
nets = []
if is_http:
regex = r'route(?:6)?:[^\S\n]+(?P<val>.+?)<br>'
else:
regex = r'^route(?:6)?:[^\S\n]+(?P<val>.+|.+)$'
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
regex,
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
net['cidr'] = match.group(1).strip()
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except ValueError: # pragma: no cover
pass
return nets | The function for parsing network blocks from ASN origin data.
Args:
response (:obj:`str`): The response from the RADB whois/http
server.
is_http (:obj:`bool`): If the query is RADB HTTP instead of whois,
set to True. Defaults to False.
Returns:
list: A list of network block dictionaries
::
[{
'cidr' (str) - The assigned CIDR
'start' (int) - The index for the start of the parsed
network block
'end' (int) - The index for the end of the parsed network
block
}] |
def _ReadTab(Year):
'''
Reads OMNI data tab with Tsyganenko parameters.
Input:
Year: Integer year to read
'''
dtype_in = [('Year','int32'),('DayNo','int32'),('Hr','int32'),('Mn','int32'),
('Bx','float32'),('By','float32'),('Bz','float32'),
('Vx','float32'),('Vy','float32'),('Vz','float32'),
('Den','float32'),('Temp','float32'),('SymH','float32'),('IMFFlag','int32'),('ISWFlag','int32'),
('Tilt','float32'),('Pdyn','float32'),('W1','float32'),('W2','float32'),('W3','float32'),
('W4','float32'),('W5','float32'),('W6','float32')]
fname = Globals.DataPath+'tab/{:04d}.tab'.format(Year)
data = pf.ReadASCIIData(fname,Header=False,dtype=dtype_in)
dtype_out = [('Date','int32'),('ut','float32'),('Year','int32'),('DayNo','int32'),('Hr','int32'),('Mn','int32'),
('Bx','float32'),('By','float32'),('Bz','float32'),('Vx','float32'),('Vy','float32'),('Vz','float32'),
('Den','float32'),('Temp','float32'),('SymH','float32'),('IMFFlag','int32'),('ISWFlag','int32'),
('Tilt','float32'),('Pdyn','float32'),('W1','float32'),('W2','float32'),('W3','float32'),
('W4','float32'),('W5','float32'),('W6','float32'),('G1','float32'),('G2','float32'),('Kp','float32')]
out = np.recarray(data.size,dtype=dtype_out)
names = data.dtype.names
for n in names:
if n in out.dtype.names:
out[n] = data[n]
out.G1 = 0.0
out.G2 = 0.0
out.Kp = 0.0
out.ut = out.Hr + out.Mn/60.0
for i in range(0,out.size):
out.Date[i] = TT.DayNotoDate(out.Year[i],out.DayNo[i])
return out | Reads OMNI data tab with Tsyganenko parameters.
Input:
Year: Integer year to read |
def set_firewall_settings(profile,
inbound=None,
outbound=None,
store='local'):
'''
Set the firewall inbound/outbound settings for the specified profile and
store
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
inbound (str):
The inbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- blockinbound
- blockinboundalways
- allowinbound
- notconfigured
Default is ``None``
outbound (str):
The outbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- allowoutbound
- blockoutbound
- notconfigured
Default is ``None``
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
'''
# Input validation
if profile.lower() not in ('domain', 'public', 'private'):
raise ValueError('Incorrect profile: {0}'.format(profile))
if inbound and inbound.lower() not in ('blockinbound',
'blockinboundalways',
'allowinbound',
'notconfigured'):
raise ValueError('Incorrect inbound value: {0}'.format(inbound))
if outbound and outbound.lower() not in ('allowoutbound',
'blockoutbound',
'notconfigured'):
raise ValueError('Incorrect outbound value: {0}'.format(outbound))
if not inbound and not outbound:
raise ValueError('Must set inbound or outbound')
# You have to specify inbound and outbound setting at the same time
# If you're only specifying one, you have to get the current setting for the
# other
if not inbound or not outbound:
ret = get_settings(profile=profile,
section='firewallpolicy',
store=store)
if not inbound:
inbound = ret['Inbound']
if not outbound:
outbound = ret['Outbound']
command = 'set {0}profile firewallpolicy {1},{2}' \
''.format(profile, inbound, outbound)
results = _netsh_command(command=command, store=store)
if results:
raise CommandExecutionError('An error occurred: {0}'.format(results))
return True | Set the firewall inbound/outbound settings for the specified profile and
store
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
inbound (str):
The inbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- blockinbound
- blockinboundalways
- allowinbound
- notconfigured
Default is ``None``
outbound (str):
The outbound setting. If ``None`` is passed, the setting will remain
unchanged. Valid values are:
- allowoutbound
- blockoutbound
- notconfigured
Default is ``None``
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect |
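A usage sketch of the documented parameters (the profile and policy values are arbitrary illustrations):
# Block inbound and allow outbound traffic on the domain profile of the local policy store.
set_firewall_settings('domain', inbound='blockinbound', outbound='allowoutbound', store='local')
# Only one direction may be given; the other is read from the current settings first.
set_firewall_settings('public', outbound='blockoutbound')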
def init_app(self, app):
"""Flask application initialization."""
super(InvenioIIIFAPI, self).init_app(app)
api = Api(app=app)
self.iiif_ext.init_restful(api, prefix=app.config['IIIF_API_PREFIX']) | Flask application initialization. |
def trace_memory_start(self):
""" Starts measuring memory consumption """
self.trace_memory_clean_caches()
objgraph.show_growth(limit=30)
gc.collect()
self._memory_start = self.worker.get_memory()["total"] | Starts measuring memory consumption |
def on_fork():
"""
Should be called by any program integrating Mitogen each time the process
is forked, in the context of the new child.
"""
reset_logging_framework() # Must be first!
fixup_prngs()
mitogen.core.Latch._on_fork()
mitogen.core.Side._on_fork()
mitogen.core.ExternalContext.service_stub_lock = threading.Lock()
mitogen__service = sys.modules.get('mitogen.service')
if mitogen__service:
mitogen__service._pool_lock = threading.Lock() | Should be called by any program integrating Mitogen each time the process
is forked, in the context of the new child. |
def _JzStaeckelIntegrandSquared(v,E,Lz,I3V,delta,u0,cosh2u0,sinh2u0,
potu0pi2,pot):
    #potu0pi2= potentialStaeckel(u0,nu.pi/2.,pot,delta)
    """The J_z integrand: p_v(v)^2/2/delta^2"""
sin2v= nu.sin(v)**2.
dV= cosh2u0*potu0pi2\
-(sinh2u0+sin2v)*potentialStaeckel(u0,v,pot,delta)
    return E*sin2v+I3V+dV-Lz**2./2./delta**2./sin2v | The J_z integrand: p_v(v)^2/2/delta^2
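Reading the return value back as a formula (a sketch, taking the docstring's p_v^2/(2 delta^2) identification at face value and writing \Phi for the potential evaluated by potentialStaeckel):
\frac{p_v^2(v)}{2\delta^2}
    = E\,\sin^2 v + I_3(v)
    + \cosh^2 u_0\,\Phi\!\left(u_0, \tfrac{\pi}{2}\right)
    - \left(\sinh^2 u_0 + \sin^2 v\right)\Phi(u_0, v)
    - \frac{L_z^2}{2\,\delta^2 \sin^2 v}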
def swipe_right(self, steps=10, *args, **selectors):
"""
Swipe the UI object with *selectors* from center to right
See `Swipe Left` for more details.
"""
self.device(**selectors).swipe.right(steps=steps) | Swipe the UI object with *selectors* from center to right
See `Swipe Left` for more details. |
def add_table(self, table, row=None, col=0, row_spaces=1):
"""
Adds a table to the worksheet at (row, col).
Return the (row, col) where the table has been put.
:param xltable.Table table: Table to add to the worksheet.
:param int row: Row to start the table at (defaults to the next free row).
:param int col: Column to start the table at.
:param int row_spaces: Number of rows to leave between this table and the next.
"""
name = table.name
assert name is not None, "Tables must have a name"
assert name not in self.__tables, "Table %s already exists in this worksheet" % name
if row is None:
row = self.__next_row
self.__next_row = max(row + table.height + row_spaces, self.__next_row)
self.__tables[name] = (table, (row, col))
return row, col | Adds a table to the worksheet at (row, col).
Return the (row, col) where the table has been put.
:param xltable.Table table: Table to add to the worksheet.
:param int row: Row to start the table at (defaults to the next free row).
:param int col: Column to start the table at.
:param int row_spaces: Number of rows to leave between this table and the next. |
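A usage sketch with hypothetical Worksheet and Table objects (ws, tbl and other_tbl are assumed to exist; only the placement behaviour shown here is taken from the method above):
# The first table lands at the next free row, column 0.
row1, col1 = ws.add_table(tbl)
# A second table can be pinned explicitly, leaving the default row spacing alone.
row2, col2 = ws.add_table(other_tbl, row=row1, col=5)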
def query(self):
"""
Request object passed to datasource.query function:
{
'timezone': 'browser',
'panelId': 38,
'range': {
'from': '2018-08-29T02:38:09.633Z',
'to': '2018-08-29T03:38:09.633Z',
'raw': {'from': 'now-1h', 'to': 'now'}
},
'rangeRaw': {'from': 'now-1h', 'to': 'now'},
'interval': '10s',
'intervalMs': 10000,
'targets': [
{
'target': 'problems', 'refId': 'A', 'type': 'table'}
],
'format': 'json',
'maxDataPoints': 314,
'scopedVars': {
'__interval': {'text': '10s', 'value': '10s'},
'__interval_ms': {'text': 10000, 'value': 10000}
}
}
Only the first target is considered. If several targets are required, an error is raised.
The target is a string that is searched in the target_queries dictionary. If found
the corresponding query is executed and the result is returned.
Table response from datasource.query. An array of:
[
{
"type": "table",
"columns": [
{
"text": "Time",
"type": "time",
"sort": true,
"desc": true,
},
{
"text": "mean",
},
{
"text": "sum",
}
],
"rows": [
[
1457425380000,
null,
null
],
[
1457425370000,
1002.76215352,
1002.76215352
],
]
}
]
:return: See upper comment
:rtype: list
"""
logger.debug("Grafana query... %s", cherrypy.request.method)
if cherrypy.request.method == 'OPTIONS':
cherrypy.response.headers['Access-Control-Allow-Methods'] = 'GET,POST,PATCH,PUT,DELETE'
cherrypy.response.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization'
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
cherrypy.request.handler = None
return {}
if getattr(cherrypy.request, 'json', None):
posted_data = cherrypy.request.json
logger.debug("Posted data: %s", cherrypy.request.json)
targets = None
target = None
try:
targets = posted_data.get("targets")
assert targets
assert len(targets) == 1
target = targets[0].get("target")
except Exception as exp: # pylint: disable=broad-except
cherrypy.response.status = 409
return {'_status': u'ERR', '_message': u'Request error: %s.' % exp}
resp = []
if target in ['events_log']:
resp = [{
"type": "table",
"columns": [
{
"text": "Time",
"type": "time",
"sort": True,
"desc": True
},
{
"text": "Severity",
"type": "integer"
},
{
"text": "Message",
"type": "string"
}
],
"rows": []
}]
severity = {
"info": 0,
'warning': 1,
'error': 2,
'critical': 3
}
for log in reversed(self.app.recent_events):
# 0 for the first required target
# timestamp must be precise on ms for Grafana
resp[0]['rows'].append([log['timestamp'] * 1000,
severity.get(log['level'].lower(), 3), log['message']])
if target in ['problems_log']:
resp = [{
"type": "table",
"columns": [
{
"text": "Raised",
"type": "time",
"sort": True,
"desc": True
},
{
"text": "Severity",
"type": "integer"
},
{
"text": "Host",
"type": "string"
},
{
"text": "Service",
"type": "string"
},
{
"text": "State",
"type": "integer"
},
{
"text": "Output",
"type": "string"
}
],
"rows": []
}]
severity = {
"up": 0,
'down': 2,
'ok': 0,
'warning': 1,
'critical': 2
}
problems = {}
for scheduler_link in self.app.conf.schedulers:
sched_res = scheduler_link.con.get('monitoring_problems', wait=True)
if 'problems' in sched_res:
problems.update(sched_res['problems'])
# todo: add a sorting
for problem_uuid in problems:
log = problems[problem_uuid]
# 0 for the first required target
resp[0]['rows'].append([log['last_hard_state_change'] * 1000,
severity.get(log['state'].lower(), 3),
log['host'], log['service'], log['state'], log['output']])
return resp | Request object passed to datasource.query function:
{
'timezone': 'browser',
'panelId': 38,
'range': {
'from': '2018-08-29T02:38:09.633Z',
'to': '2018-08-29T03:38:09.633Z',
'raw': {'from': 'now-1h', 'to': 'now'}
},
'rangeRaw': {'from': 'now-1h', 'to': 'now'},
'interval': '10s',
'intervalMs': 10000,
'targets': [
{
'target': 'problems', 'refId': 'A', 'type': 'table'}
],
'format': 'json',
'maxDataPoints': 314,
'scopedVars': {
'__interval': {'text': '10s', 'value': '10s'},
'__interval_ms': {'text': 10000, 'value': 10000}
}
}
Only the first target is considered. If several targets are required, an error is raised.
The target is a string that is searched in the target_queries dictionary. If found
the corresponding query is executed and the result is returned.
Table response from datasource.query. An array of:
[
{
"type": "table",
"columns": [
{
"text": "Time",
"type": "time",
"sort": true,
"desc": true,
},
{
"text": "mean",
},
{
"text": "sum",
}
],
"rows": [
[
1457425380000,
null,
null
],
[
1457425370000,
1002.76215352,
1002.76215352
],
]
}
]
:return: See upper comment
:rtype: list |
def start(self, host, nornir):
"""
Run the task for the given host.
Arguments:
host (:obj:`nornir.core.inventory.Host`): Host we are operating with. Populated right
before calling the ``task``
nornir(:obj:`nornir.core.Nornir`): Populated right before calling
the ``task``
Returns:
host (:obj:`nornir.core.task.MultiResult`): Results of the task and its subtasks
"""
self.host = host
self.nornir = nornir
try:
logger.debug("Host %r: running task %r", self.host.name, self.name)
r = self.task(self, **self.params)
if not isinstance(r, Result):
r = Result(host=host, result=r)
except NornirSubTaskError as e:
tb = traceback.format_exc()
logger.error(
"Host %r: task %r failed with traceback:\n%s",
self.host.name,
self.name,
tb,
)
r = Result(host, exception=e, result=str(e), failed=True)
except Exception as e:
tb = traceback.format_exc()
logger.error(
"Host %r: task %r failed with traceback:\n%s",
self.host.name,
self.name,
tb,
)
r = Result(host, exception=e, result=tb, failed=True)
r.name = self.name
r.severity_level = logging.ERROR if r.failed else self.severity_level
self.results.insert(0, r)
return self.results | Run the task for the given host.
Arguments:
host (:obj:`nornir.core.inventory.Host`): Host we are operating with. Populated right
before calling the ``task``
nornir(:obj:`nornir.core.Nornir`): Populated right before calling
the ``task``
Returns:
host (:obj:`nornir.core.task.MultiResult`): Results of the task and its subtasks |
def _format_arg_list(args, variadic=False):
"""Format a list of arguments for pretty printing.
    :param args: list of arguments.
    :type args: list
    :param variadic: tell if the function accepts variadic arguments
    :type variadic: bool
"""
def sugar(s):
"""Shorten strings that are too long for decency."""
s = s.replace("{", "{{").replace("}", "}}")
if len(s) > 50:
return s[:20] + " ... " + s[-20:]
else:
return s
def arg_to_str(arg):
"""Convert argument to a string."""
if isinstance(arg, str):
return sugar(repr(arg))
elif arg is Parameter.empty:
return '\u2014'
else:
return sugar(str(arg))
if not args:
if variadic:
return "(\u2026)"
else:
return "()"
return "(" + ", ".join(map(arg_to_str, args)) + ")" | Format a list of arguments for pretty printing.
:param args: list of arguments.
:type args: list
:param variadic: tell if the function accepts variadic arguments
:type variadic: bool
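The expected formatting, traced from the helpers above (results shown as comments):
_format_arg_list(("abc", 42))            # -> "('abc', 42)"
_format_arg_list((), variadic=True)      # -> "(…)", the placeholder for unknown *args
_format_arg_list(("x" * 80,))            # long reprs are shortened to a "start ... end" form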
def info(self):
    """ retrieve metadata and current price data """
url = "{}/v7/finance/quote?symbols={}".format(
self._base_url, self.ticker)
r = _requests.get(url=url).json()["quoteResponse"]["result"]
if len(r) > 0:
return r[0]
        return {} | retrieve metadata and current price data
def fonts(self):
"""Generator yielding all fonts of this typeface
Yields:
Font: the next font in this typeface
"""
for width in (w for w in FontWidth if w in self):
for slant in (s for s in FontSlant if s in self[width]):
for weight in (w for w in FontWeight
if w in self[width][slant]):
yield self[width][slant][weight] | Generator yielding all fonts of this typeface
Yields:
Font: the next font in this typeface |
def parts_to_url(parts=None, scheme=None, netloc=None, path=None, query=None, fragment=None):
""" Build url urlunsplit style, but optionally handle path as a list and/or query as a dict """
if isinstance(parts, _urllib_parse.SplitResult):
scheme, netloc, path, query, fragment = parts
elif parts and isinstance(parts, dict):
scheme = parts.get('scheme', 'http')
netloc = parts.get('netloc', '')
path = parts.get('path', [])
query = parts.get('query', {})
fragment = parts.get('fragment', '')
if isinstance(path, (list, tuple)):
path = '/' + '/'.join(path).strip('/')
if isinstance(query, (dict, tuple)):
query = _unquote(_urlencode(query, doseq=True))
return _urlunsplit((scheme, netloc, path, query, fragment)) or None | Build url urlunsplit style, but optionally handle path as a list and/or query as a dict |
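Two usage sketches showing the list/dict handling (expected results as comments):
parts_to_url(scheme='https', netloc='example.com',
             path=['api', 'v1', 'items'], query={'page': 2})
# -> 'https://example.com/api/v1/items?page=2'
parts_to_url({'netloc': 'example.com', 'path': ['docs']})
# -> 'http://example.com/docs'   (the dict form defaults scheme to 'http')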
def _processDocstring(self, node, tail='', **kwargs):
"""
Handles a docstring for functions, classes, and modules.
Basically just figures out the bounds of the docstring and sends it
off to the parser to do the actual work.
"""
typeName = type(node).__name__
# Modules don't have lineno defined, but it's always 0 for them.
curLineNum = startLineNum = 0
if typeName != 'Module':
startLineNum = curLineNum = node.lineno - 1
# Figure out where both our enclosing object and our docstring start.
line = ''
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
match = AstWalker.__docstrMarkerRE.match(line)
if match:
break
curLineNum += 1
docstringStart = curLineNum
# Figure out where our docstring ends.
if not AstWalker.__docstrOneLineRE.match(line):
# Skip for the special case of a single-line docstring.
curLineNum += 1
while curLineNum < len(self.lines):
line = self.lines[curLineNum]
if line.find(match.group(2)) >= 0:
break
curLineNum += 1
endLineNum = curLineNum + 1
# Isolate our enclosing object's declaration.
defLines = self.lines[startLineNum: docstringStart]
# Isolate our docstring.
self.docLines = self.lines[docstringStart: endLineNum]
# If we have a docstring, extract information from it.
if self.docLines:
# Get rid of the docstring delineators.
self.docLines[0] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[0])
self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('',
self.docLines[-1])
# Handle special strings within the docstring.
docstringConverter = self.__alterDocstring(
tail, self.__writeDocstring())
for lineInfo in enumerate(self.docLines):
docstringConverter.send(lineInfo)
docstringConverter.send((len(self.docLines) - 1, None))
# Add a Doxygen @brief tag to any single-line description.
if self.options.autobrief:
safetyCounter = 0
while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '':
del self.docLines[0]
self.docLines.append('')
safetyCounter += 1
if safetyCounter >= len(self.docLines):
# Escape the effectively empty docstring.
break
if len(self.docLines) == 1 or (len(self.docLines) >= 2 and (
self.docLines[1].strip(whitespace + '#') == '' or
self.docLines[1].strip(whitespace + '#').startswith('@'))):
self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#'))
if len(self.docLines) > 1 and self.docLines[1] == '# @par':
self.docLines[1] = '#'
if defLines:
match = AstWalker.__indentRE.match(defLines[0])
indentStr = match and match.group(1) or ''
self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine)
for docLine in self.docLines]
# Taking away a docstring from an interface method definition sometimes
# leaves broken code as the docstring may be the only code in it.
# Here we manually insert a pass statement to rectify this problem.
if typeName != 'Module':
if docstringStart < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[docstringStart])
indentStr = match and match.group(1) or ''
else:
indentStr = ''
containingNodes = kwargs.get('containingNodes', []) or []
fullPathNamespace = self._getFullPathName(containingNodes)
parentType = fullPathNamespace[-2][1]
if parentType == 'interface' and typeName == 'FunctionDef' \
or fullPathNamespace[-1][1] == 'interface':
defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1],
linesep, indentStr)
elif self.options.autobrief and typeName == 'ClassDef':
# If we're parsing docstrings separate out class attribute
# definitions to get better Doxygen output.
for firstVarLineNum, firstVarLine in enumerate(self.docLines):
if '@property\t' in firstVarLine:
break
lastVarLineNum = len(self.docLines)
if lastVarLineNum > 0 and '@property\t' in firstVarLine:
while lastVarLineNum > firstVarLineNum:
lastVarLineNum -= 1
if '@property\t' in self.docLines[lastVarLineNum]:
break
lastVarLineNum += 1
if firstVarLineNum < len(self.docLines):
indentLineNum = endLineNum
indentStr = ''
while not indentStr and indentLineNum < len(self.lines):
match = AstWalker.__indentRE.match(self.lines[indentLineNum])
indentStr = match and match.group(1) or ''
indentLineNum += 1
varLines = ['{0}{1}'.format(linesep, docLine).replace(
linesep, linesep + indentStr)
for docLine in self.docLines[
firstVarLineNum: lastVarLineNum]]
defLines.extend(varLines)
self.docLines[firstVarLineNum: lastVarLineNum] = []
# After the property shuffling we will need to relocate
# any existing namespace information.
namespaceLoc = defLines[-1].find('\n# @namespace')
if namespaceLoc >= 0:
self.docLines[-1] += defLines[-1][namespaceLoc:]
defLines[-1] = defLines[-1][:namespaceLoc]
# For classes and functions, apply our changes and reverse the
# order of the declaration and docstring, and for modules just
# apply our changes.
if typeName != 'Module':
self.lines[startLineNum: endLineNum] = self.docLines + defLines
else:
self.lines[startLineNum: endLineNum] = defLines + self.docLines | Handles a docstring for functions, classes, and modules.
Basically just figures out the bounds of the docstring and sends it
off to the parser to do the actual work. |
def backup_key(self, name, mount_point=DEFAULT_MOUNT_POINT):
"""Return a plaintext backup of a named key.
The backup contains all the configuration data and keys of all the versions along with the HMAC key. The
response from this endpoint can be used with the /restore endpoint to restore the key.
Supported methods:
GET: /{mount_point}/backup/{name}. Produces: 200 application/json
:param name: Name of the key.
:type name: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: requests.Response
"""
api_path = '/v1/{mount_point}/backup/{name}'.format(
mount_point=mount_point,
name=name,
)
response = self._adapter.get(
url=api_path,
)
return response.json() | Return a plaintext backup of a named key.
The backup contains all the configuration data and keys of all the versions along with the HMAC key. The
response from this endpoint can be used with the /restore endpoint to restore the key.
Supported methods:
GET: /{mount_point}/backup/{name}. Produces: 200 application/json
:param name: Name of the key.
:type name: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: requests.Response |
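A usage sketch, assuming `transit` is an instance of the class exposing this method and 'my-key' is an existing transit key:
backup = transit.backup_key('my-key')
# The opaque backup blob conventionally sits under backup['data']['backup'] (assumed layout);
# it can later be handed to the matching /restore endpoint.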
def _set_overlay_policy_map(self, v, load=False):
"""
Setter method for overlay_policy_map, mapped from YANG variable /overlay_policy_map (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_overlay_policy_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_overlay_policy_map() directly.
YANG Description: Define a policy-map[Actions on the classified packet].
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("pmap_name",overlay_policy_map.overlay_policy_map, yang_name="overlay-policy-map", rest_name="overlay-policy-map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pmap-name', extensions={u'tailf-common': {u'info': u'Overlay Policy Map Configuration', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'75', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'OverlayPolicyMapCallPoint', u'cli-mode-name': u'config-overlay-policymap-$(pmap-name)'}}), is_container='list', yang_name="overlay-policy-map", rest_name="overlay-policy-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Overlay Policy Map Configuration', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'75', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'OverlayPolicyMapCallPoint', u'cli-mode-name': u'config-overlay-policymap-$(pmap-name)'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """overlay_policy_map must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("pmap_name",overlay_policy_map.overlay_policy_map, yang_name="overlay-policy-map", rest_name="overlay-policy-map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pmap-name', extensions={u'tailf-common': {u'info': u'Overlay Policy Map Configuration', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'75', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'OverlayPolicyMapCallPoint', u'cli-mode-name': u'config-overlay-policymap-$(pmap-name)'}}), is_container='list', yang_name="overlay-policy-map", rest_name="overlay-policy-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Overlay Policy Map Configuration', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'75', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'OverlayPolicyMapCallPoint', u'cli-mode-name': u'config-overlay-policymap-$(pmap-name)'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='list', is_config=True)""",
})
self.__overlay_policy_map = t
if hasattr(self, '_set'):
self._set() | Setter method for overlay_policy_map, mapped from YANG variable /overlay_policy_map (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_overlay_policy_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_overlay_policy_map() directly.
YANG Description: Define a policy-map[Actions on the classified packet]. |
def remove_listener(self, callback):
"""
Remove a listener.
"""
        # Materialise the matching listeners first so self.listeners can be mutated safely below.
        listeners = [x for x in self.listeners if x['callback'] == callback]
for l in listeners:
self.listeners.remove(l) | Remove a listener. |
def _operation_status_message(self):
"""Returns the most relevant status string and failed action.
This string is meant for display only.
Returns:
A printable status string and name of failed action (if any).
"""
msg = None
action = None
if not google_v2_operations.is_done(self._op):
last_event = google_v2_operations.get_last_event(self._op)
if last_event:
msg = last_event['description']
action_id = last_event.get('details', {}).get('actionId')
if action_id:
action = google_v2_operations.get_action_by_id(self._op, action_id)
else:
msg = 'Pending'
else:
failed_events = google_v2_operations.get_failed_events(self._op)
if failed_events:
failed_event = failed_events[-1]
msg = failed_event.get('details', {}).get('stderr')
action_id = failed_event.get('details', {}).get('actionId')
if action_id:
action = google_v2_operations.get_action_by_id(self._op, action_id)
if not msg:
error = google_v2_operations.get_error(self._op)
if error:
msg = error['message']
else:
msg = 'Success'
return msg, action | Returns the most relevant status string and failed action.
This string is meant for display only.
Returns:
A printable status string and name of failed action (if any). |
def get_scoped_package_version_metadata_from_recycle_bin(self, feed_id, package_scope, unscoped_package_name, package_version):
"""GetScopedPackageVersionMetadataFromRecycleBin.
[Preview API] Get information about a scoped package version in the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name)
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:rtype: :class:`<NpmPackageVersionDeletionState> <azure.devops.v5_0.npm.models.NpmPackageVersionDeletionState>`
"""
route_values = {}
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_scope is not None:
route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str')
if unscoped_package_name is not None:
route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
response = self._send(http_method='GET',
location_id='220f45eb-94a5-432c-902a-5b8c6372e415',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('NpmPackageVersionDeletionState', response) | GetScopedPackageVersionMetadataFromRecycleBin.
[Preview API] Get information about a scoped package version in the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name)
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package.
:rtype: :class:`<NpmPackageVersionDeletionState> <azure.devops.v5_0.npm.models.NpmPackageVersionDeletionState>` |
def removeUserGroups(self, users=None):
"""Removes users' groups.
Args:
users (str): A comma delimited list of user names.
Defaults to ``None``.
Warning:
When ``users`` is not provided (``None``), all users
in the organization will have their groups deleted!
"""
admin = None
userCommunity = None
portal = None
groupAdmin = None
user = None
userCommData = None
group = None
try:
admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
if users is None:
print ("You have selected to remove all users groups, you must modify the code to do this")
usersObj = []
commUsers = admin.portals.portalSelf.users(start=1, num=100)
usersObj = commUsers['users']
return
else:
usersObj = []
userStr = users.split(',')
for user in userStr:
try:
user = admin.community.users.user(str(user).strip())
usersObj.append(user)
except:
print ("%s does not exist" % str(user).strip())
if usersObj:
for userCommData in usersObj:
print ("Loading groups for user: %s" % userCommData.username)
if userCommData.groups:
for group in userCommData.groups:
groupObj = admin.community.groups.group(groupId=group['id'])
if groupObj.owner == userCommData.username:
print (groupObj.delete())
else:
print ("No Groups Found")
except:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "removeUserGroups",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
admin = None
userCommunity = None
portal = None
groupAdmin = None
user = None
userCommData = None
group = None
del admin
del userCommunity
del portal
del groupAdmin
del user
del userCommData
del group
gc.collect() | Removes users' groups.
Args:
users (str): A comma delimited list of user names.
Defaults to ``None``.
Warning:
When ``users`` is not provided (``None``), all users
in the organization will have their groups deleted! |
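A usage sketch with a hypothetical helper instance and user names:
# Always pass an explicit, comma-delimited list of user names; the users=None
# branch above is deliberately guarded so that no groups are removed org-wide.
helper.removeUserGroups(users='jdoe, asmith')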
def initialize(self, timeouts):
""" Bind or connect the nanomsg socket to some address """
# Bind or connect to address
if self.bind is True:
self.socket.bind(self.address)
else:
self.socket.connect(self.address)
# Set send and recv timeouts
self._set_timeouts(timeouts) | Bind or connect the nanomsg socket to some address |
def fromjson(source, *args, **kwargs):
"""
Extract data from a JSON file. The file must contain a JSON array as
the top level object, and each member of the array will be treated as a
row of data. E.g.::
>>> import petl as etl
>>> data = '''
... [{"foo": "a", "bar": 1},
... {"foo": "b", "bar": 2},
... {"foo": "c", "bar": 2}]
... '''
>>> with open('example.json', 'w') as f:
... f.write(data)
...
74
>>> table1 = etl.fromjson('example.json', header=['foo', 'bar'])
>>> table1
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | 1 |
+-----+-----+
| 'b' | 2 |
+-----+-----+
| 'c' | 2 |
+-----+-----+
If your JSON file does not fit this structure, you will need to parse it
via :func:`json.load` and select the array to treat as the data, see also
:func:`petl.io.json.fromdicts`.
.. versionchanged:: 1.1.0
If no `header` is specified, fields will be discovered by sampling keys
from the first `sample` objects in `source`. The header will be
constructed from keys in the order discovered. Note that this
ordering may not be stable, and therefore it may be advisable to specify
an explicit `header` or to use another function like
:func:`petl.transform.headers.sortheader` on the resulting table to
guarantee stability.
"""
source = read_source_from_arg(source)
return JsonView(source, *args, **kwargs) | Extract data from a JSON file. The file must contain a JSON array as
the top level object, and each member of the array will be treated as a
row of data. E.g.::
>>> import petl as etl
>>> data = '''
... [{"foo": "a", "bar": 1},
... {"foo": "b", "bar": 2},
... {"foo": "c", "bar": 2}]
... '''
>>> with open('example.json', 'w') as f:
... f.write(data)
...
74
>>> table1 = etl.fromjson('example.json', header=['foo', 'bar'])
>>> table1
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | 1 |
+-----+-----+
| 'b' | 2 |
+-----+-----+
| 'c' | 2 |
+-----+-----+
If your JSON file does not fit this structure, you will need to parse it
via :func:`json.load` and select the array to treat as the data, see also
:func:`petl.io.json.fromdicts`.
.. versionchanged:: 1.1.0
If no `header` is specified, fields will be discovered by sampling keys
from the first `sample` objects in `source`. The header will be
constructed from keys in the order discovered. Note that this
ordering may not be stable, and therefore it may be advisable to specify
an explicit `header` or to use another function like
:func:`petl.transform.headers.sortheader` on the resulting table to
guarantee stability. |
def _convert_to_indexer(self, obj, axis=None, is_setter=False,
raise_missing=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find out correct indexer, if not type correct raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {'key': obj}
raise
except TypeError:
pass
except (ValueError):
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == 'loc':
return {'key': obj}
# a positional
if (obj >= self.obj.shape[axis] and
not isinstance(labels, MultiIndex)):
raise ValueError("cannot set by positional indexing with "
"enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if com.is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
# When setting, missing keys are not allowed, even with .loc:
kwargs = {'raise_missing': True if is_setter else
raise_missing}
return self._get_listlike_indexer(obj, axis, **kwargs)[1]
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {'key': obj}
raise | Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing |
def load_dynamic_config(config_file=DEFAULT_DYNAMIC_CONFIG_FILE):
"""Load and parse dynamic config"""
dynamic_configurations = {}
# Insert config path so we can import it
sys.path.insert(0, path.dirname(path.abspath(config_file)))
try:
config_module = __import__('config')
dynamic_configurations = config_module.CONFIG
except ImportError:
# Provide a default if config not found
LOG.error('ImportError: Unable to load dynamic config. Check config.py file imports!')
return dynamic_configurations | Load and parse dynamic config |
def pull_commits(self, pr_number):
"""Get pull request commits"""
payload = {
'per_page': PER_PAGE,
}
commit_url = urijoin("pulls", str(pr_number), "commits")
return self.fetch_items(commit_url, payload) | Get pull request commits |
def get_free_diskbytes(dir_):
r"""
Args:
dir_ (str):
Returns:
int: bytes_ folder/drive free space (in bytes)
References::
http://stackoverflow.com/questions/51658/cross-platform-space-remaining-on-volume-using-python
http://linux.die.net/man/2/statvfs
CommandLine:
python -m utool.util_cplat --exec-get_free_diskbytes
python -m utool.util_cplat --exec-get_free_diskbytes --dir /media/raid
python -m utool.util_cplat --exec-get_free_diskbytes --dir E:
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> dir_ = ut.get_argval('--dir', type_=str, default=ut.truepath('~'))
>>> bytes_ = get_free_diskbytes(dir_)
>>> result = ('bytes_ = %s' % (str(bytes_),))
>>> print(result)
>>> print('Unused space in %r = %r' % (dir_, ut.byte_str2(bytes_)))
>>> print('Total space in %r = %r' % (dir_, ut.byte_str2(get_total_diskbytes(dir_))))
"""
if WIN32:
import ctypes
free_bytes = ctypes.c_ulonglong(0)
outvar = ctypes.pointer(free_bytes)
dir_ptr = ctypes.c_wchar_p(dir_)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(dir_ptr, None, None, outvar)
bytes_ = free_bytes.value
return bytes_
else:
st = os.statvfs(dir_)
# blocks avaiable * block size
bytes_ = st.f_bavail * st.f_frsize
#bytes_ = st.f_bfree * st.f_frsize # includes root only space
        return bytes_ | Args:
dir_ (str):
Returns:
int: bytes_ folder/drive free space (in bytes)
References::
http://stackoverflow.com/questions/51658/cross-platform-space-remaining-on-volume-using-python
http://linux.die.net/man/2/statvfs
CommandLine:
python -m utool.util_cplat --exec-get_free_diskbytes
python -m utool.util_cplat --exec-get_free_diskbytes --dir /media/raid
python -m utool.util_cplat --exec-get_free_diskbytes --dir E:
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> dir_ = ut.get_argval('--dir', type_=str, default=ut.truepath('~'))
>>> bytes_ = get_free_diskbytes(dir_)
>>> result = ('bytes_ = %s' % (str(bytes_),))
>>> print(result)
>>> print('Unused space in %r = %r' % (dir_, ut.byte_str2(bytes_)))
>>> print('Total space in %r = %r' % (dir_, ut.byte_str2(get_total_diskbytes(dir_)))) |
def paginate_queryset(self, queryset, page_size):
"""
Returns tuple containing paginator instance, page instance,
object list, and whether there are other pages.
:param queryset: the queryset instance to paginate.
:param page_size: the number of instances per page.
:rtype: tuple.
"""
paginator = self.get_paginator(
queryset,
page_size,
orphans = self.get_paginate_orphans(),
allow_empty_first_page = self.get_allow_empty()
)
page_kwarg = self.page_kwarg
#noinspection PyUnresolvedReferences
page_num = self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 1
# Default to a valid page.
try:
page = paginator.page(page_num)
except PageNotAnInteger:
page = paginator.page(1)
except EmptyPage:
page = paginator.page(paginator.num_pages)
#noinspection PyRedundantParentheses
return (paginator, page, page.object_list, page.has_other_pages()) | Returns tuple containing paginator instance, page instance,
object list, and whether there are other pages.
:param queryset: the queryset instance to paginate.
:param page_size: the number of instances per page.
:rtype: tuple. |
def get_daemon_stats(self, details=False):
"""Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict
"""
# call the daemon one
res = super(BaseSatellite, self).get_daemon_stats(details=details)
counters = res['counters']
counters['external-commands'] = len(self.external_commands)
counters['satellites.arbiters'] = len(self.arbiters)
counters['satellites.schedulers'] = len(self.schedulers)
return res | Increase the stats provided by the Daemon base class
:return: stats dictionary
:rtype: dict |
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
    :attr instance: Target object
    :type instance: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls) | Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure it's called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr instance: Target object
:type instance: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True |
def copy_figure(self):
"""Copy figure from figviewer to clipboard."""
if self.figviewer and self.figviewer.figcanvas.fig:
self.figviewer.figcanvas.copy_figure() | Copy figure from figviewer to clipboard. |
def publishing(self, service):
"""
the purpose of this tasks is to get the data from the cache
then publish them
:param service: service object where we will publish
:type service: object
"""
# flag to know if we have to update
to_update = False
# flag to get the status of a service
status = False
# provider - the service that offer data
# check if the service has already been triggered
# if date_triggered is None, then it's the first run
if service.date_triggered is None:
logger.debug("first run {}".format(service))
to_update = True
status = True
# run run run
data = self.provider(service)
count_new_data = len(data) if data else 0
if count_new_data > 0:
to_update, status = self.consumer(service, data, to_update, status)
# let's log
self.log_update(service, to_update, status, count_new_data)
# let's update
if to_update and status:
self.update_trigger(service) | the purpose of this tasks is to get the data from the cache
then publish them
:param service: service object where we will publish
:type service: object |
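
A self-contained sketch of the same first-run control flow, with a fake service object and stubbed provider/consumer hooks (none of these names are the real classes):

class FakeService(object):
    date_triggered = None   # None means this is the first run

def provider(service):
    # Stub: pretend these items came from the cache
    return ['cached item 1', 'cached item 2']

def consumer(service, data, to_update, status):
    print('publishing %d items' % len(data))
    return to_update, status

service = FakeService()
to_update = status = service.date_triggered is None
data = provider(service)
if data:
    to_update, status = consumer(service, data, to_update, status)
if to_update and status:
    print('would update the trigger date for %r' % service)
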
def galactic_to_equatorial(gl, gb):
'''This converts from galactic coords to equatorial coordinates.
Parameters
----------
gl : float or array-like
Galactic longitude values(s) in decimal degrees.
gb : float or array-like
Galactic latitude value(s) in decimal degrees.
Returns
-------
tuple of (float, float) or tuple of (np.array, np.array)
The equatorial coordinates (RA, DEC) for each element of the input
(`gl`, `gb`) in decimal degrees. These are reported in the ICRS frame.
'''
gal = SkyCoord(gl*u.degree, gb*u.degree, frame='galactic')
transformed = gal.transform_to('icrs')
return transformed.ra.degree, transformed.dec.degree | This converts from galactic coords to equatorial coordinates.
Parameters
----------
gl : float or array-like
Galactic longitude values(s) in decimal degrees.
gb : float or array-like
Galactic latitude value(s) in decimal degrees.
Returns
-------
tuple of (float, float) or tuple of (np.array, np.array)
The equatorial coordinates (RA, DEC) for each element of the input
(`gl`, `gb`) in decimal degrees. These are reported in the ICRS frame. |
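
Usage sketch, assuming astropy is installed and the function above (with its SkyCoord and units imports) is in scope; the Galactic centre (l=0, b=0) should land near RA 266.4 deg, Dec -28.9 deg in ICRS:

ra, dec = galactic_to_equatorial(0.0, 0.0)
print(ra, dec)   # roughly 266.40 -28.94
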
def get_resources(self):
"""Gets all ``Resources``.
In plenary mode, the returned list contains all known resources
or an error results. Otherwise, the returned list may contain
only those resources that are accessible through this session.
return: (osid.resource.ResourceList) - a list of ``Resources``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('resource',
collection='Resource',
runtime=self._runtime)
result = collection.find(self._view_filter()).sort('_id', DESCENDING)
return objects.ResourceList(result, runtime=self._runtime, proxy=self._proxy) | Gets all ``Resources``.
In plenary mode, the returned list contains all known resources
or an error results. Otherwise, the returned list may contain
only those resources that are accessible through this session.
return: (osid.resource.ResourceList) - a list of ``Resources``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def get_analyzable_segments(workflow, sci_segs, cat_files, out_dir, tags=None):
"""
Get the analyzable segments after applying ini specified vetoes and any
other restrictions on the science segs, e.g. a minimum segment length, or
demanding that only coincident segments are analysed.
Parameters
-----------
workflow : Workflow object
Instance of the workflow object
sci_segs : Ifo-keyed dictionary of glue.segmentlists
The science segments for each ifo to which the vetoes, or any other
restriction, will be applied.
cat_files : FileList of SegFiles
The category veto files generated by get_veto_segs
out_dir : path
Location to store output files
tags : list of strings
Used to retrieve subsections of the ini file for
configuration options.
Returns
--------
sci_ok_seg_file : workflow.core.SegFile instance
The segment file combined from all ifos containing the analyzable
science segments.
sci_ok_segs : Ifo keyed dict of ligo.segments.segmentlist instances
The analyzable science segs for each ifo, keyed by ifo
sci_ok_seg_name : str
The name with which analyzable science segs are stored in the output
XML file.
"""
if tags is None:
tags = []
logging.info('Starting reducing to analysable science segments')
make_analysis_dir(out_dir)
# NOTE: Should this be overrideable in the config file?
sci_ok_seg_name = "SCIENCE_OK"
sci_ok_seg_dict = segments.segmentlistdict()
sci_ok_segs = {}
cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags('workflow-segments',
'segments-science-veto', tags))
if len(cat_sets) > 1:
raise ValueError('Provide only 1 category group to determine'
' analyzable segments')
cat_set = cat_sets[0]
for ifo in workflow.ifos:
curr_segs = copy.copy(sci_segs[ifo])
files = cat_files.find_output_with_ifo(ifo)
for category in cat_set:
veto_def_cat = cat_to_veto_def_cat(category)
file_list = files.find_output_with_tag('VETO_CAT%d' %(veto_def_cat))
if len(file_list) > 1:
err_msg = "Found more than one veto file for %s " %(ifo,)
err_msg += "and category %s." %(category,)
raise ValueError(err_msg)
if len(file_list) == 0:
err_msg = "Found no veto files for %s " %(ifo,)
err_msg += "and category %s." %(category,)
raise ValueError(err_msg)
curr_veto_file = file_list[0]
cat_segs = curr_veto_file.return_union_seglist()
curr_segs -= cat_segs
curr_segs.coalesce()
sci_ok_seg_dict[ifo + ':' + sci_ok_seg_name] = curr_segs
sci_ok_seg_file = SegFile.from_segment_list_dict(sci_ok_seg_name,
sci_ok_seg_dict, extension='xml',
valid_segment=workflow.analysis_time,
directory=out_dir, tags=tags)
if workflow.cp.has_option_tags("workflow-segments",
"segments-minimum-segment-length", tags):
min_seg_length = int( workflow.cp.get_opt_tags("workflow-segments",
"segments-minimum-segment-length", tags) )
sci_ok_seg_file.remove_short_sci_segs(min_seg_length)
# FIXME: Another test we can do is limit to coinc time +/- some window
# this should *not* be set through segments-method, but currently
# is not implemented
#segments_method = workflow.cp.get_opt_tags("workflow-segments",
# "segments-method", tags)
#if segments_method == 'ALL_SINGLE_IFO_TIME':
# pass
#elif segments_method == 'COINC_TIME':
# cum_segs = None
# for ifo in sci_segs:
# if cum_segs is not None:
# cum_segs = (cum_segs & sci_segs[ifo]).coalesce()
# else:
# cum_segs = sci_segs[ifo]
#
# for ifo in sci_segs:
# sci_segs[ifo] = cum_segs
#else:
# raise ValueError("Invalid segments-method, %s. Options are "
# "ALL_SINGLE_IFO_TIME and COINC_TIME" % segments_method)
for ifo in workflow.ifos:
sci_ok_segs[ifo] = \
sci_ok_seg_file.segment_dict[ifo + ':' + sci_ok_seg_name]
logging.info('Done generating analyzable science segments')
return sci_ok_seg_file, sci_ok_segs, sci_ok_seg_name | Get the analyzable segments after applying ini specified vetoes and any
other restrictions on the science segs, e.g. a minimum segment length, or
demanding that only coincident segments are analysed.
Parameters
-----------
workflow : Workflow object
Instance of the workflow object
sci_segs : Ifo-keyed dictionary of glue.segmentlists
The science segments for each ifo to which the vetoes, or any other
restriction, will be applied.
cat_files : FileList of SegFiles
The category veto files generated by get_veto_segs
out_dir : path
Location to store output files
tags : list of strings
Used to retrieve subsections of the ini file for
configuration options.
Returns
--------
sci_ok_seg_file : workflow.core.SegFile instance
The segment file combined from all ifos containing the analyzable
science segments.
sci_ok_segs : Ifo keyed dict of ligo.segments.segmentlist instances
The analyzable science segs for each ifo, keyed by ifo
sci_ok_seg_name : str
The name with which analyzable science segs are stored in the output
XML file. |
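
An illustrative sketch of the core veto-subtraction step above, using ligo.segments directly with made-up times; it is not part of the workflow code:

from ligo.segments import segment, segmentlist

science = segmentlist([segment(0, 1000), segment(1500, 2000)])
vetoes = segmentlist([segment(100, 200), segment(1900, 2100)])

analyzable = science - vetoes
analyzable.coalesce()
print(analyzable)
# [segment(0, 100), segment(200, 1000), segment(1500, 1900)]
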
def _start_workflow_stages(pb: ProcessingBlock, pb_id: str,
workflow_stage_dict: dict,
workflow_stage: WorkflowStage,
docker: DockerSwarmClient):
"""Start a workflow stage by starting a number of docker services.
This function first assesses if the specified workflow stage can be
started based on its dependencies. If this is found to be the case,
the workflow stage is started by first resolving any template arguments
in the workflow stage configuration, and then using the Docker Swarm Client
API to start workflow stage services. As part of this, the
workflow_stage_dict data structure is updated accordingly.
TODO(BMo) This function will need refactoring at some point as part
of an update to the way workflow state metadata is stored in the
configuration database. Currently the stage_data dictionary
is a bit of a hack for a badly specified Configuration Database
backed WorkflowStage object.
This function is used by `execute_processing_block`.
Args:
pb (ProcessingBlock): Configuration database Processing Block data
object
pb_id (str): Processing Block identifier
workflow_stage_dict (dict): Workflow stage metadata structure
workflow_stage (WorkflowStage): Workflow stage configuration database
data object.
docker (DockerClient): Docker Swarm Client object.
"""
# FIXME(BMo) replace pb_id argument, get this from the pb instead!
stage_data = workflow_stage_dict[workflow_stage.id]
stage_data['start'] = False
# Determine if the stage can be started.
if stage_data['status'] == 'none':
if not workflow_stage.dependencies:
stage_data['start'] = True
else:
dependency_status = []
for dependency in workflow_stage.dependencies:
dependency_status.append(
workflow_stage_dict[dependency['value']][
'status'] == 'complete')
# ii += 1
stage_data['start'] = all(dependency_status)
# Start the workflow stage.
if stage_data['start']:
# Configure EE (set up templates)
LOG.info('-- Starting workflow stage: %s --', workflow_stage.id)
LOG.info('Configuring EE templates.')
args_template = jinja2.Template(workflow_stage.args_template)
stage_params = pb.workflow_parameters[workflow_stage.id]
template_params = {**workflow_stage.config, **stage_params}
args = args_template.render(stage=template_params)
LOG.info('Resolving workflow script arguments.')
args = json.dumps(json.loads(args))
compose_template = jinja2.Template(
workflow_stage.compose_template)
compose_str = compose_template.render(stage=dict(args=args))
# Prefix service names with the PB id
compose_dict = yaml.load(compose_str)
service_names = compose_dict['services'].keys()
new_service_names = [
'{}_{}_{}'.format(pb_id, pb.workflow_id, name)
for name in service_names]
for new, old in zip(new_service_names, service_names):
compose_dict['services'][new] = \
compose_dict['services'].pop(old)
compose_str = yaml.dump(compose_dict)
# Run the compose file
service_ids = docker.create_services(compose_str)
LOG.info('Starting workflow containers:')
for service_id in service_ids:
service_name = docker.get_service_name(service_id)
LOG.info(" %s, %s ", service_name, service_id)
stage_data['services'][service_id] = {}
LOG.info('Created Services: %s', service_ids)
stage_data['services'][service_id] = dict(
name=docker.get_service_name(service_id),
status='running',
complete=False
)
stage_data["status"] = 'running' | Start a workflow stage by starting a number of docker services.
This function first assesses if the specified workflow stage can be
started based on its dependencies. If this is found to be the case,
the workflow stage is started by first resolving any template arguments
in the workflow stage configuration, and then using the Docker Swarm Client
API to start workflow stage services. As part of this, the
workflow_stage_dict data structure is updated accordingly.
TODO(BMo) This function will need refactoring at some point as part
of an update to the way workflow state metadata is stored in the
configuration database. Currently the stage_data dictionary
is a bit of a hack for a badly specified Configuration Database
backed WorkflowStage object.
This function is used by `execute_processing_block`.
Args:
pb (ProcessingBlock): Configuration database Processing Block data
object
pb_id (str): Processing Block identifier
workflow_stage_dict (dict): Workflow stage metadata structure
workflow_stage (WorkflowStage): Workflow stage configuration database
data object.
docker (DockerClient): Docker Swarm Client object. |
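
A minimal sketch of the template-rendering and service-renaming steps above, with made-up stage values and compose content; it uses yaml.safe_load/safe_dump and does not talk to Docker:

import json
import jinja2
import yaml

args_template = jinja2.Template('{"num_workers": {{ stage.num_workers }} }')
args = json.dumps(json.loads(args_template.render(stage={'num_workers': 4})))

compose_template = jinja2.Template(
    "version: '3'\n"
    "services:\n"
    "  worker:\n"
    "    image: example/worker\n"
    "    command: run --args '{{ stage.args }}'\n")
compose_dict = yaml.safe_load(compose_template.render(stage=dict(args=args)))

# Prefix service names the same way as above
pb_id, workflow_id = 'pb-001', 'wf-example'
for name in list(compose_dict['services']):
    new_name = '{}_{}_{}'.format(pb_id, workflow_id, name)
    compose_dict['services'][new_name] = compose_dict['services'].pop(name)

print(yaml.safe_dump(compose_dict))
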
def open_pickle(path: str):
"""Open a pickle and return loaded pickle object.
:type path: str
:param path: File path to the pickle file to be opened.
:rtype: object
"""
try:
with open(path, 'rb') as opened_pickle:
try:
return pickle.load(opened_pickle)
except Exception as pickle_error:
logger.error(pickle_error)
raise
except FileNotFoundError as fnf_error:
logger.error(fnf_error)
raise
except IOError as io_err:
logger.error(io_err)
raise
except EOFError as eof_error:
logger.error(eof_error)
raise
except pickle.UnpicklingError as unp_error:
logger.error(unp_error)
raise | Open a pickle and return loaded pickle object.
:type path: str
:param path: File path to the pickle file to be opened.
:rtype: object |
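
Usage sketch: round-trip a small object through a temporary file and read it back with open_pickle (the module's pickle and logger imports are assumed):

import pickle
import tempfile

with tempfile.NamedTemporaryFile(suffix='.pickle', delete=False) as handle:
    pickle.dump({'words': ['lemma', 'token']}, handle)

data = open_pickle(handle.name)
print(data)   # {'words': ['lemma', 'token']}
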
def get_members(self, role=github.GithubObject.NotSet):
"""
:calls: `GET /teams/:id/members <https://developer.github.com/v3/teams/members/#list-team-members>`_
:param role: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
assert role is github.GithubObject.NotSet or isinstance(role, (str, unicode)), role
url_parameters = dict()
if role is not github.GithubObject.NotSet:
assert role in ['member', 'maintainer', 'all']
url_parameters["role"] = role
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/members",
url_parameters
) | :calls: `GET /teams/:id/members <https://developer.github.com/v3/teams/members/#list-team-members>`_
:param role: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser` |
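
Usage sketch, assuming PyGithub is installed and a valid token; the organisation name and team id here are made up:

from github import Github

gh = Github("YOUR_TOKEN")
team = gh.get_organization("example-org").get_team(1234567)
for member in team.get_members(role="maintainer"):
    print(member.login)
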
def decrypt(s, base64 = False):
"""
Symmetric decryption function
"""
return _cipher().decrypt(base64 and b64decode(s) or s) | Symmetric decryption function
def _query(self): # pylint: disable=E0202
"""
Query WMI using WMI Query Language (WQL) & parse the results.
Returns: List of WMI objects or `TimeoutException`.
"""
formated_property_names = ",".join(self.property_names)
wql = "Select {property_names} from {class_name}{filters}".format(
property_names=formated_property_names, class_name=self.class_name, filters=self.formatted_filters
)
self.logger.debug(u"Querying WMI: {0}".format(wql))
try:
# From: https://msdn.microsoft.com/en-us/library/aa393866(v=vs.85).aspx
flag_return_immediately = 0x10 # Default flag.
flag_forward_only = 0x20
flag_use_amended_qualifiers = 0x20000
query_flags = flag_return_immediately | flag_forward_only
# For the first query, cache the qualifiers to determine each
# property's "CounterType"
includes_qualifiers = self.is_raw_perf_class and self._property_counter_types is None
if includes_qualifiers:
self._property_counter_types = CaseInsensitiveDict()
query_flags |= flag_use_amended_qualifiers
raw_results = self.get_connection().ExecQuery(wql, "WQL", query_flags)
results = self._parse_results(raw_results, includes_qualifiers=includes_qualifiers)
except pywintypes.com_error:
self.logger.warning(u"Failed to execute WMI query (%s)", wql, exc_info=True)
results = []
return results | Query WMI using WMI Query Language (WQL) & parse the results.
Returns: List of WMI objects or `TimeoutException`. |
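
A standalone sketch of how the WQL string and query flags above are assembled, with made-up class and property names; no WMI connection is made:

property_names = ["Name", "PercentProcessorTime"]
class_name = "Win32_PerfRawData_PerfOS_Processor"
formatted_filters = " WHERE Name='_Total'"

wql = "Select {property_names} from {class_name}{filters}".format(
    property_names=",".join(property_names),
    class_name=class_name,
    filters=formatted_filters,
)
flag_return_immediately = 0x10   # default flag
flag_forward_only = 0x20
query_flags = flag_return_immediately | flag_forward_only
print(wql)          # Select Name,PercentProcessorTime from Win32_PerfRawData_PerfOS_Processor WHERE Name='_Total'
print(query_flags)  # 48 (0x30)
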