def cnmfrm(cname, lenout=_default_len_out):
"""
Retrieve frame ID code and name to associate with an object.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cnmfrm_c.html
:param cname: Name of the object to find a frame for.
:type cname: str
:param lenout: Maximum length available for frame name.
:type lenout: int
:return:
The ID code of the frame associated with cname,
the name of the frame with ID frcode,
and a boolean flag indicating whether an associated frame was found.
:rtype: tuple
"""
lenout = ctypes.c_int(lenout)
frname = stypes.stringToCharP(lenout)
cname = stypes.stringToCharP(cname)
found = ctypes.c_int()
frcode = ctypes.c_int()
libspice.cnmfrm_c(cname, lenout, ctypes.byref(frcode), frname,
ctypes.byref(found))
return frcode.value, stypes.toPythonString(frname), bool(found.value)
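# Usage sketch (not part of the source dataset): this wrapper is a SpiceyPy-style
# binding of CSPICE's cnmfrm_c; the object name below is illustrative.
frcode, frname, found = cnmfrm('EARTH')
if found:
    print(frcode, frname)  # frame ID code and frame name associated with 'EARTH'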
def contains(self, point):
"""contains(point) -> True | False
Returns True if point is contained inside this Rectangle, False otherwise.
Examples:
>>> r = Rect( Point(-1, -1), Point(1, 1) )
>>> r.contains( Point(0, 0) )
True
>>> r.contains( Point(2, 3) )
False
"""
return (point.x >= self.ul.x and point.x <= self.lr.x) and \
(point.y >= self.ul.y and point.y <= self.lr.y)
def get_model(self):
"""
Returns an instance of Bayesian Model.
"""
model = BayesianModel()
model.add_nodes_from(self.variables)
model.add_edges_from(self.edges)
model.name = self.model_name
tabular_cpds = []
for var, values in self.variable_CPD.items():
evidence = values['CONDSET'] if 'CONDSET' in values else []
cpd = values['DPIS']
evidence_card = values['CARDINALITY'] if 'CARDINALITY' in values else []
states = self.variables[var]['STATES']
cpd = TabularCPD(var, len(states), cpd,
evidence=evidence,
evidence_card=evidence_card)
tabular_cpds.append(cpd)
model.add_cpds(*tabular_cpds)
if nx.__version__.startswith('1'):
for var, properties in self.variables.items():
model.node[var] = properties
else:
for var, properties in self.variables.items():
model._node[var] = properties
return model
def container_clone(object_id, input_params={}, always_retry=False, **kwargs):
"""
Invokes the /container-xxxx/clone API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Cloning#API-method%3A-%2Fclass-xxxx%2Fclone
"""
return DXHTTPRequest('/%s/clone' % object_id, input_params, always_retry=always_retry, **kwargs)
def create(zone, brand, zonepath, force=False):
'''
Create an in-memory configuration for the specified zone.
zone : string
name of zone
brand : string
brand name
zonepath : string
path of zone
force : boolean
overwrite configuration
CLI Example:
.. code-block:: bash
salt '*' zonecfg.create deathscythe ipkg /zones/deathscythe
'''
ret = {'status': True}
# write config
cfg_file = salt.utils.files.mkstemp()
with salt.utils.files.fpopen(cfg_file, 'w+', mode=0o600) as fp_:
fp_.write("create -b -F\n" if force else "create -b\n")
fp_.write("set brand={0}\n".format(_sanitize_value(brand)))
fp_.write("set zonepath={0}\n".format(_sanitize_value(zonepath)))
# create
if not __salt__['file.directory_exists'](zonepath):
__salt__['file.makedirs_perms'](zonepath if zonepath[-1] == '/' else '{0}/'.format(zonepath), mode='0700')
_dump_cfg(cfg_file)
res = __salt__['cmd.run_all']('zonecfg -z {zone} -f {cfg}'.format(
zone=zone,
cfg=cfg_file,
))
ret['status'] = res['retcode'] == 0
ret['message'] = res['stdout'] if ret['status'] else res['stderr']
if ret['message'] == '':
del ret['message']
else:
ret['message'] = _clean_message(ret['message'])
# cleanup config file
if __salt__['file.file_exists'](cfg_file):
__salt__['file.remove'](cfg_file)
return ret
def default_help_formatter(quick_helps):
"""Apply default formatting for help messages
:param quick_helps: list of tuples containing help info
"""
ret = ''
for line in quick_helps:
cmd_path, param_hlp, cmd_hlp = line
ret += ' '.join(cmd_path) + ' '
if param_hlp:
ret += param_hlp + ' '
ret += '- ' + cmd_hlp + '\n'
return ret
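# Illustrative call (example data, not from the source): each tuple is
# (command path, parameter help, command help), matching the unpacking above.
quick_helps = [
    (('repo', 'clone'), '<url>', 'clone a repository'),
    (('repo', 'status'), '', 'show the working tree status'),
]
print(default_help_formatter(quick_helps))
# repo clone <url> - clone a repository
# repo status - show the working tree status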
def get_port_at(self, tile_id, direction):
"""
If no port is found, a new none port is made and added to self.ports.
Returns the port.
:param tile_id:
:param direction:
:return: Port
"""
for port in self.ports:
if port.tile_id == tile_id and port.direction == direction:
return port
port = Port(tile_id, direction, PortType.none)
self.ports.append(port)
return port
def get_unique_nonzeros(arr):
""" Return a sorted list of the non-zero unique values of arr.
Parameters
----------
arr: numpy.ndarray
The data array
Returns
-------
Sorted numpy.ndarray of the non-zero unique values of arr.
"""
rois = np.unique(arr)
rois = rois[np.nonzero(rois)]
rois.sort()
return rois
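# Illustrative example (not from the source): zeros are dropped and the remaining
# unique values are returned sorted.
import numpy as np
get_unique_nonzeros(np.array([0, 3, 1, 3, 0, 2]))  # -> array([1, 2, 3])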
def upgrade():
"""Upgrade database."""
op.create_table(
'oaiserver_set',
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('spec', sa.String(length=255), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('search_pattern', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('spec')
)
op.create_index(
op.f('ix_oaiserver_set_name'),
'oaiserver_set',
['name'],
unique=False
)
def use_openssl(libcrypto_path, libssl_path, trust_list_path=None):
"""
Forces using OpenSSL dynamic libraries on OS X (.dylib) or Windows (.dll),
or using a specific dynamic library on Linux/BSD (.so).
This can also be used to configure oscrypto to use LibreSSL dynamic
libraries.
This method must be called before any oscrypto submodules are imported.
:param libcrypto_path:
A unicode string of the file path to the OpenSSL/LibreSSL libcrypto
dynamic library.
:param libssl_path:
A unicode string of the file path to the OpenSSL/LibreSSL libssl
dynamic library.
:param trust_list_path:
An optional unicode string of the path to a file containing
OpenSSL-compatible CA certificates in PEM format. If this is not
provided and the platform is OS X or Windows, the system trust roots
will be exported from the OS and used for all TLS connections.
:raises:
ValueError - when one of the paths is not a unicode string
OSError - when the trust_list_path does not exist on the filesystem
oscrypto.errors.LibraryNotFoundError - when one of the paths does not exist on the filesystem
RuntimeError - when this function is called after another part of oscrypto has been imported
"""
if not isinstance(libcrypto_path, str_cls):
raise ValueError('libcrypto_path must be a unicode string, not %s' % type_name(libcrypto_path))
if not isinstance(libssl_path, str_cls):
raise ValueError('libssl_path must be a unicode string, not %s' % type_name(libssl_path))
if not os.path.exists(libcrypto_path):
raise LibraryNotFoundError('libcrypto does not exist at %s' % libcrypto_path)
if not os.path.exists(libssl_path):
raise LibraryNotFoundError('libssl does not exist at %s' % libssl_path)
if trust_list_path is not None:
if not isinstance(trust_list_path, str_cls):
raise ValueError('trust_list_path must be a unicode string, not %s' % type_name(trust_list_path))
if not os.path.exists(trust_list_path):
raise OSError('trust_list_path does not exist at %s' % trust_list_path)
with _backend_lock:
if _module_values['backend'] is not None:
raise RuntimeError('Another part of oscrypto has already been imported, unable to force use of OpenSSL')
_module_values['backend'] = 'openssl'
_module_values['backend_config'] = {
'libcrypto_path': libcrypto_path,
'libssl_path': libssl_path,
'trust_list_path': trust_list_path,
}
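# Hypothetical usage sketch; the library paths below are placeholders, not values
# from the source. The call must happen before importing any other oscrypto submodule.
use_openssl(u'/path/to/libcrypto.dylib', u'/path/to/libssl.dylib')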
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
Capture the exception that caused the task to fail, if any.
"""
log.error('[{}] failed due to {}'.format(task_id, getattr(einfo, 'traceback', None)))
super(LoggedTask, self).on_failure(exc, task_id, args, kwargs, einfo)
def _walk_through(job_dir):
'''
Walk through the jid dir and look for jobs
'''
serial = salt.payload.Serial(__opts__)
for top in os.listdir(job_dir):
t_path = os.path.join(job_dir, top)
if not os.path.exists(t_path):
continue
for final in os.listdir(t_path):
load_path = os.path.join(t_path, final, LOAD_P)
if not os.path.isfile(load_path):
continue
with salt.utils.files.fopen(load_path, 'rb') as rfh:
try:
job = serial.load(rfh)
except Exception:
log.exception('Failed to deserialize %s', load_path)
continue
if not job:
log.error('Deserialization of job succeeded but there is no data in %s', load_path)
continue
jid = job['jid']
yield jid, job, t_path, final
def save_csv(
self,
name,
address=True,
class_param=None,
class_name=None,
matrix_save=True,
normalize=False):
"""
Save ConfusionMatrix in CSV file.
:param name: filename
:type name : str
:param address: flag for address return
:type address : bool
:param class_param : class parameters list for save, Example : ["TPR","TNR","AUC"]
:type class_param : list
:param class_name : class name (subset of classes), Example :[1,2,3]
:type class_name : list
:param matrix_save : save matrix flag
:type matrix_save : bool
:param normalize : save normalize matrix flag
:type normalize : bool
:return: saving Status as dict {"Status":bool , "Message":str}
"""
try:
message = None
classes = class_filter(self.classes, class_name)
csv_file = open(name + ".csv", "w")
csv_data = csv_print(
classes,
self.class_stat,
self.digit,
class_param)
csv_file.write(csv_data)
if matrix_save:
matrix = self.table
if normalize:
matrix = self.normalized_table
csv_matrix_file = open(name + "_matrix" + ".csv", "w")
csv_matrix_data = csv_matrix_print(self.classes, matrix)
csv_matrix_file.write(csv_matrix_data)
if address:
message = os.path.join(os.getcwd(), name + ".csv")
return {"Status": True, "Message": message}
except Exception as e:
return {"Status": False, "Message": str(e)} | Save ConfusionMatrix in CSV file.
:param name: filename
:type name : str
:param address: flag for address return
:type address : bool
:param class_param : class parameters list for save, Example : ["TPR","TNR","AUC"]
:type class_param : list
:param class_name : class name (sub set of classes), Example :[1,2,3]
:type class_name : list
:param matrix_save : save matrix flag
:type matrix_save : bool
:param normalize : save normalize matrix flag
:type normalize : bool
:return: saving Status as dict {"Status":bool , "Message":str} |
def calc_ag_v1(self):
"""Sum the through flown area of the total cross section.
Required flux sequences:
|AM|
|AV|
|AVR|
Calculated flux sequence:
|AG|
Example:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> fluxes.am = 1.0
>>> fluxes.av= 2.0, 3.0
>>> fluxes.avr = 4.0, 5.0
>>> model.calc_ag_v1()
>>> fluxes.ag
ag(15.0)
"""
flu = self.sequences.fluxes.fastaccess
flu.ag = flu.am+flu.av[0]+flu.av[1]+flu.avr[0]+flu.avr[1]
def refweights(self):
"""A |numpy| |numpy.ndarray| with equal weights for all segment
junctions.
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> states.qjoints.shape = 5
>>> states.qjoints.refweights
array([ 0.2, 0.2, 0.2, 0.2, 0.2])
"""
# pylint: disable=unsubscriptable-object
# due to a pylint bug (see https://github.com/PyCQA/pylint/issues/870)
return numpy.full(self.shape, 1./self.shape[0], dtype=float)
def update_chain(graph, loc, du, ud):
"""
Updates the DU chain of the instruction located at loc such that there is
no more reference to it so that we can remove it.
When an instruction is found to be dead (i.e. it has no side effect, and the
register defined is not used) we have to update the DU chain of all the
variables that may be used by the dead instruction.
"""
ins = graph.get_ins_from_loc(loc)
for var in ins.get_used_vars():
# We get the definition points of the current variable
for def_loc in set(ud[var, loc]):
# We remove the use of the variable at loc from the DU chain of
# the variable definition located at def_loc
du[var, def_loc].remove(loc)
ud[var, loc].remove(def_loc)
if not ud.get((var, loc)):
ud.pop((var, loc))
# If the DU chain of the defined variable is now empty, this means
# that we may have created a new dead instruction, so we check that
# the instruction has no side effect and we update the DU chain of
# the new dead instruction, and we delete it.
# We also make sure that def_loc is not < 0. This is the case when
# the current variable is a method parameter.
if def_loc >= 0 and not du[var, def_loc]:
du.pop((var, def_loc))
def_ins = graph.get_ins_from_loc(def_loc)
if def_ins.is_call():
def_ins.remove_defined_var()
elif def_ins.has_side_effect():
continue
else:
update_chain(graph, def_loc, du, ud)
graph.remove_ins(def_loc)
def convert_args_to_list(args):
"""Convert all iterable pairs of inputs into a list of list"""
list_of_pairs = []
if len(args) == 0:
return []
if any(isinstance(arg, (list, tuple)) for arg in args):
# Domain([[1, 4]])
# Domain([(1, 4)])
# Domain([(1, 4), (5, 8)])
# Domain([[1, 4], [5, 8]])
if len(args) == 1 and \
any(isinstance(arg, (list, tuple)) for arg in args[0]):
for item in args[0]:
list_of_pairs.append(list(item))
else:
# Domain([1, 4])
# Domain((1, 4))
# Domain((1, 4), (5, 8))
# Domain([1, 4], [5, 8])
for item in args:
list_of_pairs.append(list(item))
else:
# Domain(1, 2)
if len(args) == 2:
list_of_pairs.append(list(args))
else:
msg = "The argument type is invalid. ".format(args)
raise TypeError(msg)
return list_of_pairs
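# Illustrative calls (not from the source). Per the inline comments, this helper
# receives the *args tuple of a Domain(...) constructor, so it is passed a single
# tuple of arguments here:
convert_args_to_list(([1, 4],))          # Domain([1, 4])         -> [[1, 4]]
convert_args_to_list(((1, 4), (5, 8)))   # Domain((1, 4), (5, 8)) -> [[1, 4], [5, 8]]
convert_args_to_list((1, 2))             # Domain(1, 2)           -> [[1, 2]]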
def load_font(self, font_path, font_size):
"""Load the specified font from a file."""
self.__font_path = font_path
self.__font_size = font_size
if font_path != "":
self.__font = pygame.font.Font(font_path, font_size)
self.__set_text(self.__text)
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2015-12-04 - Started - Bovy (UofT)
"""
return 1./(1.+(R**2.+z**2.)/self._a2)/4./nu.pi/self._a3
def update_todo_menu(self):
"""Update todo list menu"""
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
self.todo_menu.clear()
filename = self.get_current_filename()
for text, line0 in results:
icon = ima.icon('todo')
slot = lambda _checked, _l=line0: self.load(filename, goto=_l)
action = create_action(self, text=text, icon=icon, triggered=slot)
self.todo_menu.addAction(action)
self.update_todo_actions()
def refresh():
"""
Refreshes an existing JWT by creating a new one that is a copy of the old
except that it has a refreshed access expiration.
.. example::
$ curl http://localhost:5000/refresh -X GET \
-H "Authorization: Bearer <your_token>"
"""
old_token = guard.read_token_from_header()
new_token = guard.refresh_jwt_token(old_token)
ret = {'access_token': new_token}
return flask.jsonify(ret), 200
def _set_remote(self, stream=False):
"""
Call :py:meth:`~._args_for_remote`; if the return value is not None,
execute 'terraform remote config' with those arguments and ensure it
exits 0.
:param stream: whether or not to stream TF output in realtime
:type stream: bool
"""
args = self._args_for_remote()
if args is None:
logger.debug('_args_for_remote() returned None; not configuring '
'terraform remote')
return
logger.warning('Setting terraform remote config: %s', ' '.join(args))
args = ['config'] + args
self._run_tf('remote', cmd_args=args, stream=stream)
logger.info('Terraform remote configured.')
def _Dispatch(ps, server, SendResponse, SendFault, post, action, nsdict={}, **kw):
'''Send ParsedSoap instance to ServiceContainer, which dispatches to
appropriate service via post, and method via action. Response is a
self-describing pyobj, which is passed to a SoapWriter.
Call SendResponse or SendFault to send the reply back, appropriately.
server -- ServiceContainer instance
'''
localURL = 'http://%s:%d%s' %(server.server_name,server.server_port,post)
address = action
service = server.getNode(post)
isWSResource = False
if isinstance(service, WSAResource):
isWSResource = True
service.setServiceURL(localURL)
address = Address()
try:
address.parse(ps)
except Exception, e:
return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw)
if action and action != address.getAction():
e = WSActionException('SOAP Action("%s") must match WS-Action("%s") if specified.' \
%(action,address.getAction()))
return SendFault(FaultFromException(e, 0, None), **kw)
action = address.getAction()
if isinstance(service, ServiceInterface) is False:
e = NoSuchService('no service at POST(%s) in container: %s' %(post,server))
return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw)
if not service.authorize(None, post, action):
return SendFault(Fault(Fault.Server, "Not authorized"), code=401)
#try:
# raise NotAuthorized()
#except Exception, e:
#return SendFault(FaultFromException(e, 0, None), code=401, **kw)
##return SendFault(FaultFromException(NotAuthorized(), 0, None), code=401, **kw)
try:
method = service.getOperation(ps, address)
except Exception, e:
return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw)
try:
if isWSResource is True:
request,result = method(ps, address)
else:
request,result = method(ps)
except Exception, e:
return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw)
# Verify if Signed
service.verify(ps)
# If No response just return.
if result is None:
return SendResponse('', **kw)
sw = SoapWriter(nsdict=nsdict)
try:
sw.serialize(result)
except Exception, e:
return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw)
if isWSResource is True:
action = service.getResponseAction(ps, action)
addressRsp = Address(action=action)
try:
addressRsp.setResponseFromWSAddress(address, localURL)
addressRsp.serialize(sw)
except Exception, e:
return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw)
# Create Signatures
service.sign(sw)
try:
soapdata = str(sw)
return SendResponse(soapdata, **kw)
except Exception, e:
return SendFault(FaultFromException(e, 0, sys.exc_info()[2]), **kw)
def rst2html(rst_src, **kwargs):
"""
Convert a reStructuredText string into a unicode HTML fragment.
For `kwargs`, see `default_rst_opts` and
http://docutils.sourceforge.net/docs/user/config.html
"""
pub = rst2pub(rst_src, settings_overrides=kwargs, writer_name='html')
return pub.writer.parts['body']
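# Minimal usage sketch (not from the source); rst2pub and default_rst_opts are assumed
# to be defined elsewhere in the same module, and docutils must be installed.
fragment = rst2html("Hello, *world*!")
# -> an HTML body fragment such as '<p>Hello, <em>world</em>!</p>\n'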
def _include_module(self, module, mn):
""" See if a module should be included or excluded based upon
included_packages and excluded_packages.
As some packages have the following format:
scipy.special.specfun
scipy.linalg
Where the top-level package name is just a prefix to a longer package name,
we don't want to do a direct comparison. Instead, we want to exclude packages
which are either exactly "<package_name>", or start with "<package_name>".
"""
if mn in self.topology.include_packages:
_debug.debug("_include_module:explicit using __include_packages: module=%s", mn)
return True
if '.' in mn:
for include_package in self.topology.include_packages:
if mn.startswith(include_package + '.'):
_debug.debug("_include_module:explicit pattern using __include_packages: module=%s pattern=%s", mn, \
include_package + '.')
return True
if mn in self.topology.exclude_packages:
_debug.debug("_include_module:explicit using __exclude_packages: module=%s", mn)
return False
if '.' in mn:
for exclude_package in self.topology.exclude_packages:
if mn.startswith(exclude_package + '.'):
_debug.debug("_include_module:explicit pattern using __exclude_packages: module=%s pattern=%s", mn, \
exclude_package + '.')
return False
_debug.debug("_include_module:including: module=%s", mn)
return True
def _concat_datetimetz(to_concat, name=None):
"""
concat DatetimeIndex with the same tz
all inputs must be DatetimeIndex
it is used in DatetimeIndex.append also
"""
# Right now, internals will pass a List[DatetimeArray] here
# for reductions like quantile. I would like to disentangle
# all this before we get here.
sample = to_concat[0]
if isinstance(sample, ABCIndexClass):
return sample._concat_same_dtype(to_concat, name=name)
elif isinstance(sample, ABCDatetimeArray):
return sample._concat_same_type(to_concat)
def _get(self, url):
"""
Helper method: GET data from given URL on TBA's API.
:param url: URL string to get data from.
:return: Requested data in JSON format.
"""
return self.session.get(self.READ_URL_PRE + url).json()
def zyz_decomposition(gate: Gate) -> Circuit:
"""
Returns the Euler Z-Y-Z decomposition of a local 1-qubit gate.
"""
if gate.qubit_nb != 1:
raise ValueError('Expected 1-qubit gate')
q, = gate.qubits
U = asarray(gate.asoperator())
U /= np.linalg.det(U) ** (1/2) # SU(2)
if abs(U[0, 0]) > abs(U[1, 0]):
theta1 = 2 * np.arccos(min(abs(U[0, 0]), 1))
else:
theta1 = 2 * np.arcsin(min(abs(U[1, 0]), 1))
cos_halftheta1 = np.cos(theta1/2)
if not np.isclose(cos_halftheta1, 0.0):
phase = U[1, 1] / cos_halftheta1
theta0_plus_theta2 = 2 * np.arctan2(np.imag(phase), np.real(phase))
else:
theta0_plus_theta2 = 0.0
sin_halftheta1 = np.sin(theta1/2)
if not np.isclose(sin_halftheta1, 0.0):
phase = U[1, 0] / sin_halftheta1
theta0_sub_theta2 = 2 * np.arctan2(np.imag(phase), np.real(phase))
else:
theta0_sub_theta2 = 0.0
theta0 = (theta0_plus_theta2 + theta0_sub_theta2) / 2
theta2 = (theta0_plus_theta2 - theta0_sub_theta2) / 2
t0 = theta0/np.pi
t1 = theta1/np.pi
t2 = theta2/np.pi
circ1 = Circuit()
circ1 += TZ(t2, q)
circ1 += TY(t1, q)
circ1 += TZ(t0, q)
return circ1
def register_type(klass, type_url=None):
"""Register a klass as the factory for a given type URL.
:type klass: :class:`type`
:param klass: class to be used as a factory for the given type
:type type_url: str
:param type_url: (Optional) URL naming the type. If not provided,
infers the URL from the type descriptor.
:raises ValueError: if a registration already exists for the URL.
"""
if type_url is None:
type_url = _compute_type_url(klass)
if type_url in _TYPE_URL_MAP:
if _TYPE_URL_MAP[type_url] is not klass:
raise ValueError("Conflict: %s" % (_TYPE_URL_MAP[type_url],))
_TYPE_URL_MAP[type_url] = klass
def load_single_dict(pinyin_dict, style='default'):
"""载入用户自定义的单字拼音库
:param pinyin_dict: 单字拼音库。比如: ``{0x963F: u"ā,ē"}``
:param style: pinyin_dict 参数值的拼音库风格. 支持 'default', 'tone2'
:type pinyin_dict: dict
"""
if style == 'tone2':
for k, v in pinyin_dict.items():
v = _replace_tone2_style_dict_to_default(v)
PINYIN_DICT[k] = v
else:
PINYIN_DICT.update(pinyin_dict)
mmseg.retrain(mmseg.seg)
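# Illustrative usage (not from the source), based on the docstring example:
load_single_dict({0x963F: u"ā,ē"})                   # values in the default style
load_single_dict({0x963F: u"a1,e1"}, style='tone2')  # tone2-style values are converted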
def edit_message_reply_markup(
self,
chat_id: Union[int, str],
message_id: int,
reply_markup: "pyrogram.InlineKeyboardMarkup" = None
) -> "pyrogram.Message":
"""Use this method to edit only the reply markup of messages sent by the bot or via the bot (for inline bots).
Args:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
message_id (``int``):
Message identifier in the chat specified in chat_id.
reply_markup (:obj:`InlineKeyboardMarkup`, *optional*):
An InlineKeyboardMarkup object.
Returns:
On success, if edited message is sent by the bot, the edited
:obj:`Message <pyrogram.Message>` is returned, otherwise True is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
r = self.send(
functions.messages.EditMessage(
peer=self.resolve_peer(chat_id),
id=message_id,
reply_markup=reply_markup.write() if reply_markup else None
)
)
for i in r.updates:
if isinstance(i, (types.UpdateEditMessage, types.UpdateEditChannelMessage)):
return pyrogram.Message._parse(
self, i.message,
{i.id: i for i in r.users},
{i.id: i for i in r.chats}
)
def create_classifier(self,
name,
positive_examples,
negative_examples=None,
negative_examples_filename=None,
**kwargs):
"""
Create a classifier.
Train a new multi-faceted classifier on the uploaded image data. Create your
custom classifier with positive or negative examples. Include at least two sets of
examples, either two positive example files or one positive and one negative file.
You can upload a maximum of 256 MB per call.
Encode all names in UTF-8 if they contain non-ASCII characters (.zip and image
file names, and classifier and class names). The service assumes UTF-8 encoding if
it encounters non-ASCII characters.
:param str name: The name of the new classifier. Encode special characters in
UTF-8.
:param dict positive_examples: A dictionary that contains the value for each
classname. The value is a .zip file of images that depict the visual subject of a
class in the new classifier. You can include more than one positive example file
in a call.
Specify the parameter name by appending `_positive_examples` to the class name.
For example, `goldenretriever_positive_examples` creates the class
**goldenretriever**.
Include at least 10 images in .jpg or .png format. The minimum recommended image
resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100
MB per .zip file.
Encode special characters in the file name in UTF-8.
:param file negative_examples: A .zip file of images that do not depict the visual
subject of any of the classes of the new classifier. Must contain a minimum of 10
images.
Encode special characters in the file name in UTF-8.
:param str negative_examples_filename: The filename for negative_examples.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if name is None:
raise ValueError('name must be provided')
if not positive_examples:
raise ValueError('positive_examples must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('watson_vision_combined', 'V3',
'create_classifier')
headers.update(sdk_headers)
params = {'version': self.version}
form_data = {}
form_data['name'] = (None, name, 'text/plain')
for key in positive_examples.keys():
part_name = '%s_positive_examples' % (key)
value = positive_examples[key]
if hasattr(value, 'name'):
filename = basename(value.name)
form_data[part_name] = (filename, value, 'application/octet-stream')
if negative_examples:
if not negative_examples_filename and hasattr(
negative_examples, 'name'):
negative_examples_filename = basename(negative_examples.name)
if not negative_examples_filename:
raise ValueError('negative_examples_filename must be provided')
form_data['negative_examples'] = (negative_examples_filename,
negative_examples,
'application/octet-stream')
url = '/v3/classifiers'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
files=form_data,
accept_json=True)
return response
def sqrt_with_finite_grads(x, name=None):
"""A sqrt function whose gradient at zero is very large but finite.
Args:
x: a `Tensor` whose sqrt is to be computed.
name: a Python `str` prefixed to all ops created by this function.
Default `None` (i.e., "sqrt_with_finite_grads").
Returns:
sqrt: the square root of `x`, with an overridden gradient at zero
grad: a gradient function, which is the same as sqrt's gradient everywhere
except at zero, where it is given a large finite value, instead of `inf`.
Raises:
TypeError: if `tf.convert_to_tensor(x)` is not a `float` type.
Often in kernel functions, we need to compute the L2 norm of the difference
between two vectors, `x` and `y`: `sqrt(sum_i((x_i - y_i) ** 2))`. In the
case where `x` and `y` are identical, e.g., on the diagonal of a kernel
matrix, we get `NaN`s when we take gradients with respect to the inputs. To
see this, consider the forward pass:
```
[x_1 ... x_N] --> [x_1 ** 2 ... x_N ** 2] -->
(x_1 ** 2 + ... + x_N ** 2) --> sqrt((x_1 ** 2 + ... + x_N ** 2))
```
When we backprop through this forward pass, the `sqrt` yields an `inf` because
`grad_z(sqrt(z)) = 1 / (2 * sqrt(z))`. Continuing the backprop to the left, at
the `x ** 2` term, we pick up a `2 * x`, and when `x` is zero, we get
`0 * inf`, which is `NaN`.
We'd like to avoid these `NaN`s, since they infect the rest of the connected
computation graph. Practically, when two inputs to a kernel function are
equal, we are in one of two scenarios:
1. We are actually computing k(x, x), in which case norm(x - x) is
identically zero, independent of x. In this case, we'd like the
gradient to reflect this independence: it should be zero.
2. We are computing k(x, y), and x just *happens* to have the same value
as y. The gradient at such inputs is in fact ill-defined (there is a
cusp in the sqrt((x - y) ** 2) surface along the line x = y). There are,
however, an infinite number of sub-gradients, all of which are valid at
all such inputs. By symmetry, there is exactly one which is "special":
zero, and we elect to use that value here. In practice, having two
identical inputs to a kernel matrix is probably a pathological
situation to be avoided, but that is better resolved at a higher level
than this.
To avoid the infinite gradient at zero, we use tf.custom_gradient to redefine
the gradient at zero. We assign it to be a very large value, specifically
the sqrt of the max value of the floating point dtype of the input. We use
the sqrt (as opposed to just using the max floating point value) to avoid
potential overflow when combining this value with others downstream.
"""
with tf.compat.v1.name_scope(name, 'sqrt_with_finite_grads', [x]):
x = tf.convert_to_tensor(value=x, name='x')
if not x.dtype.is_floating:
raise TypeError('Input `x` must be floating type.')
def grad(grad_ys):
large_float_like_x = np.sqrt(np.finfo(x.dtype.as_numpy_dtype()).max)
safe_grads = tf.where(
tf.equal(x, 0), tf.fill(tf.shape(input=x), large_float_like_x),
0.5 * tf.math.rsqrt(x))
return grad_ys * safe_grads
return tf.sqrt(x), grad
def _shuffled(seq):
"""Deterministically shuffle identically under both py2 + py3."""
fixed_random = random.Random()
if six.PY2: # pragma: no cover (py2)
fixed_random.seed(FIXED_RANDOM_SEED)
else: # pragma: no cover (py3)
fixed_random.seed(FIXED_RANDOM_SEED, version=1)
seq = list(seq)
random.shuffle(seq, random=fixed_random.random)
return seq
def coords(self):
"""
A tuple of two `~numpy.ndarray` containing the ``y`` and ``x``
pixel coordinates of unmasked pixels within the source segment.
Non-finite pixel values (e.g. NaN, infs) are excluded
(automatically masked).
If all pixels are masked, ``coords`` will be a tuple of
two empty arrays.
"""
yy, xx = np.nonzero(self.data_cutout_ma)
return (yy + self._slice[0].start, xx + self._slice[1].start)
def all(iterable = None, *, name = None, metric = call_default):
"""Measure total time and item count for consuming an iterable
:arg iterable: any iterable
:arg function metric: f(name, count, total_time)
:arg str name: name for the metric
"""
if iterable is None:
return _iter_decorator(name, metric)
else:
return _do_all(iterable, name, metric)
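# Usage sketch (not from the source): consume an iterable and report how many items
# it yielded and how long that took, via a custom metric callback.
def print_metric(name, count, total_time):
    print(name, count, total_time)

for item in all(range(1000), name='numbers', metric=print_metric):
    pass  # the metric callback fires once the iterable is exhausted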
def _set_range(self, v, load=False):
"""
Setter method for range, mapped from YANG variable /rbridge_id/router/ospf/area/range (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_range is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_range() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("range_address range_mask",range.range, yang_name="range", rest_name="range", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='range-address range-mask', extensions={u'tailf-common': {u'info': u'To define or undefine a type-3 address \nrange (ABR only)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'OSPFAreaRangeCallPoint'}}), is_container='list', yang_name="range", rest_name="range", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To define or undefine a type-3 address \nrange (ABR only)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'OSPFAreaRangeCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """range must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("range_address range_mask",range.range, yang_name="range", rest_name="range", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='range-address range-mask', extensions={u'tailf-common': {u'info': u'To define or undefine a type-3 address \nrange (ABR only)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'OSPFAreaRangeCallPoint'}}), is_container='list', yang_name="range", rest_name="range", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To define or undefine a type-3 address \nrange (ABR only)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'OSPFAreaRangeCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='list', is_config=True)""",
})
self.__range = t
if hasattr(self, '_set'):
self._set()
def hdel(self, name, *keys):
"""
Delete one or more hash fields.
:param name: str the name of the redis key
:param keys: one or more members to remove from the key.
:return: Future()
"""
with self.pipe as pipe:
m_encode = self.memberparse.encode
keys = [m_encode(m) for m in self._parse_values(keys)]
return pipe.hdel(self.redis_key(name), *keys)
def create_awslambda(self):
"""Create security groups as defined in the configs."""
utils.banner("Creating Lambda Function")
awslambdaobj = awslambda.LambdaFunction(
app=self.app, env=self.env, region=self.region, prop_path=self.json_path)
awslambdaobj.create_lambda_function()
utils.banner("Creating Lambda Event")
lambdaeventobj = awslambda.LambdaEvent(app=self.app, env=self.env, region=self.region, prop_path=self.json_path)
lambdaeventobj.create_lambda_events()
def copy_session(session: requests.Session) -> requests.Session:
"""Duplicates a requests.Session."""
new = requests.Session()
new.cookies = requests.utils.cookiejar_from_dict(requests.utils.dict_from_cookiejar(session.cookies))
new.headers = session.headers.copy()
return new
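# Usage sketch (not from the source): clone a session so the copy can be changed
# without touching the original's cookies or headers.
import requests
original = requests.Session()
original.headers['User-Agent'] = 'my-app/1.0'  # illustrative header
clone = copy_session(original)
clone.headers['User-Agent'] = 'my-app-worker/1.0'  # original is unaffected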
def get_auth(host, app_name, database_name):
"""
Authentication hook to allow plugging in custom authentication credential providers
"""
from .hooks import _get_auth_hook
return _get_auth_hook(host, app_name, database_name)
def _fire(self, layers, things, the_plot):
"""Launches a new bolt from the player."""
# We don't fire if the player fired another bolt just now.
if the_plot.get('last_player_shot') == the_plot.frame: return
the_plot['last_player_shot'] = the_plot.frame
# We start just above the player.
row, col = things['P'].position
self._teleport((row-1, col))
def integer(
element_name, # type: Text
attribute=None, # type: Optional[Text]
required=True, # type: bool
alias=None, # type: Optional[Text]
default=0, # type: Optional[int]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> Processor
"""
Create a processor for integer values.
See also :func:`declxml.boolean`
"""
value_parser = _number_parser(int)
return _PrimitiveValue(
element_name,
value_parser,
attribute,
required,
alias,
default,
omit_empty,
hooks
)
def add_file(self, fileGrp, mimetype=None, url=None, ID=None, pageId=None, force=False, local_filename=None, **kwargs):
"""
Add a `OcrdFile </../../ocrd_models/ocrd_models.ocrd_file.html>`_.
Arguments:
fileGrp (string): Add file to ``mets:fileGrp`` with this ``USE`` attribute
mimetype (string):
url (string):
ID (string):
pageId (string):
force (boolean): Whether to add the file even if a ``mets:file`` with the same ``ID`` already exists.
local_filename (string):
mimetype (string):
"""
if not ID:
raise Exception("Must set ID of the mets:file")
el_fileGrp = self._tree.getroot().find(".//mets:fileGrp[@USE='%s']" % (fileGrp), NS)
if el_fileGrp is None:
el_fileGrp = self.add_file_group(fileGrp)
if ID is not None and self.find_files(ID=ID) != []:
if not force:
raise Exception("File with ID='%s' already exists" % ID)
mets_file = self.find_files(ID=ID)[0]
else:
mets_file = OcrdFile(ET.SubElement(el_fileGrp, TAG_METS_FILE), mets=self)
mets_file.url = url
mets_file.mimetype = mimetype
mets_file.ID = ID
mets_file.pageId = pageId
mets_file.local_filename = local_filename
self._file_by_id[ID] = mets_file
return mets_file
def constraint(self, n=-1, fid=0):
"""Obtain the set of orthogonal equations that make the solution of
the rank deficient normal equations possible.
:param fid: the id of the sub-fitter (numerical)
"""
c = self._getval("constr", fid)
if n < 0 or n > self.deficiency(fid):
return c
else:
raise RuntimeError("Not yet implemented") | Obtain the set of orthogonal equations that make the solution of
the rank deficient normal equations possible.
:param fid: the id of the sub-fitter (numerical) |
def toJson(self, data=None, pretty=False):
"""convert the flattened dictionary into json"""
if data is None: data = self.attrs
data = self.flatten(data) # don't send objects as str in json
#if pretty:
ret = json.dumps(data, indent=4, sort_keys=True)
#self.inflate() # restore objects from json str data
return ret
def dump(rt, from_date, with_json=True, latest_only=False, **kwargs):
"""Dump the remote tokens as a list of dictionaries.
:param ra: Remote toekn to be dumped.
:type ra: `invenio_oauthclient.models.RemoteToken [Invenio2.x]`
:returns: Remote tokens serialized to dictionary.
:rtype: dict
"""
return dict(id_remote_account=rt.id_remote_account,
token_type=rt.token_type,
access_token=rt.access_token,
secret=rt.secret)
def clear(self):
"""
Calls `_clear` abstract method which must be implemented by descendants.
:raises: GPflowError exception when parent of the node is built.
"""
parent = self.parent
if parent is not self and parent.is_built_coherence(self.graph) is Build.YES:
raise GPflowError('Clear method cannot be started. Upper nodes are built.')
self._clear()
def _findOptionValueAdvAudit(option):
'''
Get the Advanced Auditing policy as configured in
``C:\\Windows\\Security\\Audit\\audit.csv``
Args:
option (str): The name of the setting as it appears in audit.csv
Returns:
str: The setting value for ``option`` from audit.csv if present, otherwise ``None``
'''
if 'lgpo.adv_audit_data' not in __context__:
system_root = os.environ.get('SystemRoot', 'C:\\Windows')
f_audit = os.path.join(system_root, 'security', 'audit', 'audit.csv')
f_audit_gpo = os.path.join(system_root, 'System32', 'GroupPolicy',
'Machine', 'Microsoft', 'Windows NT',
'Audit', 'audit.csv')
# Make sure there is an existing audit.csv file on the machine
if not __salt__['file.file_exists'](f_audit):
if __salt__['file.file_exists'](f_audit_gpo):
# If the GPO audit.csv exists, we'll use that one
__salt__['file.copy'](f_audit_gpo, f_audit)
else:
field_names = _get_audit_defaults('fieldnames')
# If the file doesn't exist anywhere, create it with default
# fieldnames
__salt__['file.makedirs'](f_audit)
__salt__['file.write'](f_audit, ','.join(field_names))
audit_settings = {}
with salt.utils.files.fopen(f_audit, mode='r') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
audit_settings.update(
{row['Subcategory']: row['Setting Value']})
__context__['lgpo.adv_audit_data'] = audit_settings
return __context__['lgpo.adv_audit_data'].get(option, None)
def score_pairwise(aseq, bseq):
"""Compute pairwise distances between two sequences (raw strings)."""
assert len(aseq) == len(bseq)
# Affine gap penalties -- default values from EMBOSS needle/water
GAP_OPEN = -10.0
GAP_EXTEND = -0.5
GAP_CHARS = frozenset('-.')
score = 0.0
in_gap = True # Don't apply the opening penalty to the N-terminal gap
for ares, bres in zip(aseq.upper(), bseq.upper()):
if ares in GAP_CHARS and bres in GAP_CHARS:
# Both are gaps -- this happens in multiple sequence alignments
continue
match = blosum62.get((ares, bres), None)
if match is None:
assert GAP_CHARS.intersection((ares, bres)), \
"Expected one gap in: " + str((ares, bres))
# Gap
if not in_gap:
score += GAP_OPEN
in_gap = True
score += GAP_EXTEND
else:
in_gap = False
score += match
if in_gap:
# Correct for a penalty on the C-terminal gap
score -= GAP_OPEN
return score
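# Usage sketch (not from the source): score two pre-aligned sequences. blosum62 is
# assumed to be a substitution-matrix dict mapping residue pairs to scores, e.g.
# Biopython's Bio.SubsMat.MatrixInfo.blosum62.
identical = score_pairwise("ACDE", "ACDE")  # sum of the matching BLOSUM62 scores
gapped = score_pairwise("ACDE", "AC-E")     # one internal gap: open + extend penalties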
def hessian(self, x, y, Rs, theta_Rs, r_core, center_x=0, center_y=0):
"""
:param x: x coordinate
:param y: y coordinate
:param Rs: scale radius
:param theta_Rs: deflection angle at the scale radius Rs
:param r_core: core radius
:param center_x:
:param center_y:
:return:
"""
if Rs < 0.0001:
Rs = 0.0001
x_ = x - center_x
y_ = y - center_y
R = np.sqrt(x_ ** 2 + y_ ** 2)
rho0 = self._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs, r_core=r_core)
kappa = self.density_2d(x_, y_, Rs, rho0, r_core)
gamma1, gamma2 = self.cBurkGamma(R, Rs, rho0, r_core, x_, y_)
f_xx = kappa + gamma1
f_yy = kappa - gamma1
f_xy = gamma2
return f_xx, f_yy, f_xy
async def monitor_status(self, alarm_status_callback=None,
zone_changed_callback=None,
output_changed_callback=None):
"""Start monitoring of the alarm status.
Send command to satel integra to start sending updates. Read in a
loop and call respective callbacks when received messages.
"""
self._alarm_status_callback = alarm_status_callback
self._zone_changed_callback = zone_changed_callback
self._output_changed_callback = output_changed_callback
_LOGGER.info("Starting monitor_status loop")
while not self.closed:
_LOGGER.debug("Iteration... ")
while not self.connected:
_LOGGER.info("Not connected, re-connecting... ")
await self.connect()
if not self.connected:
_LOGGER.warning("Not connected, sleeping for 10s... ")
await asyncio.sleep(self._reconnection_timeout)
continue
await self.start_monitoring()
if not self.connected:
_LOGGER.warning("Start monitoring failed, sleeping for 10s...")
await asyncio.sleep(self._reconnection_timeout)
continue
while True:
await self._update_status()
_LOGGER.debug("Got status!")
if not self.connected:
_LOGGER.info("Got connection broken, reconnecting!")
break
_LOGGER.info("Closed, quit monitoring.") | Start monitoring of the alarm status.
Send command to satel integra to start sending updates. Read in a
loop and call respective callbacks when received messages. |
def register_site(self):
"""Function to register the site and generate a unique ID for the site
Returns:
**string:** The ID of the site (also called client id) if the registration is successful
Raises:
**OxdServerError:** If the site registration fails.
"""
if self.oxd_id:
logger.info('Client is already registered. ID: %s', self.oxd_id)
return self.oxd_id
# add required params for the command
params = {
"authorization_redirect_uri": self.authorization_redirect_uri,
"oxd_rp_programming_language": "python",
}
# add other optional params if they exist in config
for op in self.opt_params:
if self.config.get("client", op):
params[op] = self.config.get("client", op)
for olp in self.opt_list_params:
if self.config.get("client", olp):
params[olp] = self.config.get("client", olp).split(",")
logger.debug("Sending command `register_site` with params %s", params)
response = self.msgr.request("register_site", **params)
logger.debug("Received response: %s", response)
if response['status'] == 'error':
raise OxdServerError(response['data'])
self.oxd_id = response["data"]["oxd_id"]
self.config.set("oxd", "id", self.oxd_id)
logger.info("Site registration successful. Oxd ID: %s", self.oxd_id)
return self.oxd_id | Function to register the site and generate a unique ID for the site
Returns:
**string:** The ID of the site (also called client id) if the registration is successful
Raises:
**OxdServerError:** If the site registration fails. |
def _process_file(self, obj, fobj, field):
"""
obj is record object
fobj is data
field is FileField instance
"""
from uliweb import settings
paths = []
upload_to = self.upload_to or self._get_upload_path(field, 'upload_to', obj)
if upload_to:
self.fileserving.to_path = upload_to
upload_to_sub = self.upload_to_sub or self._get_upload_path(field, 'upload_to_sub', obj)
if upload_to_sub:
paths.append(upload_to_sub)
paths.append(fobj['filename'])
return self.fileserving.save_file(os.path.join(*paths),
fobj['file'], replace=self.file_replace,
convert=self.file_convert) | obj is record object
fobj is data
field is FileField instance |
def _set_foreign_attributes_for_create(self, model):
"""
Set the foreign ID and type for creation a related model.
"""
model.set_attribute(self.get_plain_foreign_key(), self.get_parent_key())
model.set_attribute(self.get_plain_morph_type(), self._morph_name) | Set the foreign ID and type for creation a related model. |
def make_tf_example(features, pi, value):
"""
Args:
features: [N, N, FEATURE_DIM] nparray of uint8
pi: [N * N + 1] nparray of float32
value: float
"""
return tf.train.Example(features=tf.train.Features(feature={
'x': tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[features.tostring()])),
'pi': tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[pi.tostring()])),
'outcome': tf.train.Feature(
float_list=tf.train.FloatList(
value=[value]))})) | Args:
features: [N, N, FEATURE_DIM] nparray of uint8
pi: [N * N + 1] nparray of float32
value: float |
def ping(proxy=None, hostport=None):
"""
rpc_ping
    Returns {'alive': True} on success
Returns {'error': ...} on error
"""
schema = {
'type': 'object',
'properties': {
'status': {
'type': 'string'
},
},
'required': [
'status'
]
}
assert proxy or hostport, 'Need either proxy handle or hostport string'
if proxy is None:
proxy = connect_hostport(hostport)
resp = {}
try:
resp = proxy.ping()
resp = json_validate( schema, resp )
if json_is_error(resp):
return resp
assert resp['status'] == 'alive'
except ValidationError as e:
if BLOCKSTACK_DEBUG:
log.exception(e)
resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
return resp
except socket.timeout:
log.error("Connection timed out")
resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
return resp
except socket.error as se:
log.error("Connection error {}".format(se.errno))
resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
return resp
except Exception as ee:
if BLOCKSTACK_DEBUG:
log.exception(ee)
log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
return resp
return resp | rpc_ping
    Returns {'alive': True} on success
Returns {'error': ...} on error |
def paste_action_callback(self, *event):
"""Callback method for paste action"""
if react_to_event(self.view, self.oc_list_ctrl.tree_view, event) and self.oc_list_ctrl.active_entry_widget is None:
global_clipboard.paste(self.model, limited=['outcomes'])
return True | Callback method for paste action |
def TakeWhile(self: dict, f):
"""
[
{
'self': [1, 2, 3, 4, 5],
'f': lambda x: x < 4,
'assert': lambda ret: list(ret) == [1, 2, 3]
}
]
"""
if is_to_destruct(f):
f = destruct_func(f)
for e in self.items():
if not f(e):
break
yield e | [
{
'self': [1, 2, 3, 4, 5],
'f': lambda x: x < 4,
'assert': lambda ret: list(ret) == [1, 2, 3]
}
] |
def get_pd_by_id(self, id):
"""
Get ScaleIO ProtectionDomain object by its id
        :param id: ID of ProtectionDomain
        :return: ScaleIO ProtectionDomain object
        :raise KeyError: No ProtectionDomain with specified ID found
:rtype: ProtectionDomain object
"""
for pd in self.conn.protection_domains:
if pd.id == id:
return pd
raise KeyError("Protection Domain with ID " + id + " not found") | Get ScaleIO ProtectionDomain object by its id
        :param id: ID of ProtectionDomain
        :return: ScaleIO ProtectionDomain object
        :raise KeyError: No ProtectionDomain with specified ID found
:rtype: ProtectionDomain object |
def call_jira_rest(self, url, user, password, method="GET", data=None):
"""
Make JIRA REST call
:param data: data for rest call
:param method: type of call: GET or POST for now
:param url: url to call
:param user: user for authentication
:param password: password for authentication
:return:
"""
headers = {'content-type': 'application/json'}
self._logger.debug('Connecting to Jira to call the following REST method {0}'.format(url))
if method == "GET":
response = requests.get(self.base_url + url, auth=requests.auth.HTTPBasicAuth(user, password))
elif method == "POST":
response = requests.post(self.base_url + url, data=json.dumps(data),
auth=requests.auth.HTTPBasicAuth(user, password), headers=headers)
else:
raise ValueError('method argument supports GET or POST values only')
self._logger.debug('REST call successfully finalised')
return response.json() | Make JIRA REST call
:param data: data for rest call
:param method: type of call: GET or POST for now
:param url: url to call
:param user: user for authentication
:param password: password for authentication
:return: |
def min_cost(self):
"""
Returns the cost of the best assignment
"""
if self._min_cost:
return self._min_cost
self._min_cost = np.sum(self.c[np.arange(self.nx), self.solution])
return self._min_cost | Returns the cost of the best assignment |
def save_archive(archive):
"""
Save `archive` into database and into proper indexes.
Attr:
archive (obj): Instance of the :class:`.DBArchive`.
Returns:
obj: :class:`.DBArchive` without data.
Raises:
InvalidType: When the `archive` is not instance of :class:`.DBArchive`.
UnindexablePublication: When there is no index (property) which can be
used to index `archive` in database.
"""
_assert_obj_type(archive, obj_type=DBArchive)
_get_handler().store_object(archive)
return archive.to_comm(light_request=True) | Save `archive` into database and into proper indexes.
Attr:
archive (obj): Instance of the :class:`.DBArchive`.
Returns:
obj: :class:`.DBArchive` without data.
Raises:
InvalidType: When the `archive` is not instance of :class:`.DBArchive`.
UnindexablePublication: When there is no index (property) which can be
used to index `archive` in database. |
def show_popup(self, *args, **kwargs):
"""Show a popup with a textedit
:returns: None
:rtype: None
:raises: None
"""
self.mw = JB_MainWindow(parent=self, flags=QtCore.Qt.Dialog)
self.mw.setWindowTitle(self.popuptitle)
self.mw.setWindowModality(QtCore.Qt.ApplicationModal)
w = QtGui.QWidget()
self.mw.setCentralWidget(w)
vbox = QtGui.QVBoxLayout(w)
pte = QtGui.QPlainTextEdit()
pte.setPlainText(self.get_popup_text())
vbox.addWidget(pte)
# move window to cursor position
d = self.cursor().pos() - self.mw.mapToGlobal(self.mw.pos())
self.mw.move(d)
self.mw.show() | Show a popup with a textedit
:returns: None
:rtype: None
:raises: None |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: IpAccessControlListContext for this IpAccessControlListInstance
:rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.IpAccessControlListContext
"""
if self._context is None:
self._context = IpAccessControlListContext(
self._version,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: IpAccessControlListContext for this IpAccessControlListInstance
:rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.IpAccessControlListContext |
def eig_seg(mask, img_list, apply_segmentation_to_images=False, cthresh=0, smooth=1):
"""
Segment a mask into regions based on the max value in an image list.
At a given voxel the segmentation label will contain the index to the image
that has the largest value. If the 3rd image has the greatest value,
the segmentation label will be 3 at that voxel.
Arguments
---------
mask : ANTsImage
D-dimensional mask > 0 defining segmentation region.
img_list : collection of ANTsImage or np.ndarray
images to use
apply_segmentation_to_images : boolean
determines if original image list is modified by the segmentation.
cthresh : integer
throw away isolated clusters smaller than this value
smooth : float
smooth the input data first by this value
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mylist = [ants.image_read(ants.get_ants_data('r16')),
ants.image_read(ants.get_ants_data('r27')),
ants.image_read(ants.get_ants_data('r85'))]
>>> myseg = ants.eig_seg(ants.get_mask(mylist[0]), mylist)
"""
maskvox = mask > 0
maskseg = mask.clone()
maskseg[maskvox] = 0
if isinstance(img_list, np.ndarray):
mydata = img_list
elif isinstance(img_list, (tuple, list)):
mydata = core.image_list_to_matrix(img_list, mask)
if (smooth > 0):
for i in range(mydata.shape[0]):
temp_img = core.make_image(mask, mydata[i,:], pixeltype='float')
temp_img = utils.smooth_image(temp_img, smooth, sigma_in_physical_coordinates=True)
mydata[i,:] = temp_img[mask >= 0.5]
segids = np.argmax(np.abs(mydata), axis=0)+1
segmax = np.max(np.abs(mydata), axis=0)
maskseg[maskvox] = (segids * (segmax > 1e-09))
if cthresh > 0:
for kk in range(int(maskseg.max())):
timg = utils.threshold_image(maskseg, kk, kk)
timg = utils.label_clusters(timg, cthresh)
timg = utils.threshold_image(timg, 1, 1e15) * float(kk)
maskseg[maskseg == kk] = timg[maskseg == kk]
if (apply_segmentation_to_images) and (not isinstance(img_list, np.ndarray)):
for i in range(len(img_list)):
img = img_list[i]
img[maskseg != float(i)] = 0
img_list[i] = img
return maskseg | Segment a mask into regions based on the max value in an image list.
At a given voxel the segmentation label will contain the index to the image
that has the largest value. If the 3rd image has the greatest value,
the segmentation label will be 3 at that voxel.
Arguments
---------
mask : ANTsImage
D-dimensional mask > 0 defining segmentation region.
img_list : collection of ANTsImage or np.ndarray
images to use
apply_segmentation_to_images : boolean
determines if original image list is modified by the segmentation.
cthresh : integer
throw away isolated clusters smaller than this value
smooth : float
smooth the input data first by this value
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> mylist = [ants.image_read(ants.get_ants_data('r16')),
ants.image_read(ants.get_ants_data('r27')),
ants.image_read(ants.get_ants_data('r85'))]
>>> myseg = ants.eig_seg(ants.get_mask(mylist[0]), mylist) |
def _fmt_metric(value, show_stdv=True):
"""format metric string"""
if len(value) == 2:
return '%s:%g' % (value[0], value[1])
if len(value) == 3:
if show_stdv:
return '%s:%g+%g' % (value[0], value[1], value[2])
return '%s:%g' % (value[0], value[1])
raise ValueError("wrong metric value") | format metric string |
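A minimal usage sketch of the formatter above (the metric tuples are hypothetical; they normally come from an evaluation callback):
print(_fmt_metric(('auc', 0.75)))               # -> auc:0.75
print(_fmt_metric(('auc', 0.75, 0.01)))         # -> auc:0.75+0.01
print(_fmt_metric(('auc', 0.75, 0.01), False))  # -> auc:0.75 (stdv suppressed)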
def translify(text):
"""Translify russian text"""
try:
res = translit.translify(smart_text(text, encoding))
except Exception as err:
# because filter must die silently
res = default_value % {'error': err, 'value': text}
    return res | Translify Russian text
def delete_keyvault_secret(access_token, vault_uri, secret_name):
'''Deletes a secret from a key vault using the key vault URI.
Args:
access_token (str): A valid Azure authentication token.
vault_uri (str): Vault URI e.g. https://myvault.azure.net.
        secret_name (str): Name of the secret to delete.
Returns:
HTTP response. 200 OK.
'''
endpoint = ''.join([vault_uri,
'/secrets/', secret_name,
'?api-version=', '7.0'])
return do_delete(endpoint, access_token) | Deletes a secret from a key vault using the key vault URI.
Args:
access_token (str): A valid Azure authentication token.
vault_uri (str): Vault URI e.g. https://myvault.azure.net.
        secret_name (str): Name of the secret to delete.
Returns:
HTTP response. 200 OK. |
def list_logical_volumes(select_criteria=None, path_mode=False):
'''
List logical volumes
:param select_criteria: str: Limit list to those volumes matching this
criteria (see 'lvs -S help' for more details)
:param path_mode: bool: return logical volume name in 'vg/lv' format, this
format is required for some commands like lvextend
:returns: [str]: List of logical volumes
'''
lv_diplay_attr = 'lv_name'
if path_mode:
# Parsing output logic relies on the column order
lv_diplay_attr = 'vg_name,' + lv_diplay_attr
cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings']
if select_criteria:
cmd.extend(['--select', select_criteria])
lvs = []
for lv in check_output(cmd).decode('UTF-8').splitlines():
if not lv:
continue
if path_mode:
lvs.append('/'.join(lv.strip().split()))
else:
lvs.append(lv.strip())
return lvs | List logical volumes
:param select_criteria: str: Limit list to those volumes matching this
criteria (see 'lvs -S help' for more details)
:param path_mode: bool: return logical volume name in 'vg/lv' format, this
format is required for some commands like lvextend
:returns: [str]: List of logical volumes |
def fromseconds(cls, seconds):
"""Return a |Period| instance based on a given number of seconds."""
try:
seconds = int(seconds)
except TypeError:
seconds = int(seconds.flatten()[0])
return cls(datetime.timedelta(0, int(seconds))) | Return a |Period| instance based on a given number of seconds. |
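A hedged usage sketch, assuming the enclosing class is the |Period| class named in the docstring:
p = Period.fromseconds(86400)   # wraps datetime.timedelta(0, 86400), i.e. one day
p = Period.fromseconds(3600.5)  # non-integral input is truncated to 3600 seconds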
def emit(self, **kwargs):
"""Emit signal by calling all connected slots.
The arguments supplied have to match the signal definition.
Args:
kwargs: Keyword arguments to be passed to connected slots.
Raises:
:exc:`InvalidEmit`: If arguments don't match signal specification.
"""
self._ensure_emit_kwargs(kwargs)
for slot in self.slots:
slot(**kwargs) | Emit signal by calling all connected slots.
The arguments supplied have to match the signal definition.
Args:
kwargs: Keyword arguments to be passed to connected slots.
Raises:
:exc:`InvalidEmit`: If arguments don't match signal specification. |
def limit_disk_io(self, uuid, media, totalbytessecset=False, totalbytessec=0, readbytessecset=False, readbytessec=0, writebytessecset=False,
writebytessec=0, totaliopssecset=False, totaliopssec=0, readiopssecset=False, readiopssec=0, writeiopssecset=False, writeiopssec=0,
totalbytessecmaxset=False, totalbytessecmax=0, readbytessecmaxset=False, readbytessecmax=0, writebytessecmaxset=False, writebytessecmax=0,
totaliopssecmaxset=False, totaliopssecmax=0, readiopssecmaxset=False, readiopssecmax=0, writeiopssecmaxset=False, writeiopssecmax=0,
totalbytessecmaxlengthset=False, totalbytessecmaxlength=0, readbytessecmaxlengthset=False, readbytessecmaxlength=0,
writebytessecmaxlengthset=False, writebytessecmaxlength=0, totaliopssecmaxlengthset=False, totaliopssecmaxlength=0,
readiopssecmaxlengthset=False, readiopssecmaxlength=0, writeiopssecmaxlengthset=False, writeiopssecmaxlength=0, sizeiopssecset=False,
sizeiopssec=0, groupnameset=False, groupname=''):
"""
        Limit the disk IO of a media attached to a machine
:param uuid: uuid of the kvm container (same as the used in create)
:param media: the media to limit the diskio
:return:
"""
args = {
'uuid': uuid,
'media': media,
'totalbytessecset': totalbytessecset,
'totalbytessec': totalbytessec,
'readbytessecset': readbytessecset,
'readbytessec': readbytessec,
'writebytessecset': writebytessecset,
'writebytessec': writebytessec,
'totaliopssecset': totaliopssecset,
'totaliopssec': totaliopssec,
'readiopssecset': readiopssecset,
'readiopssec': readiopssec,
'writeiopssecset': writeiopssecset,
'writeiopssec': writeiopssec,
'totalbytessecmaxset': totalbytessecmaxset,
'totalbytessecmax': totalbytessecmax,
'readbytessecmaxset': readbytessecmaxset,
'readbytessecmax': readbytessecmax,
'writebytessecmaxset': writebytessecmaxset,
'writebytessecmax': writebytessecmax,
'totaliopssecmaxset': totaliopssecmaxset,
'totaliopssecmax': totaliopssecmax,
'readiopssecmaxset': readiopssecmaxset,
'readiopssecmax': readiopssecmax,
'writeiopssecmaxset': writeiopssecmaxset,
'writeiopssecmax': writeiopssecmax,
'totalbytessecmaxlengthset': totalbytessecmaxlengthset,
'totalbytessecmaxlength': totalbytessecmaxlength,
'readbytessecmaxlengthset': readbytessecmaxlengthset,
'readbytessecmaxlength': readbytessecmaxlength,
'writebytessecmaxlengthset': writebytessecmaxlengthset,
'writebytessecmaxlength': writebytessecmaxlength,
'totaliopssecmaxlengthset': totaliopssecmaxlengthset,
'totaliopssecmaxlength': totaliopssecmaxlength,
'readiopssecmaxlengthset': readiopssecmaxlengthset,
'readiopssecmaxlength': readiopssecmaxlength,
'writeiopssecmaxlengthset': writeiopssecmaxlengthset,
'writeiopssecmaxlength': writeiopssecmaxlength,
'sizeiopssecset': sizeiopssecset,
'sizeiopssec': sizeiopssec,
'groupnameset': groupnameset,
'groupname': groupname,
}
self._limit_disk_io_action_chk.check(args)
        self._client.sync('kvm.limit_disk_io', args) | Limit the disk IO of a media attached to a machine
:param uuid: uuid of the kvm container (same as the used in create)
:param media: the media to limit the diskio
:return: |
def get_occurrence(event_id, occurrence_id=None, year=None, month=None,
day=None, hour=None, minute=None, second=None,
tzinfo=None):
"""
Because occurrences don't have to be persisted, there must be two ways to
    retrieve them. Both need an event, but if it is persisted the occurrence can
be retrieved with an id. If it is not persisted it takes a date to
retrieve it. This function returns an event and occurrence regardless of
which method is used.
"""
if(occurrence_id):
occurrence = get_object_or_404(Occurrence, id=occurrence_id)
event = occurrence.event
elif None not in (year, month, day, hour, minute, second):
event = get_object_or_404(Event, id=event_id)
date = timezone.make_aware(datetime.datetime(int(year), int(month),
int(day), int(hour), int(minute),
int(second)), tzinfo)
occurrence = event.get_occurrence(date)
if occurrence is None:
raise Http404
else:
raise Http404
return event, occurrence | Because occurrences don't have to be persisted, there must be two ways to
    retrieve them. Both need an event, but if it is persisted the occurrence can
be retrieved with an id. If it is not persisted it takes a date to
retrieve it. This function returns an event and occurrence regardless of
which method is used. |
def get_item_list(self, item_list_url):
""" Retrieve an item list from the server as an ItemList object
:type item_list_url: String or ItemList
:param item_list_url: URL of the item list to retrieve, or an
ItemList object
:rtype: ItemList
:returns: The ItemList
:raises: APIError if the request was not successful
"""
resp = self.api_request(str(item_list_url))
return ItemList(resp['items'], self, str(item_list_url), resp['name']) | Retrieve an item list from the server as an ItemList object
:type item_list_url: String or ItemList
:param item_list_url: URL of the item list to retrieve, or an
ItemList object
:rtype: ItemList
:returns: The ItemList
:raises: APIError if the request was not successful |
def fast_deepcopy(obj):
"""This is a faster implementation of deepcopy via pickle.
It is meant primarily for sets of Statements with complex hierarchies
but can be used for any object.
"""
with BytesIO() as buf:
pickle.dump(obj, buf)
buf.seek(0)
obj_new = pickle.load(buf)
return obj_new | This is a faster implementation of deepcopy via pickle.
It is meant primarily for sets of Statements with complex hierarchies
but can be used for any object. |
def set_lock_code(ctx, lock_code, new_lock_code, clear, generate, force):
"""
Set or change the configuration lock code.
A lock code may be used to protect the application configuration.
The lock code must be a 32 characters (16 bytes) hex value.
"""
dev = ctx.obj['dev']
def prompt_new_lock_code():
return prompt_lock_code(prompt='Enter your new lock code')
def prompt_current_lock_code():
return prompt_lock_code(prompt='Enter your current lock code')
def change_lock_code(lock_code, new_lock_code):
lock_code = _parse_lock_code(ctx, lock_code)
new_lock_code = _parse_lock_code(ctx, new_lock_code)
try:
dev.write_config(
device_config(
config_lock=new_lock_code),
reboot=True,
lock_key=lock_code)
except Exception as e:
logger.error('Changing the lock code failed', exc_info=e)
ctx.fail('Failed to change the lock code. Wrong current code?')
def set_lock_code(new_lock_code):
new_lock_code = _parse_lock_code(ctx, new_lock_code)
try:
dev.write_config(
device_config(
config_lock=new_lock_code),
reboot=True)
except Exception as e:
logger.error('Setting the lock code failed', exc_info=e)
ctx.fail('Failed to set the lock code.')
if generate and new_lock_code:
ctx.fail('Invalid options: --new-lock-code conflicts with --generate.')
if clear:
new_lock_code = CLEAR_LOCK_CODE
if generate:
new_lock_code = b2a_hex(os.urandom(16)).decode('utf-8')
click.echo(
'Using a randomly generated lock code: {}'.format(new_lock_code))
force or click.confirm(
'Lock configuration with this lock code?', abort=True, err=True)
if dev.config.configuration_locked:
if lock_code:
if new_lock_code:
change_lock_code(lock_code, new_lock_code)
else:
new_lock_code = prompt_new_lock_code()
change_lock_code(lock_code, new_lock_code)
else:
if new_lock_code:
lock_code = prompt_current_lock_code()
change_lock_code(lock_code, new_lock_code)
else:
lock_code = prompt_current_lock_code()
new_lock_code = prompt_new_lock_code()
change_lock_code(lock_code, new_lock_code)
else:
if lock_code:
ctx.fail(
'There is no current lock code set. '
'Use --new-lock-code to set one.')
else:
if new_lock_code:
set_lock_code(new_lock_code)
else:
new_lock_code = prompt_new_lock_code()
set_lock_code(new_lock_code) | Set or change the configuration lock code.
A lock code may be used to protect the application configuration.
    The lock code must be a 32-character (16-byte) hex value.
def apply(self, vpc):
"""
returns a list of new security groups that will be added
"""
assert vpc is not None
# make sure we're up to date
self.reload_remote_groups()
vpc_groups = self.vpc_groups(vpc)
self._apply_groups(vpc)
# reloads groups from AWS, the authority
self.reload_remote_groups()
vpc_groups = self.vpc_groups(vpc)
groups = {k.name:k for k in vpc_groups}
for x,y in self.config.items():
# process 1 security group at a time
group = groups[x]
if y.get('rules'):
# apply all rule changes
rules = [Rule.parse(rule) for rule in y.get('rules')]
rules = list(itertools.chain(*rules))
rules = self.filter_existing_rules(rules, group)
# need to use chain because multiple rules can be created for a single stanza
for rule in rules:
group_name = groups.get(rule.group_name, None)
if group_name and rule.address:
raise Exception("Can't auth an address and a group")
logger.debug("Authorizing %s %s %s to address:%s name:%s", rule.protocol,
rule.from_port, rule.to_port, rule.address, rule.group_name)
group_to_authorize = groups.get(rule.group_name, None)
try:
group.authorize(rule.protocol,
rule.from_port,
rule.to_port,
rule.address,
group_to_authorize, None)
except Exception as e:
print "could not authorize group %s" % group_to_authorize
raise
# apply rules
return self | returns a list of new security groups that will be added |
def setup(self):
"""Setup list widget content."""
if len(self.plugins_tabs) == 0:
self.close()
return
self.list.clear()
current_path = self.current_path
filter_text = self.filter_text
# Get optional line or symbol to define mode and method handler
trying_for_symbol = ('@' in self.filter_text)
if trying_for_symbol:
self.mode = self.SYMBOL_MODE
self.setup_symbol_list(filter_text, current_path)
else:
self.mode = self.FILE_MODE
self.setup_file_list(filter_text, current_path)
# Set position according to size
self.set_dialog_position() | Setup list widget content. |
def get_asset_from_edit_extension_draft(self, publisher_name, draft_id, asset_type, extension_name, **kwargs):
"""GetAssetFromEditExtensionDraft.
[Preview API]
:param str publisher_name:
:param str draft_id:
:param str asset_type:
:param str extension_name:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if draft_id is not None:
route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
query_parameters = {}
if extension_name is not None:
query_parameters['extensionName'] = self._serialize.query('extension_name', extension_name, 'str')
response = self._send(http_method='GET',
location_id='88c0b1c8-b4f1-498a-9b2a-8446ef9f32e7',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback) | GetAssetFromEditExtensionDraft.
[Preview API]
:param str publisher_name:
:param str draft_id:
:param str asset_type:
:param str extension_name:
:rtype: object |
def bitop_xor(self, dest, key, *keys):
"""Perform bitwise XOR operations between strings."""
return self.execute(b'BITOP', b'XOR', dest, key, *keys) | Perform bitwise XOR operations between strings. |
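A usage sketch, assuming this method lives on an asynchronous Redis client whose execute() returns an awaitable (keys and the set() call are assumptions):
await client.set('k1', b'\xff\x0f')
await client.set('k2', b'\x0f\x0f')
length = await client.bitop_xor('dest', 'k1', 'k2')  # dest now holds b'\xf0\x00', length == 2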
def persistent_object_context_changed(self):
""" Override from PersistentObject. """
super().persistent_object_context_changed()
def change_registration(registered_object, unregistered_object):
if registered_object and registered_object.uuid == self.parent_uuid:
self.__parent = registered_object
if self.persistent_object_context:
self.__registration_listener = self.persistent_object_context.registration_event.listen(change_registration)
self.__parent = self.persistent_object_context.get_registered_object(self.parent_uuid) | Override from PersistentObject. |
def best_training_job(self):
"""Return name of the best training job for the latest hyperparameter tuning job.
Raises:
Exception: If there is no best training job available for the hyperparameter tuning job.
"""
self._ensure_last_tuning_job()
tuning_job_describe_result = \
self.estimator.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=self.latest_tuning_job.name)
try:
return tuning_job_describe_result['BestTrainingJob']['TrainingJobName']
except KeyError:
raise Exception('Best training job not available for tuning job: {}'.format(self.latest_tuning_job.name)) | Return name of the best training job for the latest hyperparameter tuning job.
Raises:
Exception: If there is no best training job available for the hyperparameter tuning job. |
def set_error_page(self, loadbalancer, html):
"""
A single custom error page may be added per account load balancer
with an HTTP protocol. Page updates will override existing content.
If a custom error page is deleted, or the load balancer is changed
to a non-HTTP protocol, the default error page will be restored.
"""
uri = "/loadbalancers/%s/errorpage" % utils.get_id(loadbalancer)
req_body = {"errorpage": {"content": html}}
resp, body = self.api.method_put(uri, body=req_body)
return body | A single custom error page may be added per account load balancer
with an HTTP protocol. Page updates will override existing content.
If a custom error page is deleted, or the load balancer is changed
to a non-HTTP protocol, the default error page will be restored. |
def structure_recursion(self, struct, folder):
"""
From nested dictionaries representing .SAFE structure it recursively extracts all the files that need to be
downloaded and stores them into class attribute `download_list`.
:param struct: nested dictionaries representing a part of .SAFE structure
:type struct: dict
:param folder: name of folder where this structure will be saved
:type folder: str
"""
has_subfolder = False
for name, substruct in struct.items():
subfolder = os.path.join(folder, name)
if not isinstance(substruct, dict):
product_name, data_name = self._url_to_props(substruct)
if '.' in data_name:
data_type = MimeType(data_name.split('.')[-1])
data_name = data_name.rsplit('.', 1)[0]
else:
data_type = MimeType.RAW
if data_name in self.bands + self.metafiles:
self.download_list.append(DownloadRequest(url=substruct, filename=subfolder, data_type=data_type,
data_name=data_name, product_name=product_name))
else:
has_subfolder = True
self.structure_recursion(substruct, subfolder)
if not has_subfolder:
self.folder_list.append(folder) | From nested dictionaries representing .SAFE structure it recursively extracts all the files that need to be
downloaded and stores them into class attribute `download_list`.
:param struct: nested dictionaries representing a part of .SAFE structure
:type struct: dict
:param folder: name of folder where this structure will be saved
:type folder: str |
def get_droplet(self, droplet_id):
"""
Return a Droplet by its ID.
"""
return Droplet.get_object(api_token=self.token, droplet_id=droplet_id) | Return a Droplet by its ID. |
def get_inspector():
"""Reuse inspector"""
global _INSPECTOR
if _INSPECTOR:
return _INSPECTOR
else:
bind = op.get_bind()
_INSPECTOR = sa.engine.reflection.Inspector.from_engine(bind)
return _INSPECTOR | Reuse inspector |
def write_format_data(self, format_dict):
"""Write the format data dict to the frontend.
This default version of this method simply writes the plain text
representation of the object to ``io.stdout``. Subclasses should
override this method to send the entire `format_dict` to the
frontends.
Parameters
----------
format_dict : dict
The format dict for the object passed to `sys.displayhook`.
"""
# We want to print because we want to always make sure we have a
# newline, even if all the prompt separators are ''. This is the
# standard IPython behavior.
result_repr = format_dict['text/plain']
if '\n' in result_repr:
# So that multi-line strings line up with the left column of
# the screen, instead of having the output prompt mess up
# their first line.
# We use the prompt template instead of the expanded prompt
# because the expansion may add ANSI escapes that will interfere
# with our ability to determine whether or not we should add
# a newline.
prompt_template = self.shell.prompt_manager.out_template
if prompt_template and not prompt_template.endswith('\n'):
# But avoid extraneous empty lines.
result_repr = '\n' + result_repr
print >>io.stdout, result_repr | Write the format data dict to the frontend.
This default version of this method simply writes the plain text
representation of the object to ``io.stdout``. Subclasses should
override this method to send the entire `format_dict` to the
frontends.
Parameters
----------
format_dict : dict
The format dict for the object passed to `sys.displayhook`. |
async def wait_tasks(tasks, flatten=True):
'''Gather a list of asynchronous tasks and wait their completion.
:param list tasks:
A list of *asyncio* tasks wrapped in :func:`asyncio.ensure_future`.
:param bool flatten:
If ``True`` the returned results are flattened into one list if the
tasks return iterable objects. The parameter does nothing if all the
results are not iterable.
:returns:
The results of tasks as a list or as a flattened list
'''
rets = await asyncio.gather(*tasks)
if flatten and all(map(lambda x: hasattr(x, '__iter__'), rets)):
rets = list(itertools.chain(*rets))
return rets | Gather a list of asynchronous tasks and wait their completion.
:param list tasks:
A list of *asyncio* tasks wrapped in :func:`asyncio.ensure_future`.
:param bool flatten:
If ``True`` the returned results are flattened into one list if the
tasks return iterable objects. The parameter does nothing if all the
results are not iterable.
:returns:
The results of tasks as a list or as a flattened list |
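A self-contained sketch of the flattening behaviour, assuming the wait_tasks coroutine above is in scope (the fetch coroutine is hypothetical):
import asyncio

async def fetch(n):
    return [n, n + 1]

async def main():
    tasks = [asyncio.ensure_future(fetch(i)) for i in (1, 10)]
    print(await wait_tasks(tasks))                 # [1, 2, 10, 11]
    tasks = [asyncio.ensure_future(fetch(i)) for i in (1, 10)]
    print(await wait_tasks(tasks, flatten=False))  # [[1, 2], [10, 11]]

asyncio.run(main())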
def create_database(self, dbname, partitioned=False, **kwargs):
"""
Creates a new database on the remote server with the name provided
and adds the new database object to the client's locally cached
dictionary before returning it to the caller. The method will
optionally throw a CloudantClientException if the database
exists remotely.
:param str dbname: Name used to create the database.
:param bool throw_on_exists: Boolean flag dictating whether or
not to throw a CloudantClientException when attempting to
create a database that already exists.
:param bool partitioned: Create as a partitioned database. Defaults to
``False``.
:returns: The newly created database object
"""
new_db = self._DATABASE_CLASS(self, dbname, partitioned=partitioned)
try:
new_db.create(kwargs.get('throw_on_exists', False))
except CloudantDatabaseException as ex:
if ex.status_code == 412:
raise CloudantClientException(412, dbname)
super(CouchDB, self).__setitem__(dbname, new_db)
return new_db | Creates a new database on the remote server with the name provided
and adds the new database object to the client's locally cached
dictionary before returning it to the caller. The method will
optionally throw a CloudantClientException if the database
exists remotely.
:param str dbname: Name used to create the database.
:param bool throw_on_exists: Boolean flag dictating whether or
not to throw a CloudantClientException when attempting to
create a database that already exists.
:param bool partitioned: Create as a partitioned database. Defaults to
``False``.
:returns: The newly created database object |
def _cmp_models(self, m1, m2):
"""Compare two models from different swagger APIs and tell if they are
equal (return 0), or not (return != 0)"""
# Don't alter m1/m2 by mistake
m1 = copy.deepcopy(m1)
m2 = copy.deepcopy(m2)
# Remove keys added by bravado-core
def _cleanup(d):
"""Remove all keys in the blacklist"""
for k in ('x-model', 'x-persist', 'x-scope'):
if k in d:
del d[k]
for v in list(d.values()):
if isinstance(v, dict):
_cleanup(v)
_cleanup(m1)
_cleanup(m2)
# log.debug("model1:\n" + pprint.pformat(m1))
# log.debug("model2:\n" + pprint.pformat(m2))
return not m1 == m2 | Compare two models from different swagger APIs and tell if they are
equal (return 0), or not (return != 0) |
def get_canonical_and_alternates_urls(
url,
drop_ln=True,
washed_argd=None,
quote_path=False):
"""
Given an Invenio URL returns a tuple with two elements. The first is the
canonical URL, that is the original URL with CFG_SITE_URL prefix, and
    where the ln= argument is stripped. The second element is a mapping,
language code -> alternate URL
@param quote_path: if True, the path section of the given C{url}
is quoted according to RFC 2396
"""
dummy_scheme, dummy_netloc, path, dummy_params, query, fragment = urlparse(
url)
canonical_scheme, canonical_netloc = urlparse(cfg.get('CFG_SITE_URL'))[0:2]
parsed_query = washed_argd or parse_qsl(query)
no_ln_parsed_query = [(key, value)
for (key, value) in parsed_query if key != 'ln']
if drop_ln:
canonical_parsed_query = no_ln_parsed_query
else:
canonical_parsed_query = parsed_query
if quote_path:
path = urllib.quote(path)
canonical_query = urlencode(canonical_parsed_query)
canonical_url = urlunparse(
(canonical_scheme,
canonical_netloc,
path,
dummy_params,
canonical_query,
fragment))
alternate_urls = {}
for ln in cfg.get('CFG_SITE_LANGS'):
alternate_query = urlencode(no_ln_parsed_query + [('ln', ln)])
alternate_url = urlunparse(
(canonical_scheme,
canonical_netloc,
path,
dummy_params,
alternate_query,
fragment))
alternate_urls[ln] = alternate_url
return canonical_url, alternate_urls | Given an Invenio URL returns a tuple with two elements. The first is the
canonical URL, that is the original URL with CFG_SITE_URL prefix, and
    where the ln= argument is stripped. The second element is a mapping,
language code -> alternate URL
@param quote_path: if True, the path section of the given C{url}
is quoted according to RFC 2396 |
def call_parallel(self, cdata, low):
'''
Call the state defined in the given cdata in parallel
'''
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
# enough to not raise another KeyError as the name is easily
# guessable and fallback in all cases to present the real
# exception to the user
name = (cdata.get('args') or [None])[0] or cdata['kwargs'].get('name')
if not name:
name = low.get('name', low.get('__id__'))
proc = salt.utils.process.MultiprocessingProcess(
target=self._call_parallel_target,
args=(name, cdata, low))
proc.start()
ret = {'name': name,
'result': None,
'changes': {},
'comment': 'Started in a separate process',
'proc': proc}
return ret | Call the state defined in the given cdata in parallel |
def start(parallel, items, config, dirs=None, name=None, multiplier=1,
max_multicore=None):
"""Start a parallel cluster or machines to be used for running remote
functions.
    Returns a function used to process items in parallel with a given function.
Allows sharing of a single cluster across multiple functions with
identical resource requirements. Uses local execution for non-distributed
clusters or completed jobs.
A checkpoint directory keeps track of finished tasks, avoiding spinning up
    clusters for sections that have been previously processed.
multiplier - Number of expected jobs per initial input item. Used to avoid
underscheduling cores when an item is split during processing.
max_multicore -- The maximum number of cores to use for each process. Can be
    used to reduce multicore usage when jobs run faster on more single
cores.
"""
if name:
checkpoint_dir = utils.safe_makedir(os.path.join(dirs["work"],
"checkpoints_parallel"))
checkpoint_file = os.path.join(checkpoint_dir, "%s.done" % name)
else:
checkpoint_file = None
sysinfo = system.get_info(dirs, parallel, config.get("resources", {}))
items = [x for x in items if x is not None] if items else []
max_multicore = int(max_multicore or sysinfo.get("cores", 1))
parallel = resources.calculate(parallel, items, sysinfo, config,
multiplier=multiplier,
max_multicore=max_multicore)
try:
view = None
if parallel["type"] == "ipython":
if checkpoint_file and os.path.exists(checkpoint_file):
logger.info("Running locally instead of distributed -- checkpoint passed: %s" % name)
parallel["cores_per_job"] = 1
parallel["num_jobs"] = 1
parallel["checkpointed"] = True
yield multi.runner(parallel, config)
else:
from bcbio.distributed import ipython
with ipython.create(parallel, dirs, config) as view:
yield ipython.runner(view, parallel, dirs, config)
else:
yield multi.runner(parallel, config)
except:
if view is not None:
from bcbio.distributed import ipython
ipython.stop(view)
raise
else:
for x in ["cores_per_job", "num_jobs", "mem"]:
parallel.pop(x, None)
if checkpoint_file:
with open(checkpoint_file, "w") as out_handle:
out_handle.write("done\n") | Start a parallel cluster or machines to be used for running remote
functions.
    Returns a function used to process items in parallel with a given function.
Allows sharing of a single cluster across multiple functions with
identical resource requirements. Uses local execution for non-distributed
clusters or completed jobs.
A checkpoint directory keeps track of finished tasks, avoiding spinning up
    clusters for sections that have been previously processed.
multiplier - Number of expected jobs per initial input item. Used to avoid
underscheduling cores when an item is split during processing.
max_multicore -- The maximum number of cores to use for each process. Can be
    used to reduce multicore usage when jobs run faster on more single
cores. |
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class()
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs) | Return the serializer instance that should be used for validating and
deserializing input, and for serializing output. |
def list_icmp_block(zone, permanent=True):
'''
List ICMP blocks on a zone
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
        salt '*' firewalld.list_icmp_block zone
'''
cmd = '--zone={0} --list-icmp-blocks'.format(zone)
if permanent:
cmd += ' --permanent'
return __firewall_cmd(cmd).split() | List ICMP blocks on a zone
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
        salt '*' firewalld.list_icmp_block zone
def ttl(self, response):
"""Returns time to live in seconds. 0 means no caching.
Criteria:
- response code 200
- read-only method (GET, HEAD, OPTIONS)
Plus http headers:
- cache-control: option1, option2, ...
where options are:
private | public
no-cache
no-store
max-age: seconds
s-maxage: seconds
must-revalidate
proxy-revalidate
- expires: Thu, 01 Dec 1983 20:00:00 GMT
- pragma: no-cache (=cache-control: no-cache)
See http://www.mobify.com/blog/beginners-guide-to-http-cache-headers/
TODO: tests
"""
if response.code != 200: return 0
if not self.request.method in ['GET', 'HEAD', 'OPTIONS']: return 0
try:
pragma = self.request.headers['pragma']
if pragma == 'no-cache':
return 0
except KeyError:
pass
try:
cache_control = self.request.headers['cache-control']
# no caching options
for option in ['private', 'no-cache', 'no-store', 'must-revalidate', 'proxy-revalidate']:
                if cache_control.find(option) != -1: return 0
# further parsing to get a ttl
options = parse_cache_control(cache_control)
try:
return int(options['s-maxage'])
except KeyError:
pass
try:
return int(options['max-age'])
except KeyError:
pass
if 's-maxage' in options:
max_age = options['s-maxage']
if max_age < ttl: ttl = max_age
if 'max-age' in options:
max_age = options['max-age']
if max_age < ttl: ttl = max_age
return ttl
except KeyError:
pass
try:
expires = self.request.headers['expires']
return time.mktime(time.strptime(expires, '%a, %d %b %Y %H:%M:%S')) - time.time()
except KeyError:
pass | Returns time to live in seconds. 0 means no caching.
Criteria:
- response code 200
- read-only method (GET, HEAD, OPTIONS)
Plus http headers:
- cache-control: option1, option2, ...
where options are:
private | public
no-cache
no-store
max-age: seconds
s-maxage: seconds
must-revalidate
proxy-revalidate
- expires: Thu, 01 Dec 1983 20:00:00 GMT
- pragma: no-cache (=cache-control: no-cache)
See http://www.mobify.com/blog/beginners-guide-to-http-cache-headers/
TODO: tests |
def _send_event(self, event):
"""! @brief Process event objects and decide when to send to event sink.
This method handles the logic to associate a timestamp event with the prior other
event. A list of pending events is built up until either a timestamp or overflow event
is generated, at which point all pending events are flushed to the event sink. If a
timestamp is seen, the timestamp of all pending events is set prior to flushing.
"""
flush = False
# Handle merging data trace events.
if self._merge_data_trace_events(event):
return
if isinstance(event, events.TraceTimestamp):
for ev in self._pending_events:
ev.timestamp = event.timestamp
flush = True
else:
self._pending_events.append(event)
if isinstance(event, events.TraceOverflow):
flush = True
if flush:
self._flush_events() | ! @brief Process event objects and decide when to send to event sink.
This method handles the logic to associate a timestamp event with the prior other
event. A list of pending events is built up until either a timestamp or overflow event
is generated, at which point all pending events are flushed to the event sink. If a
timestamp is seen, the timestamp of all pending events is set prior to flushing. |
def headerize(provenances):
"""Create a header for each keyword.
:param provenances: The keywords.
:type provenances: dict
:return: New keywords with header for every keyword.
:rtype: dict
"""
special_case = {
'Inasafe': 'InaSAFE',
'Qgis': 'QGIS',
'Pyqt': 'PyQt',
'Os': 'OS',
'Gdal': 'GDAL',
'Maps': 'Map'
}
for key, value in list(provenances.items()):
if '_' in key:
header = key.replace('_', ' ').title()
else:
header = key.title()
header_list = header.split(' ')
proper_word = None
proper_word_index = None
for index, word in enumerate(header_list):
if word in list(special_case.keys()):
proper_word = special_case[word]
proper_word_index = index
if proper_word:
header_list[proper_word_index] = proper_word
header = ' '.join(header_list)
provenances.update(
{
key: {
'header': '{header} '.format(header=header),
'content': value
}
})
return provenances | Create a header for each keyword.
:param provenances: The keywords.
:type provenances: dict
:return: New keywords with header for every keyword.
:rtype: dict |
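A worked example of the title-casing and special-case handling above (the keyword values are hypothetical):
provenances = {'gdal_version': '3.4.1', 'duration': 12}
headerize(provenances)
# -> {'gdal_version': {'header': 'GDAL Version ', 'content': '3.4.1'},
#     'duration': {'header': 'Duration ', 'content': 12}}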
def dataset_search(self, dataset_returning_query):
"""
Run a dataset query against Citrination.
:param dataset_returning_query: :class:`DatasetReturningQuery` to execute.
:type dataset_returning_query: :class:`DatasetReturningQuery`
:return: Dataset search result object with the results of the query.
:rtype: :class:`DatasetSearchResult`
"""
self._validate_search_query(dataset_returning_query)
return self._execute_search_query(
dataset_returning_query,
DatasetSearchResult
) | Run a dataset query against Citrination.
:param dataset_returning_query: :class:`DatasetReturningQuery` to execute.
:type dataset_returning_query: :class:`DatasetReturningQuery`
:return: Dataset search result object with the results of the query.
:rtype: :class:`DatasetSearchResult` |