def offsets_for_max_size( max_size ):
"""
Return the subset of offsets needed to contain intervals over (0,max_size)
"""
    for i, bin_max in enumerate( reversed( BIN_OFFSETS_MAX ) ):
        if max_size < bin_max:
break
else:
raise Exception( "%d is larger than the maximum possible size (%d)" % ( max_size, BIN_OFFSETS_MAX[0] ) )
    return BIN_OFFSETS[ ( len(BIN_OFFSETS) - i - 1 ) : ]
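# A minimal usage sketch for offsets_for_max_size, assuming hypothetical
# UCSC-style binning constants; the real module defines its own BIN_OFFSETS
# and BIN_OFFSETS_MAX, so the values below are illustrative only.
BIN_OFFSETS = [512 + 64 + 8 + 1, 64 + 8 + 1, 8 + 1, 1, 0]
# Largest covered size first, matching the BIN_OFFSETS_MAX[0] check above.
BIN_OFFSETS_MAX = [2 ** (17 + 3 * i) for i in reversed(range(len(BIN_OFFSETS)))]
print(offsets_for_max_size(10000000))  # only the levels big enough to hold (0, 10 Mb)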
def apply_filters(target, lines):
"""
    Applies filters to the lines of a datasource. This function is used only in
integration tests. Filters are applied in an equivalent but more performant
way at run time.
"""
filters = get_filters(target)
if filters:
for l in lines:
if any(f in l for f in filters):
yield l
else:
for l in lines:
            yield l
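# A small usage sketch for apply_filters, with a hypothetical get_filters stub
# standing in for the real lookup used by the integration tests.
def get_filters(target):
    return ["ERROR", "WARN"] if target == "logs" else []

print(list(apply_filters("logs", ["INFO ok", "ERROR boom", "WARN slow"])))   # filtered
print(list(apply_filters("other", ["INFO ok", "ERROR boom", "WARN slow"])))  # passthrough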
def _set_error_disable_timeout(self, v, load=False):
"""
Setter method for error_disable_timeout, mapped from YANG variable /protocol/spanning_tree/rpvst/error_disable_timeout (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_error_disable_timeout is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_error_disable_timeout() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=error_disable_timeout.error_disable_timeout, is_container='container', presence=False, yang_name="error-disable-timeout", rest_name="error-disable-timeout", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Error-disable-timeout for the spanning tree', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """error_disable_timeout must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=error_disable_timeout.error_disable_timeout, is_container='container', presence=False, yang_name="error-disable-timeout", rest_name="error-disable-timeout", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Error-disable-timeout for the spanning tree', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
})
self.__error_disable_timeout = t
if hasattr(self, '_set'):
        self._set()
def start_listener_thread(self, timeout_ms=30000, exception_handler=None):
""" Start a listener thread to listen for events in the background.
Args:
            timeout_ms (int): How long to poll the Home Server for before
retrying.
exception_handler (func(exception)): Optional exception handler
function which can be used to handle exceptions in the caller
thread.
"""
try:
thread = Thread(target=self.listen_forever,
args=(timeout_ms, exception_handler))
thread.daemon = True
self.sync_thread = thread
self.should_listen = True
thread.start()
    except RuntimeError as e:
        logger.error("Error: unable to start thread. %s", str(e))
def _headers(self, name, is_file=False):
""" Returns the header of the encoding of this parameter.
Args:
name (str): Field name
Kwargs:
is_file (bool): If true, this is a file field
Returns:
array. Headers
"""
value = self._files[name] if is_file else self._data[name]
_boundary = self.boundary.encode("utf-8") if isinstance(self.boundary, unicode) else urllib.quote_plus(self.boundary)
headers = ["--%s" % _boundary]
if is_file:
disposition = 'form-data; name="%s"; filename="%s"' % (name, os.path.basename(value))
else:
disposition = 'form-data; name="%s"' % name
headers.append("Content-Disposition: %s" % disposition)
if is_file:
file_type = self._file_type(name)
else:
file_type = "text/plain; charset=utf-8"
headers.append("Content-Type: %s" % file_type)
if is_file:
headers.append("Content-Length: %i" % self._file_size(name))
else:
headers.append("Content-Length: %i" % len(value))
headers.append("")
headers.append("")
return "\r\n".join(headers) | Returns the header of the encoding of this parameter.
Args:
name (str): Field name
Kwargs:
is_file (bool): If true, this is a file field
Returns:
array. Headers |
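# Illustrative only: the kind of multipart/form-data part header _headers builds
# for a plain text field, assuming a boundary of "XYZ" and a 5-byte value.
print("\r\n".join([
    "--XYZ",
    'Content-Disposition: form-data; name="title"',
    "Content-Type: text/plain; charset=utf-8",
    "Content-Length: 5",
    "",
    "",
]))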
async def executor(func, *args, **kwargs):
'''
Execute a function in an executor thread.
Args:
        func: The function to call with *args and **kwargs.
'''
def syncfunc():
return func(*args, **kwargs)
loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, syncfunc)
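# A usage sketch for executor, assuming asyncio is importable as in the
# coroutine above; math.factorial runs in the default thread pool.
import asyncio
import math

async def _demo():
    print(await executor(math.factorial, 20))

asyncio.run(_demo())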
def listlike(obj):
"""Is an object iterable like a list (and not a string)?"""
return hasattr(obj, "__iter__") \
and not issubclass(type(obj), str)\
        and not issubclass(type(obj), unicode)
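# Note: `unicode` only exists on Python 2. A rough Python 3 equivalent could
# look like this sketch (bytes are treated as list-like here, which may or
# may not match the original intent).
def listlike_py3(obj):
    """Is an object iterable like a list (and not a string)?"""
    return hasattr(obj, "__iter__") and not isinstance(obj, str)

print(listlike_py3([1, 2, 3]))  # True
print(listlike_py3("abc"))      # False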
def check_lazy_load_straat(f):
'''
Decorator function to lazy load a :class:`Straat`.
'''
def wrapper(*args):
straat = args[0]
if (
straat._namen is None or straat._metadata is None
):
log.debug('Lazy loading Straat %d', straat.id)
straat.check_gateway()
s = straat.gateway.get_straat_by_id(straat.id)
straat._namen = s._namen
straat._metadata = s._metadata
return f(*args)
    return wrapper
def alert_statuses(self, alert_statuses):
"""Sets the alert_statuses of this IntegrationStatus.
A Map from the ids of the alerts contained in this integration to their install status. The install status can take on one of three values, `VISIBLE`, `HIDDEN`, and `NOT_LOADED` # noqa: E501
:param alert_statuses: The alert_statuses of this IntegrationStatus. # noqa: E501
:type: dict(str, str)
"""
if alert_statuses is None:
raise ValueError("Invalid value for `alert_statuses`, must not be `None`") # noqa: E501
allowed_values = ["VISIBLE", "HIDDEN", "NOT_LOADED"] # noqa: E501
if not set(alert_statuses.keys()).issubset(set(allowed_values)):
raise ValueError(
"Invalid keys in `alert_statuses` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(alert_statuses.keys()) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
    self._alert_statuses = alert_statuses
def get_cursor_position(self):
"""Returns the terminal (row, column) of the cursor
0-indexed, like blessings cursor positions"""
# TODO would this be cleaner as a parameter?
in_stream = self.in_stream
query_cursor_position = u"\x1b[6n"
self.write(query_cursor_position)
def retrying_read():
while True:
try:
c = in_stream.read(1)
if c == '':
raise ValueError("Stream should be blocking - should't"
" return ''. Returned %r so far",
(resp,))
return c
except IOError:
raise ValueError(
'cursor get pos response read interrupted'
)
# find out if this ever really happens - if so, continue
resp = ''
while True:
c = retrying_read()
resp += c
m = re.search('(?P<extra>.*)'
'(?P<CSI>\x1b\[|\x9b)'
'(?P<row>\\d+);(?P<column>\\d+)R', resp, re.DOTALL)
if m:
row = int(m.groupdict()['row'])
col = int(m.groupdict()['column'])
extra = m.groupdict()['extra']
if extra:
if self.extra_bytes_callback:
self.extra_bytes_callback(
extra.encode(in_stream.encoding)
)
else:
raise ValueError(("Bytes preceding cursor position "
"query response thrown out:\n%r\n"
"Pass an extra_bytes_callback to "
"CursorAwareWindow to prevent this")
% (extra,))
            return (row - 1, col - 1)
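# The terminal answers the "\x1b[6n" query with e.g. "\x1b[12;40R"; a sketch of
# the same row/column extraction used above (standalone, no terminal needed):
import re
_m = re.search('(?P<row>\\d+);(?P<column>\\d+)R', "\x1b[12;40R")
print(int(_m.group('row')) - 1, int(_m.group('column')) - 1)  # 11 39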
def pos_tag(self):
"""
Apply Part-of-Speech (POS) tagging on each token.
Uses the default NLTK tagger if no language-specific tagger could be loaded (English is assumed then as
language). The default NLTK tagger uses Penn Treebank tagset
(https://ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html).
The default German tagger based on TIGER corpus uses the STTS tagset
(http://www.ims.uni-stuttgart.de/forschung/ressourcen/lexika/TagSets/stts-table.html).
"""
self._require_tokens()
self._require_no_ngrams_as_tokens()
self._invalidate_workers_tokens()
logger.info('POS tagging tokens')
self._send_task_to_workers('pos_tag')
self.pos_tagged = True
    return self
def format_installed_dap_list(simple=False):
'''Formats all installed DAPs in a human readable form to list of lines'''
lines = []
if simple:
for pkg in sorted(get_installed_daps()):
lines.append(pkg)
else:
for pkg, instances in sorted(get_installed_daps_detailed().items()):
versions = []
for instance in instances:
location = utils.unexpanduser(instance['location'])
version = instance['version']
if not versions: # if this is the first
version = utils.bold(version)
versions.append('{v}:{p}'.format(v=version, p=location))
pkg = utils.bold(pkg)
lines.append('{pkg} ({versions})'.format(pkg=pkg, versions=' '.join(versions)))
    return lines
def get_series_episode(series_id, season, episode):
"""Get an episode of a series.
:param int series_id: id of the series.
:param int season: season number of the episode.
:param int episode: episode number of the episode.
:return: the episode data.
:rtype: dict
"""
result = tvdb_client.query_series_episodes(series_id, aired_season=season, aired_episode=episode)
if result:
        return tvdb_client.get_episode(result['data'][0]['id'])
def close(self):
'''Terminate or kill the subprocess.
This function is blocking.
'''
if not self._process:
return
if self._process.returncode is not None:
return
_logger.debug('Terminate process.')
try:
self._process.terminate()
except OSError as error:
if error.errno != errno.ESRCH:
raise
for dummy in range(10):
if self._process.returncode is not None:
return
time.sleep(0.05)
_logger.debug('Failed to terminate. Killing.')
try:
self._process.kill()
except OSError as error:
if error.errno != errno.ESRCH:
            raise
def _decimal_to_xsd_format(value):
"""
Converts a decimal.Decimal value to its XSD decimal type value.
Result is a string containing the XSD decimal type's lexical value
representation. The conversion is done without any precision loss.
Note that Python's native decimal.Decimal string representation will
not do here as the lexical representation desired here does not allow
representing decimal values using float-like `<mantissa>E<exponent>'
format, e.g. 12E+30 or 0.10006E-12.
"""
value = XDecimal._decimal_canonical(value)
negative, digits, exponent = value.as_tuple()
# The following implementation assumes the following tuple decimal
# encoding (part of the canonical decimal value encoding):
# - digits must contain at least one element
# - no leading integral 0 digits except a single one in 0 (if a non-0
# decimal value has leading integral 0 digits they must be encoded
# in its 'exponent' value and not included explicitly in its
# 'digits' tuple)
assert digits
assert digits[0] != 0 or len(digits) == 1
result = []
if negative:
result.append("-")
# No fractional digits.
if exponent >= 0:
result.extend(str(x) for x in digits)
result.extend("0" * exponent)
return "".join(result)
digit_count = len(digits)
# Decimal point offset from the given digit start.
point_offset = digit_count + exponent
# Trim trailing fractional 0 digits.
fractional_digit_count = min(digit_count, -exponent)
while fractional_digit_count and digits[digit_count - 1] == 0:
digit_count -= 1
fractional_digit_count -= 1
# No trailing fractional 0 digits and a decimal point coming not after
# the given digits, meaning there is no need to add additional trailing
# integral 0 digits.
if point_offset <= 0:
# No integral digits.
result.append("0")
if digit_count > 0:
result.append(".")
result.append("0" * -point_offset)
result.extend(str(x) for x in digits[:digit_count])
else:
# Have integral and possibly some fractional digits.
result.extend(str(x) for x in digits[:point_offset])
if point_offset < digit_count:
result.append(".")
result.extend(str(x) for x in digits[point_offset:digit_count])
return "".join(result) | Converts a decimal.Decimal value to its XSD decimal type value.
Result is a string containing the XSD decimal type's lexical value
representation. The conversion is done without any precision loss.
Note that Python's native decimal.Decimal string representation will
not do here as the lexical representation desired here does not allow
representing decimal values using float-like `<mantissa>E<exponent>'
format, e.g. 12E+30 or 0.10006E-12. |
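# Why the custom formatting is needed: Python's default Decimal string form may
# use an exponent, which the XSD decimal lexical space does not allow.
from decimal import Decimal
print(str(Decimal("12E+30")))          # '1.2E+31' (exponent form)
print(format(Decimal("12E+30"), 'f'))  # '12000000000000000000000000000000'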
def get_policies(self):
"""Returns all the Policies under the Identity namespace.
Returns:
(list): A list containing all the Policies under the Identity
namespace.
"""
prefix = _IDENTITY_NS + _POLICY_NS
policylist_list = [
_create_from_bytes(d, identity_pb2.PolicyList)
for _, d in self._state_view.leaves(prefix=prefix)
]
policies = []
for policy_list in policylist_list:
for policy in policy_list.policies:
policies.append(policy)
    return sorted(policies, key=lambda p: p.name)
def parse(self,fileName,offset):
'''Parses synset from file <fileName>
from offset <offset>
'''
p = Parser()
p.file = open(fileName, 'rb')
a = p.parse_synset(offset=offset)
p.file.close()
    self.__dict__.update(a.__dict__)
def isorbit_record(self):
"""`True` if `targetname` appears to be a comet orbit record number.
NAIF record numbers are 6 digits, begin with a '9' and can
change at any time.
"""
import re
test = re.match('^9[0-9]{5}$', self.targetname.strip()) is not None
    return test
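# The pattern above in isolation: NAIF comet orbit record numbers are six
# digits starting with '9'.
import re
print(bool(re.match('^9[0-9]{5}$', '900001')))  # True
print(bool(re.match('^9[0-9]{5}$', 'Ceres')))   # False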
def match_and(self, tokens, item):
"""Matches and."""
for match in tokens:
        self.match(match, item)
def replace_uri(rdf, fromuri, touri):
"""Replace all occurrences of fromuri with touri in the given model.
If touri is a list or tuple of URIRef, all values will be inserted.
If touri=None, will delete all occurrences of fromuri instead.
"""
replace_subject(rdf, fromuri, touri)
replace_predicate(rdf, fromuri, touri)
    replace_object(rdf, fromuri, touri)
def read_validate_params(self, request):
"""
Checks if all incoming parameters meet the expected values.
"""
self.client = self.client_authenticator.by_identifier_secret(request)
self.password = request.post_param("password")
self.username = request.post_param("username")
self.scope_handler.parse(request=request, source="body")
    return True
def connect_ssl(cls, user, password, endpoints,
ca_certs=None, validate=None):
"""
Creates an SSL transport to the first endpoint (aserver) to which
we successfully connect
"""
if isinstance(endpoints, basestring):
endpoints = [endpoints]
transport = SingleEndpointTransport(
SocketTransport.connect_ssl, endpoints, ca_certs=ca_certs,
validate=validate)
    return cls(transport, user, password)
def addcols(X, cols, names=None):
"""
Add one or more columns to a numpy ndarray.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addcols`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The recarray to add columns to.
**cols** : numpy ndarray, or list of arrays of columns
Column(s) to add.
**names**: list of strings, optional
Names of the new columns. Only applicable when `cols` is a
list of arrays.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new columns.
**See also:** :func:`tabular.spreadsheet.colstack`
"""
if isinstance(names,str):
names = [n.strip() for n in names.split(',')]
if isinstance(cols, list):
if any([isinstance(x,np.ndarray) or isinstance(x,list) or \
isinstance(x,tuple) for x in cols]):
assert all([len(x) == len(X) for x in cols]), \
'Trying to add columns of wrong length.'
            assert names is not None and len(cols) == len(names), \
'Number of columns to add must equal number of new names.'
cols = utils.fromarrays(cols,type=np.ndarray,names = names)
else:
assert len(cols) == len(X), 'Trying to add column of wrong length.'
cols = utils.fromarrays([cols], type=np.ndarray,names=names)
else:
assert isinstance(cols, np.ndarray)
        if cols.dtype.names is None:
cols = utils.fromarrays([cols],type=np.ndarray, names=names)
Replacements = [a for a in cols.dtype.names if a in X.dtype.names]
if len(Replacements) > 0:
print('Replacing columns',
[a for a in cols.dtype.names if a in X.dtype.names])
return utils.fromarrays(
[X[a] if a not in cols.dtype.names else cols[a] for a in X.dtype.names] +
[cols[a] for a in cols.dtype.names if a not in X.dtype.names],
type=np.ndarray,
names=list(X.dtype.names) + [a for a in cols.dtype.names
if a not in X.dtype.names]) | Add one or more columns to a numpy ndarray.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addcols`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The recarray to add columns to.
**cols** : numpy ndarray, or list of arrays of columns
Column(s) to add.
**names**: list of strings, optional
Names of the new columns. Only applicable when `cols` is a
list of arrays.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new columns.
**See also:** :func:`tabular.spreadsheet.colstack` |
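# A standalone illustration of the same idea with plain numpy (not the tabular
# package): appending a named column to a structured array.
import numpy as np
from numpy.lib import recfunctions as rfn

X = np.array([(1, 2.0), (3, 4.0)], dtype=[('a', int), ('b', float)])
out = rfn.append_fields(X, 'c', [10, 20], usemask=False)
print(out.dtype.names)  # ('a', 'b', 'c')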
def power(self):
""":return: A power object modeled as a named tuple"""
power = self._state['powerUsage']
return PowerUsage(power.get('avgDayValue'),
power.get('avgValue'),
power.get('dayCost'),
power.get('dayUsage'),
power.get('isSmart'),
power.get('meterReading'),
power.get('value'),
power.get('meterReadingLow'),
                          power.get('dayLowUsage'))
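# PowerUsage is presumably a named tuple defined elsewhere in the module; a
# hypothetical definition consistent with the positional arguments above:
from collections import namedtuple
PowerUsage = namedtuple('PowerUsage', [
    'avg_day_value', 'avg_value', 'day_cost', 'day_usage', 'is_smart',
    'meter_reading', 'value', 'meter_reading_low', 'day_low_usage'])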
def build_backend(self, conn_string):
"""
Given a DSN, returns an instantiated backend class.
Ex::
backend = gator.build_backend('locmem://')
# ...or...
backend = gator.build_backend('redis://127.0.0.1:6379/0')
:param conn_string: A DSN for connecting to the queue. Passed along
to the backend.
:type conn_string: string
:returns: A backend ``Client`` instance
"""
backend_name, _ = conn_string.split(':', 1)
backend_path = 'alligator.backends.{}_backend'.format(backend_name)
client_class = import_attr(backend_path, 'Client')
    return client_class(conn_string)
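# The backend name is simply everything before the first ':' in the DSN, e.g.:
backend_name = 'redis://127.0.0.1:6379/0'.split(':', 1)[0]
print('alligator.backends.{}_backend'.format(backend_name))  # redis backend module path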
def update_enterprise_courses(self, enterprise_customer, course_container_key='results', **kwargs):
"""
This method adds enterprise-specific metadata for each course.
We are adding following field in all the courses.
tpa_hint: a string for identifying Identity Provider.
enterprise_id: the UUID of the enterprise
**kwargs: any additional data one would like to add on a per-use basis.
Arguments:
enterprise_customer: The customer whose data will be used to fill the enterprise context.
course_container_key: The key used to find the container for courses in the serializer's data dictionary.
"""
enterprise_context = {
'tpa_hint': enterprise_customer and enterprise_customer.identity_provider,
'enterprise_id': enterprise_customer and str(enterprise_customer.uuid),
}
enterprise_context.update(**kwargs)
courses = []
for course in self.data[course_container_key]:
courses.append(
self.update_course(course, enterprise_customer, enterprise_context)
)
    self.data[course_container_key] = courses
def add_tab(self, widget):
"""Add tab."""
self.clients.append(widget)
index = self.tabwidget.addTab(widget, widget.get_short_name())
self.tabwidget.setCurrentIndex(index)
self.tabwidget.setTabToolTip(index, widget.get_filename())
if self.dockwidget and not self.ismaximized:
self.dockwidget.setVisible(True)
self.dockwidget.raise_()
self.activateWindow()
    widget.notebookwidget.setFocus()
async def finalize_websocket(
self,
result: ResponseReturnValue,
websocket_context: Optional[WebsocketContext]=None,
from_error_handler: bool=False,
) -> Optional[Response]:
"""Turns the view response return value into a response.
Arguments:
result: The result of the websocket to finalize into a response.
websocket_context: The websocket context, optional as Flask
omits this argument.
"""
if result is not None:
response = await self.make_response(result)
else:
response = None
try:
response = await self.postprocess_websocket(response, websocket_context)
await websocket_finished.send(self, response=response)
except Exception:
if not from_error_handler:
raise
self.logger.exception('Request finalizing errored')
    return response
def do_query(self, line):
"""
query [:tablename] [-r] [--count|-c] [--array|-a] [-{max}] [{rkey-condition}] hkey [attributes,...]
where rkey-condition:
--eq={key} (equal key)
--ne={key} (not equal key)
--le={key} (less or equal than key)
--lt={key} (less than key)
--ge={key} (greater or equal than key)
--gt={key} (greater than key)
--exists (key exists)
--nexists (key does not exists)
--contains={key} (contains key)
--ncontains={key} (does not contains key)
--begin={startkey} (rkey begins with startkey)
--between={firstkey},{lastkey} (between firstkey and lastkey)
"""
table, line = self.get_table_params(line)
args = self.getargs(line)
condition = None
count = False
as_array = False
max_size = None
batch_size = None
start = None
if '-r' in args:
asc = False
args.remove('-r')
else:
asc = True
while args:
arg = args[0]
if arg[0] == '-' and arg[1:].isdigit():
max_size = int(arg[1:])
args.pop(0)
elif args[0].startswith('--max='):
arg = args.pop(0)
max_size = int(arg[6:])
elif arg in ['--count', '-c']:
count = True
args.pop(0)
elif arg in ['--array', '-a']:
as_array = True
args.pop(0)
elif args[0].startswith('--batch='):
arg = args.pop(0)
batch_size = int(arg[8:])
elif args[0].startswith('--start='):
arg = args.pop(0)
start = (arg[8:], )
elif args[0] == '--next':
arg = args.pop(0)
if self.next_key:
start = self.next_key
else:
print "no next"
return
elif arg.startswith("--begin="):
condition = BEGINS_WITH(self.get_typed_key_value(table, arg[8:], False))
args.pop(0)
elif arg.startswith("--eq="):
condition = EQ(self.get_typed_key_value(table, arg[5:], False))
args.pop(0)
elif arg.startswith("--ne="):
condition = NE(self.get_typed_key_value(table, arg[5:], False))
args.pop(0)
elif arg.startswith("--le="):
condition = LE(self.get_typed_key_value(table, arg[5:], False))
args.pop(0)
elif arg.startswith("--lt="):
condition = LT(self.get_typed_key_value(table, arg[5:], False))
args.pop(0)
elif arg.startswith("--ge="):
condition = GE(self.get_typed_key_value(table, arg[5:], False))
args.pop(0)
elif arg.startswith("--gt="):
condition = GT(self.get_typed_key_value(table, arg[5:], False))
args.pop(0)
elif arg == "--exists":
condition = NOT_NULL()
args.pop(0)
elif arg == "--nexists":
condition = NULL()
args.pop(0)
elif arg.startswith("--contains="):
condition = CONTAINS(self.get_typed_key_value(table, arg[11:], False))
args.pop(0)
elif arg.startswith("--between="):
parts = arg[10:].split(",", 1)
condition = BETWEEN(self.get_typed_key_value(table, parts[0], True), self.get_typed_key_value(table, parts[1], False))
args.pop(0)
else:
break
hkey = self.get_typed_key_value(table, args.pop(0))
attr_keys = args[0].split(",") if args else None
attrs = list(set(attr_keys)) if attr_keys else None
result = table.query(hkey, range_key_condition=condition, attributes_to_get=attrs, scan_index_forward=asc, request_limit=batch_size, max_results=max_size, count=count, exclusive_start_key=start)
if count:
print "count: %s/%s" % (result.scanned_count, result.count)
self.next_key = None
else:
if as_array and attr_keys:
self.print_iterator_array(result, attr_keys)
else:
self.print_iterator(result)
self.next_key = result.last_evaluated_key
if self.consumed:
print "consumed units:", result.consumed_units | query [:tablename] [-r] [--count|-c] [--array|-a] [-{max}] [{rkey-condition}] hkey [attributes,...]
where rkey-condition:
--eq={key} (equal key)
--ne={key} (not equal key)
--le={key} (less or equal than key)
--lt={key} (less than key)
--ge={key} (greater or equal than key)
--gt={key} (greater than key)
--exists (key exists)
--nexists (key does not exists)
--contains={key} (contains key)
--ncontains={key} (does not contains key)
--begin={startkey} (rkey begins with startkey)
--between={firstkey},{lastkey} (between firstkey and lastkey) |
def profile(script, argv, profiler_factory,
pickle_protocol, dump_filename, mono):
"""Profile a Python script."""
filename, code, globals_ = script
sys.argv[:] = [filename] + list(argv)
__profile__(filename, code, globals_, profiler_factory,
pickle_protocol=pickle_protocol, dump_filename=dump_filename,
                mono=mono)
def minimize(f, start=None, smooth=False, log=None, array=False, **vargs):
"""Minimize a function f of one or more arguments.
Args:
f: A function that takes numbers and returns a number
start: A starting value or list of starting values
smooth: Whether to assume that f is smooth and use first-order info
log: Logging function called on the result of optimization (e.g. print)
vargs: Other named arguments passed to scipy.optimize.minimize
Returns either:
(a) the minimizing argument of a one-argument function
(b) an array of minimizing arguments of a multi-argument function
"""
if start is None:
assert not array, "Please pass starting values explicitly when array=True"
arg_count = f.__code__.co_argcount
assert arg_count > 0, "Please pass starting values explicitly for variadic functions"
start = [0] * arg_count
if not hasattr(start, '__len__'):
start = [start]
if array:
objective = f
else:
@functools.wraps(f)
def objective(args):
return f(*args)
if not smooth and 'method' not in vargs:
vargs['method'] = 'Powell'
result = optimize.minimize(objective, start, **vargs)
if log is not None:
log(result)
if len(start) == 1:
return result.x.item(0)
else:
        return result.x
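# A minimal usage sketch, assuming scipy.optimize and functools are imported at
# module level as the body above implies; the minimum of (x - 3)**2 is at x = 3.
best_x = minimize(lambda x: (x - 3) ** 2)
print(round(best_x, 3))  # approximately 3.0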
def _to_dict(self, node, fast_access=True, short_names=False, nested=False,
copy=True, with_links=True):
""" Returns a dictionary with pairings of (full) names as keys and instances as values.
:param fast_access:
If true parameter or result values are returned instead of the
instances.
:param short_names:
If true keys are not full names but only the names.
Raises a ValueError if the names are not unique.
:param nested:
If true returns a nested dictionary.
:param with_links:
If links should be considered
:return: dictionary
:raises: ValueError
"""
if (fast_access or short_names or nested) and not copy:
raise ValueError('You can not request the original data with >>fast_access=True<< or'
                             ' >>short_names=True<< or >>nested=True<<.')
if nested and short_names:
raise ValueError('You cannot use short_names and nested at the '
'same time.')
# First, let's check if we can return the `flat_leaf_storage_dict` or a copy of that, this
# is faster than creating a novel dictionary by tree traversal.
if node.v_is_root:
temp_dict = self._flat_leaf_storage_dict
if not fast_access and not short_names:
if copy:
return temp_dict.copy()
else:
return temp_dict
else:
iterator = temp_dict.values()
else:
iterator = node.f_iter_leaves(with_links=with_links)
# If not we need to build the dict by iterating recursively over all leaves:
result_dict = {}
for val in iterator:
if short_names:
new_key = val.v_name
else:
new_key = val.v_full_name
if new_key in result_dict:
raise ValueError('Your short names are not unique. '
'Duplicate key `%s`!' % new_key)
new_val = self._apply_fast_access(val, fast_access)
result_dict[new_key] = new_val
if nested:
if node.v_is_root:
nest_dict = result_dict
else:
# remove the name of the current node
# such that the nested dictionary starts with the children
strip = len(node.v_full_name) + 1
nest_dict = {key[strip:]: val for key, val in result_dict.items()}
result_dict = nest_dictionary(nest_dict, '.')
    return result_dict
def home():
"""Temporary helper function to link to the API routes"""
return dict(links=dict(api='{}{}'.format(request.url, PREFIX[1:]))), \
        HTTPStatus.OK
def build_routename(cls, name, routename_prefix=None):
"""
Given a ``name`` & an optional ``routename_prefix``, this generates a
name for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param routename_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blogpost_list``
:type routename_prefix: string
:returns: The final name
:rtype: string
"""
if routename_prefix is None:
routename_prefix = 'api_{}'.format(
cls.__name__.replace('Resource', '').lower()
)
routename_prefix = routename_prefix.rstrip('_')
    return '_'.join([routename_prefix, name])
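# The prefix logic in isolation: 'BlogPostResource' -> 'api_blogpost', then
# joined with the given name.
prefix = 'api_{}'.format('BlogPostResource'.replace('Resource', '').lower())
print('_'.join([prefix.rstrip('_'), 'list']))  # api_blogpost_list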
def custom_indicator_class_factory(indicator_type, base_class, class_dict, value_fields):
"""Internal method for dynamically building Custom Indicator Class."""
value_count = len(value_fields)
def init_1(self, tcex, value1, xid, **kwargs): # pylint: disable=W0641
"""Init method for Custom Indicator Types with one value"""
summary = self.build_summary(value1) # build the indicator summary
base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)
for k, v in class_dict.items():
setattr(self, k, v)
def init_2(self, tcex, value1, value2, xid, **kwargs): # pylint: disable=W0641
"""Init method for Custom Indicator Types with two values."""
summary = self.build_summary(value1, value2) # build the indicator summary
base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)
for k, v in class_dict.items():
setattr(self, k, v)
def init_3(self, tcex, value1, value2, value3, xid, **kwargs): # pylint: disable=W0641
"""Init method for Custom Indicator Types with three values."""
summary = self.build_summary(value1, value2, value3) # build the indicator summary
base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)
for k, v in class_dict.items():
setattr(self, k, v)
class_name = indicator_type.replace(' ', '')
init_method = locals()['init_{}'.format(value_count)]
newclass = type(str(class_name), (base_class,), {'__init__': init_method})
    return newclass
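# A standalone sketch of the dynamic-class technique used above:
# type(name, bases, namespace) builds a class at runtime.
def make_point_class(n_fields):
    def __init__(self, *values):
        for i, v in enumerate(values[:n_fields]):
            setattr(self, 'value{}'.format(i + 1), v)
    return type('Point{}'.format(n_fields), (object,), {'__init__': __init__})

Point2 = make_point_class(2)
p = Point2(3, 4)
print(p.value1, p.value2)  # 3 4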
def metar(wxdata: MetarData, units: Units) -> MetarTrans:
"""
Translate the results of metar.parse
Keys: Wind, Visibility, Clouds, Temperature, Dewpoint, Altimeter, Other
"""
translations = shared(wxdata, units)
translations['wind'] = wind(wxdata.wind_direction, wxdata.wind_speed,
wxdata.wind_gust, wxdata.wind_variable_direction,
units.wind_speed)
translations['temperature'] = temperature(wxdata.temperature, units.temperature)
translations['dewpoint'] = temperature(wxdata.dewpoint, units.temperature)
translations['remarks'] = remarks.translate(wxdata.remarks) # type: ignore
    return MetarTrans(**translations)
def fdr(pvals, alpha=0.05, method='fdr_bh'):
"""P-values FDR correction with Benjamini/Hochberg and
Benjamini/Yekutieli procedure.
This covers Benjamini/Hochberg for independent or positively correlated and
Benjamini/Yekutieli for general or negatively correlated tests.
Parameters
----------
pvals : array_like
Array of p-values of the individual tests.
alpha : float
Error rate (= alpha level).
method : str
FDR correction methods ::
'fdr_bh' : Benjamini/Hochberg for independent / posit correlated tests
'fdr_by' : Benjamini/Yekutieli for negatively correlated tests
Returns
-------
reject : array, bool
True if a hypothesis is rejected, False if not
pval_corrected : array
P-values adjusted for multiple hypothesis testing using the BH or BY
correction.
See also
--------
bonf : Bonferroni correction
holm : Holm-Bonferroni correction
Notes
-----
From Wikipedia:
The **Benjamini–Hochberg** procedure (BH step-up procedure) controls the
false discovery rate (FDR) at level :math:`\\alpha`. It works as follows:
1. For a given :math:`\\alpha`, find the largest :math:`k` such that
:math:`P_{(k)}\\leq \\frac {k}{m}\\alpha.`
2. Reject the null hypothesis (i.e., declare discoveries) for all
:math:`H_{(i)}` for :math:`i = 1, \\ldots, k`.
The BH procedure is valid when the m tests are independent, and also in
various scenarios of dependence, but is not universally valid.
The **Benjamini–Yekutieli** procedure (BY) controls the FDR under arbitrary
dependence assumptions. This refinement modifies the threshold and finds
the largest :math:`k` such that:
.. math::
P_{(k)} \\leq \\frac{k}{m \\cdot c(m)} \\alpha
References
----------
- Benjamini, Y., and Hochberg, Y. (1995). Controlling the false discovery
rate: a practical and powerful approach to multiple testing. Journal of
the Royal Statistical Society Series B, 57, 289–300.
- Benjamini, Y., and Yekutieli, D. (2001). The control of the false
discovery rate in multiple testing under dependency. Annals of
Statistics, 29, 1165–1188.
- https://en.wikipedia.org/wiki/False_discovery_rate
Examples
--------
FDR correction of an array of p-values
>>> from pingouin import fdr
>>> pvals = [.50, .003, .32, .054, .0003]
>>> reject, pvals_corr = fdr(pvals, alpha=.05)
>>> print(reject, pvals_corr)
[False True False False True] [0.5 0.0075 0.4 0.09 0.0015]
"""
assert method.lower() in ['fdr_bh', 'fdr_by']
# Convert to array and save original shape
pvals = np.asarray(pvals)
shape_init = pvals.shape
pvals = pvals.ravel()
num_nan = np.isnan(pvals).sum()
# Sort the (flattened) p-values
pvals_sortind = np.argsort(pvals)
pvals_sorted = pvals[pvals_sortind]
sortrevind = pvals_sortind.argsort()
ntests = pvals.size - num_nan
# Empirical CDF factor
ecdffactor = np.arange(1, ntests + 1) / float(ntests)
if method.lower() == 'fdr_by':
cm = np.sum(1. / np.arange(1, ntests + 1))
ecdffactor /= cm
# Now we adjust the p-values
pvals_corr = np.diag(pvals_sorted / ecdffactor[..., None])
pvals_corr = np.minimum.accumulate(pvals_corr[::-1])[::-1]
pvals_corr = np.clip(pvals_corr, None, 1)
# And revert to the original shape and order
pvals_corr = np.append(pvals_corr, np.full(num_nan, np.nan))
pvals_corrected = pvals_corr[sortrevind].reshape(shape_init)
with np.errstate(invalid='ignore'):
reject = np.less(pvals_corrected, alpha)
# reject = reject[sortrevind].reshape(shape_init)
    return reject, pvals_corrected
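# A compact, standalone Benjamini-Hochberg sketch for comparison with the
# docstring example above (independent of the implementation in fdr()).
import numpy as np

def bh_adjust(pvals):
    p = np.asarray(pvals, dtype=float)
    order = np.argsort(p)
    m = p.size
    ranked = p[order] * m / np.arange(1, m + 1)
    adjusted = np.minimum.accumulate(ranked[::-1])[::-1]
    out = np.empty(m)
    out[order] = np.clip(adjusted, None, 1)
    return out

print(bh_adjust([.50, .003, .32, .054, .0003]))
# approximately [0.5, 0.0075, 0.4, 0.09, 0.0015], matching the docstring example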
def preprocess_legislation(legislation_json):
'''
Preprocess the legislation parameters to add prices and amounts from national accounts
'''
import os
import pkg_resources
import pandas as pd
# Add fuel prices to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
prix_annuel_carburants = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'prix',
'prix_annuel_carburants.csv'
), sep =';'
)
prix_annuel_carburants['Date'] = prix_annuel_carburants['Date'].astype(int)
prix_annuel_carburants = prix_annuel_carburants.set_index('Date')
all_values = {}
prix_carburants = {
"@type": "Node",
"description": "prix des carburants en euros par hectolitre",
"children": {},
}
# For super_95_e10, we need to use the price of super_95 between 2009 and 2012 included,
# because we don't have the data. We use super_95 because it is very close and won't affect the results too much
prix_annuel = prix_annuel_carburants['super_95_e10_ttc']
all_values['super_95_e10_ttc'] = []
for year in range(1990, 2009):
values1 = dict()
values1['start'] = u'{}-01-01'.format(year)
values1['stop'] = u'{}-12-31'.format(year)
values1['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values1)
prix_annuel = prix_annuel_carburants['super_95_ttc']
for year in range(2009, 2013):
values2 = dict()
values2['start'] = u'{}-01-01'.format(year)
values2['stop'] = u'{}-12-31'.format(year)
values2['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values2)
prix_annuel = prix_annuel_carburants['super_95_e10_ttc']
for year in range(2013, 2015):
values3 = dict()
values3['start'] = u'{}-01-01'.format(year)
values3['stop'] = u'{}-12-31'.format(year)
values3['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values3)
prix_carburants['children']['super_95_e10_ttc'] = {
"@type": "Parameter",
"description": 'super_95_e10_ttc'.replace('_', ' '),
"format": "float",
"values": all_values['super_95_e10_ttc']
}
for element in ['diesel_ht', 'diesel_ttc', 'super_95_ht', 'super_95_ttc', 'super_98_ht', 'super_98_ttc',
'super_95_e10_ht', 'gplc_ht', 'gplc_ttc', 'super_plombe_ht', 'super_plombe_ttc']:
assert element in prix_annuel_carburants.columns
prix_annuel = prix_annuel_carburants[element]
all_values[element] = []
for year in range(1990, 2015):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = prix_annuel.loc[year] * 100
all_values[element].append(values)
prix_carburants['children'][element] = {
"@type": "Parameter",
"description": element.replace('_', ' '),
"format": "float",
"values": all_values[element]
}
legislation_json['children']['imposition_indirecte']['children']['prix_carburants'] = prix_carburants
# Add the number of vehicle in circulation to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
parc_annuel_moyen_vp = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'quantites',
'parc_annuel_moyen_vp.csv'
), sep =';'
)
parc_annuel_moyen_vp = parc_annuel_moyen_vp.set_index('Unnamed: 0')
values_parc = {}
parc_vp = {
"@type": "Node",
"description": "taille moyenne du parc automobile en France métropolitaine en milliers de véhicules",
"children": {},
}
for element in ['diesel', 'essence']:
taille_parc = parc_annuel_moyen_vp[element]
values_parc[element] = []
for year in range(1990, 2014):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = taille_parc.loc[year]
values_parc[element].append(values)
parc_vp['children'][element] = {
"@type": "Parameter",
"description": "nombre de véhicules particuliers immatriculés en France à motorisation " + element,
"format": "float",
"values": values_parc[element]
}
legislation_json['children']['imposition_indirecte']['children']['parc_vp'] = parc_vp
# Add the total quantity of fuel consumed per year to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
quantite_carbu_vp_france = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'quantites',
'quantite_carbu_vp_france.csv'
), sep =';'
)
quantite_carbu_vp_france = quantite_carbu_vp_france.set_index('Unnamed: 0')
values_quantite = {}
quantite_carbu_vp = {
"@type": "Node",
"description": "quantite de carburants consommés en France métropolitaine",
"children": {},
}
for element in ['diesel', 'essence']:
quantite_carburants = quantite_carbu_vp_france[element]
values_quantite[element] = []
for year in range(1990, 2014):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = quantite_carburants.loc[year]
values_quantite[element].append(values)
quantite_carbu_vp['children'][element] = {
"@type": "Parameter",
"description": "consommation totale de " + element + " en France",
"format": "float",
"values": values_quantite[element]
}
legislation_json['children']['imposition_indirecte']['children']['quantite_carbu_vp'] = quantite_carbu_vp
# Add the shares of each type of supercabrurant (SP95, SP98, E10, etc.) among supercarburants
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
part_des_types_de_supercarburants = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'part_des_types_de_supercarburants.csv'
), sep =';'
)
del part_des_types_de_supercarburants['Source']
part_des_types_de_supercarburants = \
part_des_types_de_supercarburants[part_des_types_de_supercarburants['annee'] > 0].copy()
part_des_types_de_supercarburants['annee'] = part_des_types_de_supercarburants['annee'].astype(int)
part_des_types_de_supercarburants = part_des_types_de_supercarburants.set_index('annee')
# delete share of e_85 because we have no data for its price
# When the sum of all shares is not one, need to multiply each share by the same coefficient
cols = part_des_types_de_supercarburants.columns
for element in cols:
part_des_types_de_supercarburants[element] = (
part_des_types_de_supercarburants[element] /
(part_des_types_de_supercarburants['somme'] - part_des_types_de_supercarburants['sp_e85'])
)
del part_des_types_de_supercarburants['sp_e85']
del part_des_types_de_supercarburants['somme']
cols = part_des_types_de_supercarburants.columns
part_des_types_de_supercarburants['somme'] = 0
for element in cols:
part_des_types_de_supercarburants['somme'] += part_des_types_de_supercarburants[element]
assert (part_des_types_de_supercarburants['somme'] == 1).any(), "The weighting of the shares did not work"
values_part_supercarburants = {}
part_type_supercaburant = {
"@type": "Node",
"description": "part de la consommation totale d'essence de chaque type supercarburant",
"children": {},
}
for element in ['super_plombe', 'sp_95', 'sp_98', 'sp_e10']:
part_par_carburant = part_des_types_de_supercarburants[element]
values_part_supercarburants[element] = []
for year in range(2000, 2015):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = part_par_carburant.loc[year]
values_part_supercarburants[element].append(values)
part_type_supercaburant['children'][element] = {
"@type": "Parameter",
"description": "part de " + element + " dans la consommation totale d'essences",
"format": "float",
"values": values_part_supercarburants[element]
}
legislation_json['children']['imposition_indirecte']['children']['part_type_supercarburants'] = \
part_type_supercaburant
# Add data from comptabilite national about alcohol
alcool_conso_et_vin = {
"@type": "Node",
"description": "alcools",
"children": {},
}
alcool_conso_et_vin['children']['vin'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur le vin",
"children": {
"droit_cn_vin": {
"@type": "Parameter",
"description": u"Masse droit vin, vin mousseux, cidres et poirés selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 129},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 130},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 129},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 132},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 133},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 127},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 127},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 127},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 127},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 125},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 117},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 119},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 117},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 114},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 117},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 119},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 118},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 120},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 122},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_vin": {
"@type": "Parameter",
"description": u"Masse consommation vin, vin mousseux, cidres et poirés selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 7191},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 7419},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 7636},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 8025},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 8451},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 8854},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 9168},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 9476},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 9695},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 9985},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 9933},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 10002},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 10345},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 10461},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 10728},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 11002},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 11387},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 11407},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 11515},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
},
}
alcool_conso_et_vin['children']['biere'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur la bière",
"children": {
"droit_cn_biere": {
"@type": "Parameter",
"description": "Masse droit biere selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 361},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 366},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 364},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 365},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 380},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 359},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 364},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 361},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 370},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 378},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 364},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 396},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 382},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 375},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 376},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 375},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 393},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 783},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 897},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_biere": {
"@type": "Parameter",
"description": u"Masse consommation biere selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2111},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2144},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2186},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2291},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2334},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2290},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2327},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2405},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2554},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2484},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2466},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2486},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2458},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2287},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2375},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2461},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2769},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2868},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3321},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
},
}
alcool_conso_et_vin['children']['alcools_forts'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur alcools forts",
"children": {
"droit_cn_alcools": {
"@type": "Parameter",
"description": "Masse droit alcool selon comptabilité nationale sans droits sur les produits intermediaires et cotisation spéciale alcool fort",
"format": "float",
"values": [
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 1872},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 1957},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 1932},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 1891},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 1908},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 1842},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 1954},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 1990},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2005},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2031},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2111},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2150},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2225},
# TODO: Problème pour les alcools forts chiffres différents entre les deux bases excel !
],
},
"droit_cn_alcools_total": {
"@type": "Parameter",
"description": u"Masse droit alcool selon comptabilité nationale avec les differents droits",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2337},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2350},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2366},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2369},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2385},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2416},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2514},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2503},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2453},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2409},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2352},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2477},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2516},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2528},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2629},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2734},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 3078},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2718},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3022},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_alcools": {
"@type": "Parameter",
"description": u"Masse consommation alcool selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 4893},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 5075},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 5065},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 5123},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 5234},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 5558},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 5721},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 5932},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 5895},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 5967},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 5960},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 6106},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 6142},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 6147},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 6342},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 6618},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 6680},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 6996},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 7022},
],
},
},
}
legislation_json['children']['imposition_indirecte']['children']['alcool_conso_et_vin'] = alcool_conso_et_vin
# Make the change from francs to euros for excise taxes in ticpe
keys_ticpe = legislation_json['children']['imposition_indirecte']['children']['ticpe']['children'].keys()
for element in keys_ticpe:
get_values = \
legislation_json['children']['imposition_indirecte']['children']['ticpe']['children'][element]['values']
for each_value in get_values:
get_character = '{}'.format(each_value['start'])
year = int(get_character[:4])
            if year < 2002:
                # amounts before 2002 are expressed in francs; convert at the fixed rate 6.55957 F/EUR
                each_value['value'] = each_value['value'] / 6.55957
return legislation_json | Preprocess the legislation parameters to add prices and amounts from national accounts |
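A minimal standalone sketch of the franc-to-euro step above, assuming only that a parameter's values are dicts with 'start', 'stop' and 'value' keys; the helper name and the sample entry are hypothetical.

FRANC_PER_EURO = 6.55957  # fixed legal conversion rate, as used above

def convert_pre_euro_values(values):
    # entries whose period starts before 2002 are assumed to be in francs
    for entry in values:
        year = int(str(entry['start'])[:4])
        if year < 2002:
            entry['value'] = entry['value'] / FRANC_PER_EURO
    return values

sample = [{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2416}]
convert_pre_euro_values(sample)  # 'value' becomes roughly 368.3 (euros)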
def get_parameter_p_value_too_high_warning(
model_type, model_params, parameter, p_value, maximum_p_value
):
""" Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
if p_value > maximum_p_value:
data = {
"{}_p_value".format(parameter): p_value,
"{}_maximum_p_value".format(parameter): maximum_p_value,
}
data.update(model_params)
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format(
model_type=model_type, parameter=parameter
)
),
description=(
"Model fit {parameter} p-value is too high. Candidate model rejected.".format(
parameter=parameter
)
),
data=data,
)
)
return warnings | Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning. |
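A hedged usage sketch (the inputs and the 0.1 threshold are made up, and EEMeterWarning is assumed to be importable from the same package): the helper returns a one-element list when the p-value exceeds the limit and an empty list otherwise.

warnings = get_parameter_p_value_too_high_warning(
    model_type='cdd_hdd',
    model_params={'intercept': 1.2, 'beta_cdd': 0.3},  # illustrative values
    parameter='intercept',
    p_value=0.25,
    maximum_p_value=0.1,
)
if warnings:
    # 'eemeter.caltrack_daily.cdd_hdd.intercept_p_value_too_high'
    print(warnings[0].qualified_name)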
def statuses(self):
"""Get a list of status Resources from the server.
:rtype: List[Status]
"""
r_json = self._get_json('status')
statuses = [Status(self._options, self._session, raw_stat_json)
for raw_stat_json in r_json]
return statuses | Get a list of status Resources from the server.
:rtype: List[Status] |
def append_seeding_annotation(self, annotation: str, values: Set[str]) -> Seeding:
"""Add a seed induction method for single annotation's values.
:param annotation: The annotation to filter by
:param values: The values of the annotation to keep
"""
return self.seeding.append_annotation(annotation, values) | Add a seed induction method for single annotation's values.
:param annotation: The annotation to filter by
:param values: The values of the annotation to keep |
def p_file_type_1(self, p):
"""file_type : FILE_TYPE file_type_value"""
try:
self.builder.set_file_type(self.document, p[2])
except OrderError:
self.order_error('FileType', 'FileName', p.lineno(1))
except CardinalityError:
self.more_than_one_error('FileType', p.lineno(1)) | file_type : FILE_TYPE file_type_value |
def process_m2m_through_save(self, obj, created=False, **kwargs):
"""Process M2M post save for custom through model."""
# We are only interested in signals that establish relations.
if not created:
return
self._process_m2m_through(obj, 'post_add') | Process M2M post save for custom through model. |
def setAndUpdateValues(self,solution_next,IncomeDstn,LivPrb,DiscFac):
'''
Unpacks some of the inputs (and calculates simple objects based on them),
storing the results in self for use by other methods. These include:
income shocks and probabilities, next period's marginal value function
(etc), the probability of getting the worst income shock next period,
the patience factor, human wealth, and the bounding MPCs.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
Returns
-------
None
'''
self.DiscFacEff = DiscFac*LivPrb # "effective" discount factor
self.ShkPrbsNext = IncomeDstn[0]
self.PermShkValsNext = IncomeDstn[1]
self.TranShkValsNext = IncomeDstn[2]
self.PermShkMinNext = np.min(self.PermShkValsNext)
self.TranShkMinNext = np.min(self.TranShkValsNext)
self.vPfuncNext = solution_next.vPfunc
self.WorstIncPrb = np.sum(self.ShkPrbsNext[
(self.PermShkValsNext*self.TranShkValsNext)==
(self.PermShkMinNext*self.TranShkMinNext)])
if self.CubicBool:
self.vPPfuncNext = solution_next.vPPfunc
if self.vFuncBool:
self.vFuncNext = solution_next.vFunc
# Update the bounding MPCs and PDV of human wealth:
self.PatFac = ((self.Rfree*self.DiscFacEff)**(1.0/self.CRRA))/self.Rfree
self.MPCminNow = 1.0/(1.0 + self.PatFac/solution_next.MPCmin)
self.ExIncNext = np.dot(self.ShkPrbsNext,self.TranShkValsNext*self.PermShkValsNext)
self.hNrmNow = self.PermGroFac/self.Rfree*(self.ExIncNext + solution_next.hNrm)
self.MPCmaxNow = 1.0/(1.0 + (self.WorstIncPrb**(1.0/self.CRRA))*
self.PatFac/solution_next.MPCmax) | Unpacks some of the inputs (and calculates simple objects based on them),
storing the results in self for use by other methods. These include:
income shocks and probabilities, next period's marginal value function
(etc), the probability of getting the worst income shock next period,
the patience factor, human wealth, and the bounding MPCs.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
Returns
-------
None |
def trc(postfix: Optional[str] = None, *, depth=1) -> logging.Logger:
"""
Automatically generate a logger from the calling function
:param postfix: append another logger name on top this
:param depth: depth of the call stack at which to capture the caller name
:return: instance of a logger with a correct path to a current caller
"""
x = inspect.stack()[depth]
code = x[0].f_code
func = [obj for obj in gc.get_referrers(code) if inspect.isfunction(obj)][0]
mod = inspect.getmodule(x.frame)
parts = (mod.__name__, func.__qualname__)
if postfix:
parts += (postfix,)
logger_name = '.'.join(parts)
return logging.getLogger(logger_name) | Automatically generate a logger from the calling function
:param postfix: append another logger name on top this
:param depth: depth of the call stack at which to capture the caller name
:return: instance of a logger with a correct path to a current caller |
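A hypothetical usage sketch: called with no arguments inside a function, trc() returns a logger named after the caller's module and qualified function name (the exact name depends on where the code lives).

import logging
logging.basicConfig(level=logging.DEBUG)

def do_work():
    log = trc()          # e.g. logger '<module>.do_work'
    log.debug('starting work')
    trc('db').debug("same caller, logger name gets a '.db' suffix")

do_work()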
def remove_independent_variable(self, variable_name):
"""
Remove an independent variable which was added with add_independent_variable
:param variable_name: name of variable to remove
:return:
"""
self._remove_child(variable_name)
# Remove also from the list of independent variables
self._independent_variables.pop(variable_name) | Remove an independent variable which was added with add_independent_variable
:param variable_name: name of variable to remove
:return: |
def run(self):
""" Called by the process, it runs it.
NEVER call this method directly. Instead call start() to start the separate process.
        If you don't want to use a second process, then call fetch() directly on this instance.
To stop, call terminate()
"""
if not self._queue:
raise Exception("No queue available to send messages")
factory = LiveStreamFactory(self)
self._reactor.connectSSL("streaming.campfirenow.com", 443, factory, ssl.ClientContextFactory())
self._reactor.run() | Called by the process, it runs it.
NEVER call this method directly. Instead call start() to start the separate process.
        If you don't want to use a second process, then call fetch() directly on this instance.
To stop, call terminate() |
def on_message(self, msg):
'''
Called when client sends a message.
Supports a python debugging console. This forms
the "eval" part of a standard read-eval-print loop.
Currently the only implementation of the python
console is in the WebUI but the implementation
of a terminal based console is planned.
'''
msg = json.loads(msg)
psession = self.funcserver.pysessions.get(self.pysession_id, None)
if psession is None:
interpreter = PyInterpreter(self.funcserver.define_python_namespace())
psession = dict(interpreter=interpreter, socks=set([self.id]))
self.funcserver.pysessions[self.pysession_id] = psession
else:
interpreter = psession['interpreter']
psession['socks'].add(self.id)
code = msg['code']
msg_id = msg['id']
stdout = sys.stdout
try:
sys.stdout = cStringIO.StringIO()
interpreter.runsource(code)
output = sys.stdout.getvalue() or interpreter.output
if isinstance(output, list): output = ''.join(output)
interpreter.output = []
finally:
sys.stdout = stdout
msg = {'type': MSG_TYPE_CONSOLE, 'id': msg_id, 'data': output}
self.send_message(msg) | Called when client sends a message.
Supports a python debugging console. This forms
the "eval" part of a standard read-eval-print loop.
Currently the only implementation of the python
console is in the WebUI but the implementation
of a terminal based console is planned. |
def isasteroid(self):
"""`True` if `targetname` appears to be an asteroid."""
if self.asteroid is not None:
return self.asteroid
elif self.comet is not None:
return not self.comet
else:
            # truthy when any asteroid identifier could be parsed from the target name
            return any(self.parse_asteroid()) | `True` if `targetname` appears to be an asteroid.
def get_compound_ids(self):
"""Extract the current compound ids in the database. Updates the self.compound_ids list
"""
cursor = self.conn.cursor()
cursor.execute('SELECT inchikey_id FROM metab_compound')
self.conn.commit()
for row in cursor:
if not row[0] in self.compound_ids:
self.compound_ids.append(row[0]) | Extract the current compound ids in the database. Updates the self.compound_ids list |
def add_message(self, id, body, tags=False):
"""
        add messages to the tx_queue
:param id: str message Id
:param body: str the message body
:param tags: dict[string->string] tags to be associated with the message
:return: self
"""
if not tags:
tags = {}
try:
self._tx_queue_lock.acquire()
self._tx_queue.append(
EventHub_pb2.Message(id=id, body=body, tags=tags, zone_id=self.eventhub_client.zone_id))
finally:
self._tx_queue_lock.release()
        return self | add messages to the tx_queue
:param id: str message Id
:param body: str the message body
:param tags: dict[string->string] tags to be associated with the message
:return: self |
def _delete(self, url, data, scope):
"""
Make a DELETE request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a DELETE request to.
data (str): The json encoded payload to DELETE.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
- `COMPLETION_PROVIDER_SCOPE`
"""
self._create_session(scope)
response = self.session.delete(url, data=data)
return response.status_code, response.text | Make a DELETE request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a DELETE request to.
data (str): The json encoded payload to DELETE.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
- `COMPLETION_PROVIDER_SCOPE` |
def encoded_dict(in_dict):
"""Encode every value of a dict to UTF-8.
Useful for POSTing requests on the 'data' parameter of urlencode.
"""
out_dict = {}
for k, v in in_dict.items():
if isinstance(v, unicode):
if sys.version_info < (3, 0):
v = v.encode('utf8')
elif isinstance(v, str):
# Must be encoded in UTF-8
if sys.version_info < (3, 0):
                # decoding here only validates that the bytes are proper UTF-8;
                # the result is discarded and the original byte string is kept
                v.decode('utf8')
out_dict[k] = v
return out_dict | Encode every value of a dict to UTF-8.
Useful for POSTing requests on the 'data' parameter of urlencode. |
def _verify_configs(configs):
"""
Verify a Molecule config was found and returns None.
:param configs: A list containing absolute paths to Molecule config files.
:return: None
"""
if configs:
scenario_names = [c.scenario.name for c in configs]
for scenario_name, n in collections.Counter(scenario_names).items():
if n > 1:
msg = ("Duplicate scenario name '{}' found. "
'Exiting.').format(scenario_name)
util.sysexit_with_message(msg)
else:
msg = "'{}' glob failed. Exiting.".format(MOLECULE_GLOB)
util.sysexit_with_message(msg) | Verify a Molecule config was found and returns None.
:param configs: A list containing absolute paths to Molecule config files.
:return: None |
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1) | Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries |
def setUp(self, port, soc, input):
'''
Instance Data:
op -- WSDLTools Operation instance
bop -- WSDLTools BindingOperation instance
input -- boolean input/output
'''
name = soc.getOperationName()
bop = port.getBinding().operations.get(name)
op = port.getBinding().getPortType().operations.get(name)
assert op is not None, 'port has no operation %s' %name
assert bop is not None, 'port has no binding operation %s' %name
self.input = input
self.op = op
self.bop = bop | Instance Data:
op -- WSDLTools Operation instance
bop -- WSDLTools BindingOperation instance
input -- boolean input/output |
def delta_e_cie2000(lab_color_vector, lab_color_matrix, Kl=1, Kc=1, Kh=1):
"""
Calculates the Delta E (CIE2000) of two colors.
"""
L, a, b = lab_color_vector
avg_Lp = (L + lab_color_matrix[:, 0]) / 2.0
C1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
avg_C1_C2 = (C1 + C2) / 2.0
G = 0.5 * (1 - numpy.sqrt(numpy.power(avg_C1_C2, 7.0) / (numpy.power(avg_C1_C2, 7.0) + numpy.power(25.0, 7.0))))
a1p = (1.0 + G) * a
a2p = (1.0 + G) * lab_color_matrix[:, 1]
C1p = numpy.sqrt(numpy.power(a1p, 2) + numpy.power(b, 2))
C2p = numpy.sqrt(numpy.power(a2p, 2) + numpy.power(lab_color_matrix[:, 2], 2))
avg_C1p_C2p = (C1p + C2p) / 2.0
h1p = numpy.degrees(numpy.arctan2(b, a1p))
h1p += (h1p < 0) * 360
h2p = numpy.degrees(numpy.arctan2(lab_color_matrix[:, 2], a2p))
h2p += (h2p < 0) * 360
avg_Hp = (((numpy.fabs(h1p - h2p) > 180) * 360) + h1p + h2p) / 2.0
T = 1 - 0.17 * numpy.cos(numpy.radians(avg_Hp - 30)) + \
0.24 * numpy.cos(numpy.radians(2 * avg_Hp)) + \
0.32 * numpy.cos(numpy.radians(3 * avg_Hp + 6)) - \
0.2 * numpy.cos(numpy.radians(4 * avg_Hp - 63))
diff_h2p_h1p = h2p - h1p
delta_hp = diff_h2p_h1p + (numpy.fabs(diff_h2p_h1p) > 180) * 360
delta_hp -= (h2p > h1p) * 720
delta_Lp = lab_color_matrix[:, 0] - L
delta_Cp = C2p - C1p
delta_Hp = 2 * numpy.sqrt(C2p * C1p) * numpy.sin(numpy.radians(delta_hp) / 2.0)
S_L = 1 + ((0.015 * numpy.power(avg_Lp - 50, 2)) / numpy.sqrt(20 + numpy.power(avg_Lp - 50, 2.0)))
S_C = 1 + 0.045 * avg_C1p_C2p
S_H = 1 + 0.015 * avg_C1p_C2p * T
delta_ro = 30 * numpy.exp(-(numpy.power(((avg_Hp - 275) / 25), 2.0)))
R_C = numpy.sqrt((numpy.power(avg_C1p_C2p, 7.0)) / (numpy.power(avg_C1p_C2p, 7.0) + numpy.power(25.0, 7.0)))
R_T = -2 * R_C * numpy.sin(2 * numpy.radians(delta_ro))
return numpy.sqrt(
numpy.power(delta_Lp / (S_L * Kl), 2) +
numpy.power(delta_Cp / (S_C * Kc), 2) +
numpy.power(delta_Hp / (S_H * Kh), 2) +
R_T * (delta_Cp / (S_C * Kc)) * (delta_Hp / (S_H * Kh))) | Calculates the Delta E (CIE2000) of two colors. |
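A hedged example with made-up Lab values: the first argument is one Lab colour as a length-3 array, the second a matrix with one Lab colour per row, and the result is an array of Delta E 2000 distances.

import numpy
lab_color_vector = numpy.array([50.0, 2.6772, -79.7751])
lab_color_matrix = numpy.array([
    [50.0, 0.0, -82.7485],
    [61.0, 10.0, -60.0],
])
print(delta_e_cie2000(lab_color_vector, lab_color_matrix))  # two distances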
def whichEncoding(self):
"""How should I be encoded?
@returns: one of ENCODE_URL, ENCODE_HTML_FORM, or ENCODE_KVFORM.
@change: 2.1.0 added the ENCODE_HTML_FORM response.
"""
if self.request.mode in BROWSER_REQUEST_MODES:
if self.fields.getOpenIDNamespace() == OPENID2_NS and \
len(self.encodeToURL()) > OPENID1_URL_LIMIT:
return ENCODE_HTML_FORM
else:
return ENCODE_URL
else:
return ENCODE_KVFORM | How should I be encoded?
@returns: one of ENCODE_URL, ENCODE_HTML_FORM, or ENCODE_KVFORM.
@change: 2.1.0 added the ENCODE_HTML_FORM response. |
def process_request(self, request):
"""
        Lazily set user and token
"""
request.token = get_token(request)
request.user = SimpleLazyObject(lambda: get_user(request))
        request._dont_enforce_csrf_checks = dont_enforce_csrf_checks(request) | Lazily set user and token
def _render_item(self, depth, key, value = None, **settings):
"""
Format single list item.
"""
strptrn = self.INDENT * depth
lchar = self.lchar(settings[self.SETTING_LIST_STYLE])
s = self._es_text(settings, settings[self.SETTING_LIST_FORMATING])
lchar = self.fmt_text(lchar, **s)
strptrn = "{}"
if value is not None:
strptrn += ": {}"
s = self._es_text(settings, settings[self.SETTING_TEXT_FORMATING])
strptrn = self.fmt_text(strptrn.format(key, value), **s)
return '{} {} {}'.format(self.INDENT * depth, lchar, strptrn) | Format single list item. |
def compute_diff(dir_base, dir_cmp):
""" Compare `dir_base' and `dir_cmp' and returns a list with
the following keys:
- deleted files `deleted'
- created files `created'
- updated files `updated'
- deleted directories `deleted_dirs'
"""
data = {}
data['deleted'] = list(set(dir_cmp['files']) - set(dir_base['files']))
data['created'] = list(set(dir_base['files']) - set(dir_cmp['files']))
data['updated'] = []
data['deleted_dirs'] = list(set(dir_cmp['subdirs']) - set(dir_base['subdirs']))
for f in set(dir_cmp['files']).intersection(set(dir_base['files'])):
if dir_base['index'][f] != dir_cmp['index'][f]:
data['updated'].append(f)
return data | Compare `dir_base' and `dir_cmp' and returns a list with
the following keys:
- deleted files `deleted'
- created files `created'
- updated files `updated'
- deleted directories `deleted_dirs' |
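An illustrative call with hypothetical snapshots; each snapshot dict is assumed to carry 'files', 'subdirs' and an 'index' mapping file name to a checksum, which is exactly what the function reads.

dir_base = {'files': ['a.txt', 'b.txt'], 'subdirs': ['docs'],
            'index': {'a.txt': 'h1-new', 'b.txt': 'h2'}}
dir_cmp = {'files': ['a.txt', 'c.txt'], 'subdirs': ['docs', 'old'],
           'index': {'a.txt': 'h1-old', 'c.txt': 'h3'}}
diff = compute_diff(dir_base, dir_cmp)
# diff == {'deleted': ['c.txt'], 'created': ['b.txt'],
#          'updated': ['a.txt'], 'deleted_dirs': ['old']}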
def deal_with_changeset_stack_policy(self, fqn, stack_policy):
""" Set a stack policy when using changesets.
ChangeSets don't allow you to set stack policies in the same call to
update them. This sets it before executing the changeset if the
stack policy is passed in.
Args:
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
"""
if stack_policy:
kwargs = generate_stack_policy_args(stack_policy)
kwargs["StackName"] = fqn
logger.debug("Setting stack policy on %s.", fqn)
self.cloudformation.set_stack_policy(**kwargs) | Set a stack policy when using changesets.
ChangeSets don't allow you to set stack policies in the same call to
update them. This sets it before executing the changeset if the
stack policy is passed in.
Args:
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy. |
def find_root(filename, target='bids'):
"""Find base directory (root) for a filename.
Parameters
----------
filename : instance of Path
search the root for this file
target: str
'bids' (the directory containing 'participants.tsv'), 'subject' (the
directory starting with 'sub-'), 'session' (the directory starting with
'ses-')
Returns
-------
Path
path of the target directory
"""
lg.debug(f'Searching root in {filename}')
if target == 'bids' and (filename / 'dataset_description.json').exists():
return filename
elif filename.is_dir():
pattern = target[:3] + '-'
if filename.stem.startswith(pattern):
return filename
return find_root(filename.parent, target) | Find base directory (root) for a filename.
Parameters
----------
filename : instance of Path
search the root for this file
target: str
'bids' (the directory containing 'participants.tsv'), 'subject' (the
directory starting with 'sub-'), 'session' (the directory starting with
'ses-')
Returns
-------
Path
path of the target directory |
def service_execution(self, name=None, pk=None, scope=None, service=None, **kwargs):
"""
Retrieve single KE-chain ServiceExecution.
Uses the same interface as the :func:`service_executions` method but returns only a single
pykechain :class:`models.ServiceExecution` instance.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:param name: (optional) name to limit the search for
:type name: basestring or None
:param pk: (optional) primary key or id (UUID) of the service to search for
:type pk: basestring or None
:param scope: (optional) id (UUID) of the scope to search in
:type scope: basestring or None
:param kwargs: (optional) additional search keyword arguments
:type kwargs: dict or None
:return: a single :class:`models.ServiceExecution` object
:raises NotFoundError: When no `ServiceExecution` object is found
:raises MultipleFoundError: When more than a single `ServiceExecution` object is found
"""
_service_executions = self.service_executions(name=name, pk=pk, scope=scope, service=service, **kwargs)
if len(_service_executions) == 0:
raise NotFoundError("No service execution fits criteria")
if len(_service_executions) != 1:
raise MultipleFoundError("Multiple service executions fit criteria")
return _service_executions[0] | Retrieve single KE-chain ServiceExecution.
Uses the same interface as the :func:`service_executions` method but returns only a single
pykechain :class:`models.ServiceExecution` instance.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:param name: (optional) name to limit the search for
:type name: basestring or None
:param pk: (optional) primary key or id (UUID) of the service to search for
:type pk: basestring or None
:param scope: (optional) id (UUID) of the scope to search in
:type scope: basestring or None
:param kwargs: (optional) additional search keyword arguments
:type kwargs: dict or None
:return: a single :class:`models.ServiceExecution` object
:raises NotFoundError: When no `ServiceExecution` object is found
:raises MultipleFoundError: When more than a single `ServiceExecution` object is found |
def lookupGeoInfo(positions):
"""Looks up lat/lon info with goole given a list
of positions as parsed by parsePositionFile.
    Returns google results in the form of a dictionary
"""
list_data=[]
oldlat=0
oldlon=0
d={}
for pos in positions:
# Only lookup point if it is above threshold
diff_lat=abs(float(pos['lat'])-oldlat)
diff_lon=abs(float(pos['lon'])-oldlon)
if (diff_lat>POS_THRESHOLD_DEG) or\
(diff_lon>POS_THRESHOLD_DEG):
d=lookup_by_latlon(pos['lat'],pos['lon'])
oldlat=float(pos['lat'])
oldlon=float(pos['lon'])
else:
logger.debug("Skipping %s/%s, close to prev"%(pos['lat'],pos['lon']))
# Use fresh lookup value or old value
list_data.append(d)
logger.info('looked up %d positions'%(len(list_data)))
    return list_data | Looks up lat/lon info with google given a list
of positions as parsed by parsePositionFile.
    Returns google results in the form of a dictionary
def do_check_freshness(self, hosts, services, timeperiods, macromodulations, checkmodulations,
checks, when):
# pylint: disable=too-many-nested-blocks, too-many-branches
"""Check freshness and schedule a check now if necessary.
This function is called by the scheduler if Alignak is configured to check the freshness.
It is called for hosts that have the freshness check enabled if they are only
passively checked.
It is called for services that have the freshness check enabled if they are only
passively checked and if their depending host is not in a freshness expired state
(freshness_expiry = True).
        A log is raised when the freshness expiry is detected and the item is set as
freshness_expiry.
:param hosts: hosts objects, used to launch checks
:type hosts: alignak.objects.host.Hosts
        :param services: services objects, used to launch checks
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used to get check_period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param macromodulations: Macro modulations objects, used in commands (notif, check)
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param checkmodulations: Checkmodulations objects, used to change check command if necessary
:type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
:param checks: checks dict, used to get checks_in_progress for the object
:type checks: dict
:return: A check or None
:rtype: None | object
"""
now = when
# Before, check if class (host or service) have check_freshness OK
# Then check if item want freshness, then check freshness
cls = self.__class__
if not self.in_checking and self.freshness_threshold and not self.freshness_expired:
# logger.debug("Checking freshness for %s, last state update: %s, now: %s.",
# self.get_full_name(), self.last_state_update, now)
if os.getenv('ALIGNAK_LOG_CHECKS', None):
logger.info("--ALC-- -> checking freshness for: %s", self.get_full_name())
# If we never checked this item, we begin the freshness period
if not self.last_state_update:
self.last_state_update = int(now)
if self.last_state_update < now - \
(self.freshness_threshold + cls.additional_freshness_latency):
timeperiod = timeperiods[self.check_period]
if timeperiod is None or timeperiod.is_time_valid(now):
# Create a new check for the scheduler
chk = self.launch_check(now, hosts, services, timeperiods,
macromodulations, checkmodulations, checks)
if not chk:
logger.warning("No raised freshness check for: %s", self)
return None
chk.freshness_expiry_check = True
chk.check_time = time.time()
chk.output = "Freshness period expired: %s" % (
datetime.utcfromtimestamp(int(chk.check_time)).strftime(
"%Y-%m-%d %H:%M:%S %Z"))
if self.my_type == 'host':
if self.freshness_state == 'o':
chk.exit_status = 0
elif self.freshness_state == 'd':
chk.exit_status = 2
elif self.freshness_state in ['u', 'x']:
chk.exit_status = 4
else:
chk.exit_status = 3
else:
if self.freshness_state == 'o':
chk.exit_status = 0
elif self.freshness_state == 'w':
chk.exit_status = 1
elif self.freshness_state == 'c':
chk.exit_status = 2
elif self.freshness_state == 'u':
chk.exit_status = 3
elif self.freshness_state == 'x':
chk.exit_status = 4
else:
chk.exit_status = 3
return chk
else:
logger.debug("Ignored freshness check for %s, because "
"we are not in the check period.", self.get_full_name())
return None | Check freshness and schedule a check now if necessary.
This function is called by the scheduler if Alignak is configured to check the freshness.
It is called for hosts that have the freshness check enabled if they are only
passively checked.
It is called for services that have the freshness check enabled if they are only
passively checked and if their depending host is not in a freshness expired state
(freshness_expiry = True).
        A log is raised when the freshness expiry is detected and the item is set as
freshness_expiry.
:param hosts: hosts objects, used to launch checks
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used launch checks
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used to get check_period
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param macromodulations: Macro modulations objects, used in commands (notif, check)
:type macromodulations: alignak.objects.macromodulation.Macromodulations
:param checkmodulations: Checkmodulations objects, used to change check command if necessary
:type checkmodulations: alignak.objects.checkmodulation.Checkmodulations
:param checks: checks dict, used to get checks_in_progress for the object
:type checks: dict
:return: A check or None
:rtype: None | object |
def add_path_part(url, regex=PATH_PART):
"""
replace the variables in a url template with regex named groups
:param url: string of a url template
:param regex: regex of the named group
:returns: regex
"""
formatter = string.Formatter()
url_var_template = "(?P<{var_name}>{regex})"
for part in formatter.parse(url):
string_part, var_name, _, _ = part
if string_part:
yield string_part
if var_name:
yield url_var_template.format(var_name=var_name, regex=regex) | replace the variables in a url template with regex named groups
:param url: string of a url template
:param regex: regex of the named group
:returns: regex |
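A hedged usage sketch: joining the generated fragments yields a pattern with named groups. The URL template is made up, and PATH_PART is whatever default segment regex the module defines, so the match shown only holds if that regex accepts these values.

import re
pattern = ''.join(add_path_part('/users/{user_id}/posts/{post_id}'))
m = re.match(pattern + '$', '/users/42/posts/99')
# m.groupdict() -> {'user_id': '42', 'post_id': '99'} when PATH_PART matches '42' and '99'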
def p_generate_block(self, p):
'generate_block : BEGIN generate_items END'
p[0] = Block(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | generate_block : BEGIN generate_items END |
def add_axes_and_nodes(self):
"""
Adds the axes (i.e. 2 or 3 axes, not to be confused with matplotlib
axes) and the nodes that belong to each axis.
"""
for i, (group, nodelist) in enumerate(self.nodes.items()):
theta = self.group_theta(group)
if self.has_edge_within_group(group):
theta = theta - self.minor_angle
self.plot_nodes(nodelist, theta, group)
theta = theta + 2 * self.minor_angle
self.plot_nodes(nodelist, theta, group)
else:
self.plot_nodes(nodelist, theta, group) | Adds the axes (i.e. 2 or 3 axes, not to be confused with matplotlib
axes) and the nodes that belong to each axis. |
def setOrientation( self, orientation ):
"""
        Sets the orientation for this toolbar to the input value, and \
        updates the contents margins and collapse button based on the value.
:param orientation | <Qt.Orientation>
"""
super(XToolBar, self).setOrientation(orientation)
        self.refreshButton() | Sets the orientation for this toolbar to the input value, and \
        updates the contents margins and collapse button based on the value.
:param orientation | <Qt.Orientation> |
def change_state_id(self, state_id=None):
"""Changes the id of the state to a new id
If no state_id is passed as parameter, a new state id is generated.
:param str state_id: The new state id of the state
:return:
"""
if state_id is None:
state_id = state_id_generator(used_state_ids=[self.state_id])
if not self.is_root_state and not self.is_root_state_of_library:
used_ids = list(self.parent.states.keys()) + [self.parent.state_id, self.state_id]
if state_id in used_ids:
state_id = state_id_generator(used_state_ids=used_ids)
self._state_id = state_id | Changes the id of the state to a new id
If no state_id is passed as parameter, a new state id is generated.
:param str state_id: The new state id of the state
:return: |
def create_bracket_parameter_id(brackets_id, brackets_curr_decay, increased_id=-1):
"""Create a full id for a specific bracket's hyperparameter configuration
Parameters
----------
brackets_id: int
brackets id
brackets_curr_decay:
brackets curr decay
increased_id: int
increased id
Returns
-------
int
params id
"""
if increased_id == -1:
increased_id = str(create_parameter_id())
params_id = '_'.join([str(brackets_id),
str(brackets_curr_decay),
increased_id])
return params_id | Create a full id for a specific bracket's hyperparameter configuration
Parameters
----------
brackets_id: int
brackets id
brackets_curr_decay:
brackets curr decay
increased_id: int
increased id
Returns
-------
int
params id |
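A small hedged example, assuming create_parameter_id() from the same module is available for the default case; the explicit id is passed as a string because the pieces are joined with '_'.

print(create_bracket_parameter_id(0, 3, increased_id='7'))  # '0_3_7'
print(create_bracket_parameter_id(0, 3))                    # '0_3_<auto-generated id>'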
def remove_group(self, process_id, wit_ref_name, page_id, section_id, group_id):
"""RemoveGroup.
[Preview API] Removes a group from the work item form.
:param str process_id: The ID of the process
:param str wit_ref_name: The reference name of the work item type
:param str page_id: The ID of the page the group is in
:param str section_id: The ID of the section to the group is in
:param str group_id: The ID of the group
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
if wit_ref_name is not None:
route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str')
if page_id is not None:
route_values['pageId'] = self._serialize.url('page_id', page_id, 'str')
if section_id is not None:
route_values['sectionId'] = self._serialize.url('section_id', section_id, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
self._send(http_method='DELETE',
location_id='766e44e1-36a8-41d7-9050-c343ff02f7a5',
version='5.0-preview.1',
route_values=route_values) | RemoveGroup.
[Preview API] Removes a group from the work item form.
:param str process_id: The ID of the process
:param str wit_ref_name: The reference name of the work item type
:param str page_id: The ID of the page the group is in
:param str section_id: The ID of the section to the group is in
:param str group_id: The ID of the group |
def from_ordinal(cls, ordinal):
""" Return the :class:`.Date` that corresponds to the proleptic
Gregorian ordinal, where ``0001-01-01`` has ordinal 1 and
``9999-12-31`` has ordinal 3,652,059. Values outside of this
range trigger a :exc:`ValueError`. The corresponding instance
method for the reverse date-to-ordinal transformation is
:meth:`.to_ordinal`.
"""
if ordinal == 0:
return ZeroDate
if ordinal >= 736695:
year = 2018 # Project release year
month = 1
day = int(ordinal - 736694)
elif ordinal >= 719163:
year = 1970 # Unix epoch
month = 1
day = int(ordinal - 719162)
else:
year = 1
month = 1
day = int(ordinal)
if day < 1 or day > 3652059:
# Note: this requires a maximum of 22 bits for storage
# Could be transferred in 3 bytes.
raise ValueError("Ordinal out of range (1..3652059)")
if year < MIN_YEAR or year > MAX_YEAR:
raise ValueError("Year out of range (%d..%d)" % (MIN_YEAR, MAX_YEAR))
days_in_year = DAYS_IN_YEAR[year]
while day > days_in_year:
day -= days_in_year
year += 1
days_in_year = DAYS_IN_YEAR[year]
days_in_month = DAYS_IN_MONTH[(year, month)]
while day > days_in_month:
day -= days_in_month
month += 1
days_in_month = DAYS_IN_MONTH[(year, month)]
year, month, day = _normalize_day(year, month, day)
return cls.__new(ordinal, year, month, day) | Return the :class:`.Date` that corresponds to the proleptic
Gregorian ordinal, where ``0001-01-01`` has ordinal 1 and
``9999-12-31`` has ordinal 3,652,059. Values outside of this
range trigger a :exc:`ValueError`. The corresponding instance
method for the reverse date-to-ordinal transformation is
:meth:`.to_ordinal`. |
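A hedged usage sketch, assuming this classmethod lives on the surrounding Date class: proleptic ordinal 736695 is 2018-01-01 (handled by the first fast-path branch), and ordinal 0 returns the ZeroDate sentinel.

d = Date.from_ordinal(736695)
# d.year, d.month, d.day -> (2018, 1, 1)
Date.from_ordinal(0)  # returns ZeroDate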
def get_default_property_values(self, classname):
"""Return a dict with default values for all properties declared on this class."""
schema_element = self.get_element_by_class_name(classname)
result = {
property_name: property_descriptor.default
for property_name, property_descriptor in six.iteritems(schema_element.properties)
}
if schema_element.is_edge:
# Remove the source/destination properties for edges, if they exist.
result.pop(EDGE_SOURCE_PROPERTY_NAME, None)
result.pop(EDGE_DESTINATION_PROPERTY_NAME, None)
return result | Return a dict with default values for all properties declared on this class. |
def at_css(self, css, timeout = DEFAULT_AT_TIMEOUT, **kw):
""" Returns the first node matching the given CSSv3 expression or ``None``
if a timeout occurs. """
return self.wait_for_safe(lambda: super(WaitMixin, self).at_css(css),
timeout = timeout,
**kw) | Returns the first node matching the given CSSv3 expression or ``None``
if a timeout occurs. |
def plot(graph, show_x_axis=True,
head=None, tail=None, label_length=4, padding=0,
height=2, show_min_max=True, show_data_range=True,
show_title=True):
"""
show_x_axis: Display X axis
head: Show first [head:] elements
tail: Show last [-tail:] elements
padding: Padding size between columns (default 0)
height: Override graph height
label_length: Force X axis label string size, may truncate label
show_min_max: Display Min and Max values on the left of the graph
show_title: Display graph title (if any)
show_data_range: Display X axis data range
"""
def __plot(graph):
def get_padding_str(label, value):
padding_str = ''
if len(label) < label_length:
diff = label_length - len(label)
padding_str = ' ' * diff
padding_str2 = ''
if len(str(value)) < m:
diff = m - len(str(value))
padding_str2 = ' ' * diff
return '%s%s' % (padding_str,padding_str2)
out = zip(*graph.strings)
out.reverse()
if graph.title and show_title:
print graph.title
lines = [sep.join(a) for a in out]
if show_min_max:
lines[0] = lines[0] + " -- Max: %s" % str(max(graph.data))
lines[-1] = lines[-1] + " -- Min %s" % str(min(graph.data))
print '\n'.join(lines)
if graph.labels and show_x_axis:
print (u'%s' % x_sep.join(['<%s>[%s]%s'
% (label[:label_length], str(v),
get_padding_str(label, v))
for label, v in zip(graph.labels, graph.data)]))
if show_data_range and graph.labels:
print 'Data range: %s - %s' % (graph.first_x, graph.last_x)
graph.clean_range()
if head:
graph.head = head
if tail:
graph.tail = tail
if height:
graph.height = height
if label_length < 1:
label_length = 4
max_label_length = max(map(len, graph.labels or ['']))
if max_label_length < label_length:
label_length = max_label_length
sep = ''
if padding >= 1:
sep = ' ' * padding
m = max(map(len, map(str, graph.data))) # length of longest value
x_sep = ' '
if show_x_axis and graph.labels:
# 2('[])' + 2('<>') + 1 space
sep = ' ' * (label_length + 1 + 2 + m + 2)
__plot(graph) | show_x_axis: Display X axis
head: Show first [head:] elements
tail: Show last [-tail:] elements
padding: Padding size between columns (default 0)
height: Override graph height
label_length: Force X axis label string size, may truncate label
show_min_max: Display Min and Max values on the left of the graph
show_title: Display graph title (if any)
show_data_range: Display X axis data range |
def multi_future(
children: Union[List[_Yieldable], Dict[Any, _Yieldable]],
quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
) -> "Union[Future[List], Future[Dict]]":
"""Wait for multiple asynchronous futures in parallel.
Since Tornado 6.0, this function is exactly the same as `multi`.
.. versionadded:: 4.0
.. versionchanged:: 4.2
If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. deprecated:: 4.3
Use `multi` instead.
"""
if isinstance(children, dict):
keys = list(children.keys()) # type: Optional[List]
children_seq = children.values() # type: Iterable
else:
keys = None
children_seq = children
children_futs = list(map(convert_yielded, children_seq))
assert all(is_future(i) or isinstance(i, _NullFuture) for i in children_futs)
unfinished_children = set(children_futs)
future = _create_future()
if not children_futs:
future_set_result_unless_cancelled(future, {} if keys is not None else [])
def callback(fut: Future) -> None:
unfinished_children.remove(fut)
if not unfinished_children:
result_list = []
for f in children_futs:
try:
result_list.append(f.result())
except Exception as e:
if future.done():
if not isinstance(e, quiet_exceptions):
app_log.error(
"Multiple exceptions in yield list", exc_info=True
)
else:
future_set_exc_info(future, sys.exc_info())
if not future.done():
if keys is not None:
future_set_result_unless_cancelled(
future, dict(zip(keys, result_list))
)
else:
future_set_result_unless_cancelled(future, result_list)
listening = set() # type: Set[Future]
for f in children_futs:
if f not in listening:
listening.add(f)
future_add_done_callback(f, callback)
return future | Wait for multiple asynchronous futures in parallel.
Since Tornado 6.0, this function is exactly the same as `multi`.
.. versionadded:: 4.0
.. versionchanged:: 4.2
If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. deprecated:: 4.3
Use `multi` instead. |
def _ComputeHash( key, seed = 0x0 ):
"""Computes the hash of the value passed using MurmurHash3 algorithm with the seed value.
"""
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 4 )
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in xrange( 0, nblocks * 4, 4 ):
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = c1 * k1 & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined _ROTL32
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size != 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # _ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
return fmix( h1 ^ length ) | Computes the hash of the value passed using MurmurHash3 algorithm with the seed value. |
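A hedged example (the surrounding code targets Python 2, given the use of xrange): the key is treated as an indexable sequence of byte values, so a bytearray works directly, and the result is an unsigned 32-bit MurmurHash3 digest.

digest = _ComputeHash(bytearray(b'hello world'), seed=0)
print(hex(digest))  # some value between 0x0 and 0xffffffff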
def interfaces():
"""
Gets the network interfaces on this server.
:returns: list of network interfaces
"""
results = []
if not sys.platform.startswith("win"):
net_if_addrs = psutil.net_if_addrs()
for interface in sorted(net_if_addrs.keys()):
ip_address = ""
mac_address = ""
netmask = ""
interface_type = "ethernet"
for addr in net_if_addrs[interface]:
# get the first available IPv4 address only
if addr.family == socket.AF_INET:
ip_address = addr.address
netmask = addr.netmask
if addr.family == psutil.AF_LINK:
mac_address = addr.address
if interface.startswith("tap"):
# found no way to reliably detect a TAP interface
interface_type = "tap"
results.append({"id": interface,
"name": interface,
"ip_address": ip_address,
"netmask": netmask,
"mac_address": mac_address,
"type": interface_type})
else:
try:
service_installed = True
if not _check_windows_service("npf") and not _check_windows_service("npcap"):
service_installed = False
else:
results = get_windows_interfaces()
except ImportError:
message = "pywin32 module is not installed, please install it on the server to get the available interface names"
raise aiohttp.web.HTTPInternalServerError(text=message)
except Exception as e:
log.error("uncaught exception {type}".format(type=type(e)), exc_info=1)
raise aiohttp.web.HTTPInternalServerError(text="uncaught exception: {}".format(e))
if service_installed is False:
raise aiohttp.web.HTTPInternalServerError(text="The Winpcap or Npcap is not installed or running")
# This interface have special behavior
for result in results:
result["special"] = False
for special_interface in ("lo", "vmnet", "vboxnet", "docker", "lxcbr",
"virbr", "ovs-system", "veth", "fw", "p2p",
"bridge", "vmware", "virtualbox", "gns3"):
if result["name"].lower().startswith(special_interface):
result["special"] = True
for special_interface in ("-nic"):
if result["name"].lower().endswith(special_interface):
result["special"] = True
return results | Gets the network interfaces on this server.
:returns: list of network interfaces |
def get(self, default=None):
"""
Get the result of the Job, or return *default* if the job is not finished
or errored. This function will never explicitly raise an exception. Note
that the *default* value is also returned if the job was cancelled.
# Arguments
default (any): The value to return when the result can not be obtained.
"""
if not self.__cancelled and self.__state == Job.SUCCESS:
return self.__result
else:
return default | Get the result of the Job, or return *default* if the job is not finished
or errored. This function will never explicitly raise an exception. Note
that the *default* value is also returned if the job was cancelled.
# Arguments
default (any): The value to return when the result can not be obtained. |
def SecurityCheck(self, func, request, *args, **kwargs):
"""Check if access should be allowed for the request."""
try:
auth_header = request.headers.get("Authorization", "")
if not auth_header.startswith(self.BEARER_PREFIX):
raise ValueError("JWT token is missing.")
token = auth_header[len(self.BEARER_PREFIX):]
auth_domain = config.CONFIG["AdminUI.firebase_auth_domain"]
project_id = auth_domain.split(".")[0]
idinfo = id_token.verify_firebase_token(
token, request, audience=project_id)
if idinfo["iss"] != self.SECURE_TOKEN_PREFIX + project_id:
raise ValueError("Wrong issuer.")
request.user = idinfo["email"]
except ValueError as e:
# For a homepage, just do a pass-through, otherwise JS code responsible
# for the Firebase auth won't ever get executed. This approach is safe,
# because wsgiapp.HttpRequest object will raise on any attempt to
# access uninitialized HttpRequest.user attribute.
if request.path != "/":
return self.AuthError("JWT token validation failed: %s" % e)
return func(request, *args, **kwargs) | Check if access should be allowed for the request. |
def unpack(data):
""" return length, content
"""
length = struct.unpack('i', data[0:HEADER_SIZE])
return length[0], data[HEADER_SIZE:] | return length, content |
def _get_address_override(endpoint_type=PUBLIC):
"""Returns any address overrides that the user has defined based on the
endpoint type.
Note: this function allows for the service name to be inserted into the
address if the user specifies {service_name}.somehost.org.
:param endpoint_type: the type of endpoint to retrieve the override
value for.
:returns: any endpoint address or hostname that the user has overridden
or None if an override is not present.
"""
override_key = ADDRESS_MAP[endpoint_type]['override']
addr_override = config(override_key)
if not addr_override:
return None
else:
return addr_override.format(service_name=service_name()) | Returns any address overrides that the user has defined based on the
endpoint type.
Note: this function allows for the service name to be inserted into the
address if the user specifies {service_name}.somehost.org.
:param endpoint_type: the type of endpoint to retrieve the override
value for.
:returns: any endpoint address or hostname that the user has overridden
or None if an override is not present. |
def get_siblings_treepos(self, treepos):
"""Given a treeposition, return the treepositions of its siblings."""
parent_pos = self.get_parent_treepos(treepos)
siblings_treepos = []
if parent_pos is not None:
for child_treepos in self.get_children_treepos(parent_pos):
if child_treepos != treepos:
siblings_treepos.append(child_treepos)
return siblings_treepos | Given a treeposition, return the treepositions of its siblings. |
def sents(self, fileids=None) -> Generator[str, str, None]:
"""
:param fileids:
:return: A generator of sentences
"""
for para in self.paras(fileids):
sentences = self._sent_tokenizer.tokenize(para)
for sentence in sentences:
yield sentence | :param fileids:
:return: A generator of sentences |
def any_hook(*hook_patterns):
"""
Assert that the currently executing hook matches one of the given patterns.
Each pattern will match one or more hooks, and can use the following
special syntax:
* ``db-relation-{joined,changed}`` can be used to match multiple hooks
(in this case, ``db-relation-joined`` and ``db-relation-changed``).
* ``{provides:mysql}-relation-joined`` can be used to match a relation
hook by the role and interface instead of the relation name. The role
must be one of ``provides``, ``requires``, or ``peer``.
* The previous two can be combined, of course: ``{provides:mysql}-relation-{joined,changed}``
"""
current_hook = hookenv.hook_name()
# expand {role:interface} patterns
i_pat = re.compile(r'{([^:}]+):([^}]+)}')
hook_patterns = _expand_replacements(i_pat, hookenv.role_and_interface_to_relations, hook_patterns)
# expand {A,B,C,...} patterns
c_pat = re.compile(r'{((?:[^:,}]+,?)+)}')
hook_patterns = _expand_replacements(c_pat, lambda v: v.split(','), hook_patterns)
return current_hook in hook_patterns | Assert that the currently executing hook matches one of the given patterns.
Each pattern will match one or more hooks, and can use the following
special syntax:
* ``db-relation-{joined,changed}`` can be used to match multiple hooks
(in this case, ``db-relation-joined`` and ``db-relation-changed``).
* ``{provides:mysql}-relation-joined`` can be used to match a relation
hook by the role and interface instead of the relation name. The role
must be one of ``provides``, ``requires``, or ``peer``.
* The previous two can be combined, of course: ``{provides:mysql}-relation-{joined,changed}`` |
def enqueue(trg_queue, item_f, *args, **kwargs):
'''Enqueue the contents of a file, or file-like object, file-descriptor or
the contents of a file at an address (e.g. '/my/file') queue with
arbitrary arguments, enqueue is to venqueue what printf is to vprintf
'''
return venqueue(trg_queue, item_f, args, **kwargs) | Enqueue the contents of a file, or file-like object, file-descriptor or
the contents of a file at an address (e.g. '/my/file') queue with
arbitrary arguments, enqueue is to venqueue what printf is to vprintf |
def load_data(self, path):
"""Load isoelastics from a text file
The text file is loaded with `numpy.loadtxt` and must have
three columns, representing the two data columns and the
elastic modulus with units defined in `definitions.py`.
The file header must have a section defining meta data of the
content like so:
# [...]
#
# - column 1: area_um
# - column 2: deform
# - column 3: emodulus
# - channel width [um]: 20
# - flow rate [ul/s]: 0.04
# - viscosity [mPa*s]: 15
# - method: analytical
#
# [...]
Parameters
----------
path: str
Path to a isoelastics text file
"""
path = pathlib.Path(path).resolve()
# Get metadata
meta = {}
with path.open() as fd:
while True:
line = fd.readline().strip()
if line.startswith("# - "):
line = line.strip("#- ")
var, val = line.split(":")
if val.strip().replace(".", "").isdigit():
# channel width, flow rate, viscosity
val = float(val)
else:
# columns, calculation
val = val.strip().lower()
meta[var.strip()] = val
elif line and not line.startswith("#"):
break
assert meta["column 1"] in dfn.scalar_feature_names
assert meta["column 2"] in dfn.scalar_feature_names
assert meta["column 3"] == "emodulus"
assert meta["method"] in VALID_METHODS
# Load isoelasics
with path.open("rb") as isfd:
isodata = np.loadtxt(isfd)
# Slice out individual isoelastics
emoduli = np.unique(isodata[:, 2])
isoel = []
for emod in emoduli:
where = isodata[:, 2] == emod
isoel.append(isodata[where])
# Add isoelastics to instance
self.add(isoel=isoel,
col1=meta["column 1"],
col2=meta["column 2"],
channel_width=meta["channel width [um]"],
flow_rate=meta["flow rate [ul/s]"],
viscosity=meta["viscosity [mPa*s]"],
method=meta["method"]) | Load isoelastics from a text file
The text file is loaded with `numpy.loadtxt` and must have
three columns, representing the two data columns and the
elastic modulus with units defined in `definitions.py`.
The file header must have a section defining meta data of the
content like so:
# [...]
#
# - column 1: area_um
# - column 2: deform
# - column 3: emodulus
# - channel width [um]: 20
# - flow rate [ul/s]: 0.04
# - viscosity [mPa*s]: 15
# - method: analytical
#
# [...]
Parameters
----------
path: str
Path to a isoelastics text file |
def filter_seq(seq):
'''Examines unreserved sequences to see if they are prone to mutation. This
currently ignores solely-power-of-2 guides with b > 3'''
if seq.res:
return None
n = nt.Factors(seq.factors)
guide, s, t = aq.canonical_form(n)
seq.guide = guide
# The target_tau for the composite is at most the class minus extant prime factor count
cls = aq.get_class(guide=guide)
num_larges = seq.factors.count('P')
upper_bound_tau = cls - num_larges - len(t)
if cls < 2 or upper_bound_tau < 2: # Cheap tests to eliminate almost all sequences
return None
# Next we ignore sequences whose guide is solely a power of 2 greater than 3
v = nt.Factors({p: a for p, a in guide.items() if p != 2 and a > 0})
if int(v) == 1 and cls > 3:
return None
# This condition greatly reduces fdb load, but excludes a lot of sequences
if not aq.is_driver(guide=guide):
return None
return n, guide | Examines unreserved sequences to see if they are prone to mutation. This
currently ignores solely-power-of-2 guides with b > 3 |
def dbg_repr(self, max_display=10):
"""
Debugging output of this slice.
:param max_display: The maximum number of SimRun slices to show.
:return: A string representation.
"""
s = repr(self) + "\n"
if len(self.chosen_statements) > max_display:
s += "%d SimRuns in program slice, displaying %d.\n" % (len(self.chosen_statements), max_display)
else:
s += "%d SimRuns in program slice.\n" % len(self.chosen_statements)
# Pretty-print the first `max_display` basic blocks
if max_display is None:
# Output all
run_addrs = sorted(self.chosen_statements.keys())
else:
# Only output the first "max_display" ones
run_addrs = sorted(self.chosen_statements.keys())[ : max_display]
for run_addr in run_addrs:
s += self.dbg_repr_run(run_addr) + "\n"
return s | Debugging output of this slice.
:param max_display: The maximum number of SimRun slices to show.
:return: A string representation. |
def search_shell(self):
"""
Looks for a shell service
"""
with self._lock:
if self._shell is not None:
# A shell is already there
return
reference = self._context.get_service_reference(SERVICE_SHELL)
if reference is not None:
self.set_shell(reference) | Looks for a shell service |
def origin(self):
"""Return an URL with scheme, host and port parts only.
user, password, path, query and fragment are removed.
"""
# TODO: add a keyword-only option for keeping user/pass maybe?
if not self.is_absolute():
raise ValueError("URL should be absolute")
if not self._val.scheme:
raise ValueError("URL should have scheme")
v = self._val
netloc = self._make_netloc(None, None, v.hostname, v.port, encode=False)
val = v._replace(netloc=netloc, path="", query="", fragment="")
        return URL(val, encoded=True) | Return a URL with scheme, host and port parts only.
user, password, path, query and fragment are removed. |
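A short usage sketch, assuming this is the origin() method of a yarl-style URL class; the host and credentials are placeholders.

from yarl import URL

url = URL("http://user:secret@example.com:8080/path?q=1#frag")
print(url.origin())   # http://example.com:8080 -- credentials, path, query and fragment dropped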
def mpraw_as_np(shape, dtype):
"""Construct a numpy array of the specified shape and dtype for which the
underlying storage is a multiprocessing RawArray in shared memory.
Parameters
----------
shape : tuple
Shape of numpy array
dtype : data-type
Data type of array
Returns
-------
arr : ndarray
Numpy array
"""
    sz = int(np.prod(shape))
csz = sz * np.dtype(dtype).itemsize
raw = mp.RawArray('c', csz)
return np.frombuffer(raw, dtype=dtype, count=sz).reshape(shape) | Construct a numpy array of the specified shape and dtype for which the
underlying storage is a multiprocessing RawArray in shared memory.
Parameters
----------
shape : tuple
Shape of numpy array
dtype : data-type
Data type of array
Returns
-------
arr : ndarray
Numpy array |
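A hedged usage sketch: with the POSIX 'fork' start method the RawArray backing store is inherited by the child, so in-place writes made by the worker are visible to the parent. Names other than mpraw_as_np are illustrative.

import multiprocessing as mp
import numpy as np

def _fill(arr):
    arr[:] = 1.0   # writes land in the shared RawArray buffer

if __name__ == '__main__':
    mp.set_start_method('fork')          # sharing-by-inheritance; POSIX only
    shared = mpraw_as_np((4, 4), np.float64)
    worker = mp.Process(target=_fill, args=(shared,))
    worker.start()
    worker.join()
    print(shared.sum())                  # 16.0 -- the child's writes are visible here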
def tablib_export_action(modeladmin, request, queryset, file_type="xls"):
"""
Allow the user to download the current filtered list of items
:param file_type:
One of the formats supported by tablib (e.g. "xls", "csv", "html",
etc.)
"""
dataset = SimpleDataset(queryset, headers=None)
filename = '{0}.{1}'.format(
smart_str(modeladmin.model._meta.verbose_name_plural), file_type)
response_kwargs = {
'content_type': get_content_type(file_type)
}
response = HttpResponse(getattr(dataset, file_type), **response_kwargs)
response['Content-Disposition'] = 'attachment; filename={0}'.format(
filename)
return response | Allow the user to download the current filtered list of items
:param file_type:
One of the formats supported by tablib (e.g. "xls", "csv", "html",
etc.) |
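A possible wiring into the Django admin, with a placeholder app and model; the real project may register the action differently.

from django.contrib import admin
from myapp.models import Book            # placeholder model

def export_as_csv(modeladmin, request, queryset):
    return tablib_export_action(modeladmin, request, queryset, file_type="csv")
export_as_csv.short_description = "Export selected rows as CSV"

class BookAdmin(admin.ModelAdmin):
    actions = [export_as_csv]

admin.site.register(Book, BookAdmin)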
def execute_prepared(self, prepared_statement, multi_row_parameters):
"""
:param prepared_statement: A PreparedStatement instance
:param multi_row_parameters: A list/tuple containing list/tuples of parameters (for multiple rows)
"""
self._check_closed()
# Convert parameters into a generator producing lists with parameters as named tuples (incl. some meta data):
parameters = prepared_statement.prepare_parameters(multi_row_parameters)
while parameters:
request = RequestMessage.new(
self.connection,
RequestSegment(
message_types.EXECUTE,
(StatementId(prepared_statement.statement_id),
Parameters(parameters))
)
)
reply = self.connection.send_request(request)
parts = reply.segments[0].parts
function_code = reply.segments[0].function_code
if function_code == function_codes.SELECT:
self._handle_select(parts, prepared_statement.result_metadata_part)
elif function_code in function_codes.DML:
self._handle_upsert(parts, request.segments[0].parts[1].unwritten_lobs)
elif function_code == function_codes.DDL:
# No additional handling is required
pass
elif function_code in (function_codes.DBPROCEDURECALL, function_codes.DBPROCEDURECALLWITHRESULT):
self._handle_dbproc_call(parts, prepared_statement._params_metadata) # resultset metadata set in prepare
else:
raise InterfaceError("Invalid or unsupported function code received: %d" % function_code) | :param prepared_statement: A PreparedStatement instance
:param multi_row_parameters: A list/tuple containing list/tuples of parameters (for multiple rows) |
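A pyhdb-style call sequence for driving this method; the table, column names and data are placeholders, and `connection` is assumed to be an open pyhdb connection.

cursor = connection.cursor()
statement_id = cursor.prepare("INSERT INTO CUSTOMERS (ID, NAME) VALUES (?, ?)")
ps = cursor.get_prepared_statement(statement_id)
cursor.execute_prepared(ps, [(1, "Alice"), (2, "Bob")])   # one tuple per row
connection.commit()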
def reset_network(message):
"""Resets the users network to make changes take effect"""
for command in settings.RESTART_NETWORK:
try:
subprocess.check_call(command)
        except (OSError, subprocess.CalledProcessError):
pass
    print(message) | Resets the user's network to make changes take effect |
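Each entry of settings.RESTART_NETWORK is handed straight to subprocess.check_call, so it is a list of argv tokens; the commands below are illustrative guesses, not taken from the source.

RESTART_NETWORK = [
    ["sudo", "systemctl", "restart", "NetworkManager"],      # typical Linux desktop
    ["sudo", "systemctl", "restart", "systemd-resolved"],
]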
def conn_has_method(conn, method_name):
'''
Find if the provided connection object has a specific method
'''
if method_name in dir(conn):
return True
log.error('Method \'%s\' not yet supported!', method_name)
return False | Find if the provided connection object has a specific method |
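An illustrative guard before calling an optional driver method; the connection object and method name are assumptions (libcloud-style driver).

if conn_has_method(conn, 'ex_list_networks'):
    networks = conn.ex_list_networks()
else:
    networks = []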
def with_trailing_args(self, *arguments):
"""
Return new Command object that will be run with specified
trailing arguments.
"""
new_command = copy.deepcopy(self)
new_command._trailing_args = [str(arg) for arg in arguments]
return new_command | Return new Command object that will be run with specified
trailing arguments. |
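A usage sketch; `Command` stands in for whatever builder class defines this method, and the constructor arguments are hypothetical.

base = Command("rsync", "-av")                      # hypothetical constructor
cmd = base.with_trailing_args("src/", "dest/")      # new object; `base` is left unchanged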
def _link_package_versions(self, link, search):
"""Return an InstallationCandidate or None"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext)
return
if "binary" not in search.formats and ext == wheel_ext:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied)
return
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return
if (pkg_resources.safe_name(wheel.name).lower() !=
search.canonical):
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if not wheel.supported():
self._log_skipped_link(
link, 'it is not compatible with this Python')
return
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for
# binary wheels on linux that deals with the inherent problems
# of binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if (
(
not platform.startswith('win') and not
platform.startswith('macosx') and not
platform == 'cli'
) and
comes_from is not None and
urllib_parse.urlparse(
comes_from.url
).netloc.endswith(PyPI.netloc)):
if not wheel.supported(tags=supported_tags_noarch):
self._log_skipped_link(
link,
"it is a pypi-hosted binary "
"Wheel on an unsupported platform",
)
return
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != wheel_ext:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied)
return
if not version:
version = egg_info_matches(egg_info, search.supplied, link)
if version is None:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if (link.internal is not None and not
link.internal and not
normalize_name(search.supplied).lower()
in self.allow_external and not
self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
self._log_skipped_link(link, 'it is externally hosted')
self.need_warn_external = True
return
if (link.verifiable is not None and not
link.verifiable and not
(normalize_name(search.supplied).lower()
in self.allow_unverified)):
# We have a link that we are sure we cannot verify its integrity,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
self._log_skipped_link(
link, 'it is an insecure and unverifiable file')
self.need_warn_unverified = True
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link) | Return an InstallationCandidate or None |
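A standalone sketch of the trailing "-pyX.Y" handling near the end of the method; the regex is assumed to mirror pip's _py_version_re rather than imported from pip.

import re
import sys

_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')

version = "1.2.3-py2.7"
match = _py_version_re.search(version)
if match:
    version = version[:match.start()]
    if match.group(1) != sys.version[:3]:
        version = None            # built for a different Python -> candidate skipped
print(version)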