code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) |
---|---|
def run(self, **kwargs):
"""
Does the magic!
"""
logger.info('UpdateLocationsIfNecessaryTask was called')
# read last ip count
try:
with open(app_settings.IP_ASSEMBLER_IP_CHANGED_FILE, 'r') as f:
content_list = f.readlines()
if len(content_list) == 0:
ip_count_old = -1
else:
ip_count_old = int(content_list[0])
except IOError:
ip_count_old = -1
logger.info('read IP count of %(count)d' % {'count': ip_count_old})
# if IPs have significantly changed, update the locations
ip_count_now = IP.objects.count()
if ip_count_old == -1 or ip_count_now > ip_count_old + app_settings.IP_ASSEMBLER_IP_CHANGED_THRESHOLD:
logger.info('Checking IP counts, last: %(ip_count_old)d - now: %(ip_count_now)d' % {
'ip_count_old': ip_count_old,
'ip_count_now': ip_count_now
})
# call the updater task
UpdateHtaccessLocationsTask().delay()
# write the new count to the file
try:
open(app_settings.IP_ASSEMBLER_IP_CHANGED_FILE, 'w').close()
with open(app_settings.IP_ASSEMBLER_IP_CHANGED_FILE, 'w') as f:
f.write(str(ip_count_now))
except IOError:
logger.exception('unable to write to file %(file_path)s' % {'file_path': app_settings.IP_ASSEMBLER_IP_CHANGED_FILE})
else:
logger.info('nothing to do here') | Does the magic! |
def info_post_request(self, node, info):
"""Run when a request to create an info is complete."""
for agent in node.neighbors():
node.transmit(what=info, to_whom=agent) | Run when a request to create an info is complete. |
def _parse_args():
"""Parse and return command line arguments."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=_CliFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='Enable verbose output.')
fb_group = parser.add_argument_group('FogBugz arguments')
fb_group.add_argument(
'-u', '--url', help=(
'URL for bugzscout requests to be sent. Should be something '
'like .../scoutSubmit.asp.'))
fb_group.add_argument(
'--user', help='User to designate when submitting via bugzscout.')
fb_group.add_argument(
'--project', help='Fogbugz project to file cases under.')
fb_group.add_argument(
'--area', help='Fogbugz area to file cases under.')
error_group = parser.add_argument_group('error arguments')
error_group.add_argument('-e', '--extra',
help='Extra data to send with error.')
error_group.add_argument('--default-message',
help='Set default message if case is new.')
error_group.add_argument('description',
help=('Description of error. Will be matched '
'against existing cases.'))
parser.set_defaults(**_defaults())
return parser.parse_args() | Parse and return command line arguments. |
def collect_segment_partitions(self):
"""Return a dict of segments partitions, keyed on the name of the parent partition
"""
from collections import defaultdict
# Group the segments by their parent partition name, which is the
# same name, but without the segment.
partitions = defaultdict(set)
for p in self.dataset.partitions:
if p.type == p.TYPE.SEGMENT:
name = p.identity.name
name.segment = None
partitions[name].add(p)
return partitions | Return a dict of segment partitions, keyed on the name of the parent partition |
def submit(self, spec):
"""Submit a new skein application.
Parameters
----------
spec : ApplicationSpec, str, or dict
A description of the application to run. Can be an
``ApplicationSpec`` object, a path to a yaml/json file, or a
dictionary description of an application specification.
Returns
-------
app_id : str
The id of the submitted application.
"""
spec = ApplicationSpec._from_any(spec)
resp = self._call('submit', spec.to_protobuf())
return resp.id | Submit a new skein application.
Parameters
----------
spec : ApplicationSpec, str, or dict
A description of the application to run. Can be an
``ApplicationSpec`` object, a path to a yaml/json file, or a
dictionary description of an application specification.
Returns
-------
app_id : str
The id of the submitted application. |
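A minimal usage sketch for the method above, assuming the skein package is installed and a YARN cluster is reachable; 'app_spec.yaml' is a hypothetical spec file.
import skein

client = skein.Client()                    # start or connect to the skein driver
app_id = client.submit('app_spec.yaml')    # a path, dict, or ApplicationSpec is accepted
print('submitted application', app_id)
client.close()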
def find_lexer_class_for_filename(_fn, code=None):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in itervalues(LEXERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if _fn_matches(fn, filename):
matches.append((cls, filename))
if sys.version_info > (3,) and isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = guess_decode(code)
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
bonus = '*' not in filename and 0.5 or 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus, cls.__name__
return cls.priority + bonus, cls.__name__
if matches:
matches.sort(key=get_rating)
# print "Possible lexers, after sort:", matches
return matches[-1][0] | Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found. |
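An illustrative lookup sketch, assuming a recent Pygments release that exports this helper from pygments.lexers; the file names and source snippet are made up.
from pygments.lexers import find_lexer_class_for_filename

source = 'def main():\n    return 0\n'
lexer_cls = find_lexer_class_for_filename('example.py', code=source)
print(lexer_cls.__name__)                              # PythonLexer
print(find_lexer_class_for_filename('unknown.zzz'))    # None when nothing matches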
def create_object_if_not_exists(self, alias, name=None, *args, **kwargs):
"""Constructs the type with the given alias using the given args and kwargs.
NB: aliases may be the alias' object type itself if that type is known.
:API: public
:param alias: Either the type alias or the type itself.
:type alias: string|type
:param *args: These pass through to the underlying callable object.
:param **kwargs: These pass through to the underlying callable object.
:returns: The created object, or an existing object with the same `name`.
"""
if name is None:
raise ValueError("Method requires an object `name`.")
obj_creator = functools.partial(self.create_object,
alias,
name=name,
*args,
**kwargs)
return self._storage.add_if_not_exists(name, obj_creator) | Constructs the type with the given alias using the given args and kwargs.
NB: aliases may be the alias' object type itself if that type is known.
:API: public
:param alias: Either the type alias or the type itself.
:type alias: string|type
:param *args: These pass through to the underlying callable object.
:param **kwargs: These pass through to the underlying callable object.
:returns: The created object, or an existing object with the same `name`. |
def _send_msg(self, header, payload):
"""send message to server"""
if self.verbose:
print('->', repr(header))
print('..', repr(payload))
assert header.payload == len(payload)
try:
sent = self.socket.send(header + payload)
except IOError as err:
raise ConnError(*err.args)
# FIXME FIXME FIXME:
# investigate under which situations socket.send should be retried
# instead of aborted.
# FIXME FIXME FIXME
if sent < len(header + payload):
raise ShortWrite(sent, len(header + payload))
assert sent == len(header + payload), sent | send message to server |
def get_dev_details(ip_address, auth, url):
"""Takes string input of IP address to issue RESTUL call to HP IMC\n
:param ip_address: string object of dotted decimal notation of IPv4 address
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: dictionary of device details
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_1 = get_dev_details('10.101.0.221', auth.creds, auth.url)
>>> assert type(dev_1) is dict
>>> assert 'sysName' in dev_1
>>> dev_2 = get_dev_details('8.8.8.8', auth.creds, auth.url)
Device not found
>>> assert type(dev_2) is str
"""
get_dev_details_url = "/imcrs/plat/res/device?resPrivilegeFilter=false&ip=" + \
str(ip_address) + "&start=0&size=1000&orderBy=id&desc=false&total=false"
f_url = url + get_dev_details_url
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=HEADERS)
# r.status_code
try:
if r.status_code == 200:
dev_details = (json.loads(r.text))
if len(dev_details) == 0:
print("Device not found")
return "Device not found"
elif type(dev_details['device']) == list:
for i in dev_details['device']:
if i['ip'] == ip_address:
dev_details = i
return dev_details
elif type(dev_details['device']) == dict:
return dev_details['device']
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_dev_details: An Error has occured" | Takes string input of IP address to issue RESTUL call to HP IMC\n
:param ip_address: string object of dotted decimal notation of IPv4 address
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: dictionary of device details
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.device import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> dev_1 = get_dev_details('10.101.0.221', auth.creds, auth.url)
>>> assert type(dev_1) is dict
>>> assert 'sysName' in dev_1
>>> dev_2 = get_dev_details('8.8.8.8', auth.creds, auth.url)
Device not found
>>> assert type(dev_2) is str |
def delete_user(self, username, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html>`_
:arg username: username
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
"""
if username in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'username'.")
return self.transport.perform_request(
"DELETE", _make_path("_security", "user", username), params=params
) | `<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html>`_
:arg username: username
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for' |
def parse(self, paramfile):
""" Read parameter file and set parameter values.
File should have python-like syntax. Full file name needed.
"""
with open(paramfile, 'r') as f:
for line in f.readlines():
line_clean = line.rstrip('\n').split('#')[0] # trim out comments and trailing cr
if line_clean and '=' in line: # use valid lines only
attribute, value = line_clean.split('=')
try:
value_eval = eval(value.strip())
except NameError:
value_eval = value.strip()
finally:
setattr(self, attribute.strip(), value_eval) | Read parameter file and set parameter values.
File should have python-like syntax. Full file name needed. |
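A standalone sketch of the same parsing rule, for illustration only: '#' starts a comment, values are eval'd when possible and kept as plain strings when eval raises NameError. The sample lines are made up.
example = """
nthread = 4          # eval succeeds -> int
scale   = 2.5        # eval succeeds -> float
mode    = production # eval raises NameError -> kept as the string 'production'
"""
params = {}
for line in example.splitlines():
    line_clean = line.rstrip('\n').split('#')[0]
    if line_clean and '=' in line_clean:
        attribute, value = line_clean.split('=')
        try:
            value_eval = eval(value.strip())
        except NameError:
            value_eval = value.strip()
        params[attribute.strip()] = value_eval
print(params)   # {'nthread': 4, 'scale': 2.5, 'mode': 'production'}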
def write(url, content, **args):
"""Put an object into a ftp URL."""
relay = urlparse.urlparse(args.pop('relay', 'lmtp://localhot'))
try:
smtplib_SMTPS = functools.partial(smtplib.SMTP_SSL,
keyfile=args.pop('keyfile', None),
certfile=args.pop('certfile', None))
except AttributeError:
def smtplib_SMTPS():
raise ValueError(relay.geturl())
filename = args.pop('filename', '')
content_type, encoding = mimetypes.guess_type(filename)
content_type = args.pop('content_type', content_type)
encoding = args.pop('content_encoding', encoding)
maintype, subtype = content_type.split('/')
content = content_types.get(content_type).format(content, **args)
content = content_encodings.get(encoding).encode(content)
message = {
'application': application.MIMEApplication,
'text': text.MIMEText}[maintype](content, subtype)
if filename:
message.set_param('filename', ('UTF-8', '', filename.decode('UTF-8')))
if encoding:
message['Content-Encoding'] = encoding
message['To'] = urllib.unquote(url.path)
for name, value in urlparse.parse_qsl(url.query):
message[name.replace('_', '-')] = value
if message['From'] is None:
username = os.environ.get('USERNAME')
username = os.environ.get('LOGNAME', username)
username = os.environ.get('USER', username)
message['From'] = '{}@{}'.format(username, socket.getfqdn())
# ``mailto`` scheme allows for a body param. We don't.
del message['body']
# Send the email.
client = {'smtp': smtplib.SMTP,
'lmtp': smtplib.LMTP,
'smtps': smtplib_SMTPS}[relay.scheme]()
client.connect(''.join([relay.hostname, relay.path]), relay.port)
if relay.username and relay.password:
client.login(relay.username, relay.password)
client.sendmail(message['From'], [message['To']], message.as_string())
client.quit() | Put an object into a mailto URL. |
def tap_and_hold(self, xcoord, ycoord):
"""
Touch down at given coordinates.
:Args:
- xcoord: X Coordinate to touch down.
- ycoord: Y Coordinate to touch down.
"""
self._actions.append(lambda: self._driver.execute(
Command.TOUCH_DOWN, {
'x': int(xcoord),
'y': int(ycoord)}))
return self | Touch down at given coordinates.
:Args:
- xcoord: X Coordinate to touch down.
- ycoord: Y Coordinate to touch down. |
def reddening(self,extval):
"""Compute the reddening for the given extinction.
.. math::
A(V) = R(V) \\; \\times \\; E(B-V)
\\textnormal{THRU} = 10^{-0.4 \\; A(V)}
.. note::
``self.litref`` is passed into ``ans.citation``.
Parameters
----------
extval : float
Value of :math:`E(B-V)` in magnitudes.
Returns
-------
ans : `~pysynphot.spectrum.ArraySpectralElement`
Extinction curve to apply to a source spectrum.
"""
T = 10.0**(-0.4*extval*self.obscuration)
ans = ExtinctionSpectralElement(wave=self.wave,
waveunits=self.waveunits,
throughput=T,
name='%s(EBV=%g)'%(self.name, extval))
ans.citation = self.litref
return ans | Compute the reddening for the given extinction.
.. math::
A(V) = R(V) \\; \\times \\; E(B-V)
\\textnormal{THRU} = 10^{-0.4 \\; A(V)}
.. note::
``self.litref`` is passed into ``ans.citation``.
Parameters
----------
extval : float
Value of :math:`E(B-V)` in magnitudes.
Returns
-------
ans : `~pysynphot.spectrum.ArraySpectralElement`
Extinction curve to apply to a source spectrum. |
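A plain-NumPy check of the throughput formula THRU = 10**(-0.4 * E(B-V) * obscuration); the obscuration values below are made up and stand in for self.obscuration.
import numpy as np

obscuration = np.array([3.1, 2.7, 2.3])   # hypothetical A(lambda)/E(B-V) curve
extval = 0.5                              # E(B-V) in magnitudes
throughput = 10.0 ** (-0.4 * extval * obscuration)
print(throughput)                         # approx. [0.240, 0.288, 0.347]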
def _generate_author_query(self, author_name):
"""Generates a query handling specifically authors.
Notes:
The match query is generic enough to return many results. Then, using the filter clause we truncate these
so that we imitate legacy's behaviour on returning more "exact" results. E.g. Searching for `Smith, John`
shouldn't return papers of 'Smith, Bob'.
Additionally, doing a ``match`` with ``"operator": "and"`` in order to be even more exact in our search, by
requiring that the ``full_name`` field contains both names.
"""
name_variations = [name_variation.lower()
for name_variation
in generate_minimal_name_variations(author_name)]
# When the query contains sufficient data, i.e. full names, e.g. ``Mele, Salvatore`` (and not ``Mele, S`` or
# ``Mele``) we can improve our filtering in order to filter out results containing records with authors that
# have the same non lastnames prefix, e.g. 'Mele, Samuele'.
if author_name_contains_fullnames(author_name):
specialized_author_filter = [
{
'bool': {
'must': [
{
'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: names_variation[0]}
},
generate_match_query(
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'],
names_variation[1],
with_operator_and=True
)
]
}
} for names_variation
in product(name_variations, name_variations)
]
else:
# In the case of initials or even single lastname search, filter with only the name variations.
specialized_author_filter = [
{'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: name_variation}}
for name_variation in name_variations
]
query = {
'bool': {
'filter': {
'bool': {
'should': specialized_author_filter
}
},
'must': {
'match': {
ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author']: author_name
}
}
}
}
return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query) | Generates a query that specifically handles authors.
Notes:
The match query is generic enough to return many results. Then, using the filter clause we truncate these
so that we imitate legacy's behaviour on returning more "exact" results. E.g. Searching for `Smith, John`
shouldn't return papers of 'Smith, Bob'.
Additionally, doing a ``match`` with ``"operator": "and"`` in order to be even more exact in our search, by
requiring that the ``full_name`` field contains both names. |
def bitop_or(self, dest, key, *keys):
"""Perform bitwise OR operations between strings."""
return self.execute(b'BITOP', b'OR', dest, key, *keys) | Perform bitwise OR operations between strings. |
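A hedged usage sketch assuming the aioredis 1.x API shown above and a Redis server on localhost; the key names are made up.
import asyncio
import aioredis

async def main():
    redis = await aioredis.create_redis_pool('redis://localhost')
    await redis.set('bits:a', b'\x0f')
    await redis.set('bits:b', b'\xf0')
    await redis.bitop_or('bits:or', 'bits:a', 'bits:b')
    print(await redis.get('bits:or'))     # b'\xff'
    redis.close()
    await redis.wait_closed()

asyncio.run(main())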
def remove_sample(self, md5):
"""Delete a specific sample"""
# Grab the sample
record = self.database[self.sample_collection].find_one({'md5': md5})
if not record:
return
# Delete it
print 'Deleting sample: %s (%.2f MB)...' % (record['md5'], record['length']/1024.0/1024.0)
self.database[self.sample_collection].remove({'md5': record['md5']})
self.gridfs_handle.delete(record['__grid_fs'])
# Print info
print 'Sample Storage: %.2f out of %.2f MB' % (self.sample_storage_size(), self.samples_cap) | Delete a specific sample |
def deleteMapTable(self, name, session):
"""
Remove duplicate map table if it exists
"""
duplicate_map_tables = session.query(MapTable).filter(MapTable.mapTableFile == self).filter(MapTable.name == name).all()
for duplicate_map_table in duplicate_map_tables:
if duplicate_map_table.indexMap:
session.delete(duplicate_map_table.indexMap)
session.delete(duplicate_map_table)
session.commit() | Remove duplicate map table if it exists |
def expect_keyword(parser, value):
# type: (Parser, str) -> Token
"""If the next token is a keyword with the given value, return that
token after advancing the parser. Otherwise, do not change the parser
state and return False."""
token = parser.token
if token.kind == TokenKind.NAME and token.value == value:
advance(parser)
return token
raise GraphQLSyntaxError(
parser.source,
token.start,
u'Expected "{}", found {}'.format(value, get_token_desc(token)),
) | If the next token is a keyword with the given value, return that
token after advancing the parser. Otherwise, do not change the parser
state and return False. |
def update_service_definitions(self, service_definitions):
"""UpdateServiceDefinitions.
[Preview API]
:param :class:`<VssJsonCollectionWrapper> <azure.devops.v5_0.location.models.VssJsonCollectionWrapper>` service_definitions:
"""
content = self._serialize.body(service_definitions, 'VssJsonCollectionWrapper')
self._send(http_method='PATCH',
location_id='d810a47d-f4f4-4a62-a03f-fa1860585c4c',
version='5.0-preview.1',
content=content) | UpdateServiceDefinitions.
[Preview API]
:param :class:`<VssJsonCollectionWrapper> <azure.devops.v5_0.location.models.VssJsonCollectionWrapper>` service_definitions: |
def has_nvme_ssd(system_obj):
"""Gets if the system has any drive as NVMe SSD drive
:param system_obj: The HPESystem object.
:returns True if system has SSD drives and protocol is NVMe.
"""
storage_value = False
storage_resource = _get_attribute_value_of(system_obj, 'storages')
if storage_resource is not None:
storage_value = _get_attribute_value_of(
storage_resource, 'has_nvme_ssd', default=False)
return storage_value | Gets whether the system has any NVMe SSD drive
:param system_obj: The HPESystem object.
:returns: True if the system has SSD drives and the protocol is NVMe. |
def _helper(result,
graph,
number_edges_remaining: int,
node_blacklist: Set[BaseEntity],
invert_degrees: Optional[bool] = None,
):
"""Help build a random graph.
:type result: networkx.Graph
:type graph: networkx.Graph
"""
original_node_count = graph.number_of_nodes()
log.debug('adding remaining %d edges', number_edges_remaining)
for _ in range(number_edges_remaining):
source, possible_step_nodes, c = None, set(), 0
while not source or not possible_step_nodes:
source = get_random_node(result, node_blacklist, invert_degrees=invert_degrees)
c += 1
if c >= original_node_count:
log.warning('infinite loop happening')
log.warning('source: %s', source)
log.warning('no grow: %s', node_blacklist)
return # Happens when after exhausting the connected components. Try increasing the number seed edges
if source is None:
continue # maybe do something else?
# Only keep targets in the original graph that aren't in the result graph
possible_step_nodes = set(graph[source]) - set(result[source])
if not possible_step_nodes:
node_blacklist.add(
source) # there aren't any possible nodes to step to, so try growing from somewhere else
step_node = random.choice(list(possible_step_nodes))
# it's not really a big deal which, but it might be possible to weight this by the utility of edges later
key, attr_dict = random.choice(list(graph[source][step_node].items()))
result.add_edge(source, step_node, key=key, **attr_dict) | Help build a random graph.
:type result: networkx.Graph
:type graph: networkx.Graph |
def search(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {}
for a in args:
k, v = a.split('=')
kwargs[k] = v
return self._paged_api_call(self.flickr.photos_search, kwargs) | Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)... |
def process_csxml_file(self, filename, interval=None, lazy=False):
"""Processes a filehandle to MedScan csxml input into INDRA
statements.
The CSXML format consists of a top-level `<batch>` root element
containing a series of `<doc>` (document) elements, in turn containing
`<sec>` (section) elements, and in turn containing `<sent>` (sentence)
elements.
Within the `<sent>` element, a series of additional elements appear in
the following order:
* `<toks>`, which contains a tokenized form of the sentence in its text
attribute
* `<textmods>`, which describes any preprocessing/normalization done to
the underlying text
* `<match>` elements, each of which contains one or more `<entity>`
elements, describing entities in the text with their identifiers.
The local IDs of each entities are given in the `msid` attribute of
this element; these IDs are then referenced in any subsequent SVO
elements.
* `<svo>` elements, representing subject-verb-object triples. SVO
elements with a `type` attribute of `CONTROL` represent normalized
regulation relationships; they often represent the normalized
extraction of the immediately preceding (but unnormalized SVO
element). However, in some cases there can be a "CONTROL" SVO
element without its parent immediately preceding it.
Parameters
----------
filename : string
The path to a Medscan csxml file.
interval : (start, end) or None
Select the interval of documents to read, starting with the
`start`th document and ending before the `end`th document. If
either is None, the value is considered undefined. If the value
exceeds the bounds of available documents, it will simply be
ignored.
lazy : bool
If True, only create a generator which can be used by the
`get_statements` method. If False, populate the statements list now.
"""
if interval is None:
interval = (None, None)
tmp_fname = tempfile.mktemp(os.path.basename(filename))
fix_character_encoding(filename, tmp_fname)
self.__f = open(tmp_fname, 'rb')
self._gen = self._iter_through_csxml_file_from_handle(*interval)
if not lazy:
for stmt in self._gen:
self.statements.append(stmt)
return | Processes a filehandle to MedScan csxml input into INDRA
statements.
The CSXML format consists of a top-level `<batch>` root element
containing a series of `<doc>` (document) elements, in turn containing
`<sec>` (section) elements, and in turn containing `<sent>` (sentence)
elements.
Within the `<sent>` element, a series of additional elements appear in
the following order:
* `<toks>`, which contains a tokenized form of the sentence in its text
attribute
* `<textmods>`, which describes any preprocessing/normalization done to
the underlying text
* `<match>` elements, each of which contains one or more `<entity>`
elements, describing entities in the text with their identifiers.
The local IDs of each entities are given in the `msid` attribute of
this element; these IDs are then referenced in any subsequent SVO
elements.
* `<svo>` elements, representing subject-verb-object triples. SVO
elements with a `type` attribute of `CONTROL` represent normalized
regulation relationships; they often represent the normalized
extraction of the immediately preceding (but unnormalized SVO
element). However, in some cases there can be a "CONTROL" SVO
element without its parent immediately preceding it.
Parameters
----------
filename : string
The path to a Medscan csxml file.
interval : (start, end) or None
Select the interval of documents to read, starting with the
`start`th document and ending before the `end`th document. If
either is None, the value is considered undefined. If the value
exceeds the bounds of available documents, it will simply be
ignored.
lazy : bool
If True, only create a generator which can be used by the
`get_statements` method. If False, populate the statements list now. |
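A heavily hedged usage sketch: the processor class name, import path and constructor are assumptions about INDRA's MedScan reader, and 'corpus.csxml' is a hypothetical file.
from indra.sources.medscan.processor import MedscanProcessor   # import path assumed

processor = MedscanProcessor()                                  # constructor signature assumed
processor.process_csxml_file('corpus.csxml', interval=(0, 100), lazy=False)
print('%d INDRA statements extracted' % len(processor.statements))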
def report_error(self, line_number, offset, text, check):
"""
Report an error, according to options.
"""
if options.quiet == 1 and not self.file_errors:
message(self.filename)
self.file_errors += 1
code = text[:4]
options.counters[code] = options.counters.get(code, 0) + 1
options.messages[code] = text[5:]
if options.quiet:
return
if options.testsuite:
base = os.path.basename(self.filename)[:4]
if base == code:
return
if base[0] == 'E' and code[0] == 'W':
return
if ignore_code(code):
return
if options.counters[code] == 1 or options.repeat:
message("%s:%s:%d: %s" %
(self.filename, line_number, offset + 1, text))
if options.show_source:
line = self.lines[line_number - 1]
message(line.rstrip())
message(' ' * offset + '^')
if options.show_pep8:
message(check.__doc__.lstrip('\n').rstrip()) | Report an error, according to options. |
def _prep_ssh(
self,
tgt,
fun,
arg=(),
timeout=None,
tgt_type='glob',
kwarg=None,
**kwargs):
'''
Prepare the arguments
'''
opts = copy.deepcopy(self.opts)
opts.update(kwargs)
if timeout:
opts['timeout'] = timeout
arg = salt.utils.args.condition_input(arg, kwarg)
opts['argv'] = [fun] + arg
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = arg
return salt.client.ssh.SSH(opts) | Prepare the arguments |
def gradient(self):
r"""Gradient of the KL functional.
The gradient of `KullbackLeibler` with ``prior`` :math:`g` is given
as
.. math::
\nabla F(x) = 1 - \frac{g}{x}.
The gradient is not defined in points where one or more components
are non-positive.
"""
functional = self
class KLGradient(Operator):
"""The gradient operator of this functional."""
def __init__(self):
"""Initialize a new instance."""
super(KLGradient, self).__init__(
functional.domain, functional.domain, linear=False)
def _call(self, x):
"""Apply the gradient operator to the given point.
The gradient is not defined in points where one or more
components are non-positive.
"""
if functional.prior is None:
return (-1.0) / x + 1
else:
return (-functional.prior) / x + 1
return KLGradient() | r"""Gradient of the KL functional.
The gradient of `KullbackLeibler` with ``prior`` :math:`g` is given
as
.. math::
\nabla F(x) = 1 - \frac{g}{x}.
The gradient is not defined in points where one or more components
are non-positive. |
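A plain-NumPy sketch of the gradient formula grad F(x) = 1 - g/x, mirroring the prior/no-prior branches above; the arrays are made up.
import numpy as np

x = np.array([0.5, 1.0, 2.0])
prior = np.array([1.0, 1.0, 4.0])
grad_no_prior = (-1.0) / x + 1        # prior is None -> 1 - 1/x
grad_with_prior = (-prior) / x + 1    # with prior g  -> 1 - g/x
print(grad_no_prior)                  # [-1.   0.   0.5]
print(grad_with_prior)                # [-1.   0.  -1. ]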
def __software_to_pkg_id(self, publisher, name, is_component, is_32bit):
'''
Determine the Package ID of a software/component using the
software/component ``publisher``, ``name``, whether it is a software or a
component, and whether it is 32bit or 64bit architecture.
Args:
publisher (str): Publisher of the software/component.
name (str): Name of the software.
is_component (bool): True if package is a component.
is_32bit (bool): True if the software/component is 32bit architecture.
Returns:
str: Package Id
'''
if publisher:
# remove , and lowercase as , are used as list separators
pub_lc = publisher.replace(',', '').lower()
else:
# remove , and lowercase
pub_lc = 'NoValue' # Capitals/Special Value
if name:
name_lc = name.replace(',', '').lower()
# remove , OR we do the URL Encode on chars we do not want e.g. \\ and ,
else:
name_lc = 'NoValue' # Capitals/Special Value
if is_component:
soft_type = 'comp'
else:
soft_type = 'soft'
if is_32bit:
soft_type += '32' # Tag only the 32bit only
default_pkg_id = pub_lc+'\\\\'+name_lc+'\\\\'+soft_type
# Check to see if class was initialise with pkg_obj with a method called
# to_pkg_id, and if so use it for the naming standard instead of the default
if self.__pkg_obj and hasattr(self.__pkg_obj, 'to_pkg_id'):
pkg_id = self.__pkg_obj.to_pkg_id(publisher, name, is_component, is_32bit)
if pkg_id:
return pkg_id
return default_pkg_id | Determine the Package ID of a software/component using the
software/component ``publisher``, ``name``, whether it is a software or a
component, and whether it is 32bit or 64bit architecture.
Args:
publisher (str): Publisher of the software/component.
name (str): Name of the software.
is_component (bool): True if package is a component.
is_32bit (bool): True if the software/component is 32bit architecture.
Returns:
str: Package Id |
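A small sketch of the default package-id layout built above; the publisher and name are made up and no pkg_obj override is involved.
publisher, name = 'Mozilla, Inc.', 'Firefox'
pub_lc = publisher.replace(',', '').lower()     # 'mozilla inc.'
name_lc = name.replace(',', '').lower()         # 'firefox'
soft_type = 'soft' + '32'                       # a 32-bit software entry
print(pub_lc + '\\\\' + name_lc + '\\\\' + soft_type)
# mozilla inc.\\firefox\\soft32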
def remove_range(self, start, end):
'''Remove a range by score.
'''
return self._sl.remove_range(
start, end, callback=lambda sc, value: self._dict.pop(value)) | Remove a range by score. |
def disassemble(qobj):
"""Dissasemble a qobj and return the circuits, run_config, and user header
Args:
qobj (Qobj): The input qobj object to dissasemble
Returns:
circuits (list): A list of quantum circuits
run_config (dict): The dist of the run config
user_qobj_header (dict): The dict of any user headers in the qobj
"""
run_config = qobj.config.to_dict()
user_qobj_header = qobj.header.to_dict()
circuits = _experiments_to_circuits(qobj)
return circuits, run_config, user_qobj_header | Disassemble a qobj and return the circuits, run_config, and user header
Args:
qobj (Qobj): The input qobj object to disassemble
Returns:
circuits (list): A list of quantum circuits
run_config (dict): The dict of the run config
user_qobj_header (dict): The dict of any user headers in the qobj |
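A hedged round-trip sketch; it assumes a Qiskit Terra release that still ships Qobj assembly (pre-1.0), where disassemble is importable from qiskit.assembler.
from qiskit import QuantumCircuit
from qiskit.compiler import assemble
from qiskit.assembler import disassemble

qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])

qobj = assemble(qc, shots=1024)
circuits, run_config, user_header = disassemble(qobj)
print(len(circuits), run_config.get('shots'))   # 1 1024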
def t_INDENTIFIER(t):
r'(\$?[_a-zA-Z][_a-zA-Z0-9]*)|(__[A-Z_]+__)'
if t.value in reserved:
t.type = t.value.upper()
if t.value in reservedMap:
t.value = reservedMap[t.value]
elif t.value in strStatment:
t.type = 'STATEMENT'
return t | r'(\$?[_a-zA-Z][_a-zA-Z0-9]*)|(__[A-Z_]+__) |
def updateAnomalyLikelihoods(anomalyScores,
params,
verbosity=0):
"""
Compute updated probabilities for anomalyScores using the given params.
:param anomalyScores: a list of records. Each record is a list with the
following three elements: [timestamp, value, score]
Example::
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
:param params: the JSON dict returned by estimateAnomalyLikelihoods
:param verbosity: integer controlling extent of printouts for debugging
:type verbosity: int
:returns: 3-tuple consisting of:
- likelihoods
numpy array of likelihoods, one for each aggregated point
- avgRecordList
list of averaged input records
- params
an updated JSON object containing the state of this metric.
"""
if verbosity > 3:
print("In updateAnomalyLikelihoods.")
print("Number of anomaly scores:", len(anomalyScores))
print("First 20:", anomalyScores[0:min(20, len(anomalyScores))])
print("Params:", params)
if len(anomalyScores) == 0:
raise ValueError("Must have at least one anomalyScore")
if not isValidEstimatorParams(params):
raise ValueError("'params' is not a valid params structure")
# For backward compatibility.
if "historicalLikelihoods" not in params:
params["historicalLikelihoods"] = [1.0]
# Compute moving averages of these new scores using the previous values
# as well as likelihood for these scores using the old estimator
historicalValues = params["movingAverage"]["historicalValues"]
total = params["movingAverage"]["total"]
windowSize = params["movingAverage"]["windowSize"]
aggRecordList = numpy.zeros(len(anomalyScores), dtype=float)
likelihoods = numpy.zeros(len(anomalyScores), dtype=float)
for i, v in enumerate(anomalyScores):
newAverage, historicalValues, total = (
MovingAverage.compute(historicalValues, total, v[2], windowSize)
)
aggRecordList[i] = newAverage
likelihoods[i] = tailProbability(newAverage, params["distribution"])
# Filter the likelihood values. First we prepend the historical likelihoods
# to the current set. Then we filter the values. We peel off the likelihoods
# to return and the last windowSize values to store for later.
likelihoods2 = params["historicalLikelihoods"] + list(likelihoods)
filteredLikelihoods = _filterLikelihoods(likelihoods2)
likelihoods[:] = filteredLikelihoods[-len(likelihoods):]
historicalLikelihoods = likelihoods2[-min(windowSize, len(likelihoods2)):]
# Update the estimator
newParams = {
"distribution": params["distribution"],
"movingAverage": {
"historicalValues": historicalValues,
"total": total,
"windowSize": windowSize,
},
"historicalLikelihoods": historicalLikelihoods,
}
assert len(newParams["historicalLikelihoods"]) <= windowSize
if verbosity > 3:
print("Number of likelihoods:", len(likelihoods))
print("First 20 likelihoods:", likelihoods[0:min(20, len(likelihoods))])
print("Leaving updateAnomalyLikelihoods.")
return (likelihoods, aggRecordList, newParams) | Compute updated probabilities for anomalyScores using the given params.
:param anomalyScores: a list of records. Each record is a list with the
following three elements: [timestamp, value, score]
Example::
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
:param params: the JSON dict returned by estimateAnomalyLikelihoods
:param verbosity: integer controlling extent of printouts for debugging
:type verbosity: int
:returns: 3-tuple consisting of:
- likelihoods
numpy array of likelihoods, one for each aggregated point
- avgRecordList
list of averaged input records
- params
an updated JSON object containing the state of this metric. |
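A hedged usage sketch: estimateAnomalyLikelihoods is assumed to live alongside this function (as the docstring states) and to return (likelihoods, avgRecordList, params); the records are synthetic.
import datetime
import random

records = [[datetime.datetime(2013, 8, 10) + datetime.timedelta(minutes=i),
            float(i % 10), random.random()]
           for i in range(500)]
_, _, params = estimateAnomalyLikelihoods(records[:400])
likelihoods, avg_records, params = updateAnomalyLikelihoods(records[400:], params)
print(likelihoods[:5])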
def select_many(self, *args):
'''
Select several instances from the instance pool. Query operators such as
where_eq(), order_by() or filter functions may be passed as optional
arguments.
'''
s = apply_query_operators(self.storage, args)
if isinstance(s, QuerySet):
return s
else:
return QuerySet(s) | Select several instances from the instance pool. Query operators such as
where_eq(), order_by() or filter functions may be passed as optional
arguments. |
def generate_kmers(seq, k=4):
"""Return a generator of all the unique substrings (k-mer or q-gram strings) within a sequence/string
Not efficient for large k and long strings.
Doesn't form substrings that are shorter than k, only exactly k-mers
Used for algorithms like UniqTag for genome unique identifier locality sensitive hashing.
jellyfish is a C implementation of k-mer counting
If seq is a string generate a sequence of k-mer string
If seq is a sequence of strings then generate a sequence of generators or sequences of k-mer strings
If seq is a sequence of sequences of strings generate a sequence of sequence of generators ...
Default k = 4 because that's the length of a gene base-pair?
>>> ' '.join(generate_kmers('AGATAGATAGACACAGAAATGGGACCACAC'))
'AGAT GATA ATAG TAGA AGAT GATA ATAG TAGA AGAC GACA ACAC CACA ACAG ... CCAC CACA ACAC'
"""
if isinstance(seq, basestring):
for i in range(len(seq) - k + 1):
yield seq[i:i + k]
elif isinstance(seq, (int, float, Decimal)):
for s in generate_kmers(str(seq)):
yield s
else:
for s in seq:
yield generate_kmers(s, k) | Return a generator of all the unique substrings (k-mer or q-gram strings) within a sequence/string
Not efficient for large k and long strings.
Doesn't form substrings that are shorter than k, only exactly k-mers
Used for algorithms like UniqTag for genome unique identifier locality sensitive hashing.
jellyfish is a C implementation of k-mer counting
If seq is a string generate a sequence of k-mer string
If seq is a sequence of strings then generate a sequence of generators or sequences of k-mer strings
If seq is a sequence of sequences of strings generate a sequence of sequence of generators ...
Default k = 4 because that's the length of a gene base-pair?
>>> ' '.join(generate_kmers('AGATAGATAGACACAGAAATGGGACCACAC'))
'AGAT GATA ATAG TAGA AGAT GATA ATAG TAGA AGAC GACA ACAC CACA ACAG ... CCAC CACA ACAC' |
def get_locations(self, url):
"""Get valid location header values from responses.
:param url: a URL address. If a HEAD request sent to it
fails because the address has invalid schema, times out
or there is a connection error, the generator yields nothing.
:returns: valid redirection addresses. If a request for
a redirection address fails, and the address is still a valid
URL string, it's included as the last yielded value. If it's
not, the previous value is the last one.
:raises InvalidURLError: if the argument is not a valid URL
"""
if not is_valid_url(url):
raise InvalidURLError('{} is not a valid URL'.format(url))
try:
response = self.session.head(url)
except (ConnectionError, InvalidSchema, Timeout):
return
try:
generator = self.session.resolve_redirects(
response,
response.request
)
for response in generator:
yield response.url
except InvalidURL:
pass
except (ConnectionError, InvalidSchema, Timeout) as error:
last_url = response.headers['location']
if isinstance(error, Timeout) or is_valid_url(last_url):
yield last_url | Get valid location header values from responses.
:param url: a URL address. If a HEAD request sent to it
fails because the address has invalid schema, times out
or there is a connection error, the generator yields nothing.
:returns: valid redirection addresses. If a request for
a redirection address fails, and the address is still a valid
URL string, it's included as the last yielded value. If it's
not, the previous value is the last one.
:raises InvalidURLError: if the argument is not a valid URL |
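A standalone sketch of the same idea using requests directly; the URL is illustrative and a network connection is required.
import requests

session = requests.Session()
response = session.head('http://example.com/short-link')
for hop in session.resolve_redirects(response, response.request):
    print('redirects to', hop.url)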
def std(self):
"""Standard deviation
Note that it is by default normalized by n - 1
# TODO, what does pandas do for multiple grouping columns?
# Currently we are just going to use one grouping column
"""
std_expr = grizzly_impl.groupby_std(
[self.column],
[self.column_type],
self.grouping_columns,
self.grouping_column_types
)
unzipped_columns = grizzly_impl.unzip_columns(
std_expr,
self.grouping_column_types + [WeldDouble()],
)
index_expr = LazyOpResult(
grizzly_impl.get_field(unzipped_columns, 0),
self.grouping_column_types[0],
1
)
column_expr = LazyOpResult(
grizzly_impl.get_field(unzipped_columns, 1),
self.grouping_column_types[0],
1
)
group_expr = utils.group([index_expr, column_expr])
return SeriesWeld(
group_expr.expr,
WeldDouble(),
index_type=self.grouping_column_types[0],
index_name=self.grouping_column_names[0]
) | Standard deviation
Note that it is by default normalized by n - 1
# TODO, what does pandas do for multiple grouping columns?
# Currently we are just going to use one grouping column |
def evaluate_and_log_bleu(estimator, bleu_writer, bleu_source, bleu_ref):
"""Calculate and record the BLEU score."""
subtokenizer = tokenizer.Subtokenizer(
os.path.join(FLAGS.data_dir, FLAGS.vocab_file))
uncased_score, cased_score = translate_and_compute_bleu(
estimator, subtokenizer, bleu_source, bleu_ref)
print("Bleu score (uncased):", uncased_score)
print("Bleu score (cased):", cased_score)
summary = tf.Summary(value=[
tf.Summary.Value(tag="bleu/uncased", simple_value=uncased_score),
tf.Summary.Value(tag="bleu/cased", simple_value=cased_score),
])
bleu_writer.add_summary(summary, get_global_step(estimator))
bleu_writer.flush()
return uncased_score, cased_score | Calculate and record the BLEU score. |
def add_project_name_or_id_arg(arg_parser, required=True, help_text_suffix="manage"):
"""
Adds project name or project id argument. These two are mutually exclusive.
:param arg_parser:
:param required:
:param help_text:
:return:
"""
project_name_or_id = arg_parser.add_mutually_exclusive_group(required=required)
name_help_text = "Name of the project to {}.".format(help_text_suffix)
add_project_name_arg(project_name_or_id, required=False, help_text=name_help_text)
id_help_text = "ID of the project to {}.".format(help_text_suffix)
add_project_id_arg(project_name_or_id, required=False, help_text=id_help_text) | Adds project name or project id argument. These two are mutually exclusive.
:param arg_parser:
:param required:
:param help_text:
:return: |
def _parse_jetconfig(self):
"""
Undocumented cross-compatibility functionality with jetconfig
(https://github.com/shakefu/jetconfig) that is very sloppy.
"""
conf = env('JETCONFIG_ETCD', None)
if not conf:
return
import urlparse
auth = None
port = None
conf = conf.split(',').pop()
entry = urlparse.urlparse(conf)
scheme = entry.scheme
host = entry.netloc or entry.path # Path is where it goes if there's no
# scheme on the URL
if '@' in host:
auth, host = host.split('@')
if ':' in host:
host, port = host.split(':')
if not port and scheme == 'https':
port = '443'
if scheme:
os.environ['PYCONFIG_ETCD_PROTOCOL'] = scheme
if auth:
os.environ['PYCONFIG_ETCD_AUTH'] = auth
if port:
host = host + ":" + port
os.environ['PYCONFIG_ETCD_HOSTS'] = host | Undocumented cross-compatibility functionality with jetconfig
(https://github.com/shakefu/jetconfig) that is very sloppy. |
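A standalone sketch of the URL split performed above, using urllib.parse on Python 3; the etcd URL is made up (the method itself reads it from JETCONFIG_ETCD).
from urllib.parse import urlparse

entry = urlparse('https://user:secret@etcd.example.com:2379')
host = entry.netloc or entry.path            # path is used when no scheme is present
auth, _, host = host.rpartition('@')         # auth is '' when there is no auth part
print(entry.scheme, auth, host)              # https user:secret etcd.example.com:2379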
def clear_samples(self):
"""Clears the chain and blobs from memory.
"""
# store the iteration that the clear is occurring on
self._lastclear = self.niterations
self._itercounter = 0
# now clear the chain
self._sampler.reset() | Clears the chain and blobs from memory. |
def jitter_run(res, rstate=None, approx=False):
"""
Probes **statistical uncertainties** on a nested sampling run by
explicitly generating a *realization* of the prior volume associated
with each sample (dead point). Companion function to :meth:`resample_run`
and :meth:`simulate_run`.
Parameters
----------
res : :class:`~dynesty.results.Results` instance
The :class:`~dynesty.results.Results` instance taken from a previous
nested sampling run.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
approx : bool, optional
Whether to approximate all sets of uniform order statistics by their
associated marginals (from the Beta distribution). Default is `False`.
Returns
-------
new_res : :class:`~dynesty.results.Results` instance
A new :class:`~dynesty.results.Results` instance with corresponding
weights based on our "jittered" prior volume realizations.
"""
if rstate is None:
rstate = np.random
# Initialize evolution of live points over the course of the run.
nsamps, samples_n = _get_nsamps_samples_n(res)
logl = res.logl
# Simulate the prior volume shrinkage associated with our set of "dead"
# points. At each iteration, if the number of live points is constant or
# increasing, our prior volume compresses by the maximum value of a set
# of `K_i` uniformly distributed random numbers (i.e. as `Beta(K_i, 1)`).
# If instead the number of live points is decreasing, that means we're
# instead sampling down a set of uniform random variables
# (i.e. uniform order statistics).
nlive_flag = np.ones(nsamps, dtype='bool')
nlive_start, bounds = [], []
if not approx:
# Find all instances where the number of live points is either constant
# or increasing.
nlive_flag[1:] = np.diff(samples_n) >= 0
# For all the portions that are decreasing, find out where they start,
# where they end, and how many live points are present at that given
# iteration.
if np.any(~nlive_flag):
i = 0
while i < nsamps:
if not nlive_flag[i]:
bound = []
bound.append(i-1)
nlive_start.append(samples_n[i-1])
while i < nsamps and not nlive_flag[i]:
i += 1
bound.append(i)
bounds.append(bound)
i += 1
# The maximum out of a set of `K_i` uniformly distributed random variables
# has a marginal distribution of `Beta(K_i, 1)`.
t_arr = np.zeros(nsamps)
t_arr[nlive_flag] = rstate.beta(a=samples_n[nlive_flag], b=1)
# If we instead are sampling the set of uniform order statistics,
# we note that the jth largest value is marginally distributed as
# `Beta(j, K_i-j+1)`. The full joint distribution is::
#
# X_(j) / X_N = (Y_1 + ... + Y_j) / (Y_1 + ... + Y_{K+1})
#
# where X_(j) is the prior volume of the live point with the `j`-th
# *best* likelihood (i.e. prior volume shrinks as likelihood increases)
# and the `Y_i`'s are i.i.d. exponentially distributed random variables.
nunif = len(nlive_start)
for i in range(nunif):
nstart = nlive_start[i]
bound = bounds[i]
sn = samples_n[bound[0]:bound[1]]
y_arr = rstate.exponential(scale=1.0, size=nstart+1)
ycsum = y_arr.cumsum()
ycsum /= ycsum[-1]
uorder = ycsum[np.append(nstart, sn-1)]
rorder = uorder[1:] / uorder[:-1]
t_arr[bound[0]:bound[1]] = rorder
# These are the "compression factors" at each iteration. Let's turn
# these into associated ln(volumes).
logvol = np.log(t_arr).cumsum()
# Compute weights using quadratic estimator.
h = 0.
logz = -1.e300
loglstar = -1.e300
logzvar = 0.
logvols_pad = np.concatenate(([0.], logvol))
logdvols = misc.logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]],
axis=1, b=np.c_[np.ones(nsamps),
-np.ones(nsamps)])
logdvols += math.log(0.5)
dlvs = -np.diff(np.append(0., res.logvol))
saved_logwt, saved_logz, saved_logzvar, saved_h = [], [], [], []
for i in range(nsamps):
loglstar_new = logl[i]
logdvol, dlv = logdvols[i], dlvs[i]
logwt = np.logaddexp(loglstar_new, loglstar) + logdvol
logz_new = np.logaddexp(logz, logwt)
lzterm = (math.exp(loglstar - logz_new) * loglstar +
math.exp(loglstar_new - logz_new) * loglstar_new)
h_new = (math.exp(logdvol) * lzterm +
math.exp(logz - logz_new) * (h + logz) -
logz_new)
dh = h_new - h
h = h_new
logz = logz_new
logzvar += dh * dlv
loglstar = loglstar_new
saved_logwt.append(logwt)
saved_logz.append(logz)
saved_logzvar.append(logzvar)
saved_h.append(h)
# Copy results.
new_res = Results([item for item in res.items()])
# Overwrite items with our new estimates.
new_res.logvol = np.array(logvol)
new_res.logwt = np.array(saved_logwt)
new_res.logz = np.array(saved_logz)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
new_res.logzerr = np.sqrt(np.array(saved_logzvar))
new_res.h = np.array(saved_h)
return new_res | Probes **statistical uncertainties** on a nested sampling run by
explicitly generating a *realization* of the prior volume associated
with each sample (dead point). Companion function to :meth:`resample_run`
and :meth:`simulate_run`.
Parameters
----------
res : :class:`~dynesty.results.Results` instance
The :class:`~dynesty.results.Results` instance taken from a previous
nested sampling run.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
approx : bool, optional
Whether to approximate all sets of uniform order statistics by their
associated marginals (from the Beta distribution). Default is `False`.
Returns
-------
new_res : :class:`~dynesty.results.Results` instance
A new :class:`~dynesty.results.Results` instance with corresponding
weights based on our "jittered" prior volume realizations. |
def create_vpc(self):
"""Create a virtual private cloud on Amazon's Web services configured
for deploying JupyterHubs.
"""
self.create_stack(
self.vpc_name,
'amazon-eks-vpc.yaml',
parameters=define_parameters(
VpcBlock="10.42.0.0/16",
Subnet01Block="10.42.1.0/24",
Subnet02Block="10.42.2.0/24",
Subnet03Block="10.42.3.0/24"
)
) | Create a virtual private cloud on Amazon's Web services configured
for deploying JupyterHubs. |
def resizeEvent(self, event):
"""Emit custom signal when the window is re-sized.
:param event: The re-sized event.
:type event: QResizeEvent
"""
self.resized.emit()
return super(MetadataConverterDialog, self).resizeEvent(event) | Emit custom signal when the window is re-sized.
:param event: The re-sized event.
:type event: QResizeEvent |
def _tp__get_typed_properties(self):
"""Return a tuple of typed attrs that can be used for comparisons.
Raises:
NotImplementedError: Raised if this class was mixed into a class
that was not created by _AnnotatedObjectMeta.
"""
try:
return tuple(getattr(self, p) for p in self._tp__typed_properties)
except AttributeError:
raise NotImplementedError | Return a tuple of typed attrs that can be used for comparisons.
Raises:
NotImplementedError: Raised if this class was mixed into a class
that was not created by _AnnotatedObjectMeta. |
def save(evt, designer):
"Basic save functionality: just replaces the gui code"
# ask the user if we should save the changes:
ok = gui.confirm("Save the changes?", "GUI2PY Designer",
cancel=True, default=True)
if ok:
wx_obj = evt.GetEventObject()
w = wx_obj.obj
try:
if DEBUG: print "saving..."
# make a backup:
fin = open(designer.filename, "r")
fout = open(designer.filename + ".bak", "w")
fout.write(fin.read())
fout.close()
fin.close()
if designer.filename.endswith(".rsrc.py"):
# serialize and save the resource
gui.save(designer.filename, [gui.dump(w)])
else:
# reopen the files to proccess them
fin = open(designer.filename + ".bak", "r")
fout = open(designer.filename, "w")
copy = True
newlines = fin.newlines or "\n"
def dump(obj, indent=1):
"recursive convert object to string"
for ctl in obj[:]:
write(ctl, indent)
def write(ctl, indent):
if ctl[:]:
fout.write(" " * indent * 4)
fout.write("with %s:" % ctl.__repr__(parent=None, indent=indent, context=True))
fout.write(newlines)
dump(ctl, indent + 1)
else:
fout.write(" " * indent * 4)
fout.write(ctl.__repr__(parent=None, indent=indent))
fout.write(newlines)
dumped = False
for line in fin:
if line.startswith("# --- gui2py designer generated code starts ---"):
fout.write(line)
fout.write(newlines)
write(w, indent=0)
fout.write(newlines)
dumped = True
copy = False
if line.startswith("# --- gui2py designer generated code ends ---"):
copy = True
if copy:
fout.write(line)
#fout.write("\n\r")
if not dumped:
gui.alert("No valid # --- gui2py... delimiters! \n"
"Unable to write down design code!",
"Design not updated:")
fout.close()
fin.close()
except Exception, e:
import traceback
print(traceback.print_exc())
ok = gui.confirm("Close anyway?\n%s" % str(e), 'Unable to save:',
ok=True, cancel=True)
if ok is not None:
wx.CallAfter(exit) # terminate the designer program
return ok | Basic save functionality: just replaces the gui code |
def get_items(self, container_id, scope=None, item_path=None, metadata=None, format=None, download_file_name=None, include_download_tickets=None, is_shallow=None):
"""GetItems.
[Preview API]
:param long container_id:
:param str scope:
:param str item_path:
:param bool metadata:
:param str format:
:param str download_file_name:
:param bool include_download_tickets:
:param bool is_shallow:
:rtype: [FileContainerItem]
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'long')
query_parameters = {}
if scope is not None:
query_parameters['scope'] = self._serialize.query('scope', scope, 'str')
if item_path is not None:
query_parameters['itemPath'] = self._serialize.query('item_path', item_path, 'str')
if metadata is not None:
query_parameters['metadata'] = self._serialize.query('metadata', metadata, 'bool')
if format is not None:
query_parameters['$format'] = self._serialize.query('format', format, 'str')
if download_file_name is not None:
query_parameters['downloadFileName'] = self._serialize.query('download_file_name', download_file_name, 'str')
if include_download_tickets is not None:
query_parameters['includeDownloadTickets'] = self._serialize.query('include_download_tickets', include_download_tickets, 'bool')
if is_shallow is not None:
query_parameters['isShallow'] = self._serialize.query('is_shallow', is_shallow, 'bool')
response = self._send(http_method='GET',
location_id='e4f5c81e-e250-447b-9fef-bd48471bea5e',
version='5.0-preview.4',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[FileContainerItem]', self._unwrap_collection(response)) | GetItems.
[Preview API]
:param long container_id:
:param str scope:
:param str item_path:
:param bool metadata:
:param str format:
:param str download_file_name:
:param bool include_download_tickets:
:param bool is_shallow:
:rtype: [FileContainerItem] |
def pip_command_output(pip_args):
"""
Get output (as a string) from pip command
:param pip_args: list of pip switches to pass
:return: string with results
"""
import sys
import pip
from io import StringIO
# as pip will write to stdout we use some nasty hacks
# to substitute system stdout with our own
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
pip.main(pip_args)
output = mystdout.getvalue()
mystdout.truncate(0)
sys.stdout = old_stdout
return output | Get output (as a string) from pip command
:param pip_args: list of pip switches to pass
:return: string with results |
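A hedged usage note: calling pip.main() in-process only works on old pip releases (pip 10 moved main into pip._internal), so this is illustrative rather than something to rely on.
listing = pip_command_output(['list'])
print(listing.splitlines()[:3])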
def strip_praw_submission(cls, sub):
"""
Parse through a submission and return a dict with data ready to be
displayed through the terminal.
Definitions:
permalink - URL to the reddit page with submission comments.
url_full - URL that the submission points to.
url - URL that will be displayed on the subreddit page, may be
"selfpost", "x-post submission", "x-post subreddit", or an
external link.
"""
reddit_link = re.compile(
r'https?://(www\.)?(np\.)?redd(it\.com|\.it)/r/.*')
author = getattr(sub, 'author', '[deleted]')
name = getattr(author, 'name', '[deleted]')
flair = getattr(sub, 'link_flair_text', '')
data = {}
data['object'] = sub
data['type'] = 'Submission'
data['title'] = sub.title
data['text'] = sub.selftext
data['html'] = sub.selftext_html or ''
data['created'] = cls.humanize_timestamp(sub.created_utc)
data['created_long'] = cls.humanize_timestamp(sub.created_utc, True)
data['comments'] = '{0} comments'.format(sub.num_comments)
data['score'] = '{0} pts'.format('-' if sub.hide_score else sub.score)
data['author'] = name
data['permalink'] = sub.permalink
data['subreddit'] = six.text_type(sub.subreddit)
data['flair'] = '[{0}]'.format(flair.strip(' []')) if flair else ''
data['url_full'] = sub.url
data['likes'] = sub.likes
data['gold'] = sub.gilded
data['nsfw'] = sub.over_18
data['stickied'] = sub.stickied
data['hidden'] = sub.hidden
data['xpost_subreddit'] = None
data['index'] = None # This is filled in later by the method caller
data['saved'] = sub.saved
if sub.edited:
data['edited'] = '(edit {})'.format(
cls.humanize_timestamp(sub.edited))
data['edited_long'] = '(edit {})'.format(
cls.humanize_timestamp(sub.edited, True))
else:
data['edited'] = ''
data['edited_long'] = ''
if sub.url.split('/r/')[-1] == sub.permalink.split('/r/')[-1]:
data['url'] = 'self.{0}'.format(data['subreddit'])
data['url_type'] = 'selfpost'
elif reddit_link.match(sub.url):
# Strip the subreddit name from the permalink to avoid having
# submission.subreddit.url make a separate API call
url_parts = sub.url.split('/')
data['xpost_subreddit'] = url_parts[4]
data['url'] = 'self.{0}'.format(url_parts[4])
if 'comments' in url_parts:
data['url_type'] = 'x-post submission'
else:
data['url_type'] = 'x-post subreddit'
else:
data['url'] = sub.url
data['url_type'] = 'external'
return data | Parse through a submission and return a dict with data ready to be
displayed through the terminal.
Definitions:
permalink - URL to the reddit page with submission comments.
url_full - URL that the submission points to.
url - URL that will be displayed on the subreddit page, may be
"selfpost", "x-post submission", "x-post subreddit", or an
external link. |
def replace(self, year=None, week=None):
"""Return a Week with either the year or week attribute value replaced"""
return self.__class__(self.year if year is None else year,
self.week if week is None else week) | Return a Week with either the year or week attribute value replaced |
def shadow_calc(data):
"""计算上下影线
Arguments:
data {DataStruct.slice} -- 输入的是一个行情切片
Returns:
up_shadow {float} -- 上影线
down_shdow {float} -- 下影线
entity {float} -- 实体部分
date {str} -- 时间
code {str} -- 代码
"""
up_shadow = abs(data.high - (max(data.open, data.close)))
down_shadow = abs(data.low - (min(data.open, data.close)))
entity = abs(data.open - data.close)
towards = True if data.open < data.close else False
print('=' * 15)
print('up_shadow : {}'.format(up_shadow))
print('down_shadow : {}'.format(down_shadow))
print('entity: {}'.format(entity))
print('towards : {}'.format(towards))
return up_shadow, down_shadow, entity, data.date, data.code | Compute the upper and lower shadows (candlestick wicks)
Arguments:
data {DataStruct.slice} -- the input is a single market-data (quote) slice
Returns:
up_shadow {float} -- upper shadow
down_shadow {float} -- lower shadow
entity {float} -- candle body
date {str} -- date
code {str} -- instrument code |
def not_(self, value, name=''):
"""
Bitwise integer complement:
name = ~value
"""
if isinstance(value.type, types.VectorType):
rhs = values.Constant(value.type, (-1,) * value.type.count)
else:
rhs = values.Constant(value.type, -1)
return self.xor(value, rhs, name=name) | Bitwise integer complement:
name = ~value |
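A plain-Python check of the identity the builder relies on: two's-complement NOT is the same as XOR with -1, which is what the method emits.
for value in (0, 1, 5, -7, 255):
    assert ~value == value ^ -1
print('ok: ~x == x ^ -1 for the sampled values')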
def _load(self, url, verbose):
"""
Execute a request against the Salesking API to fetch the items
:param url: url to fetch
:return response
:raises SaleskingException with the corresponding http errors
"""
msg = u"_load url: %s" % url
self._last_query_str = url
log.debug(msg)
if verbose:
print(msg)
response = self.__api__.request(url)
return response | Execute a request against the Salesking API to fetch the items
:param url: url to fetch
:return response
:raises SaleskingException with the corresponding http errors |
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will be between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
)) | Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will be between "min_val" and
"max_val" |
def stretch_cv(x,sr,sc,interpolation=cv2.INTER_AREA):
""" Stretches image x horizontally by sr+1, and vertically by sc+1 while retaining the original image size and proportion. """
if sr==0 and sc==0: return x
r,c,*_ = x.shape
x = cv2.resize(x, None, fx=sr+1, fy=sc+1, interpolation=interpolation)
nr,nc,*_ = x.shape
cr = (nr-r)//2; cc = (nc-c)//2
return x[cr:r+cr, cc:c+cc] | Stretches image x horizontally by sr+1, and vertically by sc+1 while retaining the original image size and proportion. |
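A small sketch of stretch_cv on a synthetic image; it assumes numpy and OpenCV (cv2) are installed, and the stretch factors are illustrative:
import numpy as np
import cv2  # OpenCV

img = np.random.randint(0, 256, size=(100, 200, 3), dtype=np.uint8)
stretched = stretch_cv(img, sr=0.2, sc=0.0)  # stretch width by 1.2x, keep height
assert stretched.shape == img.shape          # result is center-cropped back to the original size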
def setConnStringForWindows():
""" Set Conn String for Windiws
Windows has a different way of forking processes, which causes the
@worker_process_init.connect signal not to work in "CeleryDbConnInit"
"""
global _dbConnectString
from peek_platform.file_config.PeekFileConfigABC import PeekFileConfigABC
from peek_platform.file_config.PeekFileConfigSqlAlchemyMixin import \
PeekFileConfigSqlAlchemyMixin
from peek_platform import PeekPlatformConfig
class _WorkerTaskConfigMixin(PeekFileConfigABC,
PeekFileConfigSqlAlchemyMixin):
pass
PeekPlatformConfig.componentName = peekWorkerName
_dbConnectString = _WorkerTaskConfigMixin().dbConnectString | Set Conn String for Windows
Windows has a different way of forking processes, which causes the
@worker_process_init.connect signal not to work in "CeleryDbConnInit" |
def list_jobs(self, argument_filters=None):
'''
a method to list jobs in the scheduler
:param argument_filters: list of query criteria dictionaries for class argument keys
:return: list of jobs (which satisfy the filters)
NOTE: query criteria architecture
each item in the argument filters list must be a dictionary
which is composed of one or more key names which represent the
dotpath to a key in the job record to be queried with a value
that is a dictionary of conditional operators used to test the
value in the corresponding key in each record in the list of jobs.
eg. argument_filters = [ { '.function': { 'must_contain': [ 'debug' ] } } ]
this example filter looks in the function key of each job for a
value which contains the characters 'debug'.
NOTE: the filter method uses a query filters list structure to represent
the disjunctive normal form of a logical expression. a record is
added to the results list if any query criteria dictionary in the
list evaluates to true. within each query criteria dictionary, all
declared conditional operators must evaluate to true.
in this way, the argument_filters represents a boolean OR operator and
each criteria dictionary inside the list represents a boolean AND
operator between all keys in the dictionary.
NOTE: each query_criteria uses the architecture of query declaration in
the jsonModel.query method
the list of keys in each query_criteria is the same as the arguments for
adding a job to the scheduler
query_criteria = {
'.id': {},
'.function': {},
'.name': {},
'.dt': {},
'.interval': {},
'.month': {},
'.day': {},
'.weekday': {},
'.hour': {},
'.minute': {},
'.second': {},
'.start_date': {},
'.end_date': {}
}
conditional operators for '.id', '.function', '.name':
"byte_data": false,
"discrete_values": [ "" ],
"excluded_values": [ "" ],
"greater_than": "",
"less_than": "",
"max_length": 0,
"max_value": "",
"min_length": 0,
"min_value": "",
"must_contain": [ "" ],
"must_not_contain": [ "" ],
"contains_either": [ "" ]
conditional operators for '.dt', 'start', 'end':
"discrete_values": [ 0.0 ],
"excluded_values": [ 0.0 ],
"greater_than": 0.0,
"less_than": 0.0,
"max_value": 0.0,
"min_value": 0.0
operators for '.interval', '.month', '.day', '.weekday', '.hour', '.minute', '.second':
"discrete_values": [ 0 ],
"excluded_values": [ 0 ],
"greater_than": 0,
"less_than": 0,
"max_value": 0,
"min_value": 0
'''
title = '%s.list_jobs' % self.__class__.__name__
# validate inputs
if argument_filters:
self.fields.validate(argument_filters, '.argument_filters')
# send request to get jobs
url = '%s/scheduler/jobs' % self.url
job_list = self._get_request(url)
# construct filter function
def query_function(**kwargs):
job_details = {}
for key, value in kwargs.items():
if key in self.job_model.schema.keys():
job_details[key] = value
for query_criteria in argument_filters:
if self.job_model.query(query_criteria, job_details):
return True
return False
# construct empty list
results_list = []
# add refactored jobs to results list
for job in job_list:
job_details = self._construct_details(job)
if argument_filters:
if query_function(**job_details):
results_list.append(job_details)
else:
results_list.append(job_details)
return results_list | a method to list jobs in the scheduler
:param argument_filters: list of query criteria dictionaries for class argument keys
:return: list of jobs (which satisfy the filters)
NOTE: query criteria architecture
each item in the argument filters list must be a dictionary
which is composed of one or more key names which represent the
dotpath to a key in the job record to be queried with a value
that is a dictionary of conditional operators used to test the
value in the corresponding key in each record in the list of jobs.
eg. argument_filters = [ { '.function': { 'must_contain': [ 'debug' ] } } ]
this example filter looks in the function key of each job for a
value which contains the characters 'debug'.
NOTE: the filter method uses a query filters list structure to represent
the disjunctive normal form of a logical expression. a record is
added to the results list if any query criteria dictionary in the
list evaluates to true. within each query criteria dictionary, all
declared conditional operators must evaluate to true.
in this way, the argument_filters represents a boolean OR operator and
each criteria dictionary inside the list represents a boolean AND
operator between all keys in the dictionary.
NOTE: each query_criteria uses the architecture of query declaration in
the jsonModel.query method
the list of keys in each query_criteria is the same as the arguments for
adding a job to the scheduler
query_criteria = {
'.id': {},
'.function': {},
'.name': {},
'.dt': {},
'.interval': {},
'.month': {},
'.day': {},
'.weekday': {},
'.hour': {},
'.minute': {},
'.second': {},
'.start_date': {},
'.end_date': {}
}
conditional operators for '.id', '.function', '.name':
"byte_data": false,
"discrete_values": [ "" ],
"excluded_values": [ "" ],
"greater_than": "",
"less_than": "",
"max_length": 0,
"max_value": "",
"min_length": 0,
"min_value": "",
"must_contain": [ "" ],
"must_not_contain": [ "" ],
"contains_either": [ "" ]
conditional operators for '.dt', 'start', 'end':
"discrete_values": [ 0.0 ],
"excluded_values": [ 0.0 ],
"greater_than": 0.0,
"less_than": 0.0,
"max_value": 0.0,
"min_value": 0.0
operators for '.interval', '.month', '.day', '.weekday', '.hour', '.minute', '.second':
"discrete_values": [ 0 ],
"excluded_values": [ 0 ],
"greater_than": 0,
"less_than": 0,
"max_value": 0,
"min_value": 0 |
def recover_devices(cls):
"""Track devices.
Creates global dict to track device names across driver invocations
and populates based on current devices configured on the system.
"""
if "_devices" in globals():
return
global _devices
confs_dir = os.path.abspath(os.path.normpath(cfg.CONF.dhcp_confs))
for netid in os.listdir(confs_dir):
conf_dir = os.path.join(confs_dir, netid)
intf_filename = os.path.join(conf_dir, 'interface')
try:
with open(intf_filename, 'r') as f:
ifname = f.read()
_devices[netid] = ifname
except IOError:
LOG.error('Unable to read interface file: %s',
intf_filename)
LOG.debug("Recovered device %s for network %s'",
ifname, netid) | Track devices.
Creates global dict to track device names across driver invocations
and populates based on current devices configured on the system. |
def get_profile(A):
"Fail-soft profile getter; if no profile is present assume none and quietly ignore."
try:
with open(os.path.expanduser(A.profile)) as I:
profile = json.load(I)
return profile
except:
return {} | Fail-soft profile getter; if no profile is present assume none and quietly ignore. |
def badge_label(self, badge):
'''Display the badge label for a given kind'''
kind = badge.kind if isinstance(badge, Badge) else badge
return self.__badges__[kind] | Display the badge label for a given kind |
def _upload(auth_http, project_id, bucket_name, file_path, object_name, acl):
"""Uploads a file to Google Cloud Storage.
Args:
auth_http: An authorized httplib2.Http instance.
project_id: The project to upload to.
bucket_name: The bucket to upload to.
file_path: Path to the file to upload.
object_name: The name within the bucket to upload to.
acl: The ACL to assign to the uploaded file.
"""
with open(file_path, 'rb') as f:
data = f.read()
content_type, content_encoding = mimetypes.guess_type(file_path)
headers = {
'x-goog-project-id': project_id,
'x-goog-api-version': API_VERSION,
'x-goog-acl': acl,
'Content-Length': '%d' % len(data)
}
if content_type: headers['Content-Type'] = content_type
if content_encoding: headers['Content-Encoding'] = content_encoding
try:
response, content = auth_http.request(
'http://%s.storage.googleapis.com/%s' % (bucket_name, object_name),
method='PUT',
headers=headers,
body=data)
except httplib2.ServerNotFoundError:
raise Error(404, 'Server not found.')
if response.status >= 300:
raise Error(response.status, response.reason)
return content | Uploads a file to Google Cloud Storage.
Args:
auth_http: An authorized httplib2.Http instance.
project_id: The project to upload to.
bucket_name: The bucket to upload to.
file_path: Path to the file to upload.
object_name: The name within the bucket to upload to.
acl: The ACL to assign to the uploaded file. |
def expand_tpm(tpm):
"""Broadcast a state-by-node TPM so that singleton dimensions are expanded
over the full network.
"""
unconstrained = np.ones([2] * (tpm.ndim - 1) + [tpm.shape[-1]])
return tpm * unconstrained | Broadcast a state-by-node TPM so that singleton dimensions are expanded
over the full network. |
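A short numpy sketch of the broadcasting expand_tpm relies on: a TPM with a singleton state dimension is multiplied by an all-ones array of the full state-by-node shape, which repeats it along that dimension. The toy TPM below is made up:
import numpy as np

# 2-node TPM where node 0's state does not matter: singleton first dimension.
tpm = np.array([[[0.1, 0.9],
                 [0.4, 0.6]]])           # shape (1, 2, 2)
expanded = expand_tpm(tpm)
assert expanded.shape == (2, 2, 2)        # singleton dimension broadcast to size 2
assert np.array_equal(expanded[0], expanded[1])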
def _finalize_step(self):
"""Finalize simulation step after all agents have acted for the current
step.
"""
t = time.time()
if self._callback is not None:
self._callback(self.age)
t2 = time.time()
self._step_processing_time += t2 - t
self._log(logging.INFO, "Step {} run in: {:.3f}s ({:.3f}s of "
"actual processing time used)"
.format(self.age, self._step_processing_time,
t2 - self._step_start_time))
self._processing_time += self._step_processing_time | Finalize simulation step after all agents have acted for the current
step. |
def apply_gemm(scope, input_name, output_name, container, operator_name=None, alpha=1.0, beta=1.0,
transA=0, transB=0):
"""
Applies operator `gemm <https://github.com/onnx/onnx/blob/master/docs/Operators.md#gemm>`.
"""
name = _create_name_or_use_existing_one(scope, 'Gemm', operator_name)
attrs = {'alpha': alpha, 'beta': beta, 'transA': transA, 'transB': transB}
if container.target_opset < 5:
attrs['op_version'] = 1
attrs['broadcast'] = 1
elif container.target_opset < 7:
attrs['op_version'] = 6
attrs['broadcast'] = 1
else:
attrs['op_version'] = 7
container.add_node('Gemm', input_name, output_name, name=name, **attrs) | Applies operator `gemm <https://github.com/onnx/onnx/blob/master/docs/Operators.md#gemm>`. |
def terminate_process(self, idf):
""" Terminate a process by id """
try:
p = self.q.pop(idf)
p.terminate()
return p
except:
return None | Terminate a process by id |
def qsnorm(p):
"""
rational approximation for x where q(x)=d, q being the cumulative
normal distribution function. taken from Abramowitz & Stegun p. 933
|error(x)| < 4.5*10**-4
"""
d = p
if d < 0. or d > 1.:
print('d not in (0,1) ')
sys.exit()
x = 0.
if (d - 0.5) > 0:
d = 1. - d
if (d - 0.5) < 0:
t2 = -2. * np.log(d)
t = np.sqrt(t2)
x = t - old_div((2.515517 + .802853 * t + .010328 * t2),
(1. + 1.432788 * t + .189269 * t2 + .001308 * t * t2))
if p < 0.5:
x = -x
return x | rational approximation for x where q(x)=d, q being the cumulative
normal distribution function. taken from Abramowitz & Stegun p. 933
|error(x)| < 4.5*10**-4 |
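A quick check of qsnorm against familiar normal quantiles; being the Abramowitz & Stegun rational approximation, the results agree with the exact quantiles only to about 4.5e-4:
x = qsnorm(0.975)
print(x)              # approximately 1.96
print(qsnorm(0.5))    # 0.0 by construction
print(qsnorm(0.025))  # approximately -1.96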
def CountFlowOutputPluginLogEntries(self,
client_id,
flow_id,
output_plugin_id,
with_type=None):
"""Returns number of flow output plugin log entries of a given flow."""
return len(
self.ReadFlowOutputPluginLogEntries(
client_id,
flow_id,
output_plugin_id,
0,
sys.maxsize,
with_type=with_type)) | Returns number of flow output plugin log entries of a given flow. |
def _is_valid_duration(self, inpt, metadata):
"""Checks if input is a valid Duration"""
# NEED TO ADD CHECKS FOR OTHER METADATA, LIKE MINIMUM, MAXIMUM, ETC.
from dlkit.abstract_osid.calendaring.primitives import Duration as abc_duration
if isinstance(inpt, abc_duration):
return True
else:
return False | Checks if input is a valid Duration |
def md5(self, raw_output=False):
"""
Calculates the md5 hash of a given string
:example 'cfcd208495d565ef66e7dff9f98764da'
"""
res = hashlib.md5(str(self.generator.random.random()).encode('utf-8'))
if raw_output:
return res.digest()
return res.hexdigest() | Calculates the md5 hash of a given string
:example 'cfcd208495d565ef66e7dff9f98764da' |
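The method above hashes a random number, so its output varies per call; the example value in the docstring is simply the md5 of the string "0", which is easy to verify with hashlib directly:
import hashlib

digest = hashlib.md5('0'.encode('utf-8')).hexdigest()
assert digest == 'cfcd208495d565ef66e7dff9f98764da'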
def get_mcu_definition(self, project_file):
""" Parse project file to get mcu definition """
project_file = join(getcwd(), project_file)
uvproj_dic = xmltodict.parse(file(project_file), dict_constructor=dict)
# Generic Target, should get from Target class !
mcu = MCU_TEMPLATE
try:
mcu['tool_specific'] = {
# legacy device
'uvision' : {
'TargetOption' : {
'Device' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Device']],
'DeviceId' : [None if not uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['DeviceId'] else
int(uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['DeviceId'])],
'Vendor' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Vendor']],
'Cpu' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Cpu']],
'FlashDriverDll' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['FlashDriverDll']],
'SFDFile' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['SFDFile']],
'RegisterFile': [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['RegisterFile']],
}
}
}
except KeyError:
# validity check for uvision project
logging.debug("The project_file %s seems to be not valid .uvproj file.")
return mcu
return mcu | Parse project file to get mcu definition |
def get_all_chats(self):
"""
Fetches all chats
:return: List of chats
:rtype: list[Chat]
"""
chats = self.wapi_functions.getAllChats()
if chats:
return [factory_chat(chat, self) for chat in chats]
else:
return [] | Fetches all chats
:return: List of chats
:rtype: list[Chat] |
def notices(self):
"""pops and returns all notices
http://initd.org/psycopg/docs/connection.html#connection.notices
"""
return [self._db.notices.pop()[8:].strip() for x in range(len(self._db.notices))] | pops and returns all notices
http://initd.org/psycopg/docs/connection.html#connection.notices |
def get_config_directory():
"""Return the directory the config file is located in.
This enables us to use relative paths in config values.
"""
# avoid circular import
from .commands.stacker import Stacker
command = Stacker()
namespace = command.parse_args()
return os.path.dirname(namespace.config.name) | Return the directory the config file is located in.
This enables us to use relative paths in config values. |
def simple_generate_batch(cls, create, size, **kwargs):
"""Generate a batch of instances.
These instances will be either 'built' or 'created'.
Args:
size (int): the number of instances to generate
create (bool): whether to 'build' or 'create' the instances.
Returns:
object list: the generated instances
"""
strategy = enums.CREATE_STRATEGY if create else enums.BUILD_STRATEGY
return cls.generate_batch(strategy, size, **kwargs) | Generate a batch of instances.
These instances will be either 'built' or 'created'.
Args:
size (int): the number of instances to generate
create (bool): whether to 'build' or 'create' the instances.
Returns:
object list: the generated instances |
def Nu_Mokry(Re, Pr, rho_w=None, rho_b=None):
r'''Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_,
and reviewed in [2]_.
.. math::
Nu_b = 0.0061 Re_b^{0.904} \bar{Pr}_b^{0.684}
\left(\frac{\rho_w}{\rho_b}\right)^{0.564}
\bar{Cp} = \frac{H_w-H_b}{T_w-T_b}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties and an average heat capacity
between the wall and bulk temperatures [-]
rho_w : float, optional
Density at the wall temperature, [kg/m^3]
rho_b : float, optional
Density at the bulk temperature, [kg/m^3]
Returns
-------
Nu : float
Nusselt number with bulk fluid properties, [-]
Notes
-----
For the data used to develop the correlation, P was set at 20 MPa, and D
was 10 mm. G varied from 200-1500 kg/m^2/s and q varied from 0 to 1250
kW/m^2.
Cp used in the calculation of Prandtl number should be the average value
of those at the wall and the bulk temperatures.
For deteriorated heat transfer, this was the fourth most accurate correlation
in [2]_ with a MAD of 24.0%. It was also the 7th most accurate against
enhanced heat transfer, with a MAD of 14.7%, and the most accurate for the
normal heat transfer database as well as the top correlation in all
categories combined.
If the extra density information is not provided, it will not be used.
Examples
--------
>>> Nu_Mokry(1E5, 1.2, 330, 290.)
246.1156319156992
References
----------
.. [1] Mokry, Sarah, Igor Pioro, Amjad Farah, Krysten King, Sahil Gupta,
Wargha Peiman, and Pavel Kirillov. "Development of Supercritical Water
Heat-Transfer Correlation for Vertical Bare Tubes." Nuclear Engineering
and Design, International Conference on Nuclear Energy for New Europe
2009, 241, no. 4 (April 2011): 1126-36.
doi:10.1016/j.nucengdes.2010.06.012.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027.
'''
Nu = 0.0061*Re**0.904*Pr**0.684
if rho_w and rho_b:
Nu *= (rho_w/rho_b)**0.564
return Nu | r'''Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_,
and reviewed in [2]_.
.. math::
Nu_b = 0.0061 Re_b^{0.904} \bar{Pr}_b^{0.684}
\left(\frac{\rho_w}{\rho_b}\right)^{0.564}
\bar{Cp} = \frac{H_w-H_b}{T_w-T_b}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties and an average heat capacity
between the wall and bulk temperatures [-]
rho_w : float, optional
Density at the wall temperature, [kg/m^3]
rho_b : float, optional
Density at the bulk temperature, [kg/m^3]
Returns
-------
Nu : float
Nusselt number with bulk fluid properties, [-]
Notes
-----
For the data used to develop the correlation, P was set at 20 MPa, and D
was 10 mm. G varied from 200-1500 kg/m^2/s and q varied from 0 to 1250
kW/m^2.
Cp used in the calculation of Prandtl number should be the average value
of those at the wall and the bulk temperatures.
For deteriorated heat transfer, this was the fourth most accurate correlation
in [2]_ with a MAD of 24.0%. It was also the 7th most accurate against
enhanced heat transfer, with a MAD of 14.7%, and the most accurate for the
normal heat transfer database as well as the top correlation in all
categories combined.
If the extra density information is not provided, it will not be used.
Examples
--------
>>> Nu_Mokry(1E5, 1.2, 330, 290.)
246.1156319156992
References
----------
.. [1] Mokry, Sarah, Igor Pioro, Amjad Farah, Krysten King, Sahil Gupta,
Wargha Peiman, and Pavel Kirillov. "Development of Supercritical Water
Heat-Transfer Correlation for Vertical Bare Tubes." Nuclear Engineering
and Design, International Conference on Nuclear Energy for New Europe
2009, 241, no. 4 (April 2011): 1126-36.
doi:10.1016/j.nucengdes.2010.06.012.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027. |
def sample(self, nsims=1000):
""" Samples from the posterior predictive distribution
Parameters
----------
nsims : int (default : 1000)
How many draws from the posterior predictive distribution
Returns
----------
- np.ndarray of draws from the data
"""
if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
raise Exception("No latent variables estimated!")
else:
lv_draws = self.draw_latent_variables(nsims=nsims)
sigmas = [self._model(lv_draws[:,i])[0] for i in range(nsims)]
data_draws = np.array([ss.t.rvs(loc=self.latent_variables.z_list[-1].prior.transform(lv_draws[-1,i]),
df=self.latent_variables.z_list[-2].prior.transform(lv_draws[-2,i]), scale=np.exp(sigmas[i]/2.0)) for i in range(nsims)])
return data_draws | Samples from the posterior predictive distribution
Parameters
----------
nsims : int (default : 1000)
How many draws from the posterior predictive distribution
Returns
----------
- np.ndarray of draws from the data |
def ASRS(self, params):
"""
ASRS [Ra,] Ra, Rc
ASRS [Ra,] Rb, #imm5_counting
Arithmetic shift right Rb by Rc or imm5_counting and store the result in Ra
imm5 counting is [1, 32]
In the register shift, the first two operands must be the same register
Ra, Rb, and Rc must be low registers
If Ra is omitted, then it is assumed to be Rb
"""
# This instruction allows for an optional destination register
# If it is omitted, then it is assumed to be Rb
# As defined in http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0662b/index.html
try:
Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_COMMA_SEPARATED, params)
except iarm.exceptions.ParsingError:
Rb, Rc = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
Ra = Rb
if self.is_register(Rc):
# ASRS Ra, Ra, Rb
self.check_arguments(low_registers=(Ra, Rc))
self.match_first_two_parameters(Ra, Rb)
def ASRS_func():
# Set the C flag, or the last shifted out bit
if (self.register[Rc] > 0) and (self.register[Rb] & (1 << (self.register[Rc] - 1))):
self.set_APSR_flag_to_value('C', 1)
else:
self.set_APSR_flag_to_value('C', 0)
if self.register[Ra] & (1 << (self._bit_width - 1)):
self.register[Ra] = (self.register[Ra] >> self.register[Rc]) | (
int('1' * self.register[Rc], 2) << (self._bit_width - self.register[Rc]))
else:
self.register[Ra] = self.register[Ra] >> self.register[Rc]
self.set_NZ_flags(self.register[Ra])
else:
# ASRS Ra, Rb, #imm5_counting
self.check_arguments(low_registers=(Ra, Rb), imm5_counting=(Rc,))
shift_amount = self.check_immediate(Rc)
def ASRS_func():
# Set the C flag, or the last shifted out bit
if self.register[Rb] & (1 << (shift_amount - 1)):
self.set_APSR_flag_to_value('C', 1)
else:
self.set_APSR_flag_to_value('C', 0)
if self.register[Rb] & (1 << (self._bit_width - 1)):
self.register[Ra] = (self.register[Rb] >> shift_amount) | (
int('1' * shift_amount, 2) << (self._bit_width - shift_amount))
else:
self.register[Ra] = self.register[Rb] >> shift_amount
self.set_NZ_flags(self.register[Ra])
return ASRS_func | ASRS [Ra,] Ra, Rc
ASRS [Ra,] Rb, #imm5_counting
Arithmetic shift right Rb by Rc or imm5_counting and store the result in Ra
imm5 counting is [1, 32]
In the register shift, the first two operands must be the same register
Ra, Rb, and Rc must be low registers
If Ra is omitted, then it is assumed to be Rb |
def compile(self):
"""
Compile this expression into an ODPS SQL
:return: compiled DAG
:rtype: str
"""
from ..engines import get_default_engine
engine = get_default_engine(self)
return engine.compile(self) | Compile this expression into an ODPS SQL
:return: compiled DAG
:rtype: str |
def how_long(length=4, choices=len(words), speed=1000 * 1000 * 1000 * 1000,
optimism=2):
"""
How long might it take to guess a password?
@param length: the number of words that we're going to choose.
@type length: L{int}
@param choices: the number of words we might choose between.
@type choices: L{int}
@param speed: the speed of our hypothetical password guesser, in guesses
per second.
@type speed: L{int}
@param optimism: When we start guessing all the options, we probably won't
have to guess I{all} of them to get a hit. This assumes that the
guesser will have to guess only C{1/optimism} of the total number of
possible options before it finds a hit.
"""
return ((choices ** length) / (speed * optimism)) | How long might it take to guess a password?
@param length: the number of words that we're going to choose.
@type length: L{int}
@param choices: the number of words we might choose between.
@type choices: L{int}
@param speed: the speed of our hypothetical password guesser, in guesses
per second.
@type speed: L{int}
@param optimism: When we start guessing all the options, we probably won't
have to guess I{all} of them to get a hit. This assumes that the
guesser will have to guess only C{1/optimism} of the total number of
possible options before it finds a hit. |
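A worked example of the estimate with four words drawn from a word list at 10^12 guesses per second; the word-list size below is an assumption for illustration:
# Suppose the word list holds 8192 words (2**13), so 4 words carry 52 bits of entropy.
seconds = how_long(length=4, choices=8192, speed=10**12, optimism=2)
print(seconds)         # (8192**4) / (2 * 10**12) ≈ 2.25e3 seconds
print(seconds / 3600)  # roughly 0.6 hours for this hypothetical guesser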
def get_as_string(self, key):
"""
Converts map element into a string or returns "" if conversion is not possible.
:param key: an index of element to get.
:return: string value of the element or "" if conversion is not supported.
"""
value = self.get(key)
return StringConverter.to_string(value) | Converts map element into a string or returns "" if conversion is not possible.
:param key: an index of element to get.
:return: string value ot the element or "" if conversion is not supported. |
def missing_particle(separation=0.0, radius=RADIUS, SNR=20):
""" create a two particle state and compare it to featuring using a single particle guess """
# create a base image of one particle
s = init.create_two_particle_state(imsize=6*radius+4, axis='x', sigma=1.0/SNR,
delta=separation, radius=radius, stateargs={'varyn': True}, psfargs={'error': 1e-6})
s.obj.typ[1] = 0.
s.reset()
return s, s.obj.pos.copy() | create a two particle state and compare it to featuring using a single particle guess |
def get_group_member_profile(self, group_id, user_id, timeout=None):
"""Call get group member profile API.
https://devdocs.line.me/en/#get-group-room-member-profile
Gets the user profile of a member of a group that
the bot is in. This can be the user ID of a user who has
not added the bot as a friend or has blocked the bot.
:param str group_id: Group ID
:param str user_id: User ID
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: :py:class:`linebot.models.responses.Profile`
:return: Profile instance
"""
response = self._get(
'/v2/bot/group/{group_id}/member/{user_id}'.format(group_id=group_id, user_id=user_id),
timeout=timeout
)
return Profile.new_from_json_dict(response.json) | Call get group member profile API.
https://devdocs.line.me/en/#get-group-room-member-profile
Gets the user profile of a member of a group that
the bot is in. This can be the user ID of a user who has
not added the bot as a friend or has blocked the bot.
:param str group_id: Group ID
:param str user_id: User ID
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: :py:class:`linebot.models.responses.Profile`
:return: Profile instance |
def locate(self, pattern):
'''Find sequences matching a pattern. For a circular sequence, the
search extends over the origin.
:param pattern: str or NucleicAcidSequence for which to find matches.
:type pattern: str or coral.DNA
:returns: A list of top and bottom strand indices of matches.
:rtype: list of lists of indices (ints)
:raises: ValueError if the pattern is longer than either the input
sequence (for linear DNA) or twice as long as the input
sequence (for circular DNA).
'''
top_matches = self.top.locate(pattern)
bottom_matches = self.bottom.locate(pattern)
return [top_matches, bottom_matches] | Find sequences matching a pattern. For a circular sequence, the
search extends over the origin.
:param pattern: str or NucleicAcidSequence for which to find matches.
:type pattern: str or coral.DNA
:returns: A list of top and bottom strand indices of matches.
:rtype: list of lists of indices (ints)
:raises: ValueError if the pattern is longer than either the input
sequence (for linear DNA) or twice as long as the input
sequence (for circular DNA). |
def _hmmalign(self, input_path, directions, pipeline,
forward_reads_output_path, reverse_reads_output_path):
'''
Align reads to the aln_hmm. Receives unaligned sequences and
aligns them.
Parameters
----------
input_path : str
Filename of unaligned hits to be aligned
directions : dict
dictionary containing read names as keys, and complement
as the entry (True=Forward, False=Reverse)
pipeline: str
either PIPELINE_AA = "P" or PIPELINE_NT = "D"
forward_reads_output_fh: str
Where to write aligned forward reads
reverse_reads_output_fh: str
Where to write aligned reverse reads
Returns
-------
Nothing.
'''
if pipeline == PIPELINE_AA:
reverse_direction_reads_present=False
else:
reverse_direction_reads_present=False in directions.values()
with tempfile.NamedTemporaryFile(prefix='for_file', suffix='.fa') as for_file_fh:
for_file = for_file_fh.name
with tempfile.NamedTemporaryFile(prefix='rev_file', suffix='.fa') as rev_file_fh:
rev_file = rev_file_fh.name
# Align input reads to a specified hmm.
if reverse_direction_reads_present: # Any that are in the reverse direction would be True
reverse = []
forward = []
records = list(SeqIO.parse(open(input_path), 'fasta'))
# Split the reads into reverse and forward lists
for record in records:
read_id = record.id
if directions[read_id] == True:
forward.append(record)
elif directions[read_id] == False:
reverse.append(record)
else:
raise Exception(logging.error('Programming error: hmmalign'))
exit(1)
logging.debug("Found %i forward direction reads" % len(forward))
logging.debug("Found %i reverse direction reads" % len(reverse))
# Write reverse complement and forward reads to files
with open(for_file, 'w') as for_aln:
logging.debug("Writing forward direction reads to %s" % for_file)
for record in forward:
for_aln.write('>' + record.id + '\n')
for_aln.write(str(record.seq) + '\n')
# HMMalign and convert to fasta format
if any(forward):
self.hmmalign_sequences(self.aln_hmm, for_file, forward_reads_output_path)
else:
cmd = 'touch %s' % (forward_reads_output_path)
extern.run(cmd)
with open(rev_file, 'w') as rev_aln:
logging.debug("Writing reverse direction reads to %s" % rev_file)
for record in reverse:
if record.id and record.seq:
rev_aln.write('>' + record.id + '\n')
rev_aln.write(str(record.seq.reverse_complement()) + '\n')
self.hmmalign_sequences(self.aln_hmm, rev_file, reverse_reads_output_path)
conv_files = [forward_reads_output_path, reverse_reads_output_path]
return conv_files
else:
# If there are only forward reads, just hmmalign and be done with it.
self.hmmalign_sequences(self.aln_hmm, input_path, forward_reads_output_path)
conv_files = [forward_reads_output_path]
return conv_files | Align reads to the aln_hmm. Receives unaligned sequences and
aligns them.
Parameters
----------
input_path : str
Filename of unaligned hits to be aligned
directions : dict
dictionary containing read names as keys, and complement
as the entry (True=Forward, False=Reverse)
pipeline: str
either PIPELINE_AA = "P" or PIPELINE_NT = "D"
forward_reads_output_fh: str
Where to write aligned forward reads
reverse_reads_output_fh: str
Where to write aligned reverse reads
Returns
-------
Nothing. |
def get_stp_mst_detail_output_msti_instance_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id = ET.SubElement(msti, "instance-id")
instance_id.text = kwargs.pop('instance_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def get_bounds(self, bin_num):
"""Get the bonds of a bin, given its index `bin_num`.
:returns: a `Bounds` namedtuple with properties min and max
respectively.
"""
min_value = pow(2.0, float(bin_num) / 2.0) * self.min_value
max_value = pow(2.0, float(bin_num + 1.0) / 2.0) * self.min_value
return self.Bounds(min_value, max_value) | Get the bounds of a bin, given its index `bin_num`.
:returns: a `Bounds` namedtuple with properties min and max
respectively. |
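The bin edges grow by a factor of sqrt(2) per bin, as a quick standalone calculation of the same formula shows; min_value = 1.0 here is an illustrative choice:
# With min_value = 1.0, bin 0 spans [2**0, 2**0.5) ≈ [1.0, 1.414)
# and bin 3 spans [2**1.5, 2**2.0) ≈ [2.828, 4.0).
min_value = 1.0
for bin_num in (0, 3):
    lo = 2.0 ** (bin_num / 2.0) * min_value
    hi = 2.0 ** ((bin_num + 1) / 2.0) * min_value
    print(bin_num, lo, hi)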
def sphgen(self, force_rerun=False):
"""Create sphere representation (sph file) of receptor from the surface representation
Args:
force_rerun (bool): If method should be rerun even if output file exists
"""
log.debug('{}: running sphere generation...'.format(self.id))
if not self.dms_path:
raise ValueError('Please run dms_maker')
sph = op.join(self.dock_dir, '{}_receptor.sph'.format(self.id))
insph = op.join(self.dock_dir, 'INSPH')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=sph):
with open(insph, "w") as f:
f.write("{}\n".format(self.dms_path))
f.write("R\n")
f.write("X\n")
f.write("0.0\n")
f.write("4.0\n")
f.write("1.4\n")
f.write("{}\n".format(sph))
os.chdir(self.dock_dir)
cmd = "sphgen_cpp"
os.system(cmd)
os.remove(insph)
if ssbio.utils.is_non_zero_file(sph):
self.sphgen_path = sph
log.debug('{}: successful sphgen execution'.format(self.sphgen_path))
else:
log.critical('{}: sphgen_cpp failed to run on dms file'.format(self.dms_path)) | Create sphere representation (sph file) of receptor from the surface representation
Args:
force_rerun (bool): If method should be rerun even if output file exists |
def generate_config_parser(config, include_all=False):
"""
Generates a config parser from a configuration dictionary.
The dictionary contains the merged informations of the schema and,
optionally, of a source configuration file. Values of the source
configuration file will be stored in the *value* field of an option.
"""
# The allow_no_value allows us to output commented lines.
config_parser = SafeConfigParser(allow_no_value=True)
for section_name, option_name in _get_included_schema_sections_options(config, include_all):
if not config_parser.has_section(section_name):
config_parser.add_section(section_name)
option = config[section_name][option_name]
if option.get('required'):
config_parser.set(section_name, '# REQUIRED')
config_parser.set(section_name, '# ' + option.get('description', 'No description provided.'))
if option.get('deprecated'):
config_parser.set(section_name, '# DEPRECATED')
option_value = _get_value(option)
config_parser.set(section_name, option_name, option_value)
config_parser.set(section_name, '')
return config_parser | Generates a config parser from a configuration dictionary.
The dictionary contains the merged informations of the schema and,
optionally, of a source configuration file. Values of the source
configuration file will be stored in the *value* field of an option. |
def filter_tess_lcdict(lcdict,
filterqualityflags=True,
nanfilter='sap,pdc,time',
timestoignore=None,
quiet=False):
'''This filters the provided TESS `lcdict`, removing nans and bad
observations.
By default, this function removes points in the TESS LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_tess_fitslc` or
`read_tess_fitslc`.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap,pdc,time','sap,time','pdc,time'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing). The `lcdict` is filtered IN PLACE!
'''
cols = lcdict['columns']
# filter all bad LC points as noted by quality flags
if filterqualityflags:
nbefore = lcdict['time'].size
filterind = lcdict['quality'] == 0
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][filterind]
else:
lcdict[col] = lcdict[col][filterind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('applied quality flag filter, '
'ndet before = %s, ndet after = %s'
% (nbefore, nafter))
if nanfilter and nanfilter == 'sap,pdc,time':
notnanind = (
np.isfinite(lcdict['sap']['sap_flux']) &
np.isfinite(lcdict['sap']['sap_flux_err']) &
np.isfinite(lcdict['pdc']['pdcsap_flux']) &
np.isfinite(lcdict['pdc']['pdcsap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'sap,time':
notnanind = (
np.isfinite(lcdict['sap']['sap_flux']) &
np.isfinite(lcdict['sap']['sap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'pdc,time':
notnanind = (
np.isfinite(lcdict['pdc']['pdcsap_flux']) &
np.isfinite(lcdict['pdc']['pdcsap_flux_err']) &
np.isfinite(lcdict['time'])
)
elif nanfilter is None:
pass
else:
raise NotImplementedError
# remove nans from all columns
if nanfilter:
nbefore = lcdict['time'].size
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][notnanind]
else:
lcdict[col] = lcdict[col][notnanind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
exclind = np.full_like(lcdict['time'],True).astype(bool)
nbefore = exclind.size
# get all the masks
for ignoretime in timestoignore:
time0, time1 = ignoretime[0], ignoretime[1]
thismask = ~((lcdict['time'] >= time0) & (lcdict['time'] <= time1))
exclind = exclind & thismask
# apply the masks
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][exclind]
else:
lcdict[col] = lcdict[col][exclind]
nafter = lcdict['time'].size
if not quiet:
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
return lcdict | This filters the provided TESS `lcdict`, removing nans and bad
observations.
By default, this function removes points in the TESS LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_tess_fitslc` or
`read_tess_fitslc`.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap,pdc,time','sap,time','pdc,time'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing). The `lcdict` is filtered IN PLACE! |
def status_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
status_result = api.pipeline_status(url, args.pipeline_id, auth, verify_ssl)
return status_result['status'] | Get the status of a pipeline.
async def set_lock(self, resource, lock_identifier):
"""
Tries to set the lock to all the redis instances
:param resource: The resource string name to lock
:param lock_identifier: The id of the lock. A unique string
:return float: The elapsed time that took to lock the instances
in seconds
:raises: LockError if the lock has not been set to at least (N/2 + 1)
instances
"""
start_time = time.time()
lock_timeout = self.lock_timeout
successes = await asyncio.gather(*[
i.set_lock(resource, lock_identifier, lock_timeout) for
i in self.instances
], return_exceptions=True)
successful_sets = sum(s is None for s in successes)
elapsed_time = time.time() - start_time
locked = successful_sets >= int(len(self.instances) / 2) + 1
self.log.debug('Lock "%s" is set on %d/%d instances in %s seconds',
resource, successful_sets, len(self.instances), elapsed_time)
if not locked:
raise LockError('Can not acquire the lock "%s"' % resource)
return elapsed_time | Tries to set the lock to all the redis instances
:param resource: The resource string name to lock
:param lock_identifier: The id of the lock. A unique string
:return float: The elapsed time that took to lock the instances
in seconds
:raises: LockError if the lock has not been set to at least (N/2 + 1)
instances |
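The quorum check is the usual Redlock-style majority, int(N/2) + 1 of the instances; a quick illustration of the threshold:
# Number of instances -> minimum successful SETs needed for the lock to count.
for n_instances in (1, 3, 5):
    quorum = int(n_instances / 2) + 1
    print(n_instances, quorum)  # 1 -> 1, 3 -> 2, 5 -> 3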
def requires_authentication(func):
"""
Function decorator that throws an exception if the user
is not authenticated, and executes the function normally
if the user is authenticated.
"""
def _auth(self, *args, **kwargs):
if not self._authenticated:
raise NotAuthenticatedException('Function {} requires'
.format(func.__name__)
+ ' authentication')
else:
return func(self, *args, **kwargs)
return _auth | Function decorator that throws an exception if the user
is not authenticated, and executes the function normally
if the user is authenticated. |
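A minimal sketch of the decorator in use; the client class below and its _authenticated flag are assumptions standing in for whatever the surrounding module defines:
class DemoClient:
    """Hypothetical client; only the _authenticated flag matters to the decorator."""
    def __init__(self):
        self._authenticated = False

    def login(self):
        self._authenticated = True

    @requires_authentication
    def fetch_data(self):
        return 'secret payload'

client = DemoClient()
try:
    client.fetch_data()        # raises NotAuthenticatedException before login
except Exception as exc:
    print(exc)
client.login()
print(client.fetch_data())     # 'secret payload'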
def linear_connection(plist, lane):
"""Connects a linear list of processes into a list of dictionaries
Parameters
----------
plist : list
List with process names. This list should contain at least two entries.
lane : int
Corresponding lane of the processes
Returns
-------
res : list
List of dictionaries with the links between processes
"""
logger.debug(
"Establishing linear connection with processes: {}".format(plist))
res = []
previous = None
for p in plist:
# Skip first process
if not previous:
previous = p
continue
res.append({
"input": {
"process": previous,
"lane": lane
},
"output": {
"process": p,
"lane": lane
}
})
previous = p
return res | Connects a linear list of processes into a list of dictionaries
Parameters
----------
plist : list
List with process names. This list should contain at least two entries.
lane : int
Corresponding lane of the processes
Returns
-------
res : list
List of dictionaries with the links between processes |
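A short example of the output shape, feeding three process names through the same lane; the process names are placeholders:
links = linear_connection(['trimmomatic', 'spades', 'abricate'], lane=1)
# links == [
#     {'input': {'process': 'trimmomatic', 'lane': 1},
#      'output': {'process': 'spades', 'lane': 1}},
#     {'input': {'process': 'spades', 'lane': 1},
#      'output': {'process': 'abricate', 'lane': 1}},
# ]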
def open_url(url, httpuser=None, httppassword=None, method=None):
"""
Open a URL using an opener that will simulate a browser user-agent
url: The URL
httpuser, httppassword: HTTP authentication credentials (either both or
neither must be provided)
method: The HTTP method
Caller is reponsible for calling close() on the returned object
"""
if os.getenv('OMEGO_SSL_NO_VERIFY') == '1':
# This needs to come first to override the default HTTPS handler
log.debug('OMEGO_SSL_NO_VERIFY=1')
try:
sslctx = ssl.create_default_context()
except Exception as e:
log.error('Failed to create Default SSL context: %s' % e)
raise Stop(
'Failed to create Default SSL context, OMEGO_SSL_NO_VERIFY '
'is not supported on older versions of Python')
sslctx.check_hostname = False
sslctx.verify_mode = ssl.CERT_NONE
opener = urllib2.build_opener(urllib2.HTTPSHandler(context=sslctx))
else:
opener = urllib2.build_opener()
if 'USER_AGENT' in os.environ:
opener.addheaders = [('User-agent', os.environ.get('USER_AGENT'))]
log.debug('Setting user-agent: %s', os.environ.get('USER_AGENT'))
if httpuser and httppassword:
mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
mgr.add_password(None, url, httpuser, httppassword)
log.debug('Enabling HTTP authentication')
opener.add_handler(urllib2.HTTPBasicAuthHandler(mgr))
opener.add_handler(urllib2.HTTPDigestAuthHandler(mgr))
elif httpuser or httppassword:
raise FileException(
'httpuser and httppassword must be used together', url)
# Override method http://stackoverflow.com/a/4421485
req = urllib2.Request(url)
if method:
req.get_method = lambda: method
return opener.open(req) | Open a URL using an opener that will simulate a browser user-agent
url: The URL
httpuser, httppassword: HTTP authentication credentials (either both or
neither must be provided)
method: The HTTP method
Caller is reponsible for calling close() on the returned object |
def update_field(self, elements):
"""
Update the field with a list of provided values but only if the values
are different. Return a boolean indicating whether a change was made
indicating whether `save` should be called. If the field is currently
set to any or none, then no comparison is made and field is updated.
:param list elements: list of elements in href or Element format
to compare to existing field
:rtype: bool
"""
changed = False
if isinstance(elements, list):
if self.is_any or self.is_none:
self.add_many(elements)
changed = True
else:
_elements = element_resolver(elements, do_raise=False)
if set(self.all_as_href()) ^ set(_elements):
self.data[self.typeof] = _elements
changed = True
if changed and self.rule and (isinstance(self, (Source, Destination)) and \
self.rule.typeof in ('fw_ipv4_nat_rule', 'fw_ipv6_nat_rule')):
# Modify NAT cell if necessary
self.rule._update_nat_field(self)
return changed | Update the field with a list of provided values but only if the values
are different. Return a boolean indicating whether a change was made
indicating whether `save` should be called. If the field is currently
set to any or none, then no comparison is made and field is updated.
:param list elements: list of elements in href or Element format
to compare to existing field
:rtype: bool |
def dispatch(self, frame):
'''
Override the default dispatch since we don't need the rest of
the stack.
'''
if frame.type() == HeartbeatFrame.type():
self.send_heartbeat()
elif frame.type() == MethodFrame.type():
if frame.class_id == 10:
cb = self._method_map.get(frame.method_id)
if cb:
method = self.clear_synchronous_cb(cb)
method(frame)
else:
raise Channel.InvalidMethod(
"unsupported method %d on channel %d",
frame.method_id, self.channel_id)
else:
raise Channel.InvalidClass(
"class %d is not supported on channel %d",
frame.class_id, self.channel_id)
else:
raise Frame.InvalidFrameType(
"frame type %d is not supported on channel %d",
frame.type(), self.channel_id) | Override the default dispatch since we don't need the rest of
the stack. |
def infer_batch(self, dataloader):
"""
Description : inference for LipNet
"""
sum_losses = 0
len_losses = 0
for input_data, input_label in dataloader:
data = gluon.utils.split_and_load(input_data, self.ctx, even_split=False)
label = gluon.utils.split_and_load(input_label, self.ctx, even_split=False)
batch_sum_losses, batch_len_losses = self.infer(data, label)
sum_losses += batch_sum_losses
len_losses += batch_len_losses
return sum_losses, len_losses | Description : inference for LipNet |
def get_js(self):
""" Fetches and returns javascript file path or contents, depending if
we want a standalone presentation or not.
"""
js_file = os.path.join(self.theme_dir, 'js', 'slides.js')
if not os.path.exists(js_file):
js_file = os.path.join(THEMES_DIR, 'default', 'js', 'slides.js')
if not os.path.exists(js_file):
raise IOError(u"Cannot find slides.js in default theme")
with codecs.open(js_file, encoding=self.encoding) as js_file_obj:
return {
'path_url': utils.get_path_url(js_file, self.relative),
'contents': js_file_obj.read(),
} | Fetches and returns javascript file path or contents, depending if
we want a standalone presentation or not. |
def getnamedargs(*args, **kwargs):
"""allows you to pass a dict and named args
so you can pass ({'a':5, 'b':3}, c=8) and get
dict(a=5, b=3, c=8)"""
adict = {}
for arg in args:
if isinstance(arg, dict):
adict.update(arg)
adict.update(kwargs)
return adict | allows you to pass a dict and named args
so you can pass ({'a':5, 'b':3}, c=8) and get
dict(a=5, b=3, c=8) |
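The merge order matters here: positional dicts are applied first and keyword arguments last, so keywords win on key collisions. A quick check:
merged = getnamedargs({'a': 5, 'b': 3}, c=8)
assert merged == {'a': 5, 'b': 3, 'c': 8}

# keyword arguments override dict entries with the same key
assert getnamedargs({'a': 5}, a=7) == {'a': 7}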
def EndEdit(self, row, col, grid, oldVal=None):
"""
End editing the cell. This function must check if the current
value of the editing control is valid and different from the
original value (available as oldval in its string form.) If
it has not changed then simply return None, otherwise return
the value in its string form.
*Must Override*
"""
# Mirror our changes onto the main_window's code bar
self._tc.Unbind(wx.EVT_KEY_UP)
self.ApplyEdit(row, col, grid)
del self._col
del self._row
del self._grid | End editing the cell. This function must check if the current
value of the editing control is valid and different from the
original value (available as oldval in its string form.) If
it has not changed then simply return None, otherwise return
the value in its string form.
*Must Override* |
def copy_(name,
source,
force=False,
makedirs=False,
preserve=False,
user=None,
group=None,
mode=None,
subdir=False,
**kwargs):
'''
If the file defined by the ``source`` option exists on the minion, copy it
to the named path. The file will not be overwritten if it already exists,
unless the ``force`` option is set to ``True``.
.. note::
This state only copies files from one location on a minion to another
location on the same minion. For copying files from the master, use a
:py:func:`file.managed <salt.states.file.managed>` state.
name
The location of the file to copy to
source
The location of the file to copy to the location specified with name
force
If the target location is present then the file will not be moved,
specify "force: True" to overwrite the target file
makedirs
If the target subdirectories don't exist create them
preserve
.. versionadded:: 2015.5.0
Set ``preserve: True`` to preserve user/group ownership and mode
after copying. Default is ``False``. If ``preserve`` is set to ``True``,
then user/group/mode attributes will be ignored.
user
.. versionadded:: 2015.5.0
The user to own the copied file, this defaults to the user salt is
running as on the minion. If ``preserve`` is set to ``True``, then
this will be ignored
group
.. versionadded:: 2015.5.0
The group to own the copied file, this defaults to the group salt is
running as on the minion. If ``preserve`` is set to ``True`` or on
Windows this will be ignored
mode
.. versionadded:: 2015.5.0
The permissions to set on the copied file, aka 644, '0775', '4664'.
If ``preserve`` is set to ``True``, then this will be ignored.
Not supported on Windows.
The default mode for new files and directories corresponds umask of salt
process. For existing files and directories it's not enforced.
subdir
.. versionadded:: 2015.5.0
If the name is a directory then place the file inside the named
directory
.. note::
The copy function accepts paths that are local to the Salt minion.
This function does not support salt://, http://, or the other
additional file paths that are supported by :mod:`states.file.managed
<salt.states.file.managed>` and :mod:`states.file.recurse
<salt.states.file.recurse>`.
'''
name = os.path.expanduser(name)
source = os.path.expanduser(source)
ret = {
'name': name,
'changes': {},
'comment': 'Copied "{0}" to "{1}"'.format(source, name),
'result': True}
if not name:
return _error(ret, 'Must provide name to file.copy')
changed = True
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if not os.path.exists(source):
return _error(ret, 'Source file "{0}" is not present'.format(source))
if preserve:
user = __salt__['file.get_user'](source)
group = __salt__['file.get_group'](source)
mode = __salt__['file.get_mode'](source)
else:
user = _test_owner(kwargs, user=user)
if user is None:
user = __opts__['user']
if salt.utils.platform.is_windows():
if group is not None:
log.warning(
'The group argument for %s has been ignored as this is '
'a Windows system.', name
)
group = user
if group is None:
group = __salt__['file.gid_to_group'](
__salt__['user.info'](user).get('gid', 0)
)
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if mode is None:
mode = __salt__['file.get_mode'](source)
if os.path.isdir(name) and subdir:
# If the target is a dir, and overwrite_dir is False, copy into the dir
name = os.path.join(name, os.path.basename(source))
if os.path.lexists(source) and os.path.lexists(name):
# if this is a file which did not change, do not update
if force and os.path.isfile(name):
hash1 = salt.utils.hashutils.get_hash(name)
hash2 = salt.utils.hashutils.get_hash(source)
if hash1 == hash2:
changed = True
ret['comment'] = ' '.join([ret['comment'], '- files are identical but force flag is set'])
if not force:
changed = False
elif not __opts__['test'] and changed:
# Remove the destination to prevent problems later
try:
# On windows, if a file has the read-only attribute then we are unable
# to complete this copy unless force is set to true.
__salt__['file.remove'](name, force=force)
except (IOError, OSError):
return _error(
ret,
'Failed to delete "{0}" in preparation for '
'forced move'.format(name)
)
if __opts__['test']:
if changed:
ret['comment'] = 'File "{0}" is set to be copied to "{1}"'.format(
source,
name
)
ret['result'] = None
else:
ret['comment'] = ('The target file "{0}" exists and will not be '
'overwritten'.format(name))
ret['result'] = True
return ret
if not changed:
ret['comment'] = ('The target file "{0}" exists and will not be '
'overwritten'.format(name))
ret['result'] = True
return ret
# Run makedirs
dname = os.path.dirname(name)
if not os.path.isdir(dname):
if makedirs:
try:
_makedirs(name=name, user=user, group=group, dir_mode=mode)
except CommandExecutionError as exc:
return _error(ret, 'Drive {0} is not mapped'.format(exc.message))
else:
return _error(
ret,
'The target directory {0} is not present'.format(dname))
# All tests pass, move the file into place
try:
if os.path.isdir(source):
shutil.copytree(source, name, symlinks=True)
for root, dirs, files in salt.utils.path.os_walk(name):
for dir_ in dirs:
__salt__['file.lchown'](os.path.join(root, dir_), user, group)
for file_ in files:
__salt__['file.lchown'](os.path.join(root, file_), user, group)
else:
shutil.copy(source, name)
ret['changes'] = {name: source}
# Preserve really means just keep the behavior of the cp command. If
# the filesystem we're copying to is squashed or doesn't support chown
# then we shouldn't be checking anything.
if not preserve:
__salt__['file.check_perms'](name, ret, user, group, mode)
except (IOError, OSError):
return _error(
ret, 'Failed to copy "{0}" to "{1}"'.format(source, name))
return ret | If the file defined by the ``source`` option exists on the minion, copy it
to the named path. The file will not be overwritten if it already exists,
unless the ``force`` option is set to ``True``.
.. note::
This state only copies files from one location on a minion to another
location on the same minion. For copying files from the master, use a
:py:func:`file.managed <salt.states.file.managed>` state.
name
The location of the file to copy to
source
The location of the file to copy to the location specified with name
force
If the target location is present then the file will not be moved,
specify "force: True" to overwrite the target file
makedirs
If the target subdirectories don't exist create them
preserve
.. versionadded:: 2015.5.0
Set ``preserve: True`` to preserve user/group ownership and mode
after copying. Default is ``False``. If ``preserve`` is set to ``True``,
then user/group/mode attributes will be ignored.
user
.. versionadded:: 2015.5.0
The user to own the copied file, this defaults to the user salt is
running as on the minion. If ``preserve`` is set to ``True``, then
this will be ignored
group
.. versionadded:: 2015.5.0
The group to own the copied file, this defaults to the group salt is
running as on the minion. If ``preserve`` is set to ``True`` or on
Windows this will be ignored
mode
.. versionadded:: 2015.5.0
The permissions to set on the copied file, aka 644, '0775', '4664'.
If ``preserve`` is set to ``True``, then this will be ignored.
Not supported on Windows.
The default mode for new files and directories corresponds umask of salt
process. For existing files and directories it's not enforced.
subdir
.. versionadded:: 2015.5.0
If the name is a directory then place the file inside the named
directory
.. note::
The copy function accepts paths that are local to the Salt minion.
This function does not support salt://, http://, or the other
additional file paths that are supported by :mod:`states.file.managed
<salt.states.file.managed>` and :mod:`states.file.recurse
<salt.states.file.recurse>`. |