text (string, lengths 78-104k) | score (float64, 0-0.18) |
---|---|
def get_tree_members(self):
""" Retrieves all members from this node of the tree down."""
members = []
queue = deque()
queue.appendleft(self)
visited = set()
while len(queue):
node = queue.popleft()
if node not in visited:
members.extend(node.get_member_info())
queue.extendleft(node.get_children())
visited.add(node)
return [{attribute: member.get(attribute) for attribute in self.attr_list} for member in members if member] | 0.005236 |
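# A minimal, self-contained sketch of the deque-plus-visited-set traversal used
# above, here in plain breadth-first order. The Node class and all names below
# are illustrative only, not part of the original code base.
from collections import deque

class Node:
    def __init__(self, name, children=None):
        self.name = name
        self.children = children or []

def breadth_first_names(root):
    """Collect node names from `root` down, visiting each node at most once."""
    names, queue, visited = [], deque([root]), set()
    while queue:
        node = queue.popleft()
        if node not in visited:
            names.append(node.name)
            queue.extend(node.children)
            visited.add(node)
    return names

# breadth_first_names(Node("a", [Node("b"), Node("c", [Node("d")])]))
# -> ['a', 'b', 'c', 'd']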
def upgrade(refresh=True, dist_upgrade=False, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade``
if ``dist_upgrade`` is ``True``.
Returns a dictionary containing the changes:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
dist_upgrade
Whether to perform the upgrade using dist-upgrade vs upgrade. Default
is to use upgrade.
.. versionadded:: 2014.7.0
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
download_only
Only download the packages, don't unpack or install them
.. versionadded:: 2018.3.0
force_conf_new
Always install the new version of any configuration files.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
'''
cache_valid_time = kwargs.pop('cache_valid_time', 0)
if salt.utils.data.is_true(refresh):
refresh_db(cache_valid_time)
old = list_pkgs()
if 'force_conf_new' in kwargs and kwargs['force_conf_new']:
force_conf = '--force-confnew'
else:
force_conf = '--force-confold'
cmd = ['apt-get', '-q', '-y', '-o', 'DPkg::Options::={0}'.format(force_conf),
'-o', 'DPkg::Options::=--force-confdef']
if kwargs.get('force_yes', False):
cmd.append('--force-yes')
if kwargs.get('skip_verify', False):
cmd.append('--allow-unauthenticated')
if kwargs.get('download_only', False):
cmd.append('--download-only')
cmd.append('dist-upgrade' if dist_upgrade else 'upgrade')
result = _call_apt(cmd, env=DPKG_ENV_VARS.copy())
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if result['retcode'] != 0:
raise CommandExecutionError(
'Problem encountered upgrading packages',
info={'changes': ret, 'result': result}
)
return ret | 0.001671 |
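# The changes dictionary documented above maps each package name to its old and
# new version. A minimal sketch of how such a diff can be computed; this is an
# illustration only, not Salt's salt.utils.data.compare_dicts implementation.
def diff_pkg_versions(old, new):
    """Return {'<package>': {'old': ..., 'new': ...}} for entries that changed."""
    changes = {}
    for name in set(old) | set(new):
        if old.get(name) != new.get(name):
            changes[name] = {'old': old.get(name, ''), 'new': new.get(name, '')}
    return changes

# diff_pkg_versions({'vim': '8.0', 'git': '2.1'}, {'vim': '8.1', 'git': '2.1'})
# -> {'vim': {'old': '8.0', 'new': '8.1'}}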
def get_private_key(platform, service, purpose, key_use, version, private_key, keys_folder):
'''
Loads a private key from the file system and returns it together with a KID
generated from the corresponding public key.
:param platform: the platform the key is for
:param service: the service the key is for
:param purpose: the purpose of the private key
:param key_use: what the key is used for
:param version: the version of the key
:param private_key: the file name of the private key to load
:param keys_folder: the location on disk where the key exists
:return: a (kid, key) tuple
'''
private_key_data = get_file_contents(keys_folder, private_key)
private_key = load_pem_private_key(private_key_data.encode(), None, backend=backend)
pub_key = private_key.public_key()
pub_bytes = pub_key.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo)
kid = _generate_kid_from_key(pub_bytes.decode())
key = _create_key(platform=platform, service=service, key_use=key_use,
key_type="private", purpose=purpose, version=version,
public_key=pub_bytes.decode(), private_key=private_key_data)
return kid, key | 0.004739 |
def submissions_between(reddit_session,
subreddit,
lowest_timestamp=None,
highest_timestamp=None,
newest_first=True,
extra_cloudsearch_fields=None,
verbosity=1):
"""Yield submissions between two timestamps.
If both ``highest_timestamp`` and ``lowest_timestamp`` are unspecified,
yields all submissions in the ``subreddit``.
Submissions are yielded from newest to oldest (like in the "new" queue).
:param reddit_session: The reddit_session to make requests from. In all the
examples this is assigned to the variable ``r``.
:param subreddit: Either a subreddit object, or the name of a
subreddit. Use `all` to get the submissions stream for all submissions
made to reddit.
:param lowest_timestamp: The lower bound for the ``created_utc`` attribute of
submissions.
(Default: subreddit's created_utc or 0 when subreddit == "all").
:param highest_timestamp: The upper bound for ``created_utc`` attribute
of submissions. (Default: current unix time)
NOTE: both highest_timestamp and lowest_timestamp are proper
unix timestamps (just like ``created_utc`` attributes)
:param newest_first: If set to true, yields submissions
from newest to oldest. Otherwise yields submissions
from oldest to newest
:param extra_cloudsearch_fields: Allows extra filtering of results by
parameters like author, self. Full list is available here:
https://www.reddit.com/wiki/search
:param verbosity: A number that controls the amount of output produced to
stderr. <= 0: no output; >= 1: output the total number of submissions
processed; >= 2: output debugging information regarding
the search queries. (Default: 1)
"""
def debug(msg, level):
if verbosity >= level:
sys.stderr.write(msg + '\n')
def format_query_field(k, v):
if k in ["nsfw", "self"]:
# even though documentation lists "no" and "yes"
# as possible values, in reality they don't work
if v not in [0, 1, "0", "1"]:
raise PRAWException("Invalid value for the extra"
"field {}. Only '0' and '1' are"
"valid values.".format(k))
return "{}:{}".format(k, v)
return "{}:'{}'".format(k, v)
if extra_cloudsearch_fields is None:
extra_cloudsearch_fields = {}
extra_query_part = " ".join(
[format_query_field(k, v) for (k, v)
in sorted(extra_cloudsearch_fields.items())]
)
if highest_timestamp is None:
highest_timestamp = int(time.time()) + REDDIT_TIMESTAMP_OFFSET
else:
highest_timestamp = int(highest_timestamp) + REDDIT_TIMESTAMP_OFFSET
if lowest_timestamp is not None:
lowest_timestamp = int(lowest_timestamp) + REDDIT_TIMESTAMP_OFFSET
elif not isinstance(subreddit, six.string_types):
lowest_timestamp = int(subreddit.created)
elif subreddit not in ("all", "contrib", "mod", "friend"):
lowest_timestamp = int(reddit_session.get_subreddit(subreddit).created)
else:
lowest_timestamp = 0
original_highest_timestamp = highest_timestamp
original_lowest_timestamp = lowest_timestamp
# When making timestamp:X..Y queries, reddit misses submissions
# inside X..Y range, but they can be found inside Y..Z range
# It is not clear what the value of Z should be, but it seems
# like the difference is usually about ~1 hour or less
# To be sure, let's set the workaround offset to 2 hours
out_of_order_submissions_workaround_offset = 7200
highest_timestamp += out_of_order_submissions_workaround_offset
lowest_timestamp -= out_of_order_submissions_workaround_offset
# Those parameters work ok, but there may be a better set of parameters
window_size = 60 * 60
search_limit = 100
min_search_results_in_window = 50
window_adjustment_ratio = 1.25
backoff = BACKOFF_START
processed_submissions = 0
prev_win_increased = False
prev_win_decreased = False
while highest_timestamp >= lowest_timestamp:
try:
if newest_first:
t1 = max(highest_timestamp - window_size, lowest_timestamp)
t2 = highest_timestamp
else:
t1 = lowest_timestamp
t2 = min(lowest_timestamp + window_size, highest_timestamp)
search_query = 'timestamp:{}..{}'.format(t1, t2)
if extra_query_part:
search_query = "(and {} {})".format(search_query,
extra_query_part)
debug(search_query, 3)
search_results = list(reddit_session.search(search_query,
subreddit=subreddit,
limit=search_limit,
syntax='cloudsearch',
sort='new'))
debug("Received {0} search results for query {1}"
.format(len(search_results), search_query),
2)
backoff = BACKOFF_START
except HTTPException as exc:
debug("{0}. Sleeping for {1} seconds".format(exc, backoff), 2)
time.sleep(backoff)
backoff *= 2
continue
if len(search_results) >= search_limit:
power = 2 if prev_win_decreased else 1
window_size = int(window_size / window_adjustment_ratio**power)
prev_win_decreased = True
debug("Decreasing window size to {0} seconds".format(window_size),
2)
# Since it is possible that there are more submissions
# in the current window, we have to re-do the request
# with reduced window
continue
else:
prev_win_decreased = False
search_results = [s for s in search_results
if original_lowest_timestamp <= s.created and
s.created <= original_highest_timestamp]
for submission in sorted(search_results,
key=attrgetter('created_utc', 'id'),
reverse=newest_first):
yield submission
processed_submissions += len(search_results)
debug('Total processed submissions: {}'
.format(processed_submissions), 1)
if newest_first:
highest_timestamp -= (window_size + 1)
else:
lowest_timestamp += (window_size + 1)
if len(search_results) < min_search_results_in_window:
power = 2 if prev_win_increased else 1
window_size = int(window_size * window_adjustment_ratio**power)
prev_win_increased = True
debug("Increasing window size to {0} seconds"
.format(window_size), 2)
else:
prev_win_increased = False | 0.000138 |
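# A compact, self-contained sketch of the adaptive time-window idea used above:
# shrink the window when a query saturates the result limit, widen it when a
# window comes back sparse. The fake in-memory "search" and every name below are
# illustrative; this is not PRAW code.
def windowed_fetch(timestamps, lowest, highest, limit=100, window=3600, ratio=1.25):
    """Yield timestamps in [lowest, highest], newest first, <= limit per query."""
    def search(t1, t2):
        return [t for t in timestamps if t1 <= t <= t2][:limit]
    hi = highest
    while hi >= lowest:
        results = search(max(hi - window, lowest), hi)
        if len(results) >= limit:
            window = max(1, int(window / ratio))  # too many hits: shrink and retry
            continue
        for ts in sorted(results, reverse=True):
            yield ts
        hi -= window + 1
        if len(results) < limit // 2:
            window = int(window * ratio)  # sparse window: widen the next one

# list(windowed_fetch(range(0, 20000, 7), 500, 5000)) yields every multiple of 7
# between 500 and 5000, newest first, without duplicates.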
def download(self, url, filename, relative=False, headers=None, timeout=5):
"""
Download the file from the given url at the current path
"""
request_url = self.base_url + url if relative else url
floyd_logger.debug("Downloading file from url: {}".format(request_url))
# Auth headers if present
request_headers = {}
if self.auth_header:
request_headers["Authorization"] = self.auth_header
# Add any additional headers
if headers:
request_headers.update(headers)
try:
response = requests.get(request_url,
headers=request_headers,
timeout=timeout,
stream=True)
self.check_response_status(response)
with open(filename, 'wb') as f:
# chunk mode response doesn't have content-length so we are
# using a custom header here
content_length = response.headers.get('x-floydhub-content-length')
if not content_length:
content_length = response.headers.get('content-length')
if content_length:
for chunk in progress.bar(response.iter_content(chunk_size=1024),
expected_size=(int(content_length) / 1024) + 1):
if chunk:
f.write(chunk)
else:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return filename
except requests.exceptions.ConnectionError as exception:
floyd_logger.debug("Exception: {}".format(exception))
sys.exit("Cannot connect to the Floyd server. Check your internet connection.") | 0.003127 |
def _output_work(self, work, root):
"""Saves the TEI XML document `root` at the path `work`."""
output_filename = os.path.join(self._output_dir, work)
tree = etree.ElementTree(root)
tree.write(output_filename, encoding='utf-8', pretty_print=True) | 0.007194 |
def additions_install(**kwargs):
'''
Install VirtualBox Guest Additions. Uses the CD, connected by VirtualBox.
To connect VirtualBox Guest Additions via VirtualBox graphical interface
press 'Host+D' ('Host' is usually 'Right Ctrl').
See https://www.virtualbox.org/manual/ch04.html#idp52733088 for more details.
CLI Example:
.. code-block:: bash
salt '*' vbox_guest.additions_install
salt '*' vbox_guest.additions_install reboot=True
salt '*' vbox_guest.additions_install upgrade_os=True
:param reboot: reboot computer to complete installation
:type reboot: bool
:param upgrade_os: upgrade OS (to ensure the latest versions of the kernel and developer tools are installed)
:type upgrade_os: bool
:return: version of VirtualBox Guest Additions or string with error
'''
with _additions_mounted() as mount_point:
kernel = __grains__.get('kernel', '')
if kernel == 'Linux':
return _additions_install_linux(mount_point, **kwargs) | 0.002904 |
def instance_for_arguments(self, arguments: {Prior: float}):
"""
Create an instance of the associated class for a set of arguments
Parameters
----------
arguments: {Prior: float}
Dictionary mapping priors to attribute name and value pairs
Returns
-------
An instance of the class
"""
for prior, value in arguments.items():
prior.assert_within_limits(value)
model_arguments = {t.name: arguments[t.prior] for t in self.direct_prior_tuples}
constant_arguments = {t.name: t.constant.value for t in self.direct_constant_tuples}
for tuple_prior in self.tuple_prior_tuples:
model_arguments[tuple_prior.name] = tuple_prior.prior.value_for_arguments(arguments)
for prior_model_tuple in self.direct_prior_model_tuples:
model_arguments[prior_model_tuple.name] = prior_model_tuple.prior_model.instance_for_arguments(arguments)
return self.cls(**{**model_arguments, **constant_arguments}) | 0.006567 |
def draggable(self) -> Union[bool, str]:
"""Get ``draggable`` property."""
if not self.hasAttribute('draggable'):
return False
return self.getAttribute('draggable') | 0.01 |
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a text file-like object using a pyparsing definition.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
# TODO: self._line_structures is a work-around and this needs
# a structural fix.
if not self._line_structures:
raise errors.UnableToParseFile(
'Line structure undeclared, unable to proceed.')
encoding = self._ENCODING or parser_mediator.codepage
text_file_object = text_file.TextFile(file_object, encoding=encoding)
try:
line = self._ReadLine(text_file_object, max_len=self.MAX_LINE_LENGTH)
except UnicodeDecodeError:
raise errors.UnableToParseFile(
'Not a text file or encoding not supported.')
if not line:
raise errors.UnableToParseFile('Not a text file.')
if len(line) == self.MAX_LINE_LENGTH or len(
line) == self.MAX_LINE_LENGTH - 1:
logger.debug((
'Trying to read a line and reached the maximum allowed length of '
'{0:d}. The last few bytes of the line are: {1:s} [parser '
'{2:s}]').format(
self.MAX_LINE_LENGTH, repr(line[-10:]), self.NAME))
if not self._IsText(line):
raise errors.UnableToParseFile('Not a text file, unable to proceed.')
if not self.VerifyStructure(parser_mediator, line):
raise errors.UnableToParseFile('Wrong file structure.')
consecutive_line_failures = 0
index = None
# Set the offset to the beginning of the file.
self._current_offset = 0
# Read every line in the text file.
while line:
if parser_mediator.abort:
break
parsed_structure = None
use_key = None
# Try to parse the line using all the line structures.
for index, (key, structure) in enumerate(self._line_structures):
try:
parsed_structure = structure.parseString(line)
except pyparsing.ParseException:
pass
if parsed_structure:
use_key = key
break
if parsed_structure:
self.ParseRecord(parser_mediator, use_key, parsed_structure)
consecutive_line_failures = 0
if index is not None and index != 0:
key_structure = self._line_structures.pop(index)
self._line_structures.insert(0, key_structure)
else:
if len(line) > 80:
line = '{0:s}...'.format(line[:77])
parser_mediator.ProduceExtractionWarning(
'unable to parse log line: {0:s} at offset: {1:d}'.format(
repr(line), self._current_offset))
consecutive_line_failures += 1
if (consecutive_line_failures >
self.MAXIMUM_CONSECUTIVE_LINE_FAILURES):
raise errors.UnableToParseFile(
'more than {0:d} consecutive failures to parse lines.'.format(
self.MAXIMUM_CONSECUTIVE_LINE_FAILURES))
self._current_offset = text_file_object.get_offset()
try:
line = self._ReadLine(text_file_object, max_len=self.MAX_LINE_LENGTH)
except UnicodeDecodeError:
parser_mediator.ProduceExtractionWarning(
'unable to read and decode log line at offset {0:d}'.format(
self._current_offset))
break | 0.008097 |
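# A small, self-contained sketch of the move-to-front heuristic used above: the
# structure that last matched is promoted so the next line tries it first. The
# regex-based "structures" below are illustrative stand-ins for the pyparsing
# grammars, not plaso code.
import re

structures = [('word', re.compile(r'[A-Za-z]+')),
              ('ipv4', re.compile(r'\d+\.\d+\.\d+\.\d+')),
              ('number', re.compile(r'\d+'))]

def parse_line(line):
    """Return (key, match) for the first matching structure, promoting it."""
    for index, (key, pattern) in enumerate(structures):
        match = pattern.match(line)
        if match:
            if index != 0:
                structures.insert(0, structures.pop(index))  # move to front
            return key, match
    return None, None

# parse_line('10.0.0.1 ok') matches 'ipv4' and promotes it, so the next
# IP-led line is matched on the first attempt.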
def _to_proto_sparse_tensor(sparse_tensor, nested_proto,
process_leafs, already_processed):
"""Serializes a `tf.SparseTensor` into `nested_proto`.
Args:
sparse_tensor: An instance of `tf.SparseTensor`.
nested_proto: A `module_pb2.NestedData` instance to be filled from
`sparse_tensor`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
already_processed: Set of already processed objects (used to avoid
infinite recursion).
"""
already_processed.add(id(sparse_tensor))
nested_proto.named_tuple.name = _SPARSE_TENSOR_NAME
for str_key in _SPARSE_TENSOR_FIELD:
tensor = getattr(sparse_tensor, str_key)
nested_proto.named_tuple.map[str_key].value = process_leafs(tensor) | 0.006386 |
def _create_value(self, data, name, spec):
""" Create the value for a field.
:param data: the whole data for the entity (all fields).
:param name: name of the initialized field.
:param spec: spec for the whole entity.
"""
field = getattr(self, 'create_' + name, None)
if field:
# this factory has a special creator function for this field
return field(data, name, spec)
value = data.get(name)
return spec.fields[name].clean(value) | 0.003788 |
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get right enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars | 0.003929 |
def import_locations(self, osm_file):
"""Import OSM data files.
``import_locations()`` returns a list of ``Node`` and ``Way`` objects.
It expects data files conforming to the `OpenStreetMap 0.5 DTD`_, which
is XML such as::
<?xml version="1.0" encoding="UTF-8"?>
<osm version="0.5" generator="upoints/0.9.0">
<node id="0" lat="52.015749" lon="-0.221765" user="jnrowe" visible="true" timestamp="2008-01-25T12:52:11+00:00" />
<node id="1" lat="52.015761" lon="-0.221767" visible="true" timestamp="2008-01-25T12:53:00+00:00">
<tag k="created_by" v="hand" />
<tag k="highway" v="crossing" />
</node>
<node id="2" lat="52.015754" lon="-0.221766" user="jnrowe" visible="true" timestamp="2008-01-25T12:52:30+00:00">
<tag k="amenity" v="pub" />
</node>
<way id="0" visible="true" timestamp="2008-01-25T13:00:00+0000">
<nd ref="0" />
<nd ref="1" />
<nd ref="2" />
<tag k="ref" v="My Way" />
<tag k="highway" v="primary" />
</way>
</osm>
The reader uses the :mod:`ElementTree` module, so should be very fast
when importing data. The above file processed by
``import_locations()`` will return the following `Osm` object::
Osm([
Node(0, 52.015749, -0.221765, True, "jnrowe",
utils.Timestamp(2008, 1, 25, 12, 52, 11), None),
Node(1, 52.015761, -0.221767, True,
utils.Timestamp(2008, 1, 25, 12, 53), None,
{"created_by": "hand", "highway": "crossing"}),
Node(2, 52.015754, -0.221766, True, "jnrowe",
utils.Timestamp(2008, 1, 25, 12, 52, 30),
{"amenity": "pub"}),
Way(0, [0, 1, 2], True, None,
utils.Timestamp(2008, 1, 25, 13, 00),
{"ref": "My Way", "highway": "primary"})],
generator="upoints/0.9.0")
Args:
osm_file (iter): OpenStreetMap data to read
Returns:
Osm: Nodes and ways from the data
.. _OpenStreetMap 0.5 DTD:
http://wiki.openstreetmap.org/wiki/OSM_Protocol_Version_0.5/DTD
"""
self._osm_file = osm_file
data = utils.prepare_xml_read(osm_file, objectify=True)
# This would be a lot simpler if OSM exports defined a namespace
if not data.tag == 'osm':
raise ValueError("Root element %r is not `osm'" % data.tag)
self.version = data.get('version')
if not self.version:
raise ValueError('No specified OSM version')
elif not self.version == '0.5':
raise ValueError('Unsupported OSM version %r' % self.version)
self.generator = data.get('generator')
for elem in data.getchildren():
if elem.tag == 'node':
self.append(Node.parse_elem(elem))
elif elem.tag == 'way':
self.append(Way.parse_elem(elem)) | 0.001572 |
def info(message, domain):
"""Log simple info"""
if domain in Logger._ignored_domains:
return
Logger._log(None, message, INFO, domain) | 0.011696 |
def from_json(json_data):
"""
Returns a pyalveo.OAuth2 given a json string built from the oauth.to_json() method.
"""
#If we have a string, then decode it, otherwise assume it's already decoded
if isinstance(json_data, str):
data = json.loads(json_data)
else:
data = json_data
oauth_dict = {
'client_id':data.get('client_id',None),
'client_secret':data.get('client_secret',None),
'redirect_url':data.get('redirect_url',None),
}
oauth = OAuth2(api_url=data.get('api_url',None), api_key=data.get('api_key',None),oauth=oauth_dict, verifySSL=data.get('verifySSL',True))
oauth.token = data.get('token',None)
oauth.state = data.get('state',None)
oauth.auth_url = data.get('auth_url',None)
return oauth | 0.020948 |
def deployment(
*,
block_uri: URI,
contract_instance: str,
contract_type: str,
address: HexStr,
transaction: HexStr = None,
block: HexStr = None,
deployment_bytecode: Dict[str, Any] = None,
runtime_bytecode: Dict[str, Any] = None,
compiler: Dict[str, Any] = None,
) -> Manifest:
"""
Returns a manifest, with the newly included deployment. Requires a valid blockchain URI,
however no validation is provided that this URI is unique amongst the other deployment
URIs, so the user must take care that each blockchain URI represents a unique blockchain.
"""
return _deployment(
contract_instance,
contract_type,
deployment_bytecode,
runtime_bytecode,
compiler,
block_uri,
address,
transaction,
block,
) | 0.004779 |
def _AdjustForTimeZoneOffset(
self, year, month, day_of_month, hours, minutes, time_zone_offset):
"""Adjusts the date and time values for a time zone offset.
Args:
year (int): year e.g. 1970.
month (int): month, where 1 represents January.
day_of_month (int): day of the month, where 1 represents the first day.
hours (int): hours.
minutes (int): minutes.
time_zone_offset (int): time zone offset in number of minutes from UTC.
Returns:
tuple[int, int, int, int, int]: time zone corrected year, month,
day_of_month, hours and minutes values.
"""
hours_from_utc, minutes_from_utc = divmod(time_zone_offset, 60)
minutes += minutes_from_utc
# Since divmod makes sure the sign of minutes_from_utc is positive
# we only need to check the upper bound here, because hours_from_utc
# remains signed it is corrected accordingly.
if minutes >= 60:
minutes -= 60
hours += 1
hours += hours_from_utc
if hours < 0:
hours += 24
day_of_month -= 1
elif hours >= 24:
hours -= 24
day_of_month += 1
days_per_month = self._GetDaysPerMonth(year, month)
if day_of_month < 1:
month -= 1
if month < 1:
month = 12
year -= 1
day_of_month += self._GetDaysPerMonth(year, month)
elif day_of_month > days_per_month:
month += 1
if month > 12:
month = 1
year += 1
day_of_month -= days_per_month
return year, month, day_of_month, hours, minutes | 0.008371 |
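# A small reference sketch showing how the manual year/month/day carry handling
# above can be cross-checked against the standard library's datetime arithmetic.
# Illustrative only, not part of dfdatetime.
import datetime

def adjust_with_datetime(year, month, day, hours, minutes, offset_minutes):
    """Apply an offset in minutes and return the adjusted date/time fields."""
    moment = datetime.datetime(year, month, day, hours, minutes)
    moment += datetime.timedelta(minutes=offset_minutes)
    return moment.year, moment.month, moment.day, moment.hour, moment.minute

# Example: a -90 minute offset rolls 2020-01-01 00:30 back into the previous year.
# adjust_with_datetime(2020, 1, 1, 0, 30, -90) -> (2019, 12, 31, 23, 0)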
def _CalculateHashesFileEntry(
self, file_system, file_entry, parent_full_path, output_writer):
"""Recursive calculates hashes starting with the file entry.
Args:
file_system (dfvfs.FileSystem): file system.
file_entry (dfvfs.FileEntry): file entry.
parent_full_path (str): full path of the parent file entry.
output_writer (StdoutWriter): output writer.
"""
# Since every file system implementation can have their own path
# segment separator we are using JoinPath to be platform and file system
# type independent.
full_path = file_system.JoinPath([parent_full_path, file_entry.name])
for data_stream in file_entry.data_streams:
hash_value = self._CalculateHashDataStream(file_entry, data_stream.name)
display_path = self._GetDisplayPath(
file_entry.path_spec, full_path, data_stream.name)
output_writer.WriteFileHash(display_path, hash_value or 'N/A')
for sub_file_entry in file_entry.sub_file_entries:
self._CalculateHashesFileEntry(
file_system, sub_file_entry, full_path, output_writer) | 0.004529 |
def generate(self):
'''
Generate noise samples.
Returns:
`np.ndarray` of samples.
'''
generated_arr = np.random.uniform(
low=0.1,
high=0.9,
size=((self.__batch_size, self.__seq_len, self.__dim))
)
if self.noise_sampler is not None:
self.noise_sampler.output_shape = generated_arr.shape
generated_arr += self.noise_sampler.generate()
return generated_arr | 0.005803 |
def rdfgraph_to_ontol(rg):
"""
Return an Ontology object from an rdflib graph object
Status: Incomplete
"""
digraph = networkx.MultiDiGraph()
from rdflib.namespace import RDF
label_map = {}
for c in rg.subjects(RDF.type, OWL.Class):
cid = contract_uri_wrap(c)
logging.info("C={}".format(cid))
for lit in rg.objects(c, RDFS.label):
label_map[cid] = lit.value
digraph.add_node(cid, label=lit.value)
for s in rg.objects(c, RDFS.subClassOf):
# todo - blank nodes
sid = contract_uri_wrap(s)
digraph.add_edge(sid, cid, pred='subClassOf')
logging.info("G={}".format(digraph))
payload = {
'graph': digraph,
#'xref_graph': xref_graph,
#'graphdoc': obographdoc,
#'logical_definitions': logical_definitions
}
ont = Ontology(handle='wd', payload=payload)
return ont | 0.005297 |
def update_status(self, *args, **kwargs):
""" :reference: https://dev.twitter.com/rest/reference/post/statuses/update
:allowed_param:'status', 'in_reply_to_status_id', 'in_reply_to_status_id_str', 'auto_populate_reply_metadata', 'lat', 'long', 'source', 'place_id', 'display_coordinates', 'media_ids'
"""
post_data = {}
media_ids = kwargs.pop('media_ids', None)
if media_ids is not None:
post_data['media_ids'] = list_to_csv(media_ids)
return bind_api(
api=self,
path='/statuses/update.json',
method='POST',
payload_type='status',
allowed_param=['status', 'in_reply_to_status_id', 'in_reply_to_status_id_str',
'auto_populate_reply_metadata', 'lat', 'long', 'source', 'place_id', 'display_coordinates'],
require_auth=True
)(post_data=post_data, *args, **kwargs) | 0.006383 |
def _flatten_projection(cls, projection):
"""
Flatten a structured projection (structured projections add support for
projecting fields that will be dereferenced).
"""
# If `projection` is empty return a full projection
if not projection:
return {'__': False}, {}, {}
# Flatten the projection
flat_projection = {}
references = {}
subs = {}
inclusive = True
for key, value in deepcopy(projection).items():
if isinstance(value, dict):
# Store a reference/SubFrame projection
if '$ref' in value:
references[key] = value
elif '$sub' in value or '$sub.' in value:
subs[key] = value
flat_projection[key] = True
elif key == '$ref':
# Strip any `$ref` key
continue
elif key == '$sub' or key == '$sub.':
# Strip any `$sub` key
continue
else:
# Store the root projection value
flat_projection[key] = value
inclusive = False
# If only references and `SubFrames` were specified in the projection
# then return a full projection.
if inclusive:
flat_projection = {'__': False}
return flat_projection, references, subs | 0.001409 |
def commit(jaide, commands, check, sync, comment, confirm, at_time, blank):
""" Execute a commit against the device.
Purpose: This function will send set commands to a device, and commit
| the changes. Options exist for confirming, comments,
| synchronizing, checking, blank commits, or delaying to a later
| time/date.
@param jaide: The jaide connection to the device.
@type jaide: jaide.Jaide object
@param commands: String containing the set command to be sent to the
| device. It can be a python list of strings, a single set
| command, a comma separated string of commands, or a
| string filepath pointing to a file with set commands
| on each line.
@type commands: str or list
@param check: A bool set to true to only run a commit check, and not
| commit any changes. Useful for checking syntax of set
| commands.
@type check: bool
@param sync: A bool set to true to sync the commit across both REs.
@type sync: bool
@param comment: A string that will be logged to the commit log
| describing the commit.
@type comment: str
@param confirm: An integer of seconds to commit confirm for.
@type confirm: int
@param at_time: A string containing the time or time and date of when
| the commit should happen. Junos is expecting one of two
| formats:
| A time value of the form hh:mm[:ss] (hours, minutes,
| and optionally seconds)
| A date and time value of the form yyyy-mm-dd hh:mm[:ss]
| (year, month, date, hours, minutes, and optionally
| seconds)
@type at_time: str
@param blank: A bool set to true to only make a blank commit. A blank
| commit makes a commit, but doesn't have any set commands
| associated with it, so no changes are made, but a commit
| does happen.
@type blank: bool
@returns: The output from the device.
@rtype: str
"""
# set the commands to do nothing if the user wants a blank commit.
if blank:
commands = 'annotate system ""'
output = ""
# add show | compare output
if commands != "":
output += color("show | compare:\n", 'yel')
try:
output += color_diffs(jaide.compare_config(commands)) + '\n'
except RPCError as e:
output += color("Could not get config comparison results before"
" committing due to the following error:\n%s" %
str(e))
# If they just want to validate the config, without committing
if check:
output += color("Commit check results from: %s\n" % jaide.host, 'yel')
try:
output += jaide.commit_check(commands) + '\n'
except RPCError:
output += color("Uncommitted changes left on the device or someone"
" else is in edit mode, couldn't lock the "
"candidate configuration.\n", 'red')
except:
output += color("Failed to commit check on device %s for an "
"unknown reason.\n" % jaide.host, 'red')
# Actually make a commit
else:
output += color("Attempting to commit on device: %s\n" % jaide.host,
'yel')
try:
results = jaide.commit(confirmed=confirm, comment=comment,
at_time=at_time, synchronize=sync,
commands=commands)
except RPCError as e:
output += color('Commit could not be completed on this device, due'
' to the following error(s):\n' + str(e), 'red')
# Jaide command succeeded, parse results
else:
if 'commit complete' in results:
output += results.split('commit complete')[0] + '\n'
output += color('Commit complete on device: %s\n' % jaide.host)
if confirm:
output += color('Commit confirm will rollback in %s '
'minutes unless you commit again.\n' %
str(confirm/60))
elif 'commit at' in results:
output += results.split('commit at will be executed at')[0]
output += color('Commit staged to happen at: %s\n' % at_time)
else:
if 'failed' in results:
output += (results.replace('failed', color('failed',
'red')))
if 'red' in results:
output += (results.replace('red', color('red',
'red')))
output += color('Commit Failed on device: %s\n' % jaide.host,
'red')
return output | 0.000397 |
def _parse_textgroup(self, cts_file):
""" Parses a textgroup from a cts file
:param cts_file: Path to the CTS File
:type cts_file: str
:return: CtsTextgroupMetadata and Current file
"""
with io.open(cts_file) as __xml__:
return self.classes["textgroup"].parse(
resource=__xml__
), cts_file | 0.005291 |
def daisy_chains(self, kih, max_path_length=None):
""" Generator for daisy chains (complementary kihs) associated with a knob.
Notes
-----
Daisy chain graph is the directed graph with edges from knob residue to each hole residue for each KnobIntoHole
in self.
Given a KnobIntoHole, the daisy chains are non-trivial paths in this graph (walks along the directed edges)
that begin and end at the knob.
These paths must be of length <= max_path_length
Parameters
----------
kih : KnobIntoHole interaction.
max_path_length : int or None
Maximum length of a daisy chain.
Defaults to number of chains in self.ampal_parent.
This is the maximum sensible value. Larger values than this will cause slow running of this function.
"""
if max_path_length is None:
max_path_length = len(self.ampal_parent)
g = self.daisy_chain_graph
paths = networkx.all_simple_paths(g, source=kih.knob, target=kih.knob, cutoff=max_path_length)
return paths | 0.006284 |
def get_palette(samples, options, return_mask=False, kmeans_iter=40):
'''Extract the palette for the set of sampled RGB values. The first
palette entry is always the background color; the rest are determined
from foreground pixels by running K-means clustering. Returns the
palette, as well as a mask corresponding to the foreground pixels.
'''
if not options.quiet:
print(' getting palette...')
bg_color = get_bg_color(samples, 6)
fg_mask = get_fg_mask(bg_color, samples, options)
centers, _ = kmeans(samples[fg_mask].astype(np.float32),
options.num_colors-1,
iter=kmeans_iter)
palette = np.vstack((bg_color, centers)).astype(np.uint8)
if not return_mask:
return palette
else:
return palette, fg_mask | 0.001222 |
def InitSampCheck(self):
"""make an interactive grid in which users can edit sample names
as well as which site a sample belongs to"""
self.sample_window += 1
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
if self.sample_window == 1:
text = """Step 2:
Check that all samples are correctly named,
and that they belong to the correct site
(if site name is simply wrong, that will be fixed in step 3)"""
step_label = wx.StaticText(self.panel, label=text)#, size=(900, 100))
else:
text = """Step 4:
Some of the data from the er_sites table has propagated into er_samples.
Check that these data are correct, and fill in missing cells using controlled vocabularies.
The columns for class, lithology, and type can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(see Help button for more details)\n\n** Denotes controlled vocabulary"""
step_label = wx.StaticText(self.panel, label=text)#, size=(900, 100))
if self.sample_window == 1:
# provide no extra headers
headers = {'sample': {'er': [[], [], []],
'pmag': [[], [], []]}}
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'sample',
headers, self.panel,
'site')
if self.sample_window > 1:
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'sample',
self.er_magic_data.headers, self.panel,
'site')
self.samp_grid = self.grid_builder.make_grid(incl_pmag=False)
self.samp_grid.InitUI()
self.grid_builder.add_data_to_grid(self.samp_grid, 'sample', incl_pmag=False)
self.grid = self.samp_grid
sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites))
self.drop_down_menu = drop_down_menus.Menus("sample", self, self.samp_grid, sites) # initialize all needed drop-down menus
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addSiteButton = wx.Button(self.panel, label="Add a new site")
self.Bind(wx.EVT_BUTTON, self.on_addSiteButton, self.addSiteButton)
hbox_one.Add(self.addSiteButton, flag=wx.RIGHT, border=10)
if self.sample_window == 1:
html_help = "ErMagicSampleHelp1.html"
if self.sample_window > 1:
html_help = "ErMagicSampleHelp.html"
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, html_help), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.samp_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
next_dia = self.InitSiteCheck if self.sample_window < 2 else self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.samp_grid, next_dia=next_dia), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSpecCheck if self.sample_window < 2 else self.InitSiteCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia=previous_dia), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'sample', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(step_label, flag=wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.samp_grid, flag=wx.ALL, border=10) # using wx.EXPAND or not does not affect re-size problem
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
## this combination may prevent a display error that (without the fix) only resolves on manually resizing the window
self.panel.Refresh()
self.samp_grid.ForceRefresh()
self.panel.Refresh()
self.Refresh()
# this prevents display errors
self.Hide()
self.Show() | 0.007875 |
def _escapeText(text):
""" Adds backslash-escapes to property value characters that need them."""
output = ""
index = 0
match = reCharsToEscape.search(text, index)
while match:
output = output + text[index:match.start()] + '\\' + text[match.start()]
index = match.end()
match = reCharsToEscape.search(text, index)
output = output + text[index:]
return output | 0.02965 |
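# An equivalent, more compact formulation of the same escaping using re.sub.
# reCharsToEscape is assumed to be a compiled pattern matching the characters
# that need escaping; the pattern below is only an illustrative stand-in.
import re

reCharsToEscape = re.compile(r'[\\\]:]')  # illustrative: backslash, ']' and ':'

def _escapeTextSub(text):
    """Backslash-escape every character matched by reCharsToEscape."""
    return reCharsToEscape.sub(lambda m: '\\' + m.group(0), text)

# _escapeTextSub('a]b:c') -> 'a\\]b\\:c'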
def cached_make_env(make_env):
"""
Only creates a new environment from the provided function if one has not yet already been
created. This is useful here because we need to infer certain properties of the env, e.g.
its observation and action spaces, without any intent of actually using it.
"""
if make_env not in CACHED_ENVS:
env = make_env()
CACHED_ENVS[make_env] = env
return CACHED_ENVS[make_env] | 0.006757 |
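# A hedged usage sketch: the cache is keyed by the factory callable itself, so
# repeated calls with the same function return the same environment instance.
# CACHED_ENVS is assumed to be a module-level dict (e.g. CACHED_ENVS = {}), and
# make_dummy_env below is an illustrative stand-in for a real constructor.
CACHED_ENVS = {}

def make_dummy_env():
    return object()  # stand-in for an expensive environment constructor

env_a = cached_make_env(make_dummy_env)
env_b = cached_make_env(make_dummy_env)
assert env_a is env_b  # the second call reuses the cached instance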
def create_qualification_type(self,
name,
description,
status,
keywords=None,
retry_delay=None,
test=None,
answer_key=None,
answer_key_xml=None,
test_duration=None,
auto_granted=False,
auto_granted_value=1):
"""
Create a new Qualification Type.
name: This will be visible to workers and must be unique for a
given requester.
description: description shown to workers. Max 2000 characters.
status: 'Active' or 'Inactive'
keywords: list of keyword strings or comma separated string.
Max length of 1000 characters when concatenated with commas.
retry_delay: number of seconds after requesting a
qualification the worker must wait before they can ask again.
If not specified, workers can only request this qualification
once.
test: a QuestionForm
answer_key: an XML string of your answer key, for automatically
scored qualification tests.
(Consider implementing an AnswerKey class to support this.)
test_duration: the number of seconds a worker has to complete the test.
auto_granted: if True, requests for the Qualification are granted
immediately. Can't coexist with a test.
auto_granted_value: auto_granted qualifications are given this value.
"""
params = {'Name' : name,
'Description' : description,
'QualificationTypeStatus' : status,
}
if retry_delay is not None:
params['RetryDelayInSeconds'] = retry_delay
if test is not None:
assert(isinstance(test, QuestionForm))
assert(test_duration is not None)
params['Test'] = test.get_as_xml()
if test_duration is not None:
params['TestDurationInSeconds'] = test_duration
if answer_key is not None:
if isinstance(answer_key, basestring):
params['AnswerKey'] = answer_key # xml
else:
raise TypeError
# Eventually someone will write an AnswerKey class.
if auto_granted:
assert(test is None)
params['AutoGranted'] = True
params['AutoGrantedValue'] = auto_granted_value
if keywords:
params['Keywords'] = self.get_keywords_as_string(keywords)
return self._process_request('CreateQualificationType', params,
[('QualificationType', QualificationType),]) | 0.006514 |
def remove_flag(self, flag):
"""
Remove a flag from the flags and record that this attribute has changed so we
can regenerate it when outputting text.
"""
super(Entry, self).remove_flag(flag)
self._changed_attrs.add('flags') | 0.007547 |
def snapped_speed_limits(client, path):
"""Returns the posted speed limit (in km/h) for given road segments.
The provided points will first be snapped to the most likely roads the
vehicle was traveling along.
:param path: The path of points to be snapped.
:type path: a single location, or a list of locations, where a
location is a string, dict, list, or tuple
:rtype: dict with a list of speed limits and a list of the snapped points.
"""
params = {"path": convert.location_list(path)}
return client._request("/v1/speedLimits", params,
base_url=_ROADS_BASE_URL,
accepts_clientid=False,
extract_body=_roads_extract) | 0.00545 |
def disable_command(self, command: str, message_to_print: str) -> None:
"""
Disable a command and overwrite its functions
:param command: the command being disabled
:param message_to_print: what to print when this command is run or help is called on it while disabled
The variable COMMAND_NAME can be used as a placeholder for the name of the
command being disabled.
ex: message_to_print = "{} is currently disabled".format(COMMAND_NAME)
"""
import functools
# If the command is already disabled, then return
if command in self.disabled_commands:
return
# Make sure this is an actual command
command_function = self.cmd_func(command)
if command_function is None:
raise AttributeError("{} does not refer to a command".format(command))
help_func_name = HELP_FUNC_PREFIX + command
# Add the disabled command record
self.disabled_commands[command] = DisabledCommand(command_function=command_function,
help_function=getattr(self, help_func_name, None))
# Overwrite the command and help functions to print the message
new_func = functools.partial(self._report_disabled_command_usage,
message_to_print=message_to_print.replace(COMMAND_NAME, command))
setattr(self, self.cmd_func_name(command), new_func)
setattr(self, help_func_name, new_func) | 0.005607 |
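# A minimal, standalone sketch of the same pattern: remember the original
# callable, then rebind the attribute to a functools.partial that reports the
# command as disabled. Everything below is illustrative, not cmd2's API.
import functools

class ToyShell:
    def do_greet(self):
        print("hello")

    def _report_disabled(self, message_to_print):
        print(message_to_print)

    def disable(self, name, message):
        original = getattr(self, 'do_' + name)
        self._disabled = getattr(self, '_disabled', {})
        self._disabled[name] = original  # keep it around for re-enabling
        setattr(self, 'do_' + name,
                functools.partial(self._report_disabled, message_to_print=message))

# shell = ToyShell(); shell.disable('greet', 'greet is currently disabled')
# shell.do_greet() now prints the message instead of greeting.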
def hierarchy_nav(self, obj):
"""Renders hierarchy navigation elements (folders)."""
result_repr = '' # For items without children.
ch_count = getattr(obj, Hierarchy.CHILD_COUNT_MODEL_ATTR, 0)
is_parent_link = getattr(obj, Hierarchy.UPPER_LEVEL_MODEL_ATTR, False)
if is_parent_link or ch_count: # For items with children and parent links.
icon = 'icon icon-folder'
title = _('Objects inside: %s') % ch_count
if is_parent_link:
icon = 'icon icon-folder-up'
title = _('Upper level')
url = './'
if obj.pk:
url = '?%s=%s' % (Hierarchy.PARENT_ID_QS_PARAM, obj.pk)
if self._current_changelist.is_popup:
qs_get = copy(self._current_changelist._request.GET)
try:
del qs_get[Hierarchy.PARENT_ID_QS_PARAM]
except KeyError:
pass
qs_get = qs_get.urlencode()
url = ('%s&%s' if '?' in url else '%s?%s') % (url, qs_get)
result_repr = format_html('<a href="{0}" class="{1}" title="{2}"></a>', url, icon, force_text(title))
return result_repr | 0.003226 |
def uval(self):
"Accesses :attr:`value` and :attr:`uncert` as a :class:`pwkit.msmt.Uval`."
from .msmt import Uval
return Uval.from_norm(self.value, self.uncert) | 0.016304 |
def _query(action=None,
command=None,
args=None,
method='GET',
header_dict=None,
data=None):
'''
Make a web call to RallyDev.
'''
token = _get_token()
username = __opts__.get('rallydev', {}).get('username', None)
password = __opts__.get('rallydev', {}).get('password', None)
path = 'https://rally1.rallydev.com/slm/webservice/v2.0/'
if action:
path += action
if command:
path += '/{0}'.format(command)
log.debug('RallyDev URL: %s', path)
if not isinstance(args, dict):
args = {}
args['key'] = token
if header_dict is None:
header_dict = {'Content-type': 'application/json'}
if method != 'POST':
header_dict['Accept'] = 'application/json'
decode = True
if method == 'DELETE':
decode = False
return_content = None
result = salt.utils.http.query(
path,
method,
params=args,
data=data,
header_dict=header_dict,
decode=decode,
decode_type='json',
text=True,
status=True,
username=username,
password=password,
cookies=True,
persist_session=True,
opts=__opts__,
)
log.debug('RallyDev Response Status Code: %s', result['status'])
if 'error' in result:
log.error(result['error'])
return [result['status'], result['error']]
return [result['status'], result.get('dict', {})] | 0.00067 |
def check_address_has_code(
client: 'JSONRPCClient',
address: Address,
contract_name: str = '',
):
""" Checks that the given address contains code. """
result = client.web3.eth.getCode(to_checksum_address(address), 'latest')
if not result:
if contract_name:
formated_contract_name = '[{}]: '.format(contract_name)
else:
formated_contract_name = ''
raise AddressWithoutCode(
'{}Address {} does not contain code'.format(
formated_contract_name,
to_checksum_address(address),
),
) | 0.001595 |
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text | 0.015695 |
def _get_cycles(graph_dict, path, visited, result, vertice):
"""recursive function doing the real work for get_cycles"""
if vertice in path:
cycle = [vertice]
for node in path[::-1]:
if node == vertice:
break
cycle.insert(0, node)
# make a canonical representation
start_from = min(cycle)
index = cycle.index(start_from)
cycle = cycle[index:] + cycle[0:index]
# append it to result if not already in
if cycle not in result:
result.append(cycle)
return
path.append(vertice)
try:
for node in graph_dict[vertice]:
# don't check already visited nodes again
if node not in visited:
_get_cycles(graph_dict, path, visited, result, node)
visited.add(node)
except KeyError:
pass
path.pop() | 0.001111 |
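# A hedged usage sketch for the helper above: a thin driver of the kind that
# typically wraps it (the get_cycles name and signature here are assumptions,
# not necessarily the library's public API).
def get_cycles(graph_dict, vertices=None):
    """Return the canonical cycles found in an adjacency-dict graph."""
    result = []
    visited = set()
    for vertice in vertices or graph_dict.keys():
        if vertice not in visited:
            _get_cycles(graph_dict, [], visited, result, vertice)
    return result

# get_cycles({1: [2], 2: [3], 3: [1, 4], 4: []}) -> [[1, 2, 3]]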
def _decode_length(self, offset, sizeof_char):
"""
Generic Length Decoding at offset of string
The method works for both 8 and 16 bit Strings.
Length checks are enforced:
* 8 bit strings: maximum of 0x7FFF bytes (See
http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/ResourceTypes.cpp#692)
* 16 bit strings: maximum of 0x7FFFFFF bytes (See
http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/ResourceTypes.cpp#670)
:param offset: offset into the string data section of the beginning of
the string
:param sizeof_char: number of bytes per char (1 = 8bit, 2 = 16bit)
:returns: tuple of (length, read bytes)
"""
sizeof_2chars = sizeof_char << 1
fmt = "<2{}".format('B' if sizeof_char == 1 else 'H')
highbit = 0x80 << (8 * (sizeof_char - 1))
length1, length2 = unpack(fmt, self.m_charbuff[offset:(offset + sizeof_2chars)])
if (length1 & highbit) != 0:
length = ((length1 & ~highbit) << (8 * sizeof_char)) | length2
size = sizeof_2chars
else:
length = length1
size = sizeof_char
if sizeof_char == 1:
assert length <= 0x7FFF, "length of UTF-8 string is too large! At offset={}".format(offset)
else:
assert length <= 0x7FFFFFFF, "length of UTF-16 string is too large! At offset={}".format(offset)
return length, size | 0.00334 |
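# A standalone round-trip sketch of the length encoding handled above (8-bit
# string case): lengths above 0x7F are stored in two bytes with the high bit of
# the first byte set. Illustrative only, not androguard code.
from struct import pack, unpack

def encode_length_u8(length):
    """Encode a UTF-8 string length (max 0x7FFF) into one or two bytes."""
    assert length <= 0x7FFF
    if length > 0x7F:
        return pack('<2B', (length >> 8) | 0x80, length & 0xFF)
    return pack('<B', length)

def decode_length_u8(buf):
    """Return (length, bytes consumed), mirroring the high-bit check above."""
    length1, length2 = unpack('<2B', buf[:2].ljust(2, b'\x00'))
    if length1 & 0x80:
        return ((length1 & 0x7F) << 8) | length2, 2
    return length1, 1

# decode_length_u8(encode_length_u8(300)) -> (300, 2)
# decode_length_u8(encode_length_u8(42)) -> (42, 1)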
def _create_disks(service_instance, disks, scsi_controllers=None, parent=None):
'''
Returns a list of disk specs representing the disks to be created for a
virtual machine
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
disks
List of disks with properties
scsi_controllers
List of SCSI controllers
parent
Parent object reference
.. code-block:: bash
disk:
adapter: 'Hard disk 1'
size: 16
unit: GB
address: '0:0'
controller: 'SCSI controller 0'
thin_provision: False
eagerly_scrub: False
datastore: 'myshare'
filename: 'vm/mydisk.vmdk'
'''
disk_specs = []
keys = range(-2000, -2050, -1)
if disks:
devs = [disk['adapter'] for disk in disks]
log.trace('Creating disks %s', devs)
for disk, key in zip(disks, keys):
# create the disk
filename, datastore, datastore_ref = None, None, None
size = float(disk['size'])
# when creating both SCSI controller and Hard disk at the same time
# we need the randomly assigned (temporary) key of the newly created
# SCSI controller
controller_key = 1000 # Default is the first SCSI controller
if 'address' in disk: # 0:0
controller_bus_number, unit_number = disk['address'].split(':')
controller_bus_number = int(controller_bus_number)
unit_number = int(unit_number)
controller_key = _get_scsi_controller_key(
controller_bus_number,
scsi_ctrls=scsi_controllers)
elif 'controller' in disk:
for contr in scsi_controllers:
if contr['label'] == disk['controller']:
controller_key = contr['key']
break
else:
raise salt.exceptions.VMwareObjectNotFoundError(
'The given controller does not exist: '
'{0}'.format(disk['controller']))
if 'datastore' in disk:
datastore_ref = \
salt.utils.vmware.get_datastores(
service_instance, parent,
datastore_names=[disk['datastore']])[0]
datastore = disk['datastore']
if 'filename' in disk:
filename = disk['filename']
# XOR filename, datastore
if (not filename and datastore) or (filename and not datastore):
raise salt.exceptions.ArgumentValueError(
'You must specify both filename and datastore attributes'
' to place your disk to a specific datastore '
'{0}, {1}'.format(datastore, filename))
disk_spec = _apply_hard_disk(
unit_number,
key,
disk_label=disk['adapter'],
size=size,
unit=disk['unit'],
controller_key=controller_key,
operation='add',
thin_provision=disk['thin_provision'],
eagerly_scrub=disk['eagerly_scrub'] if 'eagerly_scrub' in disk else None,
datastore=datastore_ref,
filename=filename)
disk_specs.append(disk_spec)
unit_number += 1
return disk_specs | 0.000602 |
def inception_v3(inputs,
dropout_keep_prob=0.8,
num_classes=1000,
is_training=True,
restore_logits=True,
scope=''):
"""Latest Inception from http://arxiv.org/abs/1512.00567.
"Rethinking the Inception Architecture for Computer Vision"
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna
Args:
inputs: a tensor of size [batch_size, height, width, channels].
dropout_keep_prob: dropout keep_prob.
num_classes: number of predicted classes.
is_training: whether is training or not.
restore_logits: whether or not the logits layers should be restored.
Useful for fine-tuning a model with different num_classes.
scope: Optional scope for name_scope.
Returns:
a list containing 'logits', 'aux_logits' Tensors.
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
with tf.name_scope(scope, 'inception_v3', [inputs]):
with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
is_training=is_training):
with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
stride=1, padding='VALID'):
# 299 x 299 x 3
end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
scope='conv0')
# 149 x 149 x 32
end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
scope='conv1')
# 147 x 147 x 32
end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
padding='SAME', scope='conv2')
# 147 x 147 x 64
end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
stride=2, scope='pool1')
# 73 x 73 x 64
end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
scope='conv3')
# 73 x 73 x 80.
end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
scope='conv4')
# 71 x 71 x 192.
end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
stride=2, scope='pool2')
# 35 x 35 x 192.
net = end_points['pool2']
# Inception blocks
with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
stride=1, padding='SAME'):
# mixed: 35 x 35 x 256.
with tf.variable_scope('mixed_35x35x256a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x256a'] = net
# mixed_1: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x288a'] = net
# mixed_2: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x288b'] = net
# mixed_3: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768a'):
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
stride=2, padding='VALID')
with tf.variable_scope('branch_pool'):
branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
net = tf.concat(axis=3, values=[branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_17x17x768a'] = net
# mixed4: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 128, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 128, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768b'] = net
# mixed_5: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768c'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 160, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 160, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768c'] = net
# mixed_6: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768d'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 160, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 160, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768d'] = net
# mixed_7: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768e'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 192, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 192, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768e'] = net
# Auxiliary Head logits
aux_logits = tf.identity(end_points['mixed_17x17x768e'])
with tf.variable_scope('aux_logits'):
aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
padding='VALID')
aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
# Shape of feature map before the final layer.
shape = aux_logits.get_shape()
aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
padding='VALID')
aux_logits = ops.flatten(aux_logits)
aux_logits = ops.fc(aux_logits, num_classes, activation=None,
stddev=0.001, restore=restore_logits)
end_points['aux_logits'] = aux_logits
# mixed_8: 8 x 8 x 1280.
      # Note: the scope below is intentionally left unchanged so that
      # previously trained checkpoints remain loadable.
      # TODO: fix the scope name when appropriate.
with tf.variable_scope('mixed_17x17x1280a'):
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 192, [1, 1])
branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
padding='VALID')
with tf.variable_scope('branch7x7x3'):
branch7x7x3 = ops.conv2d(net, 192, [1, 1])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
stride=2, padding='VALID')
with tf.variable_scope('branch_pool'):
branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
net = tf.concat(axis=3, values=[branch3x3, branch7x7x3, branch_pool])
end_points['mixed_17x17x1280a'] = net
# mixed_9: 8 x 8 x 2048.
with tf.variable_scope('mixed_8x8x2048a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 320, [1, 1])
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [1, 1])
branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]),
ops.conv2d(branch3x3, 384, [3, 1])])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 448, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]),
ops.conv2d(branch3x3dbl, 384, [3, 1])])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_8x8x2048a'] = net
# mixed_10: 8 x 8 x 2048.
with tf.variable_scope('mixed_8x8x2048b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 320, [1, 1])
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [1, 1])
branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]),
ops.conv2d(branch3x3, 384, [3, 1])])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 448, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]),
ops.conv2d(branch3x3dbl, 384, [3, 1])])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_8x8x2048b'] = net
# Final pooling and prediction
with tf.variable_scope('logits'):
shape = net.get_shape()
net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
# 1 x 1 x 2048
net = ops.dropout(net, dropout_keep_prob, scope='dropout')
net = ops.flatten(net, scope='flatten')
# 2048
logits = ops.fc(net, num_classes, activation=None, scope='logits',
restore=restore_logits)
# 1000
end_points['logits'] = logits
end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
return logits, end_points | 0.007303 |
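A minimal sketch of the branch-and-concatenate pattern each "mixed" block above uses, written with plain TensorFlow 1.x layers rather than the slim-style `ops` helpers (the filter sizes are illustrative, not the exact Inception v3 configuration):
import tensorflow as tf

def mixed_block_sketch(net):
    # Parallel convolution towers computed from the same input tensor.
    branch1x1 = tf.layers.conv2d(net, 64, [1, 1], padding='same', activation=tf.nn.relu)
    branch5x5 = tf.layers.conv2d(net, 48, [1, 1], padding='same', activation=tf.nn.relu)
    branch5x5 = tf.layers.conv2d(branch5x5, 64, [5, 5], padding='same', activation=tf.nn.relu)
    branch_pool = tf.layers.average_pooling2d(net, [3, 3], strides=1, padding='same')
    branch_pool = tf.layers.conv2d(branch_pool, 32, [1, 1], padding='same', activation=tf.nn.relu)
    # NHWC tensors concatenate along the channel axis (axis=3), exactly as above.
    return tf.concat(axis=3, values=[branch1x1, branch5x5, branch_pool])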
def AutorizarLiquidacion(self):
        "Generate or adjust a single settlement (liquidación) and obtain the CAE"
        # remove elements that do not apply because they are empty:
for campo in ["guia", "dte", "gasto", "tributo"]:
if campo in self.solicitud and not self.solicitud[campo]:
del self.solicitud[campo]
for item in self.solicitud['itemDetalleLiquidacion']:
if not item.get("liquidacionCompraAsociada", True):
del item["liquidacionCompraAsociada"]
        # call the web service:
ret = self.client.generarLiquidacion(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
solicitud=self.solicitud,
)
        # analyze the response
ret = ret['respuesta']
self.__analizar_errores(ret)
self.AnalizarLiquidacion(ret)
return True | 0.002049 |
def get_first(self, sql):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
"""
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchone() | 0.005666 |
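A brief usage sketch (the hook instance name and SQL statement are hypothetical):
row = hook.get_first("SELECT COUNT(*) FROM users")
print(row[0])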
def draw_hydrogen_bonds(self,color="black"):
"""For each bond that has been determined to be important, a line gets drawn.
"""
self.draw_hbonds=""
if self.hbonds!=None:
for bond in self.hbonds.hbonds_for_drawing:
x = str((self.molecule.x_dim-self.molecule.molsize1)/2)
y = str((self.molecule.y_dim-self.molecule.molsize2)/2)
                self.draw_hbonds += "<g id='"+str(bond[0])+"' class='HBonds' transform='translate("+x+","+y+")' x='"+x+"' y='"+y+"'>"  # open one <g> group per bond
atom = self.topology_data.universe.atoms[bond[0]-1] #zero-based index vs one-based index
residue = (atom.resname, str(atom.resid), atom.segid)
if bond[2] in ["N","O","H"]:
#backbone interactions
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:white;stroke-width:15' />"
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:"+color+";stroke-width:4' />"
else:
#sidechain interactions
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:white;stroke-width:15' />"
self.draw_hbonds=self.draw_hbonds+"<line id='"+str(bond[0])+"' class='HBonds' stroke-dasharray='5,5' x1='"+str(int(self.molecule.nearest_points_coords[residue][0]))+"' y1='"+str(int(self.molecule.nearest_points_coords[residue][1]))+"' x2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][0]))+"' y2='"+str(float(self.molecule.ligand_atom_coords_from_diagr[bond[1]][1]))+"' style='stroke:"+color+";stroke-width:4' />"
                self.draw_hbonds += "</g>"  # close this bond's <g> group
def render( self, tag, single, between, kwargs ):
"""Append the actual tags to content."""
out = "<%s" % tag
for key, value in list( kwargs.items( ) ):
if value is not None: # when value is None that means stuff like <... checked>
key = key.strip('_') # strip this so class_ will mean class, etc.
if key == 'http_equiv': # special cases, maybe change _ to - overall?
key = 'http-equiv'
elif key == 'accept_charset':
key = 'accept-charset'
out = "%s %s=\"%s\"" % ( out, key, escape( value ) )
else:
out = "%s %s" % ( out, key )
if between is not None:
out = "%s>%s</%s>" % ( out, between, tag )
else:
if single:
out = "%s />" % out
else:
out = "%s>" % out
if self.parent is not None:
self.parent.content.append( out )
else:
return out | 0.018886 |
def convert_bytes(value):
"""
Reduces bytes to more convenient units (i.e. KiB, GiB, TiB, etc.).
Args:
    value (int): Value in bytes
Returns:
tup (tuple): Tuple of value, unit (e.g. (10, 'MiB'))
"""
n = np.rint(len(str(value))/4).astype(int)
return value/(1024**n), sizes[n] | 0.003135 |
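A short worked example, assuming `sizes` is a module-level unit lookup such as ('B', 'KiB', 'MiB', 'GiB', 'TiB') and numpy is imported as np:
# len(str(1536000)) == 7 and np.rint(7 / 4) == 2, so the value is scaled by 1024**2.
value, unit = convert_bytes(1536000)
print(round(value, 2), unit)  # -> 1.46 MiB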
def discover(url, options={}):
"""
Retrieve the API definition from the given URL and construct
a Patchboard to interface with it.
"""
try:
resp = requests.get(url, headers=Patchboard.default_headers)
except Exception as e:
raise PatchboardError("Problem discovering API: {0}".format(e))
# Parse as JSON (Requests uses json.loads())
try:
api_spec = resp.json()
except ValueError as e:
raise PatchboardError("Unparseable API description: {0}".format(e))
# Return core handle object
return Patchboard(api_spec, options) | 0.001681 |
def _edit_main(self, request):
"""Adds the link to the new unit testing results on the repo's main wiki page.
"""
self.prefix = "{}_Pull_Request_{}".format(request.repo.name, request.pull.number)
if not self.testmode:
page = site.pages[self.basepage]
text = page.text()
else:
text = "This is a fake wiki page.\n\n<!--@CI:Placeholder-->"
self.newpage = self.prefix
link = "Pull Request #{}".format(request.pull.number)
text = text.replace("<!--@CI:Placeholder-->",
"* [[{}|{}]]\n<!--@CI:Placeholder-->".format(self.newpage, link))
if not self.testmode:
result = page.save(text, summary="Added {} unit test link.".format(link), minor=True, bot=True)
return result[u'result'] == u'Success'
else:
return text | 0.007804 |
def _get_diff(self, cp_file):
"""Get a diff between running config and a proposed file."""
diff = []
self._create_sot_file()
diff_out = self.device.show(
'show diff rollback-patch file {0} file {1}'.format(
'sot_file', self.replace_file.split('/')[-1]), raw_text=True)
try:
diff_out = diff_out.split(
'#Generating Rollback Patch')[1].replace(
'Rollback Patch is Empty', '').strip()
for line in diff_out.splitlines():
if line:
if line[0].strip() != '!':
diff.append(line.rstrip(' '))
except (AttributeError, KeyError):
raise ReplaceConfigException(
'Could not calculate diff. It\'s possible the given file doesn\'t exist.')
return '\n'.join(diff) | 0.003421 |
def disconnect(self):
"""
diconnect from the connected device
:return: bool
"""
cmd_response = self.__send_command(const.CMD_EXIT)
if cmd_response.get('status'):
self.is_connect = False
if self.__sock:
self.__sock.close()
return True
else:
raise ZKErrorResponse("can't disconnect") | 0.004975 |
    def compression_type(self):
        """Return the latest compression type used in this MAR.
Returns:
One of None, 'bz2', or 'xz'
"""
best_compression = None
for e in self.mardata.index.entries:
self.fileobj.seek(e.offset)
magic = self.fileobj.read(10)
compression = guess_compression(magic)
if compression == 'xz':
best_compression = 'xz'
break
elif compression == 'bz2' and best_compression is None:
best_compression = 'bz2'
return best_compression | 0.003279 |
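A minimal sketch of the magic-byte check that `guess_compression` presumably performs (the helper is defined elsewhere; the magic constants below are standard, but the body is an assumption):
def guess_compression(magic):
    # xz streams begin with FD 37 7A 58 5A 00; bzip2 streams begin with "BZh".
    if magic.startswith(b'\xfd7zXZ\x00'):
        return 'xz'
    if magic.startswith(b'BZh'):
        return 'bz2'
    return None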
async def write(self, data, eof = False, buffering = True):
"""
Write output to current output stream
"""
if not self.outputstream:
self.outputstream = Stream()
self._startResponse()
elif (not buffering or eof) and not self._sendHeaders:
self._startResponse()
if not isinstance(data, bytes):
data = data.encode(self.encoding)
await self.outputstream.write(data, self.connection, eof, False, buffering) | 0.013861 |
def sg_summary_gradient(tensor, gradient, prefix=None, name=None):
r"""Register `tensor` to summary report as `gradient`
Args:
tensor: A `Tensor` to log as gradient
gradient: A 0-D `Tensor`. A gradient to log
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
# noinspection PyBroadException
_scalar(name + '/grad', tf.reduce_mean(tf.abs(gradient)))
_histogram(name + '/grad-h', tf.abs(gradient)) | 0.001344 |
def add_child(self, **kwargs):
"""Creates a new ``Node`` based on the extending class and adds it as
a child to this ``Node``.
:param kwargs:
arguments for constructing the data object associated with this
``Node``
:returns:
extender of the ``Node`` class
"""
data_class = self.graph.data_content_type.model_class()
node = Node.objects.create(graph=self.graph)
data_class.objects.create(node=node, **kwargs)
node.parents.add(self)
self.children.add(node)
return node | 0.006745 |
def competitions_data_download_file(self, id, file_name, **kwargs): # noqa: E501
"""Download competition data file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.competitions_data_download_file(id, file_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: Competition name (required)
        :param str file_name: File name (required)
:return: Result
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.competitions_data_download_file_with_http_info(id, file_name, **kwargs) # noqa: E501
else:
(data) = self.competitions_data_download_file_with_http_info(id, file_name, **kwargs) # noqa: E501
return data | 0.001961 |
def StrPrefixOf(prefix, input_string):
"""
    Return True if the concrete value of input_string starts with the given
    prefix, otherwise False.
:param prefix: prefix we want to check
:param input_string: the string we want to check
    :return: True if input_string starts with prefix, else False
"""
return re.match(r'^' + prefix.value, input_string.value) is not None | 0.002558 |
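Because the prefix is interpolated directly into a regular expression, regex metacharacters in it would not be matched literally. A hedged literal-prefix equivalent (assuming the `.value` attributes are plain strings) is:
import re

def str_prefix_of_literal(prefix, input_string):
    # re.escape makes characters such as '.', '*' and '(' match literally;
    # input_string.value.startswith(prefix.value) would be equivalent.
    return re.match(re.escape(prefix.value), input_string.value) is not None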
def difference(self, another_moc, *args):
"""
Difference between the MOC instance and other MOCs.
Parameters
----------
another_moc : `~mocpy.moc.MOC`
            The MOC that will be subtracted from self.
args : `~mocpy.moc.MOC`
Other additional MOCs to perform the difference with.
Returns
-------
result : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
The resulting MOC.
"""
interval_set = self._interval_set.difference(another_moc._interval_set)
for moc in args:
interval_set = interval_set.difference(moc._interval_set)
return self.__class__(interval_set) | 0.002845 |
def jobs(self, state=None, user=None, queue=None, limit=None,
started_time_begin=None, started_time_end=None,
finished_time_begin=None, finished_time_end=None):
"""
The jobs resource provides a list of the MapReduce jobs that have
finished. It does not currently return a full list of parameters.
:param str user: user name
:param str state: the job state
:param str queue: queue name
:param str limit: total number of app objects to be returned
:param str started_time_begin: jobs with start time beginning with
this time, specified in ms since epoch
:param str started_time_end: jobs with start time ending with this
time, specified in ms since epoch
:param str finished_time_begin: jobs with finish time beginning with
this time, specified in ms since epoch
:param str finished_time_end: jobs with finish time ending with this
time, specified in ms since epoch
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `state`
incorrect
"""
path = '/ws/v1/history/mapreduce/jobs'
legal_states = set([s for s, _ in JobStateInternal])
if state is not None and state not in legal_states:
msg = 'Job Internal State %s is illegal' % (state,)
raise IllegalArgumentError(msg)
loc_args = (
('state', state),
('user', user),
('queue', queue),
('limit', limit),
('startedTimeBegin', started_time_begin),
('startedTimeEnd', started_time_end),
('finishedTimeBegin', finished_time_begin),
('finishedTimeEnd', finished_time_end))
params = self.construct_parameters(loc_args)
return self.request(path, **params) | 0.002029 |
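A hedged usage sketch; the client class and constructor below follow yarn_api_client conventions but may differ between library versions:
from yarn_api_client.history_server import HistoryServer  # assumed module path

hs = HistoryServer('historyserver.example.com', 19888)
response = hs.jobs(state='SUCCEEDED', user='alice', limit=10)
print(response.data)  # parsed JSON from /ws/v1/history/mapreduce/jobs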
def trace(fun, *a, **k):
    """ Define a tracer for a rule function,
        for logging and statistics purposes. """
@wraps(fun)
def tracer(*a, **k):
ret = fun(*a, **k)
print('trace:fun: %s\n ret=%s\n a=%s\nk%s\n' %
(str(fun), str(ret), str(a), str(k)))
return ret
return tracer | 0.003135 |
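A brief usage sketch: decorating a hypothetical rule function so each call prints its return value and arguments.
@trace
def is_even(n):
    return n % 2 == 0

is_even(4)  # prints a trace line containing the function, ret=True and a=(4,)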
def save_log_entry(self, log_entry_form, *args, **kwargs):
"""Pass through to provider LogEntryAdminSession.update_log_entry"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
if log_entry_form.is_for_update():
return self.update_log_entry(log_entry_form, *args, **kwargs)
else:
return self.create_log_entry(log_entry_form, *args, **kwargs) | 0.004435 |
def _remove_trailing_spaces(line):
"""Remove trailing spaces unless they are quoted with a backslash."""
while line.endswith(' ') and not line.endswith('\\ '):
line = line[:-1]
return line.replace('\\ ', ' ') | 0.008197 |
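A short worked example with hypothetical inputs: ordinary trailing spaces are stripped, while a backslash-quoted trailing space survives as a literal space.
print(repr(_remove_trailing_spaces('value   ')))   # 'value'
print(repr(_remove_trailing_spaces('value\\ ')))   # 'value '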
def set_oauth_app_info(self, client_id, client_secret, redirect_uri):
"""Set the app information to use with OAuth2.
This function need only be called if your praw.ini site configuration
does not already contain the necessary information.
Go to https://www.reddit.com/prefs/apps/ to discover the appropriate
values for your application.
:param client_id: the client_id of your application
:param client_secret: the client_secret of your application
:param redirect_uri: the redirect_uri of your application
"""
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri | 0.002837 |
def _populate_audio_file(self):
"""
Create the ``self.audio_file`` object by reading
the audio file at ``self.audio_file_path_absolute``.
"""
self.log(u"Populate audio file...")
if self.audio_file_path_absolute is not None:
self.log([u"audio_file_path_absolute is '%s'", self.audio_file_path_absolute])
self.audio_file = AudioFile(
file_path=self.audio_file_path_absolute,
logger=self.logger
)
self.audio_file.read_properties()
else:
self.log(u"audio_file_path_absolute is None")
self.log(u"Populate audio file... done") | 0.004438 |
def handle_exception(self, frame, exc_info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
type_, value, tb = exc_info
# Python 3 is broken see http://bugs.python.org/issue17413
_value = value
if not isinstance(_value, BaseException):
_value = type_(value)
fake_exc_info = type_, _value, tb
log.error('Exception during trace', exc_info=fake_exc_info)
self.obj_cache[id(exc_info)] = exc_info
self.extra_vars['__exception__'] = exc_info
exception = type_.__name__
exception_description = str(value)
init = 'Echo|%s' % dump({
'for': '__exception__',
'val': escape('%s: %s') % (exception, exception_description)
})
# User exception is 4 frames away from exception
frame = frame or sys._getframe().f_back.f_back.f_back.f_back
self.interaction(
frame, tb, exception, exception_description, init=init
) | 0.001903 |
def to_dict(self):
'''Dump a feature collection's features to a dictionary.
This does not include additional data, such as whether
or not the collection is read-only. The returned dictionary
is suitable for serialization into JSON, CBOR, or similar
data formats.
'''
def is_non_native_sc(ty, encoded):
return (ty == 'StringCounter'
and not is_native_string_counter(encoded))
fc = {}
native = ('StringCounter', 'Unicode')
for name, feat in self._features.iteritems():
if name.startswith(self.EPHEMERAL_PREFIX):
continue
if not isinstance(name, unicode):
name = name.decode('utf-8')
tyname = registry.feature_type_name(name, feat)
encoded = registry.get(tyname).dumps(feat)
# This tomfoolery is to support *native untagged* StringCounters.
if tyname not in native or is_non_native_sc(tyname, encoded):
encoded = cbor.Tag(cbor_names_to_tags[tyname], encoded)
fc[name] = encoded
return fc | 0.001753 |
def measure_cost(repeat, scipy_trans_lhs, scipy_dns_lhs, func_name, *args, **kwargs):
"""Measure time cost of running a function
"""
mx.nd.waitall()
args_list = []
for arg in args:
args_list.append(arg)
start = time.time()
if scipy_trans_lhs:
args_list[0] = np.transpose(args_list[0]) if scipy_dns_lhs else sp.spmatrix.transpose(args_list[0])
for _ in range(repeat):
func_name(*args_list, **kwargs)
mx.nd.waitall()
end = time.time()
diff = end - start
return diff / repeat | 0.005515 |
def authorize(self):
'''
Prepare the master to expect a signing request
'''
with salt.utils.files.fopen(self.path, 'w+') as fp_:
fp_.write(str(int(time.time()))) # future lint: disable=blacklisted-function
return True | 0.011111 |
def find_args(self):
"""Build self.args using all the fields."""
return self.mpi_cmd + ['-n', str(self.n)] + self.mpi_args + \
self.program + self.program_args | 0.015789 |
def get_user_by_email(self, email):
"""
        Returns details for the user with the given email address.
        If there is more than one match, only the first is returned. Use
        get_users() for the full result set.
"""
results = self.get_users(filter='email eq "%s"' % (email))
if results['totalResults'] == 0:
logging.warning("Found no matches for given email.")
return
elif results['totalResults'] > 1:
logging.warning("Found %s matches for email %s" %
(results['totalResults'], email))
return results['resources'][0] | 0.004815 |
def require_request_model(cls, validate=True):
"""
Makes a handler require that a request body that map towards the given
model is provided. Unless the ``validate`` option is set to ``False`` the
data will be validated against the model's fields.
The model will be passed to the handler as the last positional argument. ::
@require_request_model(Model)
async def handle_model(request, model):
return 200, model
"""
def decorator(handler):
async def new_handler(request, *args, **kwargs):
body = await request.json()
model = cls(**body)
if validate:
model.validate()
return await handler(request, model, *args, **kwargs)
return new_handler
return decorator | 0.001252 |
def get_project_content_commit_date(root_dir='.', exclusions=None):
"""Get the datetime for the most recent commit to a project that
affected Sphinx content.
*Content* is considered any file with one of these extensions:
- ``rst`` (README.rst and LICENSE.rst are excluded)
- ``ipynb``
- ``png``
- ``jpeg``
- ``jpg``
- ``svg``
- ``gif``
This function allows project infrastructure and configuration files to be
updated without changing the timestamp.
Parameters
----------
root_dir : `str`, optional
Root directory. This is the current working directory by default.
exclusions : `list` of `str`, optional
List of file paths or directory paths to ignore.
Returns
-------
commit_date : `datetime.datetime`
Datetime of the most recent content commit.
Raises
------
RuntimeError
Raised if no content files are found.
"""
logger = logging.getLogger(__name__)
# Supported 'content' extensions
extensions = ('rst', 'ipynb', 'png', 'jpeg', 'jpg', 'svg', 'gif')
content_paths = []
for extname in extensions:
content_paths += get_filepaths_with_extension(
extname,
root_dir=root_dir)
# Known files that should be excluded; lower case for comparison
exclude = Matcher(exclusions if exclusions
else ['readme.rst', 'license.rst'])
# filter out excluded files
content_paths = [p for p in content_paths
if not (exclude(p) or exclude(p.split(os.path.sep)[0]))]
logger.debug('Found content paths: {}'.format(', '.join(content_paths)))
if not content_paths:
raise RuntimeError('No content files found in {}'.format(root_dir))
commit_datetimes = []
for filepath in content_paths:
try:
datetime = read_git_commit_timestamp_for_file(
filepath,
repo_path=root_dir)
commit_datetimes.append(datetime)
except IOError:
logger.warning(
'Could not get commit for {}, skipping'.format(filepath))
if not commit_datetimes:
raise RuntimeError('No content commits could be found')
latest_datetime = max(commit_datetimes)
return latest_datetime | 0.000434 |
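A hedged usage sketch following the docstring:
commit_date = get_project_content_commit_date(
    root_dir='.',
    exclusions=['readme.rst', 'license.rst'])
print(commit_date.isoformat())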
def parsed(self):
"""Get the code object which represents the compiled Python file.
This property is cached and only parses the content once.
"""
if not self._parsed:
self._parsed = compile(self.content, self.path, 'exec')
return self._parsed | 0.006734 |
def dvds_current_releases(self, **kwargs):
"""Gets the upcoming movies from the API.
Args:
page_limit (optional): number of movies to show per page, default=16
page (optional): results page number, default=1
country (optional): localized data for selected country, default="us"
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('dvds_current_releases')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | 0.003396 |
def pull(self, action, image_name, **kwargs):
"""
Pulls an image for a container configuration
:param action: Action configuration.
:type action: dockermap.map.runner.ActionConfig
:param image_name: Image name.
:type image_name: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict
"""
config_id = action.config_id
registry, __, image = config_id.config_name.rpartition('/')
if registry and '.' in registry and registry not in self._login_registries:
self.login(action, registry, insecure_registry=kwargs.get('insecure_registry'))
log.info("Pulling image %s:%s.", config_id.config_name, config_id.instance_name)
res = action.client.pull(repository=config_id.config_name, tag=config_id.instance_name, **kwargs)
log.debug("Done pulling image %s:%s.", config_id.config_name, config_id.instance_name)
self._policy.images[action.client_name].refresh_repo(config_id.config_name)
log.debug("Refreshed image cache for repo %s.", config_id.config_name)
return res | 0.007544 |
def register(self, patterns, obj=None, instances=None, **reg_kwargs):
"""Register one object which can be matched/searched by regex.
:param patterns: a list/tuple/set of regex-pattern.
:param obj: return it while search/match success.
:param instances: instance list will search/match the patterns.
:param reg_kwargs: kwargs for re.compile.
"""
assert obj, "bool(obj) should be True."
patterns = patterns if isinstance(patterns, (list, tuple, set)) else [patterns]
instances = instances or []
instances = (
instances if isinstance(instances, (list, tuple, set)) else [instances]
)
for pattern in patterns:
pattern_compiled = re.compile(pattern, **reg_kwargs)
self.container.append((pattern_compiled, obj, instances))
if self.ensure_mapping:
# check all instances to avoid one-to-many instances.
self._check_instances()
else:
# no need to check all instances.
for instance in instances:
assert self.search(instance) == [obj] or self.match(instance) == [
obj
], (
"instance %s should fit at least one pattern %s"
% (instance, pattern)
) | 0.003605 |
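A hedged usage sketch, assuming the containing class is instantiated as `registry` and exposes the `search`/`match` methods referenced above:
import re

registry.register(
    patterns=[r'^https?://example\.com/'],
    obj='example-handler',
    instances=['https://example.com/path'],
    flags=re.IGNORECASE)          # reg_kwargs are forwarded to re.compile
print(registry.search('https://example.com/path'))  # -> ['example-handler']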
def create(Bucket,
ACL=None, LocationConstraint=None,
GrantFullControl=None,
GrantRead=None,
GrantReadACP=None,
GrantWrite=None,
GrantWriteACP=None,
region=None, key=None, keyid=None, profile=None):
'''
Given a valid config, create an S3 Bucket.
Returns {created: true} if the bucket was created and returns
{created: False} if the bucket was not created.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.create my_bucket \\
GrantFullControl='[email protected]' \\
GrantRead='uri="http://acs.amazonaws.com/groups/global/AllUsers"' \\
GrantReadACP='emailaddress="[email protected]",id="2345678909876432"' \\
LocationConstraint=us-west-1
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
for arg in ('ACL', 'GrantFullControl',
'GrantRead', 'GrantReadACP',
'GrantWrite', 'GrantWriteACP'):
if locals()[arg] is not None:
kwargs[arg] = str(locals()[arg]) # future lint: disable=blacklisted-function
if LocationConstraint:
kwargs['CreateBucketConfiguration'] = {'LocationConstraint': LocationConstraint}
location = conn.create_bucket(Bucket=Bucket,
**kwargs)
conn.get_waiter("bucket_exists").wait(Bucket=Bucket)
if location:
log.info('The newly created bucket name is located at %s', location['Location'])
return {'created': True, 'name': Bucket, 'Location': location['Location']}
else:
log.warning('Bucket was not created')
return {'created': False}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)} | 0.004059 |
def _pprint(dic):
"""Prints a dictionary with one indentation level"""
for key, value in dic.items():
print(" {0}: {1}".format(key, value)) | 0.006173 |
def to_cloudformation(self):
"""Generates CloudFormation resources from a SAM API resource
:returns: a tuple containing the RestApi, Deployment, and Stage for an empty Api.
:rtype: tuple
"""
rest_api = self._construct_rest_api()
deployment = self._construct_deployment(rest_api)
swagger = None
if rest_api.Body is not None:
swagger = rest_api.Body
elif rest_api.BodyS3Location is not None:
swagger = rest_api.BodyS3Location
stage = self._construct_stage(deployment, swagger)
permissions = self._construct_authorizer_lambda_permission()
return rest_api, deployment, stage, permissions | 0.004237 |
def sign_digest(sock, keygrip, digest, sp=subprocess, environ=None):
    """Sign a digest with the specified key, using the GPG agent."""
hash_algo = 8 # SHA256
assert len(digest) == 32
assert communicate(sock, 'RESET').startswith(b'OK')
ttyname = check_output(args=['tty'], sp=sp).strip()
options = ['ttyname={}'.format(ttyname)] # set TTY for passphrase entry
display = (environ or os.environ).get('DISPLAY')
if display is not None:
options.append('display={}'.format(display))
for opt in options:
assert communicate(sock, 'OPTION {}'.format(opt)) == b'OK'
assert communicate(sock, 'SIGKEY {}'.format(keygrip)) == b'OK'
hex_digest = binascii.hexlify(digest).upper().decode('ascii')
assert communicate(sock, 'SETHASH {} {}'.format(hash_algo,
hex_digest)) == b'OK'
assert communicate(sock, 'SETKEYDESC '
'Sign+a+new+TREZOR-based+subkey') == b'OK'
assert communicate(sock, 'PKSIGN') == b'OK'
while True:
line = recvline(sock).strip()
if line.startswith(b'S PROGRESS'):
continue
else:
break
line = unescape(line)
log.debug('unescaped: %r', line)
prefix, sig = line.split(b' ', 1)
if prefix != b'D':
raise ValueError(prefix)
sig, leftover = parse(sig)
assert not leftover, leftover
return parse_sig(sig) | 0.000698 |
def translate_query_params(cls, **kwargs):
"""
        Translate an arbitrary keyword argument to the expected query.
TODO: refactor this into something less insane.
XXX: Clean this up. It's *too* flexible.
In the v2 API, many endpoints expect a particular query argument to be
in the form of `query=xxx` where `xxx` would be the name of perhaps
the name, ID or otherwise. This function ought to take a more aptly
named parameter specified in `TRANSLATE_QUERY_PARAM`, and substitute it
into the `query` keyword argument. The purpose is so that some models
(optionally) have nicer named keyword arguments than `query` for easier
to read python.
If a query argument is given then the output should be that value. If a
substitute value is given as a keyword specified in
`TRANSLATE_QUERY_PARAM`(and query is not) then the `query` argument
will be that keyword argument.
Eg. No query param
TRANSLATE_QUERY_PARAM = ('name',)
kwargs = {'name': 'PagerDuty',}
...
output = {'query': 'PagerDuty'}
or, query param explicitly
TRANSLATE_QUERY_PARAM = ('name',)
kwargs = {'name': 'PagerDuty', 'query': 'XXXXPlopperDuty'}
...
output = {'query': 'XXXXPlopperDuty'}
or, TRANSLATE_QUERY_PARAM is None
TRANSLATE_QUERY_PARAM = None
kwargs = {'name': 'PagerDuty', 'query': 'XXXXPlopperDuty'}
...
output = {'output': 'XXXXPlopperDuty', 'name': 'PagerDuty'}
"""
values = []
output = kwargs.copy()
query = kwargs.pop('query', None)
# remove any of the TRANSLATE_QUERY_PARAMs in output
for param in (cls.TRANSLATE_QUERY_PARAM or []):
popped = output.pop(param, None)
if popped is not None:
values.append(popped)
# if query is provided, just use it
if query is not None:
output['query'] = query
return output
# if query is not provided, use the first parameter we removed from
# the kwargs
try:
output['query'] = next(iter(values))
except StopIteration:
pass
return output | 0.000857 |
def live_source_load(self, source):
"""
        Send new source code to the bot.
        :param source: the new source code to load
:return:
"""
source = source.rstrip('\n')
if source != self.source:
self.source = source
b64_source = base64.b64encode(bytes(bytearray(source, "ascii")))
self.send_command(CMD_LOAD_BASE64, b64_source) | 0.005714 |
def appropriate_for(self, usage, alg='HS256'):
"""
Make sure there is a key instance present that can be used for
the specified usage.
"""
try:
_use = USE[usage]
        except KeyError:
raise ValueError('Unknown key usage')
else:
if not self.use or self.use == _use:
if _use == 'sig':
return self.get_key()
else:
return self.encryption_key(alg)
raise WrongUsage("This key can't be used for {}".format(usage)) | 0.005272 |
def _getdevicetuple(iobtdevice):
"""
Returns an (addr, name, COD) device tuple from a IOBluetoothDevice object.
"""
addr = _macutil.formatdevaddr(iobtdevice.getAddressString())
name = iobtdevice.getName()
cod = iobtdevice.getClassOfDevice()
return (addr, name, cod) | 0.003413 |
def print_colors(palette, outfile="Palette.png"):
"""
    Print a color palette (a tuple of colors) to a PNG file for a quick check.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
xmax = 20 * (len(palette) + 1)
x1s = np.arange(0, xmax, 20)
xintervals = [10] * len(palette)
    xx = list(zip(x1s, xintervals))  # materialize the pairs (Python 3's zip is lazy)
ax.broken_barh(xx, (5, 10), facecolors=palette)
ax.set_ylim(0, 20)
ax.set_xlim(0, xmax)
ax.set_axis_off()
savefig(outfile) | 0.00216 |
def _double_single_alleles(df, chrom):
""" Double any single alleles in the specified chromosome.
Parameters
----------
df : pandas.DataFrame
SNPs
chrom : str
chromosome of alleles to double
Returns
-------
df : pandas.DataFrame
SNPs with specified chromosome's single alleles doubled
"""
# find all single alleles of the specified chromosome
single_alleles = np.where(
(df["chrom"] == chrom) & (df["genotype"].str.len() == 1)
)[0]
# double those alleles
df.ix[single_alleles, "genotype"] = df.ix[single_alleles, "genotype"] * 2
return df | 0.004225 |
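A short worked example with hypothetical data; note the helper uses the long-deprecated `DataFrame.ix` indexer, so it only runs on older pandas versions:
import pandas as pd

df = pd.DataFrame({'chrom': ['MT', 'MT', '1'],
                   'genotype': ['A', 'CC', 'G']})
df = _double_single_alleles(df, 'MT')
print(df['genotype'].tolist())  # ['AA', 'CC', 'G'] -- only the single 'A' on MT is doubled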
def __get_session(self):
""" Opens a db session """
db_path = self.__get_config().get(ConfigKeys.asset_allocation_database_path)
self.session = dal.get_session(db_path)
return self.session | 0.013636 |
def __send_request(self, url, params=None):
"""Send request"""
r = self.fetch(url, payload=params)
return r.text | 0.014599 |
def PushItem(self, item, block=True):
"""Pushes an item onto the queue.
Args:
item (object): item to add.
block (Optional[bool]): True to block the process when the queue is full.
Raises:
      QueueFull: if the item could not be pushed onto the queue because it is full.
"""
try:
self._queue.put(item, block=block)
except Queue.Full as exception:
raise errors.QueueFull(exception) | 0.007059 |
def attention_mask_same_segment(
query_segment, memory_segment=None, dtype=tf.float32):
"""Bias for attention where attention between segments is disallowed.
Args:
query_segment: a mtf.Tensor with shape [..., length_dim]
memory_segment: a mtf.Tensor with shape [..., memory_length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim]
"""
memory_segment = rename_length_to_memory_length(
memory_segment or query_segment)
return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9 | 0.008696 |
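A plain-numpy sketch of the masking rule this expresses (mtf tensor mechanics omitted): query/memory positions whose segment ids differ receive a large negative bias, so softmax attention across segments is effectively zero.
import numpy as np

query_segment = np.array([1, 1, 2, 2])                 # [length]
memory_segment = query_segment[None, :]                # [1, memory_length]
mask = (query_segment[:, None] != memory_segment).astype(np.float32) * -1e9
print(mask)  # -1e9 wherever token i and token j belong to different segments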
def get_assessment_part_mdata():
"""Return default mdata map for AssessmentPart"""
return {
'assessment_part': {
'element_label': {
'text': 'assessment part',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'accepts an osid.id.Id object',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
},
'assessment': {
'element_label': {
'text': 'assessment',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'accepts an osid.id.Id object',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
},
'weight': {
'element_label': {
'text': 'weight',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'enter a cardinal value',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_cardinal_values': [None],
'syntax': 'CARDINAL',
'minimum_cardinal': None,
'maximum_cardinal': None,
'cardinal_set': []
},
'allocated_time': {
'element_label': {
'text': 'allocated time',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'enter a valid duration object.',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_duration_values': [None],
'syntax': 'DURATION',
'date_time_set': [],
},
} | 0.000296 |
def pmap(f, iterable, n=None, dummy=False, p=None):
"""
    Parallel map of a function over an iterable.
    If each item in the iterable is itself an iterable, then
    f(*item) is called automatically instead of f(item).
Arguments:
f: function
iterable: any iterable where each item is sent to f
n: number of cpus (default is number on machine)
dummy: use dummy pool.
p: existing pool to re-use
"""
# make it easier to debug.
if n == 1:
for r in it.starmap(f, iterable):
yield r
        return  # PEP 479: raising StopIteration inside a generator is an error on Python 3.7+
if p is None:
po = pool(n, dummy)
else:
po = p
assert hasattr(po, 'imap')
f = _func_star(f)
try:
for r in po.imap(f, iterable):
yield r
# explicitly clean up created pool
finally:
if p is None:
try:
po.close()
po.join()
except:
pass | 0.003165 |
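A brief usage sketch (relying on the module-level imports such as `itertools as it` assumed by the function): because each item is a tuple, the function is applied with starmap semantics.
import operator

results = list(pmap(operator.add, [(1, 2), (3, 4), (5, 6)], n=1))
print(results)  # [3, 7, 11]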
def uninstall(*package_names):
"""
Uninstall one or more packages using the Python equivalent of ``pip uninstall --yes``.
The package(s) to uninstall must be installed, otherwise pip will raise an
``UninstallationError``. You can check for installed packages using
:func:`is_installed()`.
:param package_names: The names of one or more Python packages (strings).
"""
command = UninstallCommand()
opts, args = command.parse_args(['--yes'] + list(package_names))
command.run(opts, args) | 0.003802 |
def lessThan(self, left, right):
'''Return ordering of *left* vs *right*.'''
sourceModel = self.sourceModel()
if sourceModel:
leftItem = sourceModel.item(left)
rightItem = sourceModel.item(right)
if (isinstance(leftItem, Directory)
and not isinstance(rightItem, Directory)):
return self.sortOrder() == Qt.AscendingOrder
elif (not isinstance(leftItem, Directory)
and isinstance(rightItem, Directory)):
return self.sortOrder() == Qt.DescendingOrder
return super(FilesystemSortProxy, self).lessThan(left, right) | 0.007634 |
def s2n(self, offset, length, signed=0):
"""
Convert slice to integer, based on sign and endian flags.
Usually this offset is assumed to be relative to the beginning of the
start of the EXIF information.
For some cameras that use relative tags, this offset may be relative
to some other starting point.
"""
self.file.seek(self.offset + offset)
sliced = self.file.read(length)
if self.endian == 'I':
val = s2n_intel(sliced)
else:
val = s2n_motorola(sliced)
# Sign extension?
if signed:
msb = 1 << (8 * length - 1)
if val & msb:
val -= (msb << 1)
return val | 0.002717 |
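A worked sketch of the sign-extension arithmetic at the end (the `s2n_intel`/`s2n_motorola` helpers are assumed to decode unsigned little- and big-endian integers):
def sign_extend(val, length):
    # For a 2-byte value msb == 0x8000; 0xFF38 is reinterpreted as -200.
    msb = 1 << (8 * length - 1)
    if val & msb:
        val -= (msb << 1)
    return val

print(sign_extend(0xFF38, 2))  # -200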
def update_user_info(self, **kwargs):
"""Update user info and settings.
:param \*\*kwargs: settings to be merged with
            :func:`User.get_configfile` settings and sent to Filemail.
:rtype: ``bool``
"""
if kwargs:
self.config.update(kwargs)
method, url = get_URL('user_update')
res = getattr(self.session, method)(url, params=self.config)
if res.status_code == 200:
return True
hellraiser(res) | 0.008032 |
def _secondary_values(self):
"""Getter for secondary series values (flattened)"""
return [
val for serie in self.secondary_series for val in serie.values
if val is not None
] | 0.009009 |
def as_boxes(self, colors=None):
"""
A rough Trimesh representation of the voxels with a box
for each filled voxel.
Parameters
----------
        colors : (3,) or (4,) float or uint8, or
          (X, Y, Z, 3) or (X, Y, Z, 4) float or uint8
          where matrix.shape == (X, Y, Z); either a single color or per-voxel colors.
Returns
---------
mesh : trimesh.Trimesh
Mesh with one box per filled cell.
"""
matrix = self._data['matrix']
centers = matrix_to_points(
matrix=matrix,
pitch=self._data['pitch'],
origin=self._data['origin'])
if colors is not None:
colors = np.asanyarray(colors)
if (colors.ndim == 4 and
colors.shape[:3] == matrix.shape and
colors.shape[3] in [3, 4]):
colors = colors[matrix > 0]
elif not (colors.shape == (3,) or colors.shape == (4,)):
log.warning('colors incorrect shape!')
colors = None
mesh = multibox(centers=centers,
pitch=self.pitch,
colors=colors)
return mesh | 0.001691 |
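A hedged usage sketch, assuming this method lives on the voxel object returned by `trimesh.Trimesh.voxelized` (the exact entry point varies between trimesh versions):
import trimesh

mesh = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
voxels = mesh.voxelized(pitch=0.1)
boxes = voxels.as_boxes(colors=(255, 0, 0, 255))  # one red box per filled cell
boxes.show()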
def timed_grep_nodes_for_patterns(self, versions_to_patterns, timeout_seconds, filename="system.log"):
"""
Searches all nodes in the cluster for a specific regular expression based on the node's version.
Params:
@versions_to_patterns : an instance of LogPatternToVersionMap, specifying the different log patterns based on a node's version.
@timeout_seconds : the amount of time to spend searching the logs for.
@filename : the name of the file to search for the patterns. Defaults to "system.log".
Returns the first node where the pattern was found, along with the matching lines.
Raises a TimeoutError if the pattern is not found within the specified timeout period.
"""
end_time = time.time() + timeout_seconds
while True:
if time.time() > end_time:
raise TimeoutError(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) +
" Unable to find: " + versions_to_patterns.patterns + " in any node log within " + str(timeout_seconds) + "s")
for node in self.nodelist():
pattern = versions_to_patterns(node.get_cassandra_version())
matchings = node.grep_log(pattern, filename)
if matchings:
ret = namedtuple('Node_Log_Matching', 'node matchings')
return ret(node=node, matchings=matchings)
time.sleep(1) | 0.006527 |