def flush(self):
# type: () -> BatchClient
"""Send buffered metrics in batch requests"""
address = self.remote_address
while len(self._batches) > 0:
self._socket.sendto(self._batches[0], address)
self._batches.popleft()
return self
def fetch_raw(self):
"""
Execute the query and return by batches.
Optional keyword arguments are passed to Query.execute(). Whether
this is real-time or stored logs is dependent on the value of
``fetch_type``.
:return: generator of dict results
"""
for results in super(LogQuery, self).execute():
if 'records' in results and results['records']:
yield results['records']
def response_handler(msg: Dict[str, str]) -> None:
"""Handle response sent by browser."""
from wdom.document import getElementByWdomId
id = msg['id']
elm = getElementByWdomId(id)
if elm:
elm.on_response(msg)
else:
logger.warning('No such element: wdom_id={}'.format(id))
def estimate_noiseperbl(data):
""" Takes large data array and sigma clips it to find noise per bl for input to detect_bispectra.
Takes mean across pols and channels for now, as in detect_bispectra.
"""
# define noise per baseline for data seen by detect_bispectra or image
datamean = data.mean(axis=2).imag # use imaginary part to estimate noise without calibrated, on-axis signal
(datameanmin, datameanmax) = rtlib.sigma_clip(datamean.flatten())
good = n.where( (datamean>datameanmin) & (datamean<datameanmax) )
noiseperbl = datamean[good].std() # measure single noise for input to detect_bispectra
logger.debug('Clipped to %d%% of data (%.3f to %.3f). Noise = %.3f.' % (100.*len(good[0])/len(datamean.flatten()), datameanmin, datameanmax, noiseperbl))
return noiseperbl
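For orientation, here is a minimal, self-contained sketch of the sigma-clipping step in plain NumPy; the helper below is an assumption standing in for rtlib.sigma_clip (which is expected to return (low, high) bounds), and the 3-sigma threshold and iteration count are illustrative only.

import numpy as np

def sigma_clip_bounds(values, sigma=3.0, iterations=3):
    # Stand-in for rtlib.sigma_clip: iteratively tighten (low, high)
    # bounds around the mean so that outliers are excluded.
    values = np.asarray(values, dtype=float)
    low, high = values.min(), values.max()
    for _ in range(iterations):
        mean, std = values.mean(), values.std()
        low, high = mean - sigma * std, mean + sigma * std
        values = values[(values > low) & (values < high)]
    return low, high

# Synthetic per-baseline means with a few strong outliers mixed in
datamean = np.random.normal(0.0, 1.0, size=10000)
datamean[:10] += 50.0
lo, hi = sigma_clip_bounds(datamean)
noiseperbl = datamean[(datamean > lo) & (datamean < hi)].std()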
def worker(self):
"""
Calculates the quartet weights for the test at a random
subsampled chunk of loci.
"""
## subsample loci
fullseqs = self.sample_loci()
## find all iterations of samples for this quartet
liters = itertools.product(*self.imap.values())
## run tree inference for each iteration of sampledict
hashval = uuid.uuid4().hex
weights = []
for ridx, lidx in enumerate(liters):
## get subalignment for this iteration and make to nex
a,b,c,d = lidx
sub = {}
for i in lidx:
if self.rmap[i] == "p1":
sub["A"] = fullseqs[i]
elif self.rmap[i] == "p2":
sub["B"] = fullseqs[i]
elif self.rmap[i] == "p3":
sub["C"] = fullseqs[i]
else:
sub["D"] = fullseqs[i]
## write as nexus file
nex = []
for tax in list("ABCD"):
nex.append(">{} {}".format(tax, sub[tax]))
## check for too much missing or lack of variants
nsites, nvar = count_var(nex)
## only run test if there's variation present
if nvar > self.minsnps:
## format as nexus file
nexus = "{} {}\n".format(4, len(fullseqs[a])) + "\n".join(nex)
## infer ML tree
treeorder = self.run_tree_inference(nexus, "{}.{}".format(hashval, ridx))
## add to list
weights.append(treeorder)
## cleanup - remove all files with the hash val
rfiles = glob.glob(os.path.join(tempfile.tempdir, "*{}*".format(hashval)))
for rfile in rfiles:
if os.path.exists(rfile):
os.remove(rfile)
## return result as weights for the set topologies.
trees = ["ABCD", "ACBD", "ADBC"]
wdict = {i:float(weights.count(i))/len(weights) for i in trees}
return wdict
def from_config(cls, config, name, section_key="score_caches"):
"""
score_caches:
redis_sentinel:
class: ores.score_caches.RedisSentinel
prefix: ores-derp
ttl: 9001
socket_timeout: 0.1
cluster: mymaster
hosts:
- localhost:5000
- localhost:5001
- localhost:5002
"""
sentinel_logger.info("Loading RedisSentinel '{0}' from config.".format(name))
section = config[section_key][name]
kwargs = {k: v for k, v in section.items() if k != "class"}
return cls.from_parameters(**kwargs)
def most_probable_alleles(allele_list):
"""
This module accepts a list of tuples of (allele, p_value) pairs. It returns the 2 most probable
alleles for that group.
"""
all_alleles = defaultdict()
# First collect all the keys. Make a dict with allele as key and list of pvalues as value
for allele, pvalue in allele_list:
allele = re.split(':', allele)
# Ensure allele has enough resolution for mhc:peptide prediction.
# HLA-A*02:01:04 -> ['HLA-A*02', '01', '04'] => At least 2 fields are required for
# satisfying criteria.
if len(allele) < 2:
continue
allele = ':'.join([allele[0], allele[1]]) # stitch back together
try:
all_alleles[allele].append(float(pvalue))
except KeyError:
all_alleles[allele] = [float(pvalue)]
# If there are less than 2 alleles, report all
if len(all_alleles.keys()) <= 2:
return all_alleles.keys()
# Else, get the two with most evidence. Evidence is gauged by
# a) How many files (of the 3) thought that Allele was present
# b) In a tie, who has a lower avg p value
# In the lambda function, if 2 alleles have the same number of calls, the sum of the p values is
# a measure of the avg because avg = sum / n and n is equal in both of them.
else:
return sorted(all_alleles.keys(), key=lambda x: \
(-len(all_alleles[x]), sum(all_alleles[x])))[0:2]
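A hedged usage sketch with made-up calls: alleles are first truncated to two fields, then ranked by how many callers reported them and, on ties, by the sum of their p-values.

calls = [('HLA-A*02:01:04', 0.01), ('HLA-A*02:01', 0.03),
         ('HLA-B*07:02', 0.02), ('HLA-B*07:02:01', 0.05),
         ('HLA-C*04:01', 0.20)]
# 'HLA-A*02:01' and 'HLA-B*07:02' are each reported twice, so they win:
print(most_probable_alleles(calls))  # ['HLA-A*02:01', 'HLA-B*07:02']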
def package_info(pkg_name):
"""Prints the information of a package.
Args:
pkg_name (str): The name of the desired package to get information
"""
indent = " "
for config, _ in _iter_packages():
if pkg_name == config["name"]:
print("Package:", pkg_name)
print(indent, "Platform:", config["platform"])
print(indent, "Version:", config["version"])
print(indent, "Path:", config["path"])
print(indent, "Worlds:")
for world in config["maps"]:
world_info(world["name"], world_config=world, initial_indent=" ")
def line(self, plunge, bearing, *args, **kwargs):
"""
Plot points representing linear features on the axes. Additional
arguments and keyword arguments are passed on to `plot`.
Parameters
----------
plunge, bearing : number or sequence of numbers
The plunge and bearing of the line(s) in degrees. The plunge is
measured in degrees downward from the end of the feature specified
by the bearing.
**kwargs
Additional parameters are passed on to `plot`.
Returns
-------
A sequence of Line2D artists representing the point(s) specified by
`plunge` and `bearing`.
"""
lon, lat = stereonet_math.line(plunge, bearing)
args, kwargs = self._point_plot_defaults(args, kwargs)
return self.plot([lon], [lat], *args, **kwargs)
def variance(numbers, type='population'):
"""
Calculates the population or sample variance of a list of numbers.
A large number means the results are all over the place, while a
small number means the results are comparatively close to the average.
Args:
numbers: a list of integers or floating point numbers to compare.
type: string, 'population' or 'sample', the kind of variance to be computed.
Returns:
The computed population or sample variance.
Defaults to population variance.
Requires:
The math module, average()
"""
mean = average(numbers)
variance = 0
for number in numbers:
variance += (mean - number) ** 2
if type == 'population':
return variance / len(numbers)
else:
return variance / (len(numbers) - 1)
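A quick worked example, assuming the companion average() helper returns the arithmetic mean: for the list below the mean is 5, the squared deviations sum to 32, so the population variance is 32/8 = 4.0 and the sample variance is 32/7 ≈ 4.57.

numbers = [2, 4, 4, 4, 5, 5, 7, 9]        # mean = 40 / 8 = 5
print(variance(numbers))                   # population: 32 / 8 = 4.0
print(variance(numbers, type='sample'))    # sample: 32 / 7 ≈ 4.571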
def domain(value,
allow_empty = False,
allow_ips = False,
**kwargs):
"""Validate that ``value`` is a valid domain name.
.. caution::
This validator does not verify that ``value`` **exists** as a domain. It
merely verifies that its contents *might* exist as a domain.
.. note::
This validator checks to validate that ``value`` resembles a valid
domain name. It is - generally - compliant with
`RFC 1035 <https://tools.ietf.org/html/rfc1035>`_ and
`RFC 6761 <https://tools.ietf.org/html/rfc6761>`_, however it diverges
in a number of key ways:
* Including authentication (e.g. ``username:[email protected]``) will
fail validation.
* Including a path (e.g. ``domain.dev/path/to/file``) will fail validation.
* Including a port (e.g. ``domain.dev:8080``) will fail validation.
If you are hoping to validate a more complete URL, we recommend that you
see :func:`url <validator_collection.validators.url>`.
.. hint::
Leading and trailing whitespace will be automatically stripped.
:param value: The value to validate.
:type value: :class:`str <python:str>` / :obj:`None <python:None>`
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param allow_ips: If ``True``, will succeed when validating IP addresses,
If ``False``, will raise a :class:`InvalidDomainError` if ``value`` is an IP
address. Defaults to ``False``.
:type allow_ips: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`str <python:str>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``value`` is not a :class:`str <python:str>` or
:obj:`None <python:None>`
:raises InvalidDomainError: if ``value`` is not a valid domain name or
empty with ``allow_empty`` set to ``True``
:raises SlashInDomainError: if ``value`` contains a slash or backslash
:raises AtInDomainError: if ``value`` contains an ``@`` symbol
:raises ColonInDomainError: if ``value`` contains a ``:`` symbol
:raises WhitespaceInDomainError: if ``value`` contains whitespace
"""
is_recursive = kwargs.pop('is_recursive', False)
if not value and not allow_empty:
raise errors.EmptyValueError('value (%s) was empty' % value)
elif not value:
return None
if not isinstance(value, basestring):
raise errors.CannotCoerceError('value must be a valid string, '
'was %s' % type(value))
if '/' in value:
raise errors.SlashInDomainError('valid domain name cannot contain "/"')
if '\\' in value:
raise errors.SlashInDomainError('valid domain name cannot contain "\\"')
if '@' in value:
raise errors.AtInDomainError('valid domain name cannot contain "@"')
if ':' in value:
raise errors.ColonInDomainError('valid domain name cannot contain ":"')
value = value.strip().lower()
for item in string_.whitespace:
if item in value:
raise errors.WhitespaceInDomainError('valid domain name cannot contain '
'whitespace')
if value in SPECIAL_USE_DOMAIN_NAMES:
return value
if allow_ips:
try:
ip_address(value, allow_empty = allow_empty)
is_valid = True
except (ValueError, TypeError, AttributeError):
is_valid = False
if is_valid:
return value
is_valid = DOMAIN_REGEX.match(value)
if not is_valid and not is_recursive:
with_prefix = 'http://' + value
try:
url(with_prefix, force_run = True, is_recursive = True) # pylint: disable=E1123
except ValueError:
raise errors.InvalidDomainError('value (%s) is not a valid domain' % value)
return value
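A usage sketch based on the behaviour documented above (the exception classes come from the library's errors module as referenced in the code; this is an illustration, not a test transcript):

domain('www.example.com')        # -> 'www.example.com'
domain('  Example.COM  ')        # stripped and lower-cased -> 'example.com'
domain(None, allow_empty=True)   # -> None instead of raising
try:
    domain('example.com:8080')   # port is not allowed
except errors.ColonInDomainError:
    pass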
def calcTightAnchors(args, d, patches):
"""
Recursively generates the number of anchor points specified in the
patches argument, such that all patches are d cells away
from their nearest neighbors.
"""
centerPoint = (int(args.worldSize/2), int(args.worldSize/2))
anchors = []
if patches == 0:
pass
elif patches == 1:
anchors.append(centerPoint)
elif patches % 2 == 0:
dsout = int((patches-2)//2) + 1
add_anchors(centerPoint, d, dsout, anchors, True)
if d != 0:
anchors = list(set(anchors))
anchors.sort()
if dsout != 1:
return (anchors +
calcTightAnchors(args, d, patches-2)
)[:patches*patches]
# to cut off the extras in the case where d=0
else:
# Note - an odd number of args.patchesPerSide requires that there be
# a patch at the centerpoint
dsout = int((patches-1)//2)
add_anchors(centerPoint, d, dsout, anchors, False)
if dsout != 1:
return anchors + calcTightAnchors(args, d, patches-2)
return anchors
def create_conf_file (self):
"""Create configuration file."""
cmd_obj = self.distribution.get_command_obj("install")
cmd_obj.ensure_finalized()
# we have to write a configuration file because we need the
# <install_data> directory (and other stuff like author, url, ...)
# all paths are made absolute by cnormpath()
data = []
for d in ['purelib', 'platlib', 'lib', 'headers', 'scripts', 'data']:
attr = 'install_%s' % d
if cmd_obj.root:
# cut off root path prefix
cutoff = len(cmd_obj.root)
# don't strip the path separator
if cmd_obj.root.endswith(os.sep):
cutoff -= 1
val = getattr(cmd_obj, attr)[cutoff:]
else:
val = getattr(cmd_obj, attr)
if attr == 'install_data':
cdir = os.path.join(val, "share", "linkchecker")
data.append('config_dir = %r' % cnormpath(cdir))
elif attr == 'install_lib':
if cmd_obj.root:
_drive, tail = os.path.splitdrive(val)
if tail.startswith(os.sep):
tail = tail[1:]
self.install_lib = os.path.join(cmd_obj.root, tail)
else:
self.install_lib = val
data.append("%s = %r" % (attr, cnormpath(val)))
self.distribution.create_conf_file(data, directory=self.install_lib)
return self.get_conf_output()
def _set_config(c):
"""Set gl configuration for GLFW """
glfw.glfwWindowHint(glfw.GLFW_RED_BITS, c['red_size'])
glfw.glfwWindowHint(glfw.GLFW_GREEN_BITS, c['green_size'])
glfw.glfwWindowHint(glfw.GLFW_BLUE_BITS, c['blue_size'])
glfw.glfwWindowHint(glfw.GLFW_ALPHA_BITS, c['alpha_size'])
glfw.glfwWindowHint(glfw.GLFW_ACCUM_RED_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_ACCUM_GREEN_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_ACCUM_BLUE_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_ACCUM_ALPHA_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_DEPTH_BITS, c['depth_size'])
glfw.glfwWindowHint(glfw.GLFW_STENCIL_BITS, c['stencil_size'])
# glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MAJOR, c['major_version'])
# glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MINOR, c['minor_version'])
# glfw.glfwWindowHint(glfw.GLFW_SRGB_CAPABLE, c['srgb'])
glfw.glfwWindowHint(glfw.GLFW_SAMPLES, c['samples'])
glfw.glfwWindowHint(glfw.GLFW_STEREO, c['stereo'])
if not c['double_buffer']:
raise RuntimeError('GLFW must double buffer, consider using a '
'different backend, or using double buffering')
def video_in_option(self, param, profile='Day'):
"""
Return video input option.
Params:
param - parameter, such as 'DayNightColor'
profile - 'Day', 'Night' or 'Normal'
"""
if profile == 'Day':
field = param
else:
field = '{}Options.{}'.format(profile, param)
return utils.pretty(
[opt for opt in self.video_in_options.split()
if '].{}='.format(field) in opt][0])
def open(self):
""" Open connection.
"""
# Only connect once
if self._rpc is not None:
return self._rpc
# Get connection URL from rtorrent.rc
self.load_config()
# Reading abilities are on the downfall, so...
if not config.scgi_url:
raise error.UserError("You need to configure a XMLRPC connection, read"
" https://pyrocore.readthedocs.io/en/latest/setup.html")
# Connect and get instance ID (also ensures we're connectable)
self._rpc = xmlrpc.RTorrentProxy(config.scgi_url)
self.versions, self.version_info = self._rpc._set_mappings()
self.engine_id = self._rpc.session.name()
time_usec = self._rpc.system.time_usec()
# Make sure xmlrpc-c works as expected
if time_usec < 2**32:
self.LOG.warn("Your xmlrpc-c is broken (64 bit integer support missing,"
" %r returned instead)" % (type(time_usec),))
# Get other manifest values
self.engine_software = "rTorrent %s/%s" % self.versions
if "+ssh:" in config.scgi_url:
self.startup = int(self._rpc.startup_time() or time.time())
else:
self._session_dir = self._rpc.session.path()
if not self._session_dir:
raise error.UserError("You need a session directory, read"
" https://pyrocore.readthedocs.io/en/latest/setup.html")
if not os.path.exists(self._session_dir):
raise error.UserError("Non-existing session directory %r" % self._session_dir)
self._download_dir = os.path.expanduser(self._rpc.directory.default())
if not os.path.exists(self._download_dir):
raise error.UserError("Non-existing download directory %r" % self._download_dir)
self.startup = os.path.getmtime(os.path.join(self._session_dir, "rtorrent.lock"))
# Return connection
self.LOG.debug(repr(self))
return self._rpc
def convert_table(self, markup):
""" Subtitutes <table> content to Wikipedia markup.
"""
for table in re.findall(self.re["html-table"], markup):
wiki = table
wiki = re.sub(r"<table(.*?)>", "{|\\1", wiki)
wiki = re.sub(r"<tr(.*?)>", "|-\\1", wiki)
wiki = re.sub(r"<td(.*?)>", "|\\1|", wiki)
wiki = wiki.replace("</td>", "\n")
wiki = wiki.replace("</tr>", "\n")
wiki = wiki.replace("</table>", "\n|}")
markup = markup.replace(table, wiki)
return markup
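The substitutions map HTML table tags onto MediaWiki markup one-for-one. A minimal, self-contained illustration of the same regex rewrites (independent of the class's precompiled self.re patterns):

import re

html = '<table border="1"><tr><td>cell</td></tr></table>'
wiki = re.sub(r"<table(.*?)>", "{|\\1", html)   # <table ...>  ->  {| ...
wiki = re.sub(r"<tr(.*?)>", "|-\\1", wiki)      # <tr ...>     ->  |- ...
wiki = re.sub(r"<td(.*?)>", "|\\1|", wiki)      # <td ...>     ->  | ... |
wiki = wiki.replace("</td>", "\n").replace("</tr>", "\n").replace("</table>", "\n|}")
print(wiki)  # '{| border="1"|-||cell' plus trailing newlines and the closing '|}'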
def merge(self, schema):
"""
Merge the contents from the schema. Only objects not already contained
in this schema's collections are merged. This is to provide for
bidirectional import which produce cyclic includes.
@returns: self
@rtype: L{Schema}
"""
for item in schema.attributes.items():
if item[0] in self.attributes:
continue
self.all.append(item[1])
self.attributes[item[0]] = item[1]
for item in schema.elements.items():
if item[0] in self.elements:
continue
self.all.append(item[1])
self.elements[item[0]] = item[1]
for item in schema.types.items():
if item[0] in self.types:
continue
self.all.append(item[1])
self.types[item[0]] = item[1]
for item in schema.groups.items():
if item[0] in self.groups:
continue
self.all.append(item[1])
self.groups[item[0]] = item[1]
for item in schema.agrps.items():
if item[0] in self.agrps:
continue
self.all.append(item[1])
self.agrps[item[0]] = item[1]
schema.merged = True
return self
def _setup_redis(self):
"""Returns a Redis Client"""
if not self.closed:
try:
self.logger.debug("Creating redis connection to host " +
str(self.settings['REDIS_HOST']))
self.redis_conn = redis.StrictRedis(host=self.settings['REDIS_HOST'],
port=self.settings['REDIS_PORT'],
db=self.settings['REDIS_DB'])
self.redis_conn.info()
self.redis_connected = True
self.logger.info("Successfully connected to redis")
except KeyError as e:
self.logger.error('Missing setting named ' + str(e),
{'ex': traceback.format_exc()})
except:
self.logger.error("Couldn't initialize redis client.",
{'ex': traceback.format_exc()})
raise
def convert_column(data, schemae):
"""Convert known types from primitive to rich."""
ctype = schemae.converted_type
if ctype == parquet_thrift.ConvertedType.DECIMAL:
scale_factor = Decimal("10e-{}".format(schemae.scale))
if schemae.type == parquet_thrift.Type.INT32 or schemae.type == parquet_thrift.Type.INT64:
return [Decimal(unscaled) * scale_factor for unscaled in data]
return [Decimal(intbig(unscaled)) * scale_factor for unscaled in data]
elif ctype == parquet_thrift.ConvertedType.DATE:
return [datetime.date.fromordinal(d) for d in data]
elif ctype == parquet_thrift.ConvertedType.TIME_MILLIS:
return [datetime.timedelta(milliseconds=d) for d in data]
elif ctype == parquet_thrift.ConvertedType.TIMESTAMP_MILLIS:
return [datetime.datetime.utcfromtimestamp(d / 1000.0) for d in data]
elif ctype == parquet_thrift.ConvertedType.UTF8:
return [codecs.decode(item, "utf-8") for item in data]
elif ctype == parquet_thrift.ConvertedType.UINT_8:
return _convert_unsigned(data, 'b')
elif ctype == parquet_thrift.ConvertedType.UINT_16:
return _convert_unsigned(data, 'h')
elif ctype == parquet_thrift.ConvertedType.UINT_32:
return _convert_unsigned(data, 'i')
elif ctype == parquet_thrift.ConvertedType.UINT_64:
return _convert_unsigned(data, 'q')
elif ctype == parquet_thrift.ConvertedType.JSON:
return [json.loads(s) for s in codecs.iterdecode(data, "utf-8")]
elif ctype == parquet_thrift.ConvertedType.BSON and bson:
return [bson.BSON(s).decode() for s in data]
else:
logger.info("Converted type '%s'' not handled",
parquet_thrift.ConvertedType._VALUES_TO_NAMES[ctype]) # pylint:disable=protected-access
return data
def _add_scheme():
"""
urllib.parse doesn't support the mongodb scheme, but it's easy
to make it so.
"""
lists = [
urllib.parse.uses_relative,
urllib.parse.uses_netloc,
urllib.parse.uses_query,
]
for l in lists:
l.append('mongodb')
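The practical effect shows up in functions such as urljoin, which only resolve relative references for schemes registered in those tables. A small before/after sketch:

import urllib.parse

# Before registration, the mongodb scheme is not treated as hierarchical:
urllib.parse.urljoin('mongodb://db.example.com/prod', 'replica')
# -> 'replica'

_add_scheme()

# Afterwards relative resolution works as for http:
urllib.parse.urljoin('mongodb://db.example.com/prod', 'replica')
# -> 'mongodb://db.example.com/replica'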
def get_body_region(defined):
"""Return the start and end offsets of function body"""
scope = defined.get_scope()
pymodule = defined.get_module()
lines = pymodule.lines
node = defined.get_ast()
start_line = node.lineno
if defined.get_doc() is None:
start_line = node.body[0].lineno
elif len(node.body) > 1:
start_line = node.body[1].lineno
start = lines.get_line_start(start_line)
scope_start = pymodule.logical_lines.logical_line_in(scope.start)
if scope_start[1] >= start_line:
# a one-liner!
# XXX: what if colon appears in a string
start = pymodule.source_code.index(':', start) + 1
while pymodule.source_code[start].isspace():
start += 1
end = min(lines.get_line_end(scope.end) + 1, len(pymodule.source_code))
return start, end
def delta_crl_distribution_points(self):
"""
Returns delta CRL URLs - only applies to complete CRLs
:return:
A list of zero or more DistributionPoint objects
"""
if self._delta_crl_distribution_points is None:
self._delta_crl_distribution_points = []
if self.freshest_crl_value is not None:
for distribution_point in self.freshest_crl_value:
distribution_point_name = distribution_point['distribution_point']
# RFC 5280 indicates conforming CA should not use the relative form
if distribution_point_name.name == 'name_relative_to_crl_issuer':
continue
# This library is currently only concerned with HTTP-based CRLs
for general_name in distribution_point_name.chosen:
if general_name.name == 'uniform_resource_identifier':
self._delta_crl_distribution_points.append(distribution_point)
return self._delta_crl_distribution_points
def on_train_begin(self, **kwargs:Any)->None:
"Initializes the best value."
self.best = float('inf') if self.operator == np.less else -float('inf')
def get_selected_subassistant_path(self, **kwargs):
"""Recursively searches self._tree - has format of (Assistant: [list_of_subassistants]) -
for specific path from first to last selected subassistants.
Args:
kwargs: arguments containing names of the given assistants in form of
subassistant_0 = 'name', subassistant_1 = 'another_name', ...
Returns:
list of subassistants objects from tree sorted from first to last
"""
path = [self]
previous_subas_list = None
currently_searching = self.get_subassistant_tree()[1]
# len(path) - 1 always points to next subassistant_N, so we can use it to control iteration
while settings.SUBASSISTANT_N_STRING.format(len(path) - 1) in kwargs and \
kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)]:
for sa, subas_list in currently_searching:
if sa.name == kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)]:
currently_searching = subas_list
path.append(sa)
break # sorry if you shed a tear ;)
if subas_list == previous_subas_list:
raise exceptions.AssistantNotFoundException(
'No assistant {n} after path {p}.'.format(
n=kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)],
p=path))
previous_subas_list = subas_list
return path
def _browser_init(self):
"""
Init the browsing instance if not setup
:rtype: None
"""
if self.session:
return
self.session = requests.Session()
headers = {}
if self.user_agent:
headers['User-agent'] = self.user_agent
self.session.headers.update(headers)
if self._auth_method in [None, "", "HTTPBasicAuth"]:
if self._auth_username is not None:
self.session.auth = (self._auth_username, self._auth_password)
def readShocks(self):
'''
Reads values of shock variables for the current period from history arrays. For each
variable X named in self.shock_vars, this attribute of self is set to self.X_hist[self.t_sim,:].
This method is only ever called if self.read_shocks is True. This can be achieved by using
the method makeShockHistory() (or manually after storing a "handcrafted" shock history).
Parameters
----------
None
Returns
-------
None
'''
for var_name in self.shock_vars:
setattr(self,var_name,getattr(self,var_name+'_hist')[self.t_sim,:])
def getFingerprintForExpression(self, body, sparsity=1.0):
"""Resolve an expression
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
Fingerprint
Raises:
CorticalioException: if the request was not successful
"""
return self._expressions.resolveExpression(self._retina, body, sparsity)
def ray_triangle_id(triangles,
ray_origins,
ray_directions,
triangles_normal=None,
tree=None,
multiple_hits=True):
"""
Find the intersections between a group of triangles and rays
Parameters
-------------
triangles : (n, 3, 3) float
Triangles in space
ray_origins : (m, 3) float
Ray origin points
ray_directions : (m, 3) float
Ray direction vectors
triangles_normal : (n, 3) float
Normal vector of triangles, optional
tree : rtree.Index
Rtree object holding triangle bounds
Returns
-----------
index_triangle : (h,) int
Index of triangles hit
index_ray : (h,) int
Index of ray that hit triangle
locations : (h, 3) float
Position of intersection in space
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
ray_origins = np.asanyarray(ray_origins, dtype=np.float64)
ray_directions = np.asanyarray(ray_directions, dtype=np.float64)
# if we didn't get passed an r-tree for the bounds of each
# triangle create one here
if tree is None:
tree = triangles_mod.bounds_tree(triangles)
# find the list of likely triangles and which ray they
# correspond with, via rtree queries
ray_candidates, ray_id = ray_triangle_candidates(
ray_origins=ray_origins,
ray_directions=ray_directions,
tree=tree)
# get subsets which are corresponding rays and triangles
# (c,3,3) triangle candidates
triangle_candidates = triangles[ray_candidates]
# (c,3) origins and vectors for the rays
line_origins = ray_origins[ray_id]
line_directions = ray_directions[ray_id]
# get the plane origins and normals from the triangle candidates
plane_origins = triangle_candidates[:, 0, :]
if triangles_normal is None:
plane_normals, triangle_ok = triangles_mod.normals(
triangle_candidates)
if not triangle_ok.all():
raise ValueError('Invalid triangles!')
else:
plane_normals = triangles_normal[ray_candidates]
# find the intersection location of the rays with the planes
location, valid = intersections.planes_lines(
plane_origins=plane_origins,
plane_normals=plane_normals,
line_origins=line_origins,
line_directions=line_directions)
if (len(triangle_candidates) == 0 or
not valid.any()):
return [], [], []
# find the barycentric coordinates of each plane intersection on the
# triangle candidates
barycentric = triangles_mod.points_to_barycentric(
triangle_candidates[valid], location)
# the plane intersection is inside the triangle if all barycentric coordinates
# are between 0.0 and 1.0
hit = np.logical_and((barycentric > -tol.zero).all(axis=1),
(barycentric < (1 + tol.zero)).all(axis=1))
# the result index of the triangle is a candidate with a valid plane intersection and
# a triangle which contains the plane intersection point
index_tri = ray_candidates[valid][hit]
# the ray index is a subset with a valid plane intersection and contained
# by a triangle
index_ray = ray_id[valid][hit]
# locations are already valid plane intersections, just mask by hits
location = location[hit]
# only return points that are forward from the origin
vector = location - ray_origins[index_ray]
distance = util.diagonal_dot(vector, ray_directions[index_ray])
forward = distance > -1e-6
index_tri = index_tri[forward]
index_ray = index_ray[forward]
location = location[forward]
distance = distance[forward]
if multiple_hits:
return index_tri, index_ray, location
# since we are not returning multiple hits, we need to
# figure out which hit is first
if len(index_ray) == 0:
return index_tri, index_ray, location
first = np.zeros(len(index_ray), dtype=np.bool)
groups = grouping.group(index_ray)
for group in groups:
index = group[distance[group].argmin()]
first[index] = True
return index_tri[first], index_ray[first], location[first]
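A hedged usage sketch; the import path is an assumption about where this function lives inside trimesh and may need adjusting, and the expected values are simply what the geometry implies.

import numpy as np
from trimesh.ray.ray_triangle import ray_triangle_id   # assumed location

# One triangle in the z=0 plane and one ray shot straight down through it.
triangles = np.array([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])
ray_origins = np.array([[0.25, 0.25, 1.0]])
ray_directions = np.array([[0.0, 0.0, -1.0]])

index_tri, index_ray, locations = ray_triangle_id(
    triangles, ray_origins, ray_directions)
# expect index_tri == [0], index_ray == [0], locations ~ [[0.25, 0.25, 0.0]]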
def register_service(cls, service):
"""Add a service to the thread's StackInABox instance.
:param service: StackInABoxService instance to add to the test
For return value and errors see StackInABox.register()
"""
logger.debug('Registering service {0}'.format(service.name))
return local_store.instance.register(service)
def _import_astorb_to_database(
self,
astorbDictList):
"""*import the astorb orbital elements to database*
**Key Arguments:**
- ``astorbDictList`` -- the astorb database parsed as a list of dictionaries
**Return:**
- None
"""
self.log.info('starting the ``_import_astorb_to_database`` method')
print "Refreshing the orbital elements database table"
dbSettings = self.settings["database settings"]["atlasMovers"]
insert_list_of_dictionaries_into_database_tables(
dbConn=self.atlasMoversDBConn,
log=self.log,
dictList=astorbDictList,
dbTableName="orbital_elements",
uniqueKeyList=["name"],
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=dbSettings
)
print "Finished refreshing the orbital elements database table"
self.log.info('completed the ``_import_astorb_to_database`` method')
return None
def persist(self):
"""
Banana banana
"""
os.makedirs(self.__symbol_folder, exist_ok=True)
os.makedirs(self.__aliases_folder, exist_ok=True)
os.makedirs(self.__comments_folder, exist_ok=True)
for name, sym in self.__symbols.items():
with open(self.__get_pickle_path(self.__symbol_folder, name, True), 'wb') as _:
pickle.dump(sym, _)
for name, aliases in self.__aliases.items():
if aliases:
with open(self.__get_pickle_path(self.__aliases_folder, name, True), 'wb') as _:
pickle.dump(aliases, _)
for name, comment in self.__comments.items():
if comment:
with open(self.__get_pickle_path(self.__comments_folder, name, True), 'wb') as _:
pickle.dump(comment, _)
def thumbnail(self):
"""Path to the thumbnail of the album."""
if self._thumbnail:
# stop if it is already set
return self._thumbnail
# Test the thumbnail from the Markdown file.
thumbnail = self.meta.get('thumbnail', [''])[0]
if thumbnail and isfile(join(self.src_path, thumbnail)):
self._thumbnail = url_from_path(join(
self.name, get_thumb(self.settings, thumbnail)))
self.logger.debug("Thumbnail for %r : %s", self, self._thumbnail)
return self._thumbnail
else:
# find and return the first landscape image
for f in self.medias:
ext = splitext(f.filename)[1]
if ext.lower() in self.settings['img_extensions']:
# Use f.size if available as it is quicker (in cache), but
# fallback to the size of src_path if dst_path is missing
size = f.size
if size is None:
size = get_size(f.src_path)
if size['width'] > size['height']:
self._thumbnail = (url_quote(self.name) + '/' +
f.thumbnail)
self.logger.debug(
"Use 1st landscape image as thumbnail for %r : %s",
self, self._thumbnail)
return self._thumbnail
# else simply return the 1st media file
if not self._thumbnail and self.medias:
for media in self.medias:
if media.thumbnail is not None:
self._thumbnail = (url_quote(self.name) + '/' +
media.thumbnail)
break
else:
self.logger.warning("No thumbnail found for %r", self)
return None
self.logger.debug("Use the 1st image as thumbnail for %r : %s",
self, self._thumbnail)
return self._thumbnail
# use the thumbnail of their sub-directories
if not self._thumbnail:
for path, album in self.gallery.get_albums(self.path):
if album.thumbnail:
self._thumbnail = (url_quote(self.name) + '/' +
album.thumbnail)
self.logger.debug(
"Using thumbnail from sub-directory for %r : %s",
self, self._thumbnail)
return self._thumbnail
self.logger.error('Thumbnail not found for %r', self)
return None
def parse(self):
"""Parse a Supybot IRC stream.
Returns an iterator of dicts. Each dicts contains information
about the date, type, nick and body of a single log entry.
:returns: iterator of parsed lines
:raises ParseError: when an invalid line is found parsing the given
stream
"""
for line in self.stream:
line = line.rstrip('\n')
self.nline += 1
if self.SUPYBOT_EMPTY_REGEX.match(line):
continue
ts, msg = self._parse_supybot_timestamp(line)
if self.SUPYBOT_EMPTY_COMMENT_REGEX.match(msg):
continue
elif self.SUPYBOT_EMPTY_COMMENT_ACTION_REGEX.match(msg):
continue
elif self.SUPYBOT_EMPTY_BOT_REGEX.match(msg):
continue
itype, nick, body = self._parse_supybot_msg(msg)
item = self._build_item(ts, itype, nick, body)
yield item
def compact_bucket(db, buck_key, limit):
"""
Perform the compaction operation. This reads in the bucket
information from the database, builds a compacted bucket record,
inserts that record in the appropriate place in the database, then
removes outdated updates.
:param db: A database handle for the Redis database.
:param buck_key: A turnstile.limits.BucketKey instance containing
the bucket key.
:param limit: The turnstile.limits.Limit object corresponding to
the bucket.
"""
# Suck in the bucket records and generate our bucket
records = db.lrange(str(buck_key), 0, -1)
loader = limits.BucketLoader(limit.bucket_class, db, limit,
str(buck_key), records, stop_summarize=True)
# We now have the bucket loaded in; generate a 'bucket' record
buck_record = msgpack.dumps(dict(bucket=loader.bucket.dehydrate(),
uuid=str(uuid.uuid4())))
# Now we need to insert it into the record list
result = db.linsert(str(buck_key), 'after', loader.last_summarize_rec,
buck_record)
# Were we successful?
if result < 0:
# Insert failed; we'll try again when max_age is hit
LOG.warning("Bucket compaction on %s failed; will retry" % buck_key)
return
# OK, we have confirmed that the compacted bucket record has been
# inserted correctly; now all we need to do is trim off the
# outdated update records
db.ltrim(str(buck_key), loader.last_summarize_idx + 1, -1)
def fetch(self):
"""
Fetch this student's courses page. It's recommended to do that when
creating the object (this is the default) because the remote sessions
are short.
"""
soup = self.session.get_results_soup()
self.courses = CoursesList(soup)
def hdfFromKwargs(hdf=None, **kwargs):
"""If given an instance that has toHDF() method that method is invoked to get that object's HDF representation"""
if not hdf:
hdf = HDF()
for key, value in kwargs.items():
if isinstance(value, dict):
#print "dict:",value
for k, v in value.items():
dkey = "%s.%s"%(key,k)
#print "k,v,dkey:",k,v,dkey
args = {dkey:v}
hdfFromKwargs(hdf=hdf, **args)
elif isinstance(value, (list, tuple)):
#print "list:",value
for i, item in enumerate(value):
ikey = "%s.%s"%(key,i)
#print "i,item:",i,item, ikey
if isinstance(item, (list, tuple)):
args = {ikey:item}
hdfFromKwargs(hdf=hdf, **args)
elif isinstance(item, dict):
args = {ikey:item}
hdfFromKwargs(hdf=hdf, **args)
elif getattr(item, "HDF_ATTRIBUTES", False):
attrs = {}
for attr in item.HDF_ATTRIBUTES:
attrs[attr] = getattr(item, attr, "")
hdfFromKwargs(hdf=hdf, **{ikey:attrs})
else:
hdf.setValue(ikey, str(item))
elif getattr(value, "HDF_ATTRIBUTES", False):
attrs = {}
for attr in value.HDF_ATTRIBUTES:
attrs[attr] = getattr(value, attr, "")
hdfFromKwargs(hdf=hdf, **{key:attrs})
else:
hdf.setValue(key, str(value))
#print "HDF:",hdf.dump()
return hdf
def set_confound_pipeline(self, confound_pipeline):
"""
There may be times when the pipeline is updated (e.g. teneto) but you want the confounds from the preprocessing pipeline (e.g. fmriprep).
To do this, you set the confound_pipeline to be the preprocessing pipeline where the confound files are.
Parameters
----------
confound_pipeline : str
Directory in the BIDS_dir where the confounds file is.
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
if not os.path.exists(self.BIDS_dir + '/derivatives/' + confound_pipeline):
print('Specified derivative directory not found.')
self.get_pipeline_alternatives()
else:
# Todo: perform check that pipeline is valid
self.confound_pipeline = confound_pipeline
def rest(o) -> Optional[ISeq]:
"""If o is a ISeq, return the elements after the first in o. If o is None,
returns an empty seq. Otherwise, coerces o to a seq and returns the rest."""
if o is None:
return None
if isinstance(o, ISeq):
s = o.rest
if s is None:
return lseq.EMPTY
return s
n = to_seq(o)
if n is None:
return lseq.EMPTY
return n.rest
def get_coords(x, y, params):
"""
Transforms the given coordinates from plane-space to Mandelbrot-space (real and imaginary).
:param x: X coordinate on the plane.
:param y: Y coordinate on the plane.
:param params: Current application parameters.
:type params: params.Params
:return: Tuple containing the re-mapped coordinates in Mandelbrot-space.
"""
n_x = x * 2.0 / params.plane_w * params.plane_ratio - 1.0
n_y = y * 2.0 / params.plane_h - 1.0
mb_x = params.zoom * n_x
mb_y = params.zoom * n_y
return mb_x, mb_y
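A short worked example with made-up parameters; plane_ratio is assumed here to be plane_w / plane_h, which the real Params class may or may not define the same way.

from types import SimpleNamespace

params = SimpleNamespace(plane_w=800, plane_h=600, plane_ratio=800 / 600, zoom=2.0)

# Centre of the plane (x=400, y=300):
#   n_x = 400 * 2.0 / 800 * (800 / 600) - 1.0 = 1/3
#   n_y = 300 * 2.0 / 600 - 1.0              = 0.0
print(get_coords(400, 300, params))   # ~ (0.667, 0.0)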
def read_entity(self, entity_id, mount_point=DEFAULT_MOUNT_POINT):
"""Query an entity by its identifier.
Supported methods:
GET: /auth/{mount_point}/entity/id/{id}. Produces: 200 application/json
:param entity_id: Identifier of the entity.
:type entity_id: str
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
api_path = '/v1/{mount_point}/entity/id/{id}'.format(
mount_point=mount_point,
id=entity_id,
)
response = self._adapter.get(url=api_path)
return response.json()
def decompress(images, delete_png=False, delete_json=False, folder=None):
"""Reverse compression from tif to png and save them in original format
(ome.tif). TIFF-tags are gotten from json-files named the same as given
images.
Parameters
----------
images : list of filenames
Image to decompress.
delete_png : bool
Whether to delete PNG images.
delete_json : bool
Whether to delete TIFF-tags stored in json files on compress.
Returns
-------
list of filenames
List of decompressed files.
"""
if type(images) == str:
# only one image
return decompress([images])
filenames = copy(images) # as images property will change when looping
decompressed_images = []
for orig_filename in filenames:
debug('decompressing {}'.format(orig_filename))
try:
filename, extension = os.path.splitext(orig_filename)
# if decompressed file should be put in specified folder
if folder:
basename = os.path.basename(filename)
new_filename = os.path.join(folder, basename + '.ome.tif')
else:
new_filename = filename + '.ome.tif'
# check if tif exists
if os.path.isfile(new_filename):
decompressed_images.append(new_filename)
msg = "Aborting decompress, TIFF already exists:" \
" {}".format(orig_filename)
raise AssertionError(msg)
if extension != '.png':
msg = "Aborting decompress, not a " \
"PNG: {}".format(orig_filename)
raise AssertionError(msg)
# open image, load and close file pointer
img = Image.open(orig_filename)
img.load() # load img-data before switching mode, also closes fp
# get tags from json
info = {}
with open(filename + '.json', 'r') as f:
tags = json.load(f)
# convert dictionary to original types (lost in json conversion)
for tag,val in tags.items():
if tag == 'palette':
# hack hack
continue
if type(val) == list:
val = tuple(val)
if type(val[0]) == list:
# list of list
val = tuple(tuple(x) for x in val)
info[int(tag)] = val
# check for color map
if 'palette' in tags:
img.putpalette(tags['palette'])
# save as tif
debug('saving to {}'.format(new_filename))
img.save(new_filename, tiffinfo=info)
decompressed_images.append(new_filename)
if delete_png:
os.remove(orig_filename)
if delete_json:
os.remove(filename + '.json')
except (IOError, AssertionError) as e:
# print error - continue
print('leicaexperiment {}'.format(e))
return decompressed_images
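A hedged usage example; the filenames are hypothetical and assume that the matching .json tag files written at compress time sit next to the PNGs.

pngs = ['experiment/chamber--U00--V00/image--X00--Y00.png',
        'experiment/chamber--U00--V00/image--X01--Y00.png']
tifs = decompress(pngs, delete_png=False, delete_json=False,
                  folder='experiment/restored')
# every entry in tifs ends with '.ome.tif'; failures are printed and skipped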
def add_line_to_file(self, line, filename, expect=None, shutit_pexpect_child=None, match_regexp=None, loglevel=logging.DEBUG):
"""Deprecated.
Use replace/insert_text instead.
Adds line to file if it doesn't exist (unless Force is set, which it is not by default).
Creates the file if it doesn't exist.
Must be exactly the line passed in to match.
Returns True if line(s) added OK, False if not.
If you have a lot of non-unique lines to add, it's a good idea to have a sentinel value to add first, and then if that returns true, force the remainder.
@param line: Line to add. If a list, processed per-item, and match_regexp ignored.
@param filename: Filename to add it to.
@param expect: See send()
@param shutit_pexpect_child: See send()
@param match_regexp: If supplied, a regexp to look for in the file instead of the line itself, handy if the line has awkward characters in it.
@type line: string
@type filename: string
@type match_regexp: string
"""
shutit_global.shutit_global_object.yield_to_draw()
if isinstance(line, str):
lines = [line]
elif isinstance(line, list):
lines = line
match_regexp = None
fail = False
for fline in lines:
if match_regexp is None:
this_match_regexp = fline
else:
this_match_regexp = match_regexp
if not self.replace_text(fline,
filename,
pattern=this_match_regexp,
shutit_pexpect_child=shutit_pexpect_child,
expect=expect,
loglevel=loglevel):
fail = True
if fail:
return False
return True
def listfolder(p):
"""
generator of list folder in the path.
folders only
"""
for entry in scandir.scandir(p):
if entry.is_dir():
yield entry.name
def handler_view(self, request, resource_name, ids=None):
""" Handler for resources.
.. versionadded:: 0.5.7
Content-Type check
:return django.http.HttpResponse
"""
signal_request.send(sender=self, request=request)
time_start = time.time()
self.update_urls(request, resource_name=resource_name, ids=ids)
resource = self.resource_map[resource_name]
allowed_http_methods = resource.Meta.allowed_methods
if request.method not in allowed_http_methods:
response = HttpResponseNotAllowed(
permitted_methods=allowed_http_methods)
signal_response.send(
sender=self, request=request, response=response,
duration=time.time() - time_start)
return response
if resource.Meta.authenticators and not (
request.method == "GET" and
resource.Meta.disable_get_authentication):
user = resource.authenticate(request)
if user is None or not user.is_authenticated():
response = HttpResponse("Not Authenticated", status=401)
signal_response.send(
sender=self, request=request, response=response,
duration=time.time() - time_start)
return response
kwargs = dict(request=request)
if ids is not None:
kwargs['ids'] = ids.split(",")
try:
if request.method == "GET":
response = self.handler_view_get(resource, **kwargs)
elif request.method == "POST":
response = self.handler_view_post(resource, **kwargs)
elif request.method == "PUT":
response = self.handler_view_put(resource, **kwargs)
elif request.method == "DELETE":
response = self.handler_view_delete(resource, **kwargs)
except JSONAPIError as e:
response = HttpResponse(
json.dumps({"errors": [e.data]}, cls=DatetimeDecimalEncoder),
content_type=self.CONTENT_TYPE, status=e.status)
signal_response.send(sender=self, request=request, response=response,
duration=time.time() - time_start)
return response
def psicomputations(variance, Z, variational_posterior, return_psi2_n=False):
"""
Compute psi-statistics for ss-linear kernel
"""
# here are the "statistics" for psi0, psi1 and psi2
# Produced intermediate results:
# psi0 N
# psi1 NxM
# psi2 MxM
mu = variational_posterior.mean
S = variational_posterior.variance
psi0 = (variance*(np.square(mu)+S)).sum(axis=1)
Zv = variance * Z
psi1 = np.dot(mu,Zv.T)
if return_psi2_n:
psi2 = psi1[:,:,None] * psi1[:,None,:] + np.dot(S[:,None,:] * Zv[None,:,:], Zv.T)
else:
psi2 = np.dot(S.sum(axis=0) * Zv, Zv.T) + tdot(psi1.T)
return psi0, psi1, psi2
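A shape-check sketch with random inputs: the variational posterior is mocked with a SimpleNamespace carrying mean and variance arrays, and the surrounding module is assumed to provide tdot (taken here to be A.dot(A.T)).

import numpy as np
from types import SimpleNamespace

N, M, Q = 5, 3, 2
post = SimpleNamespace(mean=np.random.randn(N, Q), variance=np.random.rand(N, Q))
Z = np.random.randn(M, Q)
variance = np.random.rand(Q)   # one linear-kernel variance per input dimension

psi0, psi1, psi2 = psicomputations(variance, Z, post)
print(psi0.shape, psi1.shape, psi2.shape)   # (5,) (5, 3) (3, 3)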
def subscribe(self, sr):
"""Login required. Send POST to subscribe to a subreddit. If ``sr`` is the name of the subreddit, a GET request is sent to retrieve the full id of the subreddit, which is necessary for this API call. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.
URL: ``http://www.reddit.com/api/subscribe/``
:param sr: full id of subreddit or name of subreddit (full id is preferred)
"""
if not sr.startswith('t5_'):
sr = self.subreddit(sr).name
data = dict(action='sub', sr=sr)
j = self.post('api', 'subscribe', data=data)
return assert_truthy(j)
def resource(self, api_path=None, base_path='/api/now', chunk_size=None):
"""Overrides :meth:`resource` provided by :class:`pysnow.Client` with extras for OAuth
:param api_path: Path to the API to operate on
:param base_path: (optional) Base path override
:param chunk_size: Response stream parser chunk size (in bytes)
:return:
- :class:`Resource` object
:raises:
- InvalidUsage: If a path fails validation
"""
if isinstance(self.token, dict):
self.session = self._get_oauth_session()
return super(OAuthClient, self).resource(api_path, base_path, chunk_size)
raise MissingToken("You must set_token() before creating a resource with OAuthClient")
def gen_etree(self):
"""convert an RST tree (DGParentedTree -> lxml etree)"""
relations_elem = self.gen_relations()
header = E('header')
header.append(relations_elem)
self.gen_body()
tree = E('rst')
tree.append(header)
# The <body> contains both <segment>, as well as <group> elements.
# While the order of the elements should theoretically be irrelevant,
# rs3 files usually list the segments before the groups.
body = E('body')
for segment in self.body['segments']:
body.append(segment)
for group in self.body['groups']:
body.append(group)
tree.append(body)
return tree
def repeat(col, n):
"""
Repeats a string column n times, and returns it as a new string column.
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s=u'ababab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
def rename_pool(service, old_name, new_name):
"""
Rename a Ceph pool from old_name to new_name
:param service: six.string_types. The Ceph user name to run the command under
:param old_name: six.string_types
:param new_name: six.string_types
:return: None
"""
validator(value=old_name, valid_type=six.string_types)
validator(value=new_name, valid_type=six.string_types)
cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
check_call(cmd) | Rename a Ceph pool from old_name to new_name
:param service: six.string_types. The Ceph user name to run the command under
:param old_name: six.string_types
:param new_name: six.string_types
:return: None |
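A quick usage sketch for the helper above; it assumes a reachable Ceph cluster and that the `validator` helper from the surrounding module is importable. The pool names are hypothetical.
# Hedged usage sketch: rename a pool as the 'admin' Ceph user.
# Equivalent CLI: ceph --id admin osd pool rename data-old data-new
rename_pool('admin', 'data-old', 'data-new')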
def register(cls, config={}):
"""
This function is basically a shortcut of boot for accessors
that have only the config dict argument.
Args
----
config (dict): the configuration dictionary
"""
if cls.accessor is not None:
if cls.instance is None:
cls.instance = cls.accessor(config) | This function is basically a shortcut of boot for accessors
that have only the config dict argument.
Args
----
config (dict): the configuration dictionary |
def service_running(service_name, **kwargs):
"""Determine whether a system service is running.
:param service_name: the name of the service
:param **kwargs: additional args to pass to the service command. This is
used to pass additional key=value arguments to the
service command line for managing specific instance
units (e.g. service ceph-osd status id=2). The kwargs
are ignored in systemd services.
"""
if init_is_systemd():
return service('is-active', service_name)
else:
if os.path.exists(_UPSTART_CONF.format(service_name)):
try:
cmd = ['status', service_name]
for key, value in six.iteritems(kwargs):
parameter = '%s=%s' % (key, value)
cmd.append(parameter)
output = subprocess.check_output(
cmd, stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
# This works for upstart scripts where the 'service' command
# returns a consistent string to represent running
# 'start/running'
if ("start/running" in output or
"is running" in output or
"up and running" in output):
return True
elif os.path.exists(_INIT_D_CONF.format(service_name)):
# Check System V scripts init script return codes
return service('status', service_name)
return False | Determine whether a system service is running.
:param service_name: the name of the service
:param **kwargs: additional args to pass to the service command. This is
used to pass additional key=value arguments to the
service command line for managing specific instance
units (e.g. service ceph-osd status id=2). The kwargs
are ignored in systemd services. |
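A minimal usage sketch, assuming the function above is importable (for example from a charmhelpers-style host module); the unit-id kwarg only affects upstart systems, as the docstring notes.
# Hedged usage sketch: check a specific ceph-osd unit.
if service_running('ceph-osd', id=2):
    print('ceph-osd unit 2 is running')
else:
    print('ceph-osd unit 2 is stopped')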
def read(self):
"""Reads record from current position in reader.
Returns:
original bytes stored in a single record.
"""
data = None
while True:
last_offset = self.tell()
try:
(chunk, record_type) = self.__try_read_record()
if record_type == _RECORD_TYPE_NONE:
self.__sync()
elif record_type == _RECORD_TYPE_FULL:
if data is not None:
logging.warning(
"Ordering corruption: Got FULL record while already "
"in a chunk at offset %d", last_offset)
return chunk
elif record_type == _RECORD_TYPE_FIRST:
if data is not None:
logging.warning(
"Ordering corruption: Got FIRST record while already "
"in a chunk at offset %d", last_offset)
data = chunk
elif record_type == _RECORD_TYPE_MIDDLE:
if data is None:
logging.warning(
"Ordering corruption: Got MIDDLE record before FIRST "
"record at offset %d", last_offset)
else:
data += chunk
elif record_type == _RECORD_TYPE_LAST:
if data is None:
logging.warning(
"Ordering corruption: Got LAST record but no chunk is in "
"progress at offset %d", last_offset)
else:
result = data + chunk
data = None
return result
else:
raise errors.InvalidRecordError(
"Unsupported record type: %s" % record_type)
except errors.InvalidRecordError, e:
logging.warning("Invalid record encountered at %s (%s). Syncing to "
"the next block", last_offset, e)
data = None
self.__sync() | Reads record from current position in reader.
Returns:
original bytes stored in a single record. |
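The FULL/FIRST/MIDDLE/LAST framing above can be illustrated independently of the reader class. This is a simplified sketch, assuming records arrive as (record_type, chunk) pairs and ignoring the resync-on-corruption handling; the type constants are stand-ins.
# Simplified reassembly of the record framing used above (assumed constants).
_FULL, _FIRST, _MIDDLE, _LAST = 'FULL', 'FIRST', 'MIDDLE', 'LAST'

def reassemble(pieces):
    """Yield complete records from an iterable of (record_type, chunk) pairs."""
    data = None
    for record_type, chunk in pieces:
        if record_type == _FULL:
            yield chunk
        elif record_type == _FIRST:
            data = chunk
        elif record_type == _MIDDLE and data is not None:
            data += chunk
        elif record_type == _LAST and data is not None:
            yield data + chunk
            data = None

print(list(reassemble([(_FULL, b'a'), (_FIRST, b'b'), (_MIDDLE, b'c'), (_LAST, b'd')])))
# -> [b'a', b'bcd']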
def get_subnets_for_net(self, net):
"""Returns the subnets in a network. """
try:
subnet_list = self.neutronclient.list_subnets(network_id=net)
subnet_dat = subnet_list.get('subnets')
return subnet_dat
except Exception as exc:
LOG.error("Failed to list subnet net %(net)s, Exc: %(exc)s",
{'net': net, 'exc': str(exc)})
return None | Returns the subnets in a network. |
async def get_access_token(consumer_key, consumer_secret,
oauth_token, oauth_token_secret,
oauth_verifier, **kwargs):
"""
get the access token of the user
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
oauth_token : str
OAuth token from :func:`get_oauth_token`
oauth_token_secret : str
OAuth token secret from :func:`get_oauth_token`
oauth_verifier : str
OAuth verifier from :func:`get_oauth_verifier`
Returns
-------
dict
Access tokens
"""
client = BasePeonyClient(consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token=oauth_token,
access_token_secret=oauth_token_secret,
api_version="",
suffix="")
response = await client.api.oauth.access_token.get(
_suffix="",
oauth_verifier=oauth_verifier
)
return parse_token(response) | get the access token of the user
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
oauth_token : str
OAuth token from :func:`get_oauth_token`
oauth_token_secret : str
OAuth token secret from :func:`get_oauth_token`
oauth_verifier : str
OAuth verifier from :func:`get_oauth_verifier`
Returns
-------
dict
Access tokens |
def create_pipeline(self, name, description, **kwargs):
'''Creates a pipeline with the provided attributes.
Args:
name required name string
kwargs {name, description, orgWide, aclEntries} user
specifiable ones only
return (status code, pipeline_dict) (as created)
'''
#req sanity check
if not (name and description):
return requests.codes.bad_request, None
kwargs.update({'name':name, 'description':description})
new_pl = StreakPipeline(**kwargs)
uri = '/'.join([
self.api_uri,
self.pipelines_suffix
])
code, r_data = self._req('put', uri, new_pl.to_dict())
return code, r_data | Creates a pipeline with the provided attributes.
Args:
name required name string
kwargs {name, description, orgWide, aclEntries} user
specifiable ones only
return (status code, pipeline_dict) (as created) |
def wait_for_and_switch_to_alert(driver, timeout=settings.LARGE_TIMEOUT):
"""
Wait for a browser alert to appear, and switch to it. This should be usable
as a drop-in replacement for driver.switch_to.alert when the alert box
may not exist yet.
@Params
driver - the webdriver object (required)
timeout - the time to wait for the alert in seconds
"""
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
try:
alert = driver.switch_to.alert
# Raises exception if no alert present
dummy_variable = alert.text # noqa
return alert
except NoAlertPresentException:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
raise Exception("Alert was not present after %s seconds!" % timeout) | Wait for a browser alert to appear, and switch to it. This should be usable
as a drop-in replacement for driver.switch_to.alert when the alert box
may not exist yet.
@Params
driver - the webdriver object (required)
timeout - the time to wait for the alert in seconds |
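A hedged usage sketch, assuming a local Chrome driver is available and that the helper above is importable as wait_for_and_switch_to_alert.
from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://example.com")
# Trigger an alert shortly after the call so the helper has to poll for it.
driver.execute_script("setTimeout(function(){ alert('done'); }, 500);")
alert = wait_for_and_switch_to_alert(driver, timeout=5)
print(alert.text)   # -> 'done'
alert.accept()
driver.quit()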
def clearData(self):
"""Clears all histograms (keeps bins)"""
self._counts = np.zeros_like(self._bins)
self.histo.setOpts(height=self._counts) | Clears all histograms (keeps bins) |
def first_time_setup(self):
"""First time running Open Sesame?
Create keyring and an auto-unlock key in default keyring. Make sure
these things don't already exist.
"""
if not self._auto_unlock_key_position():
pw = password.create_passwords()[0]
attrs = {'application': self.keyring}
gkr.item_create_sync(self.default_keyring
,gkr.ITEM_GENERIC_SECRET
,self.keyring
,attrs
,pw
,True)
found_pos = self._auto_unlock_key_position()
item_info = gkr.item_get_info_sync(self.default_keyring, found_pos)
gkr.create_sync(self.keyring, item_info.get_secret()) | First time running Open Sesame?
Create keyring and an auto-unlock key in default keyring. Make sure
these things don't already exist. |
def sample(self, fraction, seed=None, exact=False):
"""
Sample a fraction of the current SFrame's rows.
Parameters
----------
fraction : float
Fraction of the rows to fetch. Must be between 0 and 1.
if exact is False (default), the number of rows returned is
approximately the fraction times the number of rows.
seed : int, optional
Seed for the random number generator used to sample.
exact: bool, optional
Defaults to False. If exact=True, an exact fraction is returned,
but at a performance penalty.
Returns
-------
out : SFrame
A new SFrame containing sampled rows of the current SFrame.
Examples
--------
Suppose we have an SFrame with 6,145 rows.
>>> import random
>>> sf = SFrame({'id': range(0, 6145)})
Retrieve about 30% of the SFrame rows with repeatable results by
setting the random seed.
>>> len(sf.sample(.3, seed=5))
1783
"""
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (self.num_rows() == 0 or self.num_columns() == 0):
return self
else:
with cython_context():
return SFrame(_proxy=self.__proxy__.sample(fraction, seed, exact)) | Sample a fraction of the current SFrame's rows.
Parameters
----------
fraction : float
Fraction of the rows to fetch. Must be between 0 and 1.
if exact is False (default), the number of rows returned is
approximately the fraction times the number of rows.
seed : int, optional
Seed for the random number generator used to sample.
exact: bool, optional
Defaults to False. If exact=True, an exact fraction is returned,
but at a performance penalty.
Returns
-------
out : SFrame
A new SFrame containing sampled rows of the current SFrame.
Examples
--------
Suppose we have an SFrame with 6,145 rows.
>>> import random
>>> sf = SFrame({'id': range(0, 6145)})
Retrieve about 30% of the SFrame rows with repeatable results by
setting the random seed.
>>> len(sf.sample(.3, seed=5))
1783 |
def _str_to_datetime(self, str_value):
"""Parses a `YYYY-MM-DD` string into a datetime object."""
try:
ldt = [int(f) for f in str_value.split('-')]
dt = datetime.datetime(*ldt)
except (ValueError, TypeError):
return None
return dt | Parses a `YYYY-MM-DD` string into a datetime object. |
def render_to_response(self, context, **response_kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
response_kwargs['content_type'] = 'application/json'
return self.response_class(
self.convert_context_to_json(context),
**response_kwargs
) | Returns a JSON response, transforming 'context' to make the payload. |
def execute(self, query, args=None):
"""Execute a query.
query -- string, query to execute on server
args -- optional sequence or mapping, parameters to use with query.
Note: If args is a sequence, then %s must be used as the
parameter placeholder in the query. If a mapping is used,
%(key)s must be used as the placeholder.
Returns long integer rows affected, if any
"""
del self.messages[:]
db = self._get_db()
if isinstance(query, unicode):
query = query.encode(db.unicode_literal.charset)
if args is not None:
query = query % db.literal(args)
try:
r = None
r = self._query(query)
except TypeError, m:
if m.args[0] in ("not enough arguments for format string",
"not all arguments converted"):
self.messages.append((ProgrammingError, m.args[0]))
self.errorhandler(self, ProgrammingError, m.args[0])
else:
self.messages.append((TypeError, m))
self.errorhandler(self, TypeError, m)
except (SystemExit, KeyboardInterrupt):
raise
except:
exc, value, tb = sys.exc_info()
del tb
self.messages.append((exc, value))
self.errorhandler(self, exc, value)
self._executed = query
if not self._defer_warnings: self._warning_check()
return r | Execute a query.
query -- string, query to execute on server
args -- optional sequence or mapping, parameters to use with query.
Note: If args is a sequence, then %s must be used as the
parameter placeholder in the query. If a mapping is used,
%(key)s must be used as the placeholder.
Returns long integer rows affected, if any |
def x_runtime(f, *args, **kwargs):
"""X-Runtime Flask Response Decorator."""
_t0 = now()
r = f(*args, **kwargs)
_t1 = now()
r.headers['X-Runtime'] = '{0}s'.format(Decimal(str(_t1 - _t0)))
return r | X-Runtime Flask Response Decorator. |
def _update_resource_view(self, log=False):
# type: () -> bool
"""Check if resource view exists in HDX and if so, update resource view
Returns:
bool: True if updated and False if not
"""
update = False
if 'id' in self.data and self._load_from_hdx('resource view', self.data['id']):
update = True
else:
if 'resource_id' in self.data:
resource_views = self.get_all_for_resource(self.data['resource_id'])
for resource_view in resource_views:
if self.data['title'] == resource_view['title']:
self.old_data = self.data
self.data = resource_view.data
update = True
break
if update:
if log:
logger.warning('resource view exists. Updating %s' % self.data['id'])
self._merge_hdx_update('resource view', 'id')
return update | Check if resource view exists in HDX and if so, update resource view
Returns:
bool: True if updated and False if not |
def pseudo_organization(organization, classification, default=None):
""" helper for setting an appropriate ID for organizations """
if organization and classification:
raise ScrapeValueError('cannot specify both classification and organization')
elif classification:
return _make_pseudo_id(classification=classification)
elif organization:
if isinstance(organization, Organization):
return organization._id
elif isinstance(organization, str):
return organization
else:
return _make_pseudo_id(**organization)
elif default is not None:
return _make_pseudo_id(classification=default)
else:
return None | helper for setting an appropriate ID for organizations |
def wait_pid(pid, timeout=None, callback=None):
"""Wait for process with pid 'pid' to terminate and return its
exit status code as an integer.
    If pid is not a child of os.getpid() (the current process), just
    waits until the process disappears and returns None.
    If pid does not exist at all, return None immediately.
    Raise TimeoutExpired if the timeout expires (when specified).
"""
def check_timeout(delay):
if timeout is not None:
if time.time() >= stop_at:
if callback:
callback(pid)
else:
raise TimeoutExpired
time.sleep(delay)
return min(delay * 2, 0.04)
if timeout is not None:
waitcall = lambda: os.waitpid(pid, os.WNOHANG)
stop_at = time.time() + timeout
else:
waitcall = lambda: os.waitpid(pid, 0)
delay = 0.0001
while 1:
try:
retpid, status = waitcall()
except OSError as err:
if err.errno == errno.EINTR:
delay = check_timeout(delay)
continue
elif err.errno == errno.ECHILD:
# This has two meanings:
# - pid is not a child of os.getpid() in which case
# we keep polling until it's gone
# - pid never existed in the first place
# In both cases we'll eventually return None as we
# can't determine its exit status code.
while 1:
if pid_exists(pid):
delay = check_timeout(delay)
else:
return
else:
raise
else:
if retpid == 0:
# WNOHANG was used, pid is still running
delay = check_timeout(delay)
continue
# process exited due to a signal; return the integer of
# that signal
if os.WIFSIGNALED(status):
return os.WTERMSIG(status)
# process exited using exit(2) system call; return the
# integer exit(2) system call has been called with
elif os.WIFEXITED(status):
return os.WEXITSTATUS(status)
else:
# should never happen
raise RuntimeError("unknown process exit status") | Wait for process with pid 'pid' to terminate and return its
exit status code as an integer.
    If pid is not a child of os.getpid() (the current process), just
    waits until the process disappears and returns None.
    If pid does not exist at all, return None immediately.
    Raise TimeoutExpired if the timeout expires (when specified).
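A minimal POSIX-only usage sketch, assuming the wait_pid above is importable; the forked child exits with status 7, which the parent receives.
import os
import time

pid = os.fork()
if pid == 0:            # child: exit after a short sleep
    time.sleep(0.2)
    os._exit(7)
else:                   # parent: block (with a timeout) until the child exits
    status = wait_pid(pid, timeout=5)
    print("child exit status:", status)   # -> 7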
def call(self):
'''show a value dialog'''
from wx_loader import wx
dlg = wx.TextEntryDialog(None, self.title, self.title, defaultValue=str(self.default))
if dlg.ShowModal() != wx.ID_OK:
return None
return dlg.GetValue() | show a value dialog |
def _single_tree_paths(self, tree):
"""Get all traversal paths from a single tree."""
skel = tree.consolidate()
tree = defaultdict(list)
for edge in skel.edges:
svert = edge[0]
evert = edge[1]
tree[svert].append(evert)
tree[evert].append(svert)
def dfs(path, visited):
paths = []
stack = [ (path, visited) ]
while stack:
path, visited = stack.pop(0)
vertex = path[-1]
children = tree[vertex]
visited[vertex] = True
children = [ child for child in children if not visited[child] ]
if len(children) == 0:
paths.append(path)
for child in children:
stack.append(
(path + [child], copy.deepcopy(visited))
)
return paths
root = skel.edges[0,0]
paths = dfs([root], defaultdict(bool))
root = np.argmax([ len(_) for _ in paths ])
root = paths[root][-1]
paths = dfs([ root ], defaultdict(bool))
return [ np.flip(skel.vertices[path], axis=0) for path in paths ] | Get all traversal paths from a single tree. |
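The path enumeration above can be shown on a plain adjacency dict. This standalone sketch mirrors the inner dfs; the method itself additionally maps indices back to skeleton vertices and re-roots at the far end of the longest path.
from collections import defaultdict
import copy

def dfs_paths(tree, root):
    # Breadth-style worklist that carries (path so far, visited map) pairs.
    paths, stack = [], [([root], defaultdict(bool))]
    while stack:
        path, visited = stack.pop(0)
        vertex = path[-1]
        visited[vertex] = True
        children = [c for c in tree[vertex] if not visited[c]]
        if not children:
            paths.append(path)        # reached a leaf: record the full path
        for child in children:
            stack.append((path + [child], copy.deepcopy(visited)))
    return paths

adj = defaultdict(list, {0: [1], 1: [0, 2, 3], 2: [1], 3: [1]})
print(dfs_paths(adj, 0))   # -> [[0, 1, 2], [0, 1, 3]]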
def get_module_name(package):
"""
package must have these attributes:
e.g.:
package.DISTRIBUTION_NAME = "DragonPyEmulator"
package.DIST_GROUP = "console_scripts"
package.ENTRY_POINT = "DragonPy"
:return: a string like: "dragonpy.core.cli"
"""
distribution = get_distribution(package.DISTRIBUTION_NAME)
entry_info = distribution.get_entry_info(package.DIST_GROUP, package.ENTRY_POINT)
if not entry_info:
raise RuntimeError(
"Can't find entry info for distribution: %r (group: %r, entry point: %r)" % (
package.DISTRIBUTION_NAME, package.DIST_GROUP, package.ENTRY_POINT
)
)
return entry_info.module_name | package must have these attributes:
e.g.:
package.DISTRIBUTION_NAME = "DragonPyEmulator"
package.DIST_GROUP = "console_scripts"
package.ENTRY_POINT = "DragonPy"
:return: a string like: "dragonpy.core.cli" |
def template_subst(template, subs, delims=('<', '>')):
""" Perform substitution of content into tagged string.
For substitutions into template input files for external computational
packages, no checks for valid syntax are performed.
Each key in `subs` corresponds to a delimited
substitution tag to be replaced in `template` by the entire text of the
value of that key. For example, the dict ``{"ABC": "text"}`` would
convert ``The <ABC> is working`` to ``The text is working``, using the
default delimiters of '<' and '>'. Substitutions are performed in
iteration order from `subs`; recursive substitution
as the tag parsing proceeds is thus
feasible if an :class:`~collections.OrderedDict` is used and substitution
key/value pairs are added in the proper order.
Start and end delimiters for the tags are modified by `delims`. For
example, to substitute a tag of the form **{\|TAG\|}**, the tuple
    ``("{|","|}")`` should be passed to `delims`. Any elements in
`delims` past the second are ignored. No checking is
performed for whether the delimiters are "sensible" or not.
Parameters
----------
template
|str| --
        Template containing tags delimited by `delims`,
with tag names and substitution contents provided in `subs`
subs
|dict| of |str| --
Each item's key and value are the tag name and corresponding content to
be substituted into the provided template.
delims
iterable of |str| --
Iterable containing the 'open' and 'close' strings used to mark tags
in the template, which are drawn from elements zero and one,
respectively. Any elements beyond these are ignored.
Returns
-------
subst_text
|str| --
String generated from the parsed template, with all tag
substitutions performed.
"""
# Store the template into the working variable
subst_text = template
# Iterate over subs and perform the .replace() calls
for (k,v) in subs.items():
subst_text = subst_text.replace(
delims[0] + k + delims[1], v)
## next tup
# Return the result
return subst_text | Perform substitution of content into tagged string.
For substitutions into template input files for external computational
packages, no checks for valid syntax are performed.
Each key in `subs` corresponds to a delimited
substitution tag to be replaced in `template` by the entire text of the
value of that key. For example, the dict ``{"ABC": "text"}`` would
convert ``The <ABC> is working`` to ``The text is working``, using the
default delimiters of '<' and '>'. Substitutions are performed in
iteration order from `subs`; recursive substitution
as the tag parsing proceeds is thus
feasible if an :class:`~collections.OrderedDict` is used and substitution
key/value pairs are added in the proper order.
Start and end delimiters for the tags are modified by `delims`. For
example, to substitute a tag of the form **{\|TAG\|}**, the tuple
    ``("{|","|}")`` should be passed to `delims`. Any elements in
`delims` past the second are ignored. No checking is
performed for whether the delimiters are "sensible" or not.
Parameters
----------
template
|str| --
        Template containing tags delimited by `delims`,
with tag names and substitution contents provided in `subs`
subs
|dict| of |str| --
Each item's key and value are the tag name and corresponding content to
be substituted into the provided template.
delims
iterable of |str| --
Iterable containing the 'open' and 'close' strings used to mark tags
in the template, which are drawn from elements zero and one,
respectively. Any elements beyond these are ignored.
Returns
-------
subst_text
|str| --
String generated from the parsed template, with all tag
substitutions performed. |
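A small usage sketch, assuming template_subst above is importable; it shows ordered, recursive substitution with non-default delimiters.
from collections import OrderedDict

template = "Run {|PROG|} with {|ARGS|}"
subs = OrderedDict([
    ("PROG", "solver {|MODE|}"),   # substituted first...
    ("MODE", "fast"),              # ...so this nested tag is filled next
    ("ARGS", "--tol 1e-6"),
])
print(template_subst(template, subs, delims=("{|", "|}")))
# -> "Run solver fast with --tol 1e-6"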
def emit(self, action, event, **kwargs):
"""
Send an event to all the client listening for notifications
:param action: Action name
:param event: Event to send
:param kwargs: Add this meta to the notification (project_id for example)
"""
for listener in self._listeners:
listener.put_nowait((action, event, kwargs)) | Send an event to all the client listening for notifications
:param action: Action name
:param event: Event to send
:param kwargs: Add this meta to the notification (project_id for example) |
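A minimal consumer sketch for the per-listener queues above; it assumes each listener is an asyncio.Queue, which the put_nowait call suggests.
import asyncio

async def consume(queue):
    # Drain notifications produced by emit(action, event, **kwargs).
    while True:
        action, event, meta = await queue.get()
        print(action, event, meta)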
def find_document_type_by_name(self, entity_name, active='Y',
match_case=True):
"""
search document types by name and active(Y/N) status
:param entity_name: entity name
        :return: the document type entries matching the name and active status
"""
all_types = self.get_dictionary('Document_Type_DE')
if match_case:
filtered = filter(
lambda x: x['Active'] == active and x['EntryName'].find(entity_name) >= 0,
all_types)
else:
token = entity_name.lower()
filtered = filter(
lambda x: x['Active'] == active and x['EntryName'].lower().find(token) >= 0,
all_types)
return filtered | search document types by name and active(Y/N) status
:param entity_name: entity name
        :return: the document type entries matching the name and active status
def normalize_volume(volume):
'''convert volume metadata from es to archivant format
    This function has side effects on the input volume
output example::
{
'id': 'AU0paPZOMZchuDv1iDv8',
'type': 'volume',
'metadata': {'_language': 'en',
'key1': 'value1',
'key2': 'value2',
'key3': 'value3'},
'attachments': [{'id': 'a910e1kjdo2d192d1dko1p2kd1209d',
'type' : 'attachment',
'url': 'fsdb:///624bffa8a6f90813b7982d0e5b4c1475ebec40e3',
'metadata': {'download_count': 0,
'mime': 'application/json',
'name': 'tmp9fyat_',
                                  'notes': 'this file is awesome',
'sha1': '624bffa8a6f90813b7982d0e5b4c1475ebec40e3',
'size': 10}
}]
}
'''
res = dict()
res['type'] = 'volume'
res['id'] = volume['_id']
if '_score' in volume:
res['score'] = volume['_score']
source = volume['_source']
attachments = source['_attachments']
del(source['_attachments'])
del(source['_text_' + source['_language']])
res['metadata'] = source
atts = list()
for attachment in attachments:
atts.append(Archivant.normalize_attachment(attachment))
res['attachments'] = atts
return res | convert volume metadata from es to archivant format
    This function has side effects on the input volume
output example::
{
'id': 'AU0paPZOMZchuDv1iDv8',
'type': 'volume',
'metadata': {'_language': 'en',
'key1': 'value1',
'key2': 'value2',
'key3': 'value3'},
'attachments': [{'id': 'a910e1kjdo2d192d1dko1p2kd1209d',
'type' : 'attachment',
'url': 'fsdb:///624bffa8a6f90813b7982d0e5b4c1475ebec40e3',
'metadata': {'download_count': 0,
'mime': 'application/json',
'name': 'tmp9fyat_',
                                  'notes': 'this file is awesome',
'sha1': '624bffa8a6f90813b7982d0e5b4c1475ebec40e3',
'size': 10}
}]
} |
def _set_zoning(self, v, load=False):
"""
Setter method for zoning, mapped from YANG variable /zoning (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_zoning is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_zoning() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=zoning.zoning, is_container='container', presence=False, yang_name="zoning", rest_name="zoning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Zoning commands', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """zoning must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=zoning.zoning, is_container='container', presence=False, yang_name="zoning", rest_name="zoning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Zoning commands', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='container', is_config=True)""",
})
self.__zoning = t
if hasattr(self, '_set'):
self._set() | Setter method for zoning, mapped from YANG variable /zoning (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_zoning is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_zoning() directly. |
def dumps(self, contentType=None, version=None):
'''
[OPTIONAL] Identical to :meth:`dump`, except the serialized form
is returned as a string representation. As documented in
:meth:`dump`, the return value can optionally be a three-element
tuple of (contentType, version, data) if the provided content-type
should be overridden or enhanced. The default implementation just
wraps :meth:`dump`.
'''
buf = six.StringIO()
ret = self.dump(buf, contentType, version)
if ret is None:
return buf.getvalue()
return (ret[0], ret[1], buf.getvalue()) | [OPTIONAL] Identical to :meth:`dump`, except the serialized form
is returned as a string representation. As documented in
:meth:`dump`, the return value can optionally be a three-element
tuple of (contentType, version, data) if the provided content-type
should be overridden or enhanced. The default implementation just
wraps :meth:`dump`. |
def isin_start(elems, line):
"""Check if an element from a list starts a string.
:type elems: list
:type line: str
"""
found = False
elems = [elems] if type(elems) is not list else elems
for e in elems:
if line.lstrip().lower().startswith(e):
found = True
break
return found | Check if an element from a list starts a string.
:type elems: list
:type line: str |
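Quick usage sketch, assuming isin_start above is importable; a bare string is wrapped into a one-element list.
print(isin_start(["select", "with"], "  SELECT * FROM t"))   # True
print(isin_start("insert", "  SELECT * FROM t"))             # False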
def get_languages(self):
"""
:calls: `GET /repos/:owner/:repo/languages <http://developer.github.com/v3/repos>`_
:rtype: dict of string to integer
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/languages"
)
return data | :calls: `GET /repos/:owner/:repo/languages <http://developer.github.com/v3/repos>`_
:rtype: dict of string to integer |
def _get_cache_size(replace=False):
"""Get size of cache."""
if not replace:
size = _cached_search_compile.cache_info().currsize
else:
size = _cached_replace_compile.cache_info().currsize
return size | Get size of cache. |
def get_session_identifiers(cls, folder=None, inputfile=None):
""" Retrieve the list of session identifiers contained in the
        data in the folder or the inputfile.
        For this plugin, it returns the list of Excel sheets available.
:kwarg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg inputfile: the path to the input file to use
"""
sessions = []
if inputfile and folder:
raise MQ2Exception(
'You should specify either a folder or a file')
if folder:
if not os.path.isdir(folder):
return sessions
for root, dirs, files in os.walk(folder):
for filename in files:
filename = os.path.join(root, filename)
for ext in SUPPORTED_FILES:
if filename.endswith(ext):
wbook = xlrd.open_workbook(filename)
for sheet in wbook.sheets():
if sheet.name not in sessions:
sessions.append(sheet.name)
elif inputfile:
if os.path.isdir(inputfile):
return sessions
for ext in SUPPORTED_FILES:
if inputfile.endswith(ext):
wbook = xlrd.open_workbook(inputfile)
for sheet in wbook.sheets():
if sheet.name not in sessions:
sessions.append(sheet.name)
return sessions | Retrieve the list of session identifiers contained in the
        data in the folder or the inputfile.
        For this plugin, it returns the list of Excel sheets available.
:kwarg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg inputfile: the path to the input file to use |
def walk_revctrl(dirname='', ff=''):
"""Return files found by the file-finder 'ff'.
"""
file_finder = None
items = []
if not ff:
distutils.log.error('No file-finder passed to walk_revctrl')
sys.exit(1)
for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
if ff == ep.name:
distutils.log.info('using %s file-finder', ep.name)
file_finder = ep.load()
finder_items = []
with pythonpath_off():
for item in file_finder(dirname):
if not basename(item).startswith(('.svn', '.hg', '.git')):
finder_items.append(item)
distutils.log.info('%d files found', len(finder_items))
items.extend(finder_items)
if file_finder is None:
distutils.log.error('Failed to load %s file-finder; setuptools-%s extension missing?',
ff, 'subversion' if ff == 'svn' else ff)
sys.exit(1)
# Returning a non-empty list prevents egg_info from reading the
# existing SOURCES.txt
return items or [''] | Return files found by the file-finder 'ff'. |
def _check_env_vars_set(self, dir_env_var, file_env_var):
"""
Check to see if the default cert dir/file environment vars are present.
:return: bool
"""
return (
os.environ.get(file_env_var) is not None or
os.environ.get(dir_env_var) is not None
) | Check to see if the default cert dir/file environment vars are present.
:return: bool |
def _get_subject_uri(self, guid=None):
"""
Returns the full path that uniquely identifies
the subject endpoint.
"""
uri = self.uri + '/v1/subject'
if guid:
uri += '/' + urllib.quote_plus(guid)
return uri | Returns the full path that uniquely identifies
the subject endpoint. |
def _get_list(self, key, operation, create=False):
"""
Get (and maybe create) a list by name.
"""
return self._get_by_type(key, operation, create, b'list', []) | Get (and maybe create) a list by name. |
def _init_draw(self):
"""Initializes the drawing of the frames by setting the images to
random colors.
This function is called by TimedAnimation.
"""
if self.original is not None:
self.original.set_data(np.random.random((10, 10, 3)))
self.processed.set_data(np.random.random((10, 10, 3))) | Initializes the drawing of the frames by setting the images to
random colors.
This function is called by TimedAnimation. |
def walkscan(x0, y0, xn=0.25, xp=0.25, yn=0.25, yp=0.25):
"""Scan pixels in a random walk pattern with given step probabilities. The
random walk will continue indefinitely unless a skip transformation is used
with the 'stop' parameter set or a clip transformation is used with the
'abort' parameter set to True. The probabilities are normalized to sum to 1.
:param x0: Initial x-coordinate
:type x0: int
:param y0: Initial y-coordinate
:type y0: int
:param xn: Probability of moving in the negative x direction
:type xn: float
:param xp: Probability of moving in the positive x direction
:type xp: float
:param yn: Probability of moving in the negative y direction
:type yn: float
:param yp: Probability of moving in the positive y direction
:type yp: float
"""
# Validate inputs
    if xn < 0: raise ValueError("Negative x probability must be non-negative")
    if xp < 0: raise ValueError("Positive x probability must be non-negative")
    if yn < 0: raise ValueError("Negative y probability must be non-negative")
    if yp < 0: raise ValueError("Positive y probability must be non-negative")
# Compute normalized probability
total = xp + xn + yp + yn
xn /= total
xp /= total
yn /= total
yp /= total
# Compute cumulative probability
cxn = xn
cxp = cxn + xp
cyn = cxp + yn
# Initialize position
x, y = x0, y0
while True:
yield x, y
# Take random step
probability = random.random()
if probability <= cxn:
x -= 1
elif probability <= cxp:
x += 1
elif probability <= cyn:
y -= 1
else:
y += 1 | Scan pixels in a random walk pattern with given step probabilities. The
random walk will continue indefinitely unless a skip transformation is used
with the 'stop' parameter set or a clip transformation is used with the
'abort' parameter set to True. The probabilities are normalized to sum to 1.
:param x0: Initial x-coordinate
:type x0: int
:param y0: Initial y-coordinate
:type y0: int
:param xn: Probability of moving in the negative x direction
:type xn: float
:param xp: Probability of moving in the positive x direction
:type xp: float
:param yn: Probability of moving in the negative y direction
:type yn: float
:param yp: Probability of moving in the positive y direction
:type yp: float |
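Because walkscan yields coordinates forever, bound it explicitly when experimenting; a hedged sketch assuming the generator above is importable.
import itertools
import random

random.seed(0)   # make the walk reproducible
for x, y in itertools.islice(walkscan(0, 0, xn=0.1, xp=0.4, yn=0.1, yp=0.4), 5):
    print(x, y)  # five steps of a walk biased toward the positive directions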
def add(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - normally should be 'module' already
data_uoa - UOA of the module to be created
(desc) - module description
(license) - module license
(copyright) - module copyright
(developer) - module developer
              (developer_email)       - module developer email
              (developer_webpage)     - module developer webpage
(actions) - dict with actions {"func1":{}, "func2":{} ...}
(dict) - other meta description to add to entry
(quiet) - minimal interaction
(func) - just add one dummy action
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of the 'add' kernel function
}
"""
# Check if global writing is allowed
r=ck.check_writing({'module_uoa':work['self_module_uoa']})
if r['return']>0: return r
o=i.get('out','')
# Find path to module 'module' to get dummies
r=ck.access({'action':'load',
'module_uoa':work['self_module_uoa'],
'data_uoa':work['self_module_uoa'],
'common_func':'yes'})
if r['return']>0: return r
p=r['path']
pm=os.path.join(p,cfg['dummy_module'])
pma=os.path.join(p,cfg['dummy_module_action'])
# Load module dummy
r=ck.load_text_file({'text_file':pm})
if r['return']>0: return r
spm=r['string']
# Load module action dummy
r=ck.load_text_file({'text_file':pma})
if r['return']>0: return r
spma=r['string']
# Prepare meta description
desc=i.get('desc','')
license=i.get('license','')
copyright=i.get('copyright','')
developer=i.get('developer','')
developer_email=i.get('developer_email','')
developer_webpage=i.get('developer_webpage','')
actions=i.get('actions',{})
func=i.get('func','')
if func!='':
actions[func]={}
quiet=i.get('quiet','')
# If console mode, ask some questions
if quiet!='yes' and o=='con':
if desc=='':
ck.out('')
r=ck.inp({'text':'Add brief module description: '})
desc=r['string']
if license=='' and ck.cfg.get('default_license','')!='':
ck.out('')
r=ck.inp({'text':'Add brief module license (or Enter to use "'+ck.cfg['default_license']+'"): '})
license=r['string']
if license=='': license=ck.cfg['default_license']
if copyright=='' and ck.cfg.get('default_copyright','')!='':
ck.out('')
r=ck.inp({'text':'Add brief module copyright (or Enter to use "'+ck.cfg['default_copyright']+'"): '})
copyright=r['string']
if copyright=='': copyright=ck.cfg['default_copyright']
if developer=='' and ck.cfg.get('default_developer','')!='':
ck.out('')
r=ck.inp({'text':'Add module\'s developer (or Enter to use "'+ck.cfg['default_developer']+'"): '})
developer=r['string']
if developer=='': developer=ck.cfg['default_developer']
if developer_email=='' and ck.cfg.get('default_developer_email','')!='':
ck.out('')
r=ck.inp({'text':'Add module\'s developer email (or Enter to use "'+ck.cfg['default_developer_email']+'"): '})
developer_email=r['string']
if developer_email=='': developer_email=ck.cfg['default_developer_email']
if developer_webpage=='' and ck.cfg.get('default_developer_webpage','')!='':
ck.out('')
r=ck.inp({'text':'Add module\'s developer webpage (or Enter to use "'+ck.cfg['default_developer_webpage']+'"): '})
developer_webpage=r['string']
if developer_webpage=='': developer_webpage=ck.cfg['default_developer_webpage']
if len(actions)==0:
act='*'
while act!='':
ck.out('')
r=ck.inp({'text':'Add action function (or Enter to stop): '})
act=r['string']
if act!='':
actions[act]={}
r1=ck.inp({'text':'Support web (y/N): '})
x=r1['string'].lower()
if x=='yes' or x=='y':
fweb='yes'
actions[act]['for_web']=fweb
r1=ck.inp({'text':'Add action description: '})
adesc=r1['string']
if adesc!='':
actions[act]['desc']=adesc
ck.out('')
# Prepare meta description
dd={}
if desc!='':
dd['desc']=desc
spm=spm.replace('$#desc#$', desc)
if license!='':
dd['license']=license
spm=spm.replace('$#license#$', license)
if copyright!='':
dd['copyright']=copyright
spm=spm.replace('$#copyright#$', copyright)
dev=''
if developer!='':
dev=developer
dd['developer']=developer
if developer_email!='':
if dev!='': dev+=', '
dev+=developer_email
dd['developer_email']=developer_email
if developer_webpage!='':
if dev!='': dev+=', '
dev+=developer_webpage
dd['developer_webpage']=developer_webpage
spm=spm.replace('$#developer#$', dev)
dd['actions']=actions
# Substitute actions
for act in actions:
adesc=actions[act].get('desc','TBD: action description')
spm+='\n'+spma.replace('$#action#$', act).replace('$#desc#$',adesc)
dx=i.get('dict',{})
r=ck.merge_dicts({'dict1':dx, 'dict2':dd})
if r['return']>0: return r
# Add entry (it will ask further questions about alias and user-friendly name)
i['common_func']='yes'
i['dict']=dx
i['sort_keys']='yes'
r=ck.access(i)
if r['return']>0: return r
# Add module code
p=r['path']
pf=os.path.join(p, ck.cfg['module_full_code_name'])
if o=='con':
ck.out('')
ck.out('Creating module code '+pf+' ...')
# Write module code
rx=ck.save_text_file({'text_file':pf, 'string':spm})
if rx['return']>0: return rx
return r | Input: {
(repo_uoa) - repo UOA
module_uoa - normally should be 'module' already
data_uoa - UOA of the module to be created
(desc) - module description
(license) - module license
(copyright) - module copyright
(developer) - module developer
              (developer_email)       - module developer email
              (developer_webpage)     - module developer webpage
(actions) - dict with actions {"func1":{}, "func2":{} ...}
(dict) - other meta description to add to entry
(quiet) - minimal interaction
(func) - just add one dummy action
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of the 'add' kernel function
} |
def df2arff(df, dataset_name, pods_data):
"""Write an arff file from a data set loaded in from pods"""
def java_simple_date(date_format):
date_format = date_format.replace('%Y', 'yyyy').replace('%m', 'MM').replace('%d', 'dd').replace('%H', 'HH')
return date_format.replace('%h', 'hh').replace('%M', 'mm').replace('%S', 'ss').replace('%f', 'SSSSSS')
def tidy_field(atr):
return str(atr).replace(' / ', '/').replace(' ', '_')
types = {'STRING': [str], 'INTEGER': [int, np.int64, np.uint8], 'REAL': [np.float64]}
d = {}
d['attributes'] = []
for atr in df.columns:
if isinstance(atr, str):
if len(atr)>8 and atr[:9] == 'discrete(':
import json
elements = json.loads(atr[9:-1])
d['attributes'].append((tidy_field(elements[1]),
list(elements[0].keys())))
mask = {}
c = pd.Series(index=df.index)
for key, val in elements[0].items():
mask = df[atr]==val
c[mask] = key
df[atr] = c
continue
if len(atr)>7 and atr[:8] == 'integer(':
name = atr[8:-1]
d['attributes'].append((tidy_field(name), 'INTEGER'))
df[atr] = df[atr].astype(int)
continue
if len(atr)>7 and atr[:8]=='datenum(':
from matplotlib.dates import num2date
elements = atr[8:-1].split(',')
d['attributes'].append((elements[0] + '_datenum_' + java_simple_date(elements[1]), 'STRING'))
df[atr] = num2date(df[atr].values) #
df[atr] = df[atr].dt.strftime(elements[1])
continue
if len(atr)>9 and atr[:10]=='timestamp(':
def timestamp2date(values):
import datetime
"""Convert timestamp into a date object"""
new = []
for value in values:
new.append(np.datetime64(datetime.datetime.fromtimestamp(value)))
return np.asarray(new)
elements = atr[10:-1].split(',')
d['attributes'].append((elements[0] + '_datenum_' + java_simple_date(elements[1]), 'STRING'))
df[atr] = timestamp2date(df[atr].values) #
df[atr] = df[atr].dt.strftime(elements[1])
continue
if len(atr)>10 and atr[:11]=='datetime64(':
elements = atr[11:-1].split(',')
d['attributes'].append((elements[0] + '_datenum_' + java_simple_date(elements[1]), 'STRING'))
df[atr] = df[atr].dt.strftime(elements[1])
continue
if len(atr)>11 and atr[:12]=='decimalyear(':
def decyear2date(values):
"""Convert decimal year into a date object"""
new = []
for i, decyear in enumerate(values):
year = int(np.floor(decyear))
dec = decyear-year
end = np.datetime64(str(year+1)+'-01-01')
start = np.datetime64(str(year)+'-01-01')
diff=end-start
days = dec*(diff/np.timedelta64(1, 'D'))
# round to nearest day
add = np.timedelta64(int(np.round(days)), 'D')
new.append(start+add)
return np.asarray(new)
elements = atr[12:-1].split(',')
d['attributes'].append((elements[0] + '_datenum_' + java_simple_date(elements[1]), 'STRING'))
df[atr] = decyear2date(df[atr].values) #
df[atr] = df[atr].dt.strftime(elements[1])
continue
field = tidy_field(atr)
el = df[atr][0]
type_assigned=False
for t in types:
if isinstance(el, tuple(types[t])):
d['attributes'].append((field, t))
type_assigned=True
break
if not type_assigned:
import json
d['attributes'].append((field+'_json', 'STRING'))
df[atr] = df[atr].apply(json.dumps)
d['data'] = []
for ind, row in df.iterrows():
d['data'].append(list(row))
import textwrap as tw
width = 78
d['description'] = dataset_name + "\n\n"
if 'info' in pods_data and pods_data['info']:
d['description'] += "\n".join(tw.wrap(pods_data['info'], width)) + "\n\n"
if 'details' in pods_data and pods_data['details']:
d['description'] += "\n".join(tw.wrap(pods_data['details'], width))
if 'citation' in pods_data and pods_data['citation']:
d['description'] += "\n\n" + "Citation" "\n\n" + "\n".join(tw.wrap(pods_data['citation'], width))
d['relation'] = dataset_name
import arff
string = arff.dumps(d)
import re
string = re.sub(r'\@ATTRIBUTE "?(.*)_datenum_(.*)"? STRING',
r'@ATTRIBUTE "\1" DATE [\2]',
string)
f = open(dataset_name + '.arff', 'w')
f.write(string)
f.close() | Write an arff file from a data set loaded in from pods |
def outliers(df,output_type = 'values',dtype = 'number',sensitivity = 1.5):# can output boolean array or values
""" Returns potential outliers as either a boolean array or a subset of the original.
Parameters:
df - array_like
Series or dataframe to check
output_type - string, default 'values'
            if 'values' is specified, the values in the series that are suspected outliers
            are returned. Otherwise, a boolean array is returned, where True means the value is an outlier
dtype - string, default 'number'
the way to treat the object. Possible values are 'number','datetime',
'timedelt','datetimetz','category',or 'object'
sensitivity - number, default 1.5
            The value to multiply by the inter-quartile range when determining outliers. This number is used
for categorical data as well.
"""
if dtype in ('number','datetime','timedelt','datetimetz'):
if not dtype == 'number':
df = pd.to_numeric(df,errors = 'coerce')
quart25, quart75 = percentiles(df,q = [.25,.75])
out_range= sensitivity * (quart75 - quart25)
lower_bound,upper_bound = quart25-out_range, quart75+out_range
bool_array = (df < lower_bound)|(df > upper_bound)
else:
value_counts = df.value_counts() # Trying to find categorical outliers.
quart25 = cum_percentile(value_counts,.25)
quart75 = cum_percentile(value_counts,.75)
out_values = int(sensitivity * (quart75 - quart25) + quart75 + 1)
if out_values >= len(value_counts):
bool_array = _utils.bc_vec(df,value = False)
else:
outlier_values = value_counts[value_counts <= value_counts.iloc[out_values]].index
bool_array = df.isin(outlier_values)
if output_type == 'values':
return df[bool_array]
return bool_array | Returns potential outliers as either a boolean array or a subset of the original.
Parameters:
df - array_like
Series or dataframe to check
output_type - string, default 'values'
if 'values' is specified, then will output the values in the series that are suspected
outliers. Else, a boolean array will be outputted, where True means the value is an outlier
dtype - string, default 'number'
the way to treat the object. Possible values are 'number','datetime',
'timedelt','datetimetz','category',or 'object'
sensitivity - number, default 1.5
            The value to multiply by the inter-quartile range when determining outliers. This number is used
for categorical data as well. |
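A hedged usage sketch, assuming outliers and the surrounding module's helpers (such as percentiles) are importable.
import pandas as pd

s = pd.Series([10, 11, 9, 10, 12, 95])   # 95 is an obvious outlier
print(outliers(s))                        # the flagged values
print(outliers(s, output_type='bool'))    # boolean mask instead of values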
def Read(self, length):
"""Read from the file."""
if not self.IsFile():
raise IOError("%s is not a file." % self.pathspec.last.path)
available = min(self.size - self.offset, length)
if available > 0:
# This raises a RuntimeError in some situations.
try:
data = self.fd.read_random(self.offset, available,
self.pathspec.last.ntfs_type,
self.pathspec.last.ntfs_id)
except RuntimeError as e:
raise IOError(e)
self.offset += len(data)
return data
return b"" | Read from the file. |
def weight_layers(name, bilm_ops, l2_coef=None,
use_top_only=False, do_layer_norm=False, reuse=False):
"""
Weight the layers of a biLM with trainable scalar weights to
compute ELMo representations.
For each output layer, this returns two ops. The first computes
a layer specific weighted average of the biLM layers, and
the second the l2 regularizer loss term.
The regularization terms are also add to tf.GraphKeys.REGULARIZATION_LOSSES
Input:
name = a string prefix used for the trainable variable names
bilm_ops = the tensorflow ops returned to compute internal
representations from a biLM. This is the return value
from BidirectionalLanguageModel(...)(ids_placeholder)
l2_coef: the l2 regularization coefficient $\lambda$.
Pass None or 0.0 for no regularization.
use_top_only: if True, then only use the top layer.
do_layer_norm: if True, then apply layer normalization to each biLM
layer before normalizing
reuse: reuse an aggregation variable scope.
Output:
{
'weighted_op': op to compute weighted average for output,
'regularization_op': op to compute regularization term
}
"""
def _l2_regularizer(weights):
if l2_coef is not None:
return l2_coef * tf.reduce_sum(tf.square(weights))
else:
return 0.0
# Get ops for computing LM embeddings and mask
lm_embeddings = bilm_ops['lm_embeddings']
mask = bilm_ops['mask']
n_lm_layers = int(lm_embeddings.get_shape()[1])
lm_dim = int(lm_embeddings.get_shape()[3])
# import pdb; pdb.set_trace()
with tf.control_dependencies([lm_embeddings, mask]):
# Cast the mask and broadcast for layer use.
mask_float = tf.cast(mask, 'float32')
broadcast_mask = tf.expand_dims(mask_float, axis=-1)
def _do_ln(x):
# do layer normalization excluding the mask
x_masked = x * broadcast_mask
N = tf.reduce_sum(mask_float) * lm_dim
mean = tf.reduce_sum(x_masked) / N
variance = tf.reduce_sum(((x_masked - mean) * broadcast_mask)**2) / N
return tf.nn.batch_normalization(
x, mean, variance, None, None, 1E-12
)
if use_top_only:
layers = tf.split(lm_embeddings, n_lm_layers, axis=1)
# just the top layer
sum_pieces = tf.squeeze(layers[-1], squeeze_dims=1)
# no regularization
reg = 0.0
else:
with tf.variable_scope("aggregation", reuse=reuse):
W = tf.get_variable(
'{}_ELMo_W'.format(name),
shape=(n_lm_layers, ),
initializer=tf.zeros_initializer,
regularizer=_l2_regularizer,
trainable=True,
)
# normalize the weights
normed_weights = tf.split(
tf.nn.softmax(W + 1.0 / n_lm_layers), n_lm_layers
)
# split LM layers
layers = tf.split(lm_embeddings, n_lm_layers, axis=1)
# compute the weighted, normalized LM activations
pieces = []
for w, t in zip(normed_weights, layers):
if do_layer_norm:
pieces.append(w * _do_ln(tf.squeeze(t, squeeze_dims=1)))
else:
pieces.append(w * tf.squeeze(t, squeeze_dims=1))
sum_pieces = tf.add_n(pieces)
# get the regularizer
reg = [
r for r in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if r.name.find('{}_ELMo_W/'.format(name)) >= 0
]
if len(reg) != 1:
raise ValueError
# scale the weighted sum by gamma
with tf.variable_scope("aggregation", reuse=reuse):
gamma = tf.get_variable(
'{}_ELMo_gamma'.format(name),
shape=(1, ),
initializer=tf.ones_initializer,
regularizer=None,
trainable=True,
)
weighted_lm_layers = sum_pieces * gamma
weighted_lm_layers_masked = sum_pieces * broadcast_mask
weighted_lm_layers_sum = tf.reduce_sum(weighted_lm_layers_masked, 1)
mask_sum = tf.reduce_sum(mask_float, 1)
mask_sum = tf.maximum(mask_sum, [1])
weighted_lm_layers_mean = weighted_lm_layers_sum / tf.expand_dims(mask_sum, - 1)
word_emb_2n = tf.squeeze(layers[0], [1])
word_emb_1n = tf.slice(word_emb_2n, [0, 0, 0], [-1, -1, lm_dim // 2]) # to 512
lstm_outputs1 = tf.squeeze(layers[1], [1])
lstm_outputs2 = tf.squeeze(layers[2], [1])
ret = {'weighted_op': weighted_lm_layers,
'mean_op': weighted_lm_layers_mean,
'regularization_op': reg,
'word_emb': word_emb_1n,
'lstm_outputs1': lstm_outputs1,
'lstm_outputs2': lstm_outputs2, }
return ret | Weight the layers of a biLM with trainable scalar weights to
compute ELMo representations.
For each output layer, this returns two ops. The first computes
a layer specific weighted average of the biLM layers, and
the second the l2 regularizer loss term.
The regularization terms are also add to tf.GraphKeys.REGULARIZATION_LOSSES
Input:
name = a string prefix used for the trainable variable names
bilm_ops = the tensorflow ops returned to compute internal
representations from a biLM. This is the return value
from BidirectionalLanguageModel(...)(ids_placeholder)
l2_coef: the l2 regularization coefficient $\lambda$.
Pass None or 0.0 for no regularization.
use_top_only: if True, then only use the top layer.
do_layer_norm: if True, then apply layer normalization to each biLM
layer before normalizing
reuse: reuse an aggregation variable scope.
Output:
{
'weighted_op': op to compute weighted average for output,
'regularization_op': op to compute regularization term
} |
def prep_vrn_file(in_file, vcaller, work_dir, somatic_info, writer_class, seg_file=None, params=None):
"""Select heterozygous variants in the normal sample with sufficient depth.
writer_class implements write_header and write_row to write VCF outputs
from a record and extracted tumor/normal statistics.
"""
data = somatic_info.tumor_data
if not params:
params = PARAMS
out_file = os.path.join(work_dir, "%s-%s-prep.csv" % (utils.splitext_plus(os.path.basename(in_file))[0],
vcaller))
if not utils.file_uptodate(out_file, in_file):
# ready_bed = _identify_heterogeneity_blocks_seg(in_file, seg_file, params, work_dir, somatic_info)
ready_bed = None
if ready_bed and utils.file_exists(ready_bed):
sub_file = _create_subset_file(in_file, ready_bed, work_dir, data)
else:
sub_file = in_file
max_depth = max_normal_germline_depth(sub_file, params, somatic_info)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = writer_class(out_handle)
writer.write_header()
bcf_in = pysam.VariantFile(sub_file)
for rec in bcf_in:
stats = _is_possible_loh(rec, bcf_in, params, somatic_info, max_normal_depth=max_depth)
if chromhacks.is_autosomal(rec.chrom) and stats is not None:
writer.write_row(rec, stats)
return out_file | Select heterozygous variants in the normal sample with sufficient depth.
writer_class implements write_header and write_row to write VCF outputs
from a record and extracted tumor/normal statistics. |
def _send(self, data):
"""Send data to statsd."""
if not self._sock:
self.connect()
self._do_send(data) | Send data to statsd. |
def to_feather(self, fname):
"""
Write out the binary feather-format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname) | Write out the binary feather-format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path |
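A quick round-trip sketch; it requires pandas' optional feather dependency (pyarrow).
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
df.to_feather("example.feather")
back = pd.read_feather("example.feather")   # the frame round-trips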
def _label_path_from_index(self, index):
"""
given image index, find out annotation path
Parameters:
----------
index: int
index of a specific image
Returns:
----------
full path of annotation file
"""
label_file = os.path.join(self.data_path, 'Annotations', index + '.xml')
assert os.path.exists(label_file), 'Path does not exist: {}'.format(label_file)
return label_file | given image index, find out annotation path
Parameters:
----------
index: int
index of a specific image
Returns:
----------
full path of annotation file |
def main():
"""
1. Reads in a meraculous config file and outputs all of the associated config
files to $PWD/configs
    2. The name of each run and the path to the directory are passed to a
multiprocessing core that controls which assemblies are executed and when.
"""
parser = CommandLine()
#this block from here: http://stackoverflow.com/a/4042861/5843327
if len(sys.argv)==1:
parser.parser.print_help()
sys.exit(1)
parser.parse()
myArgs = parser.args
#Figure out how many processors to give to each assembly since we will be
# running some things in parallel. The MerParse class will handle overriding
# whatever is found in the config file in the read_config() method.
procsPerAssembly = int(myArgs.maxProcs / myArgs.simultaneous)
setattr(myArgs, "maxProcs", procsPerAssembly)
# 1. Reads in a meraculous config file and outputs all of the associated config
# files to $PWD/configs
merparser = MerParse(myArgs.inputConfig, myArgs.sweep, myArgs.sstart,
myArgs.sstop, myArgs.sinterval, myArgs.maxProcs,
asPrefix = myArgs.prefix,
asSI = myArgs.index,
genus = myArgs.genus,
species = myArgs.species)
configPaths = merparser.sweeper_output()
#make the assemblies dir ONCE to avoid a race condition for os.makedirs()
cwd = os.path.abspath(os.getcwd())
allAssembliesDir = os.path.join(cwd, "assemblies")
if not os.path.exists(allAssembliesDir):
os.makedirs(allAssembliesDir)
#instantiate all of the classes that we will be using in parallel processing.
# configPaths above returns a dict with the run name and abs path of config
# as key:value pairs
instances = []
for runName in configPaths:
configPath = configPaths.get(runName)
thisInstance = MerRunner(runName, configPath, myArgs.cleanup)
instances.append(thisInstance)
if len(instances) == 0:
print("There are no meraculous folders in this directory. Exiting")
elif len(instances) > 0:
# run the program for each instance
# pool size is the number of simultaneous runs for the server
pool = ThreadPool(myArgs.simultaneous)
results = pool.map(mer_runner_dummy, instances)
pool.close()
pool.join() | 1. Reads in a meraculous config file and outputs all of the associated config
files to $PWD/configs
    2. The name of each run and the path to the directory are passed to a
multiprocessing core that controls which assemblies are executed and when. |
def _get_resource_type_cls(self, name, resource):
"""Attempts to return troposphere class that represents Type of
provided resource. Attempts to find the troposphere class who's
`resource_type` field is the same as the provided resources `Type`
field.
:param resource: Resource to find troposphere class for
:return: None: If no class found for provided resource
type: Type of provided resource
:raise ResourceTypeNotDefined:
Provided resource does not have a `Type` field
"""
# If provided resource does not have `Type` field
if 'Type' not in resource:
raise ResourceTypeNotDefined(name)
# Attempt to find troposphere resource with:
# `resource_type` == resource['Type']
try:
return self.inspect_resources[resource['Type']]
except KeyError:
# is there a custom mapping?
for custom_member in self._custom_members:
if custom_member.resource_type == resource['Type']:
return custom_member
# If no resource with `resource_type` == resource['Type'] found
return None | Attempts to return troposphere class that represents Type of
provided resource. Attempts to find the troposphere class who's
`resource_type` field is the same as the provided resources `Type`
field.
:param resource: Resource to find troposphere class for
:return: None: If no class found for provided resource
type: Type of provided resource
:raise ResourceTypeNotDefined:
Provided resource does not have a `Type` field |
def _cond_select_value_nonrecur(d,cond_match=None,**kwargs):
'''
d = {
"ActiveArea":"50829",
"Artist":"315",
"AsShotPreProfileMatrix":"50832",
"AnalogBalance":"50727",
"AsShotICCProfile":"50831",
"AsShotProfileName":"50934",
"AntiAliasStrength":"50738",
"AsShotNeutral":"50728",
"AsShotWhiteXY":"50729"
}
_cond_select_value_nonrecur(d,"50")
_cond_select_value_nonrecur(d,"72")
regex = re.compile("8$")
_cond_select_value_nonrecur(d,regex)
'''
if('cond_func' in kwargs):
cond_func = kwargs['cond_func']
else:
cond_func = _text_cond
if('cond_func_args' in kwargs):
cond_func_args = kwargs['cond_func_args']
else:
cond_func_args = []
rslt = {}
for key in d:
value = d[key]
if(cond_func(value,cond_match,*cond_func_args)):
rslt[key] = d[key]
else:
pass
return(rslt) | d = {
"ActiveArea":"50829",
"Artist":"315",
"AsShotPreProfileMatrix":"50832",
"AnalogBalance":"50727",
"AsShotICCProfile":"50831",
"AsShotProfileName":"50934",
"AntiAliasStrength":"50738",
"AsShotNeutral":"50728",
"AsShotWhiteXY":"50729"
}
_cond_select_value_nonrecur(d,"50")
_cond_select_value_nonrecur(d,"72")
regex = re.compile("8$")
_cond_select_value_nonrecur(d,regex) |
def get_issues(self, repo, keys):
""" Grab all the issues """
key1, key2 = keys
key3 = key1[:-1] # Just the singular form of key1
url = self.base_url + "/api/0/" + repo + "/" + key1
response = self.session.get(url, params=dict(status='Open'))
if not bool(response):
error = response.json()
code = error['error_code']
if code == 'ETRACKERDISABLED':
return []
else:
raise IOError('Failed to talk to %r %r' % (url, error))
issues = []
for result in response.json()[key2]:
idx = six.text_type(result['id'])
result['html_url'] = "/".join([self.base_url, repo, key3, idx])
issues.append((repo, result))
return issues | Grab all the issues |