def _reprJSON(self):
return {'__PepSeq__': [self.sequence, self.missedCleavage,
self.isUnique, list(self.proteins),
self.proteinPositions]} | Returns a JSON serializable representation of a ``PeptideSequence``
class instance. Use :func:`maspy.proteindb.PeptideSequence._fromJSON()`
to generate a new ``PeptideSequence`` instance from the return value.
:returns: a JSON serializable python object |
def _fromJSON(cls, jsonobject):
newInstance = cls(jsonobject[0], jsonobject[1])
newInstance.isUnique = jsonobject[2]
newInstance.proteins = set(jsonobject[3])
newInstance.proteinPositions = jsonobject[4]
return newInstance | Generates a new instance of :class:`maspy.proteindb.PeptideSequence`
from a decoded JSON object (as generated by
:func:`maspy.proteindb.PeptideSequence._reprJSON()`).
:param jsonobject: decoded JSON object
:returns: a new instance of :class:`PeptideSequence` |
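A minimal JSON round trip using the two hooks above; the constructor arguments ('PEPTIDER', 0) are assumptions derived from the ``_fromJSON`` signature (sequence, missedCleavage):

import json

pep = PeptideSequence('PEPTIDER', 0)  #assumed constructor: (sequence, missedCleavage)
serialized = json.dumps(pep._reprJSON())
decoded = json.loads(serialized)
restored = PeptideSequence._fromJSON(decoded['__PepSeq__'])
#restored.sequence == pep.sequence and restored.proteins == pep.proteins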
def _reprJSON(self):
jsonDict = dict(self.__dict__)  #copy, so the instance's sets are not replaced by lists
jsonDict['uniquePeptides'] = list(jsonDict['uniquePeptides'])
jsonDict['sharedPeptides'] = list(jsonDict['sharedPeptides'])
return {'__ProtSeq__': jsonDict} | Returns a JSON serializable representation of a ``ProteinSequence``
class instance. Use :func:`maspy.proteindb.ProteinSequence._fromJSON()`
to generate a new ``ProteinSequence`` instance from the return value.
:returns: a JSON serializable python object |
def _fromJSON(cls, jsonobject):
newInstance = cls(None, None)
newInstance.__dict__.update(jsonobject)
newInstance.uniquePeptides = set(newInstance.uniquePeptides)
newInstance.sharedPeptides = set(newInstance.sharedPeptides)
return newInstance | Generates a new instance of :class:`maspy.proteindb.ProteinSequence`
from a decoded JSON object (as generated by
:func:`maspy.proteindb.ProteinSequence._reprJSON()`).
:param jsonobject: decoded JSON object
:returns: a new instance of :class:`ProteinSequence` |
def save(self, path, compress=True):
with aux.PartiallySafeReplace() as msr:
filename = self.info['name'] + '.proteindb'
filepath = aux.joinpath(path, filename)
with msr.open(filepath, mode='w+b') as openfile:
self._writeContainer(openfile, compress=compress) | Writes the ``.proteins`` and ``.peptides`` entries to the hard disk
as a ``proteindb`` file.
.. note::
If ``.save()`` is called and no ``proteindb`` file is present in the
specified path a new file is generated, otherwise the old file is
replaced.
:param path: filedirectory to which the ``proteindb`` file is written.
The output file name is specified by ``self.info['name']``
:param compress: bool, True to use zip file compression |
def _writeContainer(self, filelike, compress=True):
aux.writeJsonZipfile(filelike, self.proteins, compress, 'w', 'proteins')
aux.writeJsonZipfile(filelike, self.peptides, compress, 'a', 'peptides')
zipcomp = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
with zipfile.ZipFile(filelike, 'a', allowZip64=True) as containerFile:
infodata = {key: value for key, value in
viewitems(self.info) if key != 'path'
}
containerFile.writestr('info', json.dumps(infodata), zipcomp) | Writes the ``.proteins`` and ``.peptides`` entries to the
``proteindb`` format. In addition it also dumps the ``self.info`` entry
to the zipfile with the filename ``info``. For details see
:func:`maspy.auxiliary.writeJsonZipfile()`
:param filelike: path to a file (str) or a file-like object
:param compress: bool, True to use zip file compression |
def load(cls, path, name):
filepath = aux.joinpath(path, name + '.proteindb')
with zipfile.ZipFile(filepath, 'r', allowZip64=True) as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
proteinsString = io.TextIOWrapper(containerZip.open('proteins'),
encoding='utf-8'
).read()
peptidesString = io.TextIOWrapper(containerZip.open('peptides'),
encoding='utf-8'
).read()
infoString = io.TextIOWrapper(containerZip.open('info'),
encoding='utf-8'
).read()
newInstance = cls()
newInstance.proteins = json.loads(proteinsString,
object_hook=ProteinSequence.jsonHook)
newInstance.peptides = json.loads(peptidesString,
object_hook=PeptideSequence.jsonHook)
newInstance.info.update(json.loads(infoString))
return newInstance | Imports the specified ``proteindb`` file from the hard disk.
:param path: filedirectory of the ``proteindb`` file
:param name: filename without the file extension ".proteindb"
.. note:: this generates rather large files, which actually take longer
to import than to newly generate. Maybe saving / loading should be
limited to the protein database without in silico digestion
information. |
def _calculateCoverageMasks(proteindb, peptidedb):
for proteinId, proteinEntry in viewitems(proteindb):
coverageMaskUnique = numpy.zeros(proteinEntry.length(), dtype='bool')
for peptide in proteinEntry.uniquePeptides:
startPos, endPos = peptidedb[peptide].proteinPositions[proteinId]
coverageMaskUnique[startPos-1:endPos] = True
coverageMaskShared = numpy.zeros(proteinEntry.length(), dtype='bool')
for peptide in proteinEntry.sharedPeptides:
startPos, endPos = peptidedb[peptide].proteinPositions[proteinId]
coverageMaskShared[startPos-1:endPos] = True
setattr(proteinEntry, 'coverageMaskUnique', coverageMaskUnique)
setattr(proteinEntry, 'coverageMaskShared', coverageMaskShared) | Calculate the sequence coverage masks for all proteindb elements.
Private method used by :class:`ProteinDatabase`.
A coverage mask is a numpy boolean array with the length of the protein
sequence. Each protein position that has been covered in at least one
peptide is set to True. Coverage masks are calculated for unique and for
shared peptides. Peptides are matched to proteins according to positions
derived by the digestion of the FASTA file.
Alternatively peptides could also be matched to proteins just by
sequence as it is done in :func:`pyteomics.parser.coverage`, but this is
not the case here.
:param proteindb: a dictionary containing :class:`ProteinSequence`
entries, for example ``ProteinDatabase.proteins``
:param peptidedb: a dictionary containing :class:`PeptideSequence`
entries, for example ``ProteinDatabase.peptides``
Sets two attributes for each ``ProteinSequence`` entry:
``.coverageMaskUnique`` = coverage mask of unique peptides
``.coverageMaskShared`` = coverage mask of shared peptides |
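The ``startPos-1:endPos`` slice above converts the stored 1-based peptide positions into a 0-based boolean mask; a small sketch with assumed positions:

import numpy

proteinLength = 10
mask = numpy.zeros(proteinLength, dtype='bool')
startPos, endPos = 3, 6  #1-based protein positions, as in proteinPositions
mask[startPos-1:endPos] = True  #marks 0-based indices 2 to 5
#sequence coverage of this mask: mask.sum() / float(mask.size) -> 0.4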
def fetch_keywords(codedata) :
# Read each row in codedata and count keywords per language
tmp = {}
language_counts = {}
for index, (language, code) in enumerate(codedata) :
if language not in shaman.SUPPORTING_LANGUAGES :
continue
if language not in tmp :
tmp[language] = {}
language_counts[language] = 0
language_counts[language] += 1
for keyword in shaman.KeywordFetcher.fetch( code ) :
# if keyword exists in fetched data, add '1' to keyword data
tmp[language][keyword] = tmp[language].get(keyword, 0) + 1
print('Fetch keyword %d/%d ' % (index, len(codedata)), end='\r')
# Get dataset indexed by keyword
ret = {}
for language in tmp :
for keyword, count in tmp[ language ].items() :
if keyword not in ret :
ret[ keyword ] = {}
ret[ keyword ][ language ] = (count / language_counts[ language ]) # Probability
print('Fetch keyword completed ')
return ret | Fetch keywords using shaman.KeywordFetcher.
Compute, for each keyword, its average probability of occurrence per language |
def match_patterns(codedata) :
ret = {}
for index1, pattern in enumerate(shaman.PatternMatcher.PATTERNS) :
print('Matching pattern %d "%s"' % (index1+1, pattern))
matcher = shaman.PatternMatcher(pattern)
tmp = {}
for index2, (language, code) in enumerate(codedata) :
if language not in shaman.SUPPORTING_LANGUAGES :
continue
if len(code) <= 20 or len(code) > 100000 :
continue
if language not in tmp :
tmp[language] = []
ratio = matcher.getratio(code)
tmp[language].append(ratio)
print('Matching patterns %d/%d ' % (index2, len(codedata)), end='\r')
ret[pattern] = {}
for language, data in tmp.items() :
ret[pattern][language] = sum(data) / max(len(data), 1)
print('Matching patterns completed ')
return ret | Match patterns using shaman.PatternMatcher.
Compute the average match ratio per pattern and language |
def facility(self, column=None, value=None, **kwargs):
return self._resolve_call('RAD_FACILITY', column, value, **kwargs) | Check information related to Radiation facilities.
>>> RADInfo().facility('state_code', 'CA') |
def facility_type(self, column=None, value=None, **kwargs):
return self._resolve_call('RAD_FACILITY_TYPE', column, value, **kwargs) | Basic identifying information for a RADInfo facility, including
the improved facility information maintained by the Facility
Registry System (FRS).
>>> RADInfo().facility_type('cit_ref_code', '40CFR300') |
def geo(self, column=None, value=None, **kwargs):
return self._resolve_call('RAD_GEO_LOCATION', column, value, **kwargs) | Locate a facility through geographic location.
>>> RADInfo().geo('geometric_type_code', '001') |
def regulation(self, column=None, value=None, **kwargs):
return self._resolve_call('RAD_REGULATION', column, value, **kwargs) | Provides relevant information about applicable regulations.
>>> RADInfo().regulation('title_id', 40) |
def regulatory_program(self, column=None, value=None, **kwargs):
return self._resolve_call('RAD_REGULATORY_PROG', column,
value, **kwargs) | Identifies the regulatory authority governing a facility, and, by
virtue of that identification, also identifies the regulatory program
of interest and the type of facility.
>>> RADInfo().regulatory_program('sec_cit_ref_flag', 'N') |
def collect_basic_info():
s = sys.version_info
_collect(json.dumps({'sys.version_info':tuple(s)}))
_collect(sys.version)
return sys.version | Collect basic info about the system, OS, Python version... |
def call(function):
message = 'call:%s.%s' % (function.__module__,function.__name__)
@functools.wraps(function)
def wrapper(*args, **kwargs):
_collect(message)
return function(*args, **kwargs)
return wrapper | Decorator that collects function call counts. |
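A usage sketch of the decorator above; ``_collect`` is whatever sink the surrounding module defines:

@call
def greet(name):
    return 'Hello, %s' % name

greet('world')  #records 'call:<module>.greet' via _collect, then runs greet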
def _parse_ip_stats_link_show(raw_result):
show_re = (
r'.+?RX:.*?\n'
r'\s*(?P<rx_bytes>\d+)\s+(?P<rx_packets>\d+)\s+(?P<rx_errors>\d+)\s+'
r'(?P<rx_dropped>\d+)\s+(?P<rx_overrun>\d+)\s+(?P<rx_mcast>\d+)'
r'.+?TX:.*?\n'
r'\s*(?P<tx_bytes>\d+)\s+(?P<tx_packets>\d+)\s+(?P<tx_errors>\d+)\s+'
r'(?P<tx_dropped>\d+)\s+(?P<tx_carrier>\d+)\s+(?P<tx_collisions>\d+)'
)
re_result = match(show_re, raw_result, DOTALL)
result = None
if (re_result):
result = re_result.groupdict()
for key, value in result.items():
if value is not None:
if value.isdigit():
result[key] = int(value)
return result | Parse the 'ip -s link show dev <dev>' command raw output.
:param str raw_result: vtysh raw result string.
:rtype: dict
:return: The parsed result of the show interface command in a \
dictionary of the form:
::
{
'rx_bytes': 0,
'rx_packets': 0,
'rx_errors': 0,
'rx_dropped': 0,
'rx_overrun': 0,
'rx_mcast': 0,
'tx_bytes': 0,
'tx_packets': 0,
'tx_errors': 0,
'tx_dropped': 0,
'tx_carrier': 0,
'tx_collisions': 0,
} |
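A hedged example of the parser above on assumed raw output of ``ip -s link show dev eth0`` (counter values invented for illustration):

raw = (
    '2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500\n'
    '    RX: bytes  packets  errors  dropped overrun mcast\n'
    '    1024       10       0       0       0       0\n'
    '    TX: bytes  packets  errors  dropped carrier collsns\n'
    '    2048       20       0       0       0       0\n'
)
stats = _parse_ip_stats_link_show(raw)
#stats['rx_bytes'] == 1024, stats['tx_packets'] == 20, all values cast to int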
def interface(enode, portlbl, addr=None, up=None, shell=None):
assert portlbl
port = enode.ports[portlbl]
if addr is not None:
assert ip_interface(addr)
cmd = 'ip addr add {addr} dev {port}'.format(addr=addr, port=port)
response = enode(cmd, shell=shell)
assert not response
if up is not None:
cmd = 'ip link set dev {port} {state}'.format(
port=port, state='up' if up else 'down'
)
response = enode(cmd, shell=shell)
assert not response | Configure an interface.
All parameters left as ``None`` are ignored and thus no configuration
action is taken for that parameter (left "as-is").
:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param str portlbl: Port label to configure. Port label will be mapped to
real port automatically.
:param str addr: IPv4 or IPv6 address to add to the interface:
- IPv4 address and netmask to assign to the interface in the form
``'192.168.20.20/24'``.
- IPv6 address and subnets to assign to the interface in the form
``'2001::1/120'``.
:param bool up: Bring up or down the interface.
:param str shell: Shell name to execute commands.
If ``None``, use the Engine Node default shell. |
def remove_ip(enode, portlbl, addr, shell=None):
assert portlbl
assert ip_interface(addr)
port = enode.ports[portlbl]
cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)
response = enode(cmd, shell=shell)
assert not response | Remove an IP address from an interface.
All parameters left as ``None`` are ignored and thus no configuration
action is taken for that parameter (left "as-is").
:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param str portlbl: Port label to configure. Port label will be mapped to
real port automatically.
:param str addr: IPv4 or IPv6 address to remove from the interface:
- IPv4 address to remove from the interface in the form
``'192.168.20.20'`` or ``'192.168.20.20/24'``.
- IPv6 address to remove from the interface in the form
``'2001::1'`` or ``'2001::1/120'``.
:param str shell: Shell name to execute commands.
If ``None``, use the Engine Node default shell. |
def add_route(enode, route, via, shell=None):
via = ip_address(via)
version = '-4'
if (via.version == 6) or \
(route != 'default' and ip_network(route).version == 6):
version = '-6'
cmd = 'ip {version} route add {route} via {via}'.format(
version=version, route=route, via=via
)
response = enode(cmd, shell=shell)
assert not response | Add a new static route.
:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param str route: Route to add, an IP in the form ``'192.168.20.20/24'``
or ``'2001::0/24'`` or ``'default'``.
:param str via: Via for the route as an IP in the form
``'192.168.20.20/24'`` or ``'2001::0/24'``.
:param shell: Shell name to execute commands. If ``None``, use the Engine
Node default shell.
:type shell: str or None |
def add_link_type_vlan(enode, portlbl, name, vlan_id, shell=None):
assert name
if name in enode.ports:
raise ValueError('Port {name} already exists'.format(name=name))
assert portlbl
assert vlan_id
port = enode.ports[portlbl]
cmd = 'ip link add link {dev} name {name} type vlan id {vlan_id}'.format(
dev=port, name=name, vlan_id=vlan_id)
response = enode(cmd, shell=shell)
assert not response, 'Cannot add virtual link {name}'.format(name=name)
enode.ports[name] = name | Add a new virtual link with the type set to VLAN.
Creates a new vlan device {name} on device {port}.
Will raise an exception if the name is already assigned.
:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param str portlbl: Port label to configure. Port label will be mapped
automatically.
:param str name: specifies the name of the new virtual device.
:param str vlan_id: specifies the VLAN identifier.
:param str shell: Shell name to execute commands. If ``None``, use the
Engine Node default shell. |
def remove_link_type_vlan(enode, name, shell=None):
assert name
if name not in enode.ports:
raise ValueError('Port {name} doesn\'t exist'.format(name=name))
cmd = 'ip link del link dev {name}'.format(name=name)
response = enode(cmd, shell=shell)
assert not response, 'Cannot remove virtual link {name}'.format(name=name)
del enode.ports[name] | Delete a virtual link.
Deletes a vlan device with the name {name}.
Will raise an exception if the port is not already present.
:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param str name: specifies the name of the virtual device to be removed.
:param str shell: Shell name to execute commands. If ``None``, use the
Engine Node default shell. |
def show_interface(enode, dev, shell=None):
assert dev
cmd = 'ip addr list dev {ldev}'.format(ldev=dev)
response = enode(cmd, shell=shell)
first_half_dict = _parse_ip_addr_show(response)
d = None
if (first_half_dict):
cmd = 'ip -s link list dev {ldev}'.format(ldev=dev)
response = enode(cmd, shell=shell)
second_half_dict = _parse_ip_stats_link_show(response)
d = first_half_dict.copy()
d.update(second_half_dict)
return d | Show the configured parameters and stats of an interface.
:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param str dev: Unix network device name. Ex 1, 2, 3..
:rtype: dict
:return: A combined dictionary as returned by both
:func:`topology_lib_ip.parser._parse_ip_addr_show`
:func:`topology_lib_ip.parser._parse_ip_stats_link_show` |
def build_mmd(target_folder=DEFAULT_LIBRARY_DIR):
mmd_dir = tempfile.mkdtemp()
mmd_repo = pygit2.clone_repository('https://github.com/jasedit/MultiMarkdown-5', mmd_dir,
checkout_branch='fix_windows')
mmd_repo.init_submodules()
mmd_repo.update_submodules()
build_dir = os.path.join(mmd_dir, 'build')
old_pwd = os.getcwd()
os.chdir(build_dir)
cmake_cmd = ['cmake', '-DCMAKE_BUILD_TYPE=Release', '-DSHAREDBUILD=1', '..']
if platform.system() == 'Windows':
is_64bit = platform.architecture()[0] == '64bit'
generator = 'Visual Studio 14 2015{0}'.format(' Win64' if is_64bit else '')
cmake_cmd.insert(-1, '-G')
cmake_cmd.insert(-1, '{0}'.format(generator))
subprocess.call(cmake_cmd)
PLATFORM_BUILDS[platform.system()]()
lib_file = 'libMultiMarkdown' + SHLIB_EXT[platform.system()]
if not os.path.exists(target_folder):
os.mkdir(target_folder)
src = os.path.join(build_dir, SHLIB_PREFIX[platform.system()], lib_file)
dest = os.path.join(target_folder, lib_file)
shutil.copyfile(src, dest)
os.chdir(old_pwd)
shutil.rmtree(mmd_dir, ignore_errors=True) | Build and install the MultiMarkdown shared library. |
def _setup():
projex_path = os.getenv('PROJEX_PATH')
if not projex_path:
return
base_path = os.path.dirname(__file__)
logger.debug('Loading PROJEX_PATH: %s' % projex_path)
# load the defaults from the install directory
# load the paths from the environment
paths = projex_path.split(os.path.pathsep)
paths += [
os.path.join(base_path, 'userplug'),
os.path.join(base_path, 'stdplug'),
os.path.join(base_path, 'lib'),
]
sys.path = paths + sys.path | Sets up the global import environment variables by registering the
sub-folders for projex as import locations. When defining your
custom manager, you will want to overload this method to do any
sort of global initialization that you wish before continuing.
:warning This method is called during environment setup and should
not be called directly. |
def appendPath(self, path):
# normalize the path
path = os.path.normcase(nstr(path)).strip()
if path and path != '.' and path not in sys.path:
sys.path.append(path)
self._addedpaths.append(path)
return True
return False | Appends the inputted path to the end of the sys.path variable,
provided the path does not already exist in it.
:param path
:type str
:return bool: success |
def expandvars(self, text, environ=None, cache=None):
if not environ:
environ = os.environ
# make sure we have data
if not text:
return ''
# check for circular dependencies
cache = cache or {}
# return the cleaned variable
output = nstr(text)
keys = re.findall(r'\$(\w+)|\${(\w+)\}|\%(\w+)\%', text)
for first, second, third in keys:
repl = ''
key = ''
if first:
repl = '$%s' % first
key = first
elif second:
repl = '${%s}' % second
key = second
elif third:
repl = '%%%s%%' % third
key = third
else:
continue
value = environ.get(key)
if value:
if key not in cache:
cache[key] = value
value = self.expandvars(value, environ, cache)
else:
err = '%s environ variable causes an infinite loop.' % key
logger.warning(err)
value = cache[key]
else:
value = repl
output = output.replace(repl, value)
return os.path.expanduser(output) | Recursively expands the text variables, unlike the \
os.path.expandvars method, which only expands a single level. The cache \
value should be left blank as it is used to protect against recursion.
:param text | <str>
environ | <dict> || None
cache | <dict> { <str>: <str>, .. }
:return <str> |
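Recursive expansion in action; ``mgr`` stands for an instance of the surrounding manager class and the environ dict is an assumption:

environ = {'BASE': '/opt/app', 'DATA': '$BASE/data'}
mgr.expandvars('$DATA/cache', environ=environ)  #-> '/opt/app/data/cache'
#a single-pass expansion would stop at '$BASE/data/cache'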
def pushPath(self, path):
# normalize the path
path = os.path.normcase(nstr(path)).strip()
if path and path != '.' and path not in sys.path:
sys.path.append(path)
self._addedpaths.insert(0, path)
return True
return False | Pushes the inputted path at the front of the sys.path variable, making
it the first path python uses when importing a module.
:param path
:type str
:return bool: success |
def requires(self, *modules):
self._setup()
for module in modules:
if '-' in module:
parts = module.split('-')
module = parts[0]
version = '-'.join(parts)
else:
version = ''
if module in self._loadedRequires:
continue
self._loadedRequires.append(module)
path_key = 'PROJEX_%s_PATH' % nstr(module).upper()
env_path = os.getenv(path_key)
logger.debug('Looking up %s: %s' % (path_key, env_path))
# push the path for the particular module if found in the env
if env_path:
self.pushPath(env_path) | Registers the system paths for the inputted modules so that they can
be imported properly. By default, this will check to see if the
key PROJEX_[MODULE]_PATH exists in the environment, and if so, insert
that path to the front of the sys.path for import. Out of the box
installations will register import paths to default projex folders
and won't need to define these path variables. (lib/,stdplug/,userplug)
:param *modules ( <str>, .. )
:usage |>>> import projex
|>>> projex.logger.setLevel( projex.logging.DEBUG )
|>>> projex.environ().requires( 'orb', 'anansi' )
|DEBUG: EnvManager.requires: PROJEX_ORB_PATH
|DEBUG: EnvManager.requires: PROJEX_ANANSI_PATH
|>>> import orb
|>>> import openwebdk |
def refactor(module, name, repl):
name = nstr(name)
# import a module when refactoring based on a string
if isinstance(module, basestring):
try:
module = __import__(module)
except ImportError:
logger.exception('Could not import module: %s' % module)
return False
try:
glbls = module.__dict__
except AttributeError:
err = '%s cannot support refactoring.' % module.__name__
logger.exception(err)
return False
if name in glbls:
# refactor the value
glbls[name] = repl
return True
else:
err = '%s is not a member of %s.' % (name, module.__name__)
logger.warning(err)
return False | Replaces the name in the module dictionary with the inputted replace \
value.
:param module | <str> || <module>
name | <str>
repl | <variant>
:return <bool> |
def current():
if not EnvManager._current:
path = os.environ.get('PROJEX_ENVMGR_PATH')
module = os.environ.get('PROJEX_ENVMGR_MODULE')
clsname = os.environ.get('PROJEX_ENVMGR_CLASS')
cls = EnvManager
if module and clsname:
# check if the user specified an import path
if path:
logger.info('Adding env manager path: %s' % path)
sys.path.insert(0, path)
logger.info('Loading env manager: %s.%s' % (module, clsname))
try:
__import__(module)
mod = sys.modules[module]
cls = getattr(mod, clsname)
except ImportError:
logger.error('Could not import env manager %s', module)
except KeyError:
logger.error('Could not import env manager %s', module)
except AttributeError:
msg = '%s is not a valid class of %s' % (clsname, module)
logger.error(msg)
EnvManager._current = cls()
return EnvManager._current | Returns the current environment manager for the projex system.
:return <EnvManager> |
def fileImport(filepath, ignore=None):
basepath, package = EnvManager.packageSplit(filepath)
if not (basepath and package):
return None
# make sure this is not part of the ignored package list
if ignore and package in ignore:
return None
basepath = os.path.normcase(basepath)
if basepath not in sys.path:
sys.path.insert(0, basepath)
logger.debug('Importing: %s' % package)
try:
__import__(package)
module = sys.modules[package]
except ImportError:
logger.exception('ImportError: %s' % package)
return None
except KeyError:
logger.exception('Could not find sys.modules package: %s' % package)
return None
except StandardError:
logger.exception('Unknown error occurred while importing %s' % package)
return None
return module | Imports the module located at the given filepath.
:param filepath | <str>
ignore | [<str>, ..] || None
:return <module> || None |
def packageSplit(filepath):
filepath = nstr(filepath).strip().strip('.')
if not filepath:
return '', ''
basepath, module = os.path.split(nstr(filepath))
module = os.path.splitext(module)[0]
pathsplit = os.path.normpath(basepath).split(os.path.sep)
packagesplit = []
if module and module != '__init__':
packagesplit.append(module)
testpath = os.path.sep.join(pathsplit + ['__init__.py'])
while os.path.exists(testpath):
packagesplit.insert(0, pathsplit[-1])
pathsplit = pathsplit[:-1]
testpath = os.path.sep.join(pathsplit + ['__init__.py'])
return os.path.sep.join(pathsplit), '.'.join(packagesplit) | Determines the python path, and package information for the inputted
filepath.
:param filepath | <str>
:return (<str> path, <str> package) |
def join_all(self, *parts):
url = util.join_all(self.domain, *parts)
return url | Join all parts with domain. Example domain: https://www.python.org
:param parts: Other parts, example: "/doc", "/py27"
:return: url |
def add_params(self, endpoint, params):
assert endpoint.startswith(self.domain)
return util.add_params(endpoint, params) | Combine query endpoint and params. |
def generate_requirements_files(self, base_dir='.'):
print("Creating requirements files\n")
# TODO How to deal with requirements that are not simple, e.g. a github url
shared = self._get_shared_section()
requirements_dir = self._make_requirements_directory(base_dir)
for section in self.config.sections():
if section == 'metadata':
continue
requirements = {}
for option in self.config.options(section):
requirements[option] = self.config.get(section, option)
if not requirements:
# No need to write out an empty file
continue
filename = os.path.join(requirements_dir, '%s.txt' % section)
self._write_requirements_file(shared, section, requirements, filename) | Generate set of requirements files for config |
def _write_default_sections(self):
self.config.add_section('metadata')
self.config.set('metadata', 'shared', 'common')
self.config.add_section('common')
self.config.add_section('development')
self.config.add_section('production') | Starting from scratch, so create a default rc file |
def _parse_requirements(self, input):
results = []
for line in input:
(package, version) = self._parse_line(line)
if package:
results.append((package, version))
return tuple(results) | Parse a list of requirements specifications.
Lines that look like "foobar==1.0" are parsed; all other lines are
silently ignored.
Returns a tuple of tuples, where each inner tuple is:
(package, version) |
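``_parse_line`` is not shown in this excerpt; a minimal sketch consistent with the docstring (lines like "foobar==1.0" are parsed, everything else is ignored) could look like:

import re

_LINE_RE = re.compile(r'^([A-Za-z0-9._-]+)==(\S+)$')

def _parse_line(self, line):
    #returns (package, version) for pinned lines, (None, None) otherwise
    match = _LINE_RE.match(line.strip())
    if match:
        return match.group(1), match.group(2)
    return None, None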
def create_rc_file(self, packages):
print("Creating rcfile '%s'\n" % self.rc_filename)
# TODO bug with == in config file
if not self.config.sections():
self._write_default_sections()
sections = {}
section_text = []
for i, section in enumerate(self.config.sections()):
if section == 'metadata':
continue
sections[i] = section
section_text.append('%s. %s' % (i, section))
section_text = ' / '.join(section_text)
self._remap_stdin()
package_names = set()
lines = packages.readlines()
requirements = self._parse_requirements(lines)
for (package, version) in requirements:
package_names.add(package)
section, configured_version = self._get_option(package)
# Package already exists in configuration
if section:
# If there is a configured version, update it. If not, leave it unversioned.
if configured_version:
if configured_version != version:
print("Updating '%s' version from '%s' to '%s'"
% (package, configured_version, version))
self.config.set(section, package, version)
continue
section = self._get_section(package, sections, section_text)
self._set_option(section, package, version)
for section in self.config.sections():
if section == 'metadata':
continue
for option in self.config.options(section):
if option not in package_names:
print("Removing package '%s'" % option)
self.config.remove_option(section, option)
rc_file = open(self.rc_filename, 'w+')
self.config.write(rc_file)
rc_file.close() | Create the rc file from a list of installed packages |
def upgrade_packages(self, packages):
print("Upgrading packages\n")
package_list = []
requirements = self._parse_requirements(packages.readlines())
for (package, version) in requirements:
package_list.append(package)
if package_list:
args = [
"pip",
"install",
"-U",
]
args.extend(package_list)
subprocess.check_call(args)
else:
print("No packages to upgrade") | Upgrade all specified packages to latest version |
def determine_extra_packages(self, packages):
args = [
"pip",
"freeze",
]
installed = subprocess.check_output(args, universal_newlines=True)
installed_list = set()
lines = installed.strip().split('\n')
for (package, version) in self._parse_requirements(lines):
installed_list.add(package)
package_list = set()
for (package, version) in self._parse_requirements(packages.readlines()):
package_list.add(package)
removal_list = installed_list - package_list
return tuple(removal_list) | Return all packages that are installed, but missing from "packages".
Return value is a tuple of the package names |
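A usage sketch, assuming ``mgr`` is an instance of the surrounding class, ``pip freeze`` reports foo==1.0 and bar==2.0, and the file lists only foo:

with open('requirements.txt') as packages:
    extras = mgr.determine_extra_packages(packages)
#extras -> ('bar',)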
def remove_extra_packages(self, packages, dry_run=False):
removal_list = self.determine_extra_packages(packages)
if not removal_list:
print("No packages to be removed")
else:
if dry_run:
print("The following packages would be removed:\n %s\n" %
"\n ".join(removal_list))
else:
print("Removing packages\n")
args = [
"pip",
"uninstall",
"-y",
]
args.extend(list(removal_list))
subprocess.check_call(args) | Remove all packages missing from list |
def rewrap(self, **kwargs):
if self.inplace:
for key, val in kwargs.items():
setattr(self, key, val)
return self
else:
for key in ['obj', 'default', 'skipmissing', 'inplace', 'empty']:
kwargs.setdefault(key, getattr(self, key))
return pluckable(**kwargs) | Inplace constructor. Depending on `self.inplace`, rewrap `obj`, or
just update internal vars, possibly including the `obj`. |
def _filtered_list(self, selector):
res = []
for elem in self.obj:
self._append(elem, selector, res)
return res | Iterate over `self.obj` list, extracting `selector` from each
element. The `selector` can be a simple integer index, or any valid
key (hashable object). |
def _sliced_list(self, selector):
if self.skipmissing:
return self.obj[selector]
# TODO: can be optimized by observing list bounds
keys = xrange(selector.start or 0,
selector.stop or sys.maxint,
selector.step or 1)
res = []
for key in keys:
self._append(self.obj, key, res, skipmissing=False)
return res | For slice selectors operating on lists, we need to handle them
differently, depending on ``skipmissing``. In explicit mode, we may have
to expand the list with ``default`` values. |
def _extract_from_object(self, selector):
if isinstance(selector, slice):
# we must expand the slice manually, in order to be able to apply to
# for example, to mapping types, or general objects
# (e.g. slice `4::2` will filter all even numerical keys/attrs >=4)
start = selector.start or 0
step = selector.step or 1
if selector.stop is None:
if hasattr(self.obj, "keys"):
# filter keys by slice
keys = \
[k for k in self.obj.keys() if isinstance(k, baseinteger) \
and k >= start and (k - start) % step == 0]
elif hasattr(self.obj, "__len__"):
# object we slice should have a length (__len__ method),
keys = xrange(start, len(self.obj), step)
else:
# otherwise, we don't know how to slice, so just skip it,
# instead of failing
keys = []
else:
keys = xrange(start, selector.stop, step)
else:
keys = [selector]
res = []
for key in keys:
self._append(self.obj, key, res)
return res | Extracts all values from `self.obj` object addressed with a `selector`.
Selector can be a ``slice``, or a singular value extractor in form of a
valid dictionary key (hashable object).
Object (operated on) can be anything with an itemgetter or attrgetter,
including, but not limited to, `dict` and `list`.
Itemgetter is preferred over attrgetter, except when called as `.key`.
If `selector` is a singular value extractor (like a string, integer,
etc), a single value (for a given key) is returned if key exists, an
empty list if not.
If `selector` is a ``slice``, each key from that range is extracted;
failing-back, again, to an empty list. |
def items(self):
if self.empty:
return iter([])
val = self.value
if hasattr(val, "iteritems"):
return val.iteritems()
elif hasattr(val, "items"):
return val.items()
else:
return iter(self) | Behave like `dict.items` for mapping types (iterator over (key, value)
pairs), and like `iter` for sequence types (iterator over values). |
def forceutc(t: Union[str, datetime.datetime, datetime.date, np.datetime64]) -> Union[datetime.datetime, datetime.date]:
# need to passthrough None for simpler external logic.
# %% polymorph to datetime
if isinstance(t, str):
t = parse(t)
elif isinstance(t, np.datetime64):
t = t.astype(datetime.datetime)
elif isinstance(t, datetime.datetime):
pass
elif isinstance(t, datetime.date):
return t
elif isinstance(t, (np.ndarray, list, tuple)):
return np.asarray([forceutc(T) for T in t])
else:
raise TypeError('datetime only input')
# %% enforce UTC on datetime
if t.tzinfo is None: # datetime-naive
t = t.replace(tzinfo=UTC)
else: # datetime-aware
t = t.astimezone(UTC) # changes timezone, preserving absolute time. E.g. noon EST = 5PM UTC
return t | Add UTC timezone to datetime-naive input and convert datetime-aware input to UTC
input: python datetime (naive, utc, non-utc) or Numpy datetime64 #FIXME add Pandas and AstroPy time classes
output: utc datetime |
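Usage sketch; ``parse`` (dateutil) and ``UTC`` are the imports the function body relies on:

import datetime

naive = datetime.datetime(2020, 1, 1, 12, 0)
forceutc(naive)  #-> 2020-01-01 12:00:00+00:00, tzinfo attached, clock unchanged
forceutc('2020-01-01T07:00:00-05:00')  #parsed, then converted to 12:00 UTC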
def on_assert_failed_print_details(actual, expected):
try:
yield
except AssertionError:
# diff = difflib.unified_diff(expected.splitlines(), actual.splitlines(),
# "expected", "actual")
diff = difflib.ndiff(expected.splitlines(), actual.splitlines())
diff_text = u"\n".join(diff)
print(u"DIFF (+ ACTUAL, - EXPECTED):\n{0}\n".format(diff_text))
if DEBUG:
print(u"expected:\n{0}\n".format(expected))
print(u"actual:\n{0}\n".format(actual))
raise | Print text details in case of assertion failures.
.. sourcecode:: python
with on_assert_failed_print_details(actual_text, expected_text):
assert actual == expected |
def on_error_print_details(actual, expected):
try:
yield
except Exception:
diff = difflib.ndiff(expected.splitlines(), actual.splitlines())
diff_text = u"\n".join(diff)
print(u"DIFF (+ ACTUAL, - EXPECTED):\n{0}\n".format(diff_text))
if DEBUG:
print(u"expected:\n{0}\n".format(expected))
print(u"actual:\n{0}".format(actual))
raise | Print text details in case of errors.
.. sourcecode:: python
with on_error_print_details(actual_text, expected_text):
... # Do something |
def step_a_new_working_directory(context):
command_util.ensure_context_attribute_exists(context, "workdir", None)
command_util.ensure_workdir_exists(context)
shutil.rmtree(context.workdir, ignore_errors=True) | Creates a new, empty working directory |
def step_use_curdir_as_working_directory(context):
context.workdir = os.path.abspath(".")
command_util.ensure_workdir_exists(context) | Uses the current directory as working directory |
def step_a_file_named_filename_and_encoding_with(context, filename, encoding):
__encoding_is_valid = True
assert context.text is not None, "ENSURE: multiline text is provided."
assert not os.path.isabs(filename)
assert __encoding_is_valid
command_util.ensure_workdir_exists(context)
filename2 = os.path.join(context.workdir, filename)
pathutil.create_textfile_with_contents(filename2, context.text, encoding) | Creates a textual file with the content provided as docstring. |
def step_a_file_named_filename_with(context, filename):
step_a_file_named_filename_and_encoding_with(context, filename, "UTF-8")
# -- SPECIAL CASE: For usage with behave steps.
if filename.endswith(".feature"):
command_util.ensure_context_attribute_exists(context, "features", [])
context.features.append(filename) | Creates a textual file with the content provided as docstring. |
def step_an_empty_file_named_filename(context, filename):
assert not os.path.isabs(filename)
command_util.ensure_workdir_exists(context)
filename2 = os.path.join(context.workdir, filename)
pathutil.create_textfile_with_contents(filename2, "") | Creates an empty file. |
def step_i_run_command(context, command):
command_util.ensure_workdir_exists(context)
context.command_result = command_shell.run(command, cwd=context.workdir)
command_util.workdir_save_coverage_files(context.workdir)
if False and DEBUG:
print(u"run_command: {0}".format(command))
print(u"run_command.output {0}".format(context.command_result.output)) | Run a command as subprocess, collect its output and returncode. |
def step_it_should_pass_with(context):
'''
EXAMPLE:
...
when I run "behave ..."
then it should pass with:
'''
assert context.text is not None, "ENSURE: multiline text is provided."
step_command_output_should_contain(context)
assert_that(context.command_result.returncode, equal_to(0),
context.command_result.output) | EXAMPLE:
...
when I run "behave ..."
then it should pass with:
"""
TEXT
""" |
def step_it_should_fail_with(context):
'''
EXAMPLE:
...
when I run "behave ..."
then it should fail with:
'''
assert context.text is not None, "ENSURE: multiline text is provided."
step_command_output_should_contain(context)
assert_that(context.command_result.returncode, is_not(equal_to(0))) | EXAMPLE:
...
when I run "behave ..."
then it should fail with:
"""
TEXT
""" |
def step_command_output_should_contain_text(context, text):
'''
EXAMPLE:
...
Then the command output should contain "TEXT"
'''
expected_text = text
if "{__WORKDIR__}" in expected_text or "{__CWD__}" in expected_text:
expected_text = textutil.template_substitute(text,
__WORKDIR__ = posixpath_normpath(context.workdir),
__CWD__ = posixpath_normpath(os.getcwd())
)
actual_output = context.command_result.output
with on_assert_failed_print_details(actual_output, expected_text):
textutil.assert_normtext_should_contain(actual_output, expected_text) | EXAMPLE:
...
Then the command output should contain "TEXT" |
def step_command_output_should_not_contain_text(context, text):
'''
EXAMPLE:
...
then the command output should not contain "TEXT"
'''
expected_text = text
if "{__WORKDIR__}" in text or "{__CWD__}" in text:
expected_text = textutil.template_substitute(text,
__WORKDIR__ = posixpath_normpath(context.workdir),
__CWD__ = posixpath_normpath(os.getcwd())
)
actual_output = context.command_result.output
with on_assert_failed_print_details(actual_output, expected_text):
textutil.assert_normtext_should_not_contain(actual_output, expected_text) | EXAMPLE:
...
then the command output should not contain "TEXT" |
def step_command_output_should_contain_exactly_text(context, text):
expected_text = text
if "{__WORKDIR__}" in text or "{__CWD__}" in text:
expected_text = textutil.template_substitute(text,
__WORKDIR__ = posixpath_normpath(context.workdir),
__CWD__ = posixpath_normpath(os.getcwd())
)
actual_output = context.command_result.output
textutil.assert_text_should_contain_exactly(actual_output, expected_text) | Verifies that the command output of the last command contains the
expected text.
.. code-block:: gherkin
When I run "echo Hello"
Then the command output should contain exactly "Hello" |
def compile(self, prog, features=Features.ALL):
return LPProg(Parser(Tokenizer(prog, features), features).program(), features) | Currently this compiler simply returns an interpreter instead of compiling
TODO: Write this compiler to increase LPProg run speed and to prevent exceeding maximum recursion depth
Args:
prog (str): A string containing the program.
features (FeatureSet): The set of features to enable during compilation.
Returns:
LPProg |
def cprint(self, cstr):
cstr = str(cstr) # Force it to be a string
cstr_len = len(cstr)
prev_cstr_len = len(self._prev_cstr)
num_spaces = 0
if cstr_len < prev_cstr_len:
num_spaces = abs(prev_cstr_len - cstr_len)
try:
print(cstr + " " * num_spaces, end='\r')
self._prev_cstr = cstr
except UnicodeEncodeError:
print('Processing...', end='\r')
self._prev_cstr = 'Processing...' | Clear line, then reprint on same line
:param cstr: string to print on current line |
def get_file_list(path, max_depth=1, cur_depth=0):
if os.path.exists(path):
for name in os.listdir(path):
if name.startswith('.'):
continue
full_path = os.path.join(path, name)
if os.path.isdir(full_path):
if cur_depth == max_depth:
continue
file_list = get_file_list(full_path, max_depth, cur_depth + 1)
for result in file_list:
yield result
else:
yield full_path | Recursively yields all files up to ``max_depth``
in a directory. |
def get_applied_migrations(databases=None):
if not databases:
databases = get_capable_databases()
else:
# We only loop through databases that are listed as "capable"
all_databases = list(get_capable_databases())
databases = list(
itertools.ifilter(lambda x: x in all_databases, databases)
)
results = defaultdict(list)
for db in databases:
for x in Migration.objects.using(db).order_by("migration_label"):
results[db].append(x.migration_label)
return results | Returns a dictionary containing lists of all applied migrations
where the key is the database alias. |
def get_all_migrations(path, databases=None):
# database: [(number, full_path)]
possible_migrations = defaultdict(list)
try:
in_directory = sorted(get_file_list(path))
except OSError:
import traceback
print "An error occurred while reading migrations from %r:" % path
traceback.print_exc()
return {}
# Iterate through our results and discover which migrations are
# actually runnable
for full_path in in_directory:
child_path, script = os.path.split(full_path)
name, ext = os.path.splitext(script)
# the database component is default if this is in the root directory
# is <directory> if in a subdirectory
if path == child_path:
db = DEFAULT_DB_ALIAS
else:
db = os.path.split(child_path)[-1]
# filter by database if set
if databases and db not in databases:
continue
match = MIGRATION_NAME_RE.match(name)
if match is None:
raise MigrationError("Invalid migration file prefix %r "
"(must begin with a number)" % name)
number = int(match.group(1))
if ext in [".sql", ".py"]:
possible_migrations[db].append((number, full_path))
return possible_migrations | Returns a dictionary of database => [migrations] representing all
migrations contained in ``path``. |
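Given the naming rule above (numeric prefix, ``.sql`` or ``.py`` extension, optional per-database subdirectory), an assumed layout maps like this:

#migrations/0001_init.sql        -> {'default':   [(1, '<path>/0001_init.sql')]}
#migrations/analytics/0002_x.py  -> {'analytics': [(2, '<path>/analytics/0002_x.py')]}
all_migrations = get_all_migrations('migrations')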
def get_pending_migrations(path, databases=None, stop_at=None):
if stop_at is None:
stop_at = float("inf")
# database: [(number, full_path)]
possible_migrations = get_all_migrations(path, databases)
# database: [full_path]
applied_migrations = get_applied_migrations(databases)
# database: [full_path]
to_execute = defaultdict(list)
for database, scripts in possible_migrations.iteritems():
applied = applied_migrations[database]
pending = to_execute[database]
for number, migration in scripts:
path, script = os.path.split(migration)
if script not in applied and number <= stop_at:
pending.append(script)
return dict((k, v) for k, v in to_execute.iteritems() if v) | Returns a dictionary of database => [migrations] representing all pending
migrations. |
def updateFgiAnnotationFromFi(fgiContainer, fiContainer, largerBetter):
for fgi in listvalues(fgiContainer.container):
annotations = list()
for specfile, fiId in zip(fgi.specfiles, fgi.featureIds):
fi = fiContainer.getItem(specfile, fiId)
if not fi.isAnnotated:
continue
annotations.append([fi.score, fi.peptide, fi.sequence])
annotations.sort(reverse=largerBetter)
if len(annotations) > 0:
fgi.isAnnotated = True
fgi.score = annotations[0][0]
fgi.peptide = annotations[0][1]
fgi.sequence = annotations[0][2]
else:
fgi.isAnnotated = False | Annotate feature group items with the best scoring annotation of their
feature items.
:param fgiContainer: container of feature group items that are updated
:param fiContainer: container providing the annotated feature items
:param largerBetter: bool, True if a larger score is better |
def continuityGrouping(values, limit):
lastValue = values[0]
lastPos = 0
groupStartPos = 0
groupPos = list()
for currPos, currValue in enumerate(values):
if currValue - lastValue > limit:
groupPos.append((groupStartPos, lastPos))
groupStartPos = currPos
lastPos = currPos
lastValue = currValue
groupPos.append((groupStartPos, lastPos))
return groupPos | Group sorted values into continuous groups whose neighboring values differ by at most "limit".
:param values: ``numpy.array`` containg ``int`` or ``float``, must be sorted
:param limit: the maximal difference between two values, if this number is
exceeded a new group is generated
:returns: a list containing array start and end positions of continuous
groups |
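A small worked example of the grouping above; the limit splits the array wherever two neighbors differ by more than 0.5:

import numpy

values = numpy.array([1.0, 1.2, 1.3, 5.0, 5.1, 9.0])
continuityGrouping(values, 0.5)
#-> [(0, 2), (3, 4), (5, 5)]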
def massTimeContinuityGroups(arrays, mKey, tKey, mLimit, tLimit):
arrayPositions = numpy.array(range(listvalues(arrays)[0].size))
finalGroupPositions = list()
for start, end in continuityGrouping(arrays[mKey], mLimit):
if start == end:
finalGroupPositions.append(arrayPositions[start:end+1])
continue
#Perform time continuity grouping on the mass continuity groups
preSelectionT = arrays[tKey][start:end+1]
preSelectionM = arrays[mKey][start:end+1]
preSelectionPositions = arrayPositions[start:end+1]
_sort = numpy.argsort(preSelectionT)
preGroups = continuityGrouping(preSelectionT[_sort], tLimit)
#Perform a second round of mass continuity grouping
finalGroupPrePos = list()
for _start, _end in preGroups:
preGroupPos = sorted(_sort[_start:_end+1])
secGroups = continuityGrouping(preSelectionM[preGroupPos], mLimit)
for fStart, fEnd in secGroups:
finalGroupPrePos.append(preGroupPos[fStart:fEnd+1])
#Add the final group positions
for _pos in finalGroupPrePos:
finalGroupPositions.append(preSelectionPositions[_pos])
return finalGroupPositions | Group entries of "arrays" by continuity in mass and, within each mass group, by continuity in time.
:param arrays: a dictionary containing ``numpy.arrays``, must be sorted
according to the "mKey" (mass key) value.
:param mKey: "arrays" key that contains the mass ``numpy.array``
:param tKey: "arrays" key that contains the time ``numpy.array``
:param mLimit: maximal mass difference for separating continuity groups
:param tLimit: maximal time difference for separating continuity groups
:returns: a list containing array positions of continuous groups. |
def getContGroupArrays(arrays, groupPositions, arrayKeys=None):
if arrayKeys is None:
arrayKeys = list(viewkeys(arrays))
matchingArrays = dict()
for key in arrayKeys:
matchingArrays[key] = arrays[key][groupPositions]
return matchingArrays | Convenience function to generate a subset of arrays from specified array
positions.
:param arrays: a dictionary containing ``numpy.arrays``
:param groupPositions: arrays positions that should be included in the
subset of arrays
:param arrayKeys: a list of "arrays" keys that should be included in the
subset of arrays, if None all keys are selected
:returns: a dictionary containing ``numpy.arrays`` |
def calcDistMatchArr(matchArr, tKey, mKey):
#Calculate a sorted list of all euclidean feature distances
matchArrSize = listvalues(matchArr)[0].size
distInfo = {'posPairs': list(), 'eucDist': list()}
_matrix = numpy.swapaxes(numpy.array([matchArr[tKey], matchArr[mKey]]), 0, 1)
for pos1 in range(matchArrSize-1):
for pos2 in range(pos1+1, matchArrSize):
distInfo['posPairs'].append((pos1, pos2))
distInfo['posPairs'] = numpy.array(distInfo['posPairs'])
distInfo['eucDist'] = scipy.spatial.distance.pdist(_matrix)
distSort = numpy.argsort(distInfo['eucDist'])
for key in list(viewkeys(distInfo)):
distInfo[key] = distInfo[key][distSort]
return distInfo | Calculate the euclidean distance of all array positions in "matchArr".
:param matchArr: a dictionary of ``numpy.arrays`` containing at least two
entries that are treated as cartesian coordinates.
:param tKey: "matchArr" key that contains the time ``numpy.array``
:param mKey: "matchArr" key that contains the mass ``numpy.array``
:returns: a dictionary, sorted by the euclidean distances:
{'eucDist': numpy.array([eucDistance, eucDistance, ...]),
'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...])
} |
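A hedged sketch with invented coordinates; 'rt' and 'mz' stand in for the time and mass keys:

import numpy

matchArr = {'rt': numpy.array([1.0, 1.1, 5.0]),
            'mz': numpy.array([300.0, 300.0, 300.0])}
distInfo = calcDistMatchArr(matchArr, 'rt', 'mz')
#distInfo['eucDist'] is sorted ascending: [0.1, 3.9, 4.0]
#distInfo['posPairs'][0] -> array([0, 1]), the closest pair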
def proximityGrouping(matchArr, distInfo, distLimit, categoryKey):
#Group fi according to their proximity
matchArrSize = listvalues(matchArr)[0].size
linkageGroups = {p: [p] for p in range(matchArrSize)}
posToGroup = {p: p for p in range(matchArrSize)}
groupCategories = {p: set([s]) for p, s in zip(range(matchArrSize),
matchArr[categoryKey]
)
}
for (pos1, pos2), dist in zip(distInfo['posPairs'], distInfo['eucDist']):
if dist > distLimit:
break
id1 = posToGroup[pos1]
id2 = posToGroup[pos2]
if groupCategories[id1].intersection(groupCategories[id2]):
continue
linkageGroups[id1].extend(linkageGroups[id2])
groupCategories[id1].update(groupCategories[id2])
for _pos in linkageGroups[id2]:
posToGroup[_pos] = id1
del linkageGroups[id2]
del groupCategories[id2]
return linkageGroups | Group array positions according to the distance value provided by
``distInfo['eucDist']``, with the limitation that each category value
can occur only once per group.
:param matchArr: a dictionary of ``numpy.arrays``, as used for
:func:`calcDistMatchArr()`
:param distInfo: must be sorted, provide keys "posPairs"
and "eucDist". As generated by :func:`calcDistMatchArr()`
:param distLimit: maximal euclidean distance for linking two entries
:param categoryKey: "matchArr" key whose values may occur only once per
group
:returns: a mapping of group id to linked array positions |
def fiGroupFromLinkageGroup(matchArr, arrPos, groupId, timeKey, massKey):
fgi = Fgi(groupId)
minT = numpy.min(matchArr[timeKey][arrPos])
maxT = numpy.max(matchArr[timeKey][arrPos])
minM = numpy.min(matchArr[massKey][arrPos])
maxM = numpy.max(matchArr[massKey][arrPos])
fgi.isValid = True
fgi.specfiles = matchArr['specfile'][arrPos].tolist()
fgi.featureIds = matchArr['id'][arrPos].tolist()
fgi.isAnnotated = numpy.any(matchArr['isAnnotated'][arrPos])
fgi.coordinates = ((minT, maxT), (minM, maxM))
#fgi.clusterType = clusterType
return fgi | Generate a new :class:`Fgi` from the array positions of one linkage group. |
def generateFeatureGroups(fgiContainer, linkageGroups, matchArr, timeKey,
massKey, logMassKey, massScalingFactor):
#Generate feature groups from the linked features
newFgiIds = list()
for linkageGroup in viewvalues(linkageGroups):
fgiId = fgiContainer._getNextFgiId()
fgi = fiGroupFromLinkageGroup(matchArr, linkageGroup, fgiId,
timeKey, massKey
)
fgiContainer.container[fgiId] = fgi
fgi.metrics = clusterMetrics(matchArr[timeKey][linkageGroup],
matchArr[logMassKey][linkageGroup],
massScalingFactor=massScalingFactor
)
fgi.rt = fgi.metrics['meanTime']
fgi.mz = fgi.metrics['meanMass']
newFgiIds.append(fgiId)
return newFgiIds | Generate feature group items (Fgi) from groups of linked feature items.
:param fgiContainer: container that the newly generated :class:`Fgi` are
added to
:param linkageGroups: a mapping {group id: array positions}, as generated
by :func:`proximityGrouping()`
:returns: a list of ids of the newly generated :class:`Fgi` |
def clusterMetrics(timeValues, massValues, massScalingFactor=1):
metrics = dict()
metrics['meanTime'] = numpy.mean(timeValues)
metrics['meanMass'] = numpy.mean(massValues)
metrics['devTime'] = timeValues - metrics['meanTime']
metrics['devMass'] = massValues - metrics['meanMass']
#metrics['devMass'] = (1-metrics['meanMass']/massValues)
metrics['spreadTime'] = numpy.max(timeValues) - numpy.min(timeValues)
metrics['spreadMass'] = numpy.max(massValues) - numpy.min(massValues)
#metrics['spreadMass'] = (1-numpy.min(massValues) / numpy.max(massValues))
metrics['devEuc'] = numpy.sqrt(numpy.power(metrics['devTime'], 2) +
numpy.power(metrics['devMass']*massScalingFactor, 2)
)
metrics['meanEuc'] = numpy.mean(metrics['devEuc'])
metrics['devTime'] = metrics['devTime'].tolist()
metrics['devMass'] = metrics['devMass'].tolist()
metrics['devEuc'] = metrics['devEuc'].tolist()
return metrics | Calculate mean, deviation and spread metrics for the time and mass values of a cluster. |
def getArrays(self, attr=None, sort=False, reverse=False,
selector=None, defaultValue=None, report='lfq'):
selector = (lambda fgi: fgi.isValid) if selector is None else selector
attr = attr if attr is not None else []
attr = set(['id', 'intensities'] + aux.toList(attr))
items = self.getItems(sort, reverse, selector)
arrays = _getArrays(items, attr, defaultValue)
for specfile in self._matrixTemplate:
arrays[specfile] = list()
for intensities in arrays['intensities']:
for specfile, intensity in zip(self._matrixTemplate, intensities):
arrays[specfile].append(intensity)
for specfile in self._matrixTemplate:
arrays[specfile] = numpy.array(arrays[specfile],
dtype=numpy.float64
)
del arrays['intensities']
return arrays | Return a dictionary of ``numpy.arrays`` with the specified attributes of all valid feature group items, including one intensity array per specfile. |
def getItems(self, sort=False, reverse=False, selector=None):
selector = (lambda fgi: fgi.isValid) if selector is None else selector
_container = {'_': self.container}
return _getItems(_container, '_', sort, reverse, selector) | Return an iterator over all feature group items that pass the selector. |
def load(self, path, name):
filename = name + '.fgic'
filepath = aux.joinpath(path, filename)
with zipfile.ZipFile(filepath, 'r') as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
jsonString = io.TextIOWrapper(containerZip.open('data'),
encoding='utf-8'
).read()
infoString = io.TextIOWrapper(containerZip.open('info'),
encoding='utf-8'
).read()
self.container = json.loads(jsonString, object_hook=Fgi.jsonHook)
self.info.update(json.loads(infoString))
self._matrixTemplate = self.info['_matrixTemplate']
del self.info['_matrixTemplate'] | Imports the specified ``fgic`` file from the hard disk.
:param path: filedirectory to which the ``fgic`` file is written.
:param name: filename, without file extension |
def updateIntensities(self, fiContainer, iKey='intensity'):
for fgi in listvalues(self.container):
intensities = list()
specfileIds = {i: j for i, j in zip(fgi.specfiles, fgi.featureIds)}
for specfile in self._matrixTemplate:
if specfile in specfileIds:
fi = fiContainer.getItem(specfile, specfileIds[specfile])
intensities.append(getattr(fi, iKey))
else:
intensities.append(None)
fgi.intensities = intensities | Update the intensity list of each feature group item from the corresponding feature items.
:param fiContainer:
:param iKey: Attribute name of :class:`Fi` that contains the feature
intensity or an abundance measure. Default "intensity" |
def command(argv, scope):
if inspect.ismodule(scope):
scope = vars(scope)
for cmd in scope.values():
if not isinstance(cmd, climethod):
continue
if cmd.__name__ in argv:
return cmd
return None | Looks up a particular command from the inputted arguments for the given \
scope.
:param argv | [<str>, ..]
scope | <dict>
:return <climethod> || None |
def commands(scope):
if inspect.ismodule(scope):
scope = vars(scope)
return [cmd for cmd in scope.values() if isinstance(cmd, climethod)] | Looks up all climethod instances from the inputted scope.
:return [<climethod>, ..] |
def generate(module):
inter = Interface(PROGRAM_NAME)
inter.register(module, True)
return inter | Generates a new interface from the inputted module.
:param module | <module>
:return <Interface> |
def parser(scope, usage=''):
subcmds = []
for cmd in commands(scope):
subcmds.append(cmd.usage())
if subcmds:
subcmds.sort()
usage += '\n\nSub-Commands:\n '
usage += '\n '.join(subcmds)
parse = PARSER_CLASS(usage=usage)
parse.prog = PROGRAM_NAME
return parse | Generates a default parser for the inputted scope.
:param scope | <dict> || <module>
usage | <str>
:return <OptionParser> |
def process(argv, scope, interface=None):
cmd = command(argv, scope)
if cmd:
sys.exit(cmd.run(argv))
name = PROGRAM_NAME
if interface:
name = interface.name()
_parser = parser(scope, '{0} [options] [<subcommand>] [<arg>]'.format(name))
options, args = _parser.parse_args(argv)
return options.__dict__, args | Processes any commands within the scope that match the inputted arguments.
If a subcommand is found, then it is run, and the system exits with the
return value from the command.
:param argv | [<str>, ..]
scope | <dict>
:return (<dict> options, <tuple> arguments) |
def usage(self):
arg_list = ' '.join(self.cmd_args).upper()
name = self.interface.name()
return '%s [options] %s %s' % (name, self.__name__, arg_list) | Returns the usage string for this method.
:return <str> |
def parser(self):
usage = self.usage()
if self.__doc__:
usage += '\n' + nstr(self.__doc__)
parse = PARSER_CLASS(usage=usage)
shorts = {v: k for k, v in self.short_keys.items()}
for key, default in self.cmd_opts.items():
# default key, cannot be duplicated
if key == 'help':
continue
try:
short = '-' + shorts[key]
except KeyError:
short = ''
if default is True:
action = 'store_false'
elif default is False:
action = 'store_true'
else:
action = 'store'
# add the option
parse.add_option(short, '--%s' % key, action=action, default=default)
return parse | Creates a parser for the method based on the documentation.
:return <OptionParser> |
def run(self, argv):
(opts, args) = self.parser().parse_args(argv)
func_args = args[args.index(self.__name__) + 1:]
func_kwds = opts.__dict__
return self.__call__(*func_args, **func_kwds) | Parses the inputted options and executes the method.
:param argv | [<str>, ..] |
def register(self, obj, autogenerate=False):
scope = self._scope
# register a module
if type(obj).__name__ == 'module':
for key, value in vars(obj).items():
# register a climethod
if isinstance(value, climethod):
value.interface = self
scope[key] = value
# register a function
elif inspect.isfunction(value) and autogenerate:
meth = climethod(value)
meth.interface = self
scope[key] = meth
# register a climethod
elif isinstance(obj, climethod):
obj.interface = self
scope[obj.__name__] = obj
# register a function
elif inspect.isfunction(obj) and autogenerate:
meth = climethod(obj)
meth.interface = self
scope[meth.__name__] = meth | Registers the inputted object to this scope.
:param obj | <module> || <function> || <climethod> |
def clone(cls, srcpath, destpath, encoding='utf-8'):
cmd = [GIT, 'clone', '--quiet', '--bare', srcpath, destpath]
subprocess.check_call(cmd)
return cls(destpath, encoding) | Clone an existing repository to a new bare repository. |
def create(cls, path, encoding='utf-8'):
cmd = [GIT, 'init', '--quiet', '--bare', path]
subprocess.check_call(cmd)
return cls(path, encoding) | Create a new bare repository |
def get_as_dict(self):
def convert(val):
if isinstance(val, tuple):
return tuple(convert(v) for v in val)
elif isinstance(val, list):
return [convert(v) for v in val]
elif isinstance(val, (dict, ElasticDict)):
return {k: convert(v) for k, v in val.iteritems()}
else:
return val
return convert(self.__dict__) | Exports self as an ordinary dict(), recursively replacing all instances
of ElasticDict() with dict()
:rtype: dict() |
def create_from(value):
def convert(val):
if isinstance(val, tuple):
return tuple(convert(v) for v in val)
elif isinstance(val, list):
return [convert(v) for v in val]
elif isinstance(val, (dict, ElasticDict)):
return ElasticDict({k: convert(v) for k, v in val.iteritems()})
else:
return val
return convert(value) | Create an instance of ElasticDict() where all nested dict()'s are
replaced with ElasticDict()
:rtype: ElasticDict (if value is dict()), else type(value) |
def cache(self, dependency: Dependency, value):
if dependency.threadlocal:
setattr(self._local, dependency.name, value)
elif dependency.singleton:
self._singleton[dependency.name] = value | Store an instance of dependency in the cache.
Does nothing if dependency is NOT a threadlocal
or a singleton.
:param dependency: The ``Dependency`` to cache
:param value: The value to cache for dependency
:type dependency: Dependency |
def cached(self, dependency):
if dependency.threadlocal:
return getattr(self._local, dependency.name, None)
elif dependency.singleton:
return self._singleton.get(dependency.name) | Get a cached instance of dependency.
:param dependency: The ``Dependency`` to retrieve the value for
:type dependency: ``Dependency``
:return: The cached value |
def _set(self, name, factory, singleton=False, threadlocal=False):
name = name or factory.__name__
factory._giveme_registered_name = name
dep = Dependency(name, factory, singleton, threadlocal)
self._registry[name] = dep | Add a dependency factory to the registry
:param name: Name of dependency
:param factory: function/callable that returns dependency
:param singleton: When True, makes the dependency a singleton.
Factory will only be called on first use, subsequent
uses receive a cached value.
:param threadlocal: When True, register dependency as a threadlocal singleton,
Same functionality as ``singleton`` except :class:`Threading.local` is used
to cache return values. |
def get(self, name: str):
dep = None
try:
dep = self._registry[name]
except KeyError:
raise DependencyNotFoundError(name) from None
value = self.cached(dep)
if value is None:
value = dep.factory()
self.cache(dep, value)
return value | Get an instance of dependency,
this can be either a cached instance
or a new one (in which case the factory is called) |
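A usage sketch of the registry and cache interplay, assuming ``injector`` is an instance of the surrounding class:

def make_session():
    return object()

injector.register(make_session, name='session', singleton=True)
a = injector.get('session')  #factory called, value cached
b = injector.get('session')  #cached value returned
assert a is b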
def register(self, function=None, *, singleton=False, threadlocal=False, name=None):
def decorator(function=None):
self._set(name, function, singleton, threadlocal)
return function
if function:
return decorator(function)
return decorator | Add an object to the injector's registry.
Can be used as a decorator like so:
>>> @injector.register
... def my_dependency(): ...
or a plain function call by passing in a callable
injector.register(my_dependency)
:param function: The function or callable to add to the registry
:param name: Set the name of the dependency. Defaults to the name of `function`
:param singleton: When True, register dependency as a singleton, this
means that `function` is called on first use and its
return value cached for subsequent uses. Defaults to False
:param threadlocal: When True, register dependency as a threadlocal singleton,
Same functionality as ``singleton`` except :class:`Threading.local` is used
to cache return values.
:type function: callable
:type singleton: bool
:type threadlocal: bool
:type name: string |
def inject(self, function=None, **names):
def decorator(function):
@wraps(function)
def wrapper(*args, **kwargs):
sig = signature(function)
params = sig.parameters
bound = sig.bind_partial(*args, **kwargs)
bound.apply_defaults()
injected_kwargs = {}
for key, value in params.items():
if key not in bound.arguments:
name = names.get(key)
if name:
# Raise error when dep named explicitly
# and missing
injected_kwargs[key] = self.get(name)
else:
try:
injected_kwargs[key] = self.get(key)
except DependencyNotFoundError as e:
warnings.warn(
ambigious_not_found_msg.format(key),
DependencyNotFoundWarning
)
injected_kwargs.update(bound.kwargs)
return function(*bound.args, **injected_kwargs)
return wrapper
if function:
return decorator(function)
return decorator | Inject dependencies into `function`'s arguments when called.
>>> @injector.inject
... def use_dependency(dependency_name):
...
>>> use_dependency()
The `Injector` will look for registered dependencies
matching named arguments and automatically pass
them to the given function when it's called.
:param function: The function to inject into
:type function: callable
:param \**names: in the form of ``argument='name'`` to override
the default behavior which matches dependency names with argument
names. |