def add_reference(self, reftype: str, label: str, target):
    # The self.data[reftype] dict springs into being during the
    # register_references event handler at startup, which looks in the
    # kb registry for all registered reference names.
    self.data[reftype][label] = target
Add a reference object to references under reftype/label = target.
def resource_references(self, resource) -> Mapping[str, List[Any]]:
    references = dict()
    for reference_label in resource.props.references:
        references[reference_label] = []
        # Iterate over each value on this field, e.g.
        # tags: tag1, tag2, tag3
        for target_label in resource.props.references.get(reference_label):
            # Ask the site to get the object
            target = self.get_reference(reference_label, target_label)
            references[reference_label].append(target)
    return references
Resolve and return reference resources pointed to by an object.

Fields in resource.props can flag that they are references by using the
references type. This method scans the model, finds any fields that are
references, and returns the reference resources they point to.

Note that we shouldn't get to the point of dangling references: our
custom Sphinx event should raise a reference error during the build
process (though maybe it is just a warning?)
def start(self, retry_limit=None):
    # Run tweepy stream
    wrapper_listener = TweepyWrapperListener(listener=self.listener)
    stream = tweepy.Stream(auth=self.client.tweepy_api.auth,
                           listener=wrapper_listener)
    retry_counter = 0
    while retry_limit is None or retry_counter <= retry_limit:
        try:
            retry_counter += 1
            if not self.client.config.get('user_stream'):
                logging.info('Listening to public stream')
                stream.filter(follow=self.filter.follow,
                              track=self.filter.track)
            else:
                if self.filter.follow:
                    logging.warning('Follow filters won\'t be used in user stream')
                logging.info('Listening to user stream')
                stream.userstream(track=self.filter.track)
        except AttributeError as e:
            # Known Tweepy issue: https://github.com/tweepy/tweepy/issues/576
            if "'NoneType' object has no attribute 'strip'" in str(e):
                pass
            else:
                raise
Try to connect to Twitter's streaming API.

:param retry_limit: The maximum number of retries in case of failures.
    Default is None (unlimited).
:raises :class:`~tweepy.error.TweepError`: If there's some critical API
    error
def _findProteinClusters(protToPeps, pepToProts):
    clusters = list()
    resolvingProteins = set(protToPeps)
    while resolvingProteins:
        protein = resolvingProteins.pop()
        proteinCluster = set([protein])
        peptides = set(protToPeps[protein])
        parsedPeptides = set()
        while len(peptides) != len(parsedPeptides):
            for peptide in peptides:
                proteinCluster.update(pepToProts[peptide])
            parsedPeptides.update(peptides)
            for protein in proteinCluster:
                peptides.update(protToPeps[protein])
        clusters.append(proteinCluster)
        resolvingProteins = resolvingProteins.difference(proteinCluster)
    return clusters
Find protein clusters in the specified protein to peptide mappings.

A protein cluster is a group of proteins that are somehow directly or
indirectly connected by shared peptides.

:param protToPeps: dict, for each protein (=key) contains a set of
    associated peptides (=value). For Example {protein: {peptide, ...}, ...}
:param pepToProts: dict, for each peptide (=key) contains a set of
    parent proteins (=value). For Example {peptide: {protein, ...}, ...}
:returns: a list of protein clusters, each cluster is a set of proteins
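A minimal sketch of the clustering behavior, using hypothetical toy
mappings that follow the conventions above:

# P1 and P2 share peptide 'b', so they form one cluster; P3 is isolated.
protToPeps = {'P1': {'a', 'b'}, 'P2': {'b', 'c'}, 'P3': {'d'}}
pepToProts = {'a': {'P1'}, 'b': {'P1', 'P2'}, 'c': {'P2'}, 'd': {'P3'}}
clusters = _findProteinClusters(protToPeps, pepToProts)
# -> [{'P1', 'P2'}, {'P3'}] (the order of clusters is not guaranteed)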
def _findSamesetProteins(protToPeps, proteins=None):
    proteins = viewkeys(protToPeps) if proteins is None else proteins
    equalEvidence = ddict(set)
    for protein in proteins:
        peptides = protToPeps[protein]
        equalEvidence[tuple(sorted(peptides))].add(protein)
    equalProteins = list()
    for proteins in viewvalues(equalEvidence):
        if len(proteins) > 1:
            equalProteins.append(tuple(sorted(proteins)))
    return equalProteins
Find proteins that are mapped to an identical set of peptides.

:param protToPeps: dict, for each protein (=key) contains a set of
    associated peptides (=value). For Example {protein: {peptide, ...}, ...}
:param proteins: iterable, proteins that are tested for having equal
    evidence. If not specified all proteins are tested
:returns: a list of sorted protein tuples that share equal peptide evidence
def _findSubsetProteins(proteins, protToPeps, pepToProts):
    proteinsEqual = lambda prot1, prot2: protToPeps[prot1] == protToPeps[prot2]

    subGroups = list()
    for protein in proteins:
        peptideCounts = Counter()
        for peptide in protToPeps[protein]:
            proteins = pepToProts[peptide]
            peptideCounts.update(proteins)
        peptideCount = peptideCounts.pop(protein)

        superGroups = set()
        for sharingProtein, sharedPeptides in peptideCounts.most_common():
            if peptideCount == sharedPeptides:
                if not proteinsEqual(protein, sharingProtein):
                    superGroups.add(sharingProtein)
            else:
                break
        if superGroups:
            subGroups.append((protein, superGroups))
    return subGroups
Find proteins whose peptides are a subset, but not a sameset, of other
proteins' peptides.

:param proteins: iterable, proteins that are tested for being a subset
:param pepToProts: dict, for each peptide (=key) contains a set of
    parent proteins (=value). For Example {peptide: {protein, ...}, ...}
:param protToPeps: dict, for each protein (=key) contains a set of
    associated peptides (=value). For Example {protein: {peptide, ...}, ...}
:returns: a list of pairs of proteins and their superset proteins.
    [(protein, {superset protein, ...}), ...]
def _mergeProteinEntries(proteinLists, protToPeps):
    mergedProtToPeps = dict(protToPeps)
    for proteins in proteinLists:
        for protein in proteins:
            peptides = mergedProtToPeps.pop(protein)
        mergedProtein = tuple(sorted(proteins))
        mergedProtToPeps[mergedProtein] = peptides
    return mergedProtToPeps
Returns a new "protToPeps" dictionary with entries merged that are present in proteinLists. NOTE: The key of the merged entry is a tuple of the sorted protein keys. This behaviour might change in the future; the tuple might be replaced by simply one of the protein entries which is then representative for all. :param proteinLists: a list of protein groups that will be merged [{protein, ...}, ...] :param protToPeps: dict, for each protein (=key) contains a set of associated peptides (=value). For Example {protein: {peptide, ...}, ...} :returns: dict, {protein: set([peptid, ...])}
def _reducedProtToPeps(protToPeps, proteins):
    return {k: v for k, v in viewitems(protToPeps) if k not in proteins}
Returns a new, reduced "protToPeps" dictionary that does not contain
entries present in "proteins".

:param protToPeps: dict, for each protein (=key) contains a set of
    associated peptides (=value). For Example {protein: {peptide, ...}, ...}
:param proteins: an iterable of proteins
:returns: dict, protToPeps not containing entries from "proteins"
def _findUniqueMappingValues(mapping):
    uniqueMappingValues = set()
    for entries in viewvalues(mapping):
        if len(entries) == 1:
            uniqueMappingValues.update(entries)
    return uniqueMappingValues
Find mapping entries that are unique for one key (value length of 1).

.. note:: This function can be used to find unique proteins by
    providing a peptide to protein mapping.

:param mapping: dict, for each key contains a set of entries
:returns: a set of unique mapping values
def _findUniqueMappingKeys(mapping):
    uniqueMappingKeys = set()
    for key, entries in viewitems(mapping):
        if len(entries) == 1:
            uniqueMappingKeys.add(key)
    return uniqueMappingKeys
Find mapping keys that only have one entry (value length of 1).

:param mapping: dict, for each key contains a set of entries
:returns: a set of unique mapping keys
def _invertMapping(mapping):
    invertedMapping = ddict(set)
    for key, values in viewitems(mapping):
        for value in values:
            invertedMapping[value].add(key)
    return invertedMapping
Inverts a protein to peptide or a peptide to protein mapping.

:param mapping: dict, for each key contains a set of entries
:returns: an inverted mapping in which each entry of the original
    values points to a set of the original keys
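A minimal round-trip sketch with hypothetical data: inverting a
protein-to-peptide mapping yields the corresponding peptide-to-protein
mapping, and inverting twice recovers the original.

protToPeps = {'P1': {'a', 'b'}, 'P2': {'b'}}
pepToProts = _invertMapping(protToPeps)
# -> {'a': {'P1'}, 'b': {'P1', 'P2'}}
assert _invertMapping(pepToProts) == protToPeps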
def _getValueCounts(mapping):
    return Counter({k: len(v) for k, v in viewitems(mapping)})
Returns a counter object; contains for each key of the mapping the
counts of the respective value element (= set length).

:param mapping: dict, for each key contains a set of entries.
:returns: a counter
def _mappingGetValueSet(mapping, keys):
    setUnion = set()
    for k in keys:
        setUnion = setUnion.union(mapping[k])
    return setUnion
Return a combined set of values from the mapping.

:param mapping: dict, for each key contains a set of entries
:returns: a set of combined entries
def _flattenMergedProteins(proteins):
    proteinSet = set()
    for protein in proteins:
        if isinstance(protein, tuple):
            proteinSet.update(protein)
        else:
            proteinSet.add(protein)
    return proteinSet
Return a set where merged protein entries in proteins are flattened.

:param proteins: an iterable of proteins, can contain merged protein
    entries in the form of tuple([protein1, protein2]).
:returns: a set of protein entries, where all entries are strings
def getGroups(self, proteinId):
    return [self.groups[gId] for gId in self._proteinToGroupIds[proteinId]]
Return a list of protein groups a protein is associated with.
def addProteinGroup(self, groupRepresentative):
    groupId = self._getNextGroupId()
    self.groups[groupId] = ProteinGroup(groupId, groupRepresentative)
    self.addLeadingToGroups(groupRepresentative, groupId)
    return groupId
Adds a new protein group and returns the groupId.

The groupId is defined using an internal counter, which is incremented
every time a protein group is added. The groupRepresentative is added
as a leading protein.

:param groupRepresentative: the protein representing the group
:returns: the protein group's groupId
def addLeadingToGroups(self, proteinIds, groupIds):
    for groupId in AUX.toList(groupIds):
        self.groups[groupId].addLeadingProteins(proteinIds)
        self._addProteinIdsToGroupMapping(proteinIds, groupId)
Add one or multiple leading proteins to one or multiple protein groups.

:param proteinIds: a proteinId or a list of proteinIds, a proteinId
    must be a string.
:param groupIds: a groupId or a list of groupIds, a groupId must be a
    string.
def addSubsetToGroups(self, proteinIds, groupIds):
    for groupId in AUX.toList(groupIds):
        self.groups[groupId].addSubsetProteins(proteinIds)
        self._addProteinIdsToGroupMapping(proteinIds, groupId)
Add one or multiple subset proteins to one or multiple protein groups.

:param proteinIds: a proteinId or a list of proteinIds, a proteinId
    must be a string.
:param groupIds: a groupId or a list of groupIds, a groupId must be a
    string.
def addSubsumableToGroups(self, proteinIds, groupIds):
    for groupId in AUX.toList(groupIds):
        self.groups[groupId].addSubsumableProteins(proteinIds)
        self._addProteinIdsToGroupMapping(proteinIds, groupId)
Add one or multiple subsumable proteins to one or multiple protein
groups.

:param proteinIds: a proteinId or a list of proteinIds, a proteinId
    must be a string.
:param groupIds: a groupId or a list of groupIds, a groupId must be a
    string.
def _addProteinIdsToGroupMapping(self, proteinIds, groupId):
    for proteinId in AUX.toList(proteinIds):
        self._proteinToGroupIds[proteinId].add(groupId)
Add a groupId to one or multiple entries of the internal
proteinToGroupId mapping.

:param proteinIds: a proteinId or a list of proteinIds, a proteinId
    must be a string.
:param groupId: str, a groupId
def _addProteins(self, proteinIds, containerNames):
    proteinIds = AUX.toList(proteinIds)
    for containerName in containerNames:
        proteinContainer = getattr(self, containerName)
        proteinContainer.update(proteinIds)
Add one or multiple proteinIds to the respective container.

:param proteinIds: a proteinId or a list of proteinIds, a proteinId
    must be a string.
:param containerNames: list, entries must be one or multiple of
    'leading', 'subset', 'subsumableProteins' or 'proteins'
def satisfies(self, other):
    if other.isnocare:
        return True
    if self.isnocare:
        return False
    if self.arbitrary:
        return True
    if self.constant and not other.arbitrary:
        return True
    if self.value is other.value and not other.arbitrary \
            and not other.constant:
        return True
    return False
Check if the capabilities of a primitive are enough to satisfy a
requirement.

Should be called on a Requirement that is acting as a capability of a
primitive. This method returning True means that the capability
advertised here is enough to handle representing the data described by
the Requirement passed in as 'other'.

Here is a chart showing what satisfies what.

             other
         ' '  A  C  0  1
    ' ' | Y   N  N  N  N
  s  A  | Y   Y  Y  Y  Y
  e  C  | Y   -  Y  Y  Y
  l  0  | Y   *  *  Y  N
  f  1  | Y   *  *  N  Y

' ' = No Care
A = arbitrary
C = Constant
0 = ZERO
1 = ONE

Y = YES
N = NO
- = Could satisfy with multiple instances
* = Not yet determined behavior. Used for bitbanging controllers.
def _list(self, foldername="INBOX", reverse=False, since=None):
    folder = self.folder \
        if foldername == "INBOX" \
        else self._getfolder(foldername)

    def sortcmp(d):
        try:
            return d[1].date
        except:
            return -1

    lst = folder.items() if not since else folder.items_since(since)
    sorted_lst = sorted(lst, key=sortcmp, reverse=1 if reverse else 0)
    itemlist = [(folder, key, msg) for key, msg in sorted_lst]
    return itemlist
Do structured list output.

Sorts the list by date, possibly reversed, filtered from 'since'.

The returned list is: foldername, message key, message object
def ls(self, foldername="INBOX", reverse=False, since=None,
       grep=None, field=None, stream=sys.stdout):
    if foldername == "":
        foldername = "INBOX"
    msg_list = self._list(foldername, reverse, since)
    for folder, mk, m in msg_list:
        try:
            # I am very unsure about this defaulting of foldername
            output_items = (
                "%s%s%s" % (folder.folder or foldername or "INBOX", SEPERATOR, mk),
                m.date,
                m.get_from()[0:50] if m.get_from() else "",
                m.get_flags(),
                re.sub("\n", "", m.get_subject() or "")
            )
            output_string = "% -20s % 20s % 50s [%s] %s" % output_items
            if not grep or (grep and grep in output_string):
                if field:
                    print(output_items[int(field)], file=stream)
                else:
                    print(output_string, file=stream)
        except IOError as e:
            if e.errno == errno.EPIPE:
                # Broken pipe we can ignore
                return
            self.logger.exception("whoops!")
        except Exception as e:
            self.logger.exception("whoops!")
Do standard text list of the folder to the stream.

'foldername' is the folder to list. INBOX by default.

'since' allows the listing to be date filtered since that date. It
should be a float, a time since epoch.

'grep' allows text matching on the whole record

'field' allows only 1 field to be output
def lisp(self, foldername="INBOX", reverse=False, since=None,
         stream=sys.stdout):
    def fromval(hdr):
        if hdr:
            return parseaddr(hdr)

    for folder, mk, m in self._list(foldername, reverse, since):
        try:
            print(json.dumps({
                'folder': folder.folder or foldername or "INBOX",
                'key': "%s%s%s" % (folder.folder or foldername or "INBOX",
                                   SEPERATOR, mk),
                'date': str(m.date),
                "flags": m.get_flags(),
                'from': fromval(m.get_from()),
                'subject': re.sub("\n|\'|\"", _escape, m.get_subject() or "")
            }), file=stream)
        except IOError as e:
            if e.errno == errno.EPIPE:
                # Broken pipe we can ignore
                return
            self.logger.exception("whoops!")
        except Exception as e:
            self.logger.exception("whoops!")
Do JSON list of the folder to the stream.

'since' allows the listing to be date filtered since that date. It
should be a float, a time since epoch.
def lsfolders(self, stream=sys.stdout):
    for f in self.folder.folders():
        print(f.folder.strip("."), file=stream)
List the subfolders
def _get(self, msgid):
    foldername, msgkey = msgid.split(SEPERATOR)
    folder = self.folder if foldername == "INBOX" else self._getfolder(foldername)

    # Now look up the message
    msg = folder[msgkey]
    msg.is_seen = True
    hdr = list(msg.items())
    for p in msg.walk():
        yield hdr, p
    return
Yields the message header against each part from the message.
def gettext(self, msgid, stream=sys.stdout,
            splitter="--text follows this line--\n"):
    for hdr, part in self._get(msgid):
        if part.get_content_type() == "text/plain":
            for name, val in hdr:
                # Use the subtype, since we're printing just that - tidy it up first
                if name.lower() == "content-type":
                    val = part["content-type"]
                    val = " ".join([l.strip() for l in val.split("\n")])
                print("%s: %s" % (name, val), file=stream)
            print(splitter, file=stream)
            payload = part.get_payload(decode=True)
            # There seems to be a problem with the parser not doing charsets for parts
            chartype = part.get_charset() \
                or _get_charset(part.get("Content-Type", "")) \
                or "us-ascii"
            print(payload.decode(chartype), file=stream)
            break
Get the first text part we can find and print it as a message.

This is a simple cowpath, most of the time you want the first plain
part.

'msgid' is the message to be used
'stream' is printed to with the header, splitter, first-textpart
'splitter' is text used to split the header from the body, Emacs uses
this
def getrawpart(self, msgid, stream=sys.stdout):
    for hdr, part in self._get(msgid):
        pl = part.get_payload(decode=True)
        if pl is not None:
            print(pl, file=stream)
            break
Get the first part from the message and print it raw.
def getrawpartid(self, msgid, partid, stream=sys.stdout):
    parts = [part for hdr, part in self._get(msgid)]
    part = parts[int(partid)]
    pl = part.get_payload(decode=True)
    if pl is not None:
        print(pl, file=stream)
Get a specific part from the message and print it raw.
def getraw(self, msgid, stream=sys.stdout):
    foldername, msgkey = msgid.split(SEPERATOR)
    folder = self.folder if foldername == "INBOX" else self._getfolder(foldername)
    msg = folder[msgkey]
    print(msg.content, file=stream)
Get the whole message and print it.
def getstruct(self, msgid, as_json=False, stream=sys.stdout):
    parts = [part.get_content_type() for hdr, part in self._get(msgid)]
    if as_json:
        print(json.dumps(parts), file=stream)
    else:
        for c in parts:
            print(c, file=stream)
Get and print the structure of the whole message (the content type of
each part). as_json indicates whether to print the part list as JSON
or not.
def _extract_alphabet(self, grammar):
    alphabet = set([])
    for terminal in grammar.Terminals:
        alphabet |= set([x for x in terminal])
    self.alphabet = list(alphabet)
Extract an alphabet from the given grammar.
def _mpda(self, re_grammar, splitstring=0):
    cnfgrammar = CNFGenerator(re_grammar)
    if not self.alphabet:
        self._extract_alphabet(cnfgrammar)
    cnftopda = CnfPda(self.alphabet)
    productions = {}
    nonterminals = []
    nonterminals.append(cnfgrammar.init_symbol)
    for key in list(cnfgrammar.grammar_nonterminals):
        if key != cnfgrammar.init_symbol:
            nonterminals.append(key)
    for key in list(cnfgrammar.grammar_nonterminals):
        j = 0
        productions[key] = {}
        for pair in cnfgrammar.grammar_rules:
            cnf_form = list(pair)
            if cnf_form[0] == key:
                productions[key][j] = {}
                if isinstance(cnf_form[1], type(())):
                    # Binary rule A -> B C
                    productions[key][j]['b0'] = list(cnf_form[1])[0]
                    productions[key][j]['b1'] = list(cnf_form[1])[1]
                else:
                    # Terminal rule A -> a
                    productions[key][j]['a'] = cnf_form[1]
                j = j + 1
    return cnftopda.initialize(nonterminals, productions,
                               list(cnfgrammar.grammar_terminals),
                               splitstring)
Args:
    re_grammar (list): A list of grammar rules
    splitstring (bool): A boolean for enabling or disabling the
        splitting of symbols using a space
Returns:
    PDA: The generated PDA
def yyparse(self, cfgfile, splitstring=0):
    re_grammar = self._read_file(cfgfile)
    mma = self._mpda(re_grammar, splitstring)
    return mma
Args:
    cfgfile (str): The path for the file containing the CFG rules
    splitstring (bool): A boolean for enabling or disabling the
        splitting of symbols using a space
Returns:
    PDA: The generated PDA
def natural(a, b):
    stra = nstr(a).lower()
    strb = nstr(b).lower()

    # test to see if the two are identical
    if stra == strb:
        return 0

    # look up all the pairs of items
    aresults = EXPR_NATURAL.findall(stra)
    bresults = EXPR_NATURAL.findall(strb)

    # make sure we have the same number of results
    bcount = len(bresults)
    for i in range(len(aresults)):
        # make sure we don't exceed the number of elements in b
        if bcount <= i:
            break

        atext, anum = aresults[i]
        btext, bnum = bresults[i]

        # compare the text components
        if atext != btext:
            return cmp(atext, btext)

        if not anum:
            anum = 0
        if not bnum:
            bnum = 0

        # compare the numeric components
        anum = int(anum)
        bnum = int(bnum)
        if anum != bnum:
            return cmp(anum, bnum)

    # b has less characters than a, so should sort before
    return 1
Sorts the inputted items by their natural order, trying to extract a
number from them to sort by.

:param a <str>
:param b <str>

:return <int> 1 || 0 || -1

:usage |>>> from projex import sorting
       |>>> a = [ 'test1', 'test2', 'test10', 'test20', 'test09' ]
       |>>> a.sort()
       |>>> print a
       |['test09', 'test1', 'test10', 'test2', 'test20']
       |>>> a.sort( sorting.natural )
       |>>> print a
       |['test1', 'test2', 'test09', 'test10', 'test20']
def action(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_ACTION', column, value, **kwargs)
The underlying GICS table provides codes and descriptions identifying
the current status or disposition of a grant project.

>>> GICS().action('action_code', 'A')
def applicant(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_APPLICANT', column, value, **kwargs)
Find the applicant information for a grant.

>>> GICS().applicant('zip_code', 94105)
def assistance(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_ASST_PGM', column, value, **kwargs)
Provides the Catalog of Federal Domestic Assistance (CFDA) codes and names.
def authority(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_AUTHORITY', column, value, **kwargs)
Provides codes and associated authorizing statutes.
def construction(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_CONSTRUCTION', column, value, **kwargs)
Identifies monetary, descriptive, and milestone information for
Wastewater Treatment construction grants.

>>> GICS().construction('complete_percent', 91)
def eligible_cost(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_ELIGIBLE_COST', column, value, **kwargs)
The assistance dollar amounts by eligible cost category.

>>> GICS().eligible_cost('amount', 100000)
def grant(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_GRANT', column, value, **kwargs)
Provides various award, project, and grant personnel information.

>>> GICS().grant('project_city_name', 'San Francisco')
def grant_assistance(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_GRANT_ASST_PGM', column, value, **kwargs)
Many-to-many table connecting grants and assistance.
def grant_authority(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_GRANT_AUTH', column, value, **kwargs)
Many-to-many table connecting grants and authority.
def lab_office(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_LAB_OFFICE', column, value, **kwargs)
Abbreviations, names, and locations of laboratories and offices.
def milestone(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_MILESTONE', column, value, **kwargs)
Status codes and related dates of certain grants.

>>> GICS().milestone('milestone_date', '16-MAR-01')
def record_type(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_RECORD_TYPE', column, value, **kwargs)
Codes and descriptions indicating whether an award is for a new project
or for the continuation of a currently funded one.

>>> GICS().record_type('record_type_code', 'A')
def srf_cap(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_SRF_CAP', column, value, **kwargs)
Fiscal dollar amounts for State Revolving Fund Capitalization Grants.

>>> GICS().srf_cap('grant_number', '340001900')
def status(self, column=None, value=None, **kwargs):
    return self._resolve_call('GIC_STATUS', column, value, **kwargs)
Provides codes and descriptions of project milestones.

>>> GICS().status('status_code', 'AF')
def recClearTag(element):
    children = element.getchildren()
    if len(children) > 0:
        for child in children:
            recClearTag(child)
    element.tag = clearTag(element.tag)
Applies maspy.xml.clearTag() to the tag attribute of the "element" and
recursively to all child elements.

:param element: an instance of lxml.etree._Element
def recRemoveTreeFormating(element):
    children = element.getchildren()
    if len(children) > 0:
        for child in children:
            recRemoveTreeFormating(child)
    if element.text is not None:
        if len(element.text.strip()) == 0:
            element.text = None
        else:
            element.text = element.text.strip()
    if element.tail is not None:
        if len(element.tail.strip()) == 0:
            element.tail = None
        else:
            element.tail = element.tail.strip()
Removes whitespace characters, which are leftovers from previous xml
formatting.

str.strip() is applied to the "text" and the "tail" attribute of the
element and recursively to all child elements.

:param element: an instance of lxml.etree._Element
def recCopyElement(oldelement):
    newelement = ETREE.Element(oldelement.tag, oldelement.attrib)
    if len(oldelement.getchildren()) > 0:
        for childelement in oldelement.getchildren():
            newelement.append(recCopyElement(childelement))
    return newelement
Generates a copy of an xml element and recursively of all child
elements.

:param oldelement: an instance of lxml.etree._Element
:returns: a copy of the "oldelement"

.. warning:: doesn't copy ``.text`` or ``.tail`` of xml elements
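A short sketch, assuming ETREE is lxml.etree (as the surrounding
docstrings suggest) and an lxml version that still provides the
deprecated getchildren():

import lxml.etree as ETREE

root = ETREE.fromstring('<run id="1"><spectrum index="0">text</spectrum></run>')
copied = recCopyElement(root)
assert copied.tag == root.tag
assert dict(copied.attrib) == dict(root.attrib)
# As the warning above states, .text and .tail are not copied:
assert copied[0].text is None and root[0].text == 'text'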
def cvParamFromDict(attributes):
    keys = ['accession', 'value', 'unitAccession']
    return tuple(attributes[key] if key in attributes else None
                 for key in keys)
Python representation of a mzML cvParam = tuple(accession, value,
unitAccession).

:param attributes: #TODO: docstring
:returns: #TODO: docstring
def userParamFromDict(attributes):
    keys = ['name', 'value', 'unitAccession', 'type']
    return tuple(attributes[key] if key in attributes else None
                 for key in keys)
Python representation of a mzML userParam = tuple(name, value,
unitAccession, type)

:param attributes: #TODO: docstring
:returns: #TODO: docstring
def getParam(xmlelement):
    elementTag = clearTag(xmlelement.tag)
    if elementTag in ['userParam', 'cvParam', 'referenceableParamGroupRef']:
        if elementTag == 'cvParam':
            param = cvParamFromDict(xmlelement.attrib)
        elif elementTag == 'userParam':
            param = userParamFromDict(xmlelement.attrib)
        else:
            param = refParamGroupFromDict(xmlelement.attrib)
    else:
        param = False
    return param
Converts an mzML xml element to a param tuple.

:param xmlelement: #TODO docstring
:returns: a param tuple or False if the xmlelement is not a parameter
    ('userParam', 'cvParam' or 'referenceableParamGroupRef')
def extractParams(xmlelement):
    params = list()
    children = list()
    for child in xmlelement.getchildren():
        param = getParam(child)
        if param:
            params.append(param)
        else:
            children.append(child)
    return params, children
#TODO docstring :param xmlelement: #TODO docstring :returns: #TODO docstring
def xmlAddParams(parentelement, params):
    if not params:
        return None
    for param in params:
        if len(param) == 3:
            cvAttrib = {'cvRef': param[0].split(':')[0],
                        'accession': param[0],
                        'name': oboTranslator.getNameWithId(param[0])}
            if param[1]:
                cvAttrib.update({'value': param[1]})
            else:
                cvAttrib.update({'value': ''})
            if param[2]:
                unitName = oboTranslator.getNameWithId(param[2])
                cvAttrib.update({'unitAccession': param[2],
                                 'unitCvRef': param[2].split(':')[0],
                                 'unitName': unitName})
            paramElement = ETREE.Element('cvParam', **cvAttrib)
        elif len(param) == 4:
            userAttrib = {'name': param[0]}
            if param[1]:
                userAttrib.update({'value': param[1]})
            else:
                userAttrib.update({'value': ''})
            if param[2]:
                userAttrib.update({'unitAccession': param[2],
                                   'unitCvRef': param[2].split(':')[0]})
            if param[3]:
                userAttrib.update({'type': param[3]})
            paramElement = ETREE.Element('userParam', **userAttrib)
        elif param[0] == 'ref':
            refAttrib = {'ref': param[1]}
            paramElement = ETREE.Element('referenceableParamGroupRef',
                                         **refAttrib)
        parentelement.append(paramElement)
Generates new mzML parameter xml elements and adds them to the
'parentelement' as xml children elements.

:param parentelement: :class:`xml.etree.Element`, an mzML element
:param params: a list of mzML parameter tuples ('cvParam', 'userParam'
    or 'referenceableParamGroupRef')
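A minimal usage sketch with a 4-tuple userParam, which sidesteps the
oboTranslator lookup that 3-tuple cvParams require; the parameter name
and value here are hypothetical:

parent = ETREE.Element('spectrum')
xmlAddParams(parent, [('myParam', '42', None, 'xsd:int')])
# parent now contains the child element:
# <userParam name="myParam" value="42" type="xsd:int"/>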
def interpretBitEncoding(bitEncoding):
    if bitEncoding == '64':
        floattype = 'd'  # 64-bit
        numpyType = numpy.float64
    elif bitEncoding == '32':
        floattype = 'f'  # 32-bit
        numpyType = numpy.float32
    else:
        errorText = ''.join(['bitEncoding \'', bitEncoding, '\' not defined. ',
                             'Must be \'64\' or \'32\''])
        raise TypeError(errorText)
    return (floattype, numpyType)
Returns a floattype string and a numpy array type.

:param bitEncoding: Must be either '64' or '32'
:returns: (floattype, numpyType)
def decodeBinaryData(binaryData, arrayLength, bitEncoding, compression):
    #TODO: should raise an error if a wrong compression is specified
    bitEncodedData = binaryData.encode("utf-8")
    bitDecodedData = B64DEC(bitEncodedData)
    floattype, numpyType = interpretBitEncoding(bitEncoding)
    if compression == 'zlib':
        decompressedData = zlib.decompress(bitDecodedData)
    else:
        decompressedData = bitDecodedData
    fmt = '{endian}{arraylength}{floattype}'.format(endian='<',
                                                    arraylength=arrayLength,
                                                    floattype=floattype)
    dataArray = numpy.array(UNPACK(fmt, decompressedData), dtype=numpyType)
    return dataArray
Function to decode a mzML byte array into a numpy array. This is the
inverse function of :func:`encodeBinaryData`. Concept inherited from
:func:`pymzml.spec.Spectrum._decode` of the python library
`pymzML <https://pymzml.github.io/>`_.

:param binaryData: #TODO: docstring
:param arrayLength: #TODO: docstring
:param bitEncoding: #TODO: docstring
:param compression: #TODO: docstring
:returns: #TODO: docstring
def encodeBinaryData(dataArray, bitEncoding, compression):
    #TODO: should raise an error if a wrong compression is specified
    arrayLength = len(dataArray)
    floattype, __ = interpretBitEncoding(bitEncoding)
    fmt = '{endian}{arraylength}{floattype}'.format(endian='<',
                                                    arraylength=arrayLength,
                                                    floattype=floattype)
    packedData = PACK(fmt, *dataArray)
    if compression == 'zlib':
        compressedData = zlib.compress(packedData)
    else:
        compressedData = packedData
    encodedData = B64ENC(compressedData)
    return encodedData, arrayLength
Function to encode a ``numpy.array`` into a mzML byte array. This is
the inverse function of :func:`decodeBinaryData`.

:param dataArray: #TODO: docstring
:param bitEncoding: #TODO: docstring
:param compression: #TODO: docstring
:returns: #TODO: docstring
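A plausible round trip between the two functions, assuming B64ENC and
B64DEC wrap base64.b64encode/b64decode as the names suggest; note that
encodeBinaryData then returns bytes while decodeBinaryData expects str:

import numpy

mzArray = numpy.array([100.0, 200.5, 300.25], dtype=numpy.float64)
encoded, arrayLength = encodeBinaryData(mzArray, '64', 'zlib')
decoded = decodeBinaryData(encoded.decode('utf-8'), arrayLength,
                           '64', 'zlib')
assert numpy.allclose(mzArray, decoded)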
def findBinaryDataType(params):
    binaryDataType = None
    cvParam = None
    for param in params:
        if param[0] in binaryDataArrayTypes:
            binaryDataType = binaryDataArrayTypes[param[0]]
            cvParam = param
            break
    return binaryDataType, cvParam
#TODO: docstring
from: http://www.peptideatlas.org/tmp/mzML1.1.0.html#binaryDataArray
a binaryDataArray "MUST supply a *child* term of MS:1000518 (binary
data type) only once"

:param params: #TODO: docstring
:returns: #TODO: docstring
def extractBinaries(binaryDataArrayList, arrayLength):
    extractedArrays = dict()
    arrayInfo = dict()
    for binaryData in binaryDataArrayList:
        if findParam(binaryData['params'], 'MS:1000523') is not None:
            bitEncoding = '64'
        else:
            bitEncoding = '32'
        if findParam(binaryData['params'], 'MS:1000574') is not None:
            compression = 'zlib'
        else:
            compression = None
        dataType, dataTypeParam = findBinaryDataType(binaryData['params'])
        if binaryData['binary']:
            extractedArrays[dataType] = decodeBinaryData(binaryData['binary'],
                                                         arrayLength,
                                                         bitEncoding,
                                                         compression)
        else:
            __, numpyType = interpretBitEncoding(bitEncoding)
            extractedArrays[dataType] = numpy.array([], dtype=numpyType)

        binaryData['binary'] = None
        arrayInfo[dataType] = {'dataProcessingRef': None,
                               'params': binaryData['params']}
        if 'dataProcessingRef' in binaryData:
            arrayInfo[dataType]['dataProcessingRef'] = \
                binaryData['dataProcessingRef']
    return extractedArrays, arrayInfo
#TODO: docstring :param binaryDataArrayList: #TODO: docstring :param arrayLength: #TODO: docstring :returns: #TODO: docstring
def sublistReader(xmlelement):
    # Note: actually I'm not 100% sure how this function behaves
    elements = list()
    params, children = extractParams(xmlelement)
    for child in children:
        currElement = dict()
        currElement.update(child.attrib)
        childparams, subchildren = extractParams(child)
        if childparams:
            currElement['params'] = childparams
        for subchild in subchildren:
            subchildTag = clearTag(subchild.tag)
            if 'List' in subchildTag:
                listelements, listparams = sublistReader(subchild)
                simplelist = [listelement['params'] for listelement in listelements]
                currElement[subchildTag] = simplelist
            else:
                subchildparams, _ = extractParams(subchild)
                currElement[subchildTag] = subchildparams
                if subchildTag == 'binary' and subchild.text:
                    currElement[subchildTag] = subchild.text.strip()
        elements.append(currElement)
    return elements, params
#TODO: docstring
def next(self):
    try:
        self.event, self.element = next(self.iterator)
        self.elementTag = clearTag(self.element.tag)
    except StopIteration:
        clearParsedElements(self.element)
        raise StopIteration
    return self.event, self.element, self.elementTag
#TODO: docstring :returns: #TODO: docstring
def loadMetadata(self):
    #TODO: change so that spectra don't have to be iterated to extract
    # the metadata node
    if self._parsed:
        raise TypeError('Mzml file already parsed.')
    [None for _ in self._parseMzml()]
    self._parsed = True
#TODO: docstring
def parseSpectra(self):
    # Note: the spectra need to be iterated completely to save the
    # metadataNode
    if self._parsed:
        raise TypeError('Mzml file already parsed.')
    self._parsed = True
    return self._parseMzml()
#TODO: docstring :returns: #TODO: docstring
def calc_partition_function(mass, omega_array, temperature_array):
    Kappa_t = mass*omega_array**2
    return _np.sqrt(4*_np.pi**2*_scipy.constants.Boltzmann**2
                    * temperature_array**2/(mass*Kappa_t))
Calculates the partition function of your system at each point in time.

Parameters
----------
mass : float
    The mass of the particle in kg
omega_array : array
    array which represents omega at every point in your time trace and
    should therefore have the same length as the Hamiltonian
temperature_array : array
    array which represents the temperature at every point in your time
    trace and should therefore have the same length as the Hamiltonian

Returns
-------
Partition function : array
    The Partition Function at every point in time over a given
    trap-frequency and temperature change.
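Since Kappa_t = mass*omega**2, the expression above reduces to
2*pi*k_B*T/(mass*omega) at each point in time. A small usage sketch
with hypothetical trap parameters, assuming the module-level imports
(_np for numpy, _scipy for scipy) from the source file:

mass = 1e-18                                     # kg, hypothetical
omega_array = 2*_np.pi*_np.array([50e3, 60e3])   # rad/s
temperature_array = _np.array([300.0, 300.0])    # K

Z = calc_partition_function(mass, omega_array, temperature_array)
# equivalent to:
# 2*_np.pi*_scipy.constants.Boltzmann*temperature_array/(mass*omega_array)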
def calc_entropy(phase_space_density_array):
    entropy = -_scipy.constants.Boltzmann*_np.log(phase_space_density_array)
    return entropy
Calculates the entropy of your system at each point in time for your
given phase space density evolution in time.

Parameters
----------
phase_space_density_array : array
    array which represents the phase space density at every point in
    time

Returns
-------
entropy : array
    The entropy of the particle at every point in time via the phase
    space density method.
def calc_hamiltonian(self, mass, omega_array):
    Kappa_t = mass*omega_array**2
    self.E_pot = 0.5*Kappa_t*self.position_data**2
    self.E_kin = 0.5*mass*(_np.insert(
        _np.diff(self.position_data), 0,
        (self.position_data[1]-self.position_data[0]))*self.SampleFreq)**2
    self.Hamiltonian = self.E_pot + self.E_kin
    return self.Hamiltonian
Calculates the standard (pot+kin) Hamiltonian of your system.

Parameters
----------
mass : float
    The mass of the particle in kg
omega_array : array
    array which represents omega at every point in your time trace and
    should therefore have the same length as self.position_data

Requirements
------------
self.position_data : array
    Already filtered for the degree of freedom of interest and
    converted into meters.

Returns
-------
Hamiltonian : array
    The calculated Hamiltonian
def calc_phase_space_density(self, mass, omega_array, temperature_array):
    return self.calc_hamiltonian(mass, omega_array) \
        / calc_partition_function(mass, omega_array, temperature_array)
Calculates the phase space density of your system at each point in
time.

Parameters
----------
mass : float
    The mass of the particle in kg
omega_array : array
    array which represents omega at every point in your time trace and
    should therefore have the same length as the Hamiltonian
temperature_array : array
    array which represents the temperature at every point in your time
    trace and should therefore have the same length as the Hamiltonian

Requirements
------------
self.position_data : array
    Already filtered for the degree of freedom of interest and
    converted into meters.

Returns
-------
Phase space density : array
    The phase space density at every point in time over a given
    trap-frequency and temperature change.
def extract_thermodynamic_quantities(self, temperature_array):
    beta = 1/(_scipy.constants.Boltzmann*temperature_array)
    self.Q = self.Hamiltonian*(_np.insert(_np.diff(beta), 0,
                                          beta[1]-beta[0])*self.SampleFreq)
    self.W = self.Hamiltonian-self.Q

    self.Delta_E_kin = _np.diff(self.E_kin)*self.SampleFreq
    self.Delta_E_pot = _np.diff(self.E_pot)*self.SampleFreq
    self.Delta_E = _np.diff(self.Hamiltonian)*self.SampleFreq

    return self.Q, self.W
Calculates the thermodynamic quantities of your system at each point in
time.

Calculated quantities: self.Q (heat), self.W (work), self.Delta_E_kin,
self.Delta_E_pot, self.Delta_E (change of Hamiltonian)

Parameters
----------
temperature_array : array
    array which represents the temperature at every point in your time
    trace and should therefore have the same length as the Hamiltonian

Requirements
------------
execute calc_hamiltonian on the DataObject first

Returns
-------
Q : array
    The heat exchanged by the particle at every point in time over a
    given trap-frequency and temperature change.
W : array
    The work "done" by the particle at every point in time over a given
    trap-frequency and temperature change.
def calc_mean_and_variance_of_variances(self, NumberOfOscillations):
    SplittedArraySize = int(self.SampleFreq/self.FTrap.n) * NumberOfOscillations
    VoltageArraySize = len(self.voltage)
    SnippetsVariances = _np.var(
        self.voltage[:VoltageArraySize - _np.mod(VoltageArraySize, SplittedArraySize)]
        .reshape(-1, SplittedArraySize), axis=1)
    return _np.mean(SnippetsVariances), _np.var(SnippetsVariances)
Calculates the mean and variance of a set of variances. This set is
obtained by splitting the timetrace into chunks of points with a length
of NumberOfOscillations oscillations.

Parameters
----------
NumberOfOscillations : int
    The number of oscillations each chunk of the timetrace used to
    calculate the variance should contain.

Returns
-------
Mean : float
Variance : float
def register_template_directory(kb_app: kb,
                                sphinx_app: Sphinx,
                                sphinx_env: BuildEnvironment,
                                docnames: List[str],
                                ):
    template_bridge = sphinx_app.builder.templates
    actions = ResourceAction.get_callbacks(kb_app)
    for action in actions:
        f = os.path.dirname(inspect.getfile(action))
        template_bridge.loaders.append(SphinxFileSystemLoader(f))
Add this resource's templates dir to template paths
def add_directives(kb_app: kb,
                   sphinx_app: Sphinx,
                   sphinx_env: BuildEnvironment,
                   docnames: List[str],
                   ):
    for k, v in list(kb_app.config.resources.items()):
        sphinx_app.add_directive(k, ResourceDirective)
For each resource type, register a new Sphinx directive
def stamp_title(kb_app: kb, sphinx_app: Sphinx, doctree: doctree):
    # First, find out which resource this is. Won't be easy.
    resources = sphinx_app.env.resources
    confdir = sphinx_app.confdir
    source = PurePath(doctree.attributes['source'])

    # Get the relative path inside the docs dir, without .rst, then
    # get the resource
    docname = str(source.relative_to(confdir)).split('.rst')[0]
    resource = resources.get(docname)
    if resource:
        # Stamp the title on the resource
        title = get_rst_title(doctree)
        resource.title = title
Walk the tree and extract the RST title into resource.title
def init_app(self, app, config_prefix="PYBANKID"):
    if "pybankid" not in app.extensions:
        app.extensions["pybankid"] = {}

    if config_prefix in app.extensions["pybankid"]:
        raise Exception('duplicate config_prefix "{0}"'.format(config_prefix))

    app.config.setdefault(self._config_key("CERT_PATH"), "")
    app.config.setdefault(self._config_key("KEY_PATH"), "")
    app.config.setdefault(self._config_key("TEST_SERVER"), False)

    # Adding the three url endpoints.
    app.add_url_rule("/authenticate/<personal_number>",
                     view_func=self._authenticate)
    app.add_url_rule("/sign/<personal_number>", view_func=self._sign)
    app.add_url_rule("/collect/<order_ref>", view_func=self._collect)

    if hasattr(app, "teardown_appcontext"):
        app.teardown_appcontext(self.teardown)
    else:
        app.teardown_request(self.teardown)
Initialize the `app` for use with this :class:`~PyBankID`. This is
called automatically if `app` is passed to :meth:`~PyBankID.__init__`.

The app is configured according to the configuration variables
``PREFIX_CERT_PATH``, ``PREFIX_KEY_PATH`` and ``PREFIX_TEST_SERVER``,
where "PREFIX" defaults to "PYBANKID".

:param flask.Flask app: the application to configure for use with
    this :class:`~PyBankID`
:param str config_prefix: determines the set of configuration
    variables used to configure this :class:`~PyBankID`.
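A hypothetical minimal setup; the PYBANKID_* keys follow the PREFIX
pattern described above, and the file paths are placeholders:

from flask import Flask

app = Flask(__name__)
app.config['PYBANKID_CERT_PATH'] = '/path/to/cert.pem'
app.config['PYBANKID_KEY_PATH'] = '/path/to/key.pem'
app.config['PYBANKID_TEST_SERVER'] = True

bankid = PyBankID(app)  # calls init_app(app) automatically
# or: bankid = PyBankID(); bankid.init_app(app)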
def client(self):
    ctx = stack.top
    attr_name = self._config_key("client")
    if ctx is not None:
        if not hasattr(ctx, attr_name):
            setattr(ctx, attr_name, BankIDClient(
                (current_app.config.get(self._config_key("CERT_PATH")),
                 current_app.config.get(self._config_key("KEY_PATH"))),
                current_app.config.get(self._config_key("TEST_SERVER"))))
        return getattr(ctx, attr_name)
The automatically created :py:class:`bankid.client.BankIDClient`
object.

:return: The BankID client.
:rtype: :py:class:`bankid.jsonclient.BankIDJSONClient`
def handle_exception(error):
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response
Simple method for handling exceptions raised by `PyBankID`.

:param flask_pybankid.FlaskPyBankIDError error: The exception to handle.
:return: A JSON response carrying the exception's dict representation,
    with the error's status code set on the response.
:rtype: flask.Response
def create_from_pybankid_exception(cls, exception):
    return cls(
        "{0}: {1}".format(exception.__class__.__name__, str(exception)),
        _exception_class_to_status_code.get(exception.__class__),
    )
Class method for initiating from a `PyBankID` exception.

:param bankid.exceptions.BankIDError exception:
:return: The wrapped exception.
:rtype: :py:class:`~FlaskPyBankIDError`
def to_dict(self):
    rv = dict(self.payload or ())
    rv["message"] = self.message
    return rv
Create a dict representation of this exception.

:return: The dictionary representation.
:rtype: dict
def sequence(minimum, maximum):
    function = 'sequences'
    opts = {'min': minimum, 'max': maximum, 'col': 1,
            'format': 'plain', 'rnd': 'new'}
    deal = get_http(RANDOM_URL, function, opts)
    deal_arr = str_to_arr(deal)
    return deal_arr
Randomize a sequence of integers.
def string(num, length, digits=False, upper=True, lower=True, unique=False):
    function = 'strings'
    # Convert arguments to random.org style
    # for a discussion on the method see: http://bit.ly/TKGkOF
    digits = convert(digits)
    upper = convert(upper)
    lower = convert(lower)
    unique = convert(unique)
    opts = {'num': num, 'len': length, 'digits': digits,
            'upperalpha': upper, 'loweralpha': lower,
            'format': 'plain', 'rnd': 'new'}
    seq = get_http(RANDOM_URL, function, opts)
    seq = seq.strip().split('\n')  # convert to list
    return seq
Random strings.
def quota(ip=None):
    # TODO: Add arbitrary user defined IP check
    url = 'http://www.random.org/quota/?format=plain'
    data = urlopen(url)
    if data.code == 200:
        return int(data.read().strip())
    return "ERROR: Server responded with code %s" % data.code
Check your quota.
def get_http(base_url, function, opts):
    url = os.path.join(base_url, function) + '/?' + urlencode(opts)
    data = urlopen(url)
    if data.code != 200:
        raise ValueError("Random.org returned server code: " + str(data.code))
    return data.read()
HTTP request generator.
def read(*p):
    with open(os.path.join(*p), 'r') as fi:
        return fi.read()
Build a file path from paths and return the contents.
def execute(self, processProtocol, command, env={},
            path=None, uid=None, gid=None, usePTY=0,
            childFDs=None):
    raise NotImplementedError()
Form a command and start a process in the desired environment.
def run(self, command, env={}, path=None, uid=None, gid=None,
        usePTY=0, childFDs=None):
    deferred = defer.Deferred()
    processProtocol = _SummaryProcessProtocol(deferred)
    d = defer.maybeDeferred(self.execute, processProtocol, command,
                            env, path, uid, gid, usePTY, childFDs)
    d.addErrback(deferred.errback)
    return deferred
Execute a command and return the results of the completed run.
def getOutput(self, command, env={}, path=None, uid=None, gid=None,
              usePTY=0, childFDs=None):
    deferred = defer.Deferred()
    processProtocol = _SummaryProcessProtocol(deferred)
    self.execute(processProtocol, command, env, path, uid, gid,
                 usePTY, childFDs)

    @deferred.addCallback
    def getStdOut(tuple_):
        stdout, _stderr, _returnCode = tuple_
        return stdout

    return deferred
Execute a command and get the output of the finished process.
def getExitCode(self, command, env={}, path=None, uid=None, gid=None,
                usePTY=0, childFDs=None):
    deferred = defer.Deferred()
    processProtocol = _SummaryProcessProtocol(deferred)
    self.execute(processProtocol, command, env, path, uid, gid,
                 usePTY, childFDs)

    @deferred.addCallback
    def getExitCodeCb(tuple_):
        _stdout, _stderr, exitCode = tuple_
        return exitCode

    return deferred
Execute a command and get the return code of the finished process.
def validate_task(original_task):
    task = original_task._asdict()

    # Default values for inputs and outputs
    if 'inputs' not in task or task['inputs'] is None:
        task['inputs'] = ['*']

    # Outputs list cannot be empty
    if ('outputs' not in task or
            task['outputs'] is None or
            len(task['outputs']) == 0):
        task['outputs'] = ['*']

    # Convert to tuples (even for single values)
    if not hasattr(task['inputs'], '__iter__') or isinstance(task['inputs'], str):
        task['inputs'] = (task['inputs'],)
    else:
        task['inputs'] = tuple(task['inputs'])

    if not hasattr(task['outputs'], '__iter__') or isinstance(task['outputs'], str):
        task['outputs'] = (task['outputs'],)
    else:
        task['outputs'] = tuple(task['outputs'])

    if not callable(task['fn']):
        raise TypeError('Task function must be a callable object')

    if (len(task['outputs']) > 1 and
            not inspect.isgeneratorfunction(task['fn'])):
        raise TypeError('Multiple outputs are only supported with '
                        'generator functions')

    if inspect.isgeneratorfunction(task['fn']):
        if task['outputs'][0] == '*':
            raise TypeError('Generator functions cannot be used for tasks '
                            'with output specification "*"')

    return Task(**task)
Validates task and adds default values for missing options using the
following steps.

1. If there is no input list specified or if it is None, the input
   spec is assumed to be ['*'].
2. If there are no outputs specified, or if the output spec is None or
   an empty list, the output spec is assumed to be ['*'].
3. If the input or output spec is not iterable, they are converted
   into single element tuples. If they are any iterable, they are
   converted into tuples.
4. The task['fn'] option must be callable.
5. If number of outputs is more than one, task['fn'] must be a
   generator function.
6. Generator functions are not supported for output spec of '*'.

Returns new task with updated options
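A small sketch of the normalization, assuming Task is a namedtuple with
fields ('fn', 'inputs', 'outputs') as the code above implies:

def double(x):
    return 2 * x

validated = validate_task(Task(fn=double, inputs='a', outputs='b'))
# Strings become one-element tuples:
# validated.inputs == ('a',) and validated.outputs == ('b',)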
def run_task(task, workspace):
    data = copy.copy(workspace)
    task = validate_task(task)

    # Prepare input to task
    inputs = [input_parser(key, data) for key in task.inputs]

    if inspect.isgeneratorfunction(task.fn):
        # Multiple output task
        # Assuming number of outputs are equal to number of return values
        data.update(zip(task.outputs, task.fn(*inputs)))
    else:
        # Single output task
        results = task.fn(*inputs)
        if task.outputs[0] != '*':
            results = {task.outputs[0]: results}
        elif not isinstance(results, dict):
            raise TypeError('Result should be a dict for output type *')
        data.update(results)
    return data
Runs the task and updates the workspace with results.

Parameters
----------
task - dict
    Task Description

Examples:
    {'task': task_func, 'inputs': ['a', 'b'], 'outputs': 'c'}
    {'task': task_func, 'inputs': '*', 'outputs': '*'}
    {'task': task_func, 'inputs': ['*','a'], 'outputs': 'b'}

Returns a new workspace with results
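A hypothetical single-output run, assuming input_parser(key, data)
resolves plain keys to their workspace values:

workspace = {'a': 1, 'b': 2}
task = Task(fn=lambda a, b: a + b, inputs=['a', 'b'], outputs='c')
result = run_task(task, workspace)
# -> {'a': 1, 'b': 2, 'c': 3}; the original workspace is not mutated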
def run_hook(name, workspace, hooks):
    data = copy.copy(workspace)
    for hook_listener in hooks.get(name, []):
        # Hook functions may mutate the data and return nothing
        hook_listener(data)
    return data
Runs all hooks added under the given name.

Parameters
----------
name - str
    Name of the hook to invoke
workspace - dict
    Workspace that the hook functions operate on
hooks - dict of lists
    Mapping with hook names and callback functions
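A minimal sketch: each hook listener receives the copied workspace and
mutates it in place (the hook name here is hypothetical).

def log_x(data):
    data['log'] = 'x is %s' % data['x']

hooks = {'after_task': [log_x]}
result = run_hook('after_task', {'x': 1}, hooks)
# -> {'x': 1, 'log': 'x is 1'}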
def add_task(self, fn, inputs=None, outputs=None):
    self.tasks.append(Task(fn, inputs, outputs))
    return self
Adds a task to the workflow.

Returns self to facilitate chaining method calls.
def add_hook(self, name, function):
    if not callable(function):
        raise ValueError('Hook function should be callable')
    if name not in self.hooks:
        self.hooks[name] = []
    self.hooks[name].append(function)
    return self
Adds a function to be called for hook of a given name.

The function gets the entire workspace as input and does not return
anything.

Example:
    def hook_fcn(workspace):
        pass
def dns(self):
    dns = {
        'elb': self.dns_elb(),
        'elb_region': self.dns_elb_region(),
        'global': self.dns_global(),
        'region': self.dns_region(),
        'instance': self.dns_instance(),
    }
    return dns
DNS details.
def s3_app_bucket(self, include_region=False):
    if include_region:
        s3_app_bucket = self.format['s3_app_region_bucket'].format(**self.data)
    else:
        s3_app_bucket = self.format['s3_app_bucket'].format(**self.data)
    return s3_app_bucket
Generate s3 application bucket name.

Args:
    include_region (bool): Include region in the name generation.
def shared_s3_app_bucket(self, include_region=False):
    if include_region:
        shared_s3_app_bucket = self.format['shared_s3_app_region_bucket'].format(**self.data)
    else:
        shared_s3_app_bucket = self.format['shared_s3_app_bucket'].format(**self.data)
    return shared_s3_app_bucket
Generate shared s3 application bucket name.

Args:
    include_region (bool): Include region in the name generation.
def iam(self):
    iam = {
        'group': self.format['iam_group'].format(**self.data),
        'lambda_role': self.format['iam_lambda_role'].format(**self.data),
        'policy': self.format['iam_policy'].format(**self.data),
        'profile': self.format['iam_profile'].format(**self.data),
        'role': self.format['iam_role'].format(**self.data),
        'user': self.format['iam_user'].format(**self.data),
        'base': self.format['iam_base'].format(**self.data),
    }
    return iam
Generate IAM details.
def archaius(self):
    bucket = self.format['s3_bucket'].format(**self.data)
    path = self.format['s3_bucket_path'].format(**self.data)
    archaius_name = self.format['s3_archaius_name'].format(**self.data)
    archaius = {'s3': archaius_name, 'bucket': bucket, 'path': path}
    return archaius
Generate archaius bucket path.