Dataset schema (each record below repeats these fields in this order):
  repo              string, length 7 to 54
  path              string, length 4 to 192
  url               string, length 87 to 284
  code              string, length 78 to 104k
  code_tokens       sequence
  docstring         string, length 1 to 46.9k
  docstring_tokens  sequence
  language          string, 1 distinct value
  partition         string, 3 distinct values
riga/law
law/target/formatter.py
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/target/formatter.py#L73-L82
def find_formatter(name, path): """ Returns the formatter class whose name attribute is *name* when *name* is not *AUTO_FORMATTER*. Otherwise, the first formatter that accepts *path* is returned. Internally, this method simply uses :py:func:`get_formatter` or :py:func:`find_formatters` depending on the value of *name*. """ if name == AUTO_FORMATTER: return find_formatters(path, silent=False)[0] else: return get_formatter(name, silent=False)
[ "def", "find_formatter", "(", "name", ",", "path", ")", ":", "if", "name", "==", "AUTO_FORMATTER", ":", "return", "find_formatters", "(", "path", ",", "silent", "=", "False", ")", "[", "0", "]", "else", ":", "return", "get_formatter", "(", "name", ",", "silent", "=", "False", ")" ]
Returns the formatter class whose name attribute is *name* when *name* is not *AUTO_FORMATTER*. Otherwise, the first formatter that accepts *path* is returned. Internally, this method simply uses :py:func:`get_formatter` or :py:func:`find_formatters` depending on the value of *name*.
[ "Returns", "the", "formatter", "class", "whose", "name", "attribute", "is", "*", "name", "*", "when", "*", "name", "*", "is", "not", "*", "AUTO_FORMATTER", "*", ".", "Otherwise", "the", "first", "formatter", "that", "accepts", "*", "path", "*", "is", "returned", ".", "Internally", "this", "method", "simply", "uses", ":", "py", ":", "func", ":", "get_formatter", "or", ":", "py", ":", "func", ":", "find_formatters", "depending", "on", "the", "value", "of", "*", "name", "*", "." ]
python
train
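The dispatch in find_formatter is easiest to see with a small standalone sketch. Everything here is invented for illustration (the registry, the formatter names, the extension check); only the AUTO_FORMATTER branch mirrors the function above, and law's real get_formatter/find_formatters do more (for example, honouring the silent flag).

AUTO_FORMATTER = "auto"
FORMATTERS = {"json": "JSONFormatter", "zip": "ZipFormatter"}  # hypothetical registry

def get_formatter(name, silent=True):
    # explicit lookup by formatter name
    return FORMATTERS[name]

def find_formatters(path, silent=True):
    # every formatter whose (made-up) extension accepts the path
    return [cls for ext, cls in FORMATTERS.items() if path.endswith("." + ext)]

def find_formatter(name, path):
    # same dispatch as the function above
    if name == AUTO_FORMATTER:
        return find_formatters(path, silent=False)[0]
    return get_formatter(name, silent=False)

print(find_formatter(AUTO_FORMATTER, "events.json"))  # JSONFormatter
print(find_formatter("zip", "events.json"))           # ZipFormatter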
onelogin/python3-saml
src/onelogin/saml2/logout_response.py
https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/logout_response.py#L69-L142
def is_valid(self, request_data, request_id=None, raise_exceptions=False): """ Determines if the SAML LogoutResponse is valid :param request_id: The ID of the LogoutRequest sent by this SP to the IdP :type request_id: string :param raise_exceptions: Whether to return false on failure or raise an exception :type raise_exceptions: Boolean :return: Returns if the SAML LogoutResponse is or not valid :rtype: boolean """ self.__error = None try: idp_data = self.__settings.get_idp_data() idp_entity_id = idp_data['entityId'] get_data = request_data['get_data'] if self.__settings.is_strict(): res = OneLogin_Saml2_XML.validate_xml(self.document, 'saml-schema-protocol-2.0.xsd', self.__settings.is_debug_active()) if isinstance(res, str): raise OneLogin_Saml2_ValidationError( 'Invalid SAML Logout Request. Not match the saml-schema-protocol-2.0.xsd', OneLogin_Saml2_ValidationError.INVALID_XML_FORMAT ) security = self.__settings.get_security_data() # Check if the InResponseTo of the Logout Response matches the ID of the Logout Request (requestId) if provided in_response_to = self.document.get('InResponseTo', None) if request_id is not None and in_response_to and in_response_to != request_id: raise OneLogin_Saml2_ValidationError( 'The InResponseTo of the Logout Response: %s, does not match the ID of the Logout request sent by the SP: %s' % (in_response_to, request_id), OneLogin_Saml2_ValidationError.WRONG_INRESPONSETO ) # Check issuer issuer = self.get_issuer() if issuer is not None and issuer != idp_entity_id: raise OneLogin_Saml2_ValidationError( 'Invalid issuer in the Logout Response (expected %(idpEntityId)s, got %(issuer)s)' % { 'idpEntityId': idp_entity_id, 'issuer': issuer }, OneLogin_Saml2_ValidationError.WRONG_ISSUER ) current_url = OneLogin_Saml2_Utils.get_self_url_no_query(request_data) # Check destination destination = self.document.get('Destination', None) if destination and current_url not in destination: raise OneLogin_Saml2_ValidationError( 'The LogoutResponse was received at %s instead of %s' % (current_url, destination), OneLogin_Saml2_ValidationError.WRONG_DESTINATION ) if security['wantMessagesSigned']: if 'Signature' not in get_data: raise OneLogin_Saml2_ValidationError( 'The Message of the Logout Response is not signed and the SP require it', OneLogin_Saml2_ValidationError.NO_SIGNED_MESSAGE ) return True # pylint: disable=R0801 except Exception as err: self.__error = str(err) debug = self.__settings.is_debug_active() if debug: print(err) if raise_exceptions: raise return False
[ "def", "is_valid", "(", "self", ",", "request_data", ",", "request_id", "=", "None", ",", "raise_exceptions", "=", "False", ")", ":", "self", ".", "__error", "=", "None", "try", ":", "idp_data", "=", "self", ".", "__settings", ".", "get_idp_data", "(", ")", "idp_entity_id", "=", "idp_data", "[", "'entityId'", "]", "get_data", "=", "request_data", "[", "'get_data'", "]", "if", "self", ".", "__settings", ".", "is_strict", "(", ")", ":", "res", "=", "OneLogin_Saml2_XML", ".", "validate_xml", "(", "self", ".", "document", ",", "'saml-schema-protocol-2.0.xsd'", ",", "self", ".", "__settings", ".", "is_debug_active", "(", ")", ")", "if", "isinstance", "(", "res", ",", "str", ")", ":", "raise", "OneLogin_Saml2_ValidationError", "(", "'Invalid SAML Logout Request. Not match the saml-schema-protocol-2.0.xsd'", ",", "OneLogin_Saml2_ValidationError", ".", "INVALID_XML_FORMAT", ")", "security", "=", "self", ".", "__settings", ".", "get_security_data", "(", ")", "# Check if the InResponseTo of the Logout Response matches the ID of the Logout Request (requestId) if provided", "in_response_to", "=", "self", ".", "document", ".", "get", "(", "'InResponseTo'", ",", "None", ")", "if", "request_id", "is", "not", "None", "and", "in_response_to", "and", "in_response_to", "!=", "request_id", ":", "raise", "OneLogin_Saml2_ValidationError", "(", "'The InResponseTo of the Logout Response: %s, does not match the ID of the Logout request sent by the SP: %s'", "%", "(", "in_response_to", ",", "request_id", ")", ",", "OneLogin_Saml2_ValidationError", ".", "WRONG_INRESPONSETO", ")", "# Check issuer", "issuer", "=", "self", ".", "get_issuer", "(", ")", "if", "issuer", "is", "not", "None", "and", "issuer", "!=", "idp_entity_id", ":", "raise", "OneLogin_Saml2_ValidationError", "(", "'Invalid issuer in the Logout Response (expected %(idpEntityId)s, got %(issuer)s)'", "%", "{", "'idpEntityId'", ":", "idp_entity_id", ",", "'issuer'", ":", "issuer", "}", ",", "OneLogin_Saml2_ValidationError", ".", "WRONG_ISSUER", ")", "current_url", "=", "OneLogin_Saml2_Utils", ".", "get_self_url_no_query", "(", "request_data", ")", "# Check destination", "destination", "=", "self", ".", "document", ".", "get", "(", "'Destination'", ",", "None", ")", "if", "destination", "and", "current_url", "not", "in", "destination", ":", "raise", "OneLogin_Saml2_ValidationError", "(", "'The LogoutResponse was received at %s instead of %s'", "%", "(", "current_url", ",", "destination", ")", ",", "OneLogin_Saml2_ValidationError", ".", "WRONG_DESTINATION", ")", "if", "security", "[", "'wantMessagesSigned'", "]", ":", "if", "'Signature'", "not", "in", "get_data", ":", "raise", "OneLogin_Saml2_ValidationError", "(", "'The Message of the Logout Response is not signed and the SP require it'", ",", "OneLogin_Saml2_ValidationError", ".", "NO_SIGNED_MESSAGE", ")", "return", "True", "# pylint: disable=R0801", "except", "Exception", "as", "err", ":", "self", ".", "__error", "=", "str", "(", "err", ")", "debug", "=", "self", ".", "__settings", ".", "is_debug_active", "(", ")", "if", "debug", ":", "print", "(", "err", ")", "if", "raise_exceptions", ":", "raise", "return", "False" ]
Determines if the SAML LogoutResponse is valid :param request_id: The ID of the LogoutRequest sent by this SP to the IdP :type request_id: string :param raise_exceptions: Whether to return false on failure or raise an exception :type raise_exceptions: Boolean :return: Returns if the SAML LogoutResponse is or not valid :rtype: boolean
[ "Determines", "if", "the", "SAML", "LogoutResponse", "is", "valid", ":", "param", "request_id", ":", "The", "ID", "of", "the", "LogoutRequest", "sent", "by", "this", "SP", "to", "the", "IdP", ":", "type", "request_id", ":", "string" ]
python
train
bitesofcode/projexui
projexui/widgets/xchart/xchartaxis.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchartaxis.py#L118-L133
def labelCount(self): """ Returns the label count for this axis. If the labels have been defined then the length of the labels list will be provided, otherwise the hardcoded label count will be returned. :return <int> """ if self._labels is None: count = self.maximumLabelCount() if count is None: return 1 else: return count return len(self._labels)
[ "def", "labelCount", "(", "self", ")", ":", "if", "self", ".", "_labels", "is", "None", ":", "count", "=", "self", ".", "maximumLabelCount", "(", ")", "if", "count", "is", "None", ":", "return", "1", "else", ":", "return", "count", "return", "len", "(", "self", ".", "_labels", ")" ]
Returns the label count for this axis. If the labels have been defined then the length of the labels list will be provided, otherwise the hardcoded label count will be returned. :return <int>
[ "Returns", "the", "label", "count", "for", "this", "axis", ".", "If", "the", "labels", "have", "been", "defined", "then", "the", "length", "of", "the", "labels", "list", "will", "be", "provided", "otherwise", "the", "hardcoded", "label", "count", "will", "be", "returned", ".", ":", "return", "<int", ">" ]
python
train
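labelCount resolves in three steps: explicit labels win, otherwise maximumLabelCount, otherwise 1. A hypothetical standalone restatement of that fallback, with no Qt or projexui dependency:

def label_count(labels, maximum_label_count):
    # explicit labels take precedence; otherwise fall back to the
    # configured maximum, defaulting to 1 when that is unset too
    if labels is None:
        return 1 if maximum_label_count is None else maximum_label_count
    return len(labels)

print(label_count(None, None))      # 1
print(label_count(None, 8))         # 8
print(label_count(['a', 'b'], 8))   # 2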
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/api.py
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L170-L187
def get_kb_mapping(kb_name="", key="", value="", match_type="e", default="", limit=None): """Get one unique mapping. If not found, return default. :param kb_name: the name of the kb :param key: include only lines matching this on left side in the results :param value: include only lines matching this on right side in the results :param match_type: s = substring match, e = exact match :param default: default value if no mapping is found :return: a mapping """ mappings = get_kb_mappings(kb_name, key=key, value=value, match_type=match_type, limit=limit) if len(mappings) == 0: return default else: return mappings[0]
[ "def", "get_kb_mapping", "(", "kb_name", "=", "\"\"", ",", "key", "=", "\"\"", ",", "value", "=", "\"\"", ",", "match_type", "=", "\"e\"", ",", "default", "=", "\"\"", ",", "limit", "=", "None", ")", ":", "mappings", "=", "get_kb_mappings", "(", "kb_name", ",", "key", "=", "key", ",", "value", "=", "value", ",", "match_type", "=", "match_type", ",", "limit", "=", "limit", ")", "if", "len", "(", "mappings", ")", "==", "0", ":", "return", "default", "else", ":", "return", "mappings", "[", "0", "]" ]
Get one unique mapping. If not found, return default. :param kb_name: the name of the kb :param key: include only lines matching this on left side in the results :param value: include only lines matching this on right side in the results :param match_type: s = substring match, e = exact match :param default: default value if no mapping is found :return: a mapping
[ "Get", "one", "unique", "mapping", ".", "If", "not", "found", "return", "default", "." ]
python
train
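A quick way to see the "first mapping or default" rule is to stub out get_kb_mappings. The in-memory knowledge base and the stub below are invented; only the final selection follows the function above.

KB = {'greeting': 'hello', 'farewell': 'bye'}  # stand-in for Invenio storage

def get_kb_mappings(kb_name, key='', value='', match_type='e', limit=None):
    # drastically simplified: exact key match only
    return [{'key': k, 'value': v} for k, v in KB.items() if not key or k == key]

def get_kb_mapping(kb_name='', key='', value='', match_type='e',
                   default='', limit=None):
    mappings = get_kb_mappings(kb_name, key=key, value=value,
                               match_type=match_type, limit=limit)
    return mappings[0] if mappings else default

print(get_kb_mapping('demo', key='greeting'))                # {'key': 'greeting', 'value': 'hello'}
print(get_kb_mapping('demo', key='unknown', default='n/a'))  # n/a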
saltstack/salt
salt/modules/win_lgpo.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_lgpo.py#L6596-L6615
def _regexSearchKeyValueCombo(policy_data, policy_regpath, policy_regkey): ''' helper function to do a search of Policy data from a registry.pol file for a policy_regpath and policy_regkey combo ''' if policy_data: specialValueRegex = salt.utils.stringutils.to_bytes(r'(\*\*Del\.|\*\*DelVals\.){0,1}') _thisSearch = b''.join([salt.utils.stringutils.to_bytes(r'\['), re.escape(policy_regpath), b'\00;', specialValueRegex, re.escape(policy_regkey), b'\00;']) match = re.search(_thisSearch, policy_data, re.IGNORECASE) if match: # add 2 so we get the ']' and the \00 # to return the full policy entry return policy_data[match.start():(policy_data.index(b']', match.end())) + 2] return None
[ "def", "_regexSearchKeyValueCombo", "(", "policy_data", ",", "policy_regpath", ",", "policy_regkey", ")", ":", "if", "policy_data", ":", "specialValueRegex", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_bytes", "(", "r'(\\*\\*Del\\.|\\*\\*DelVals\\.){0,1}'", ")", "_thisSearch", "=", "b''", ".", "join", "(", "[", "salt", ".", "utils", ".", "stringutils", ".", "to_bytes", "(", "r'\\['", ")", ",", "re", ".", "escape", "(", "policy_regpath", ")", ",", "b'\\00;'", ",", "specialValueRegex", ",", "re", ".", "escape", "(", "policy_regkey", ")", ",", "b'\\00;'", "]", ")", "match", "=", "re", ".", "search", "(", "_thisSearch", ",", "policy_data", ",", "re", ".", "IGNORECASE", ")", "if", "match", ":", "# add 2 so we get the ']' and the \\00", "# to return the full policy entry", "return", "policy_data", "[", "match", ".", "start", "(", ")", ":", "(", "policy_data", ".", "index", "(", "b']'", ",", "match", ".", "end", "(", ")", ")", ")", "+", "2", "]", "return", "None" ]
helper function to do a search of Policy data from a registry.pol file for a policy_regpath and policy_regkey combo
[ "helper", "function", "to", "do", "a", "search", "of", "Policy", "data", "from", "a", "registry", ".", "pol", "file", "for", "a", "policy_regpath", "and", "policy_regkey", "combo" ]
python
train
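Because the lookup is a plain bytes regex, it can be exercised against a fabricated Registry.pol-style entry. The helper below swaps salt.utils.stringutils.to_bytes for bytes literals, and the sample blob is invented (real Registry.pol payloads are UTF-16 encoded); the pattern construction and the slice arithmetic mirror the function above.

import re

def find_policy_entry(policy_data, policy_regpath, policy_regkey):
    special_value = rb'(\*\*Del\.|\*\*DelVals\.){0,1}'
    pattern = b''.join([rb'\[', re.escape(policy_regpath), b'\x00;',
                        special_value, re.escape(policy_regkey), b'\x00;'])
    match = re.search(pattern, policy_data, re.IGNORECASE)
    if match:
        # +2 keeps the closing ']' and the trailing NUL, as in the original
        return policy_data[match.start():policy_data.index(b']', match.end()) + 2]
    return None

# fabricated single-entry blob, roughly shaped like a Registry.pol record
blob = (b'[Software\\Policies\\Example\x00;Setting\x00;'
        b'\x01\x00\x00\x00;\x04\x00\x00\x00;\x01\x00\x00\x00]\x00')
print(find_policy_entry(blob, b'Software\\Policies\\Example', b'Setting'))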
klahnakoski/pyLibrary
mo_math/vendor/strangman/stats.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_math/vendor/strangman/stats.py#L306-L322
def medianscore(inlist): """ Returns the 'middle' score of the passed list. If there is an even number of scores, the mean of the 2 middle scores is returned. Usage: lmedianscore(inlist) """ newlist = copy.deepcopy(inlist) newlist.sort() if len(newlist) % 2 == 0: # if even number of scores, average middle 2 index = len(newlist) / 2 # integer division correct median = float(newlist[index] + newlist[index - 1]) / 2 else: index = len(newlist) / 2 # int divsion gives mid value when count from 0 median = newlist[index] return median
[ "def", "medianscore", "(", "inlist", ")", ":", "newlist", "=", "copy", ".", "deepcopy", "(", "inlist", ")", "newlist", ".", "sort", "(", ")", "if", "len", "(", "newlist", ")", "%", "2", "==", "0", ":", "# if even number of scores, average middle 2", "index", "=", "len", "(", "newlist", ")", "/", "2", "# integer division correct", "median", "=", "float", "(", "newlist", "[", "index", "]", "+", "newlist", "[", "index", "-", "1", "]", ")", "/", "2", "else", ":", "index", "=", "len", "(", "newlist", ")", "/", "2", "# int divsion gives mid value when count from 0", "median", "=", "newlist", "[", "index", "]", "return", "median" ]
Returns the 'middle' score of the passed list. If there is an even number of scores, the mean of the 2 middle scores is returned. Usage: lmedianscore(inlist)
[ "Returns", "the", "middle", "score", "of", "the", "passed", "list", ".", "If", "there", "is", "an", "even", "number", "of", "scores", "the", "mean", "of", "the", "2", "middle", "scores", "is", "returned", "." ]
python
train
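Note that medianscore comes from a Python 2 code base: on Python 3, len(newlist) / 2 yields a float, which cannot be used as a list index. A sketch of the same logic with floor division, so it runs on Python 3:

import copy

def medianscore(inlist):
    newlist = copy.deepcopy(inlist)
    newlist.sort()
    index = len(newlist) // 2   # floor division keeps the index an int
    if len(newlist) % 2 == 0:
        # even number of scores: average the two middle values
        return float(newlist[index] + newlist[index - 1]) / 2
    # odd number of scores: take the middle value
    return newlist[index]

print(medianscore([3, 1, 2]))     # 2
print(medianscore([4, 1, 3, 2]))  # 2.5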
saltstack/salt
salt/client/ssh/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/__init__.py#L1535-L1555
def lowstate_file_refs(chunks): ''' Create a list of file ref objects to reconcile ''' refs = {} for chunk in chunks: saltenv = 'base' crefs = [] for state in chunk: if state == '__env__': saltenv = chunk[state] elif state == 'saltenv': saltenv = chunk[state] elif state.startswith('__'): continue crefs.extend(salt_refs(chunk[state])) if crefs: if saltenv not in refs: refs[saltenv] = [] refs[saltenv].append(crefs) return refs
[ "def", "lowstate_file_refs", "(", "chunks", ")", ":", "refs", "=", "{", "}", "for", "chunk", "in", "chunks", ":", "saltenv", "=", "'base'", "crefs", "=", "[", "]", "for", "state", "in", "chunk", ":", "if", "state", "==", "'__env__'", ":", "saltenv", "=", "chunk", "[", "state", "]", "elif", "state", "==", "'saltenv'", ":", "saltenv", "=", "chunk", "[", "state", "]", "elif", "state", ".", "startswith", "(", "'__'", ")", ":", "continue", "crefs", ".", "extend", "(", "salt_refs", "(", "chunk", "[", "state", "]", ")", ")", "if", "crefs", ":", "if", "saltenv", "not", "in", "refs", ":", "refs", "[", "saltenv", "]", "=", "[", "]", "refs", "[", "saltenv", "]", ".", "append", "(", "crefs", ")", "return", "refs" ]
Create a list of file ref objects to reconcile
[ "Create", "a", "list", "of", "file", "ref", "objects", "to", "reconcile" ]
python
train
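The per-saltenv grouping can be demonstrated without Salt by stubbing salt_refs. The stub below only collects salt:// strings (the real helper is more thorough) and the sample chunk is fabricated; the loop structure follows the function above.

def salt_refs(data):
    # hypothetical stand-in: gather salt:// strings from a value or list
    values = data if isinstance(data, list) else [data]
    return [v for v in values if isinstance(v, str) and v.startswith('salt://')]

def lowstate_file_refs(chunks):
    refs = {}
    for chunk in chunks:
        saltenv = 'base'
        crefs = []
        for state in chunk:
            if state in ('__env__', 'saltenv'):
                saltenv = chunk[state]
            elif state.startswith('__'):
                continue
            crefs.extend(salt_refs(chunk[state]))
        if crefs:
            refs.setdefault(saltenv, []).append(crefs)
    return refs

chunks = [{'__env__': 'dev', 'name': '/etc/app.conf',
           'source': 'salt://files/app.conf'}]
print(lowstate_file_refs(chunks))   # {'dev': [['salt://files/app.conf']]}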
consbio/parserutils
parserutils/elements.py
https://github.com/consbio/parserutils/blob/f13f80db99ed43479336b116e38512e3566e4623/parserutils/elements.py#L500-L515
def remove_element_attributes(elem_to_parse, *args): """ Removes the specified keys from the element's attributes, and returns a dict containing the attributes that have been removed. """ element = get_element(elem_to_parse) if element is None: return element if len(args): attribs = element.attrib return {key: attribs.pop(key) for key in args if key in attribs} return {}
[ "def", "remove_element_attributes", "(", "elem_to_parse", ",", "*", "args", ")", ":", "element", "=", "get_element", "(", "elem_to_parse", ")", "if", "element", "is", "None", ":", "return", "element", "if", "len", "(", "args", ")", ":", "attribs", "=", "element", ".", "attrib", "return", "{", "key", ":", "attribs", ".", "pop", "(", "key", ")", "for", "key", "in", "args", "if", "key", "in", "attribs", "}", "return", "{", "}" ]
Removes the specified keys from the element's attributes, and returns a dict containing the attributes that have been removed.
[ "Removes", "the", "specified", "keys", "from", "the", "element", "s", "attributes", "and", "returns", "a", "dict", "containing", "the", "attributes", "that", "have", "been", "removed", "." ]
python
train
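The same pop-and-collect pattern works directly on a standard-library Element; the snippet below skips parserutils' get_element resolution and just parses a small made-up document.

from xml.etree import ElementTree as ET

elem = ET.fromstring('<item id="1" lang="en">text</item>')
attribs = elem.attrib
# remove two attribute names, one of which is absent, exactly like the
# dict comprehension in remove_element_attributes
removed = {key: attribs.pop(key) for key in ('id', 'missing') if key in attribs}
print(removed)      # {'id': '1'}
print(elem.attrib)  # {'lang': 'en'}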
adrn/gala
gala/dynamics/_genfunc/toy_potentials.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/_genfunc/toy_potentials.py#L18-L26
def angact_ho(x,omega): """ Calculate angle and action variable in sho potential with parameter omega """ action = (x[3:]**2+(omega*x[:3])**2)/(2.*omega) angle = np.array([np.arctan(-x[3+i]/omega[i]/x[i]) if x[i]!=0. else -np.sign(x[3+i])*np.pi/2. for i in range(3)]) for i in range(3): if(x[i]<0): angle[i]+=np.pi return np.concatenate((action,angle % (2.*np.pi)))
[ "def", "angact_ho", "(", "x", ",", "omega", ")", ":", "action", "=", "(", "x", "[", "3", ":", "]", "**", "2", "+", "(", "omega", "*", "x", "[", ":", "3", "]", ")", "**", "2", ")", "/", "(", "2.", "*", "omega", ")", "angle", "=", "np", ".", "array", "(", "[", "np", ".", "arctan", "(", "-", "x", "[", "3", "+", "i", "]", "/", "omega", "[", "i", "]", "/", "x", "[", "i", "]", ")", "if", "x", "[", "i", "]", "!=", "0.", "else", "-", "np", ".", "sign", "(", "x", "[", "3", "+", "i", "]", ")", "*", "np", ".", "pi", "/", "2.", "for", "i", "in", "range", "(", "3", ")", "]", ")", "for", "i", "in", "range", "(", "3", ")", ":", "if", "(", "x", "[", "i", "]", "<", "0", ")", ":", "angle", "[", "i", "]", "+=", "np", ".", "pi", "return", "np", ".", "concatenate", "(", "(", "action", ",", "angle", "%", "(", "2.", "*", "np", ".", "pi", ")", ")", ")" ]
Calculate angle and action variable in sho potential with parameter omega
[ "Calculate", "angle", "and", "action", "variable", "in", "sho", "potential", "with", "parameter", "omega" ]
python
train
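The action part of angact_ho is the per-axis harmonic-oscillator invariant J = (v**2 + (omega*q)**2) / (2*omega). The phase-space point and frequencies below are arbitrary numbers chosen only to show the expected array shapes.

import numpy as np

# three positions followed by three velocities, as angact_ho assumes
x = np.array([1.0, 0.5, -0.2, 0.0, 0.3, 0.1])
omega = np.array([1.0, 2.0, 0.5])

# per-axis action, mirroring the first line of angact_ho
action = (x[3:]**2 + (omega * x[:3])**2) / (2.0 * omega)
print(action)   # [0.5    0.2725 0.02  ]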
saltstack/salt
salt/states/ssh_auth.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/ssh_auth.py#L392-L509
def absent(name, user, enc='ssh-rsa', comment='', source='', options=None, config='.ssh/authorized_keys', fingerprint_hash_type=None): ''' Verifies that the specified SSH key is absent name The SSH key to manage user The user who owns the SSH authorized keys file to modify enc Defines what type of key is being used; can be ed25519, ecdsa, ssh-rsa or ssh-dss comment The comment to be placed with the SSH public key options The options passed to the key, pass a list object source The source file for the key(s). Can contain any number of public keys, in standard "authorized_keys" format. If this is set, comment, enc and options will be ignored. .. versionadded:: 2015.8.0 config The location of the authorized keys file relative to the user's home directory, defaults to ".ssh/authorized_keys". Token expansion %u and %h for username and home path supported. fingerprint_hash_type The public key fingerprint hash type that the public key fingerprint was originally hashed with. This defaults to ``sha256`` if not specified. .. versionadded:: 2016.11.7 ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if __opts__['test']: ret['result'], ret['comment'] = _absent_test( user, name, enc, comment, options or [], source, config, fingerprint_hash_type) return ret # Extract Key from file if source is present if source != '': key = __salt__['cp.get_file_str']( source, saltenv=__env__) filehasoptions = False # check if this is of form {options} {enc} {key} {comment} sshre = re.compile(r'^(ssh\-|ecds).*') key = key.rstrip().split('\n') for keyline in key: filehasoptions = sshre.match(keyline) if not filehasoptions: ret['comment'] = __salt__['ssh.rm_auth_key_from_file'](user, source, config, saltenv=__env__, fingerprint_hash_type=fingerprint_hash_type) else: # Split keyline to get key keyline = keyline.split(' ') ret['comment'] = __salt__['ssh.rm_auth_key'](user, keyline[1], config=config, fingerprint_hash_type=fingerprint_hash_type) else: # Get just the key sshre = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$') fullkey = sshre.search(name) # if it is {key} [comment] if not fullkey: key_and_comment = name.split(None, 1) name = key_and_comment[0] if len(key_and_comment) == 2: comment = key_and_comment[1] else: # if there are options, set them if fullkey.group(1): options = fullkey.group(1).split(',') # key is of format: {enc} {key} [comment] comps = fullkey.group(2).split() enc = comps[0] name = comps[1] if len(comps) == 3: comment = comps[2] ret['comment'] = __salt__['ssh.rm_auth_key'](user, name, config=config, fingerprint_hash_type=fingerprint_hash_type) if ret['comment'] == 'User authorized keys file not present': ret['result'] = False return ret elif ret['comment'] == 'Key removed': ret['changes'][name] = 'Removed' return ret
[ "def", "absent", "(", "name", ",", "user", ",", "enc", "=", "'ssh-rsa'", ",", "comment", "=", "''", ",", "source", "=", "''", ",", "options", "=", "None", ",", "config", "=", "'.ssh/authorized_keys'", ",", "fingerprint_hash_type", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", ",", "ret", "[", "'comment'", "]", "=", "_absent_test", "(", "user", ",", "name", ",", "enc", ",", "comment", ",", "options", "or", "[", "]", ",", "source", ",", "config", ",", "fingerprint_hash_type", ")", "return", "ret", "# Extract Key from file if source is present", "if", "source", "!=", "''", ":", "key", "=", "__salt__", "[", "'cp.get_file_str'", "]", "(", "source", ",", "saltenv", "=", "__env__", ")", "filehasoptions", "=", "False", "# check if this is of form {options} {enc} {key} {comment}", "sshre", "=", "re", ".", "compile", "(", "r'^(ssh\\-|ecds).*'", ")", "key", "=", "key", ".", "rstrip", "(", ")", ".", "split", "(", "'\\n'", ")", "for", "keyline", "in", "key", ":", "filehasoptions", "=", "sshre", ".", "match", "(", "keyline", ")", "if", "not", "filehasoptions", ":", "ret", "[", "'comment'", "]", "=", "__salt__", "[", "'ssh.rm_auth_key_from_file'", "]", "(", "user", ",", "source", ",", "config", ",", "saltenv", "=", "__env__", ",", "fingerprint_hash_type", "=", "fingerprint_hash_type", ")", "else", ":", "# Split keyline to get key", "keyline", "=", "keyline", ".", "split", "(", "' '", ")", "ret", "[", "'comment'", "]", "=", "__salt__", "[", "'ssh.rm_auth_key'", "]", "(", "user", ",", "keyline", "[", "1", "]", ",", "config", "=", "config", ",", "fingerprint_hash_type", "=", "fingerprint_hash_type", ")", "else", ":", "# Get just the key", "sshre", "=", "re", ".", "compile", "(", "r'^(.*?)\\s?((?:ssh\\-|ecds)[\\w-]+\\s.+)$'", ")", "fullkey", "=", "sshre", ".", "search", "(", "name", ")", "# if it is {key} [comment]", "if", "not", "fullkey", ":", "key_and_comment", "=", "name", ".", "split", "(", "None", ",", "1", ")", "name", "=", "key_and_comment", "[", "0", "]", "if", "len", "(", "key_and_comment", ")", "==", "2", ":", "comment", "=", "key_and_comment", "[", "1", "]", "else", ":", "# if there are options, set them", "if", "fullkey", ".", "group", "(", "1", ")", ":", "options", "=", "fullkey", ".", "group", "(", "1", ")", ".", "split", "(", "','", ")", "# key is of format: {enc} {key} [comment]", "comps", "=", "fullkey", ".", "group", "(", "2", ")", ".", "split", "(", ")", "enc", "=", "comps", "[", "0", "]", "name", "=", "comps", "[", "1", "]", "if", "len", "(", "comps", ")", "==", "3", ":", "comment", "=", "comps", "[", "2", "]", "ret", "[", "'comment'", "]", "=", "__salt__", "[", "'ssh.rm_auth_key'", "]", "(", "user", ",", "name", ",", "config", "=", "config", ",", "fingerprint_hash_type", "=", "fingerprint_hash_type", ")", "if", "ret", "[", "'comment'", "]", "==", "'User authorized keys file not present'", ":", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "elif", "ret", "[", "'comment'", "]", "==", "'Key removed'", ":", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "'Removed'", "return", "ret" ]
Verifies that the specified SSH key is absent name The SSH key to manage user The user who owns the SSH authorized keys file to modify enc Defines what type of key is being used; can be ed25519, ecdsa, ssh-rsa or ssh-dss comment The comment to be placed with the SSH public key options The options passed to the key, pass a list object source The source file for the key(s). Can contain any number of public keys, in standard "authorized_keys" format. If this is set, comment, enc and options will be ignored. .. versionadded:: 2015.8.0 config The location of the authorized keys file relative to the user's home directory, defaults to ".ssh/authorized_keys". Token expansion %u and %h for username and home path supported. fingerprint_hash_type The public key fingerprint hash type that the public key fingerprint was originally hashed with. This defaults to ``sha256`` if not specified. .. versionadded:: 2016.11.7
[ "Verifies", "that", "the", "specified", "SSH", "key", "is", "absent" ]
python
train
JarryShaw/PyPCAPKit
src/interface/__init__.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/interface/__init__.py#L53-L131
def extract(fin=None, fout=None, format=None, # basic settings auto=True, extension=True, store=True, # internal settings files=False, nofile=False, verbose=False, # output settings engine=None, layer=None, protocol=None, # extraction settings ip=False, ipv4=False, ipv6=False, tcp=False, strict=True, # reassembly settings trace=False, trace_fout=None, trace_format=None, # trace settings trace_byteorder=sys.byteorder, trace_nanosecond=False): # trace settings """Extract a PCAP file. Keyword arguments: * fin -- str, file name to be read; if file not exist, raise an error * fout -- str, file name to be written * format -- str, file format of output <keyword> 'plist' / 'json' / 'tree' / 'html' * auto -- bool, if automatically run till EOF (default is True) <keyword> True / False * extension -- bool, if check and append extensions to output file (default is True) <keyword> True / False * store -- bool, if store extracted packet info (default is True) <keyword> True / False * files -- bool, if split each frame into different files (default is False) <keyword> True / False * nofile -- bool, if no output file is to be dumped (default is False) <keyword> True / False * verbose -- bool, if print verbose output information (default is False) <keyword> True / False * engine -- str, extraction engine to be used <keyword> 'default | pcapkit' * layer -- str, extract til which layer <keyword> 'Link' / 'Internet' / 'Transport' / 'Application' * protocol -- str, extract til which protocol <keyword> available protocol name * ip -- bool, if record data for IPv4 & IPv6 reassembly (default is False) <keyword> True / False * ipv4 -- bool, if perform IPv4 reassembly (default is False) <keyword> True / False * ipv6 -- bool, if perform IPv6 reassembly (default is False) <keyword> True / False * tcp -- bool, if perform TCP reassembly (default is False) <keyword> True / False * strict -- bool, if set strict flag for reassembly (default is True) <keyword> True / False * trace -- bool, if trace TCP traffic flows (default is False) <keyword> True / False * trace_fout -- str, path name for flow tracer if necessary * trace_format -- str, output file format of flow tracer <keyword> 'plist' / 'json' / 'tree' / 'html' / 'pcap' * trace_byteorder -- str, output file byte order <keyword> 'little' / 'big' * trace_nanosecond -- bool, output nanosecond-resolution file flag <keyword> True / False Returns: * Extractor -- an Extractor object form `pcapkit.extractor` """ if isinstance(layer, type) and issubclass(layer, Protocol): layer = layer.__layer__ if isinstance(protocol, type) and issubclass(protocol, Protocol): protocol = protocol.__index__() str_check(fin or '', fout or '', format or '', trace_fout or '', trace_format or '', engine or '', layer or '', *(protocol or '')) bool_check(files, nofile, verbose, auto, extension, store, ip, ipv4, ipv6, tcp, strict, trace) return Extractor(fin=fin, fout=fout, format=format, store=store, files=files, nofile=nofile, auto=auto, verbose=verbose, extension=extension, engine=engine, layer=layer, protocol=protocol, ip=ip, ipv4=ipv4, ipv6=ipv6, tcp=tcp, strict=strict, trace=trace, trace_fout=trace_fout, trace_format=trace_format, trace_byteorder=trace_byteorder, trace_nanosecond=trace_nanosecond)
[ "def", "extract", "(", "fin", "=", "None", ",", "fout", "=", "None", ",", "format", "=", "None", ",", "# basic settings", "auto", "=", "True", ",", "extension", "=", "True", ",", "store", "=", "True", ",", "# internal settings", "files", "=", "False", ",", "nofile", "=", "False", ",", "verbose", "=", "False", ",", "# output settings", "engine", "=", "None", ",", "layer", "=", "None", ",", "protocol", "=", "None", ",", "# extraction settings", "ip", "=", "False", ",", "ipv4", "=", "False", ",", "ipv6", "=", "False", ",", "tcp", "=", "False", ",", "strict", "=", "True", ",", "# reassembly settings", "trace", "=", "False", ",", "trace_fout", "=", "None", ",", "trace_format", "=", "None", ",", "# trace settings", "trace_byteorder", "=", "sys", ".", "byteorder", ",", "trace_nanosecond", "=", "False", ")", ":", "# trace settings", "if", "isinstance", "(", "layer", ",", "type", ")", "and", "issubclass", "(", "layer", ",", "Protocol", ")", ":", "layer", "=", "layer", ".", "__layer__", "if", "isinstance", "(", "protocol", ",", "type", ")", "and", "issubclass", "(", "protocol", ",", "Protocol", ")", ":", "protocol", "=", "protocol", ".", "__index__", "(", ")", "str_check", "(", "fin", "or", "''", ",", "fout", "or", "''", ",", "format", "or", "''", ",", "trace_fout", "or", "''", ",", "trace_format", "or", "''", ",", "engine", "or", "''", ",", "layer", "or", "''", ",", "*", "(", "protocol", "or", "''", ")", ")", "bool_check", "(", "files", ",", "nofile", ",", "verbose", ",", "auto", ",", "extension", ",", "store", ",", "ip", ",", "ipv4", ",", "ipv6", ",", "tcp", ",", "strict", ",", "trace", ")", "return", "Extractor", "(", "fin", "=", "fin", ",", "fout", "=", "fout", ",", "format", "=", "format", ",", "store", "=", "store", ",", "files", "=", "files", ",", "nofile", "=", "nofile", ",", "auto", "=", "auto", ",", "verbose", "=", "verbose", ",", "extension", "=", "extension", ",", "engine", "=", "engine", ",", "layer", "=", "layer", ",", "protocol", "=", "protocol", ",", "ip", "=", "ip", ",", "ipv4", "=", "ipv4", ",", "ipv6", "=", "ipv6", ",", "tcp", "=", "tcp", ",", "strict", "=", "strict", ",", "trace", "=", "trace", ",", "trace_fout", "=", "trace_fout", ",", "trace_format", "=", "trace_format", ",", "trace_byteorder", "=", "trace_byteorder", ",", "trace_nanosecond", "=", "trace_nanosecond", ")" ]
Extract a PCAP file. Keyword arguments: * fin -- str, file name to be read; if file not exist, raise an error * fout -- str, file name to be written * format -- str, file format of output <keyword> 'plist' / 'json' / 'tree' / 'html' * auto -- bool, if automatically run till EOF (default is True) <keyword> True / False * extension -- bool, if check and append extensions to output file (default is True) <keyword> True / False * store -- bool, if store extracted packet info (default is True) <keyword> True / False * files -- bool, if split each frame into different files (default is False) <keyword> True / False * nofile -- bool, if no output file is to be dumped (default is False) <keyword> True / False * verbose -- bool, if print verbose output information (default is False) <keyword> True / False * engine -- str, extraction engine to be used <keyword> 'default | pcapkit' * layer -- str, extract til which layer <keyword> 'Link' / 'Internet' / 'Transport' / 'Application' * protocol -- str, extract til which protocol <keyword> available protocol name * ip -- bool, if record data for IPv4 & IPv6 reassembly (default is False) <keyword> True / False * ipv4 -- bool, if perform IPv4 reassembly (default is False) <keyword> True / False * ipv6 -- bool, if perform IPv6 reassembly (default is False) <keyword> True / False * tcp -- bool, if perform TCP reassembly (default is False) <keyword> True / False * strict -- bool, if set strict flag for reassembly (default is True) <keyword> True / False * trace -- bool, if trace TCP traffic flows (default is False) <keyword> True / False * trace_fout -- str, path name for flow tracer if necessary * trace_format -- str, output file format of flow tracer <keyword> 'plist' / 'json' / 'tree' / 'html' / 'pcap' * trace_byteorder -- str, output file byte order <keyword> 'little' / 'big' * trace_nanosecond -- bool, output nanosecond-resolution file flag <keyword> True / False Returns: * Extractor -- an Extractor object form `pcapkit.extractor`
[ "Extract", "a", "PCAP", "file", "." ]
python
train
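A hedged usage sketch: it assumes pcapkit is installed and exposes extract at the package top level, and that a capture file named in.pcap exists in the working directory.

import pcapkit

# keep everything in memory; no dump file is written because nofile=True
extraction = pcapkit.extract(fin='in.pcap', nofile=True, store=True)
print(extraction)   # Extractor object holding the parsed frames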
mitsei/dlkit
dlkit/json_/learning/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/objects.py#L375-L387
def clear_cognitive_process(self): """Clears the cognitive process. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.clear_avatar_template if (self.get_cognitive_process_metadata().is_read_only() or self.get_cognitive_process_metadata().is_required()): raise errors.NoAccess() self._my_map['cognitiveProcessId'] = self._cognitive_process_default
[ "def", "clear_cognitive_process", "(", "self", ")", ":", "# Implemented from template for osid.resource.ResourceForm.clear_avatar_template", "if", "(", "self", ".", "get_cognitive_process_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", ".", "get_cognitive_process_metadata", "(", ")", ".", "is_required", "(", ")", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "self", ".", "_my_map", "[", "'cognitiveProcessId'", "]", "=", "self", ".", "_cognitive_process_default" ]
Clears the cognitive process. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
[ "Clears", "the", "cognitive", "process", "." ]
python
train
davidrpugh/pyCollocation
pycollocation/solvers/solvers.py
https://github.com/davidrpugh/pyCollocation/blob/9376f3488a992dc416cfd2a4dbb396d094927569/pycollocation/solvers/solvers.py#L41-L58
def _evaluate_rhs(cls, funcs, nodes, problem): """ Compute the value of the right-hand side of the system of ODEs. Parameters ---------- basis_funcs : list(function) nodes : numpy.ndarray problem : TwoPointBVPLike Returns ------- evaluated_rhs : list(float) """ evald_funcs = cls._evaluate_functions(funcs, nodes) evald_rhs = problem.rhs(nodes, *evald_funcs, **problem.params) return evald_rhs
[ "def", "_evaluate_rhs", "(", "cls", ",", "funcs", ",", "nodes", ",", "problem", ")", ":", "evald_funcs", "=", "cls", ".", "_evaluate_functions", "(", "funcs", ",", "nodes", ")", "evald_rhs", "=", "problem", ".", "rhs", "(", "nodes", ",", "*", "evald_funcs", ",", "*", "*", "problem", ".", "params", ")", "return", "evald_rhs" ]
Compute the value of the right-hand side of the system of ODEs. Parameters ---------- basis_funcs : list(function) nodes : numpy.ndarray problem : TwoPointBVPLike Returns ------- evaluated_rhs : list(float)
[ "Compute", "the", "value", "of", "the", "right", "-", "hand", "side", "of", "the", "system", "of", "ODEs", "." ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datamodel/obo_parser.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/obo_parser.py#L492-L497
def write_hier_all(self, out=sys.stdout, len_dash=1, max_depth=None, num_child=None, short_prt=False): """Write hierarchy for all GO Terms in obo file.""" # Print: [biological_process, molecular_function, and cellular_component] for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']: self.write_hier(go_id, out, len_dash, max_depth, num_child, short_prt, None)
[ "def", "write_hier_all", "(", "self", ",", "out", "=", "sys", ".", "stdout", ",", "len_dash", "=", "1", ",", "max_depth", "=", "None", ",", "num_child", "=", "None", ",", "short_prt", "=", "False", ")", ":", "# Print: [biological_process, molecular_function, and cellular_component]", "for", "go_id", "in", "[", "'GO:0008150'", ",", "'GO:0003674'", ",", "'GO:0005575'", "]", ":", "self", ".", "write_hier", "(", "go_id", ",", "out", ",", "len_dash", ",", "max_depth", ",", "num_child", ",", "short_prt", ",", "None", ")" ]
Write hierarchy for all GO Terms in obo file.
[ "Write", "hierarchy", "for", "all", "GO", "Terms", "in", "obo", "file", "." ]
python
train
mbj4668/pyang
pyang/plugins/sample-xml-skeleton.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/sample-xml-skeleton.py#L228-L237
def list_comment(self, node, elem, minel): """Add list annotation to `elem`.""" lo = "0" if minel is None else minel.arg maxel = node.search_one("max-elements") hi = "" if maxel is None else maxel.arg elem.insert(0, etree.Comment( " # entries: %s..%s " % (lo, hi))) if node.keyword == 'list': elem.insert(0, etree.Comment( " # keys: " + ",".join([k.arg for k in node.i_key])))
[ "def", "list_comment", "(", "self", ",", "node", ",", "elem", ",", "minel", ")", ":", "lo", "=", "\"0\"", "if", "minel", "is", "None", "else", "minel", ".", "arg", "maxel", "=", "node", ".", "search_one", "(", "\"max-elements\"", ")", "hi", "=", "\"\"", "if", "maxel", "is", "None", "else", "maxel", ".", "arg", "elem", ".", "insert", "(", "0", ",", "etree", ".", "Comment", "(", "\" # entries: %s..%s \"", "%", "(", "lo", ",", "hi", ")", ")", ")", "if", "node", ".", "keyword", "==", "'list'", ":", "elem", ".", "insert", "(", "0", ",", "etree", ".", "Comment", "(", "\" # keys: \"", "+", "\",\"", ".", "join", "(", "[", "k", ".", "arg", "for", "k", "in", "node", ".", "i_key", "]", ")", ")", ")" ]
Add list annotation to `elem`.
[ "Add", "list", "annotation", "to", "elem", "." ]
python
train
boriel/zxbasic
zxbpp.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbpp.py#L324-L333
def p_include_once(p): """ include_once : INCLUDE ONCE STRING """ if ENABLED: p[0] = include_once(p[3], p.lineno(3), local_first=True) else: p[0] = [] if not p[0]: p.lexer.next_token = '_ENDFILE_'
[ "def", "p_include_once", "(", "p", ")", ":", "if", "ENABLED", ":", "p", "[", "0", "]", "=", "include_once", "(", "p", "[", "3", "]", ",", "p", ".", "lineno", "(", "3", ")", ",", "local_first", "=", "True", ")", "else", ":", "p", "[", "0", "]", "=", "[", "]", "if", "not", "p", "[", "0", "]", ":", "p", ".", "lexer", ".", "next_token", "=", "'_ENDFILE_'" ]
include_once : INCLUDE ONCE STRING
[ "include_once", ":", "INCLUDE", "ONCE", "STRING" ]
python
train
josiahcarlson/rom
rom/util.py
https://github.com/josiahcarlson/rom/blob/8b5607a856341df85df33422accc30ba9294dbdb/rom/util.py#L520-L554
def save(self, *objects, **kwargs): ''' This method is an alternate API for saving many entities (possibly not tracked by the session). You can call:: session.save(obj) session.save(obj1, obj2, ...) session.save([obj1, obj2, ...]) And the entities will be flushed to Redis. You can pass the keyword arguments ``full``, ``all``, and ``force`` with the same meaning and semantics as the ``.commit()`` method. ''' from rom import Model full = kwargs.get('full') all = kwargs.get('all') force = kwargs.get('force') changes = 0 items = deque() items.extend(objects) while items: o = items.popleft() if isinstance(o, (list, tuple)): items.extendleft(reversed(o)) elif isinstance(o, Model): if not o._deleted and (all or o._modified): changes += o.save(full, force) else: raise ORMError( "Cannot save an object that is not an instance of a Model (you provided %r)"%( o,)) return changes
[ "def", "save", "(", "self", ",", "*", "objects", ",", "*", "*", "kwargs", ")", ":", "from", "rom", "import", "Model", "full", "=", "kwargs", ".", "get", "(", "'full'", ")", "all", "=", "kwargs", ".", "get", "(", "'all'", ")", "force", "=", "kwargs", ".", "get", "(", "'force'", ")", "changes", "=", "0", "items", "=", "deque", "(", ")", "items", ".", "extend", "(", "objects", ")", "while", "items", ":", "o", "=", "items", ".", "popleft", "(", ")", "if", "isinstance", "(", "o", ",", "(", "list", ",", "tuple", ")", ")", ":", "items", ".", "extendleft", "(", "reversed", "(", "o", ")", ")", "elif", "isinstance", "(", "o", ",", "Model", ")", ":", "if", "not", "o", ".", "_deleted", "and", "(", "all", "or", "o", ".", "_modified", ")", ":", "changes", "+=", "o", ".", "save", "(", "full", ",", "force", ")", "else", ":", "raise", "ORMError", "(", "\"Cannot save an object that is not an instance of a Model (you provided %r)\"", "%", "(", "o", ",", ")", ")", "return", "changes" ]
This method is an alternate API for saving many entities (possibly not tracked by the session). You can call:: session.save(obj) session.save(obj1, obj2, ...) session.save([obj1, obj2, ...]) And the entities will be flushed to Redis. You can pass the keyword arguments ``full``, ``all``, and ``force`` with the same meaning and semantics as the ``.commit()`` method.
[ "This", "method", "is", "an", "alternate", "API", "for", "saving", "many", "entities", "(", "possibly", "not", "tracked", "by", "the", "session", ")", ".", "You", "can", "call", "::" ]
python
test
StackStorm/pybind
pybind/slxos/v17r_2_00/cluster/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_2_00/cluster/__init__.py#L418-L441
def _set_client_pw(self, v, load=False): """ Setter method for client_pw, mapped from YANG variable /cluster/client_pw (container) If this variable is read-only (config: false) in the source YANG file, then _set_client_pw is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_client_pw() directly. YANG Description: Client Pseudo Wire """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=client_pw.client_pw, is_container='container', presence=True, yang_name="client-pw", rest_name="client-pw", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Client Pseudo Wire', u'cli-add-mode': None, u'sort-priority': u'RUNNCFG_MCT_PW_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """client_pw must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=client_pw.client_pw, is_container='container', presence=True, yang_name="client-pw", rest_name="client-pw", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Client Pseudo Wire', u'cli-add-mode': None, u'sort-priority': u'RUNNCFG_MCT_PW_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='container', is_config=True)""", }) self.__client_pw = t if hasattr(self, '_set'): self._set()
[ "def", "_set_client_pw", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "client_pw", ".", "client_pw", ",", "is_container", "=", "'container'", ",", "presence", "=", "True", ",", "yang_name", "=", "\"client-pw\"", ",", "rest_name", "=", "\"client-pw\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Client Pseudo Wire'", ",", "u'cli-add-mode'", ":", "None", ",", "u'sort-priority'", ":", "u'RUNNCFG_MCT_PW_CONFIG'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mct'", ",", "defining_module", "=", "'brocade-mct'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"client_pw must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=client_pw.client_pw, is_container='container', presence=True, yang_name=\"client-pw\", rest_name=\"client-pw\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Client Pseudo Wire', u'cli-add-mode': None, u'sort-priority': u'RUNNCFG_MCT_PW_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__client_pw", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for client_pw, mapped from YANG variable /cluster/client_pw (container) If this variable is read-only (config: false) in the source YANG file, then _set_client_pw is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_client_pw() directly. YANG Description: Client Pseudo Wire
[ "Setter", "method", "for", "client_pw", "mapped", "from", "YANG", "variable", "/", "cluster", "/", "client_pw", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_client_pw", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_client_pw", "()", "directly", "." ]
python
train
riptano/ccm
ccmlib/node.py
https://github.com/riptano/ccm/blob/275699f79d102b5039b79cc17fa6305dccf18412/ccmlib/node.py#L422-L433
def mark_log(self, filename='system.log'): """ Returns "a mark" to the current position of this node Cassandra log. This is for use with the from_mark parameter of watch_log_for_* methods, allowing to watch the log from the position when this method was called. """ log_file = os.path.join(self.get_path(), 'logs', filename) if not os.path.exists(log_file): return 0 with open(log_file) as f: f.seek(0, os.SEEK_END) return f.tell()
[ "def", "mark_log", "(", "self", ",", "filename", "=", "'system.log'", ")", ":", "log_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "get_path", "(", ")", ",", "'logs'", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "log_file", ")", ":", "return", "0", "with", "open", "(", "log_file", ")", "as", "f", ":", "f", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "return", "f", ".", "tell", "(", ")" ]
Returns "a mark" to the current position of this node Cassandra log. This is for use with the from_mark parameter of watch_log_for_* methods, allowing to watch the log from the position when this method was called.
[ "Returns", "a", "mark", "to", "the", "current", "position", "of", "this", "node", "Cassandra", "log", ".", "This", "is", "for", "use", "with", "the", "from_mark", "parameter", "of", "watch_log_for_", "*", "methods", "allowing", "to", "watch", "the", "log", "from", "the", "position", "when", "this", "method", "was", "called", "." ]
python
train
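The mark-then-watch idea is independent of Cassandra: remember the end-of-file offset now, then later read only what was appended after it. A self-contained illustration with a temporary file:

import os
import tempfile

# create a throwaway log with one existing line
with tempfile.NamedTemporaryFile('w', suffix='.log', delete=False) as f:
    f.write('old line\n')
    log_file = f.name

# take the mark: the current end of the file
with open(log_file) as f:
    f.seek(0, os.SEEK_END)
    mark = f.tell()

# something appends to the log afterwards
with open(log_file, 'a') as f:
    f.write('new line\n')

# reading from the mark yields only the new content
with open(log_file) as f:
    f.seek(mark)
    print(f.read())   # new line
os.remove(log_file)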
DAI-Lab/Copulas
copulas/univariate/gaussian.py
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/univariate/gaussian.py#L69-L79
def cumulative_distribution(self, X): """Cumulative distribution function for gaussian distribution. Arguments: X: `np.ndarray` of shape (n, 1). Returns: np.ndarray: Cumulative density for X. """ self.check_fit() return norm.cdf(X, loc=self.mean, scale=self.std)
[ "def", "cumulative_distribution", "(", "self", ",", "X", ")", ":", "self", ".", "check_fit", "(", ")", "return", "norm", ".", "cdf", "(", "X", ",", "loc", "=", "self", ".", "mean", ",", "scale", "=", "self", ".", "std", ")" ]
Cumulative distribution function for gaussian distribution. Arguments: X: `np.ndarray` of shape (n, 1). Returns: np.ndarray: Cumulative density for X.
[ "Cumulative", "distribution", "function", "for", "gaussian", "distribution", "." ]
python
train
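Since the method is a thin wrapper around scipy's normal CDF with the fitted mean and standard deviation, the call can be reproduced directly; the parameters below are made up rather than fitted.

import numpy as np
from scipy.stats import norm

X = np.array([[-1.0], [0.0], [1.0]])
mean, std = 0.0, 1.0   # stand-ins for the fitted parameters

# same call as cumulative_distribution makes internally
print(norm.cdf(X, loc=mean, scale=std))
# [[0.15865525]
#  [0.5       ]
#  [0.84134474]]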
erinxocon/spotify-local
src/spotify_local/utils.py
https://github.com/erinxocon/spotify-local/blob/8188eef221e3d8b9f408ff430d80e74560360459/src/spotify_local/utils.py#L34-L38
def get_csrf_token(): """Retrieve a simple csrf token for to prevent cross site request forgery.""" url = get_url("/simplecsrf/token.json") r = s.get(url=url, headers=DEFAULT_ORIGIN) return r.json()["token"]
[ "def", "get_csrf_token", "(", ")", ":", "url", "=", "get_url", "(", "\"/simplecsrf/token.json\"", ")", "r", "=", "s", ".", "get", "(", "url", "=", "url", ",", "headers", "=", "DEFAULT_ORIGIN", ")", "return", "r", ".", "json", "(", ")", "[", "\"token\"", "]" ]
Retrieve a simple csrf token for to prevent cross site request forgery.
[ "Retrieve", "a", "simple", "csrf", "token", "for", "to", "prevent", "cross", "site", "request", "forgery", "." ]
python
train
casacore/python-casacore
casacore/util/substitute.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/util/substitute.py#L62-L206
def substitute(s, objlist=(), globals={}, locals={}): """Substitute global python variables in a command string. This function parses a string and tries to substitute parts like `$name` by their value. It is uses by :mod:`image` and :mod:`table` to handle image and table objects in a command, but also other variables (integers, strings, etc.) can be substituted. The following rules apply: 1. A name must start with an underscore or alphabetic, followed by zero or more alphanumerics and underscores. 2. String parts enclosed in single or double quotes are literals and are left untouched. Furthermore a $ can be escaped by a backslash, which is useful if an environment variable is used. Note that an extra backslash is required in Python to escape the backslash. The output contains the quotes and backslashes. 3. A variable is looked up in the given local and global namespaces. 4. If the variable `name` has a vector value, its substitution is enclosed in square brackets and separated by commas. 5. A string value is enclosed in double quotes. If the value contains a double quote, that quote is enclosed in single quotes. 6. If the name's value has a type mentioned in the argument `objlist`, it is substituted by `$n` (where n is a sequence number) and its value is added to the objects of that type in `objlist`. 7. If the name is unknown or has an unknown type, it is left untouched. The `objlist` argument is a list of tuples or lists where each tuple or list has three fields: 1. The first field is the object type (e.g. `table`) 2. The second field is a prefix for the sequence number (usually empty). E.g. regions could have prefix 'r' resulting in a substitution like `$r1`. 3. The third field is a list of objects to be substituted. New objects get appended to it. Usually the list is initially empty. Apart from substituting variables, it also substitutes `$(expression)` by the expression result. It correctly handles parentheses and quotes in the expression. For example:: >>> a = 2 >>> b = 3 >>> substitute('$(a+b)+$a') '5+2' >>> substitute('$(a+b+a)') '7' >>> substitute('$((a+b)+$a)') '$((a+b)+$a)' >>> substitute('$((a+b)*(a+b))') '25' >>> substitute('$(len("ab cd( de"))') '9' Substitution is NOT recursive. E.g. if a=1 and b="$a", the result of substitute("$b") is "$a" and not 1. """ # Get the local variables at the caller level if not given. if not locals: locals = getlocals(3) # Initialize some variables. backslash = False dollar = False nparen = 0 name = '' evalstr = '' squote = False dquote = False out = '' # Loop through the entire string. for tmp in s: if backslash: out += tmp backslash = False continue # If a dollar is found, we might have a name or expression. # Alphabetics and underscore are always part of name. if dollar and nparen == 0: if tmp == '_' or ('a' <= tmp <= 'z') or ('A' <= tmp <= 'Z'): name += tmp continue # Numerics are only part if not first character. if '0' <= tmp <= '9' and name != '': name += tmp continue # $( indicates the start of an expression to evaluate. if tmp == '(' and name == '': nparen = 1 evalstr = '' continue # End of name found. Try to substitute. out += substitutename(name, objlist, globals, locals) dollar = False # Handle possible single or double quotes. if tmp == '"' and not squote: dquote = not dquote elif tmp == "'" and not dquote: squote = not squote if not dquote and not squote: # Count the number of balanced parentheses # (outside quoted strings) in the subexpression. 
if nparen > 0: if tmp == '(': nparen += 1 elif tmp == ')': nparen -= 1 if nparen == 0: # The last closing parenthese is found. # Evaluate the subexpression. # Add the result to the output. out += substituteexpr(evalstr, globals, locals) dollar = False evalstr += tmp continue # Set a switch if we have a dollar (outside quoted # and eval strings). if tmp == '$': dollar = True name = '' continue # No special character; add it to output or evalstr. # Set a switch if we have a backslash. if nparen == 0: out += tmp else: evalstr += tmp if tmp == '\\': backslash = True # The entire string has been handled. # Substitute a possible last name. # Insert a possible incomplete eval string as such. if dollar: out += substitutename(name, objlist, globals, locals) else: if nparen > 0: out += '$(' + evalstr return out
[ "def", "substitute", "(", "s", ",", "objlist", "=", "(", ")", ",", "globals", "=", "{", "}", ",", "locals", "=", "{", "}", ")", ":", "# Get the local variables at the caller level if not given.", "if", "not", "locals", ":", "locals", "=", "getlocals", "(", "3", ")", "# Initialize some variables.", "backslash", "=", "False", "dollar", "=", "False", "nparen", "=", "0", "name", "=", "''", "evalstr", "=", "''", "squote", "=", "False", "dquote", "=", "False", "out", "=", "''", "# Loop through the entire string.", "for", "tmp", "in", "s", ":", "if", "backslash", ":", "out", "+=", "tmp", "backslash", "=", "False", "continue", "# If a dollar is found, we might have a name or expression.", "# Alphabetics and underscore are always part of name.", "if", "dollar", "and", "nparen", "==", "0", ":", "if", "tmp", "==", "'_'", "or", "(", "'a'", "<=", "tmp", "<=", "'z'", ")", "or", "(", "'A'", "<=", "tmp", "<=", "'Z'", ")", ":", "name", "+=", "tmp", "continue", "# Numerics are only part if not first character.", "if", "'0'", "<=", "tmp", "<=", "'9'", "and", "name", "!=", "''", ":", "name", "+=", "tmp", "continue", "# $( indicates the start of an expression to evaluate.", "if", "tmp", "==", "'('", "and", "name", "==", "''", ":", "nparen", "=", "1", "evalstr", "=", "''", "continue", "# End of name found. Try to substitute.", "out", "+=", "substitutename", "(", "name", ",", "objlist", ",", "globals", ",", "locals", ")", "dollar", "=", "False", "# Handle possible single or double quotes.", "if", "tmp", "==", "'\"'", "and", "not", "squote", ":", "dquote", "=", "not", "dquote", "elif", "tmp", "==", "\"'\"", "and", "not", "dquote", ":", "squote", "=", "not", "squote", "if", "not", "dquote", "and", "not", "squote", ":", "# Count the number of balanced parentheses", "# (outside quoted strings) in the subexpression.", "if", "nparen", ">", "0", ":", "if", "tmp", "==", "'('", ":", "nparen", "+=", "1", "elif", "tmp", "==", "')'", ":", "nparen", "-=", "1", "if", "nparen", "==", "0", ":", "# The last closing parenthese is found.", "# Evaluate the subexpression.", "# Add the result to the output.", "out", "+=", "substituteexpr", "(", "evalstr", ",", "globals", ",", "locals", ")", "dollar", "=", "False", "evalstr", "+=", "tmp", "continue", "# Set a switch if we have a dollar (outside quoted", "# and eval strings).", "if", "tmp", "==", "'$'", ":", "dollar", "=", "True", "name", "=", "''", "continue", "# No special character; add it to output or evalstr.", "# Set a switch if we have a backslash.", "if", "nparen", "==", "0", ":", "out", "+=", "tmp", "else", ":", "evalstr", "+=", "tmp", "if", "tmp", "==", "'\\\\'", ":", "backslash", "=", "True", "# The entire string has been handled.", "# Substitute a possible last name.", "# Insert a possible incomplete eval string as such.", "if", "dollar", ":", "out", "+=", "substitutename", "(", "name", ",", "objlist", ",", "globals", ",", "locals", ")", "else", ":", "if", "nparen", ">", "0", ":", "out", "+=", "'$('", "+", "evalstr", "return", "out" ]
Substitute global python variables in a command string. This function parses a string and tries to substitute parts like `$name` by their value. It is uses by :mod:`image` and :mod:`table` to handle image and table objects in a command, but also other variables (integers, strings, etc.) can be substituted. The following rules apply: 1. A name must start with an underscore or alphabetic, followed by zero or more alphanumerics and underscores. 2. String parts enclosed in single or double quotes are literals and are left untouched. Furthermore a $ can be escaped by a backslash, which is useful if an environment variable is used. Note that an extra backslash is required in Python to escape the backslash. The output contains the quotes and backslashes. 3. A variable is looked up in the given local and global namespaces. 4. If the variable `name` has a vector value, its substitution is enclosed in square brackets and separated by commas. 5. A string value is enclosed in double quotes. If the value contains a double quote, that quote is enclosed in single quotes. 6. If the name's value has a type mentioned in the argument `objlist`, it is substituted by `$n` (where n is a sequence number) and its value is added to the objects of that type in `objlist`. 7. If the name is unknown or has an unknown type, it is left untouched. The `objlist` argument is a list of tuples or lists where each tuple or list has three fields: 1. The first field is the object type (e.g. `table`) 2. The second field is a prefix for the sequence number (usually empty). E.g. regions could have prefix 'r' resulting in a substitution like `$r1`. 3. The third field is a list of objects to be substituted. New objects get appended to it. Usually the list is initially empty. Apart from substituting variables, it also substitutes `$(expression)` by the expression result. It correctly handles parentheses and quotes in the expression. For example:: >>> a = 2 >>> b = 3 >>> substitute('$(a+b)+$a') '5+2' >>> substitute('$(a+b+a)') '7' >>> substitute('$((a+b)+$a)') '$((a+b)+$a)' >>> substitute('$((a+b)*(a+b))') '25' >>> substitute('$(len("ab cd( de"))') '9' Substitution is NOT recursive. E.g. if a=1 and b="$a", the result of substitute("$b") is "$a" and not 1.
[ "Substitute", "global", "python", "variables", "in", "a", "command", "string", "." ]
python
train
jcushman/pdfquery
pdfquery/pdfquery.py
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L439-L454
def get_pyquery(self, tree=None, page_numbers=None): """ Wrap given tree in pyquery and return. If no tree supplied, will generate one from given page_numbers, or all page numbers. """ if not page_numbers: page_numbers = [] if tree is None: if not page_numbers and self.tree is not None: tree = self.tree else: tree = self.get_tree(page_numbers) if hasattr(tree, 'getroot'): tree = tree.getroot() return PyQuery(tree, css_translator=PDFQueryTranslator())
[ "def", "get_pyquery", "(", "self", ",", "tree", "=", "None", ",", "page_numbers", "=", "None", ")", ":", "if", "not", "page_numbers", ":", "page_numbers", "=", "[", "]", "if", "tree", "is", "None", ":", "if", "not", "page_numbers", "and", "self", ".", "tree", "is", "not", "None", ":", "tree", "=", "self", ".", "tree", "else", ":", "tree", "=", "self", ".", "get_tree", "(", "page_numbers", ")", "if", "hasattr", "(", "tree", ",", "'getroot'", ")", ":", "tree", "=", "tree", ".", "getroot", "(", ")", "return", "PyQuery", "(", "tree", ",", "css_translator", "=", "PDFQueryTranslator", "(", ")", ")" ]
Wrap given tree in pyquery and return. If no tree supplied, will generate one from given page_numbers, or all page numbers.
[ "Wrap", "given", "tree", "in", "pyquery", "and", "return", ".", "If", "no", "tree", "supplied", "will", "generate", "one", "from", "given", "page_numbers", "or", "all", "page", "numbers", "." ]
python
train
SmokinCaterpillar/pypet
pypet/pypetlogging.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/pypetlogging.py#L710-L716
def finalize(self): """Disables redirection""" if self._original_steam is not None and self._redirection: sys.stdout = self._original_steam print('Disabled redirection of `stdout`.') self._redirection = False self._original_steam = None
[ "def", "finalize", "(", "self", ")", ":", "if", "self", ".", "_original_steam", "is", "not", "None", "and", "self", ".", "_redirection", ":", "sys", ".", "stdout", "=", "self", ".", "_original_steam", "print", "(", "'Disabled redirection of `stdout`.'", ")", "self", ".", "_redirection", "=", "False", "self", ".", "_original_steam", "=", "None" ]
Disables redirection
[ "Disables", "redirection" ]
python
test
eyurtsev/FlowCytometryTools
FlowCytometryTools/__init__.py
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/__init__.py#L15-L21
def _get_paths(): """Generate paths to test data. Done in a function to protect namespace a bit.""" import os base_path = os.path.dirname(os.path.abspath(__file__)) test_data_dir = os.path.join(base_path, 'tests', 'data', 'Plate01') test_data_file = os.path.join(test_data_dir, 'RFP_Well_A3.fcs') return test_data_dir, test_data_file
[ "def", "_get_paths", "(", ")", ":", "import", "os", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", "test_data_dir", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "'tests'", ",", "'data'", ",", "'Plate01'", ")", "test_data_file", "=", "os", ".", "path", ".", "join", "(", "test_data_dir", ",", "'RFP_Well_A3.fcs'", ")", "return", "test_data_dir", ",", "test_data_file" ]
Generate paths to test data. Done in a function to protect namespace a bit.
[ "Generate", "paths", "to", "test", "data", ".", "Done", "in", "a", "function", "to", "protect", "namespace", "a", "bit", "." ]
python
train
xhtml2pdf/xhtml2pdf
xhtml2pdf/paragraph.py
https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/paragraph.py#L483-L502
def split(self, availWidth, availHeight): """ Split ourselves in two paragraphs. """ logger.debug("*** split (%f, %f)", availWidth, availHeight) splitted = [] if self.splitIndex: text1 = self.text[:self.splitIndex] text2 = self.text[self.splitIndex:] p1 = Paragraph(Text(text1), self.style, debug=self.debug) p2 = Paragraph(Text(text2), self.style, debug=self.debug, splitted=True) splitted = [p1, p2] logger.debug("*** text1 %s / text %s", len(text1), len(text2)) logger.debug('*** return %s', self.splitted) return splitted
[ "def", "split", "(", "self", ",", "availWidth", ",", "availHeight", ")", ":", "logger", ".", "debug", "(", "\"*** split (%f, %f)\"", ",", "availWidth", ",", "availHeight", ")", "splitted", "=", "[", "]", "if", "self", ".", "splitIndex", ":", "text1", "=", "self", ".", "text", "[", ":", "self", ".", "splitIndex", "]", "text2", "=", "self", ".", "text", "[", "self", ".", "splitIndex", ":", "]", "p1", "=", "Paragraph", "(", "Text", "(", "text1", ")", ",", "self", ".", "style", ",", "debug", "=", "self", ".", "debug", ")", "p2", "=", "Paragraph", "(", "Text", "(", "text2", ")", ",", "self", ".", "style", ",", "debug", "=", "self", ".", "debug", ",", "splitted", "=", "True", ")", "splitted", "=", "[", "p1", ",", "p2", "]", "logger", ".", "debug", "(", "\"*** text1 %s / text %s\"", ",", "len", "(", "text1", ")", ",", "len", "(", "text2", ")", ")", "logger", ".", "debug", "(", "'*** return %s'", ",", "self", ".", "splitted", ")", "return", "splitted" ]
Split ourselves in two paragraphs.
[ "Split", "ourselves", "in", "two", "paragraphs", "." ]
python
train
OpenTreeOfLife/peyotl
peyotl/phylesystem/git_workflows.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylesystem/git_workflows.py#L91-L100
def merge_from_master(git_action, study_id, auth_info, parent_sha): """merge from master into the WIP for this study/author this is needed to allow a worker's future saves to be merged seamlessly into master """ return _merge_from_master(git_action, doc_id=study_id, auth_info=auth_info, parent_sha=parent_sha, doctype_display_name="study")
[ "def", "merge_from_master", "(", "git_action", ",", "study_id", ",", "auth_info", ",", "parent_sha", ")", ":", "return", "_merge_from_master", "(", "git_action", ",", "doc_id", "=", "study_id", ",", "auth_info", "=", "auth_info", ",", "parent_sha", "=", "parent_sha", ",", "doctype_display_name", "=", "\"study\"", ")" ]
merge from master into the WIP for this study/author this is needed to allow a worker's future saves to be merged seamlessly into master
[ "merge", "from", "master", "into", "the", "WIP", "for", "this", "study", "/", "author", "this", "is", "needed", "to", "allow", "a", "worker", "s", "future", "saves", "to", "be", "merged", "seamlessly", "into", "master" ]
python
train
ga4gh/ga4gh-server
ga4gh/server/backend.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/backend.py#L655-L663
def runGetCallSet(self, id_): """ Returns a callset with the given id """ compoundId = datamodel.CallSetCompoundId.parse(id_) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(compoundId.variant_set_id) callSet = variantSet.getCallSet(id_) return self.runGetRequest(callSet)
[ "def", "runGetCallSet", "(", "self", ",", "id_", ")", ":", "compoundId", "=", "datamodel", ".", "CallSetCompoundId", ".", "parse", "(", "id_", ")", "dataset", "=", "self", ".", "getDataRepository", "(", ")", ".", "getDataset", "(", "compoundId", ".", "dataset_id", ")", "variantSet", "=", "dataset", ".", "getVariantSet", "(", "compoundId", ".", "variant_set_id", ")", "callSet", "=", "variantSet", ".", "getCallSet", "(", "id_", ")", "return", "self", ".", "runGetRequest", "(", "callSet", ")" ]
Returns a callset with the given id
[ "Returns", "a", "callset", "with", "the", "given", "id" ]
python
train
eaton-lab/toytree
versioner.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/versioner.py#L121-L127
def _get_init_release_tag(self): """ parses init.py to get previous version """ self.init_version = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", open(self.init_file, "r").read(), re.M).group(1)
[ "def", "_get_init_release_tag", "(", "self", ")", ":", "self", ".", "init_version", "=", "re", ".", "search", "(", "r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"", ",", "open", "(", "self", ".", "init_file", ",", "\"r\"", ")", ".", "read", "(", ")", ",", "re", ".", "M", ")", ".", "group", "(", "1", ")" ]
parses init.py to get previous version
[ "parses", "init", ".", "py", "to", "get", "previous", "version" ]
python
train
xen/webcraft
webcraft/admin/saform.py
https://github.com/xen/webcraft/blob/74ff1e5b253048d9260446bfbc95de2e402a8005/webcraft/admin/saform.py#L91-L117
def generate_form(model, only=None, meta=None): """ Generate WTForm based on SQLAlchemy table :param model: SQLAlchemy sa.Table :param only: list or set of columns that should be used in final form :param meta: Meta class with settings for form :return: WTForm object """ fields = OrderedDict() if meta: fields['Meta'] = meta for name, column in model.__dict__['columns'].items(): if only: if not name in only: continue if not isinstance(column, Column): continue fields[name] = TYPE_MAP[column.type.__class__]( name, render_kw={'placeholder': name} ) form = type( 'Add{}Form'.format(model.name.capitalize()), (Form,), fields ) return form
[ "def", "generate_form", "(", "model", ",", "only", "=", "None", ",", "meta", "=", "None", ")", ":", "fields", "=", "OrderedDict", "(", ")", "if", "meta", ":", "fields", "[", "'Meta'", "]", "=", "meta", "for", "name", ",", "column", "in", "model", ".", "__dict__", "[", "'columns'", "]", ".", "items", "(", ")", ":", "if", "only", ":", "if", "not", "name", "in", "only", ":", "continue", "if", "not", "isinstance", "(", "column", ",", "Column", ")", ":", "continue", "fields", "[", "name", "]", "=", "TYPE_MAP", "[", "column", ".", "type", ".", "__class__", "]", "(", "name", ",", "render_kw", "=", "{", "'placeholder'", ":", "name", "}", ")", "form", "=", "type", "(", "'Add{}Form'", ".", "format", "(", "model", ".", "name", ".", "capitalize", "(", ")", ")", ",", "(", "Form", ",", ")", ",", "fields", ")", "return", "form" ]
Generate WTForm based on SQLAlchemy table :param model: SQLAlchemy sa.Table :param only: list or set of columns that should be used in final form :param meta: Meta class with settings for form :return: WTForm object
[ "Generate", "WTForm", "based", "on", "SQLAlchemy", "table", ":", "param", "model", ":", "SQLAlchemy", "sa", ".", "Table", ":", "param", "only", ":", "list", "or", "set", "of", "columns", "that", "should", "be", "used", "in", "final", "form", ":", "param", "meta", ":", "Meta", "class", "with", "settings", "for", "form", ":", "return", ":", "WTForm", "object" ]
python
train
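The generate_form entry above builds a WTForms class at runtime with Python's built-in type(). A minimal, library-free sketch of that dynamic-class pattern (the Field stand-in and the column names are invented for illustration and are not part of webcraft):

from collections import OrderedDict

class Field:
    """Stand-in for a WTForms field: it only remembers its label."""
    def __init__(self, label):
        self.label = label

def make_form_class(table_name, column_names):
    # Collect class attributes in order, one stand-in field per column.
    namespace = OrderedDict((name, Field(name)) for name in column_names)
    # type(name, bases, namespace) creates a class at runtime, which is how
    # generate_form assembles its 'Add<Model>Form' classes.
    return type('Add{}Form'.format(table_name.capitalize()), (object,), namespace)

UserForm = make_form_class('user', ['name', 'email'])
print(UserForm.__name__)       # AddUserForm
print(UserForm.email.label)    # email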
mitsei/dlkit
dlkit/handcar/repository/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L2919-L2945
def get_composition_repository_assignment_session(self, proxy): """Gets the session for assigning composition to repository mappings. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionRepositoryAssignmentSession) - a CompositionRepositoryAssignmentSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_repository_assignment() is false compliance: optional - This method must be implemented if supports_composition_repository_assignment() is true. """ if not self.supports_composition_repository_assignment(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.CompositionRepositoryAssignmentSession(proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
[ "def", "get_composition_repository_assignment_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_composition_repository_assignment", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "# OperationFailed()", "proxy", "=", "self", ".", "_convert_proxy", "(", "proxy", ")", "try", ":", "session", "=", "sessions", ".", "CompositionRepositoryAssignmentSession", "(", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ")", "except", "AttributeError", ":", "raise", "# OperationFailed()", "return", "session" ]
Gets the session for assigning composition to repository mappings. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionRepositoryAssignmentSession) - a CompositionRepositoryAssignmentSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_repository_assignment() is false compliance: optional - This method must be implemented if supports_composition_repository_assignment() is true.
[ "Gets", "the", "session", "for", "assigning", "composition", "to", "repository", "mappings", "." ]
python
train
simon-anders/htseq
python2/src/StepVector.py
https://github.com/simon-anders/htseq/blob/6f7d66e757e610228c33ebf2bb5dc8cc5051c7f0/python2/src/StepVector.py#L466-L495
def create( cls, length = sys.maxint, typecode = 'd', start_index = 0 ): """Construct a StepVector of the given length, with indices starting at the given start_index and counting up to (but not including) start_index + length. The typecode may be: 'd' for float values (C type 'double'), 'i' for int values, 'b' for Boolean values, 'O' for arbitrary Python objects as value. The vector is initialized with the value zero (or, for typecode 'O', with None). """ if typecode == 'd': swigclass = _StepVector_float elif typecode == 'i': swigclass = _StepVector_int elif typecode == 'b': swigclass = _StepVector_bool elif typecode == 'O': swigclass = _StepVector_obj else: raise ValueError, "unsupported typecode" obj = cls() obj._typecode = typecode obj._swigobj = swigclass( ) obj.start = start_index obj.stop = start_index + length return obj
[ "def", "create", "(", "cls", ",", "length", "=", "sys", ".", "maxint", ",", "typecode", "=", "'d'", ",", "start_index", "=", "0", ")", ":", "if", "typecode", "==", "'d'", ":", "swigclass", "=", "_StepVector_float", "elif", "typecode", "==", "'i'", ":", "swigclass", "=", "_StepVector_int", "elif", "typecode", "==", "'b'", ":", "swigclass", "=", "_StepVector_bool", "elif", "typecode", "==", "'O'", ":", "swigclass", "=", "_StepVector_obj", "else", ":", "raise", "ValueError", ",", "\"unsupported typecode\"", "obj", "=", "cls", "(", ")", "obj", ".", "_typecode", "=", "typecode", "obj", ".", "_swigobj", "=", "swigclass", "(", ")", "obj", ".", "start", "=", "start_index", "obj", ".", "stop", "=", "start_index", "+", "length", "return", "obj" ]
Construct a StepVector of the given length, with indices starting at the given start_index and counting up to (but not including) start_index + length. The typecode may be: 'd' for float values (C type 'double'), 'i' for int values, 'b' for Boolean values, 'O' for arbitrary Python objects as value. The vector is initialized with the value zero (or, for typecode 'O', with None).
[ "Construct", "a", "StepVector", "of", "the", "given", "length", "with", "indices", "starting", "at", "the", "given", "start_index", "and", "counting", "up", "to", "(", "but", "not", "including", ")", "start_index", "+", "length", "." ]
python
train
indico/indico-plugins
chat/indico_chat/xmpp.py
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/chat/indico_chat/xmpp.py#L44-L56
def create_room(room): """Creates a MUC room on the XMPP server.""" if room.custom_server: return def _create_room(xmpp): muc = xmpp.plugin['xep_0045'] muc.joinMUC(room.jid, xmpp.requested_jid.user) muc.configureRoom(room.jid, _set_form_values(xmpp, room)) current_plugin.logger.info('Creating room %s', room.jid) _execute_xmpp(_create_room)
[ "def", "create_room", "(", "room", ")", ":", "if", "room", ".", "custom_server", ":", "return", "def", "_create_room", "(", "xmpp", ")", ":", "muc", "=", "xmpp", ".", "plugin", "[", "'xep_0045'", "]", "muc", ".", "joinMUC", "(", "room", ".", "jid", ",", "xmpp", ".", "requested_jid", ".", "user", ")", "muc", ".", "configureRoom", "(", "room", ".", "jid", ",", "_set_form_values", "(", "xmpp", ",", "room", ")", ")", "current_plugin", ".", "logger", ".", "info", "(", "'Creating room %s'", ",", "room", ".", "jid", ")", "_execute_xmpp", "(", "_create_room", ")" ]
Creates a MUC room on the XMPP server.
[ "Creates", "a", "MUC", "room", "on", "the", "XMPP", "server", "." ]
python
train
KE-works/pykechain
pykechain/models/scope.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/scope.py#L225-L235
def add_manager(self, manager): """ Add a single manager to the scope. :param manager: single username to be added to the scope list of managers :type manager: basestring :raises APIError: when unable to update the scope manager """ select_action = 'add_manager' self._update_scope_project_team(select_action=select_action, user=manager, user_type='manager')
[ "def", "add_manager", "(", "self", ",", "manager", ")", ":", "select_action", "=", "'add_manager'", "self", ".", "_update_scope_project_team", "(", "select_action", "=", "select_action", ",", "user", "=", "manager", ",", "user_type", "=", "'manager'", ")" ]
Add a single manager to the scope. :param manager: single username to be added to the scope list of managers :type manager: basestring :raises APIError: when unable to update the scope manager
[ "Add", "a", "single", "manager", "to", "the", "scope", "." ]
python
train
cloudendpoints/endpoints-python
endpoints/protojson.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/protojson.py#L68-L84
def __pad_value(value, pad_len_multiple, pad_char): """Add padding characters to the value if needed. Args: value: The string value to be padded. pad_len_multiple: Pad the result so its length is a multiple of pad_len_multiple. pad_char: The character to use for padding. Returns: The string value with padding characters added. """ assert pad_len_multiple > 0 assert len(pad_char) == 1 padding_length = (pad_len_multiple - (len(value) % pad_len_multiple)) % pad_len_multiple return value + pad_char * padding_length
[ "def", "__pad_value", "(", "value", ",", "pad_len_multiple", ",", "pad_char", ")", ":", "assert", "pad_len_multiple", ">", "0", "assert", "len", "(", "pad_char", ")", "==", "1", "padding_length", "=", "(", "pad_len_multiple", "-", "(", "len", "(", "value", ")", "%", "pad_len_multiple", ")", ")", "%", "pad_len_multiple", "return", "value", "+", "pad_char", "*", "padding_length" ]
Add padding characters to the value if needed. Args: value: The string value to be padded. pad_len_multiple: Pad the result so its length is a multiple of pad_len_multiple. pad_char: The character to use for padding. Returns: The string value with padding characters added.
[ "Add", "padding", "characters", "to", "the", "value", "if", "needed", "." ]
python
train
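The __pad_value entry above restores padding so a string's length becomes a multiple of pad_len_multiple, the usual step before decoding base64 whose '=' characters were stripped. A self-contained check of the same arithmetic (the sample strings are made up):

import base64

def pad_value(value, pad_len_multiple, pad_char):
    # Characters needed to reach the next multiple; 0 when already aligned.
    padding_length = (pad_len_multiple - (len(value) % pad_len_multiple)) % pad_len_multiple
    return value + pad_char * padding_length

assert pad_value('abcde', 4, '=') == 'abcde==='   # 5 -> 8 characters
assert pad_value('abcd', 4, '=') == 'abcd'        # already a multiple of 4

# Typical use: re-pad URL-safe base64 before decoding.
print(base64.urlsafe_b64decode(pad_value('aGVsbG8', 4, '=')))   # b'hello'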
limix/numpy-sugar
numpy_sugar/linalg/dot.py
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/dot.py#L60-L83
def cdot(L, out=None): r"""Product of a Cholesky matrix with itself transposed. Args: L (array_like): Cholesky matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`. """ L = asarray(L, float) layout_error = "Wrong matrix layout." if L.ndim != 2: raise ValueError(layout_error) if L.shape[0] != L.shape[1]: raise ValueError(layout_error) if out is None: out = empty((L.shape[0], L.shape[1]), float) return einsum("ij,kj->ik", L, L, out=out)
[ "def", "cdot", "(", "L", ",", "out", "=", "None", ")", ":", "L", "=", "asarray", "(", "L", ",", "float", ")", "layout_error", "=", "\"Wrong matrix layout.\"", "if", "L", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "layout_error", ")", "if", "L", ".", "shape", "[", "0", "]", "!=", "L", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "layout_error", ")", "if", "out", "is", "None", ":", "out", "=", "empty", "(", "(", "L", ".", "shape", "[", "0", "]", ",", "L", ".", "shape", "[", "1", "]", ")", ",", "float", ")", "return", "einsum", "(", "\"ij,kj->ik\"", ",", "L", ",", "L", ",", "out", "=", "out", ")" ]
r"""Product of a Cholesky matrix with itself transposed. Args: L (array_like): Cholesky matrix. out (:class:`numpy.ndarray`, optional): copy result to. Returns: :class:`numpy.ndarray`: :math:`\mathrm L\mathrm L^\intercal`.
[ "r", "Product", "of", "a", "Cholesky", "matrix", "with", "itself", "transposed", "." ]
python
train
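The cdot entry above evaluates the product of a Cholesky factor with its transpose through einsum("ij,kj->ik", L, L). A quick NumPy check that this matches L @ L.T and reconstructs the original matrix (size and seed are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
S = A @ A.T + 4.0 * np.eye(4)      # symmetric positive definite matrix
L = np.linalg.cholesky(S)          # lower-triangular factor, S = L @ L.T

# Summing over the shared column index j gives exactly L @ L.T.
out = np.einsum("ij,kj->ik", L, L)
assert np.allclose(out, L @ L.T)
assert np.allclose(out, S)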
fhcrc/taxtastic
taxtastic/utils.py
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/utils.py#L274-L292
def sqlite_default(): ''' Prepend default scheme if none is specified. This helps provides backwards compatibility with old versions of taxtastic where sqlite was the automatic default database. ''' def parse_url(url): # TODO: need separate option for a config file if url.endswith('.db') or url.endswith('.sqlite'): if not url.startswith('sqlite:///'): url = 'sqlite:///' + url elif url.endswith('.cfg') or url.endswith('.conf'): conf = configparser.SafeConfigParser(allow_no_value=True) conf.optionxform = str # options are case-sensitive conf.read(url) url = conf.get('sqlalchemy', 'url') return url return parse_url
[ "def", "sqlite_default", "(", ")", ":", "def", "parse_url", "(", "url", ")", ":", "# TODO: need separate option for a config file", "if", "url", ".", "endswith", "(", "'.db'", ")", "or", "url", ".", "endswith", "(", "'.sqlite'", ")", ":", "if", "not", "url", ".", "startswith", "(", "'sqlite:///'", ")", ":", "url", "=", "'sqlite:///'", "+", "url", "elif", "url", ".", "endswith", "(", "'.cfg'", ")", "or", "url", ".", "endswith", "(", "'.conf'", ")", ":", "conf", "=", "configparser", ".", "SafeConfigParser", "(", "allow_no_value", "=", "True", ")", "conf", ".", "optionxform", "=", "str", "# options are case-sensitive", "conf", ".", "read", "(", "url", ")", "url", "=", "conf", ".", "get", "(", "'sqlalchemy'", ",", "'url'", ")", "return", "url", "return", "parse_url" ]
Prepend default scheme if none is specified. This helps provides backwards compatibility with old versions of taxtastic where sqlite was the automatic default database.
[ "Prepend", "default", "scheme", "if", "none", "is", "specified", ".", "This", "helps", "provides", "backwards", "compatibility", "with", "old", "versions", "of", "taxtastic", "where", "sqlite", "was", "the", "automatic", "default", "database", "." ]
python
train
njsmith/colorspacious
colorspacious/illuminants.py
https://github.com/njsmith/colorspacious/blob/59e0226003fb1b894597c5081e8ca5a3aa4fcefd/colorspacious/illuminants.py#L97-L116
def as_XYZ100_w(whitepoint): """A convenience function for getting whitepoints. ``whitepoint`` can be either a string naming a standard illuminant (see :func:`standard_illuminant_XYZ100`), or else a whitepoint given explicitly as an array-like of XYZ values. We internally call this function anywhere you have to specify a whitepoint (e.g. for CIECAM02 or CIELAB conversions). Always uses the "standard" 2 degree observer. """ if isinstance(whitepoint, str): return standard_illuminant_XYZ100(whitepoint) else: whitepoint = np.asarray(whitepoint, dtype=float) if whitepoint.shape[-1] != 3: raise ValueError("Bad whitepoint shape") return whitepoint
[ "def", "as_XYZ100_w", "(", "whitepoint", ")", ":", "if", "isinstance", "(", "whitepoint", ",", "str", ")", ":", "return", "standard_illuminant_XYZ100", "(", "whitepoint", ")", "else", ":", "whitepoint", "=", "np", ".", "asarray", "(", "whitepoint", ",", "dtype", "=", "float", ")", "if", "whitepoint", ".", "shape", "[", "-", "1", "]", "!=", "3", ":", "raise", "ValueError", "(", "\"Bad whitepoint shape\"", ")", "return", "whitepoint" ]
A convenience function for getting whitepoints. ``whitepoint`` can be either a string naming a standard illuminant (see :func:`standard_illuminant_XYZ100`), or else a whitepoint given explicitly as an array-like of XYZ values. We internally call this function anywhere you have to specify a whitepoint (e.g. for CIECAM02 or CIELAB conversions). Always uses the "standard" 2 degree observer.
[ "A", "convenience", "function", "for", "getting", "whitepoints", "." ]
python
train
monarch-initiative/dipper
dipper/sources/Panther.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Panther.py#L92-L107
def parse(self, limit=None): """ :return: None """ if self.test_only: self.test_mode = True if self.tax_ids is None: LOG.info("No taxon filter set; Dumping all orthologous associations.") else: LOG.info("Only the following taxa will be dumped: %s", self.tax_ids) self._get_orthologs(limit) return
[ "def", "parse", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "self", ".", "test_only", ":", "self", ".", "test_mode", "=", "True", "if", "self", ".", "tax_ids", "is", "None", ":", "LOG", ".", "info", "(", "\"No taxon filter set; Dumping all orthologous associations.\"", ")", "else", ":", "LOG", ".", "info", "(", "\"Only the following taxa will be dumped: %s\"", ",", "self", ".", "tax_ids", ")", "self", ".", "_get_orthologs", "(", "limit", ")", "return" ]
:return: None
[ ":", "return", ":", "None" ]
python
train
mabuchilab/QNET
src/qnet/printing/sympy.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/printing/sympy.py#L50-L93
def derationalize_denom(expr): """Try to de-rationalize the denominator of the given expression. The purpose is to allow to reconstruct e.g. ``1/sqrt(2)`` from ``sqrt(2)/2``. Specifically, this matches `expr` against the following pattern:: Mul(..., Rational(n, d), Pow(d, Rational(1, 2)), ...) and returns a tuple ``(numerator, denom_sq, post_factor)``, where ``numerator`` and ``denom_sq`` are ``n`` and ``d`` in the above pattern (of type `int`), respectively, and ``post_factor`` is the product of the remaining factors (``...`` in `expr`). The result will fulfill the following identity:: (numerator / sqrt(denom_sq)) * post_factor == expr If `expr` does not follow the appropriate pattern, a :exc:`ValueError` is raised. """ r_pos = -1 p_pos = -1 numerator = S.Zero denom_sq = S.One post_factors = [] if isinstance(expr, Mul): for pos, factor in enumerate(expr.args): if isinstance(factor, Rational) and r_pos < 0: r_pos = pos numerator, denom_sq = factor.p, factor.q elif isinstance(factor, Pow) and r_pos >= 0: if factor == sqrt(denom_sq): p_pos = pos else: post_factors.append(factor) else: post_factors.append(factor) if r_pos >= 0 and p_pos >= 0: return numerator, denom_sq, Mul(*post_factors) else: raise ValueError("Cannot derationalize") else: raise ValueError("expr is not a Mul instance")
[ "def", "derationalize_denom", "(", "expr", ")", ":", "r_pos", "=", "-", "1", "p_pos", "=", "-", "1", "numerator", "=", "S", ".", "Zero", "denom_sq", "=", "S", ".", "One", "post_factors", "=", "[", "]", "if", "isinstance", "(", "expr", ",", "Mul", ")", ":", "for", "pos", ",", "factor", "in", "enumerate", "(", "expr", ".", "args", ")", ":", "if", "isinstance", "(", "factor", ",", "Rational", ")", "and", "r_pos", "<", "0", ":", "r_pos", "=", "pos", "numerator", ",", "denom_sq", "=", "factor", ".", "p", ",", "factor", ".", "q", "elif", "isinstance", "(", "factor", ",", "Pow", ")", "and", "r_pos", ">=", "0", ":", "if", "factor", "==", "sqrt", "(", "denom_sq", ")", ":", "p_pos", "=", "pos", "else", ":", "post_factors", ".", "append", "(", "factor", ")", "else", ":", "post_factors", ".", "append", "(", "factor", ")", "if", "r_pos", ">=", "0", "and", "p_pos", ">=", "0", ":", "return", "numerator", ",", "denom_sq", ",", "Mul", "(", "*", "post_factors", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot derationalize\"", ")", "else", ":", "raise", "ValueError", "(", "\"expr is not a Mul instance\"", ")" ]
Try to de-rationalize the denominator of the given expression. The purpose is to allow to reconstruct e.g. ``1/sqrt(2)`` from ``sqrt(2)/2``. Specifically, this matches `expr` against the following pattern:: Mul(..., Rational(n, d), Pow(d, Rational(1, 2)), ...) and returns a tuple ``(numerator, denom_sq, post_factor)``, where ``numerator`` and ``denom_sq`` are ``n`` and ``d`` in the above pattern (of type `int`), respectively, and ``post_factor`` is the product of the remaining factors (``...`` in `expr`). The result will fulfill the following identity:: (numerator / sqrt(denom_sq)) * post_factor == expr If `expr` does not follow the appropriate pattern, a :exc:`ValueError` is raised.
[ "Try", "to", "de", "-", "rationalize", "the", "denominator", "of", "the", "given", "expression", "." ]
python
train
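The derationalize_denom entry above matches expressions such as sqrt(2)/2 against Mul(Rational(n, d), Pow(d, 1/2), ...). A short SymPy session showing the factors it inspects; the exact ordering of args is a SymPy implementation detail, so treat that as an assumption:

from sympy import sqrt, Rational

expr = sqrt(2) / 2            # stored as Mul(1/2, sqrt(2))
print(expr.args)              # (1/2, sqrt(2)) on recent SymPy versions

for factor in expr.args:
    if isinstance(factor, Rational):
        numerator, denom_sq = factor.p, factor.q
assert sqrt(denom_sq) in expr.args   # the Pow factor is sqrt(denom_sq)
print(numerator, denom_sq)           # 1 2, i.e. expr == 1/sqrt(2)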
tweekmonster/moult
moult/filesystem_scanner.py
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/filesystem_scanner.py#L114-L149
def scan_directory(pym, directory, sentinel, installed, depth=0): '''Entry point scan that creates a PyModule instance if needed. ''' if not pym: d = os.path.abspath(directory) basename = os.path.basename(d) pym = utils.find_package(basename, installed) if not pym: version = 'DIRECTORY' if os.path.isfile(os.path.join(d, '__init__.py')): version = 'MODULE' pym = PyModule(basename, version, d) installed.insert(0, pym) else: pym.is_scan = True # Keep track of how many file scans resulted in nothing bad_scans = 0 for item in _scan_directory(directory, sentinel, depth): if os.path.isfile(item): if bad_scans > 100: # Keep in mind this counter resets if it a good scan happens # in *this* directory. If you have a module with more than 100 # files in a single directory, you should probably refactor it. log.debug('Stopping scan of directory since it looks like a data dump: %s', directory) break if not scan_file(pym, item, sentinel, installed): bad_scans += 1 else: bad_scans = 0 elif os.path.isdir(item): scan_directory(pym, item, sentinel, installed, depth + 1) return pym
[ "def", "scan_directory", "(", "pym", ",", "directory", ",", "sentinel", ",", "installed", ",", "depth", "=", "0", ")", ":", "if", "not", "pym", ":", "d", "=", "os", ".", "path", ".", "abspath", "(", "directory", ")", "basename", "=", "os", ".", "path", ".", "basename", "(", "d", ")", "pym", "=", "utils", ".", "find_package", "(", "basename", ",", "installed", ")", "if", "not", "pym", ":", "version", "=", "'DIRECTORY'", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "d", ",", "'__init__.py'", ")", ")", ":", "version", "=", "'MODULE'", "pym", "=", "PyModule", "(", "basename", ",", "version", ",", "d", ")", "installed", ".", "insert", "(", "0", ",", "pym", ")", "else", ":", "pym", ".", "is_scan", "=", "True", "# Keep track of how many file scans resulted in nothing", "bad_scans", "=", "0", "for", "item", "in", "_scan_directory", "(", "directory", ",", "sentinel", ",", "depth", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "item", ")", ":", "if", "bad_scans", ">", "100", ":", "# Keep in mind this counter resets if it a good scan happens", "# in *this* directory. If you have a module with more than 100", "# files in a single directory, you should probably refactor it.", "log", ".", "debug", "(", "'Stopping scan of directory since it looks like a data dump: %s'", ",", "directory", ")", "break", "if", "not", "scan_file", "(", "pym", ",", "item", ",", "sentinel", ",", "installed", ")", ":", "bad_scans", "+=", "1", "else", ":", "bad_scans", "=", "0", "elif", "os", ".", "path", ".", "isdir", "(", "item", ")", ":", "scan_directory", "(", "pym", ",", "item", ",", "sentinel", ",", "installed", ",", "depth", "+", "1", ")", "return", "pym" ]
Entry point scan that creates a PyModule instance if needed.
[ "Entry", "point", "scan", "that", "creates", "a", "PyModule", "instance", "if", "needed", "." ]
python
train
fermiPy/fermipy
fermipy/irfs.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/irfs.py#L846-L873
def calc_counts(skydir, ltc, event_class, event_types, egy_bins, cth_bins, fn, npts=1): """Calculate the expected counts vs. true energy and incidence angle for a source with spectral parameterization ``fn``. Parameters ---------- skydir : `~astropy.coordinate.SkyCoord` ltc : `~fermipy.irfs.LTCube` egy_bins : `~numpy.ndarray` Bin edges in observed energy in MeV. cth_bins : `~numpy.ndarray` Bin edges in cosine of the true incidence angle. npts : int Number of points by which to oversample each energy bin. """ #npts = int(np.ceil(32. / bins_per_dec(egy_bins))) egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts)) exp = calc_exp(skydir, ltc, event_class, event_types, egy_bins, cth_bins) dnde = fn.dnde(egy_bins) cnts = loglog_quad(egy_bins, exp * dnde[:, None], 0) cnts = sum_bins(cnts, 0, npts) return cnts
[ "def", "calc_counts", "(", "skydir", ",", "ltc", ",", "event_class", ",", "event_types", ",", "egy_bins", ",", "cth_bins", ",", "fn", ",", "npts", "=", "1", ")", ":", "#npts = int(np.ceil(32. / bins_per_dec(egy_bins)))", "egy_bins", "=", "np", ".", "exp", "(", "utils", ".", "split_bin_edges", "(", "np", ".", "log", "(", "egy_bins", ")", ",", "npts", ")", ")", "exp", "=", "calc_exp", "(", "skydir", ",", "ltc", ",", "event_class", ",", "event_types", ",", "egy_bins", ",", "cth_bins", ")", "dnde", "=", "fn", ".", "dnde", "(", "egy_bins", ")", "cnts", "=", "loglog_quad", "(", "egy_bins", ",", "exp", "*", "dnde", "[", ":", ",", "None", "]", ",", "0", ")", "cnts", "=", "sum_bins", "(", "cnts", ",", "0", ",", "npts", ")", "return", "cnts" ]
Calculate the expected counts vs. true energy and incidence angle for a source with spectral parameterization ``fn``. Parameters ---------- skydir : `~astropy.coordinate.SkyCoord` ltc : `~fermipy.irfs.LTCube` egy_bins : `~numpy.ndarray` Bin edges in observed energy in MeV. cth_bins : `~numpy.ndarray` Bin edges in cosine of the true incidence angle. npts : int Number of points by which to oversample each energy bin.
[ "Calculate", "the", "expected", "counts", "vs", ".", "true", "energy", "and", "incidence", "angle", "for", "a", "source", "with", "spectral", "parameterization", "fn", "." ]
python
train
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L516-L527
def array_map(ol,map_func,*args): ''' obseleted,just for compatible from elist.elist import * ol = [1,2,3,4] def map_func(ele,mul,plus): return(ele*mul+plus) array_map(ol,map_func,2,100) ''' rslt = list(map(lambda ele:map_func(ele,*args),ol)) return(rslt)
[ "def", "array_map", "(", "ol", ",", "map_func", ",", "*", "args", ")", ":", "rslt", "=", "list", "(", "map", "(", "lambda", "ele", ":", "map_func", "(", "ele", ",", "*", "args", ")", ",", "ol", ")", ")", "return", "(", "rslt", ")" ]
obseleted,just for compatible from elist.elist import * ol = [1,2,3,4] def map_func(ele,mul,plus): return(ele*mul+plus) array_map(ol,map_func,2,100)
[ "obseleted", "just", "for", "compatible", "from", "elist", ".", "elist", "import", "*", "ol", "=", "[", "1", "2", "3", "4", "]", "def", "map_func", "(", "ele", "mul", "plus", ")", ":", "return", "(", "ele", "*", "mul", "+", "plus", ")" ]
python
valid
wmayner/pyphi
pyphi/macro.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/macro.py#L289-L301
def _coarsegrain_space(coarse_grain, is_cut, system): """Spatially coarse-grain the TPM and CM.""" tpm = coarse_grain.macro_tpm( system.tpm, check_independence=(not is_cut)) node_indices = coarse_grain.macro_indices state = coarse_grain.macro_state(system.state) # Universal connectivity, for now. n = len(node_indices) cm = np.ones((n, n)) return SystemAttrs(tpm, cm, node_indices, state)
[ "def", "_coarsegrain_space", "(", "coarse_grain", ",", "is_cut", ",", "system", ")", ":", "tpm", "=", "coarse_grain", ".", "macro_tpm", "(", "system", ".", "tpm", ",", "check_independence", "=", "(", "not", "is_cut", ")", ")", "node_indices", "=", "coarse_grain", ".", "macro_indices", "state", "=", "coarse_grain", ".", "macro_state", "(", "system", ".", "state", ")", "# Universal connectivity, for now.", "n", "=", "len", "(", "node_indices", ")", "cm", "=", "np", ".", "ones", "(", "(", "n", ",", "n", ")", ")", "return", "SystemAttrs", "(", "tpm", ",", "cm", ",", "node_indices", ",", "state", ")" ]
Spatially coarse-grain the TPM and CM.
[ "Spatially", "coarse", "-", "grain", "the", "TPM", "and", "CM", "." ]
python
train
xypnox/email_purifier
epurifier/email_checker.py
https://github.com/xypnox/email_purifier/blob/a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f/epurifier/email_checker.py#L46-L52
def CorrectWrongEmails(self, askInput=True): '''Corrects Emails in wrong_emails''' for email in self.wrong_emails: corrected_email = self.CorrectEmail(email) self.emails[self.emails.index(email)] = corrected_email self.wrong_emails = []
[ "def", "CorrectWrongEmails", "(", "self", ",", "askInput", "=", "True", ")", ":", "for", "email", "in", "self", ".", "wrong_emails", ":", "corrected_email", "=", "self", ".", "CorrectEmail", "(", "email", ")", "self", ".", "emails", "[", "self", ".", "emails", ".", "index", "(", "email", ")", "]", "=", "corrected_email", "self", ".", "wrong_emails", "=", "[", "]" ]
Corrects Emails in wrong_emails
[ "Corrects", "Emails", "in", "wrong_emails" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L1254-L1371
def gumbel_softmax_discrete_bottleneck(x, bottleneck_bits, beta=0.25, decay=0.999, epsilon=1e-5, temperature_warmup_steps=150000, hard=False, summary=True): """VQ-VAE using Gumbel-Softmax. Different from `gumbel_softmax()` function as this function calculates the KL by using the discrete entropy instead of taking the argmax, and it also uses an exponential moving average to update the codebook while the `gumbel_softmax()` function includes no codebook update. Args: x: A `float`-like `Tensor` containing the latent vectors to be compared to the codebook, whose squared difference is used as the Gumbel-Softmax logits. bottleneck_bits: An `int` that sets the size of the bottleneck in `log_2`. beta: Beta factor for commitment loss (Default: 0.25). decay: Decay factor for exponential moving average (Default: 0.999). epsilon: Small value to avoid dividing by zero in EMA update (Default: 1e-5). temperature_warmup_steps: Number of steps it takes to decay temperature to 0 (Default: 150000). hard: When `True`, we use hard Gumbel-Softmax samples and force discrete latents by taking the argmax. When `False`, we use soft samples, which we treat as codebook weights (Default: False). summary: When `True`, we save histogram summaries of the KL term (Default: True). Returns: x_means_assignments: A `float`-like `Tensor` containing the codebook assignments. When `hard == True`, this is one-hot, containing the arg-max of the Gumbel-Softmax samples (and we use the straightthrough gradient). Otherwise, it contains the Gumbel-Softmax samples exactly, which are values from the `(K-1)`-simplex where `K` is the bottleneck size. loss: The loss, which is the sum of the KL between the Gumbel-Softmax and the uniform prior and the commitment loss multiplied by the beta factor. We approximate the KL by using the entropy of a categorical distribution instead of the Gumbel Softmax. """ bottleneck_size = 2**bottleneck_bits x_shape = common_layers.shape_list(x) hidden_size = x_shape[-1] means, ema_means, ema_count = get_vq_codebook(bottleneck_size, hidden_size) x = tf.reshape(x, [-1, hidden_size]) bottleneck_size = common_layers.shape_list(means)[0] x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True) means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) scalar_prod = tf.matmul(x, means, transpose_b=True) dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod class_probs = tf.nn.softmax(dist) log_class_probs = tf.nn.log_softmax(dist) gumbel_samples = gumbel_sample(common_layers.shape_list(dist)) steps = temperature_warmup_steps gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5 temperature = 1.2 - common_layers.inverse_lin_decay(steps) # 10% of the time keep reasonably high temperature to keep learning. temperature = tf.cond( tf.less(tf.random_uniform([]), 0.9), lambda: temperature, lambda: tf.random_uniform([], minval=0.5, maxval=1.0)) gumbel_softmax_samples = tf.nn.softmax( (log_class_probs + gumbel_samples) / temperature) # Calculate KL between q and a uniform prior. kl = tf.reduce_sum( class_probs * (log_class_probs - tf.log(1.0 / bottleneck_size)), -1) if summary: tf.summary.histogram("KL", tf.reshape(kl, [-1])) # Straight-through gradient estimation when we're using hard assignments. 
if hard: x_means_idx = tf.reshape(tf.argmax(gumbel_softmax_samples, axis=-1), [-1]) x_means_hot = tf.one_hot(x_means_idx, bottleneck_size) x_means_assignments = gumbel_softmax_samples + tf.stop_gradient( x_means_hot - gumbel_softmax_samples) else: x_means_assignments = gumbel_softmax_samples x_means_assignments_flat = tf.reshape(x_means_assignments, [-1, bottleneck_size]) x_means = tf.matmul(x_means_assignments_flat, means) commitment_loss = tf.reduce_mean( tf.squared_difference(x, tf.stop_gradient(x_means))) # Update the ema variables. updated_ema_count = moving_averages.assign_moving_average( ema_count, tf.reduce_sum( tf.reshape(x_means_assignments, shape=[-1, bottleneck_size]), axis=0), decay, zero_debias=False) dw = tf.matmul(x_means_assignments, x, transpose_a=True) updated_ema_means = tf.identity( moving_averages.assign_moving_average( ema_means, dw, decay, zero_debias=False)) n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True) updated_ema_count = ( (updated_ema_count + epsilon) / (n + bottleneck_size * epsilon) * n) updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1) with tf.control_dependencies([commitment_loss]): update_means = means.assign(updated_ema_means) with tf.control_dependencies([update_means]): loss = beta * commitment_loss # Add KL loss. loss += tf.reduce_mean(kl) x_means_assignments = tf.reshape(x_means_assignments, x_shape[:-1] + [bottleneck_size]) return x_means_assignments, loss
[ "def", "gumbel_softmax_discrete_bottleneck", "(", "x", ",", "bottleneck_bits", ",", "beta", "=", "0.25", ",", "decay", "=", "0.999", ",", "epsilon", "=", "1e-5", ",", "temperature_warmup_steps", "=", "150000", ",", "hard", "=", "False", ",", "summary", "=", "True", ")", ":", "bottleneck_size", "=", "2", "**", "bottleneck_bits", "x_shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "hidden_size", "=", "x_shape", "[", "-", "1", "]", "means", ",", "ema_means", ",", "ema_count", "=", "get_vq_codebook", "(", "bottleneck_size", ",", "hidden_size", ")", "x", "=", "tf", ".", "reshape", "(", "x", ",", "[", "-", "1", ",", "hidden_size", "]", ")", "bottleneck_size", "=", "common_layers", ".", "shape_list", "(", "means", ")", "[", "0", "]", "x_norm_sq", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "x", ")", ",", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", "means_norm_sq", "=", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "means", ")", ",", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", "scalar_prod", "=", "tf", ".", "matmul", "(", "x", ",", "means", ",", "transpose_b", "=", "True", ")", "dist", "=", "x_norm_sq", "+", "tf", ".", "transpose", "(", "means_norm_sq", ")", "-", "2", "*", "scalar_prod", "class_probs", "=", "tf", ".", "nn", ".", "softmax", "(", "dist", ")", "log_class_probs", "=", "tf", ".", "nn", ".", "log_softmax", "(", "dist", ")", "gumbel_samples", "=", "gumbel_sample", "(", "common_layers", ".", "shape_list", "(", "dist", ")", ")", "steps", "=", "temperature_warmup_steps", "gumbel_samples", "*=", "common_layers", ".", "inverse_exp_decay", "(", "steps", "//", "5", ")", "*", "0.5", "temperature", "=", "1.2", "-", "common_layers", ".", "inverse_lin_decay", "(", "steps", ")", "# 10% of the time keep reasonably high temperature to keep learning.", "temperature", "=", "tf", ".", "cond", "(", "tf", ".", "less", "(", "tf", ".", "random_uniform", "(", "[", "]", ")", ",", "0.9", ")", ",", "lambda", ":", "temperature", ",", "lambda", ":", "tf", ".", "random_uniform", "(", "[", "]", ",", "minval", "=", "0.5", ",", "maxval", "=", "1.0", ")", ")", "gumbel_softmax_samples", "=", "tf", ".", "nn", ".", "softmax", "(", "(", "log_class_probs", "+", "gumbel_samples", ")", "/", "temperature", ")", "# Calculate KL between q and a uniform prior.", "kl", "=", "tf", ".", "reduce_sum", "(", "class_probs", "*", "(", "log_class_probs", "-", "tf", ".", "log", "(", "1.0", "/", "bottleneck_size", ")", ")", ",", "-", "1", ")", "if", "summary", ":", "tf", ".", "summary", ".", "histogram", "(", "\"KL\"", ",", "tf", ".", "reshape", "(", "kl", ",", "[", "-", "1", "]", ")", ")", "# Straight-through gradient estimation when we're using hard assignments.", "if", "hard", ":", "x_means_idx", "=", "tf", ".", "reshape", "(", "tf", ".", "argmax", "(", "gumbel_softmax_samples", ",", "axis", "=", "-", "1", ")", ",", "[", "-", "1", "]", ")", "x_means_hot", "=", "tf", ".", "one_hot", "(", "x_means_idx", ",", "bottleneck_size", ")", "x_means_assignments", "=", "gumbel_softmax_samples", "+", "tf", ".", "stop_gradient", "(", "x_means_hot", "-", "gumbel_softmax_samples", ")", "else", ":", "x_means_assignments", "=", "gumbel_softmax_samples", "x_means_assignments_flat", "=", "tf", ".", "reshape", "(", "x_means_assignments", ",", "[", "-", "1", ",", "bottleneck_size", "]", ")", "x_means", "=", "tf", ".", "matmul", "(", "x_means_assignments_flat", ",", "means", ")", "commitment_loss", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "squared_difference", "(", "x", ",", "tf", 
".", "stop_gradient", "(", "x_means", ")", ")", ")", "# Update the ema variables.", "updated_ema_count", "=", "moving_averages", ".", "assign_moving_average", "(", "ema_count", ",", "tf", ".", "reduce_sum", "(", "tf", ".", "reshape", "(", "x_means_assignments", ",", "shape", "=", "[", "-", "1", ",", "bottleneck_size", "]", ")", ",", "axis", "=", "0", ")", ",", "decay", ",", "zero_debias", "=", "False", ")", "dw", "=", "tf", ".", "matmul", "(", "x_means_assignments", ",", "x", ",", "transpose_a", "=", "True", ")", "updated_ema_means", "=", "tf", ".", "identity", "(", "moving_averages", ".", "assign_moving_average", "(", "ema_means", ",", "dw", ",", "decay", ",", "zero_debias", "=", "False", ")", ")", "n", "=", "tf", ".", "reduce_sum", "(", "updated_ema_count", ",", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", "updated_ema_count", "=", "(", "(", "updated_ema_count", "+", "epsilon", ")", "/", "(", "n", "+", "bottleneck_size", "*", "epsilon", ")", "*", "n", ")", "updated_ema_means", "/=", "tf", ".", "expand_dims", "(", "updated_ema_count", ",", "axis", "=", "-", "1", ")", "with", "tf", ".", "control_dependencies", "(", "[", "commitment_loss", "]", ")", ":", "update_means", "=", "means", ".", "assign", "(", "updated_ema_means", ")", "with", "tf", ".", "control_dependencies", "(", "[", "update_means", "]", ")", ":", "loss", "=", "beta", "*", "commitment_loss", "# Add KL loss.", "loss", "+=", "tf", ".", "reduce_mean", "(", "kl", ")", "x_means_assignments", "=", "tf", ".", "reshape", "(", "x_means_assignments", ",", "x_shape", "[", ":", "-", "1", "]", "+", "[", "bottleneck_size", "]", ")", "return", "x_means_assignments", ",", "loss" ]
VQ-VAE using Gumbel-Softmax. Different from `gumbel_softmax()` function as this function calculates the KL by using the discrete entropy instead of taking the argmax, and it also uses an exponential moving average to update the codebook while the `gumbel_softmax()` function includes no codebook update. Args: x: A `float`-like `Tensor` containing the latent vectors to be compared to the codebook, whose squared difference is used as the Gumbel-Softmax logits. bottleneck_bits: An `int` that sets the size of the bottleneck in `log_2`. beta: Beta factor for commitment loss (Default: 0.25). decay: Decay factor for exponential moving average (Default: 0.999). epsilon: Small value to avoid dividing by zero in EMA update (Default: 1e-5). temperature_warmup_steps: Number of steps it takes to decay temperature to 0 (Default: 150000). hard: When `True`, we use hard Gumbel-Softmax samples and force discrete latents by taking the argmax. When `False`, we use soft samples, which we treat as codebook weights (Default: False). summary: When `True`, we save histogram summaries of the KL term (Default: True). Returns: x_means_assignments: A `float`-like `Tensor` containing the codebook assignments. When `hard == True`, this is one-hot, containing the arg-max of the Gumbel-Softmax samples (and we use the straightthrough gradient). Otherwise, it contains the Gumbel-Softmax samples exactly, which are values from the `(K-1)`-simplex where `K` is the bottleneck size. loss: The loss, which is the sum of the KL between the Gumbel-Softmax and the uniform prior and the commitment loss multiplied by the beta factor. We approximate the KL by using the entropy of a categorical distribution instead of the Gumbel Softmax.
[ "VQ", "-", "VAE", "using", "Gumbel", "-", "Softmax", "." ]
python
train
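The tensor2tensor entry above perturbs distance-based logits with Gumbel noise and softmaxes them at a decaying temperature. A tiny NumPy sketch of just that sampling step, without the codebook, EMA updates, or TensorFlow (logits and temperature here are arbitrary):

import numpy as np

def gumbel_softmax_sample(logits, temperature, rng):
    # Gumbel(0, 1) noise via the inverse-CDF trick: g = -log(-log(u)).
    u = rng.uniform(low=1e-9, high=1.0, size=logits.shape)
    g = -np.log(-np.log(u))
    z = (logits + g) / temperature
    z = z - z.max(axis=-1, keepdims=True)       # numerically stable softmax
    e = np.exp(z)
    return e / e.sum(axis=-1, keepdims=True)

rng = np.random.default_rng(0)
logits = np.log(np.array([0.1, 0.6, 0.3]))
soft = gumbel_softmax_sample(logits, temperature=0.5, rng=rng)
hard = np.eye(logits.size)[soft.argmax()]       # hard one-hot assignment
print(soft.round(3), hard)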
ray-project/ray
python/ray/tune/automlboard/frontend/view.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automlboard/frontend/view.py#L134-L161
def get_trial_info(current_trial): """Get job information for current trial.""" if current_trial.end_time and ("_" in current_trial.end_time): # end time is parsed from result.json and the format # is like: yyyy-mm-dd_hh-MM-ss, which will be converted # to yyyy-mm-dd hh:MM:ss here time_obj = datetime.datetime.strptime(current_trial.end_time, "%Y-%m-%d_%H-%M-%S") end_time = time_obj.strftime("%Y-%m-%d %H:%M:%S") else: end_time = current_trial.end_time if current_trial.metrics: metrics = eval(current_trial.metrics) else: metrics = None trial_info = { "trial_id": current_trial.trial_id, "job_id": current_trial.job_id, "trial_status": current_trial.trial_status, "start_time": current_trial.start_time, "end_time": end_time, "params": eval(current_trial.params.encode("utf-8")), "metrics": metrics } return trial_info
[ "def", "get_trial_info", "(", "current_trial", ")", ":", "if", "current_trial", ".", "end_time", "and", "(", "\"_\"", "in", "current_trial", ".", "end_time", ")", ":", "# end time is parsed from result.json and the format", "# is like: yyyy-mm-dd_hh-MM-ss, which will be converted", "# to yyyy-mm-dd hh:MM:ss here", "time_obj", "=", "datetime", ".", "datetime", ".", "strptime", "(", "current_trial", ".", "end_time", ",", "\"%Y-%m-%d_%H-%M-%S\"", ")", "end_time", "=", "time_obj", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "else", ":", "end_time", "=", "current_trial", ".", "end_time", "if", "current_trial", ".", "metrics", ":", "metrics", "=", "eval", "(", "current_trial", ".", "metrics", ")", "else", ":", "metrics", "=", "None", "trial_info", "=", "{", "\"trial_id\"", ":", "current_trial", ".", "trial_id", ",", "\"job_id\"", ":", "current_trial", ".", "job_id", ",", "\"trial_status\"", ":", "current_trial", ".", "trial_status", ",", "\"start_time\"", ":", "current_trial", ".", "start_time", ",", "\"end_time\"", ":", "end_time", ",", "\"params\"", ":", "eval", "(", "current_trial", ".", "params", ".", "encode", "(", "\"utf-8\"", ")", ")", ",", "\"metrics\"", ":", "metrics", "}", "return", "trial_info" ]
Get job information for current trial.
[ "Get", "job", "information", "for", "current", "trial", "." ]
python
train
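The ray entry above reformats end times recorded as yyyy-mm-dd_hh-MM-ss into yyyy-mm-dd hh:MM:ss. The same strptime/strftime round-trip in isolation (the timestamp is an invented example):

import datetime

raw = '2019-03-27_14-05-09'
parsed = datetime.datetime.strptime(raw, '%Y-%m-%d_%H-%M-%S')
print(parsed.strftime('%Y-%m-%d %H:%M:%S'))   # 2019-03-27 14:05:09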
gwpy/gwpy
gwpy/signal/filter_design.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/filter_design.py#L569-L629
def notch(frequency, sample_rate, type='iir', **kwargs): """Design a ZPK notch filter for the given frequency and sampling rate Parameters ---------- frequency : `float`, `~astropy.units.Quantity` frequency (default in Hertz) at which to apply the notch sample_rate : `float`, `~astropy.units.Quantity` number of samples per second for `TimeSeries` to which this notch filter will be applied type : `str`, optional, default: 'iir' type of filter to apply, currently only 'iir' is supported **kwargs other keyword arguments to pass to `scipy.signal.iirdesign` Returns ------- zpk : `tuple` of `complex` or `float` the filter components in digital zero-pole-gain format See Also -------- scipy.signal.iirdesign for details on the IIR filter design method Notes ----- By default a digital filter is returned, meaning the zeros and poles are given in the Z-domain in units of radians/sample. Examples -------- To create a low-pass filter at 1000 Hz for 4096 Hz-sampled data: >>> from gwpy.signal.filter_design import notch >>> n = notch(100, 4096) To view the filter, you can use the `~gwpy.plot.BodePlot`: >>> from gwpy.plot import BodePlot >>> plot = BodePlot(n, sample_rate=4096) >>> plot.show() """ frequency = Quantity(frequency, 'Hz').value sample_rate = Quantity(sample_rate, 'Hz').value nyq = 0.5 * sample_rate df = 1.0 # pylint: disable=invalid-name df2 = 0.1 low1 = (frequency - df)/nyq high1 = (frequency + df)/nyq low2 = (frequency - df2)/nyq high2 = (frequency + df2)/nyq if type == 'iir': kwargs.setdefault('gpass', 1) kwargs.setdefault('gstop', 10) kwargs.setdefault('ftype', 'ellip') return signal.iirdesign([low1, high1], [low2, high2], output='zpk', **kwargs) else: raise NotImplementedError("Generating %r notch filters has not been " "implemented yet" % type)
[ "def", "notch", "(", "frequency", ",", "sample_rate", ",", "type", "=", "'iir'", ",", "*", "*", "kwargs", ")", ":", "frequency", "=", "Quantity", "(", "frequency", ",", "'Hz'", ")", ".", "value", "sample_rate", "=", "Quantity", "(", "sample_rate", ",", "'Hz'", ")", ".", "value", "nyq", "=", "0.5", "*", "sample_rate", "df", "=", "1.0", "# pylint: disable=invalid-name", "df2", "=", "0.1", "low1", "=", "(", "frequency", "-", "df", ")", "/", "nyq", "high1", "=", "(", "frequency", "+", "df", ")", "/", "nyq", "low2", "=", "(", "frequency", "-", "df2", ")", "/", "nyq", "high2", "=", "(", "frequency", "+", "df2", ")", "/", "nyq", "if", "type", "==", "'iir'", ":", "kwargs", ".", "setdefault", "(", "'gpass'", ",", "1", ")", "kwargs", ".", "setdefault", "(", "'gstop'", ",", "10", ")", "kwargs", ".", "setdefault", "(", "'ftype'", ",", "'ellip'", ")", "return", "signal", ".", "iirdesign", "(", "[", "low1", ",", "high1", "]", ",", "[", "low2", ",", "high2", "]", ",", "output", "=", "'zpk'", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "NotImplementedError", "(", "\"Generating %r notch filters has not been \"", "\"implemented yet\"", "%", "type", ")" ]
Design a ZPK notch filter for the given frequency and sampling rate Parameters ---------- frequency : `float`, `~astropy.units.Quantity` frequency (default in Hertz) at which to apply the notch sample_rate : `float`, `~astropy.units.Quantity` number of samples per second for `TimeSeries` to which this notch filter will be applied type : `str`, optional, default: 'iir' type of filter to apply, currently only 'iir' is supported **kwargs other keyword arguments to pass to `scipy.signal.iirdesign` Returns ------- zpk : `tuple` of `complex` or `float` the filter components in digital zero-pole-gain format See Also -------- scipy.signal.iirdesign for details on the IIR filter design method Notes ----- By default a digital filter is returned, meaning the zeros and poles are given in the Z-domain in units of radians/sample. Examples -------- To create a notch filter at 100 Hz for 4096 Hz-sampled data: >>> from gwpy.signal.filter_design import notch >>> n = notch(100, 4096) To view the filter, you can use the `~gwpy.plot.BodePlot`: >>> from gwpy.plot import BodePlot >>> plot = BodePlot(n, sample_rate=4096) >>> plot.show()
[ "Design", "a", "ZPK", "notch", "filter", "for", "the", "given", "frequency", "and", "sampling", "rate" ]
python
train
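As a quick usage sketch of the notch designer above, mirroring the import shown in its docstring (the 60 Hz line frequency and 16384 Hz sample rate are illustrative values):

from gwpy.signal.filter_design import notch

# Design a digital notch at 60 Hz for data sampled at 16384 Hz; the result
# is a (zeros, poles, gain) tuple in the Z-domain.
zeros, poles, gain = notch(60, 16384)
print(len(zeros), len(poles), gain)

The returned ZPK tuple can then be visualised with the BodePlot recipe from the docstring or passed on to a filtering step.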
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L6851-L6860
def get_wfsmat(l): ''' l = ['v_7', 'v_3', 'v_1', 'v_4', ['v_4', 'v_2'], 'v_5', 'v_6', 'v_1', 'v_6', 'v_7', 'v_5', ['v_4', ['v_1', 'v_8', 'v_3', 'v_4', 'v_2', 'v_7', [['v_3', 'v_2'], 'v_4', 'v_5', 'v_1', 'v_3', 'v_1', 'v_2', 'v_5', 'v_8', 'v_8', 'v_7'], 'v_5', 'v_8', 'v_7', 'v_1', 'v_5'], 'v_6'], 'v_4', 'v_5', 'v_8', 'v_5'] get_wfs(l) ''' ltree = ListTree(l) vdescmat = ltree.desc wfsmat = matrix_map(vdescmat,lambda v,ix,iy:v['path']) wfsmat.pop(0) return(wfsmat)
[ "def", "get_wfsmat", "(", "l", ")", ":", "ltree", "=", "ListTree", "(", "l", ")", "vdescmat", "=", "ltree", ".", "desc", "wfsmat", "=", "matrix_map", "(", "vdescmat", ",", "lambda", "v", ",", "ix", ",", "iy", ":", "v", "[", "'path'", "]", ")", "wfsmat", ".", "pop", "(", "0", ")", "return", "(", "wfsmat", ")" ]
l = ['v_7', 'v_3', 'v_1', 'v_4', ['v_4', 'v_2'], 'v_5', 'v_6', 'v_1', 'v_6', 'v_7', 'v_5', ['v_4', ['v_1', 'v_8', 'v_3', 'v_4', 'v_2', 'v_7', [['v_3', 'v_2'], 'v_4', 'v_5', 'v_1', 'v_3', 'v_1', 'v_2', 'v_5', 'v_8', 'v_8', 'v_7'], 'v_5', 'v_8', 'v_7', 'v_1', 'v_5'], 'v_6'], 'v_4', 'v_5', 'v_8', 'v_5'] get_wfs(l)
[ "l", "=", "[", "v_7", "v_3", "v_1", "v_4", "[", "v_4", "v_2", "]", "v_5", "v_6", "v_1", "v_6", "v_7", "v_5", "[", "v_4", "[", "v_1", "v_8", "v_3", "v_4", "v_2", "v_7", "[[", "v_3", "v_2", "]", "v_4", "v_5", "v_1", "v_3", "v_1", "v_2", "v_5", "v_8", "v_8", "v_7", "]", "v_5", "v_8", "v_7", "v_1", "v_5", "]", "v_6", "]", "v_4", "v_5", "v_8", "v_5", "]", "get_wfs", "(", "l", ")" ]
python
valid
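Since the docstring above only lists the sample input, here is a small hedged driver that feeds a trimmed version of that nested list to get_wfsmat (the import path follows the module path in the record):

from elist.elist import get_wfsmat

# A trimmed version of the nested list from the docstring example; inner
# lists represent deeper levels of the tree.
l = ['v_7', 'v_3', ['v_4', 'v_2'], 'v_5', ['v_4', ['v_1', 'v_8'], 'v_6'], 'v_5']

# Per the snippet, each entry of the result is a node 'path' taken from the
# ListTree description matrix (with the root row popped off).
for row in get_wfsmat(l):
    print(row)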
floydhub/floyd-cli
floyd/cli/run.py
https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/cli/run.py#L227-L350
def run(ctx, cpu, gpu, env, message, data, mode, open_notebook, follow, tensorboard, gpu2, cpu2, max_runtime, task, command): """ Start a new job on FloydHub. Floyd will upload contents of the current directory and run your command. """ # cli_default is used for any option that has default value cli_default = {'description': '', 'command': ''} # Error early if more than one --env is passed. Then get the first/only # --env out of the list so all other operations work normally (they don't # expect an iterable). For details on this approach, see the comment above # the --env click option if not env: cli_default['env'] = DEFAULT_ENV env = None elif len(env) > 1: floyd_logger.error( "You passed more than one environment: {}. Please specify a single environment.".format(env) ) sys.exit(1) else: env = env[0] if not mode: cli_default['mode'] = 'command' experiment_config = ExperimentConfigManager.get_config() access_token = AuthConfigManager.get_access_token() namespace = experiment_config.namespace or access_token.username if not ProjectClient().exists(experiment_config.name, namespace=namespace): floyd_logger.error('Invalid project id, please run ' '"floyd init PROJECT_NAME" before scheduling a job.') sys.exit(1) experiment_name = "{}/{}".format(namespace, experiment_config.name) success, data_ids, show_data_info = process_data_ids(data) if not success: sys.exit(2) # Create module module_inputs = [{'name': data_str.split(':')[1], 'type': 'dir'} for data_str in data_ids] instance_type = None if gpu2: instance_type = G2_INSTANCE_TYPE elif cpu2: instance_type = C2_INSTANCE_TYPE elif gpu: instance_type = G1_INSTANCE_TYPE elif cpu: instance_type = C1_INSTANCE_TYPE if not instance_type: cli_default['instance_type'] = C1_INSTANCE_TYPE yaml_config = read_yaml_config() arch = INSTANCE_ARCH_MAP[ resolve_final_instance_type(instance_type, yaml_config, task, cli_default) ] if not validate_env(env or cli_default['env'], arch): sys.exit(3) command_str = ' '.join(command) if command_str and mode in ('jupyter', 'serve'): floyd_logger.error('Command argument "%s" cannot be used with mode: %s.\nSee http://docs.floydhub.com/guides/run_a_job/#mode for more information about run modes.', command_str, mode) # noqa sys.exit(3) if command_str == '': # set to none so it won't override floyd config command_str = None module = Module(name=experiment_name, description=message or '', command=command_str, mode=mode, family_id=experiment_config.family_id, inputs=module_inputs, env=env, instance_type=instance_type, yaml_config=yaml_config, task=task) try: module_id = ModuleClient().create(module, cli_default) except BadRequestException as e: if 'Project not found, ID' in e.message: floyd_logger.error( 'ERROR: Please run "floyd init PROJECT_NAME" before scheduling a job.') else: floyd_logger.error('ERROR: %s', e.message) sys.exit(4) floyd_logger.debug("Created module with id : %s", module_id) # Create experiment request # Get the actual command entered in the command line if max_runtime: max_runtime = int(max_runtime) full_command = get_command_line(instance_type, env, message, data, mode, open_notebook, command_str) experiment_request = ExperimentRequest(name=experiment_name, description=message, full_command=full_command, module_id=module_id, max_runtime=max_runtime, env=env, data_ids=data_ids, family_id=experiment_config.family_id, instance_type=instance_type, yaml_config=yaml_config, task=task) expt_client = ExperimentClient() expt_info = expt_client.create(experiment_request, cli_default) floyd_logger.debug("Created job : 
%s", expt_info['id']) job_name = expt_info['name'] show_new_job_info(expt_client, job_name, expt_info, mode, open_notebook, show_data_info) if follow: floyd_logger.info("\nFollow flag detected (--follow): Opening logs ...") instance_log_id = instance_log_id = get_log_id(job_name) follow_logs(instance_log_id)
[ "def", "run", "(", "ctx", ",", "cpu", ",", "gpu", ",", "env", ",", "message", ",", "data", ",", "mode", ",", "open_notebook", ",", "follow", ",", "tensorboard", ",", "gpu2", ",", "cpu2", ",", "max_runtime", ",", "task", ",", "command", ")", ":", "# cli_default is used for any option that has default value", "cli_default", "=", "{", "'description'", ":", "''", ",", "'command'", ":", "''", "}", "# Error early if more than one --env is passed. Then get the first/only", "# --env out of the list so all other operations work normally (they don't", "# expect an iterable). For details on this approach, see the comment above", "# the --env click option", "if", "not", "env", ":", "cli_default", "[", "'env'", "]", "=", "DEFAULT_ENV", "env", "=", "None", "elif", "len", "(", "env", ")", ">", "1", ":", "floyd_logger", ".", "error", "(", "\"You passed more than one environment: {}. Please specify a single environment.\"", ".", "format", "(", "env", ")", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "env", "=", "env", "[", "0", "]", "if", "not", "mode", ":", "cli_default", "[", "'mode'", "]", "=", "'command'", "experiment_config", "=", "ExperimentConfigManager", ".", "get_config", "(", ")", "access_token", "=", "AuthConfigManager", ".", "get_access_token", "(", ")", "namespace", "=", "experiment_config", ".", "namespace", "or", "access_token", ".", "username", "if", "not", "ProjectClient", "(", ")", ".", "exists", "(", "experiment_config", ".", "name", ",", "namespace", "=", "namespace", ")", ":", "floyd_logger", ".", "error", "(", "'Invalid project id, please run '", "'\"floyd init PROJECT_NAME\" before scheduling a job.'", ")", "sys", ".", "exit", "(", "1", ")", "experiment_name", "=", "\"{}/{}\"", ".", "format", "(", "namespace", ",", "experiment_config", ".", "name", ")", "success", ",", "data_ids", ",", "show_data_info", "=", "process_data_ids", "(", "data", ")", "if", "not", "success", ":", "sys", ".", "exit", "(", "2", ")", "# Create module", "module_inputs", "=", "[", "{", "'name'", ":", "data_str", ".", "split", "(", "':'", ")", "[", "1", "]", ",", "'type'", ":", "'dir'", "}", "for", "data_str", "in", "data_ids", "]", "instance_type", "=", "None", "if", "gpu2", ":", "instance_type", "=", "G2_INSTANCE_TYPE", "elif", "cpu2", ":", "instance_type", "=", "C2_INSTANCE_TYPE", "elif", "gpu", ":", "instance_type", "=", "G1_INSTANCE_TYPE", "elif", "cpu", ":", "instance_type", "=", "C1_INSTANCE_TYPE", "if", "not", "instance_type", ":", "cli_default", "[", "'instance_type'", "]", "=", "C1_INSTANCE_TYPE", "yaml_config", "=", "read_yaml_config", "(", ")", "arch", "=", "INSTANCE_ARCH_MAP", "[", "resolve_final_instance_type", "(", "instance_type", ",", "yaml_config", ",", "task", ",", "cli_default", ")", "]", "if", "not", "validate_env", "(", "env", "or", "cli_default", "[", "'env'", "]", ",", "arch", ")", ":", "sys", ".", "exit", "(", "3", ")", "command_str", "=", "' '", ".", "join", "(", "command", ")", "if", "command_str", "and", "mode", "in", "(", "'jupyter'", ",", "'serve'", ")", ":", "floyd_logger", ".", "error", "(", "'Command argument \"%s\" cannot be used with mode: %s.\\nSee http://docs.floydhub.com/guides/run_a_job/#mode for more information about run modes.'", ",", "command_str", ",", "mode", ")", "# noqa", "sys", ".", "exit", "(", "3", ")", "if", "command_str", "==", "''", ":", "# set to none so it won't override floyd config", "command_str", "=", "None", "module", "=", "Module", "(", "name", "=", "experiment_name", ",", "description", "=", "message", "or", "''", ",", "command", "=", 
"command_str", ",", "mode", "=", "mode", ",", "family_id", "=", "experiment_config", ".", "family_id", ",", "inputs", "=", "module_inputs", ",", "env", "=", "env", ",", "instance_type", "=", "instance_type", ",", "yaml_config", "=", "yaml_config", ",", "task", "=", "task", ")", "try", ":", "module_id", "=", "ModuleClient", "(", ")", ".", "create", "(", "module", ",", "cli_default", ")", "except", "BadRequestException", "as", "e", ":", "if", "'Project not found, ID'", "in", "e", ".", "message", ":", "floyd_logger", ".", "error", "(", "'ERROR: Please run \"floyd init PROJECT_NAME\" before scheduling a job.'", ")", "else", ":", "floyd_logger", ".", "error", "(", "'ERROR: %s'", ",", "e", ".", "message", ")", "sys", ".", "exit", "(", "4", ")", "floyd_logger", ".", "debug", "(", "\"Created module with id : %s\"", ",", "module_id", ")", "# Create experiment request", "# Get the actual command entered in the command line", "if", "max_runtime", ":", "max_runtime", "=", "int", "(", "max_runtime", ")", "full_command", "=", "get_command_line", "(", "instance_type", ",", "env", ",", "message", ",", "data", ",", "mode", ",", "open_notebook", ",", "command_str", ")", "experiment_request", "=", "ExperimentRequest", "(", "name", "=", "experiment_name", ",", "description", "=", "message", ",", "full_command", "=", "full_command", ",", "module_id", "=", "module_id", ",", "max_runtime", "=", "max_runtime", ",", "env", "=", "env", ",", "data_ids", "=", "data_ids", ",", "family_id", "=", "experiment_config", ".", "family_id", ",", "instance_type", "=", "instance_type", ",", "yaml_config", "=", "yaml_config", ",", "task", "=", "task", ")", "expt_client", "=", "ExperimentClient", "(", ")", "expt_info", "=", "expt_client", ".", "create", "(", "experiment_request", ",", "cli_default", ")", "floyd_logger", ".", "debug", "(", "\"Created job : %s\"", ",", "expt_info", "[", "'id'", "]", ")", "job_name", "=", "expt_info", "[", "'name'", "]", "show_new_job_info", "(", "expt_client", ",", "job_name", ",", "expt_info", ",", "mode", ",", "open_notebook", ",", "show_data_info", ")", "if", "follow", ":", "floyd_logger", ".", "info", "(", "\"\\nFollow flag detected (--follow): Opening logs ...\"", ")", "instance_log_id", "=", "instance_log_id", "=", "get_log_id", "(", "job_name", ")", "follow_logs", "(", "instance_log_id", ")" ]
Start a new job on FloydHub. Floyd will upload contents of the current directory and run your command.
[ "Start", "a", "new", "job", "on", "FloydHub", "." ]
python
train
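Most of the command above is argument plumbing; the piece worth a distilled sketch is the instance-type precedence, where --gpu2/--cpu2 win over --gpu/--cpu and the CPU default only applies when no flag is passed. This standalone rendition copies the constant names from the snippet but uses made-up values:

G2_INSTANCE_TYPE, C2_INSTANCE_TYPE = 'g2', 'c2'   # illustrative values
G1_INSTANCE_TYPE, C1_INSTANCE_TYPE = 'g1', 'c1'

def pick_instance_type(gpu=False, cpu=False, gpu2=False, cpu2=False):
    # Precedence mirrors the CLI checks: gpu2 > cpu2 > gpu > cpu.
    if gpu2:
        return G2_INSTANCE_TYPE
    if cpu2:
        return C2_INSTANCE_TYPE
    if gpu:
        return G1_INSTANCE_TYPE
    if cpu:
        return C1_INSTANCE_TYPE
    # No flag given: the real command leaves instance_type unset and records
    # the CPU default in cli_default; returning it here is a simplification.
    return C1_INSTANCE_TYPE

print(pick_instance_type(gpu=True, gpu2=True))  # -> g2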
saltstack/salt
salt/utils/aws.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/aws.py#L371-L540
def query(params=None, setname=None, requesturl=None, location=None, return_url=False, return_root=False, opts=None, provider=None, endpoint=None, product='ec2', sigver='2'): ''' Perform a query against AWS services using Signature Version 2 Signing Process. This is documented at: http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html Regions and endpoints are documented at: http://docs.aws.amazon.com/general/latest/gr/rande.html Default ``product`` is ``ec2``. Valid ``product`` names are: .. code-block: yaml - autoscaling (Auto Scaling) - cloudformation (CloudFormation) - ec2 (Elastic Compute Cloud) - elasticache (ElastiCache) - elasticbeanstalk (Elastic BeanStalk) - elasticloadbalancing (Elastic Load Balancing) - elasticmapreduce (Elastic MapReduce) - iam (Identity and Access Management) - importexport (Import/Export) - monitoring (CloudWatch) - rds (Relational Database Service) - simpledb (SimpleDB) - sns (Simple Notification Service) - sqs (Simple Queue Service) ''' if params is None: params = {} if opts is None: opts = {} function = opts.get('function', (None, product)) providers = opts.get('providers', {}) if provider is None: prov_dict = providers.get(function[1], {}).get(product, {}) if prov_dict: driver = list(list(prov_dict.keys()))[0] provider = providers.get(driver, product) else: prov_dict = providers.get(provider, {}).get(product, {}) service_url = prov_dict.get('service_url', 'amazonaws.com') if not location: location = get_location(opts, prov_dict) if endpoint is None: if not requesturl: endpoint = prov_dict.get( 'endpoint', '{0}.{1}.{2}'.format(product, location, service_url) ) requesturl = 'https://{0}/'.format(endpoint) else: endpoint = urlparse(requesturl).netloc if endpoint == '': endpoint_err = ('Could not find a valid endpoint in the ' 'requesturl: {0}. 
Looking for something ' 'like https://some.aws.endpoint/?args').format( requesturl ) log.error(endpoint_err) if return_url is True: return {'error': endpoint_err}, requesturl return {'error': endpoint_err} log.debug('Using AWS endpoint: %s', endpoint) method = 'GET' aws_api_version = prov_dict.get( 'aws_api_version', prov_dict.get( '{0}_api_version'.format(product), DEFAULT_AWS_API_VERSION ) ) # Fallback to ec2's id & key if none is found, for this component if not prov_dict.get('id', None): prov_dict['id'] = providers.get(provider, {}).get('ec2', {}).get('id', {}) prov_dict['key'] = providers.get(provider, {}).get('ec2', {}).get('key', {}) if sigver == '4': headers, requesturl = sig4( method, endpoint, params, prov_dict, aws_api_version, location, product, requesturl=requesturl ) params_with_headers = {} else: params_with_headers = sig2( method, endpoint, params, prov_dict, aws_api_version ) headers = {} attempts = 0 while attempts < AWS_MAX_RETRIES: log.debug('AWS Request: %s', requesturl) log.trace('AWS Request Parameters: %s', params_with_headers) try: result = requests.get(requesturl, headers=headers, params=params_with_headers) log.debug('AWS Response Status Code: %s', result.status_code) log.trace( 'AWS Response Text: %s', result.text ) result.raise_for_status() break except requests.exceptions.HTTPError as exc: root = ET.fromstring(exc.response.content) data = xml.to_dict(root) # check to see if we should retry the query err_code = data.get('Errors', {}).get('Error', {}).get('Code', '') if attempts < AWS_MAX_RETRIES and err_code and err_code in AWS_RETRY_CODES: attempts += 1 log.error( 'AWS Response Status Code and Error: [%s %s] %s; ' 'Attempts remaining: %s', exc.response.status_code, exc, data, attempts ) sleep_exponential_backoff(attempts) continue log.error( 'AWS Response Status Code and Error: [%s %s] %s', exc.response.status_code, exc, data ) if return_url is True: return {'error': data}, requesturl return {'error': data} else: log.error( 'AWS Response Status Code and Error: [%s %s] %s', exc.response.status_code, exc, data ) if return_url is True: return {'error': data}, requesturl return {'error': data} root = ET.fromstring(result.text) items = root[1] if return_root is True: items = root if setname: if sys.version_info < (2, 7): children_len = len(root.getchildren()) else: children_len = len(root) for item in range(0, children_len): comps = root[item].tag.split('}') if comps[1] == setname: items = root[item] ret = [] for item in items: ret.append(xml.to_dict(item)) if return_url is True: return ret, requesturl return ret
[ "def", "query", "(", "params", "=", "None", ",", "setname", "=", "None", ",", "requesturl", "=", "None", ",", "location", "=", "None", ",", "return_url", "=", "False", ",", "return_root", "=", "False", ",", "opts", "=", "None", ",", "provider", "=", "None", ",", "endpoint", "=", "None", ",", "product", "=", "'ec2'", ",", "sigver", "=", "'2'", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "if", "opts", "is", "None", ":", "opts", "=", "{", "}", "function", "=", "opts", ".", "get", "(", "'function'", ",", "(", "None", ",", "product", ")", ")", "providers", "=", "opts", ".", "get", "(", "'providers'", ",", "{", "}", ")", "if", "provider", "is", "None", ":", "prov_dict", "=", "providers", ".", "get", "(", "function", "[", "1", "]", ",", "{", "}", ")", ".", "get", "(", "product", ",", "{", "}", ")", "if", "prov_dict", ":", "driver", "=", "list", "(", "list", "(", "prov_dict", ".", "keys", "(", ")", ")", ")", "[", "0", "]", "provider", "=", "providers", ".", "get", "(", "driver", ",", "product", ")", "else", ":", "prov_dict", "=", "providers", ".", "get", "(", "provider", ",", "{", "}", ")", ".", "get", "(", "product", ",", "{", "}", ")", "service_url", "=", "prov_dict", ".", "get", "(", "'service_url'", ",", "'amazonaws.com'", ")", "if", "not", "location", ":", "location", "=", "get_location", "(", "opts", ",", "prov_dict", ")", "if", "endpoint", "is", "None", ":", "if", "not", "requesturl", ":", "endpoint", "=", "prov_dict", ".", "get", "(", "'endpoint'", ",", "'{0}.{1}.{2}'", ".", "format", "(", "product", ",", "location", ",", "service_url", ")", ")", "requesturl", "=", "'https://{0}/'", ".", "format", "(", "endpoint", ")", "else", ":", "endpoint", "=", "urlparse", "(", "requesturl", ")", ".", "netloc", "if", "endpoint", "==", "''", ":", "endpoint_err", "=", "(", "'Could not find a valid endpoint in the '", "'requesturl: {0}. 
Looking for something '", "'like https://some.aws.endpoint/?args'", ")", ".", "format", "(", "requesturl", ")", "log", ".", "error", "(", "endpoint_err", ")", "if", "return_url", "is", "True", ":", "return", "{", "'error'", ":", "endpoint_err", "}", ",", "requesturl", "return", "{", "'error'", ":", "endpoint_err", "}", "log", ".", "debug", "(", "'Using AWS endpoint: %s'", ",", "endpoint", ")", "method", "=", "'GET'", "aws_api_version", "=", "prov_dict", ".", "get", "(", "'aws_api_version'", ",", "prov_dict", ".", "get", "(", "'{0}_api_version'", ".", "format", "(", "product", ")", ",", "DEFAULT_AWS_API_VERSION", ")", ")", "# Fallback to ec2's id & key if none is found, for this component", "if", "not", "prov_dict", ".", "get", "(", "'id'", ",", "None", ")", ":", "prov_dict", "[", "'id'", "]", "=", "providers", ".", "get", "(", "provider", ",", "{", "}", ")", ".", "get", "(", "'ec2'", ",", "{", "}", ")", ".", "get", "(", "'id'", ",", "{", "}", ")", "prov_dict", "[", "'key'", "]", "=", "providers", ".", "get", "(", "provider", ",", "{", "}", ")", ".", "get", "(", "'ec2'", ",", "{", "}", ")", ".", "get", "(", "'key'", ",", "{", "}", ")", "if", "sigver", "==", "'4'", ":", "headers", ",", "requesturl", "=", "sig4", "(", "method", ",", "endpoint", ",", "params", ",", "prov_dict", ",", "aws_api_version", ",", "location", ",", "product", ",", "requesturl", "=", "requesturl", ")", "params_with_headers", "=", "{", "}", "else", ":", "params_with_headers", "=", "sig2", "(", "method", ",", "endpoint", ",", "params", ",", "prov_dict", ",", "aws_api_version", ")", "headers", "=", "{", "}", "attempts", "=", "0", "while", "attempts", "<", "AWS_MAX_RETRIES", ":", "log", ".", "debug", "(", "'AWS Request: %s'", ",", "requesturl", ")", "log", ".", "trace", "(", "'AWS Request Parameters: %s'", ",", "params_with_headers", ")", "try", ":", "result", "=", "requests", ".", "get", "(", "requesturl", ",", "headers", "=", "headers", ",", "params", "=", "params_with_headers", ")", "log", ".", "debug", "(", "'AWS Response Status Code: %s'", ",", "result", ".", "status_code", ")", "log", ".", "trace", "(", "'AWS Response Text: %s'", ",", "result", ".", "text", ")", "result", ".", "raise_for_status", "(", ")", "break", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "exc", ":", "root", "=", "ET", ".", "fromstring", "(", "exc", ".", "response", ".", "content", ")", "data", "=", "xml", ".", "to_dict", "(", "root", ")", "# check to see if we should retry the query", "err_code", "=", "data", ".", "get", "(", "'Errors'", ",", "{", "}", ")", ".", "get", "(", "'Error'", ",", "{", "}", ")", ".", "get", "(", "'Code'", ",", "''", ")", "if", "attempts", "<", "AWS_MAX_RETRIES", "and", "err_code", "and", "err_code", "in", "AWS_RETRY_CODES", ":", "attempts", "+=", "1", "log", ".", "error", "(", "'AWS Response Status Code and Error: [%s %s] %s; '", "'Attempts remaining: %s'", ",", "exc", ".", "response", ".", "status_code", ",", "exc", ",", "data", ",", "attempts", ")", "sleep_exponential_backoff", "(", "attempts", ")", "continue", "log", ".", "error", "(", "'AWS Response Status Code and Error: [%s %s] %s'", ",", "exc", ".", "response", ".", "status_code", ",", "exc", ",", "data", ")", "if", "return_url", "is", "True", ":", "return", "{", "'error'", ":", "data", "}", ",", "requesturl", "return", "{", "'error'", ":", "data", "}", "else", ":", "log", ".", "error", "(", "'AWS Response Status Code and Error: [%s %s] %s'", ",", "exc", ".", "response", ".", "status_code", ",", "exc", ",", "data", ")", "if", "return_url", 
"is", "True", ":", "return", "{", "'error'", ":", "data", "}", ",", "requesturl", "return", "{", "'error'", ":", "data", "}", "root", "=", "ET", ".", "fromstring", "(", "result", ".", "text", ")", "items", "=", "root", "[", "1", "]", "if", "return_root", "is", "True", ":", "items", "=", "root", "if", "setname", ":", "if", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", ":", "children_len", "=", "len", "(", "root", ".", "getchildren", "(", ")", ")", "else", ":", "children_len", "=", "len", "(", "root", ")", "for", "item", "in", "range", "(", "0", ",", "children_len", ")", ":", "comps", "=", "root", "[", "item", "]", ".", "tag", ".", "split", "(", "'}'", ")", "if", "comps", "[", "1", "]", "==", "setname", ":", "items", "=", "root", "[", "item", "]", "ret", "=", "[", "]", "for", "item", "in", "items", ":", "ret", ".", "append", "(", "xml", ".", "to_dict", "(", "item", ")", ")", "if", "return_url", "is", "True", ":", "return", "ret", ",", "requesturl", "return", "ret" ]
Perform a query against AWS services using Signature Version 2 Signing Process. This is documented at: http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html Regions and endpoints are documented at: http://docs.aws.amazon.com/general/latest/gr/rande.html Default ``product`` is ``ec2``. Valid ``product`` names are: .. code-block:: yaml - autoscaling (Auto Scaling) - cloudformation (CloudFormation) - ec2 (Elastic Compute Cloud) - elasticache (ElastiCache) - elasticbeanstalk (Elastic BeanStalk) - elasticloadbalancing (Elastic Load Balancing) - elasticmapreduce (Elastic MapReduce) - iam (Identity and Access Management) - importexport (Import/Export) - monitoring (CloudWatch) - rds (Relational Database Service) - simpledb (SimpleDB) - sns (Simple Notification Service) - sqs (Simple Queue Service)
[ "Perform", "a", "query", "against", "AWS", "services", "using", "Signature", "Version", "2", "Signing", "Process", ".", "This", "is", "documented", "at", ":" ]
python
train
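A hedged sketch of calling the query helper above to list EC2 instances. It would perform a real signed HTTP request, so the credentials, provider name and region below are placeholders, and the import path is inferred from the record's module path:

from salt.utils.aws import query

# `opts` must carry provider credentials where the function looks for them
# (opts['providers'][provider][product]['id'/'key']); values are fake here.
opts = {
    'providers': {
        'my-ec2': {'ec2': {'id': 'AKIA...', 'key': 'SECRET...'}},
    },
}

result = query(
    params={'Action': 'DescribeInstances'},
    opts=opts,
    provider='my-ec2',
    product='ec2',
    location='us-east-1',
    sigver='4',
)
print(result)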
tkem/uritools
uritools/encoding.py
https://github.com/tkem/uritools/blob/e77ba4acd937b68da9850138563debd4c925ef9f/uritools/encoding.py#L40-L53
def uriencode(uristring, safe='', encoding='utf-8', errors='strict'): """Encode a URI string or string component.""" if not isinstance(uristring, bytes): uristring = uristring.encode(encoding, errors) if not isinstance(safe, bytes): safe = safe.encode('ascii') try: encoded = _encoded[safe] except KeyError: encoded = _encoded[b''][:] for i in _tointseq(safe): encoded[i] = _fromint(i) _encoded[safe] = encoded return b''.join(map(encoded.__getitem__, _tointseq(uristring)))
[ "def", "uriencode", "(", "uristring", ",", "safe", "=", "''", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'strict'", ")", ":", "if", "not", "isinstance", "(", "uristring", ",", "bytes", ")", ":", "uristring", "=", "uristring", ".", "encode", "(", "encoding", ",", "errors", ")", "if", "not", "isinstance", "(", "safe", ",", "bytes", ")", ":", "safe", "=", "safe", ".", "encode", "(", "'ascii'", ")", "try", ":", "encoded", "=", "_encoded", "[", "safe", "]", "except", "KeyError", ":", "encoded", "=", "_encoded", "[", "b''", "]", "[", ":", "]", "for", "i", "in", "_tointseq", "(", "safe", ")", ":", "encoded", "[", "i", "]", "=", "_fromint", "(", "i", ")", "_encoded", "[", "safe", "]", "=", "encoded", "return", "b''", ".", "join", "(", "map", "(", "encoded", ".", "__getitem__", ",", "_tointseq", "(", "uristring", ")", ")", ")" ]
Encode a URI string or string component.
[ "Encode", "a", "URI", "string", "or", "string", "component", "." ]
python
train
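A short usage sketch of uriencode; the expected outputs follow from percent-encoding everything outside the unreserved set except characters listed in safe, but are worth re-checking against the installed uritools version:

from uritools import uriencode

# Reserved characters are percent-encoded unless listed in `safe`.
print(uriencode('hello world/index.html'))             # b'hello%20world%2Findex.html'
print(uriencode('hello world/index.html', safe='/'))   # b'hello%20world/index.html'

# Non-ASCII input is first encoded with the given codec, then escaped.
print(uriencode(u'snowman \u2603'))                     # b'snowman%20%E2%98%83'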
lepture/python-livereload
livereload/server.py
https://github.com/lepture/python-livereload/blob/f80cb3ae0f8f2cdf38203a712fe25ef7f1899c34/livereload/server.py#L181-L209
def watch(self, filepath, func=None, delay=None, ignore=None): """Add the given filepath for watcher list. Once you have intialized a server, watch file changes before serve the server:: server.watch('static/*.stylus', 'make static') def alert(): print('foo') server.watch('foo.txt', alert) server.serve() :param filepath: files to be watched, it can be a filepath, a directory, or a glob pattern :param func: the function to be called, it can be a string of shell command, or any callable object without parameters :param delay: Delay sending the reload message. Use 'forever' to not send it. This is useful to compile sass files to css, but reload on changed css files then only. :param ignore: A function return True to ignore a certain pattern of filepath. """ if isinstance(func, string_types): cmd = func func = shell(func) func.name = "shell: {}".format(cmd) self.watcher.watch(filepath, func, delay, ignore=ignore)
[ "def", "watch", "(", "self", ",", "filepath", ",", "func", "=", "None", ",", "delay", "=", "None", ",", "ignore", "=", "None", ")", ":", "if", "isinstance", "(", "func", ",", "string_types", ")", ":", "cmd", "=", "func", "func", "=", "shell", "(", "func", ")", "func", ".", "name", "=", "\"shell: {}\"", ".", "format", "(", "cmd", ")", "self", ".", "watcher", ".", "watch", "(", "filepath", ",", "func", ",", "delay", ",", "ignore", "=", "ignore", ")" ]
Add the given filepath for watcher list. Once you have initialized a server, watch file changes before serving the server:: server.watch('static/*.stylus', 'make static') def alert(): print('foo') server.watch('foo.txt', alert) server.serve() :param filepath: files to be watched, it can be a filepath, a directory, or a glob pattern :param func: the function to be called, it can be a string of shell command, or any callable object without parameters :param delay: Delay sending the reload message. Use 'forever' to not send it. This is useful to compile sass files to css, but reload on changed css files then only. :param ignore: A function that returns True to ignore a certain pattern of filepath.
[ "Add", "the", "given", "filepath", "for", "watcher", "list", "." ]
python
train
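A compact end-to-end sketch of the watch method above on a livereload Server; the globs and the rebuild command are placeholders:

from livereload import Server

def rebuild():
    print('templates changed, rebuilding...')

server = Server()
# A plain string is wrapped into a shell command by watch().
server.watch('docs/*.rst', 'make html', delay=2)
# A callable is invoked directly when the glob matches a change.
server.watch('templates/*.html', rebuild)
server.serve(port=5500, root='build/html')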
gregreen/dustmaps
dustmaps/json_serializers.py
https://github.com/gregreen/dustmaps/blob/c8f571a71da0d951bf8ea865621bee14492bdfd9/dustmaps/json_serializers.py#L308-L380
def get_encoder(ndarray_mode='b64'): """ Returns a JSON encoder that can handle: * :obj:`numpy.ndarray` * :obj:`numpy.floating` (converted to :obj:`float`) * :obj:`numpy.integer` (converted to :obj:`int`) * :obj:`numpy.dtype` * :obj:`astropy.units.Quantity` * :obj:`astropy.coordinates.SkyCoord` Args: ndarray_mode (Optional[:obj:`str`]): Which method to use to serialize :obj:`numpy.ndarray` objects. Defaults to :obj:`'b64'`, which converts the array data to binary64 encoding (non-human-readable), and stores the datatype/shape in human-readable formats. Other options are :obj:`'readable'`, which produces fully human-readable output, and :obj:`'npy'`, which uses numpy's built-in :obj:`save` function and produces completely unreadable output. Of all the methods :obj:`'npy'` is the most reliable, but also least human-readable. :obj:`'readable'` produces the most human-readable output, but is the least reliable and loses precision. Returns: A subclass of :obj:`json.JSONEncoder`. """ # Use specified numpy.ndarray serialization mode serialize_fns = { 'b64': serialize_ndarray_b64, 'readable': serialize_ndarray_readable, 'npy': serialize_ndarray_npy} if ndarray_mode not in serialize_fns: raise ValueError('"ndarray_mode" must be one of {}'.format( serialize_fns.keys)) serialize_ndarray = serialize_fns[ndarray_mode] class MultiJSONEncoder(json.JSONEncoder): """ A JSON encoder that can handle: * :obj:`numpy.ndarray` * :obj:`numpy.floating` (converted to :obj:`float`) * :obj:`numpy.integer` (converted to :obj:`int`) * :obj:`numpy.dtype` * :obj:`astropy.units.Quantity` * :obj:`astropy.coordinates.SkyCoord` """ def default(self, o): if isinstance(o, coords.SkyCoord): return serialize_skycoord(o) if isinstance(o, units.Quantity): return serialize_quantity(o) elif isinstance(o, np.ndarray): return serialize_ndarray(o) elif isinstance(o, np.dtype): return serialize_dtype(o) elif isinstance(o, np.floating): return float(o) elif isinstance(o, np.integer): return int(o) elif isinstance(o, np.bool_): return bool(o) elif isinstance(o, np.void): try: o = np.array(o) except: pass else: return o return json.JSONEncoder.default(self, o) return MultiJSONEncoder
[ "def", "get_encoder", "(", "ndarray_mode", "=", "'b64'", ")", ":", "# Use specified numpy.ndarray serialization mode", "serialize_fns", "=", "{", "'b64'", ":", "serialize_ndarray_b64", ",", "'readable'", ":", "serialize_ndarray_readable", ",", "'npy'", ":", "serialize_ndarray_npy", "}", "if", "ndarray_mode", "not", "in", "serialize_fns", ":", "raise", "ValueError", "(", "'\"ndarray_mode\" must be one of {}'", ".", "format", "(", "serialize_fns", ".", "keys", ")", ")", "serialize_ndarray", "=", "serialize_fns", "[", "ndarray_mode", "]", "class", "MultiJSONEncoder", "(", "json", ".", "JSONEncoder", ")", ":", "\"\"\"\n A JSON encoder that can handle:\n * :obj:`numpy.ndarray`\n * :obj:`numpy.floating` (converted to :obj:`float`)\n * :obj:`numpy.integer` (converted to :obj:`int`)\n * :obj:`numpy.dtype`\n * :obj:`astropy.units.Quantity`\n * :obj:`astropy.coordinates.SkyCoord`\n \"\"\"", "def", "default", "(", "self", ",", "o", ")", ":", "if", "isinstance", "(", "o", ",", "coords", ".", "SkyCoord", ")", ":", "return", "serialize_skycoord", "(", "o", ")", "if", "isinstance", "(", "o", ",", "units", ".", "Quantity", ")", ":", "return", "serialize_quantity", "(", "o", ")", "elif", "isinstance", "(", "o", ",", "np", ".", "ndarray", ")", ":", "return", "serialize_ndarray", "(", "o", ")", "elif", "isinstance", "(", "o", ",", "np", ".", "dtype", ")", ":", "return", "serialize_dtype", "(", "o", ")", "elif", "isinstance", "(", "o", ",", "np", ".", "floating", ")", ":", "return", "float", "(", "o", ")", "elif", "isinstance", "(", "o", ",", "np", ".", "integer", ")", ":", "return", "int", "(", "o", ")", "elif", "isinstance", "(", "o", ",", "np", ".", "bool_", ")", ":", "return", "bool", "(", "o", ")", "elif", "isinstance", "(", "o", ",", "np", ".", "void", ")", ":", "try", ":", "o", "=", "np", ".", "array", "(", "o", ")", "except", ":", "pass", "else", ":", "return", "o", "return", "json", ".", "JSONEncoder", ".", "default", "(", "self", ",", "o", ")", "return", "MultiJSONEncoder" ]
Returns a JSON encoder that can handle: * :obj:`numpy.ndarray` * :obj:`numpy.floating` (converted to :obj:`float`) * :obj:`numpy.integer` (converted to :obj:`int`) * :obj:`numpy.dtype` * :obj:`astropy.units.Quantity` * :obj:`astropy.coordinates.SkyCoord` Args: ndarray_mode (Optional[:obj:`str`]): Which method to use to serialize :obj:`numpy.ndarray` objects. Defaults to :obj:`'b64'`, which converts the array data to binary64 encoding (non-human-readable), and stores the datatype/shape in human-readable formats. Other options are :obj:`'readable'`, which produces fully human-readable output, and :obj:`'npy'`, which uses numpy's built-in :obj:`save` function and produces completely unreadable output. Of all the methods :obj:`'npy'` is the most reliable, but also least human-readable. :obj:`'readable'` produces the most human-readable output, but is the least reliable and loses precision. Returns: A subclass of :obj:`json.JSONEncoder`.
[ "Returns", "a", "JSON", "encoder", "that", "can", "handle", ":", "*", ":", "obj", ":", "numpy", ".", "ndarray", "*", ":", "obj", ":", "numpy", ".", "floating", "(", "converted", "to", ":", "obj", ":", "float", ")", "*", ":", "obj", ":", "numpy", ".", "integer", "(", "converted", "to", ":", "obj", ":", "int", ")", "*", ":", "obj", ":", "numpy", ".", "dtype", "*", ":", "obj", ":", "astropy", ".", "units", ".", "Quantity", "*", ":", "obj", ":", "astropy", ".", "coordinates", ".", "SkyCoord" ]
python
train
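A usage sketch of get_encoder with the human-readable ndarray mode; the returned class plugs into json.dumps via cls=, and the exact serialized layout depends on the serializer helpers referenced above:

import json
import numpy as np
from dustmaps.json_serializers import get_encoder

encoder_cls = get_encoder(ndarray_mode='readable')
payload = {'values': np.linspace(0.0, 1.0, 3), 'count': np.int64(3)}
# ndarray, numpy scalar and plain types are all handled by the encoder.
print(json.dumps(payload, cls=encoder_cls))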
quizl/quizler
quizler/utils.py
https://github.com/quizl/quizler/blob/44b3fd91f7074e7013ffde8147455f45ebdccc46/quizler/utils.py#L32-L44
def get_common_terms(*api_envs): """Get all term duplicates across all user word sets as a list of (title of first word set, title of second word set, set of terms) tuples.""" common_terms = [] # pylint: disable=no-value-for-parameter wordsets = get_user_sets(*api_envs) # pylint: enable=no-value-for-parameter for wordset1, wordset2 in combinations(wordsets, 2): common = wordset1.has_common(wordset2) if common: common_terms.append((wordset1.title, wordset2.title, common)) return common_terms
[ "def", "get_common_terms", "(", "*", "api_envs", ")", ":", "common_terms", "=", "[", "]", "# pylint: disable=no-value-for-parameter", "wordsets", "=", "get_user_sets", "(", "*", "api_envs", ")", "# pylint: enable=no-value-for-parameter", "for", "wordset1", ",", "wordset2", "in", "combinations", "(", "wordsets", ",", "2", ")", ":", "common", "=", "wordset1", ".", "has_common", "(", "wordset2", ")", "if", "common", ":", "common_terms", ".", "append", "(", "(", "wordset1", ".", "title", ",", "wordset2", ".", "title", ",", "common", ")", ")", "return", "common_terms" ]
Get all term duplicates across all user word sets as a list of (title of first word set, title of second word set, set of terms) tuples.
[ "Get", "all", "term", "duplicates", "across", "all", "user", "word", "sets", "as", "a", "list", "of", "(", "title", "of", "first", "word", "set", "title", "of", "second", "word", "set", "set", "of", "terms", ")", "tuples", "." ]
python
train
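The helper above yields (title, title, shared-term-set) tuples; a hedged consumption sketch (the api_envs placeholder stands for whatever credential or environment arguments get_user_sets expects, which are forwarded untouched):

from quizler.utils import get_common_terms

api_envs = ()  # placeholder: real values depend on the Quizlet API setup

for title1, title2, terms in get_common_terms(*api_envs):
    print('{} <-> {}: {} shared terms'.format(title1, title2, len(terms)))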
mental32/spotify.py
spotify/models/player.py
https://github.com/mental32/spotify.py/blob/bb296cac7c3dd289908906b7069bd80f43950515/spotify/models/player.py#L137-L146
async def next(self, *, device: Optional[SomeDevice] = None): """Skips to next track in the user’s queue. Parameters ---------- device : Optional[:obj:`SomeDevice`] The Device object or id of the device this command is targeting. If not supplied, the user’s currently active device is the target. """ await self._user.http.skip_next(device_id=str(device))
[ "async", "def", "next", "(", "self", ",", "*", ",", "device", ":", "Optional", "[", "SomeDevice", "]", "=", "None", ")", ":", "await", "self", ".", "_user", ".", "http", ".", "skip_next", "(", "device_id", "=", "str", "(", "device", ")", ")" ]
Skips to next track in the user’s queue. Parameters ---------- device : Optional[:obj:`SomeDevice`] The Device object or id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
[ "Skips", "to", "next", "track", "in", "the", "user’s", "queue", "." ]
python
test
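next is a coroutine on the player object, so it must be awaited; a minimal hedged wrapper (obtaining the player and device is out of scope here, and a plain device id string should also work since the call stringifies whatever it is given):

async def skip_track(player, device=None):
    # No device argument targets the user's currently active device.
    if device is None:
        await player.next()
    else:
        await player.next(device=device)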
theolind/pymysensors
mysensors/gateway_tcp.py
https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/gateway_tcp.py#L127-L161
def _connect(self): """Connect to the socket.""" try: while True: _LOGGER.info('Trying to connect to %s', self.server_address) try: yield from asyncio.wait_for( self.loop.create_connection( lambda: self.protocol, *self.server_address), self.reconnect_timeout, loop=self.loop) self.tcp_check_timer = time.time() self.tcp_disconnect_timer = time.time() self._check_connection() return except asyncio.TimeoutError: _LOGGER.error( 'Connecting to socket timed out for %s', self.server_address) _LOGGER.info( 'Waiting %s secs before trying to connect again', self.reconnect_timeout) yield from asyncio.sleep( self.reconnect_timeout, loop=self.loop) except OSError: _LOGGER.error( 'Failed to connect to socket at %s', self.server_address) _LOGGER.info( 'Waiting %s secs before trying to connect again', self.reconnect_timeout) yield from asyncio.sleep( self.reconnect_timeout, loop=self.loop) except asyncio.CancelledError: _LOGGER.debug( 'Connect attempt to %s cancelled', self.server_address)
[ "def", "_connect", "(", "self", ")", ":", "try", ":", "while", "True", ":", "_LOGGER", ".", "info", "(", "'Trying to connect to %s'", ",", "self", ".", "server_address", ")", "try", ":", "yield", "from", "asyncio", ".", "wait_for", "(", "self", ".", "loop", ".", "create_connection", "(", "lambda", ":", "self", ".", "protocol", ",", "*", "self", ".", "server_address", ")", ",", "self", ".", "reconnect_timeout", ",", "loop", "=", "self", ".", "loop", ")", "self", ".", "tcp_check_timer", "=", "time", ".", "time", "(", ")", "self", ".", "tcp_disconnect_timer", "=", "time", ".", "time", "(", ")", "self", ".", "_check_connection", "(", ")", "return", "except", "asyncio", ".", "TimeoutError", ":", "_LOGGER", ".", "error", "(", "'Connecting to socket timed out for %s'", ",", "self", ".", "server_address", ")", "_LOGGER", ".", "info", "(", "'Waiting %s secs before trying to connect again'", ",", "self", ".", "reconnect_timeout", ")", "yield", "from", "asyncio", ".", "sleep", "(", "self", ".", "reconnect_timeout", ",", "loop", "=", "self", ".", "loop", ")", "except", "OSError", ":", "_LOGGER", ".", "error", "(", "'Failed to connect to socket at %s'", ",", "self", ".", "server_address", ")", "_LOGGER", ".", "info", "(", "'Waiting %s secs before trying to connect again'", ",", "self", ".", "reconnect_timeout", ")", "yield", "from", "asyncio", ".", "sleep", "(", "self", ".", "reconnect_timeout", ",", "loop", "=", "self", ".", "loop", ")", "except", "asyncio", ".", "CancelledError", ":", "_LOGGER", ".", "debug", "(", "'Connect attempt to %s cancelled'", ",", "self", ".", "server_address", ")" ]
Connect to the socket.
[ "Connect", "to", "the", "socket", "." ]
python
train
NuGrid/NuGridPy
nugridpy/mesa.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L3725-L3774
def _read_mesafile(filename,data_rows=0,only='all'): """ private routine that is not directly called by the user""" f=open(filename,'r') vv=[] v=[] lines = [] line = '' for i in range(0,6): line = f.readline() lines.extend([line]) hval = lines[2].split() hlist = lines[1].split() header_attr = {} for a,b in zip(hlist,hval): header_attr[a] = float(b) if only is 'header_attr': return header_attr cols = {} colnum = lines[4].split() colname = lines[5].split() for a,b in zip(colname,colnum): cols[a] = int(b) data = [] old_percent = 0 for i in range(data_rows): # writing reading status percent = int(i*100/np.max([1, data_rows-1])) if percent >= old_percent + 5: sys.stdout.flush() sys.stdout.write("\r reading " + "...%d%%" % percent) old_percent = percent line = f.readline() v=line.split() try: vv=np.array(v,dtype='float64') except ValueError: for item in v: if item.__contains__('.') and not item.__contains__('E'): v[v.index(item)]='0' data.append(vv) print(' \n') f.close() a=np.array(data) data = [] return header_attr, cols, a
[ "def", "_read_mesafile", "(", "filename", ",", "data_rows", "=", "0", ",", "only", "=", "'all'", ")", ":", "f", "=", "open", "(", "filename", ",", "'r'", ")", "vv", "=", "[", "]", "v", "=", "[", "]", "lines", "=", "[", "]", "line", "=", "''", "for", "i", "in", "range", "(", "0", ",", "6", ")", ":", "line", "=", "f", ".", "readline", "(", ")", "lines", ".", "extend", "(", "[", "line", "]", ")", "hval", "=", "lines", "[", "2", "]", ".", "split", "(", ")", "hlist", "=", "lines", "[", "1", "]", ".", "split", "(", ")", "header_attr", "=", "{", "}", "for", "a", ",", "b", "in", "zip", "(", "hlist", ",", "hval", ")", ":", "header_attr", "[", "a", "]", "=", "float", "(", "b", ")", "if", "only", "is", "'header_attr'", ":", "return", "header_attr", "cols", "=", "{", "}", "colnum", "=", "lines", "[", "4", "]", ".", "split", "(", ")", "colname", "=", "lines", "[", "5", "]", ".", "split", "(", ")", "for", "a", ",", "b", "in", "zip", "(", "colname", ",", "colnum", ")", ":", "cols", "[", "a", "]", "=", "int", "(", "b", ")", "data", "=", "[", "]", "old_percent", "=", "0", "for", "i", "in", "range", "(", "data_rows", ")", ":", "# writing reading status", "percent", "=", "int", "(", "i", "*", "100", "/", "np", ".", "max", "(", "[", "1", ",", "data_rows", "-", "1", "]", ")", ")", "if", "percent", ">=", "old_percent", "+", "5", ":", "sys", ".", "stdout", ".", "flush", "(", ")", "sys", ".", "stdout", ".", "write", "(", "\"\\r reading \"", "+", "\"...%d%%\"", "%", "percent", ")", "old_percent", "=", "percent", "line", "=", "f", ".", "readline", "(", ")", "v", "=", "line", ".", "split", "(", ")", "try", ":", "vv", "=", "np", ".", "array", "(", "v", ",", "dtype", "=", "'float64'", ")", "except", "ValueError", ":", "for", "item", "in", "v", ":", "if", "item", ".", "__contains__", "(", "'.'", ")", "and", "not", "item", ".", "__contains__", "(", "'E'", ")", ":", "v", "[", "v", ".", "index", "(", "item", ")", "]", "=", "'0'", "data", ".", "append", "(", "vv", ")", "print", "(", "' \\n'", ")", "f", ".", "close", "(", ")", "a", "=", "np", ".", "array", "(", "data", ")", "data", "=", "[", "]", "return", "header_attr", ",", "cols", ",", "a" ]
private routine that is not directly called by the user
[ "private", "routine", "that", "is", "not", "directly", "called", "by", "the", "user" ]
python
train
widdowquinn/pyani
bin/genbank_get_genomes_by_taxon.py
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L263-L275
def extract_filestem(data): """Extract filestem from Entrez eSummary data. Function expects esummary['DocumentSummarySet']['DocumentSummary'][0] Some illegal characters may occur in AssemblyName - for these, a more robust regex replace/escape may be required. Sadly, NCBI don't just use standard percent escapes, but instead replace certain characters with underscores: white space, slash, comma, hash, brackets. """ escapes = re.compile(r"[\s/,#\(\)]") escname = re.sub(escapes, '_', data['AssemblyName']) return '_'.join([data['AssemblyAccession'], escname])
[ "def", "extract_filestem", "(", "data", ")", ":", "escapes", "=", "re", ".", "compile", "(", "r\"[\\s/,#\\(\\)]\"", ")", "escname", "=", "re", ".", "sub", "(", "escapes", ",", "'_'", ",", "data", "[", "'AssemblyName'", "]", ")", "return", "'_'", ".", "join", "(", "[", "data", "[", "'AssemblyAccession'", "]", ",", "escname", "]", ")" ]
Extract filestem from Entrez eSummary data. Function expects esummary['DocumentSummarySet']['DocumentSummary'][0] Some illegal characters may occur in AssemblyName - for these, a more robust regex replace/escape may be required. Sadly, NCBI don't just use standard percent escapes, but instead replace certain characters with underscores: white space, slash, comma, hash, brackets.
[ "Extract", "filestem", "from", "Entrez", "eSummary", "data", "." ]
python
train
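A worked example of the filestem construction above, restated as a standalone function so it runs without the rest of the script; the assembly data is illustrative:

import re

def extract_filestem(data):
    # Same logic as the record above: replace whitespace, slashes, commas,
    # hashes and brackets in AssemblyName with underscores, then prefix the
    # assembly accession.
    escapes = re.compile(r"[\s/,#\(\)]")
    escname = re.sub(escapes, '_', data['AssemblyName'])
    return '_'.join([data['AssemblyAccession'], escname])

entry = {'AssemblyAccession': 'GCF_000005845.2',
         'AssemblyName': 'ASM584v2 (E. coli/K-12)'}
print(extract_filestem(entry))  # -> GCF_000005845.2_ASM584v2__E._coli_K-12_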
django-import-export/django-import-export
import_export/resources.py
https://github.com/django-import-export/django-import-export/blob/127f00d03fd0ad282615b064b7f444a639e6ff0c/import_export/resources.py#L297-L310
def save_instance(self, instance, using_transactions=True, dry_run=False): """ Takes care of saving the object to the database. Keep in mind that this is done by calling ``instance.save()``, so objects are not created in bulk! """ self.before_save_instance(instance, using_transactions, dry_run) if not using_transactions and dry_run: # we don't have transactions and we want to do a dry_run pass else: instance.save() self.after_save_instance(instance, using_transactions, dry_run)
[ "def", "save_instance", "(", "self", ",", "instance", ",", "using_transactions", "=", "True", ",", "dry_run", "=", "False", ")", ":", "self", ".", "before_save_instance", "(", "instance", ",", "using_transactions", ",", "dry_run", ")", "if", "not", "using_transactions", "and", "dry_run", ":", "# we don't have transactions and we want to do a dry_run", "pass", "else", ":", "instance", ".", "save", "(", ")", "self", ".", "after_save_instance", "(", "instance", ",", "using_transactions", ",", "dry_run", ")" ]
Takes care of saving the object to the database. Keep in mind that this is done by calling ``instance.save()``, so objects are not created in bulk!
[ "Takes", "care", "of", "saving", "the", "object", "to", "the", "database", "." ]
python
train
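The hooks called around save_instance are the usual customization point; a hedged sketch of a resource that stamps a flag right before each row is saved (the Book model and its imported field are made-up placeholders):

from import_export import resources
from myapp.models import Book  # placeholder model with an `imported` BooleanField

class BookResource(resources.ModelResource):
    class Meta:
        model = Book

    def before_save_instance(self, instance, using_transactions, dry_run):
        # Invoked by save_instance() just before instance.save(); dry-run
        # imports still reach this hook, they simply skip the actual save.
        instance.imported = True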
Clinical-Genomics/scout
scout/commands/update/genes.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/commands/update/genes.py#L42-L106
def genes(context, build, api_key): """ Load the hgnc aliases to the mongo database. """ LOG.info("Running scout update genes") adapter = context.obj['adapter'] # Fetch the omim information api_key = api_key or context.obj.get('omim_api_key') if not api_key: LOG.warning("Please provide a omim api key to load the omim gene panel") context.abort() try: mim_files = fetch_mim_files(api_key, mim2genes=True, morbidmap=True, genemap2=True) except Exception as err: LOG.warning(err) context.abort() LOG.warning("Dropping all gene information") adapter.drop_genes(build) LOG.info("Genes dropped") LOG.warning("Dropping all transcript information") adapter.drop_transcripts(build) LOG.info("transcripts dropped") hpo_genes = fetch_hpo_genes() if build: builds = [build] else: builds = ['37', '38'] hgnc_lines = fetch_hgnc() exac_lines = fetch_exac_constraint() for build in builds: ensembl_genes = fetch_ensembl_genes(build=build) # load the genes hgnc_genes = load_hgnc_genes( adapter=adapter, ensembl_lines=ensembl_genes, hgnc_lines=hgnc_lines, exac_lines=exac_lines, mim2gene_lines=mim_files['mim2genes'], genemap_lines=mim_files['genemap2'], hpo_lines=hpo_genes, build=build, ) ensembl_genes = {} for gene_obj in hgnc_genes: ensembl_id = gene_obj['ensembl_id'] ensembl_genes[ensembl_id] = gene_obj # Fetch the transcripts from ensembl ensembl_transcripts = fetch_ensembl_transcripts(build=build) transcripts = load_transcripts(adapter, ensembl_transcripts, build, ensembl_genes) adapter.update_indexes() LOG.info("Genes, transcripts and Exons loaded")
[ "def", "genes", "(", "context", ",", "build", ",", "api_key", ")", ":", "LOG", ".", "info", "(", "\"Running scout update genes\"", ")", "adapter", "=", "context", ".", "obj", "[", "'adapter'", "]", "# Fetch the omim information", "api_key", "=", "api_key", "or", "context", ".", "obj", ".", "get", "(", "'omim_api_key'", ")", "if", "not", "api_key", ":", "LOG", ".", "warning", "(", "\"Please provide a omim api key to load the omim gene panel\"", ")", "context", ".", "abort", "(", ")", "try", ":", "mim_files", "=", "fetch_mim_files", "(", "api_key", ",", "mim2genes", "=", "True", ",", "morbidmap", "=", "True", ",", "genemap2", "=", "True", ")", "except", "Exception", "as", "err", ":", "LOG", ".", "warning", "(", "err", ")", "context", ".", "abort", "(", ")", "LOG", ".", "warning", "(", "\"Dropping all gene information\"", ")", "adapter", ".", "drop_genes", "(", "build", ")", "LOG", ".", "info", "(", "\"Genes dropped\"", ")", "LOG", ".", "warning", "(", "\"Dropping all transcript information\"", ")", "adapter", ".", "drop_transcripts", "(", "build", ")", "LOG", ".", "info", "(", "\"transcripts dropped\"", ")", "hpo_genes", "=", "fetch_hpo_genes", "(", ")", "if", "build", ":", "builds", "=", "[", "build", "]", "else", ":", "builds", "=", "[", "'37'", ",", "'38'", "]", "hgnc_lines", "=", "fetch_hgnc", "(", ")", "exac_lines", "=", "fetch_exac_constraint", "(", ")", "for", "build", "in", "builds", ":", "ensembl_genes", "=", "fetch_ensembl_genes", "(", "build", "=", "build", ")", "# load the genes", "hgnc_genes", "=", "load_hgnc_genes", "(", "adapter", "=", "adapter", ",", "ensembl_lines", "=", "ensembl_genes", ",", "hgnc_lines", "=", "hgnc_lines", ",", "exac_lines", "=", "exac_lines", ",", "mim2gene_lines", "=", "mim_files", "[", "'mim2genes'", "]", ",", "genemap_lines", "=", "mim_files", "[", "'genemap2'", "]", ",", "hpo_lines", "=", "hpo_genes", ",", "build", "=", "build", ",", ")", "ensembl_genes", "=", "{", "}", "for", "gene_obj", "in", "hgnc_genes", ":", "ensembl_id", "=", "gene_obj", "[", "'ensembl_id'", "]", "ensembl_genes", "[", "ensembl_id", "]", "=", "gene_obj", "# Fetch the transcripts from ensembl", "ensembl_transcripts", "=", "fetch_ensembl_transcripts", "(", "build", "=", "build", ")", "transcripts", "=", "load_transcripts", "(", "adapter", ",", "ensembl_transcripts", ",", "build", ",", "ensembl_genes", ")", "adapter", ".", "update_indexes", "(", ")", "LOG", ".", "info", "(", "\"Genes, transcripts and Exons loaded\"", ")" ]
Load the hgnc aliases to the mongo database.
[ "Load", "the", "hgnc", "aliases", "to", "the", "mongo", "database", "." ]
python
test
chriso/timeseries
timeseries/data_frame.py
https://github.com/chriso/timeseries/blob/8b81e6cfd955a7cf75a421dfdb71b3f9e53be64d/timeseries/data_frame.py#L26-L30
def forecast(self, horizon, **kwargs): '''Forecast all time series in the group. See the `TimeSeries.forecast()` method for more information.''' return DataFrame({ name: series.forecast(horizon, **kwargs) \ for name, series in self.groups.iteritems() })
[ "def", "forecast", "(", "self", ",", "horizon", ",", "*", "*", "kwargs", ")", ":", "return", "DataFrame", "(", "{", "name", ":", "series", ".", "forecast", "(", "horizon", ",", "*", "*", "kwargs", ")", "for", "name", ",", "series", "in", "self", ".", "groups", ".", "iteritems", "(", ")", "}", ")" ]
Forecast all time series in the group. See the `TimeSeries.forecast()` method for more information.
[ "Forecast", "all", "time", "series", "in", "the", "group", ".", "See", "the", "TimeSeries", ".", "forecast", "()", "method", "for", "more", "information", "." ]
python
train
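A hedged usage sketch of the group-wide forecast above; constructing the DataFrame and its member TimeSeries objects is outside this record, so frame is assumed to already exist:

# `frame` groups several named TimeSeries; keyword arguments are passed
# straight through to each series' own forecast().
predictions = frame.forecast(10)
print(predictions)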
celery/django-celery
djcelery/loaders.py
https://github.com/celery/django-celery/blob/5d1ecb09c6304d22cc447c7c08fba0bd1febc2ef/djcelery/loaders.py#L57-L66
def read_configuration(self): """Load configuration from Django settings.""" self.configured = True # Default backend needs to be the database backend for backward # compatibility. backend = (getattr(settings, 'CELERY_RESULT_BACKEND', None) or getattr(settings, 'CELERY_BACKEND', None)) if not backend: settings.CELERY_RESULT_BACKEND = 'database' return DictAttribute(settings)
[ "def", "read_configuration", "(", "self", ")", ":", "self", ".", "configured", "=", "True", "# Default backend needs to be the database backend for backward", "# compatibility.", "backend", "=", "(", "getattr", "(", "settings", ",", "'CELERY_RESULT_BACKEND'", ",", "None", ")", "or", "getattr", "(", "settings", ",", "'CELERY_BACKEND'", ",", "None", ")", ")", "if", "not", "backend", ":", "settings", ".", "CELERY_RESULT_BACKEND", "=", "'database'", "return", "DictAttribute", "(", "settings", ")" ]
Load configuration from Django settings.
[ "Load", "configuration", "from", "Django", "settings", "." ]
python
train
matiskay/html-similarity
html_similarity/style_similarity.py
https://github.com/matiskay/html-similarity/blob/eef5586b1cf30134254690b2150260ef82cbd18f/html_similarity/style_similarity.py#L26-L41
def style_similarity(page1, page2): """ Computes CSS style Similarity between two DOM trees A = classes(Document_1) B = classes(Document_2) style_similarity = |A & B| / (|A| + |B| - |A & B|) :param page1: html of the page1 :param page2: html of the page2 :return: Number between 0 and 1. If the number is next to 1 the page are really similar. """ classes_page1 = get_classes(page1) classes_page2 = get_classes(page2) return jaccard_similarity(classes_page1, classes_page2)
[ "def", "style_similarity", "(", "page1", ",", "page2", ")", ":", "classes_page1", "=", "get_classes", "(", "page1", ")", "classes_page2", "=", "get_classes", "(", "page2", ")", "return", "jaccard_similarity", "(", "classes_page1", ",", "classes_page2", ")" ]
Computes CSS style Similarity between two DOM trees A = classes(Document_1) B = classes(Document_2) style_similarity = |A & B| / (|A| + |B| - |A & B|) :param page1: html of the page1 :param page2: html of the page2 :return: Number between 0 and 1. If the number is close to 1, the pages are really similar.
[ "Computes", "CSS", "style", "Similarity", "between", "two", "DOM", "trees" ]
python
train
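A small usage sketch of style_similarity on two inline HTML fragments; single-class elements are used so the class sets are unambiguous, and the import location is assumed from the package name:

from html_similarity import style_similarity

page_a = '<div class="nav"><a class="active" href="/">Home</a></div>'
page_b = '<div class="nav"><a class="footer" href="/about">About</a></div>'

# A = {nav, active}, B = {nav, footer}: |A & B| = 1 and |A| + |B| - |A & B| = 3,
# so the Jaccard score should come out around 1/3.
print(style_similarity(page_a, page_b))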
CZ-NIC/yangson
yangson/xpathparser.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/xpathparser.py#L303-L323
def _qname(self) -> Optional[QualName]: """Parse XML QName.""" if self.test_string("*"): self.skip_ws() return False ident = self.yang_identifier() ws = self.skip_ws() try: next = self.peek() except EndOfInput: return ident, None if next == "(": return self._node_type(ident) if not ws and self.test_string(":"): res = ( self.yang_identifier(), self.sctx.schema_data.prefix2ns(ident, self.sctx.text_mid)) else: res = (ident, None) self.skip_ws() return res
[ "def", "_qname", "(", "self", ")", "->", "Optional", "[", "QualName", "]", ":", "if", "self", ".", "test_string", "(", "\"*\"", ")", ":", "self", ".", "skip_ws", "(", ")", "return", "False", "ident", "=", "self", ".", "yang_identifier", "(", ")", "ws", "=", "self", ".", "skip_ws", "(", ")", "try", ":", "next", "=", "self", ".", "peek", "(", ")", "except", "EndOfInput", ":", "return", "ident", ",", "None", "if", "next", "==", "\"(\"", ":", "return", "self", ".", "_node_type", "(", "ident", ")", "if", "not", "ws", "and", "self", ".", "test_string", "(", "\":\"", ")", ":", "res", "=", "(", "self", ".", "yang_identifier", "(", ")", ",", "self", ".", "sctx", ".", "schema_data", ".", "prefix2ns", "(", "ident", ",", "self", ".", "sctx", ".", "text_mid", ")", ")", "else", ":", "res", "=", "(", "ident", ",", "None", ")", "self", ".", "skip_ws", "(", ")", "return", "res" ]
Parse XML QName.
[ "Parse", "XML", "QName", "." ]
python
train
annoviko/pyclustering
pyclustering/cluster/bang.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/bang.py#L731-L749
def __calculate_volume(self): """! @brief Calculates volume of current spatial block. @details If empty dimension is detected (where all points has the same value) then such dimension is ignored during calculation of volume. @return (double) Volume of current spatial block. """ volume = 0.0 for i in range(0, len(self.__max_corner)): side_length = self.__max_corner[i] - self.__min_corner[i] if side_length != 0.0: if volume == 0.0: volume = side_length else: volume *= side_length return volume
[ "def", "__calculate_volume", "(", "self", ")", ":", "volume", "=", "0.0", "for", "i", "in", "range", "(", "0", ",", "len", "(", "self", ".", "__max_corner", ")", ")", ":", "side_length", "=", "self", ".", "__max_corner", "[", "i", "]", "-", "self", ".", "__min_corner", "[", "i", "]", "if", "side_length", "!=", "0.0", ":", "if", "volume", "==", "0.0", ":", "volume", "=", "side_length", "else", ":", "volume", "*=", "side_length", "return", "volume" ]
!
@brief Calculates volume of current spatial block.
@details If an empty dimension is detected (where all points have the same value) then such a dimension is ignored during the calculation of the volume.

@return (double) Volume of current spatial block.
[ "!" ]
python
valid
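The degenerate-dimension rule described in this docstring is easy to see on concrete corners. The function below is a free-standing sketch of the same loop (the real method reads __min_corner and __max_corner from the BANG block instance); the corner values are made up.

# Sketch of the volume rule: a side of length zero is skipped instead of zeroing the product.
def calculate_volume(min_corner, max_corner):
    volume = 0.0
    for low, high in zip(min_corner, max_corner):
        side_length = high - low
        if side_length != 0.0:
            volume = side_length if volume == 0.0 else volume * side_length
    return volume

print(calculate_volume([0.0, 0.0, 0.0], [2.0, 0.0, 3.0]))  # 6.0 -- the flat middle dimension is ignored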
valency/deeputils
deeputils/common.py
https://github.com/valency/deeputils/blob/27efd91668de0223ed8b07cfadf2151632521520/deeputils/common.py#L65-L82
def dict_merge(a, b, k): """ Merge two dictionary lists :param a: original list :param b: alternative list, element will replace the one in original list with same key :param k: key :return: the merged list """ c = a.copy() for j in range(len(b)): flag = False for i in range(len(c)): if c[i][k] == b[j][k]: c[i] = b[j].copy() flag = True if not flag: c.append(b[j].copy()) return c
[ "def", "dict_merge", "(", "a", ",", "b", ",", "k", ")", ":", "c", "=", "a", ".", "copy", "(", ")", "for", "j", "in", "range", "(", "len", "(", "b", ")", ")", ":", "flag", "=", "False", "for", "i", "in", "range", "(", "len", "(", "c", ")", ")", ":", "if", "c", "[", "i", "]", "[", "k", "]", "==", "b", "[", "j", "]", "[", "k", "]", ":", "c", "[", "i", "]", "=", "b", "[", "j", "]", ".", "copy", "(", ")", "flag", "=", "True", "if", "not", "flag", ":", "c", ".", "append", "(", "b", "[", "j", "]", ".", "copy", "(", ")", ")", "return", "c" ]
Merge two dictionary lists :param a: original list :param b: alternative list, element will replace the one in original list with same key :param k: key :return: the merged list
[ "Merge", "two", "dictionary", "lists", ":", "param", "a", ":", "original", "list", ":", "param", "b", ":", "alternative", "list", "element", "will", "replace", "the", "one", "in", "original", "list", "with", "same", "key", ":", "param", "k", ":", "key", ":", "return", ":", "the", "merged", "list" ]
python
valid
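With dict_merge defined as in the snippet above, its replace-or-append behavior can be shown directly; the records below are made up.

a = [{"id": 1, "v": "old"}, {"id": 2, "v": "keep"}]
b = [{"id": 1, "v": "new"}, {"id": 3, "v": "extra"}]
print(dict_merge(a, b, "id"))
# [{'id': 1, 'v': 'new'}, {'id': 2, 'v': 'keep'}, {'id': 3, 'v': 'extra'}]
# b's element with id 1 replaced the original, id 2 was left alone, and id 3 was appended.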
StellarCN/py-stellar-base
stellar_base/operation.py
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/operation.py#L681-L737
def from_xdr_object(cls, op_xdr_object): """Creates a :class:`SetOptions` object from an XDR Operation object. """ if not op_xdr_object.sourceAccount: source = None else: source = encode_check( 'account', op_xdr_object.sourceAccount[0].ed25519).decode() if not op_xdr_object.body.setOptionsOp.inflationDest: inflation_dest = None else: inflation_dest = encode_check( 'account', op_xdr_object.body.setOptionsOp.inflationDest[0] .ed25519).decode() clear_flags = op_xdr_object.body.setOptionsOp.clearFlags # list set_flags = op_xdr_object.body.setOptionsOp.setFlags master_weight = op_xdr_object.body.setOptionsOp.masterWeight low_threshold = op_xdr_object.body.setOptionsOp.lowThreshold med_threshold = op_xdr_object.body.setOptionsOp.medThreshold high_threshold = op_xdr_object.body.setOptionsOp.highThreshold home_domain = op_xdr_object.body.setOptionsOp.homeDomain if op_xdr_object.body.setOptionsOp.signer: key = op_xdr_object.body.setOptionsOp.signer[0].key if key.type == Xdr.const.SIGNER_KEY_TYPE_ED25519: signer_address = encode_check('account', key.ed25519).decode() signer_type = 'ed25519PublicKey' if key.type == Xdr.const.SIGNER_KEY_TYPE_PRE_AUTH_TX: signer_address = key.preAuthTx signer_type = 'preAuthTx' if key.type == Xdr.const.SIGNER_KEY_TYPE_HASH_X: signer_address = key.hashX signer_type = 'hashX' signer_weight = op_xdr_object.body.setOptionsOp.signer[0].weight else: signer_address = None signer_type = None signer_weight = None return cls( source=source, inflation_dest=inflation_dest, clear_flags=clear_flags, set_flags=set_flags, master_weight=master_weight, low_threshold=low_threshold, med_threshold=med_threshold, high_threshold=high_threshold, home_domain=home_domain, signer_address=signer_address, signer_type=signer_type, signer_weight=signer_weight)
[ "def", "from_xdr_object", "(", "cls", ",", "op_xdr_object", ")", ":", "if", "not", "op_xdr_object", ".", "sourceAccount", ":", "source", "=", "None", "else", ":", "source", "=", "encode_check", "(", "'account'", ",", "op_xdr_object", ".", "sourceAccount", "[", "0", "]", ".", "ed25519", ")", ".", "decode", "(", ")", "if", "not", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "inflationDest", ":", "inflation_dest", "=", "None", "else", ":", "inflation_dest", "=", "encode_check", "(", "'account'", ",", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "inflationDest", "[", "0", "]", ".", "ed25519", ")", ".", "decode", "(", ")", "clear_flags", "=", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "clearFlags", "# list", "set_flags", "=", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "setFlags", "master_weight", "=", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "masterWeight", "low_threshold", "=", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "lowThreshold", "med_threshold", "=", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "medThreshold", "high_threshold", "=", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "highThreshold", "home_domain", "=", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "homeDomain", "if", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "signer", ":", "key", "=", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "signer", "[", "0", "]", ".", "key", "if", "key", ".", "type", "==", "Xdr", ".", "const", ".", "SIGNER_KEY_TYPE_ED25519", ":", "signer_address", "=", "encode_check", "(", "'account'", ",", "key", ".", "ed25519", ")", ".", "decode", "(", ")", "signer_type", "=", "'ed25519PublicKey'", "if", "key", ".", "type", "==", "Xdr", ".", "const", ".", "SIGNER_KEY_TYPE_PRE_AUTH_TX", ":", "signer_address", "=", "key", ".", "preAuthTx", "signer_type", "=", "'preAuthTx'", "if", "key", ".", "type", "==", "Xdr", ".", "const", ".", "SIGNER_KEY_TYPE_HASH_X", ":", "signer_address", "=", "key", ".", "hashX", "signer_type", "=", "'hashX'", "signer_weight", "=", "op_xdr_object", ".", "body", ".", "setOptionsOp", ".", "signer", "[", "0", "]", ".", "weight", "else", ":", "signer_address", "=", "None", "signer_type", "=", "None", "signer_weight", "=", "None", "return", "cls", "(", "source", "=", "source", ",", "inflation_dest", "=", "inflation_dest", ",", "clear_flags", "=", "clear_flags", ",", "set_flags", "=", "set_flags", ",", "master_weight", "=", "master_weight", ",", "low_threshold", "=", "low_threshold", ",", "med_threshold", "=", "med_threshold", ",", "high_threshold", "=", "high_threshold", ",", "home_domain", "=", "home_domain", ",", "signer_address", "=", "signer_address", ",", "signer_type", "=", "signer_type", ",", "signer_weight", "=", "signer_weight", ")" ]
Creates a :class:`SetOptions` object from an XDR Operation object.
[ "Creates", "a", ":", "class", ":", "SetOptions", "object", "from", "an", "XDR", "Operation", "object", "." ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/rst/rs3/rs3graph.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/rst/rs3/rs3graph.py#L468-L549
def get_rst_spans(rst_graph): """ Returns a list of 5-tuples describing each RST span (i.e. the nucleus or satellite of a relation) in the document. (This function is meant for people who prefer to work with R / DataFrames / CSV files instead of graphs.) Parameters ---------- docgraph : DiscourseDocumentGraph a document graph which contains RST annotations Returns ------- all_spans : list of (str, str, str, int, int) each list element represents an RST span (i.e. the nucleus or satellite) as a 5-tuple (relation string, span type, relation type, token onset, token offset). In the example ('rst:16-rst:2', 'N', 'evaluation-s', 9, 24), the relation string 'rst:16-rst:2' consists of two parts, the relation root node ID and the node ID of its nucleus (span type 'N'). In the example ('rst:16-rst:4-rst:3', 'N1', 'list', 20, 24), the relation string consists of 3 parts, the relation root node ID and the node IDs of its nucleii (span type 'N1', 'N2'). Examples -------- [('rst:16-rst:4-rst:3', 'N1', 'list', 20, 24), ('rst:16-rst:4-rst:3', 'N2', 'list', 9, 19), ('rst:16-rst:2', 'N', 'evaluation-s', 9, 24), ('rst:16-rst:2', 'S', 'evaluation-s', 4, 8)] """ token_map = TokenMapper(rst_graph).id2index rst_relations = get_rst_relations(rst_graph) all_spans = [] for dom_node in rst_relations: if 'multinuc' in rst_relations[dom_node]: nuc_count = 1 multinuc_start, multinuc_end = sys.maxint, 0 multinuc_spans = rst_relations[dom_node]['multinuc'] multinuc_rel_id = "{0}-{1}".format( dom_node, '-'.join(target for target, _rel, _toks in multinuc_spans)) for _, relname, toks in multinuc_spans: nuc_start, nuc_end = get_segment_token_offsets(toks, token_map) multinuc_span = (multinuc_rel_id, "N{}".format(nuc_count), relname, nuc_start, nuc_end) all_spans.append(multinuc_span) nuc_count += 1 # determine the token offsets of the whole multinuc relation iteratively if nuc_start < multinuc_start: multinuc_start = nuc_start if nuc_end > multinuc_end: multinuc_end = nuc_end if 'satellites' in rst_relations[dom_node]: # find the nucleus if 'nucleus' in rst_relations[dom_node]: nuc_id, nuc_toks = rst_relations[dom_node]['nucleus'] nuc_start, nuc_end = get_segment_token_offsets(nuc_toks, token_map) elif 'multinuc' in rst_relations[dom_node]: nuc_id = dom_node # multinuc as a whole is the nucleus nuc_start, nuc_end = multinuc_start, multinuc_end elif 'tokens' in rst_relations[dom_node]: nuc_id = dom_node # dominating segment node directly dominates these tokens nuc_start, nuc_end = get_segment_token_offsets( rst_relations[dom_node]['tokens'], token_map) else: raise ValueError( "Can't find a nucleus for these satellites: {}".format( rst_relations[dom_node]['satellites'])) sat_spans = rst_relations[dom_node]['satellites'] for satellite, relname, sat_toks in sat_spans: sat_start, sat_end = get_segment_token_offsets(sat_toks, token_map) nucleus_span = ("{0}-{1}".format(nuc_id, satellite), 'N', relname, nuc_start, nuc_end) all_spans.append(nucleus_span) satellite_span = ("{0}-{1}".format(nuc_id, satellite), 'S', relname, sat_start, sat_end) all_spans.append(satellite_span) return all_spans
[ "def", "get_rst_spans", "(", "rst_graph", ")", ":", "token_map", "=", "TokenMapper", "(", "rst_graph", ")", ".", "id2index", "rst_relations", "=", "get_rst_relations", "(", "rst_graph", ")", "all_spans", "=", "[", "]", "for", "dom_node", "in", "rst_relations", ":", "if", "'multinuc'", "in", "rst_relations", "[", "dom_node", "]", ":", "nuc_count", "=", "1", "multinuc_start", ",", "multinuc_end", "=", "sys", ".", "maxint", ",", "0", "multinuc_spans", "=", "rst_relations", "[", "dom_node", "]", "[", "'multinuc'", "]", "multinuc_rel_id", "=", "\"{0}-{1}\"", ".", "format", "(", "dom_node", ",", "'-'", ".", "join", "(", "target", "for", "target", ",", "_rel", ",", "_toks", "in", "multinuc_spans", ")", ")", "for", "_", ",", "relname", ",", "toks", "in", "multinuc_spans", ":", "nuc_start", ",", "nuc_end", "=", "get_segment_token_offsets", "(", "toks", ",", "token_map", ")", "multinuc_span", "=", "(", "multinuc_rel_id", ",", "\"N{}\"", ".", "format", "(", "nuc_count", ")", ",", "relname", ",", "nuc_start", ",", "nuc_end", ")", "all_spans", ".", "append", "(", "multinuc_span", ")", "nuc_count", "+=", "1", "# determine the token offsets of the whole multinuc relation iteratively", "if", "nuc_start", "<", "multinuc_start", ":", "multinuc_start", "=", "nuc_start", "if", "nuc_end", ">", "multinuc_end", ":", "multinuc_end", "=", "nuc_end", "if", "'satellites'", "in", "rst_relations", "[", "dom_node", "]", ":", "# find the nucleus", "if", "'nucleus'", "in", "rst_relations", "[", "dom_node", "]", ":", "nuc_id", ",", "nuc_toks", "=", "rst_relations", "[", "dom_node", "]", "[", "'nucleus'", "]", "nuc_start", ",", "nuc_end", "=", "get_segment_token_offsets", "(", "nuc_toks", ",", "token_map", ")", "elif", "'multinuc'", "in", "rst_relations", "[", "dom_node", "]", ":", "nuc_id", "=", "dom_node", "# multinuc as a whole is the nucleus", "nuc_start", ",", "nuc_end", "=", "multinuc_start", ",", "multinuc_end", "elif", "'tokens'", "in", "rst_relations", "[", "dom_node", "]", ":", "nuc_id", "=", "dom_node", "# dominating segment node directly dominates these tokens", "nuc_start", ",", "nuc_end", "=", "get_segment_token_offsets", "(", "rst_relations", "[", "dom_node", "]", "[", "'tokens'", "]", ",", "token_map", ")", "else", ":", "raise", "ValueError", "(", "\"Can't find a nucleus for these satellites: {}\"", ".", "format", "(", "rst_relations", "[", "dom_node", "]", "[", "'satellites'", "]", ")", ")", "sat_spans", "=", "rst_relations", "[", "dom_node", "]", "[", "'satellites'", "]", "for", "satellite", ",", "relname", ",", "sat_toks", "in", "sat_spans", ":", "sat_start", ",", "sat_end", "=", "get_segment_token_offsets", "(", "sat_toks", ",", "token_map", ")", "nucleus_span", "=", "(", "\"{0}-{1}\"", ".", "format", "(", "nuc_id", ",", "satellite", ")", ",", "'N'", ",", "relname", ",", "nuc_start", ",", "nuc_end", ")", "all_spans", ".", "append", "(", "nucleus_span", ")", "satellite_span", "=", "(", "\"{0}-{1}\"", ".", "format", "(", "nuc_id", ",", "satellite", ")", ",", "'S'", ",", "relname", ",", "sat_start", ",", "sat_end", ")", "all_spans", ".", "append", "(", "satellite_span", ")", "return", "all_spans" ]
Returns a list of 5-tuples describing each RST span (i.e. the nucleus or satellite of a relation) in the document. (This function is meant for people who prefer to work with R / DataFrames / CSV files instead of graphs.) Parameters ---------- docgraph : DiscourseDocumentGraph a document graph which contains RST annotations Returns ------- all_spans : list of (str, str, str, int, int) each list element represents an RST span (i.e. the nucleus or satellite) as a 5-tuple (relation string, span type, relation type, token onset, token offset). In the example ('rst:16-rst:2', 'N', 'evaluation-s', 9, 24), the relation string 'rst:16-rst:2' consists of two parts, the relation root node ID and the node ID of its nucleus (span type 'N'). In the example ('rst:16-rst:4-rst:3', 'N1', 'list', 20, 24), the relation string consists of 3 parts, the relation root node ID and the node IDs of its nucleii (span type 'N1', 'N2'). Examples -------- [('rst:16-rst:4-rst:3', 'N1', 'list', 20, 24), ('rst:16-rst:4-rst:3', 'N2', 'list', 9, 19), ('rst:16-rst:2', 'N', 'evaluation-s', 9, 24), ('rst:16-rst:2', 'S', 'evaluation-s', 4, 8)]
[ "Returns", "a", "list", "of", "5", "-", "tuples", "describing", "each", "RST", "span", "(", "i", ".", "e", ".", "the", "nucleus", "or", "satellite", "of", "a", "relation", ")", "in", "the", "document", ".", "(", "This", "function", "is", "meant", "for", "people", "who", "prefer", "to", "work", "with", "R", "/", "DataFrames", "/", "CSV", "files", "instead", "of", "graphs", ".", ")" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/utils/yellowfin.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L460-L519
def apply_gradients(self, grads_and_vars, global_step=None, name=None): """Applying gradients and tune hyperparams with YellowFin. Args: grads_and_vars: List of (gradient, variable) pairs as returned by compute_gradients(). global_step: Optional Variable to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to the Optimizer constructor. Returns: (A group of operations) Variable Update with Momentum ops, YellowFin ops(Curvature, Variance, Distance) ops, SingleStep and lr_mu tuning ops, Step increment ops. """ self._grad, self._vars = zip(*[(g, t) for g, t in grads_and_vars if g is not None]) # Var update with Momentum. with tf.variable_scope("apply_updates"): # Gradient Clipping? if self._clip_thresh_var is not None: self._grad, _ = tf.clip_by_global_norm( self._grad, self._clip_thresh_var) apply_grad_op = self._momentum_optimizer.apply_gradients( zip(self._grad, self._vars), global_step=global_step, name=name) else: apply_grad_op = self._momentum_optimizer.apply_gradients( zip(self._grad, self._vars), global_step=global_step, name=name) # Begin lr and mu tuning. with tf.variable_scope("prepare_yellowFin_variables"): # the dependencies ideally only need to be after clip is done, # i.e. depends on self._grads. However, the control_dependencies # does not support indexed slice for sparse gradients. # The alternative dependencies here might be slightly slower due # to less parallelization. with tf.control_dependencies([apply_grad_op,]): prepare_variables_op = self._prepare_variables() with tf.variable_scope("yellowfin"): with tf.control_dependencies([prepare_variables_op]): yellowfin_op = self._yellowfin() # Update YellowFin step variable. with tf.control_dependencies([yellowfin_op]): self._increment_step_op = tf.assign_add(self._step, 1).op return tf.group(apply_grad_op, prepare_variables_op, yellowfin_op, self._increment_step_op)
[ "def", "apply_gradients", "(", "self", ",", "grads_and_vars", ",", "global_step", "=", "None", ",", "name", "=", "None", ")", ":", "self", ".", "_grad", ",", "self", ".", "_vars", "=", "zip", "(", "*", "[", "(", "g", ",", "t", ")", "for", "g", ",", "t", "in", "grads_and_vars", "if", "g", "is", "not", "None", "]", ")", "# Var update with Momentum.", "with", "tf", ".", "variable_scope", "(", "\"apply_updates\"", ")", ":", "# Gradient Clipping?", "if", "self", ".", "_clip_thresh_var", "is", "not", "None", ":", "self", ".", "_grad", ",", "_", "=", "tf", ".", "clip_by_global_norm", "(", "self", ".", "_grad", ",", "self", ".", "_clip_thresh_var", ")", "apply_grad_op", "=", "self", ".", "_momentum_optimizer", ".", "apply_gradients", "(", "zip", "(", "self", ".", "_grad", ",", "self", ".", "_vars", ")", ",", "global_step", "=", "global_step", ",", "name", "=", "name", ")", "else", ":", "apply_grad_op", "=", "self", ".", "_momentum_optimizer", ".", "apply_gradients", "(", "zip", "(", "self", ".", "_grad", ",", "self", ".", "_vars", ")", ",", "global_step", "=", "global_step", ",", "name", "=", "name", ")", "# Begin lr and mu tuning.", "with", "tf", ".", "variable_scope", "(", "\"prepare_yellowFin_variables\"", ")", ":", "# the dependencies ideally only need to be after clip is done,", "# i.e. depends on self._grads. However, the control_dependencies", "# does not support indexed slice for sparse gradients.", "# The alternative dependencies here might be slightly slower due", "# to less parallelization.", "with", "tf", ".", "control_dependencies", "(", "[", "apply_grad_op", ",", "]", ")", ":", "prepare_variables_op", "=", "self", ".", "_prepare_variables", "(", ")", "with", "tf", ".", "variable_scope", "(", "\"yellowfin\"", ")", ":", "with", "tf", ".", "control_dependencies", "(", "[", "prepare_variables_op", "]", ")", ":", "yellowfin_op", "=", "self", ".", "_yellowfin", "(", ")", "# Update YellowFin step variable.", "with", "tf", ".", "control_dependencies", "(", "[", "yellowfin_op", "]", ")", ":", "self", ".", "_increment_step_op", "=", "tf", ".", "assign_add", "(", "self", ".", "_step", ",", "1", ")", ".", "op", "return", "tf", ".", "group", "(", "apply_grad_op", ",", "prepare_variables_op", ",", "yellowfin_op", ",", "self", ".", "_increment_step_op", ")" ]
Applying gradients and tune hyperparams with YellowFin. Args: grads_and_vars: List of (gradient, variable) pairs as returned by compute_gradients(). global_step: Optional Variable to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to the Optimizer constructor. Returns: (A group of operations) Variable Update with Momentum ops, YellowFin ops(Curvature, Variance, Distance) ops, SingleStep and lr_mu tuning ops, Step increment ops.
[ "Applying", "gradients", "and", "tune", "hyperparams", "with", "YellowFin", "." ]
python
train
tanghaibao/jcvi
jcvi/assembly/patch.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/patch.py#L174-L202
def pasteprepare(args): """ %prog pasteprepare bacs.fasta Prepare sequences for paste. """ p = OptionParser(pasteprepare.__doc__) p.add_option("--flank", default=5000, type="int", help="Get the seq of size on two ends [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) goodfasta, = args flank = opts.flank pf = goodfasta.rsplit(".", 1)[0] extbed = pf + ".ext.bed" sizes = Sizes(goodfasta) fw = open(extbed, "w") for bac, size in sizes.iter_sizes(): print("\t".join(str(x) for x in \ (bac, 0, min(flank, size), bac + "L")), file=fw) print("\t".join(str(x) for x in \ (bac, max(size - flank, 0), size, bac + "R")), file=fw) fw.close() fastaFromBed(extbed, goodfasta, name=True)
[ "def", "pasteprepare", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "pasteprepare", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--flank\"", ",", "default", "=", "5000", ",", "type", "=", "\"int\"", ",", "help", "=", "\"Get the seq of size on two ends [default: %default]\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "goodfasta", ",", "=", "args", "flank", "=", "opts", ".", "flank", "pf", "=", "goodfasta", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "extbed", "=", "pf", "+", "\".ext.bed\"", "sizes", "=", "Sizes", "(", "goodfasta", ")", "fw", "=", "open", "(", "extbed", ",", "\"w\"", ")", "for", "bac", ",", "size", "in", "sizes", ".", "iter_sizes", "(", ")", ":", "print", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "(", "bac", ",", "0", ",", "min", "(", "flank", ",", "size", ")", ",", "bac", "+", "\"L\"", ")", ")", ",", "file", "=", "fw", ")", "print", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "(", "bac", ",", "max", "(", "size", "-", "flank", ",", "0", ")", ",", "size", ",", "bac", "+", "\"R\"", ")", ")", ",", "file", "=", "fw", ")", "fw", ".", "close", "(", ")", "fastaFromBed", "(", "extbed", ",", "goodfasta", ",", "name", "=", "True", ")" ]
%prog pasteprepare bacs.fasta Prepare sequences for paste.
[ "%prog", "pasteprepare", "bacs", ".", "fasta" ]
python
train
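The two BED rows written per sequence follow directly from the format strings above. The snippet below simply replays them for one made-up 20 kb BAC with the default 5 kb flank, so the resulting .ext.bed layout is visible without running the whole %prog pipeline.

bac, size, flank = "bac1", 20000, 5000
print("\t".join(str(x) for x in (bac, 0, min(flank, size), bac + "L")))          # bac1  0      5000   bac1L
print("\t".join(str(x) for x in (bac, max(size - flank, 0), size, bac + "R")))   # bac1  15000  20000  bac1R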
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/rst/urml.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/rst/urml.py#L366-L385
def extract_relationtypes(urml_xml_tree): """ extracts the allowed RST relation names and relation types from an URML XML file. Parameters ---------- urml_xml_tree : lxml.etree._ElementTree lxml ElementTree representation of an URML XML file Returns ------- relations : dict of (str, str) Returns a dictionary with RST relation names as keys (str) and relation types (either 'par' or 'hyp') as values (str). """ return {rel.attrib['name']: rel.attrib['type'] for rel in urml_xml_tree.iterfind('//header/reltypes/rel') if 'type' in rel.attrib}
[ "def", "extract_relationtypes", "(", "urml_xml_tree", ")", ":", "return", "{", "rel", ".", "attrib", "[", "'name'", "]", ":", "rel", ".", "attrib", "[", "'type'", "]", "for", "rel", "in", "urml_xml_tree", ".", "iterfind", "(", "'//header/reltypes/rel'", ")", "if", "'type'", "in", "rel", ".", "attrib", "}" ]
extracts the allowed RST relation names and relation types from an URML XML file. Parameters ---------- urml_xml_tree : lxml.etree._ElementTree lxml ElementTree representation of an URML XML file Returns ------- relations : dict of (str, str) Returns a dictionary with RST relation names as keys (str) and relation types (either 'par' or 'hyp') as values (str).
[ "extracts", "the", "allowed", "RST", "relation", "names", "and", "relation", "types", "from", "an", "URML", "XML", "file", "." ]
python
train
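The same dict comprehension can be exercised on a tiny in-memory document. The sketch below uses the standard-library ElementTree parser instead of lxml and a './/'-prefixed path so it runs on a bare element (the real function expects an lxml ElementTree of a full URML file); the relation names are made up.

import xml.etree.ElementTree as etree

# Minimal URML-like fragment with a header/reltypes section.
urml = etree.fromstring(
    "<urml><header><reltypes>"
    "<rel name='concession' type='hyp'/>"
    "<rel name='joint' type='par'/>"
    "</reltypes></header></urml>"
)
relations = {rel.attrib['name']: rel.attrib['type']
             for rel in urml.iterfind('.//header/reltypes/rel')
             if 'type' in rel.attrib}
print(relations)  # {'concession': 'hyp', 'joint': 'par'}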
aws/aws-encryption-sdk-python
src/aws_encryption_sdk/internal/formatting/serialize.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/internal/formatting/serialize.py#L184-L252
def serialize_frame( algorithm, plaintext, message_id, data_encryption_key, frame_length, sequence_number, is_final_frame, signer=None ): """Receives a message plaintext, breaks off a frame, encrypts and serializes the frame, and returns the encrypted frame and the remaining plaintext. :param algorithm: Algorithm to use for encryption :type algorithm: aws_encryption_sdk.identifiers.Algorithm :param bytes plaintext: Source plaintext to encrypt and serialize :param bytes message_id: Message ID :param bytes data_encryption_key: Data key with which to encrypt message :param int frame_length: Length of the framed data :param int sequence_number: Sequence number for frame to be generated :param bool is_final_frame: Boolean stating whether or not this frame is a final frame :param signer: Cryptographic signer object (optional) :type signer: aws_encryption_sdk.Signer :returns: Serialized frame and remaining plaintext :rtype: tuple of bytes :raises SerializationError: if number of frames is too large """ if sequence_number < 1: raise SerializationError("Frame sequence number must be greater than 0") if sequence_number > aws_encryption_sdk.internal.defaults.MAX_FRAME_COUNT: raise SerializationError("Max frame count exceeded") if is_final_frame: content_string = ContentAADString.FINAL_FRAME_STRING_ID else: content_string = ContentAADString.FRAME_STRING_ID frame_plaintext = plaintext[:frame_length] frame_ciphertext = encrypt( algorithm=algorithm, key=data_encryption_key, plaintext=frame_plaintext, associated_data=aws_encryption_sdk.internal.formatting.encryption_context.assemble_content_aad( message_id=message_id, aad_content_string=content_string, seq_num=sequence_number, length=len(frame_plaintext), ), iv=frame_iv(algorithm, sequence_number), ) plaintext = plaintext[frame_length:] if is_final_frame: _LOGGER.debug("Serializing final frame") packed_frame = struct.pack( ">II{iv_len}sI{content_len}s{auth_len}s".format( iv_len=algorithm.iv_len, content_len=len(frame_ciphertext.ciphertext), auth_len=algorithm.auth_len ), SequenceIdentifier.SEQUENCE_NUMBER_END.value, sequence_number, frame_ciphertext.iv, len(frame_ciphertext.ciphertext), frame_ciphertext.ciphertext, frame_ciphertext.tag, ) else: _LOGGER.debug("Serializing frame") packed_frame = struct.pack( ">I{iv_len}s{content_len}s{auth_len}s".format( iv_len=algorithm.iv_len, content_len=frame_length, auth_len=algorithm.auth_len ), sequence_number, frame_ciphertext.iv, frame_ciphertext.ciphertext, frame_ciphertext.tag, ) if signer is not None: signer.update(packed_frame) return packed_frame, plaintext
[ "def", "serialize_frame", "(", "algorithm", ",", "plaintext", ",", "message_id", ",", "data_encryption_key", ",", "frame_length", ",", "sequence_number", ",", "is_final_frame", ",", "signer", "=", "None", ")", ":", "if", "sequence_number", "<", "1", ":", "raise", "SerializationError", "(", "\"Frame sequence number must be greater than 0\"", ")", "if", "sequence_number", ">", "aws_encryption_sdk", ".", "internal", ".", "defaults", ".", "MAX_FRAME_COUNT", ":", "raise", "SerializationError", "(", "\"Max frame count exceeded\"", ")", "if", "is_final_frame", ":", "content_string", "=", "ContentAADString", ".", "FINAL_FRAME_STRING_ID", "else", ":", "content_string", "=", "ContentAADString", ".", "FRAME_STRING_ID", "frame_plaintext", "=", "plaintext", "[", ":", "frame_length", "]", "frame_ciphertext", "=", "encrypt", "(", "algorithm", "=", "algorithm", ",", "key", "=", "data_encryption_key", ",", "plaintext", "=", "frame_plaintext", ",", "associated_data", "=", "aws_encryption_sdk", ".", "internal", ".", "formatting", ".", "encryption_context", ".", "assemble_content_aad", "(", "message_id", "=", "message_id", ",", "aad_content_string", "=", "content_string", ",", "seq_num", "=", "sequence_number", ",", "length", "=", "len", "(", "frame_plaintext", ")", ",", ")", ",", "iv", "=", "frame_iv", "(", "algorithm", ",", "sequence_number", ")", ",", ")", "plaintext", "=", "plaintext", "[", "frame_length", ":", "]", "if", "is_final_frame", ":", "_LOGGER", ".", "debug", "(", "\"Serializing final frame\"", ")", "packed_frame", "=", "struct", ".", "pack", "(", "\">II{iv_len}sI{content_len}s{auth_len}s\"", ".", "format", "(", "iv_len", "=", "algorithm", ".", "iv_len", ",", "content_len", "=", "len", "(", "frame_ciphertext", ".", "ciphertext", ")", ",", "auth_len", "=", "algorithm", ".", "auth_len", ")", ",", "SequenceIdentifier", ".", "SEQUENCE_NUMBER_END", ".", "value", ",", "sequence_number", ",", "frame_ciphertext", ".", "iv", ",", "len", "(", "frame_ciphertext", ".", "ciphertext", ")", ",", "frame_ciphertext", ".", "ciphertext", ",", "frame_ciphertext", ".", "tag", ",", ")", "else", ":", "_LOGGER", ".", "debug", "(", "\"Serializing frame\"", ")", "packed_frame", "=", "struct", ".", "pack", "(", "\">I{iv_len}s{content_len}s{auth_len}s\"", ".", "format", "(", "iv_len", "=", "algorithm", ".", "iv_len", ",", "content_len", "=", "frame_length", ",", "auth_len", "=", "algorithm", ".", "auth_len", ")", ",", "sequence_number", ",", "frame_ciphertext", ".", "iv", ",", "frame_ciphertext", ".", "ciphertext", ",", "frame_ciphertext", ".", "tag", ",", ")", "if", "signer", "is", "not", "None", ":", "signer", ".", "update", "(", "packed_frame", ")", "return", "packed_frame", ",", "plaintext" ]
Receives a message plaintext, breaks off a frame, encrypts and serializes the frame, and returns the encrypted frame and the remaining plaintext. :param algorithm: Algorithm to use for encryption :type algorithm: aws_encryption_sdk.identifiers.Algorithm :param bytes plaintext: Source plaintext to encrypt and serialize :param bytes message_id: Message ID :param bytes data_encryption_key: Data key with which to encrypt message :param int frame_length: Length of the framed data :param int sequence_number: Sequence number for frame to be generated :param bool is_final_frame: Boolean stating whether or not this frame is a final frame :param signer: Cryptographic signer object (optional) :type signer: aws_encryption_sdk.Signer :returns: Serialized frame and remaining plaintext :rtype: tuple of bytes :raises SerializationError: if number of frames is too large
[ "Receives", "a", "message", "plaintext", "breaks", "off", "a", "frame", "encrypts", "and", "serializes", "the", "frame", "and", "returns", "the", "encrypted", "frame", "and", "the", "remaining", "plaintext", "." ]
python
train
eandersson/amqpstorm
amqpstorm/channel.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/channel.py#L297-L318
def start_consuming(self, to_tuple=False, auto_decode=True): """Start consuming messages. :param bool to_tuple: Should incoming messages be converted to a tuple before delivery. :param bool auto_decode: Auto-decode strings when possible. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return: """ while not self.is_closed: self.process_data_events( to_tuple=to_tuple, auto_decode=auto_decode ) if self.consumer_tags: sleep(IDLE_WAIT) continue break
[ "def", "start_consuming", "(", "self", ",", "to_tuple", "=", "False", ",", "auto_decode", "=", "True", ")", ":", "while", "not", "self", ".", "is_closed", ":", "self", ".", "process_data_events", "(", "to_tuple", "=", "to_tuple", ",", "auto_decode", "=", "auto_decode", ")", "if", "self", ".", "consumer_tags", ":", "sleep", "(", "IDLE_WAIT", ")", "continue", "break" ]
Start consuming messages. :param bool to_tuple: Should incoming messages be converted to a tuple before delivery. :param bool auto_decode: Auto-decode strings when possible. :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :return:
[ "Start", "consuming", "messages", "." ]
python
train
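For context, this blocking loop is normally the last call in a consumer script. The sketch below follows amqpstorm's documented consumer pattern; the broker address, credentials, and queue name are made up, so treat it as a shape rather than a drop-in script.

import amqpstorm

def on_message(message):
    print(message.body)  # handle the delivery
    message.ack()

connection = amqpstorm.Connection('127.0.0.1', 'guest', 'guest')  # made-up broker credentials
channel = connection.channel()
channel.basic.consume(on_message, 'demo_queue', no_ack=False)
channel.start_consuming()  # blocks here, repeatedly processing data events until the channel closes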
wakatime/wakatime
wakatime/packages/urllib3/util/selectors.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/urllib3/util/selectors.py#L565-L581
def DefaultSelector(): """ This function serves as a first call for DefaultSelector to detect if the select module is being monkey-patched incorrectly by eventlet, greenlet, and preserve proper behavior. """ global _DEFAULT_SELECTOR if _DEFAULT_SELECTOR is None: if _can_allocate('kqueue'): _DEFAULT_SELECTOR = KqueueSelector elif _can_allocate('epoll'): _DEFAULT_SELECTOR = EpollSelector elif _can_allocate('poll'): _DEFAULT_SELECTOR = PollSelector elif hasattr(select, 'select'): _DEFAULT_SELECTOR = SelectSelector else: # Platform-specific: AppEngine raise ValueError('Platform does not have a selector') return _DEFAULT_SELECTOR()
[ "def", "DefaultSelector", "(", ")", ":", "global", "_DEFAULT_SELECTOR", "if", "_DEFAULT_SELECTOR", "is", "None", ":", "if", "_can_allocate", "(", "'kqueue'", ")", ":", "_DEFAULT_SELECTOR", "=", "KqueueSelector", "elif", "_can_allocate", "(", "'epoll'", ")", ":", "_DEFAULT_SELECTOR", "=", "EpollSelector", "elif", "_can_allocate", "(", "'poll'", ")", ":", "_DEFAULT_SELECTOR", "=", "PollSelector", "elif", "hasattr", "(", "select", ",", "'select'", ")", ":", "_DEFAULT_SELECTOR", "=", "SelectSelector", "else", ":", "# Platform-specific: AppEngine", "raise", "ValueError", "(", "'Platform does not have a selector'", ")", "return", "_DEFAULT_SELECTOR", "(", ")" ]
This function serves as a first call for DefaultSelector to detect if the select module is being monkey-patched incorrectly by eventlet, greenlet, and preserve proper behavior.
[ "This", "function", "serves", "as", "a", "first", "call", "for", "DefaultSelector", "to", "detect", "if", "the", "select", "module", "is", "being", "monkey", "-", "patched", "incorrectly", "by", "eventlet", "greenlet", "and", "preserve", "proper", "behavior", "." ]
python
train
creare-com/pydem
pydem/dem_processing.py
https://github.com/creare-com/pydem/blob/c2fc8d84cfb411df84f71a6dec9edc4b544f710a/pydem/dem_processing.py#L234-L244
def get(self, key, side):
    """
    Returns an edge given a particular key
    Parameters
    ----------
    key : tuple
        (te, be, le, re) tuple that identifies a tile
    side : str
        top, bottom, left, or right, which edge to return
    """
    return getattr(self, side).ravel()[self.keys[key]]
[ "def", "get", "(", "self", ",", "key", ",", "side", ")", ":", "return", "getattr", "(", "self", ",", "side", ")", ".", "ravel", "(", ")", "[", "self", ".", "keys", "[", "key", "]", "]" ]
Returns an edge given a particular key Parmeters ---------- key : tuple (te, be, le, re) tuple that identifies a tile side : str top, bottom, left, or right, which edge to return
[ "Returns", "an", "edge", "given", "a", "particular", "key", "Parmeters", "----------", "key", ":", "tuple", "(", "te", "be", "le", "re", ")", "tuple", "that", "identifies", "a", "tile", "side", ":", "str", "top", "bottom", "left", "or", "right", "which", "edge", "to", "return" ]
python
train
O365/python-o365
O365/drive.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/drive.py#L741-L761
def get_versions(self): """ Returns a list of available versions for this item :return: list of versions :rtype: list[DriveItemVersion] """ if not self.object_id: return [] url = self.build_url( self._endpoints.get('versions').format(id=self.object_id)) response = self.con.get(url) if not response: return [] data = response.json() # Everything received from cloud must be passed as self._cloud_data_key return [DriveItemVersion(parent=self, **{self._cloud_data_key: item}) for item in data.get('value', [])]
[ "def", "get_versions", "(", "self", ")", ":", "if", "not", "self", ".", "object_id", ":", "return", "[", "]", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'versions'", ")", ".", "format", "(", "id", "=", "self", ".", "object_id", ")", ")", "response", "=", "self", ".", "con", ".", "get", "(", "url", ")", "if", "not", "response", ":", "return", "[", "]", "data", "=", "response", ".", "json", "(", ")", "# Everything received from cloud must be passed as self._cloud_data_key", "return", "[", "DriveItemVersion", "(", "parent", "=", "self", ",", "*", "*", "{", "self", ".", "_cloud_data_key", ":", "item", "}", ")", "for", "item", "in", "data", ".", "get", "(", "'value'", ",", "[", "]", ")", "]" ]
Returns a list of available versions for this item :return: list of versions :rtype: list[DriveItemVersion]
[ "Returns", "a", "list", "of", "available", "versions", "for", "this", "item" ]
python
train
mitsei/dlkit
dlkit/services/relationship.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/relationship.py#L935-L943
def use_isolated_family_view(self): """Pass through to provider RelationshipLookupSession.use_isolated_family_view""" self._family_view = ISOLATED # self._get_provider_session('relationship_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_isolated_family_view() except AttributeError: pass
[ "def", "use_isolated_family_view", "(", "self", ")", ":", "self", ".", "_family_view", "=", "ISOLATED", "# self._get_provider_session('relationship_lookup_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_isolated_family_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Pass through to provider RelationshipLookupSession.use_isolated_family_view
[ "Pass", "through", "to", "provider", "RelationshipLookupSession", ".", "use_isolated_family_view" ]
python
train
joanvila/aioredlock
aioredlock/redis.py
https://github.com/joanvila/aioredlock/blob/6c62f0895c93b26b87ca8e3fe36bc024c81be421/aioredlock/redis.py#L71-L83
async def _create_redis_pool(*args, **kwargs): """ Adapter to support both aioredis-0.3.0 and aioredis-1.0.0 For aioredis-1.0.0 and later calls: aioredis.create_redis_pool(*args, **kwargs) For aioredis-0.3.0 calls: aioredis.create_pool(*args, **kwargs) """ if StrictVersion(aioredis.__version__) >= StrictVersion('1.0.0'): # pragma no cover return await aioredis.create_redis_pool(*args, **kwargs) else: # pragma no cover return await aioredis.create_pool(*args, **kwargs)
[ "async", "def", "_create_redis_pool", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "StrictVersion", "(", "aioredis", ".", "__version__", ")", ">=", "StrictVersion", "(", "'1.0.0'", ")", ":", "# pragma no cover", "return", "await", "aioredis", ".", "create_redis_pool", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "# pragma no cover", "return", "await", "aioredis", ".", "create_pool", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Adapter to support both aioredis-0.3.0 and aioredis-1.0.0 For aioredis-1.0.0 and later calls: aioredis.create_redis_pool(*args, **kwargs) For aioredis-0.3.0 calls: aioredis.create_pool(*args, **kwargs)
[ "Adapter", "to", "support", "both", "aioredis", "-", "0", ".", "3", ".", "0", "and", "aioredis", "-", "1", ".", "0", ".", "0", "For", "aioredis", "-", "1", ".", "0", ".", "0", "and", "later", "calls", ":", "aioredis", ".", "create_redis_pool", "(", "*", "args", "**", "kwargs", ")", "For", "aioredis", "-", "0", ".", "3", ".", "0", "calls", ":", "aioredis", ".", "create_pool", "(", "*", "args", "**", "kwargs", ")" ]
python
train
singularityhub/singularity-cli
spython/main/parse/docker.py
https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/main/parse/docker.py#L299-L310
def _expose(self, line): '''Again, just add to metadata, and comment in install. Parameters ========== line: the line from the recipe file to parse to INSTALL ''' ports = self._setup('EXPOSE', line) if len(ports) > 0: self.ports += ports return self._comment("# %s" %line)
[ "def", "_expose", "(", "self", ",", "line", ")", ":", "ports", "=", "self", ".", "_setup", "(", "'EXPOSE'", ",", "line", ")", "if", "len", "(", "ports", ")", ">", "0", ":", "self", ".", "ports", "+=", "ports", "return", "self", ".", "_comment", "(", "\"# %s\"", "%", "line", ")" ]
Again, just add to metadata, and comment in install. Parameters ========== line: the line from the recipe file to parse to INSTALL
[ "Again", "just", "add", "to", "metadata", "and", "comment", "in", "install", ".", "Parameters", "==========", "line", ":", "the", "line", "from", "the", "recipe", "file", "to", "parse", "to", "INSTALL" ]
python
train
PlaidWeb/Publ
publ/caching.py
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/caching.py#L20-L35
def do_not_cache(): """ Return whether we should cache a page render """ from . import index # pylint: disable=cyclic-import if index.in_progress(): # We are reindexing the site return True if request.if_none_match or request.if_modified_since: # we might be returning a 304 NOT MODIFIED based on a client request, # and we don't want to cache that as the result for *all* client # requests to this URI return True return False
[ "def", "do_not_cache", "(", ")", ":", "from", ".", "import", "index", "# pylint: disable=cyclic-import", "if", "index", ".", "in_progress", "(", ")", ":", "# We are reindexing the site", "return", "True", "if", "request", ".", "if_none_match", "or", "request", ".", "if_modified_since", ":", "# we might be returning a 304 NOT MODIFIED based on a client request,", "# and we don't want to cache that as the result for *all* client", "# requests to this URI", "return", "True", "return", "False" ]
Return whether we should cache a page render
[ "Return", "whether", "we", "should", "cache", "a", "page", "render" ]
python
train
atlassian-api/atlassian-python-api
atlassian/jira.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L293-L306
def update_project(self, project_key, data, expand=None): """ Updates a project. Update project: /rest/api/2/project/{projectIdOrKey} :param project_key: project key of project that needs to be updated :param data: dictionary containing the data to be updated :param expand: the parameters to expand """ if expand: url = '/rest/api/2/project/{projectIdOrKey}?expand={expand}'.format(projectIdOrKey=project_key, expand=expand) else: url = '/rest/api/2/project/{projectIdOrKey}'.format(projectIdOrKey=project_key) return self.put(url, data)
[ "def", "update_project", "(", "self", ",", "project_key", ",", "data", ",", "expand", "=", "None", ")", ":", "if", "expand", ":", "url", "=", "'/rest/api/2/project/{projectIdOrKey}?expand={expand}'", ".", "format", "(", "projectIdOrKey", "=", "project_key", ",", "expand", "=", "expand", ")", "else", ":", "url", "=", "'/rest/api/2/project/{projectIdOrKey}'", ".", "format", "(", "projectIdOrKey", "=", "project_key", ")", "return", "self", ".", "put", "(", "url", ",", "data", ")" ]
Updates a project. Update project: /rest/api/2/project/{projectIdOrKey} :param project_key: project key of project that needs to be updated :param data: dictionary containing the data to be updated :param expand: the parameters to expand
[ "Updates", "a", "project", ".", "Update", "project", ":", "/", "rest", "/", "api", "/", "2", "/", "project", "/", "{", "projectIdOrKey", "}" ]
python
train
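A hedged usage sketch: the client construction below follows the atlassian-python-api README's documented pattern, and the URL, credentials, project key, and payload are all made up, so check them against the library's docs before reuse.

from atlassian import Jira

jira = Jira(url="https://jira.example.com", username="bot", password="secret")  # made-up connection details
# Sends PUT /rest/api/2/project/DEMO with only the fields that should change:
jira.update_project("DEMO", {"description": "Updated from a script"}, expand="description,lead")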
contains-io/containment
containment/builder.py
https://github.com/contains-io/containment/blob/4f7e2c2338e0ca7c107a7b3a9913bb5e6e07245f/containment/builder.py#L126-L134
def pave_community(self): """ Usage: containment pave_community """ settings.project_config.path.mkdir() settings.project_config.base.write_text(self.context.base_text) settings.project_config.os_packages.write_text("[]") settings.project_config.lang_packages.write_text("{}")
[ "def", "pave_community", "(", "self", ")", ":", "settings", ".", "project_config", ".", "path", ".", "mkdir", "(", ")", "settings", ".", "project_config", ".", "base", ".", "write_text", "(", "self", ".", "context", ".", "base_text", ")", "settings", ".", "project_config", ".", "os_packages", ".", "write_text", "(", "\"[]\"", ")", "settings", ".", "project_config", ".", "lang_packages", ".", "write_text", "(", "\"{}\"", ")" ]
Usage: containment pave_community
[ "Usage", ":", "containment", "pave_community" ]
python
train
twisted/epsilon
epsilon/amprouter.py
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/amprouter.py#L147-L168
def bindRoute(self, receiver, routeName=_unspecified): """ Create a new route to associate the given route name with the given receiver. @type routeName: C{unicode} or L{NoneType} @param routeName: The identifier for the newly created route. If C{None}, boxes with no route in them will be delivered to this receiver. @rtype: L{Route} """ if routeName is _unspecified: routeName = self.createRouteIdentifier() # self._sender may yet be None; if so, this route goes into _unstarted # and will have its sender set correctly in startReceivingBoxes below. route = Route(self, receiver, routeName) mapping = self._routes if mapping is None: mapping = self._unstarted mapping[routeName] = route return route
[ "def", "bindRoute", "(", "self", ",", "receiver", ",", "routeName", "=", "_unspecified", ")", ":", "if", "routeName", "is", "_unspecified", ":", "routeName", "=", "self", ".", "createRouteIdentifier", "(", ")", "# self._sender may yet be None; if so, this route goes into _unstarted", "# and will have its sender set correctly in startReceivingBoxes below.", "route", "=", "Route", "(", "self", ",", "receiver", ",", "routeName", ")", "mapping", "=", "self", ".", "_routes", "if", "mapping", "is", "None", ":", "mapping", "=", "self", ".", "_unstarted", "mapping", "[", "routeName", "]", "=", "route", "return", "route" ]
Create a new route to associate the given route name with the given receiver. @type routeName: C{unicode} or L{NoneType} @param routeName: The identifier for the newly created route. If C{None}, boxes with no route in them will be delivered to this receiver. @rtype: L{Route}
[ "Create", "a", "new", "route", "to", "associate", "the", "given", "route", "name", "with", "the", "given", "receiver", "." ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L3400-L3404
def ticket_comments(self, ticket_id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/ticket_comments#list-comments" api_path = "/api/v2/tickets/{ticket_id}/comments.json" api_path = api_path.format(ticket_id=ticket_id) return self.call(api_path, **kwargs)
[ "def", "ticket_comments", "(", "self", ",", "ticket_id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/tickets/{ticket_id}/comments.json\"", "api_path", "=", "api_path", ".", "format", "(", "ticket_id", "=", "ticket_id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/ticket_comments#list-comments
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "ticket_comments#list", "-", "comments" ]
python
train
StanfordVL/robosuite
robosuite/devices/keyboard.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/devices/keyboard.py#L65-L74
def get_controller_state(self): """Returns the current state of the keyboard, a dictionary of pos, orn, grasp, and reset.""" dpos = self.pos - self.last_pos self.last_pos = np.array(self.pos) return dict( dpos=dpos, rotation=self.rotation, grasp=int(self.grasp), reset=self._reset_state, )
[ "def", "get_controller_state", "(", "self", ")", ":", "dpos", "=", "self", ".", "pos", "-", "self", ".", "last_pos", "self", ".", "last_pos", "=", "np", ".", "array", "(", "self", ".", "pos", ")", "return", "dict", "(", "dpos", "=", "dpos", ",", "rotation", "=", "self", ".", "rotation", ",", "grasp", "=", "int", "(", "self", ".", "grasp", ")", ",", "reset", "=", "self", ".", "_reset_state", ",", ")" ]
Returns the current state of the keyboard, a dictionary of pos, orn, grasp, and reset.
[ "Returns", "the", "current", "state", "of", "the", "keyboard", "a", "dictionary", "of", "pos", "orn", "grasp", "and", "reset", "." ]
python
train
saltstack/salt
salt/fileserver/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/__init__.py#L570-L579
def file_find(self, load): ''' Convenience function for calls made using the LocalClient ''' path = load.get('path') if not path: return {'path': '', 'rel': ''} tgt_env = load.get('saltenv', 'base') return self.find_file(path, tgt_env)
[ "def", "file_find", "(", "self", ",", "load", ")", ":", "path", "=", "load", ".", "get", "(", "'path'", ")", "if", "not", "path", ":", "return", "{", "'path'", ":", "''", ",", "'rel'", ":", "''", "}", "tgt_env", "=", "load", ".", "get", "(", "'saltenv'", ",", "'base'", ")", "return", "self", ".", "find_file", "(", "path", ",", "tgt_env", ")" ]
Convenience function for calls made using the LocalClient
[ "Convenience", "function", "for", "calls", "made", "using", "the", "LocalClient" ]
python
train
ChrisCummins/labm8
fs.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/fs.py#L411-L419
def mkdir(*components, **kwargs): """ Make directory "path", including any required parents. If directory already exists, do nothing. """ _path = path(*components) if not isdir(_path): os.makedirs(_path, **kwargs) return _path
[ "def", "mkdir", "(", "*", "components", ",", "*", "*", "kwargs", ")", ":", "_path", "=", "path", "(", "*", "components", ")", "if", "not", "isdir", "(", "_path", ")", ":", "os", ".", "makedirs", "(", "_path", ",", "*", "*", "kwargs", ")", "return", "_path" ]
Make directory "path", including any required parents. If directory already exists, do nothing.
[ "Make", "directory", "path", "including", "any", "required", "parents", ".", "If", "directory", "already", "exists", "do", "nothing", "." ]
python
train
numenta/nupic
src/nupic/support/console_printer.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/support/console_printer.py#L52-L90
def cPrint(self, level, message, *args, **kw): """Print a message to the console. Prints only if level <= self.consolePrinterVerbosity Printing with level 0 is equivalent to using a print statement, and should normally be avoided. :param level: (int) indicating the urgency of the message with lower values meaning more urgent (messages at level 0 are the most urgent and are always printed) :param message: (string) possibly with format specifiers :param args: specifies the values for any format specifiers in message :param kw: newline is the only keyword argument. True (default) if a newline should be printed """ if level > self.consolePrinterVerbosity: return if len(kw) > 1: raise KeyError("Invalid keywords for cPrint: %s" % str(kw.keys())) newline = kw.get("newline", True) if len(kw) == 1 and 'newline' not in kw: raise KeyError("Invalid keyword for cPrint: %s" % kw.keys()[0]) if len(args) == 0: if newline: print message else: print message, else: if newline: print message % args else: print message % args,
[ "def", "cPrint", "(", "self", ",", "level", ",", "message", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "level", ">", "self", ".", "consolePrinterVerbosity", ":", "return", "if", "len", "(", "kw", ")", ">", "1", ":", "raise", "KeyError", "(", "\"Invalid keywords for cPrint: %s\"", "%", "str", "(", "kw", ".", "keys", "(", ")", ")", ")", "newline", "=", "kw", ".", "get", "(", "\"newline\"", ",", "True", ")", "if", "len", "(", "kw", ")", "==", "1", "and", "'newline'", "not", "in", "kw", ":", "raise", "KeyError", "(", "\"Invalid keyword for cPrint: %s\"", "%", "kw", ".", "keys", "(", ")", "[", "0", "]", ")", "if", "len", "(", "args", ")", "==", "0", ":", "if", "newline", ":", "print", "message", "else", ":", "print", "message", ",", "else", ":", "if", "newline", ":", "print", "message", "%", "args", "else", ":", "print", "message", "%", "args", "," ]
Print a message to the console. Prints only if level <= self.consolePrinterVerbosity Printing with level 0 is equivalent to using a print statement, and should normally be avoided. :param level: (int) indicating the urgency of the message with lower values meaning more urgent (messages at level 0 are the most urgent and are always printed) :param message: (string) possibly with format specifiers :param args: specifies the values for any format specifiers in message :param kw: newline is the only keyword argument. True (default) if a newline should be printed
[ "Print", "a", "message", "to", "the", "console", "." ]
python
valid
abw333/dominoes
dominoes/game.py
https://github.com/abw333/dominoes/blob/ea9f532c9b834117a5c07d214711515872f7537e/dominoes/game.py#L16-L28
def _validate_player(player): ''' Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3. :param int player: player to be validated :return: None :raises NoSuchPlayerException: if the player is invalid ''' valid_players = range(4) if player not in valid_players: valid_players = ', '.join(str(p) for p in valid_players) raise dominoes.NoSuchPlayerException('{} is not a valid player. Valid players' ' are: {}'.format(player, valid_players))
[ "def", "_validate_player", "(", "player", ")", ":", "valid_players", "=", "range", "(", "4", ")", "if", "player", "not", "in", "valid_players", ":", "valid_players", "=", "', '", ".", "join", "(", "str", "(", "p", ")", "for", "p", "in", "valid_players", ")", "raise", "dominoes", ".", "NoSuchPlayerException", "(", "'{} is not a valid player. Valid players'", "' are: {}'", ".", "format", "(", "player", ",", "valid_players", ")", ")" ]
Checks that a player is a valid player. Valid players are: 0, 1, 2, and 3. :param int player: player to be validated :return: None :raises NoSuchPlayerException: if the player is invalid
[ "Checks", "that", "a", "player", "is", "a", "valid", "player", ".", "Valid", "players", "are", ":", "0", "1", "2", "and", "3", "." ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/command/build_py.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/command/build_py.py#L70-L89
def _get_data_files(self): """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" self.analyze_manifest() data = [] for package in self.packages or (): # Locate package source directory src_dir = self.get_package_dir(package) # Compute package build directory build_dir = os.path.join(*([self.build_lib] + package.split('.'))) # Length of path to strip from found files plen = len(src_dir) + 1 # Strip directory from globbed filenames filenames = [ file[plen:] for file in self.find_data_files(package, src_dir) ] data.append((package, src_dir, build_dir, filenames)) return data
[ "def", "_get_data_files", "(", "self", ")", ":", "self", ".", "analyze_manifest", "(", ")", "data", "=", "[", "]", "for", "package", "in", "self", ".", "packages", "or", "(", ")", ":", "# Locate package source directory", "src_dir", "=", "self", ".", "get_package_dir", "(", "package", ")", "# Compute package build directory", "build_dir", "=", "os", ".", "path", ".", "join", "(", "*", "(", "[", "self", ".", "build_lib", "]", "+", "package", ".", "split", "(", "'.'", ")", ")", ")", "# Length of path to strip from found files", "plen", "=", "len", "(", "src_dir", ")", "+", "1", "# Strip directory from globbed filenames", "filenames", "=", "[", "file", "[", "plen", ":", "]", "for", "file", "in", "self", ".", "find_data_files", "(", "package", ",", "src_dir", ")", "]", "data", ".", "append", "(", "(", "package", ",", "src_dir", ",", "build_dir", ",", "filenames", ")", ")", "return", "data" ]
Generate list of '(package,src_dir,build_dir,filenames)' tuples
[ "Generate", "list", "of", "(", "package", "src_dir", "build_dir", "filenames", ")", "tuples" ]
python
test
openstack/horizon
openstack_dashboard/dashboards/project/instances/utils.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/dashboards/project/instances/utils.py#L171-L191
def flavor_field_data(request, include_empty_option=False): """Returns a list of tuples of all image flavors. Generates a list of image flavors available. And returns a list of (id, name) tuples. :param request: django http request object :param include_empty_option: flag to include a empty tuple in the front of the list :return: list of (id, name) tuples """ flavors = flavor_list(request) if flavors: flavors_list = sort_flavor_list(request, flavors) if include_empty_option: return [("", _("Select Flavor")), ] + flavors_list return flavors_list if include_empty_option: return [("", _("No flavors available")), ] return []
[ "def", "flavor_field_data", "(", "request", ",", "include_empty_option", "=", "False", ")", ":", "flavors", "=", "flavor_list", "(", "request", ")", "if", "flavors", ":", "flavors_list", "=", "sort_flavor_list", "(", "request", ",", "flavors", ")", "if", "include_empty_option", ":", "return", "[", "(", "\"\"", ",", "_", "(", "\"Select Flavor\"", ")", ")", ",", "]", "+", "flavors_list", "return", "flavors_list", "if", "include_empty_option", ":", "return", "[", "(", "\"\"", ",", "_", "(", "\"No flavors available\"", ")", ")", ",", "]", "return", "[", "]" ]
Returns a list of tuples of all image flavors.

Generates a list of image flavors available. And returns a list of
(id, name) tuples.

:param request: django http request object
:param include_empty_option: flag to include a empty tuple in the front of
    the list
:return: list of (id, name) tuples
[ "Returns", "a", "list", "of", "tuples", "of", "all", "image", "flavors", "." ]
python
train
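A minimal standalone sketch of the choices-list pattern that flavor_field_data implements; it omits the request object and translation call, and the flavor tuples are invented.

def choices(flavors, include_empty_option=False):
    # 'flavors' stands in for the sorted (id, name) tuples from Nova
    if flavors:
        return ([("", "Select Flavor")] if include_empty_option else []) + flavors
    return [("", "No flavors available")] if include_empty_option else []

print(choices([("f1", "m1.small")], include_empty_option=True))
print(choices([], include_empty_option=True))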
openstack/networking-cisco
networking_cisco/apps/saf/server/dfa_server.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1690-L1701
def sync_projects(self):
    """Sync projects.

    This function will retrieve project from keystone and populate them
    dfa database and dcnm
    """
    p = self.keystone_event._service.projects.list()
    for proj in p:
        if proj.name in not_create_project_name:
            continue
        LOG.info("Syncing project %s" % proj.name)
        self.project_create_func(proj.id, proj=proj)
[ "def", "sync_projects", "(", "self", ")", ":", "p", "=", "self", ".", "keystone_event", ".", "_service", ".", "projects", ".", "list", "(", ")", "for", "proj", "in", "p", ":", "if", "proj", ".", "name", "in", "not_create_project_name", ":", "continue", "LOG", ".", "info", "(", "\"Syncing project %s\"", "%", "proj", ".", "name", ")", "self", ".", "project_create_func", "(", "proj", ".", "id", ",", "proj", "=", "proj", ")" ]
Sync projects.

This function will retrieve project from keystone and populate them
dfa database and dcnm
[ "Sync", "projects", "." ]
python
train
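An illustrative, self-contained version of the skip-and-create loop shape used by sync_projects; plain dicts stand in for keystone project objects, and the names are invented.

not_create_project_name = {"service", "admin"}
projects = [{"id": "1", "name": "admin"}, {"id": "2", "name": "demo"}]

for proj in projects:
    if proj["name"] in not_create_project_name:
        continue
    # The real method would call self.project_create_func(proj.id, proj=proj) here
    print("Syncing project %s" % proj["name"])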
lago-project/lago
lago/providers/libvirt/vm.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/providers/libvirt/vm.py#L409-L446
def extract_paths_dead(self, paths, ignore_nopath):
    """
    Extract the given paths from the domain using guestfs. Using guestfs
    can have side-effects and should be used as a second option, mainly
    when SSH is not available.

    Args:
        paths(list of str): paths to extract
        ignore_nopath(boolean): if True will ignore none existing paths.

    Returns:
        None

    Raises:
        :exc:`~lago.utils.LagoException`: if :mod:`guestfs` is not importable.
        :exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a none existing
            path was found on the VM, and `ignore_nopath` is True.
        :exc:`~lago.plugins.vm.ExtractPathError`: on failure extracting
            the files.
    """
    if not self._has_guestfs:
        raise LagoException(
            ('guestfs module not available, cannot '
             'extract files with libguestfs')
        )

    LOGGER.debug(
        '%s: attempting to extract files with libguestfs', self.vm.name()
    )
    guestfs_tools.extract_paths(
        disk_path=self.vm.spec['disks'][0]['path'],
        disk_root=self.vm.spec['disks'][0]['metadata'].get(
            'root-partition', 'root'
        ),
        paths=paths,
        ignore_nopath=ignore_nopath
    )
[ "def", "extract_paths_dead", "(", "self", ",", "paths", ",", "ignore_nopath", ")", ":", "if", "not", "self", ".", "_has_guestfs", ":", "raise", "LagoException", "(", "(", "'guestfs module not available, cannot '", ")", "(", "'extract files with libguestfs'", ")", ")", "LOGGER", ".", "debug", "(", "'%s: attempting to extract files with libguestfs'", ",", "self", ".", "vm", ".", "name", "(", ")", ")", "guestfs_tools", ".", "extract_paths", "(", "disk_path", "=", "self", ".", "vm", ".", "spec", "[", "'disks'", "]", "[", "0", "]", "[", "'path'", "]", ",", "disk_root", "=", "self", ".", "vm", ".", "spec", "[", "'disks'", "]", "[", "0", "]", "[", "'metadata'", "]", ".", "get", "(", "'root-partition'", ",", "'root'", ")", ",", "paths", "=", "paths", ",", "ignore_nopath", "=", "ignore_nopath", ")" ]
Extract the given paths from the domain using guestfs. Using guestfs
can have side-effects and should be used as a second option, mainly
when SSH is not available.

Args:
    paths(list of str): paths to extract
    ignore_nopath(boolean): if True will ignore none existing paths.

Returns:
    None

Raises:
    :exc:`~lago.utils.LagoException`: if :mod:`guestfs` is not importable.
    :exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a none existing
        path was found on the VM, and `ignore_nopath` is True.
    :exc:`~lago.plugins.vm.ExtractPathError`: on failure extracting
        the files.
[ "Extract", "the", "given", "paths", "from", "the", "domain", "using", "guestfs", ".", "Using", "guestfs", "can", "have", "side", "-", "effects", "and", "should", "be", "used", "as", "a", "second", "option", "mainly", "when", "SSH", "is", "not", "available", "." ]
python
train
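A sketch of the guard-then-delegate shape shown above, written standalone; LagoException is reused from the record as a plain Exception subclass, and the body is invented rather than a real libguestfs call.

class LagoException(Exception):
    pass

def extract_paths_dead(has_guestfs, paths):
    # Refuse early when the optional dependency is missing, otherwise hand off
    if not has_guestfs:
        raise LagoException(
            'guestfs module not available, cannot '
            'extract files with libguestfs'
        )
    print("extracting %d path(s) with libguestfs" % len(paths))

extract_paths_dead(True, ["/var/log/messages"])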
bcbio/bcbio-nextgen
bcbio/structural/__init__.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/__init__.py#L87-L126
def finalize_sv(samples, config):
    """Combine results from multiple sv callers into a single ordered 'sv' key.
    """
    by_bam = collections.OrderedDict()
    for x in samples:
        batch = dd.get_batch(x) or [dd.get_sample_name(x)]
        try:
            by_bam[x["align_bam"], tuple(batch)].append(x)
        except KeyError:
            by_bam[x["align_bam"], tuple(batch)] = [x]
    by_batch = collections.OrderedDict()
    lead_batches = {}
    for grouped_calls in by_bam.values():
        def orig_svcaller_order(x):
            orig_callers = tz.get_in(["config", "algorithm", "svcaller_orig"], x)
            cur_caller = tz.get_in(["config", "algorithm", "svcaller"], x)
            return orig_callers.index(cur_caller)
        sorted_svcalls = sorted([x for x in grouped_calls if "sv" in x],
                                key=orig_svcaller_order)
        final = grouped_calls[0]
        if len(sorted_svcalls) > 0:
            final["sv"] = reduce(operator.add, [x["sv"] for x in sorted_svcalls])
            final["config"]["algorithm"]["svcaller"] = final["config"]["algorithm"].pop("svcaller_orig")
        batch = dd.get_batch(final) or dd.get_sample_name(final)
        batches = batch if isinstance(batch, (list, tuple)) else [batch]
        if len(batches) > 1:
            lead_batches[(dd.get_sample_name(final), dd.get_phenotype(final) == "germline")] = batches[0]
        for batch in batches:
            try:
                by_batch[batch].append(final)
            except KeyError:
                by_batch[batch] = [final]
    out = []
    for batch, items in by_batch.items():
        if any("svplots" in dd.get_tools_on(d) for d in items):
            items = plot.by_regions(items)
        for data in items:
            if lead_batches.get((dd.get_sample_name(data), dd.get_phenotype(data) == "germline")) in [batch, None]:
                out.append([data])
    return out
[ "def", "finalize_sv", "(", "samples", ",", "config", ")", ":", "by_bam", "=", "collections", ".", "OrderedDict", "(", ")", "for", "x", "in", "samples", ":", "batch", "=", "dd", ".", "get_batch", "(", "x", ")", "or", "[", "dd", ".", "get_sample_name", "(", "x", ")", "]", "try", ":", "by_bam", "[", "x", "[", "\"align_bam\"", "]", ",", "tuple", "(", "batch", ")", "]", ".", "append", "(", "x", ")", "except", "KeyError", ":", "by_bam", "[", "x", "[", "\"align_bam\"", "]", ",", "tuple", "(", "batch", ")", "]", "=", "[", "x", "]", "by_batch", "=", "collections", ".", "OrderedDict", "(", ")", "lead_batches", "=", "{", "}", "for", "grouped_calls", "in", "by_bam", ".", "values", "(", ")", ":", "def", "orig_svcaller_order", "(", "x", ")", ":", "orig_callers", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"svcaller_orig\"", "]", ",", "x", ")", "cur_caller", "=", "tz", ".", "get_in", "(", "[", "\"config\"", ",", "\"algorithm\"", ",", "\"svcaller\"", "]", ",", "x", ")", "return", "orig_callers", ".", "index", "(", "cur_caller", ")", "sorted_svcalls", "=", "sorted", "(", "[", "x", "for", "x", "in", "grouped_calls", "if", "\"sv\"", "in", "x", "]", ",", "key", "=", "orig_svcaller_order", ")", "final", "=", "grouped_calls", "[", "0", "]", "if", "len", "(", "sorted_svcalls", ")", ">", "0", ":", "final", "[", "\"sv\"", "]", "=", "reduce", "(", "operator", ".", "add", ",", "[", "x", "[", "\"sv\"", "]", "for", "x", "in", "sorted_svcalls", "]", ")", "final", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", "[", "\"svcaller\"", "]", "=", "final", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", ".", "pop", "(", "\"svcaller_orig\"", ")", "batch", "=", "dd", ".", "get_batch", "(", "final", ")", "or", "dd", ".", "get_sample_name", "(", "final", ")", "batches", "=", "batch", "if", "isinstance", "(", "batch", ",", "(", "list", ",", "tuple", ")", ")", "else", "[", "batch", "]", "if", "len", "(", "batches", ")", ">", "1", ":", "lead_batches", "[", "(", "dd", ".", "get_sample_name", "(", "final", ")", ",", "dd", ".", "get_phenotype", "(", "final", ")", "==", "\"germline\"", ")", "]", "=", "batches", "[", "0", "]", "for", "batch", "in", "batches", ":", "try", ":", "by_batch", "[", "batch", "]", ".", "append", "(", "final", ")", "except", "KeyError", ":", "by_batch", "[", "batch", "]", "=", "[", "final", "]", "out", "=", "[", "]", "for", "batch", ",", "items", "in", "by_batch", ".", "items", "(", ")", ":", "if", "any", "(", "\"svplots\"", "in", "dd", ".", "get_tools_on", "(", "d", ")", "for", "d", "in", "items", ")", ":", "items", "=", "plot", ".", "by_regions", "(", "items", ")", "for", "data", "in", "items", ":", "if", "lead_batches", ".", "get", "(", "(", "dd", ".", "get_sample_name", "(", "data", ")", ",", "dd", ".", "get_phenotype", "(", "data", ")", "==", "\"germline\"", ")", ")", "in", "[", "batch", ",", "None", "]", ":", "out", ".", "append", "(", "[", "data", "]", ")", "return", "out" ]
Combine results from multiple sv callers into a single ordered 'sv' key.
[ "Combine", "results", "from", "multiple", "sv", "callers", "into", "a", "single", "ordered", "sv", "key", "." ]
python
train
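A standalone illustration of the grouping idiom finalize_sv relies on: append to a list keyed by (bam, batch), creating the list on first use. The sample tuples are invented and only the stdlib is used.

import collections

by_bam = collections.OrderedDict()
samples = [("a.bam", ("b1",)), ("a.bam", ("b1",)), ("c.bam", ("b2",))]
for key in samples:
    try:
        by_bam[key].append(key)
    except KeyError:
        by_bam[key] = [key]
print([(k, len(v)) for k, v in by_bam.items()])
# [(('a.bam', ('b1',)), 2), (('c.bam', ('b2',)), 1)]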
python-openxml/python-docx
docx/dml/color.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/dml/color.py#L63-L80
def theme_color(self):
    """
    A member of :ref:`MsoThemeColorIndex` or |None| if no theme color is
    specified. When :attr:`type` is `MSO_COLOR_TYPE.THEME`, the value of
    this property will always be a member of :ref:`MsoThemeColorIndex`.
    When :attr:`type` has any other value, the value of this property is
    |None|.

    Assigning a member of :ref:`MsoThemeColorIndex` causes :attr:`type`
    to become `MSO_COLOR_TYPE.THEME`. Any existing RGB value is retained
    but ignored by Word. Assigning |None| causes any color specification
    to be removed such that the effective color is inherited from the
    style hierarchy.
    """
    color = self._color
    if color is None or color.themeColor is None:
        return None
    return color.themeColor
[ "def", "theme_color", "(", "self", ")", ":", "color", "=", "self", ".", "_color", "if", "color", "is", "None", "or", "color", ".", "themeColor", "is", "None", ":", "return", "None", "return", "color", ".", "themeColor" ]
A member of :ref:`MsoThemeColorIndex` or |None| if no theme color is
specified. When :attr:`type` is `MSO_COLOR_TYPE.THEME`, the value of this
property will always be a member of :ref:`MsoThemeColorIndex`. When
:attr:`type` has any other value, the value of this property is |None|.

Assigning a member of :ref:`MsoThemeColorIndex` causes :attr:`type` to
become `MSO_COLOR_TYPE.THEME`. Any existing RGB value is retained but
ignored by Word. Assigning |None| causes any color specification to be
removed such that the effective color is inherited from the style
hierarchy.
[ "A", "member", "of", ":", "ref", ":", "MsoThemeColorIndex", "or", "|None|", "if", "no", "theme", "color", "is", "specified", ".", "When", ":", "attr", ":", "type", "is", "MSO_COLOR_TYPE", ".", "THEME", "the", "value", "of", "this", "property", "will", "always", "be", "a", "member", "of", ":", "ref", ":", "MsoThemeColorIndex", ".", "When", ":", "attr", ":", "type", "has", "any", "other", "value", "the", "value", "of", "this", "property", "is", "|None|", "." ]
python
train
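An illustrative property following the same guarded-return pattern as theme_color; these toy classes are not python-docx, and the theme value string is made up.

class _Color(object):
    def __init__(self, themeColor=None):
        self.themeColor = themeColor

class FontColor(object):
    def __init__(self, color=None):
        self._color = color

    @property
    def theme_color(self):
        # Return None when either the element or its attribute is absent
        color = self._color
        if color is None or color.themeColor is None:
            return None
        return color.themeColor

print(FontColor().theme_color)                    # None
print(FontColor(_Color("ACCENT_1")).theme_color)  # ACCENT_1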
DataONEorg/d1_python
gmn/src/d1_gmn/app/management/commands/async_client.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/management/commands/async_client.py#L285-L290
def _datetime_to_iso8601(self, query_dict):
    """Encode any datetime query parameters to ISO8601."""
    return {
        k: v if not isinstance(v, datetime.datetime) else v.isoformat()
        for k, v in list(query_dict.items())
    }
[ "def", "_datetime_to_iso8601", "(", "self", ",", "query_dict", ")", ":", "return", "{", "k", ":", "v", "if", "not", "isinstance", "(", "v", ",", "datetime", ".", "datetime", ")", "else", "v", ".", "isoformat", "(", ")", "for", "k", ",", "v", "in", "list", "(", "query_dict", ".", "items", "(", ")", ")", "}" ]
Encode any datetime query parameters to ISO8601.
[ "Encode", "any", "datetime", "query", "parameters", "to", "ISO8601", "." ]
python
train
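The same transformation as _datetime_to_iso8601, written standalone with an invented query dict: datetime values become ISO 8601 strings, everything else passes through unchanged.

import datetime

query = {"fromDate": datetime.datetime(2019, 1, 2, 3, 4, 5), "count": 10}
encoded = {
    k: v if not isinstance(v, datetime.datetime) else v.isoformat()
    for k, v in query.items()
}
print(encoded)  # {'fromDate': '2019-01-02T03:04:05', 'count': 10}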
apple/turicreate
src/unity/python/turicreate/toolkits/_supervised_learning.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_supervised_learning.py#L261-L334
def create(dataset, target, model_name, features=None,
           validation_set='auto', distributed='auto',
           verbose=True, seed=None, **kwargs):
    """
    Create a :class:`~turicreate.toolkits.SupervisedLearningModel`,

    This is generic function that allows you to create any model that
    implements SupervisedLearningModel This function is normally not called, call
    specific model's create function instead

    Parameters
    ----------
    dataset : SFrame
        Dataset for training the model.

    target : string
        Name of the column containing the target variable. The values in this
        column must be 0 or 1, of integer type.

    model_name : string
        Name of the model

    features : list[string], optional
        List of feature names used by feature column

    validation_set : SFrame, optional
        A dataset for monitoring the model's generalization performance.
        For each row of the progress table, the chosen metrics are computed
        for both the provided training dataset and the validation_set. The
        format of this SFrame must be the same as the training set.
        By default this argument is set to 'auto' and a validation set is
        automatically sampled and used for progress printing. If
        validation_set is set to None, then no additional metrics
        are computed. The default value is 'auto'.

    distributed: env
        The distributed environment

    verbose : boolean
        whether print out messages during training

    seed : int, optional
        Seed for random number generation. Set this value to ensure that the
        same model is created every time.

    kwargs : dict
        Additional parameter options that can be passed
    """

    # Perform error-checking and trim inputs to specified columns
    dataset, validation_set = _validate_data(dataset, target, features,
                                             validation_set)

    # Sample a validation set from the training data if requested
    if isinstance(validation_set, str):
        assert validation_set == 'auto'
        if dataset.num_rows() >= 100:
            if verbose:
                print_validation_track_notification()
            dataset, validation_set = dataset.random_split(.95, seed=seed,
                                                           exact=True)
        else:
            validation_set = _turicreate.SFrame()
    elif validation_set is None:
        validation_set = _turicreate.SFrame()

    # Sanitize model-specific options
    options = {k.lower(): kwargs[k] for k in kwargs}

    # Create a model instance and train it
    model = _turicreate.extensions.__dict__[model_name]()
    with QuietProgress(verbose):
        model.train(dataset, target, validation_set, options)

    return SupervisedLearningModel(model, model_name)
[ "def", "create", "(", "dataset", ",", "target", ",", "model_name", ",", "features", "=", "None", ",", "validation_set", "=", "'auto'", ",", "distributed", "=", "'auto'", ",", "verbose", "=", "True", ",", "seed", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Perform error-checking and trim inputs to specified columns", "dataset", ",", "validation_set", "=", "_validate_data", "(", "dataset", ",", "target", ",", "features", ",", "validation_set", ")", "# Sample a validation set from the training data if requested", "if", "isinstance", "(", "validation_set", ",", "str", ")", ":", "assert", "validation_set", "==", "'auto'", "if", "dataset", ".", "num_rows", "(", ")", ">=", "100", ":", "if", "verbose", ":", "print_validation_track_notification", "(", ")", "dataset", ",", "validation_set", "=", "dataset", ".", "random_split", "(", ".95", ",", "seed", "=", "seed", ",", "exact", "=", "True", ")", "else", ":", "validation_set", "=", "_turicreate", ".", "SFrame", "(", ")", "elif", "validation_set", "is", "None", ":", "validation_set", "=", "_turicreate", ".", "SFrame", "(", ")", "# Sanitize model-specific options", "options", "=", "{", "k", ".", "lower", "(", ")", ":", "kwargs", "[", "k", "]", "for", "k", "in", "kwargs", "}", "# Create a model instance and train it", "model", "=", "_turicreate", ".", "extensions", ".", "__dict__", "[", "model_name", "]", "(", ")", "with", "QuietProgress", "(", "verbose", ")", ":", "model", ".", "train", "(", "dataset", ",", "target", ",", "validation_set", ",", "options", ")", "return", "SupervisedLearningModel", "(", "model", ",", "model_name", ")" ]
Create a :class:`~turicreate.toolkits.SupervisedLearningModel`,

This is generic function that allows you to create any model that
implements SupervisedLearningModel This function is normally not called, call
specific model's create function instead

Parameters
----------
dataset : SFrame
    Dataset for training the model.

target : string
    Name of the column containing the target variable. The values in this
    column must be 0 or 1, of integer type.

model_name : string
    Name of the model

features : list[string], optional
    List of feature names used by feature column

validation_set : SFrame, optional
    A dataset for monitoring the model's generalization performance.
    For each row of the progress table, the chosen metrics are computed
    for both the provided training dataset and the validation_set. The
    format of this SFrame must be the same as the training set.
    By default this argument is set to 'auto' and a validation set is
    automatically sampled and used for progress printing. If
    validation_set is set to None, then no additional metrics
    are computed. The default value is 'auto'.

distributed: env
    The distributed environment

verbose : boolean
    whether print out messages during training

seed : int, optional
    Seed for random number generation. Set this value to ensure that the
    same model is created every time.

kwargs : dict
    Additional parameter options that can be passed
[ "Create", "a", ":", "class", ":", "~turicreate", ".", "toolkits", ".", "SupervisedLearningModel" ]
python
train
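A minimal illustration of how create() normalizes keyword options before handing them to the underlying toolkit model; the option names here are invented for the example.

kwargs = {"Max_Iterations": 10, "L2_Penalty": 0.01}
options = {k.lower(): kwargs[k] for k in kwargs}
print(options)  # {'max_iterations': 10, 'l2_penalty': 0.01}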
materialsproject/pymatgen
pymatgen/io/abinit/abitimer.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/abitimer.py#L233-L247
def get_sections(self, section_name):
    """
    Return the list of sections stored in self.timers() given `section_name`
    A fake section is returned if the timer does not have section_name.
    """
    sections = []
    for timer in self.timers():
        for sect in timer.sections:
            if sect.name == section_name:
                sections.append(sect)
                break
        else:
            sections.append(AbinitTimerSection.fake())

    return sections
[ "def", "get_sections", "(", "self", ",", "section_name", ")", ":", "sections", "=", "[", "]", "for", "timer", "in", "self", ".", "timers", "(", ")", ":", "for", "sect", "in", "timer", ".", "sections", ":", "if", "sect", ".", "name", "==", "section_name", ":", "sections", ".", "append", "(", "sect", ")", "break", "else", ":", "sections", ".", "append", "(", "AbinitTimerSection", ".", "fake", "(", ")", ")", "return", "sections" ]
Return the list of sections stored in self.timers() given `section_name`
A fake section is returned if the timer does not have section_name.
[ "Return", "the", "list", "of", "sections", "stored", "in", "self", ".", "timers", "()", "given", "section_name", "A", "fake", "section", "is", "returned", "if", "the", "timer", "does", "not", "have", "section_name", "." ]
python
train
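A standalone sketch of the for/else lookup-with-fallback idiom used by get_sections: when the inner loop finds no match, the else branch appends a placeholder. The timer data here is invented.

timers = [["init", "fft"], ["fft"]]
section_name = "init"
sections = []
for timer_sections in timers:
    for name in timer_sections:
        if name == section_name:
            sections.append(name)
            break
    else:
        sections.append("<fake>")
print(sections)  # ['init', '<fake>']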
mozilla/socorrolib
socorrolib/lib/transform_rules.py
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/transform_rules.py#L57-L73
def predicate(self, *args, **kwargs):
    """the default predicate for Support Classifiers invokes any derivied
    _predicate function, trapping any exceptions raised in the process. We
    are obligated to catch these exceptions to give subsequent rules the
    opportunity to act. An error during the predicate application is a
    failure of the rule, not a failure of the classification system itself
    """
    try:
        return self._predicate(*args, **kwargs)
    except Exception, x:
        self.config.logger.debug(
            'Rule %s predicicate failed because of "%s"',
            to_str(self.__class__),
            x,
            exc_info=True
        )
        return False
[ "def", "predicate", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "self", ".", "_predicate", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", ",", "x", ":", "self", ".", "config", ".", "logger", ".", "debug", "(", "'Rule %s predicicate failed because of \"%s\"'", ",", "to_str", "(", "self", ".", "__class__", ")", ",", "x", ",", "exc_info", "=", "True", ")", "return", "False" ]
the default predicate for Support Classifiers invokes any derivied _predicate function, trapping any exceptions raised in the process. We are obligated to catch these exceptions to give subsequent rules the opportunity to act. An error during the predicate application is a failure of the rule, not a failure of the classification system itself
[ "the", "default", "predicate", "for", "Support", "Classifiers", "invokes", "any", "derivied", "_predicate", "function", "trapping", "any", "exceptions", "raised", "in", "the", "process", ".", "We", "are", "obligated", "to", "catch", "these", "exceptions", "to", "give", "subsequent", "rules", "the", "opportunity", "to", "act", ".", "An", "error", "during", "the", "predicate", "application", "is", "a", "failure", "of", "the", "rule", "not", "a", "failure", "of", "the", "classification", "system", "itself" ]
python
train
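An illustration of the trap-and-return-False contract described in the docstring above, written standalone for Python 3 (the record itself uses Python 2 "except Exception, x" syntax); the predicates are toy lambdas.

import logging

def safe_predicate(predicate, *args, **kwargs):
    # A failing predicate is a failure of the rule, not of the whole system
    try:
        return predicate(*args, **kwargs)
    except Exception as exc:
        logging.debug('Rule predicate failed because of "%s"', exc, exc_info=True)
        return False

print(safe_predicate(lambda x: x > 0, 5))   # True
print(safe_predicate(lambda x: 1 / 0, 5))   # False (ZeroDivisionError trapped)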