Column                      Dtype           Min       Max
Unnamed: 0                  int64           0         10k
repository_name             stringlengths   7         54
func_path_in_repository     stringlengths   5         223
func_name                   stringlengths   1         134
whole_func_string           stringlengths   100       30.3k
language                    stringclasses   1 value
func_code_string            stringlengths   100       30.3k
func_code_tokens            stringlengths   138       33.2k
func_documentation_string   stringlengths   1         15k
func_documentation_tokens   stringlengths   5         5.14k
split_name                  stringclasses   1 value
func_code_url               stringlengths   91        315
8,100
bitshares/uptick
uptick/vesting.py
claim
def claim(ctx, vestingid, account, amount):
    """ Claim funds from the vesting balance """
    vesting = Vesting(vestingid)
    if amount:
        amount = Amount(float(amount), "BTS")
    else:
        amount = vesting.claimable
    print_tx(
        ctx.bitshares.vesting_balance_withdraw(
            vesting["id"], amount=amount, account=vesting["owner"]
        )
    )
python
['def', 'claim', '(', 'ctx', ',', 'vestingid', ',', 'account', ',', 'amount', ')', ':', 'vesting', '=', 'Vesting', '(', 'vestingid', ')', 'if', 'amount', ':', 'amount', '=', 'Amount', '(', 'float', '(', 'amount', ')', ',', '"BTS"', ')', 'else', ':', 'amount', '=', 'vesting', '.', 'claimable', 'print_tx', '(', 'ctx', '.', 'bitshares', '.', 'vesting_balance_withdraw', '(', 'vesting', '[', '"id"', ']', ',', 'amount', '=', 'amount', ',', 'account', '=', 'vesting', '[', '"owner"', ']', ')', ')']
Claim funds from the vesting balance
['Claim', 'funds', 'from', 'the', 'vesting', 'balance']
train
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/vesting.py#L34-L46
8,101
python-openxml/python-docx
docx/oxml/text/font.py
CT_RPr._get_bool_val
def _get_bool_val(self, name):
    """
    Return the value of the boolean child element having *name*, e.g.
    'b', 'i', and 'smallCaps'.
    """
    element = getattr(self, name)
    if element is None:
        return None
    return element.val
python
['def', '_get_bool_val', '(', 'self', ',', 'name', ')', ':', 'element', '=', 'getattr', '(', 'self', ',', 'name', ')', 'if', 'element', 'is', 'None', ':', 'return', 'None', 'return', 'element', '.', 'val']
Return the value of the boolean child element having *name*, e.g. 'b', 'i', and 'smallCaps'.
['Return', 'the', 'value', 'of', 'the', 'boolean', 'child', 'element', 'having', '*', 'name', '*', 'e', '.', 'g', '.', 'b', 'i', 'and', 'smallCaps', '.']
train
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/text/font.py#L267-L275
8,102
wummel/patool
patoolib/programs/cpio.py
extract_cpio
def extract_cpio (archive, compression, cmd, verbosity, interactive, outdir):
    """Extract a CPIO archive."""
    cmdlist = [util.shell_quote(cmd), '--extract', '--make-directories',
               '--preserve-modification-time']
    if sys.platform.startswith('linux') and not cmd.endswith('bsdcpio'):
        cmdlist.extend(['--no-absolute-filenames', '--force-local',
                        '--nonmatching', r'"*\.\.*"'])
    if verbosity > 1:
        cmdlist.append('-v')
    cmdlist.extend(['<', util.shell_quote(os.path.abspath(archive))])
    return (cmdlist, {'cwd': outdir, 'shell': True})
python
['def', 'extract_cpio', '(', 'archive', ',', 'compression', ',', 'cmd', ',', 'verbosity', ',', 'interactive', ',', 'outdir', ')', ':', 'cmdlist', '=', '[', 'util', '.', 'shell_quote', '(', 'cmd', ')', ',', "'--extract'", ',', "'--make-directories'", ',', "'--preserve-modification-time'", ']', 'if', 'sys', '.', 'platform', '.', 'startswith', '(', "'linux'", ')', 'and', 'not', 'cmd', '.', 'endswith', '(', "'bsdcpio'", ')', ':', 'cmdlist', '.', 'extend', '(', '[', "'--no-absolute-filenames'", ',', "'--force-local'", ',', "'--nonmatching'", ',', 'r\'"*\\.\\.*"\'', ']', ')', 'if', 'verbosity', '>', '1', ':', 'cmdlist', '.', 'append', '(', "'-v'", ')', 'cmdlist', '.', 'extend', '(', '[', "'<'", ',', 'util', '.', 'shell_quote', '(', 'os', '.', 'path', '.', 'abspath', '(', 'archive', ')', ')', ']', ')', 'return', '(', 'cmdlist', ',', '{', "'cwd'", ':', 'outdir', ',', "'shell'", ':', 'True', '}', ')']
Extract a CPIO archive.
['Extract', 'a', 'CPIO', 'archive', '.']
train
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/programs/cpio.py#L21-L31
8,103
DataONEorg/d1_python
client_cli/src/d1_cli/impl/command_parser.py
CLI.do_clearrep
def do_clearrep(self, line):
    """clearrep
    Set the replication policy to default. The default replication policy
    has no preferred or blocked member nodes, allows replication and sets
    the preferred number of replicas to 3.
    """
    self._split_args(line, 0, 0)
    self._command_processor.get_session().get_replication_policy().clear()
    self._print_info_if_verbose("Cleared the replication policy")
python
['def', 'do_clearrep', '(', 'self', ',', 'line', ')', ':', 'self', '.', '_split_args', '(', 'line', ',', '0', ',', '0', ')', 'self', '.', '_command_processor', '.', 'get_session', '(', ')', '.', 'get_replication_policy', '(', ')', '.', 'clear', '(', ')', 'self', '.', '_print_info_if_verbose', '(', '"Cleared the replication policy"', ')']
clearrep Set the replication policy to default. The default replication policy has no preferred or blocked member nodes, allows replication and sets the preferred number of replicas to 3.
['clearrep', 'Set', 'the', 'replication', 'policy', 'to', 'default', '.']
train
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L304-L313
8,104
campaignmonitor/createsend-python
lib/createsend/client.py
Client.transfer_credits
def transfer_credits(self, credits, can_use_my_credits_when_they_run_out):
    """Transfer credits to or from this client.

    :param credits: An Integer representing the number of credits to
      transfer. This value may be either positive if you want to allocate
      credits from your account to the client, or negative if you want to
      deduct credits from the client back into your account.
    :param can_use_my_credits_when_they_run_out: A Boolean value
      representing which, if set to true, will allow the client to
      continue sending using your credits or payment details once they run
      out of credits, and if set to false, will prevent the client from
      using your credits to continue sending until you allocate more
      credits to them.
    :returns: An object of the following form representing the result:
      {
        AccountCredits  # Integer representing credits in your account now
        ClientCredits  # Integer representing credits in this client's account now
      }
    """
    body = {
        "Credits": credits,
        "CanUseMyCreditsWhenTheyRunOut": can_use_my_credits_when_they_run_out}
    response = self._post(self.uri_for('credits'), json.dumps(body))
    return json_to_py(response)
python
['def', 'transfer_credits', '(', 'self', ',', 'credits', ',', 'can_use_my_credits_when_they_run_out', ')', ':', 'body', '=', '{', '"Credits"', ':', 'credits', ',', '"CanUseMyCreditsWhenTheyRunOut"', ':', 'can_use_my_credits_when_they_run_out', '}', 'response', '=', 'self', '.', '_post', '(', 'self', '.', 'uri_for', '(', "'credits'", ')', ',', 'json', '.', 'dumps', '(', 'body', ')', ')', 'return', 'json_to_py', '(', 'response', ')']
Transfer credits to or from this client. :param credits: An Integer representing the number of credits to transfer. This value may be either positive if you want to allocate credits from your account to the client, or negative if you want to deduct credits from the client back into your account. :param can_use_my_credits_when_they_run_out: A Boolean value representing which, if set to true, will allow the client to continue sending using your credits or payment details once they run out of credits, and if set to false, will prevent the client from using your credits to continue sending until you allocate more credits to them. :returns: An object of the following form representing the result: { AccountCredits # Integer representing credits in your account now ClientCredits # Integer representing credits in this client's account now }
['Transfer', 'credits', 'to', 'or', 'from', 'this', 'client', '.']
train
https://github.com/campaignmonitor/createsend-python/blob/4bfe2fd5cb2fc9d8f12280b23569eea0a6c66426/lib/createsend/client.py#L125-L148
8,105
remix/partridge
partridge/readers.py
_load_feed
def _load_feed(path: str, view: View, config: nx.DiGraph) -> Feed:
    """Multi-file feed filtering"""
    config_ = remove_node_attributes(config, ["converters", "transformations"])
    feed_ = Feed(path, view={}, config=config_)
    for filename, column_filters in view.items():
        config_ = reroot_graph(config_, filename)
        view_ = {filename: column_filters}
        feed_ = Feed(feed_, view=view_, config=config_)
    return Feed(feed_, config=config)
python
['def', '_load_feed', '(', 'path', ':', 'str', ',', 'view', ':', 'View', ',', 'config', ':', 'nx', '.', 'DiGraph', ')', '->', 'Feed', ':', 'config_', '=', 'remove_node_attributes', '(', 'config', ',', '[', '"converters"', ',', '"transformations"', ']', ')', 'feed_', '=', 'Feed', '(', 'path', ',', 'view', '=', '{', '}', ',', 'config', '=', 'config_', ')', 'for', 'filename', ',', 'column_filters', 'in', 'view', '.', 'items', '(', ')', ':', 'config_', '=', 'reroot_graph', '(', 'config_', ',', 'filename', ')', 'view_', '=', '{', 'filename', ':', 'column_filters', '}', 'feed_', '=', 'Feed', '(', 'feed_', ',', 'view', '=', 'view_', ',', 'config', '=', 'config_', ')', 'return', 'Feed', '(', 'feed_', ',', 'config', '=', 'config', ')']
Multi-file feed filtering
['Multi', '-', 'file', 'feed', 'filtering']
train
https://github.com/remix/partridge/blob/0ba80fa30035e5e09fd8d7a7bdf1f28b93d53d03/partridge/readers.py#L106-L114
8,106
dpa-newslab/livebridge
livebridge/base/posts.py
BasePost.target_doc
def target_doc(self):
    """Returns resource doc as at the target, when the posting was already created \
    at the target. This property normally contains the **target_doc** data from \
    the livebrigde storage item, saved in a syndication earlier.

    :returns: dict"""
    if not hasattr(self, "_target_doc") or not self._target_doc:
        if self._existing:
            self._target_doc = self._existing.get("target_doc", {})
    return self._target_doc
python
['def', 'target_doc', '(', 'self', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', '"_target_doc"', ')', 'or', 'not', 'self', '.', '_target_doc', ':', 'if', 'self', '.', '_existing', ':', 'self', '.', '_target_doc', '=', 'self', '.', '_existing', '.', 'get', '(', '"target_doc"', ',', '{', '}', ')', 'return', 'self', '.', '_target_doc']
Returns resource doc as at the target, when the posting was already created \ at the target. This property normally contains the **target_doc** data from \ the livebrigde storage item, saved in a syndication earlier. :returns: dict
['Returns', 'resource', 'doc', 'as', 'at', 'the', 'target', 'when', 'the', 'posting', 'was', 'already', 'created', '\\', 'at', 'the', 'target', '.', 'This', 'property', 'normally', 'contains', 'the', '**', 'target_doc', '**', 'data', 'from', '\\', 'the', 'livebrigde', 'storage', 'item', 'saved', 'in', 'a', 'syndication', 'earlier', '.']
train
https://github.com/dpa-newslab/livebridge/blob/d930e887faa2f882d15b574f0f1fe4a580d7c5fa/livebridge/base/posts.py#L98-L107
8,107
mikemaccana/python-docx
docx.py
wordrelationships
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file'''
    # Default list of relationships
    # FIXME: using string hack instead of making element
    #relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    count = 0
    for relationship in relationshiplist:
        # Relationship IDs (rId) start at 1.
        rel_elm = makeelement('Relationship', nsprefix=None,
                              attributes={'Id': 'rId'+str(count+1),
                                          'Type': relationship[0],
                                          'Target': relationship[1]}
                              )
        relationships.append(rel_elm)
        count += 1
    return relationships
python
['def', 'wordrelationships', '(', 'relationshiplist', ')', ':', '# Default list of relationships', '# FIXME: using string hack instead of making element', "#relationships = makeelement('Relationships', nsprefix='pr')", 'relationships', '=', 'etree', '.', 'fromstring', '(', '\'<Relationships xmlns="http://schemas.openxmlformats.org/package/2006\'', '\'/relationships"></Relationships>\'', ')', 'count', '=', '0', 'for', 'relationship', 'in', 'relationshiplist', ':', '# Relationship IDs (rId) start at 1.', 'rel_elm', '=', 'makeelement', '(', "'Relationship'", ',', 'nsprefix', '=', 'None', ',', 'attributes', '=', '{', "'Id'", ':', "'rId'", '+', 'str', '(', 'count', '+', '1', ')', ',', "'Type'", ':', 'relationship', '[', '0', ']', ',', "'Target'", ':', 'relationship', '[', '1', ']', '}', ')', 'relationships', '.', 'append', '(', 'rel_elm', ')', 'count', '+=', '1', 'return', 'relationships']
Generate a Word relationships file
['Generate', 'a', 'Word', 'relationships', 'file']
train
https://github.com/mikemaccana/python-docx/blob/4c9b46dbebe3d2a9b82dbcd35af36584a36fd9fe/docx.py#L1031-L1049
8,108
saltstack/salt
salt/state.py
State.reconcile_extend
def reconcile_extend(self, high):
    '''
    Pull the extend data and add it to the respective high data
    '''
    errors = []
    if '__extend__' not in high:
        return high, errors
    ext = high.pop('__extend__')
    for ext_chunk in ext:
        for name, body in six.iteritems(ext_chunk):
            if name not in high:
                state_type = next(
                    x for x in body if not x.startswith('__')
                )
                # Check for a matching 'name' override in high data
                ids = find_name(name, state_type, high)
                if len(ids) != 1:
                    errors.append(
                        'Cannot extend ID \'{0}\' in \'{1}:{2}\'. It is not '
                        'part of the high state.\n'
                        'This is likely due to a missing include statement '
                        'or an incorrectly typed ID.\nEnsure that a '
                        'state with an ID of \'{0}\' is available\nin '
                        'environment \'{1}\' and to SLS \'{2}\''.format(
                            name,
                            body.get('__env__', 'base'),
                            body.get('__sls__', 'base'))
                    )
                    continue
                else:
                    name = ids[0][0]
            for state, run in six.iteritems(body):
                if state.startswith('__'):
                    continue
                if state not in high[name]:
                    high[name][state] = run
                    continue
                # high[name][state] is extended by run, both are lists
                for arg in run:
                    update = False
                    for hind in range(len(high[name][state])):
                        if isinstance(arg, six.string_types) and isinstance(high[name][state][hind], six.string_types):
                            # replacing the function, replace the index
                            high[name][state].pop(hind)
                            high[name][state].insert(hind, arg)
                            update = True
                            continue
                        if isinstance(arg, dict) and isinstance(high[name][state][hind], dict):
                            # It is an option, make sure the options match
                            argfirst = next(iter(arg))
                            if argfirst == next(iter(high[name][state][hind])):
                                # If argfirst is a requisite then we must merge
                                # our requisite with that of the target state
                                if argfirst in STATE_REQUISITE_KEYWORDS:
                                    high[name][state][hind][argfirst].extend(arg[argfirst])
                                # otherwise, its not a requisite and we are just extending (replacing)
                                else:
                                    high[name][state][hind] = arg
                                update = True
                            if (argfirst == 'name' and
                                    next(iter(high[name][state][hind])) == 'names'):
                                # If names are overwritten by name use the name
                                high[name][state][hind] = arg
                    if not update:
                        high[name][state].append(arg)
    return high, errors
python
['def', 'reconcile_extend', '(', 'self', ',', 'high', ')', ':', 'errors', '=', '[', ']', 'if', "'__extend__'", 'not', 'in', 'high', ':', 'return', 'high', ',', 'errors', 'ext', '=', 'high', '.', 'pop', '(', "'__extend__'", ')', 'for', 'ext_chunk', 'in', 'ext', ':', 'for', 'name', ',', 'body', 'in', 'six', '.', 'iteritems', '(', 'ext_chunk', ')', ':', 'if', 'name', 'not', 'in', 'high', ':', 'state_type', '=', 'next', '(', 'x', 'for', 'x', 'in', 'body', 'if', 'not', 'x', '.', 'startswith', '(', "'__'", ')', ')', "# Check for a matching 'name' override in high data", 'ids', '=', 'find_name', '(', 'name', ',', 'state_type', ',', 'high', ')', 'if', 'len', '(', 'ids', ')', '!=', '1', ':', 'errors', '.', 'append', '(', "'Cannot extend ID \\'{0}\\' in \\'{1}:{2}\\'. It is not '", "'part of the high state.\\n'", "'This is likely due to a missing include statement '", "'or an incorrectly typed ID.\\nEnsure that a '", "'state with an ID of \\'{0}\\' is available\\nin '", "'environment \\'{1}\\' and to SLS \\'{2}\\''", '.', 'format', '(', 'name', ',', 'body', '.', 'get', '(', "'__env__'", ',', "'base'", ')', ',', 'body', '.', 'get', '(', "'__sls__'", ',', "'base'", ')', ')', ')', 'continue', 'else', ':', 'name', '=', 'ids', '[', '0', ']', '[', '0', ']', 'for', 'state', ',', 'run', 'in', 'six', '.', 'iteritems', '(', 'body', ')', ':', 'if', 'state', '.', 'startswith', '(', "'__'", ')', ':', 'continue', 'if', 'state', 'not', 'in', 'high', '[', 'name', ']', ':', 'high', '[', 'name', ']', '[', 'state', ']', '=', 'run', 'continue', '# high[name][state] is extended by run, both are lists', 'for', 'arg', 'in', 'run', ':', 'update', '=', 'False', 'for', 'hind', 'in', 'range', '(', 'len', '(', 'high', '[', 'name', ']', '[', 'state', ']', ')', ')', ':', 'if', 'isinstance', '(', 'arg', ',', 'six', '.', 'string_types', ')', 'and', 'isinstance', '(', 'high', '[', 'name', ']', '[', 'state', ']', '[', 'hind', ']', ',', 'six', '.', 'string_types', ')', ':', '# replacing the function, replace the index', 'high', '[', 'name', ']', '[', 'state', ']', '.', 'pop', '(', 'hind', ')', 'high', '[', 'name', ']', '[', 'state', ']', '.', 'insert', '(', 'hind', ',', 'arg', ')', 'update', '=', 'True', 'continue', 'if', 'isinstance', '(', 'arg', ',', 'dict', ')', 'and', 'isinstance', '(', 'high', '[', 'name', ']', '[', 'state', ']', '[', 'hind', ']', ',', 'dict', ')', ':', '# It is an option, make sure the options match', 'argfirst', '=', 'next', '(', 'iter', '(', 'arg', ')', ')', 'if', 'argfirst', '==', 'next', '(', 'iter', '(', 'high', '[', 'name', ']', '[', 'state', ']', '[', 'hind', ']', ')', ')', ':', '# If argfirst is a requisite then we must merge', '# our requisite with that of the target state', 'if', 'argfirst', 'in', 'STATE_REQUISITE_KEYWORDS', ':', 'high', '[', 'name', ']', '[', 'state', ']', '[', 'hind', ']', '[', 'argfirst', ']', '.', 'extend', '(', 'arg', '[', 'argfirst', ']', ')', '# otherwise, its not a requisite and we are just extending (replacing)', 'else', ':', 'high', '[', 'name', ']', '[', 'state', ']', '[', 'hind', ']', '=', 'arg', 'update', '=', 'True', 'if', '(', 'argfirst', '==', "'name'", 'and', 'next', '(', 'iter', '(', 'high', '[', 'name', ']', '[', 'state', ']', '[', 'hind', ']', ')', ')', '==', "'names'", ')', ':', '# If names are overwritten by name use the name', 'high', '[', 'name', ']', '[', 'state', ']', '[', 'hind', ']', '=', 'arg', 'if', 'not', 'update', ':', 'high', '[', 'name', ']', '[', 'state', ']', '.', 'append', '(', 'arg', ')', 'return', 'high', ',', 'errors']
Pull the extend data and add it to the respective high data
['Pull', 'the', 'extend', 'data', 'and', 'add', 'it', 'to', 'the', 'respective', 'high', 'data']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/state.py#L1454-L1520
8,109
rwl/pylon
pylon/util.py
format_from_extension
def format_from_extension(fname):
    """ Tries to infer a protocol from the file extension."""
    _base, ext = os.path.splitext(fname)
    if not ext:
        return None
    try:
        format = known_extensions[ext.replace('.', '')]
    except KeyError:
        format = None
    return format
python
['def', 'format_from_extension', '(', 'fname', ')', ':', '_base', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'fname', ')', 'if', 'not', 'ext', ':', 'return', 'None', 'try', ':', 'format', '=', 'known_extensions', '[', 'ext', '.', 'replace', '(', "'.'", ',', "''", ')', ']', 'except', 'KeyError', ':', 'format', '=', 'None', 'return', 'format']
Tries to infer a protocol from the file extension.
['Tries', 'to', 'infer', 'a', 'protocol', 'from', 'the', 'file', 'extension', '.']
train
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/util.py#L159-L168
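The lookup above relies on a module-level known_extensions table that this record does not include; below is a minimal self-contained sketch of the behaviour, with a hypothetical mapping standing in for the real one.

import os

# Hypothetical stand-in for pylon's module-level table; the real keys and
# values live in pylon/util.py and are not shown in this record.
known_extensions = {'m': 'matpower', 'raw': 'psse'}

def format_from_extension(fname):
    """ Tries to infer a protocol from the file extension."""
    _base, ext = os.path.splitext(fname)
    if not ext:
        return None
    try:
        format = known_extensions[ext.replace('.', '')]
    except KeyError:
        format = None
    return format

print(format_from_extension('case30.m'))   # -> 'matpower'
print(format_from_extension('notes.txt'))  # -> None (unknown extension)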
8,110
zhmcclient/python-zhmcclient
zhmcclient_mock/_urihandler.py
StorageGroupAddCandidatePortsHandler.post
def post(method, hmc, uri, uri_parms, body, logon_required,
         wait_for_completion):
    """Operation: Add Candidate Adapter Ports to an FCP Storage Group."""
    assert wait_for_completion is True  # async not supported yet
    # The URI is a POST operation, so we need to construct the SG URI
    storage_group_oid = uri_parms[0]
    storage_group_uri = '/api/storage-groups/' + storage_group_oid
    try:
        storage_group = hmc.lookup_by_uri(storage_group_uri)
    except KeyError:
        raise InvalidResourceError(method, uri)
    check_required_fields(method, uri, body, ['adapter-port-uris'])
    # TODO: Check that storage group has type FCP
    # Reflect the result of adding the candidate ports
    candidate_adapter_port_uris = \
        storage_group.properties['candidate-adapter-port-uris']
    for ap_uri in body['adapter-port-uris']:
        if ap_uri in candidate_adapter_port_uris:
            raise ConflictError(method, uri, 483,
                                "Adapter port is already in candidate "
                                "list of storage group %s: %s" %
                                (storage_group.name, ap_uri))
        else:
            candidate_adapter_port_uris.append(ap_uri)
python
['def', 'post', '(', 'method', ',', 'hmc', ',', 'uri', ',', 'uri_parms', ',', 'body', ',', 'logon_required', ',', 'wait_for_completion', ')', ':', 'assert', 'wait_for_completion', 'is', 'True', '# async not supported yet', '# The URI is a POST operation, so we need to construct the SG URI', 'storage_group_oid', '=', 'uri_parms', '[', '0', ']', 'storage_group_uri', '=', "'/api/storage-groups/'", '+', 'storage_group_oid', 'try', ':', 'storage_group', '=', 'hmc', '.', 'lookup_by_uri', '(', 'storage_group_uri', ')', 'except', 'KeyError', ':', 'raise', 'InvalidResourceError', '(', 'method', ',', 'uri', ')', 'check_required_fields', '(', 'method', ',', 'uri', ',', 'body', ',', '[', "'adapter-port-uris'", ']', ')', '# TODO: Check that storage group has type FCP', '# Reflect the result of adding the candidate ports', 'candidate_adapter_port_uris', '=', 'storage_group', '.', 'properties', '[', "'candidate-adapter-port-uris'", ']', 'for', 'ap_uri', 'in', 'body', '[', "'adapter-port-uris'", ']', ':', 'if', 'ap_uri', 'in', 'candidate_adapter_port_uris', ':', 'raise', 'ConflictError', '(', 'method', ',', 'uri', ',', '483', ',', '"Adapter port is already in candidate "', '"list of storage group %s: %s"', '%', '(', 'storage_group', '.', 'name', ',', 'ap_uri', ')', ')', 'else', ':', 'candidate_adapter_port_uris', '.', 'append', '(', 'ap_uri', ')']
Operation: Add Candidate Adapter Ports to an FCP Storage Group.
['Operation', ':', 'Add', 'Candidate', 'Adapter', 'Ports', 'to', 'an', 'FCP', 'Storage', 'Group', '.']
train
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L2226-L2252
8,111
yougov/pmxbot
pmxbot/core.py
Handler.find_matching
def find_matching(cls, message, channel):
    """
    Yield ``cls`` subclasses that match message and channel
    """
    return (
        handler
        for handler in cls._registry
        if isinstance(handler, cls)
        and handler.match(message, channel)
    )
python
['def', 'find_matching', '(', 'cls', ',', 'message', ',', 'channel', ')', ':', 'return', '(', 'handler', 'for', 'handler', 'in', 'cls', '.', '_registry', 'if', 'isinstance', '(', 'handler', ',', 'cls', ')', 'and', 'handler', '.', 'match', '(', 'message', ',', 'channel', ')', ')']
Yield ``cls`` subclasses that match message and channel
['Yield', 'cls', 'subclasses', 'that', 'match', 'message', 'and', 'channel']
train
https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/core.py#L145-L154
8,112
estnltk/estnltk
estnltk/text.py
Text.spelling
def spelling(self):
    """Flag incorrectly spelled words.
    Returns a list of booleans, where element at each position denotes,
    if the word at the same position is spelled correctly.
    """
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return [data[SPELLING] for data in vabamorf.spellcheck(self.word_texts, suggestions=False)]
python
['def', 'spelling', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'is_tagged', '(', 'WORDS', ')', ':', 'self', '.', 'tokenize_words', '(', ')', 'return', '[', 'data', '[', 'SPELLING', ']', 'for', 'data', 'in', 'vabamorf', '.', 'spellcheck', '(', 'self', '.', 'word_texts', ',', 'suggestions', '=', 'False', ')', ']']
Flag incorrectly spelled words. Returns a list of booleans, where element at each position denotes, if the word at the same position is spelled correctly.
['Flag', 'incorrectly', 'spelled', 'words', '.', 'Returns', 'a', 'list', 'of', 'booleans', 'where', 'element', 'at', 'each', 'position', 'denotes', 'if', 'the', 'word', 'at', 'the', 'same', 'position', 'is', 'spelled', 'correctly', '.']
train
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1163-L1170
8,113
silver-castle/mach9
mach9/response.py
redirect
def redirect(to, headers=None, status=302,
             content_type='text/html; charset=utf-8'):
    '''Abort execution and cause a 302 redirect (by default).

    :param to: path or fully qualified URL to redirect to
    :param headers: optional dict of headers to include in the new request
    :param status: status code (int) of the new request, defaults to 302
    :param content_type: the content type (string) of the response
    :returns: the redirecting Response
    '''
    headers = headers or {}
    # According to RFC 7231, a relative URI is now permitted.
    headers['Location'] = to
    return HTTPResponse(
        status=status,
        headers=headers,
        content_type=content_type)
python
['def', 'redirect', '(', 'to', ',', 'headers', '=', 'None', ',', 'status', '=', '302', ',', 'content_type', '=', "'text/html; charset=utf-8'", ')', ':', 'headers', '=', 'headers', 'or', '{', '}', '# According to RFC 7231, a relative URI is now permitted.', 'headers', '[', "'Location'", ']', '=', 'to', 'return', 'HTTPResponse', '(', 'status', '=', 'status', ',', 'headers', '=', 'headers', ',', 'content_type', '=', 'content_type', ')']
Abort execution and cause a 302 redirect (by default). :param to: path or fully qualified URL to redirect to :param headers: optional dict of headers to include in the new request :param status: status code (int) of the new request, defaults to 302 :param content_type: the content type (string) of the response :returns: the redirecting Response
['Abort', 'execution', 'and', 'cause', 'a', '302', 'redirect', '(', 'by', 'default', ')', '.']
train
https://github.com/silver-castle/mach9/blob/7a623aab3c70d89d36ade6901b6307e115400c5e/mach9/response.py#L376-L394
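A short usage sketch for the helper above, assuming mach9 is installed; the paths and the alternate status code here are illustrative only.

from mach9.response import redirect

# Default: a 302 response whose Location header points at the target.
response = redirect('/login')

# The same helper with an overridden status for a permanent redirect.
permanent = redirect('https://example.com/new-home', status=301)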
8,114
etingof/pysnmp
pysnmp/smi/builder.py
MibBuilder.loadModules
def loadModules(self, *modNames, **userCtx):
    """Load (optionally, compiling) pysnmp MIB modules"""
    # Build a list of available modules
    if not modNames:
        modNames = {}
        for mibSource in self._mibSources:
            for modName in mibSource.listdir():
                modNames[modName] = None
        modNames = list(modNames)
    if not modNames:
        raise error.MibNotFoundError(
            'No MIB module to load at %s' % (self,))
    for modName in modNames:
        try:
            self.loadModule(modName, **userCtx)
        except error.MibNotFoundError:
            if not self._mibCompiler:
                raise
            debug.logger & debug.FLAG_BLD and debug.logger(
                'loadModules: calling MIB compiler for %s' % modName)
            status = self._mibCompiler.compile(modName, genTexts=self.loadTexts)
            errs = '; '.join(
                hasattr(x, 'error') and str(x.error) or x
                for x in status.values()
                if x in ('failed', 'missing'))
            if errs:
                raise error.MibNotFoundError(
                    '%s compilation error(s): %s' % (modName, errs))
            # compilation succeeded, MIB might load now
            self.loadModule(modName, **userCtx)
    return self
python
['def', 'loadModules', '(', 'self', ',', '*', 'modNames', ',', '*', '*', 'userCtx', ')', ':', '# Build a list of available modules', 'if', 'not', 'modNames', ':', 'modNames', '=', '{', '}', 'for', 'mibSource', 'in', 'self', '.', '_mibSources', ':', 'for', 'modName', 'in', 'mibSource', '.', 'listdir', '(', ')', ':', 'modNames', '[', 'modName', ']', '=', 'None', 'modNames', '=', 'list', '(', 'modNames', ')', 'if', 'not', 'modNames', ':', 'raise', 'error', '.', 'MibNotFoundError', '(', "'No MIB module to load at %s'", '%', '(', 'self', ',', ')', ')', 'for', 'modName', 'in', 'modNames', ':', 'try', ':', 'self', '.', 'loadModule', '(', 'modName', ',', '*', '*', 'userCtx', ')', 'except', 'error', '.', 'MibNotFoundError', ':', 'if', 'not', 'self', '.', '_mibCompiler', ':', 'raise', 'debug', '.', 'logger', '&', 'debug', '.', 'FLAG_BLD', 'and', 'debug', '.', 'logger', '(', "'loadModules: calling MIB compiler for %s'", '%', 'modName', ')', 'status', '=', 'self', '.', '_mibCompiler', '.', 'compile', '(', 'modName', ',', 'genTexts', '=', 'self', '.', 'loadTexts', ')', 'errs', '=', "'; '", '.', 'join', '(', 'hasattr', '(', 'x', ',', "'error'", ')', 'and', 'str', '(', 'x', '.', 'error', ')', 'or', 'x', 'for', 'x', 'in', 'status', '.', 'values', '(', ')', 'if', 'x', 'in', '(', "'failed'", ',', "'missing'", ')', ')', 'if', 'errs', ':', 'raise', 'error', '.', 'MibNotFoundError', '(', "'%s compilation error(s): %s'", '%', '(', 'modName', ',', 'errs', ')', ')', '# compilation succeeded, MIB might load now', 'self', '.', 'loadModule', '(', 'modName', ',', '*', '*', 'userCtx', ')', 'return', 'self']
Load (optionally, compiling) pysnmp MIB modules
['Load', '(', 'optionally', 'compiling', ')', 'pysnmp', 'MIB', 'modules']
train
https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/builder.py#L409-L450
8,115
RudolfCardinal/pythonlib
cardinal_pythonlib/dicts.py
merge_two_dicts
def merge_two_dicts(x: Dict, y: Dict) -> Dict:
    """
    Given two dicts, merge them into a new dict as a shallow copy, e.g.

    .. code-block:: python

        z = merge_two_dicts(x, y)

    If you can guarantee Python 3.5, then a simpler syntax is:

    .. code-block:: python

        z = {**x, **y}

    See http://stackoverflow.com/questions/38987.
    """
    z = x.copy()
    z.update(y)
    return z
python
['def', 'merge_two_dicts', '(', 'x', ':', 'Dict', ',', 'y', ':', 'Dict', ')', '->', 'Dict', ':', 'z', '=', 'x', '.', 'copy', '(', ')', 'z', '.', 'update', '(', 'y', ')', 'return', 'z']
Given two dicts, merge them into a new dict as a shallow copy, e.g. .. code-block:: python z = merge_two_dicts(x, y) If you can guarantee Python 3.5, then a simpler syntax is: .. code-block:: python z = {**x, **y} See http://stackoverflow.com/questions/38987.
['Given', 'two', 'dicts', 'merge', 'them', 'into', 'a', 'new', 'dict', 'as', 'a', 'shallow', 'copy', 'e', '.', 'g', '.']
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/dicts.py#L60-L78
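A self-contained example of the shallow merge described in the docstring above; note that y's keys win on collision and neither input dict is mutated.

from typing import Dict

def merge_two_dicts(x: Dict, y: Dict) -> Dict:
    # Shallow copy of x, then overlay y's keys on top.
    z = x.copy()
    z.update(y)
    return z

x = {'a': 1, 'b': 2}
y = {'b': 3, 'c': 4}
print(merge_two_dicts(x, y))  # {'a': 1, 'b': 3, 'c': 4}
print(x)                      # {'a': 1, 'b': 2}  (x is unchanged)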
8,116
petl-developers/petl
petl/util/counting.py
parsecounter
def parsecounter(table, field, parsers=(('int', int), ('float', float))):
    """
    Count the number of `str` or `unicode` values under the given fields that
    can be parsed as ints, floats or via custom parser functions. Return a
    pair of `Counter` objects, the first mapping parser names to the number
    of strings successfully parsed, the second mapping parser names to the
    number of errors. E.g.::

        >>> import petl as etl
        >>> table = [['foo', 'bar', 'baz'],
        ...          ['A', 'aaa', 2],
        ...          ['B', u'2', '3.4'],
        ...          [u'B', u'3', u'7.8', True],
        ...          ['D', '3.7', 9.0],
        ...          ['E', 42]]
        >>> counter, errors = etl.parsecounter(table, 'bar')
        >>> counter
        Counter({'float': 3, 'int': 2})
        >>> errors
        Counter({'int': 2, 'float': 1})

    The `field` argument can be a field name or index (starting from zero).
    """
    if isinstance(parsers, (list, tuple)):
        parsers = dict(parsers)
    counter, errors = Counter(), Counter()
    # need to initialise
    for n in parsers.keys():
        counter[n] = 0
        errors[n] = 0
    for v in values(table, field):
        if isinstance(v, string_types):
            for name, parser in parsers.items():
                try:
                    parser(v)
                except:
                    errors[name] += 1
                else:
                    counter[name] += 1
    return counter, errors
python
['def', 'parsecounter', '(', 'table', ',', 'field', ',', 'parsers', '=', '(', '(', "'int'", ',', 'int', ')', ',', '(', "'float'", ',', 'float', ')', ')', ')', ':', 'if', 'isinstance', '(', 'parsers', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'parsers', '=', 'dict', '(', 'parsers', ')', 'counter', ',', 'errors', '=', 'Counter', '(', ')', ',', 'Counter', '(', ')', '# need to initialise', 'for', 'n', 'in', 'parsers', '.', 'keys', '(', ')', ':', 'counter', '[', 'n', ']', '=', '0', 'errors', '[', 'n', ']', '=', '0', 'for', 'v', 'in', 'values', '(', 'table', ',', 'field', ')', ':', 'if', 'isinstance', '(', 'v', ',', 'string_types', ')', ':', 'for', 'name', ',', 'parser', 'in', 'parsers', '.', 'items', '(', ')', ':', 'try', ':', 'parser', '(', 'v', ')', 'except', ':', 'errors', '[', 'name', ']', '+=', '1', 'else', ':', 'counter', '[', 'name', ']', '+=', '1', 'return', 'counter', ',', 'errors']
Count the number of `str` or `unicode` values under the given fields that can be parsed as ints, floats or via custom parser functions. Return a pair of `Counter` objects, the first mapping parser names to the number of strings successfully parsed, the second mapping parser names to the number of errors. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar', 'baz'], ... ['A', 'aaa', 2], ... ['B', u'2', '3.4'], ... [u'B', u'3', u'7.8', True], ... ['D', '3.7', 9.0], ... ['E', 42]] >>> counter, errors = etl.parsecounter(table, 'bar') >>> counter Counter({'float': 3, 'int': 2}) >>> errors Counter({'int': 2, 'float': 1}) The `field` argument can be a field name or index (starting from zero).
['Count', 'the', 'number', 'of', 'str', 'or', 'unicode', 'values', 'under', 'the', 'given', 'fields', 'that', 'can', 'be', 'parsed', 'as', 'ints', 'floats', 'or', 'via', 'custom', 'parser', 'functions', '.', 'Return', 'a', 'pair', 'of', 'Counter', 'objects', 'the', 'first', 'mapping', 'parser', 'names', 'to', 'the', 'number', 'of', 'strings', 'successfully', 'parsed', 'the', 'second', 'mapping', 'parser', 'names', 'to', 'the', 'number', 'of', 'errors', '.', 'E', '.', 'g', '.', '::']
train
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/util/counting.py#L165-L206
8,117
quantopian/trading_calendars
trading_calendars/calendar_utils.py
TradingCalendarDispatcher.register_calendar_alias
def register_calendar_alias(self, alias, real_name, force=False):
    """
    Register an alias for a calendar.

    This is useful when multiple exchanges should share a calendar, or
    when there are multiple ways to refer to the same exchange.

    After calling ``register_alias('alias', 'real_name')``, subsequent
    calls to ``get_calendar('alias')`` will return the same result as
    ``get_calendar('real_name')``.

    Parameters
    ----------
    alias : str
        The name to be used to refer to a calendar.
    real_name : str
        The canonical name of the registered calendar.
    force : bool, optional
        If True, old calendars will be overwritten on a name collision.
        If False, name collisions will raise an exception.
        Default is False.
    """
    if force:
        self.deregister_calendar(alias)

    if self.has_calendar(alias):
        raise CalendarNameCollision(calendar_name=alias)

    self._aliases[alias] = real_name

    # Ensure that the new alias doesn't create a cycle, and back it out if
    # we did.
    try:
        self.resolve_alias(alias)
    except CyclicCalendarAlias:
        del self._aliases[alias]
        raise
python
['def', 'register_calendar_alias', '(', 'self', ',', 'alias', ',', 'real_name', ',', 'force', '=', 'False', ')', ':', 'if', 'force', ':', 'self', '.', 'deregister_calendar', '(', 'alias', ')', 'if', 'self', '.', 'has_calendar', '(', 'alias', ')', ':', 'raise', 'CalendarNameCollision', '(', 'calendar_name', '=', 'alias', ')', 'self', '.', '_aliases', '[', 'alias', ']', '=', 'real_name', "# Ensure that the new alias doesn't create a cycle, and back it out if", '# we did.', 'try', ':', 'self', '.', 'resolve_alias', '(', 'alias', ')', 'except', 'CyclicCalendarAlias', ':', 'del', 'self', '.', '_aliases', '[', 'alias', ']', 'raise']
Register an alias for a calendar. This is useful when multiple exchanges should share a calendar, or when there are multiple ways to refer to the same exchange. After calling ``register_alias('alias', 'real_name')``, subsequent calls to ``get_calendar('alias')`` will return the same result as ``get_calendar('real_name')``. Parameters ---------- alias : str The name to be used to refer to a calendar. real_name : str The canonical name of the registered calendar. force : bool, optional If True, old calendars will be overwritten on a name collision. If False, name collisions will raise an exception. Default is False.
['Register', 'an', 'alias', 'for', 'a', 'calendar', '.']
train
https://github.com/quantopian/trading_calendars/blob/951711c82c8a2875c09e96e2979faaf8734fb4df/trading_calendars/calendar_utils.py#L219-L255
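A minimal sketch of the aliasing behaviour described in the docstring above, assuming the dispatcher can be built from empty calendars/calendar_factories/aliases mappings (an assumption about the constructor; 'XNYS-ALIAS' is a hypothetical name).

from trading_calendars.calendar_utils import TradingCalendarDispatcher

# Assumed constructor arguments; only register_calendar_alias and
# resolve_alias from the record above are exercised here.
dispatcher = TradingCalendarDispatcher(
    calendars={}, calendar_factories={}, aliases={})
dispatcher.register_calendar_alias('XNYS-ALIAS', 'XNYS')
assert dispatcher.resolve_alias('XNYS-ALIAS') == 'XNYS'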
8,118
aquatix/ns-api
ns_api.py
list_same
def list_same(list_a, list_b):
    """
    Return the items from list_b that are also on list_a
    """
    result = []
    for item in list_b:
        if item in list_a:
            result.append(item)
    return result
python
['def', 'list_same', '(', 'list_a', ',', 'list_b', ')', ':', 'result', '=', '[', ']', 'for', 'item', 'in', 'list_b', ':', 'if', 'item', 'in', 'list_a', ':', 'result', '.', 'append', '(', 'item', ')', 'return', 'result']
Return the items from list_b that are also on list_a
['Return', 'the', 'items', 'from', 'list_b', 'that', 'are', 'also', 'on', 'list_a']
train
https://github.com/aquatix/ns-api/blob/9b3379f8df6217132f457c4363457c16321c2448/ns_api.py#L137-L145
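A self-contained example of the intersection helper above; the result keeps list_b's order and retains any duplicates that appear in list_b.

def list_same(list_a, list_b):
    # Collect each element of list_b that also occurs in list_a.
    result = []
    for item in list_b:
        if item in list_a:
            result.append(item)
    return result

print(list_same([1, 2, 3], [3, 4, 2, 2]))  # [3, 2, 2]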
8,119
lsbardel/python-stdnet
stdnet/odm/related.py
do_pending_lookups
def do_pending_lookups(event, sender, **kwargs):
    """Handle any pending relations to the sending model.
    Sent from class_prepared."""
    key = (sender._meta.app_label, sender._meta.name)
    for callback in pending_lookups.pop(key, []):
        callback(sender)
python
['def', 'do_pending_lookups', '(', 'event', ',', 'sender', ',', '*', '*', 'kwargs', ')', ':', 'key', '=', '(', 'sender', '.', '_meta', '.', 'app_label', ',', 'sender', '.', '_meta', '.', 'name', ')', 'for', 'callback', 'in', 'pending_lookups', '.', 'pop', '(', 'key', ',', '[', ']', ')', ':', 'callback', '(', 'sender', ')']
Handle any pending relations to the sending model. Sent from class_prepared.
['Handle', 'any', 'pending', 'relations', 'to', 'the', 'sending', 'model', '.', 'Sent', 'from', 'class_prepared', '.']
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/related.py#L66-L71
8,120
AnthonyBloomer/daftlistings
daftlistings/listing.py
Listing.dwelling_type
def dwelling_type(self):
    """
    This method returns the dwelling type.
    :return:
    """
    try:
        if self._data_from_search:
            info = self._data_from_search.find(
                'ul', {"class": "info"}).text
            s = info.split('|')
            return s[0].strip()
        else:
            return self._ad_page_content.find(
                'div', {'id': 'smi-summary-items'}
            ).find('span', {'class': 'header_text'}).text
    except Exception as e:
        if self._debug:
            logging.error(
                "Error getting dwelling_type. Error message: " + e.args[0])
        return
python
['def', 'dwelling_type', '(', 'self', ')', ':', 'try', ':', 'if', 'self', '.', '_data_from_search', ':', 'info', '=', 'self', '.', '_data_from_search', '.', 'find', '(', "'ul'", ',', '{', '"class"', ':', '"info"', '}', ')', '.', 'text', 's', '=', 'info', '.', 'split', '(', "'|'", ')', 'return', 's', '[', '0', ']', '.', 'strip', '(', ')', 'else', ':', 'return', 'self', '.', '_ad_page_content', '.', 'find', '(', "'div'", ',', '{', "'id'", ':', "'smi-summary-items'", '}', ')', '.', 'find', '(', "'span'", ',', '{', "'class'", ':', "'header_text'", '}', ')', '.', 'text', 'except', 'Exception', 'as', 'e', ':', 'if', 'self', '.', '_debug', ':', 'logging', '.', 'error', '(', '"Error getting dwelling_type. Error message: "', '+', 'e', '.', 'args', '[', '0', ']', ')', 'return']
This method returns the dwelling type. :return:
['This', 'method', 'returns', 'the', 'dwelling', 'type', '.', ':', 'return', ':']
train
https://github.com/AnthonyBloomer/daftlistings/blob/f6c1b52425bc740f443b5efe6632a4bf18ee997f/daftlistings/listing.py#L437-L457
8,121
odlgroup/odl
odl/contrib/mrc/uncompr_bin.py
FileReaderRawBinaryWithHeader.read_data
def read_data(self, dstart=None, dend=None):
    """Read data from `file` and return it as Numpy array.

    Parameters
    ----------
    dstart : int, optional
        Offset in bytes of the data field. By default, it is taken to
        be the header size as determined from reading the header.
        Backwards indexing with negative values is also supported.
        Use a value larger than the header size to extract a data subset.
    dend : int, optional
        End position in bytes until which data is read (exclusive).
        Backwards indexing with negative values is also supported.
        Use a value different from the file size to extract a data subset.

    Returns
    -------
    data : `numpy.ndarray`
        The data read from `file`.

    See Also
    --------
    read_header
    """
    self.file.seek(0, 2)  # 2 means "from the end"
    filesize_bytes = self.file.tell()
    if dstart is None:
        dstart_abs = int(self.header_size)
    elif dstart < 0:
        dstart_abs = filesize_bytes + int(dstart)
    else:
        dstart_abs = int(dstart)

    if dend is None:
        dend_abs = int(filesize_bytes)
    elif dend < 0:
        dend_abs = int(dend) + filesize_bytes
    else:
        dend_abs = int(dend)

    if dstart_abs >= dend_abs:
        raise ValueError('invalid `dstart` and `dend`, resulting in '
                         'absolute `dstart` >= `dend` ({} >= {})'
                         ''.format(dstart_abs, dend_abs))
    if dstart_abs < self.header_size:
        raise ValueError('invalid `dstart`, resulting in absolute '
                         '`dstart` < `header_size` ({} < {})'
                         ''.format(dstart_abs, self.header_size))
    if dend_abs > filesize_bytes:
        raise ValueError('invalid `dend`, resulting in absolute '
                         '`dend` > `filesize_bytes` ({} < {})'
                         ''.format(dend_abs, filesize_bytes))

    num_elems = (dend_abs - dstart_abs) / self.data_dtype.itemsize
    if num_elems != int(num_elems):
        raise ValueError(
            'trying to read {} bytes, which is not a multiple of '
            'the itemsize {} of the data type {}'
            ''.format(dend_abs - dstart_abs, self.data_dtype.itemsize,
                      self.data_dtype))
    self.file.seek(dstart_abs)
    array = np.empty(int(num_elems), dtype=self.data_dtype)
    self.file.readinto(array.data)
    return array
python
['def', 'read_data', '(', 'self', ',', 'dstart', '=', 'None', ',', 'dend', '=', 'None', ')', ':', 'self', '.', 'file', '.', 'seek', '(', '0', ',', '2', ')', '# 2 means "from the end"', 'filesize_bytes', '=', 'self', '.', 'file', '.', 'tell', '(', ')', 'if', 'dstart', 'is', 'None', ':', 'dstart_abs', '=', 'int', '(', 'self', '.', 'header_size', ')', 'elif', 'dstart', '<', '0', ':', 'dstart_abs', '=', 'filesize_bytes', '+', 'int', '(', 'dstart', ')', 'else', ':', 'dstart_abs', '=', 'int', '(', 'dstart', ')', 'if', 'dend', 'is', 'None', ':', 'dend_abs', '=', 'int', '(', 'filesize_bytes', ')', 'elif', 'dend', '<', '0', ':', 'dend_abs', '=', 'int', '(', 'dend', ')', '+', 'filesize_bytes', 'else', ':', 'dend_abs', '=', 'int', '(', 'dend', ')', 'if', 'dstart_abs', '>=', 'dend_abs', ':', 'raise', 'ValueError', '(', "'invalid `dstart` and `dend`, resulting in '", "'absolute `dstart` >= `dend` ({} >= {})'", "''", '.', 'format', '(', 'dstart_abs', ',', 'dend_abs', ')', ')', 'if', 'dstart_abs', '<', 'self', '.', 'header_size', ':', 'raise', 'ValueError', '(', "'invalid `dstart`, resulting in absolute '", "'`dstart` < `header_size` ({} < {})'", "''", '.', 'format', '(', 'dstart_abs', ',', 'self', '.', 'header_size', ')', ')', 'if', 'dend_abs', '>', 'filesize_bytes', ':', 'raise', 'ValueError', '(', "'invalid `dend`, resulting in absolute '", "'`dend` > `filesize_bytes` ({} < {})'", "''", '.', 'format', '(', 'dend_abs', ',', 'filesize_bytes', ')', ')', 'num_elems', '=', '(', 'dend_abs', '-', 'dstart_abs', ')', '/', 'self', '.', 'data_dtype', '.', 'itemsize', 'if', 'num_elems', '!=', 'int', '(', 'num_elems', ')', ':', 'raise', 'ValueError', '(', "'trying to read {} bytes, which is not a multiple of '", "'the itemsize {} of the data type {}'", "''", '.', 'format', '(', 'dend_abs', '-', 'dstart_abs', ',', 'self', '.', 'data_dtype', '.', 'itemsize', ',', 'self', '.', 'data_dtype', ')', ')', 'self', '.', 'file', '.', 'seek', '(', 'dstart_abs', ')', 'array', '=', 'np', '.', 'empty', '(', 'int', '(', 'num_elems', ')', ',', 'dtype', '=', 'self', '.', 'data_dtype', ')', 'self', '.', 'file', '.', 'readinto', '(', 'array', '.', 'data', ')', 'return', 'array']
Read data from `file` and return it as a Numpy array.

Parameters
----------
dstart : int, optional
    Offset in bytes of the data field. By default, it is taken to
    be the header size as determined from reading the header.
    Backwards indexing with negative values is also supported.
    Use a value larger than the header size to extract a data
    subset.
dend : int, optional
    End position in bytes until which data is read (exclusive).
    Backwards indexing with negative values is also supported.
    Use a value different from the file size to extract a data
    subset.

Returns
-------
data : `numpy.ndarray`
    The data read from `file`.

See Also
--------
read_header
['Read', 'data', 'from', 'file', 'and', 'return', 'it', 'as', 'a', 'Numpy', 'array', '.']
train
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/mrc/uncompr_bin.py#L463-L526
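A standalone sketch of the offset arithmetic read_data performs before reading, assuming the same conventions (None means the whole data block, negative values count back from the end of the file); resolve_span and the concrete header/file sizes are illustrative, not part of the odl API.

import numpy as np

def resolve_span(dstart, dend, header_size, filesize):
    # None selects the whole data block; negative offsets count from EOF.
    start = header_size if dstart is None else (filesize + dstart if dstart < 0 else dstart)
    end = filesize if dend is None else (filesize + dend if dend < 0 else dend)
    if not header_size <= start < end <= filesize:
        raise ValueError('invalid span ({}, {})'.format(start, end))
    return start, end

dtype = np.dtype('float32')
start, end = resolve_span(None, -4, header_size=1024, filesize=1024 + 40)
# The span must cover a whole number of items, mirroring the
# num_elems check in read_data above.
assert (end - start) % dtype.itemsize == 0
print((end - start) // dtype.itemsize)  # 9 elements of 4 bytes each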
8,122
markchil/gptools
gptools/utils.py
fixed_poch
def fixed_poch(a, n):
    """Implementation of the Pochhammer symbol :math:`(a)_n` which handles
    negative integer arguments properly.

    Need conditional statement because scipy's implementation of the
    Pochhammer symbol is wrong for negative integer arguments. This
    function uses the definition from
    http://functions.wolfram.com/GammaBetaErf/Pochhammer/02/

    Parameters
    ----------
    a : float
        The argument.
    n : nonnegative int
        The order.
    """
    # Old form, calls gamma function:
    # if a < 0.0 and a % 1 == 0 and n <= -a:
    #     p = (-1.0)**n * scipy.misc.factorial(-a) / scipy.misc.factorial(-a - n)
    # else:
    #     p = scipy.special.poch(a, n)
    # return p
    if (int(n) != n) or (n < 0):
        raise ValueError("Parameter n must be a nonnegative int!")
    n = int(n)
    # Direct form based on product:
    terms = [a + k for k in range(0, n)]
    return scipy.prod(terms)
python
def fixed_poch(a, n):
    """Implementation of the Pochhammer symbol :math:`(a)_n` which handles
    negative integer arguments properly.

    Need conditional statement because scipy's implementation of the
    Pochhammer symbol is wrong for negative integer arguments. This
    function uses the definition from
    http://functions.wolfram.com/GammaBetaErf/Pochhammer/02/

    Parameters
    ----------
    a : float
        The argument.
    n : nonnegative int
        The order.
    """
    # Old form, calls gamma function:
    # if a < 0.0 and a % 1 == 0 and n <= -a:
    #     p = (-1.0)**n * scipy.misc.factorial(-a) / scipy.misc.factorial(-a - n)
    # else:
    #     p = scipy.special.poch(a, n)
    # return p
    if (int(n) != n) or (n < 0):
        raise ValueError("Parameter n must be a nonnegative int!")
    n = int(n)
    # Direct form based on product:
    terms = [a + k for k in range(0, n)]
    return scipy.prod(terms)
['def', 'fixed_poch', '(', 'a', ',', 'n', ')', ':', '# Old form, calls gamma function:', '# if a < 0.0 and a % 1 == 0 and n <= -a:', '# p = (-1.0)**n * scipy.misc.factorial(-a) / scipy.misc.factorial(-a - n)', '# else:', '# p = scipy.special.poch(a, n)', '# return p', 'if', '(', 'int', '(', 'n', ')', '!=', 'n', ')', 'or', '(', 'n', '<', '0', ')', ':', 'raise', 'ValueError', '(', '"Parameter n must be a nonnegative int!"', ')', 'n', '=', 'int', '(', 'n', ')', '# Direct form based on product:', 'terms', '=', '[', 'a', '+', 'k', 'for', 'k', 'in', 'range', '(', '0', ',', 'n', ')', ']', 'return', 'scipy', '.', 'prod', '(', 'terms', ')']
Implementation of the Pochhammer symbol :math:`(a)_n` which handles
negative integer arguments properly.

Need conditional statement because scipy's implementation of the
Pochhammer symbol is wrong for negative integer arguments. This
function uses the definition from
http://functions.wolfram.com/GammaBetaErf/Pochhammer/02/

Parameters
----------
a : float
    The argument.
n : nonnegative int
    The order.
['Implementation', 'of', 'the', 'Pochhammer', 'symbol', ':', 'math', ':', '(', 'a', ')', '_n', 'which', 'handles', 'negative', 'integer', 'arguments', 'properly', '.', 'Need', 'conditional', 'statement', 'because', 'scipy', 's', 'implementation', 'of', 'the', 'Pochhammer', 'symbol', 'is', 'wrong', 'for', 'negative', 'integer', 'arguments', '.', 'This', 'function', 'uses', 'the', 'definition', 'from', 'http', ':', '//', 'functions', '.', 'wolfram', '.', 'com', '/', 'GammaBetaErf', '/', 'Pochhammer', '/', '02', '/', 'Parameters', '----------', 'a', ':', 'float', 'The', 'argument', '.', 'n', ':', 'nonnegative', 'int', 'The', 'order', '.']
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1369-L1395
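The direct product used by fixed_poch can be reproduced with the standard library alone, which sidesteps the deprecated scipy.prod; poch here is a hypothetical stand-in, shown with the negative-integer case that motivates the function.

from math import prod

def poch(a, n):
    # (a)_n = a * (a + 1) * ... * (a + n - 1); the empty product (n == 0) is 1.
    if int(n) != n or n < 0:
        raise ValueError('n must be a nonnegative int')
    return prod(a + k for k in range(int(n)))

print(poch(-3, 2))   # (-3) * (-2) == 6
print(poch(-3, 4))   # a zero factor appears at k == 3, so the result is 0
print(poch(2.5, 3))  # 2.5 * 3.5 * 4.5 == 39.375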
8,123
apple/turicreate
src/external/xgboost/subtree/rabit/tracker/rabit_demo.py
mthread_submit
def mthread_submit(nslave, worker_args, worker_envs):
    """
    customized submit script that submits nslave jobs; each must contain
    args as a parameter. Note this can be a lambda function containing
    additional parameters in input.

    Parameters
    ----------
    nslave : number of slave processes to start up
    args : arguments to launch each job; this usually includes the
        parameters of master_uri and parameters passed into submit
    """
    procs = {}
    for i in range(nslave):
        procs[i] = Thread(target = exec_cmd, args = (args.command + worker_args, i, worker_envs))
        procs[i].daemon = True
        procs[i].start()
    for i in range(nslave):
        procs[i].join()
python
def mthread_submit(nslave, worker_args, worker_envs):
    """
    customized submit script that submits nslave jobs; each must contain
    args as a parameter. Note this can be a lambda function containing
    additional parameters in input.

    Parameters
    ----------
    nslave : number of slave processes to start up
    args : arguments to launch each job; this usually includes the
        parameters of master_uri and parameters passed into submit
    """
    procs = {}
    for i in range(nslave):
        procs[i] = Thread(target = exec_cmd, args = (args.command + worker_args, i, worker_envs))
        procs[i].daemon = True
        procs[i].start()
    for i in range(nslave):
        procs[i].join()
['def', 'mthread_submit', '(', 'nslave', ',', 'worker_args', ',', 'worker_envs', ')', ':', 'procs', '=', '{', '}', 'for', 'i', 'in', 'range', '(', 'nslave', ')', ':', 'procs', '[', 'i', ']', '=', 'Thread', '(', 'target', '=', 'exec_cmd', ',', 'args', '=', '(', 'args', '.', 'command', '+', 'worker_args', ',', 'i', ',', 'worker_envs', ')', ')', 'procs', '[', 'i', ']', '.', 'daemon', '=', 'True', 'procs', '[', 'i', ']', '.', 'start', '(', ')', 'for', 'i', 'in', 'range', '(', 'nslave', ')', ':', 'procs', '[', 'i', ']', '.', 'join', '(', ')']
customized submit script that submits nslave jobs; each must contain
args as a parameter. Note this can be a lambda function containing
additional parameters in input.

Parameters
----------
nslave : number of slave processes to start up
args : arguments to launch each job; this usually includes the
    parameters of master_uri and parameters passed into submit
['customized', 'submit', 'script', 'that', 'submits', 'nslave', 'jobs', 'each', 'must', 'contain', 'args', 'as', 'a', 'parameter', 'note', 'this', 'can', 'be', 'a', 'lambda', 'function', 'containing', 'additional', 'parameters', 'in', 'input', 'Parameters', 'nslave', 'number', 'of', 'slave', 'processes', 'to', 'start', 'up', 'args', 'arguments', 'to', 'launch', 'each', 'job', 'this', 'usually', 'includes', 'the', 'parameters', 'of', 'master_uri', 'and', 'parameters', 'passed', 'into', 'submit']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/tracker/rabit_demo.py#L78-L93
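A runnable sketch of the same fan-out/join threading pattern used by mthread_submit; fan_out and its arguments are illustrative names, and the worker arguments are passed in explicitly instead of read from the module-level args object the original relies on.

from threading import Thread

def fan_out(nworker, target, common_args):
    # One daemon thread per worker; block until every worker finishes.
    threads = [Thread(target=target, args=(i,) + tuple(common_args), daemon=True)
               for i in range(nworker)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

fan_out(3, lambda rank, tag: print('worker', rank, tag), ('demo',))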
8,124
saltstack/salt
salt/client/ssh/shell.py
Shell._run_cmd
def _run_cmd(self, cmd, key_accept=False, passwd_retries=3): ''' Execute a shell command via VT. This is blocking and assumes that ssh is being run ''' if not cmd: return '', 'No command or passphrase', 245 term = salt.utils.vt.Terminal( cmd, shell=True, log_stdout=True, log_stdout_level='trace', log_stderr=True, log_stderr_level='trace', stream_stdout=False, stream_stderr=False) sent_passwd = 0 send_password = True ret_stdout = '' ret_stderr = '' old_stdout = '' try: while term.has_unread_data: stdout, stderr = term.recv() if stdout: ret_stdout += stdout buff = old_stdout + stdout else: buff = stdout if stderr: ret_stderr += stderr if buff and RSTR_RE.search(buff): # We're getting results back, don't try to send passwords send_password = False if buff and SSH_PRIVATE_KEY_PASSWORD_PROMPT_RE.search(buff): if not self.priv_passwd: return '', 'Private key file need passphrase', 254 term.sendline(self.priv_passwd) continue if buff and SSH_PASSWORD_PROMPT_RE.search(buff) and send_password: if not self.passwd: return '', 'Permission denied, no authentication information', 254 if sent_passwd < passwd_retries: term.sendline(self.passwd) sent_passwd += 1 continue else: # asking for a password, and we can't seem to send it return '', 'Password authentication failed', 254 elif buff and KEY_VALID_RE.search(buff): if key_accept: term.sendline('yes') continue else: term.sendline('no') ret_stdout = ('The host key needs to be accepted, to ' 'auto accept run salt-ssh with the -i ' 'flag:\n{0}').format(stdout) return ret_stdout, '', 254 elif buff and buff.endswith('_||ext_mods||_'): mods_raw = salt.utils.json.dumps(self.mods, separators=(',', ':')) + '|_E|0|' term.sendline(mods_raw) if stdout: old_stdout = stdout time.sleep(0.01) return ret_stdout, ret_stderr, term.exitstatus finally: term.close(terminate=True, kill=True)
python
def _run_cmd(self, cmd, key_accept=False, passwd_retries=3): ''' Execute a shell command via VT. This is blocking and assumes that ssh is being run ''' if not cmd: return '', 'No command or passphrase', 245 term = salt.utils.vt.Terminal( cmd, shell=True, log_stdout=True, log_stdout_level='trace', log_stderr=True, log_stderr_level='trace', stream_stdout=False, stream_stderr=False) sent_passwd = 0 send_password = True ret_stdout = '' ret_stderr = '' old_stdout = '' try: while term.has_unread_data: stdout, stderr = term.recv() if stdout: ret_stdout += stdout buff = old_stdout + stdout else: buff = stdout if stderr: ret_stderr += stderr if buff and RSTR_RE.search(buff): # We're getting results back, don't try to send passwords send_password = False if buff and SSH_PRIVATE_KEY_PASSWORD_PROMPT_RE.search(buff): if not self.priv_passwd: return '', 'Private key file need passphrase', 254 term.sendline(self.priv_passwd) continue if buff and SSH_PASSWORD_PROMPT_RE.search(buff) and send_password: if not self.passwd: return '', 'Permission denied, no authentication information', 254 if sent_passwd < passwd_retries: term.sendline(self.passwd) sent_passwd += 1 continue else: # asking for a password, and we can't seem to send it return '', 'Password authentication failed', 254 elif buff and KEY_VALID_RE.search(buff): if key_accept: term.sendline('yes') continue else: term.sendline('no') ret_stdout = ('The host key needs to be accepted, to ' 'auto accept run salt-ssh with the -i ' 'flag:\n{0}').format(stdout) return ret_stdout, '', 254 elif buff and buff.endswith('_||ext_mods||_'): mods_raw = salt.utils.json.dumps(self.mods, separators=(',', ':')) + '|_E|0|' term.sendline(mods_raw) if stdout: old_stdout = stdout time.sleep(0.01) return ret_stdout, ret_stderr, term.exitstatus finally: term.close(terminate=True, kill=True)
['def', '_run_cmd', '(', 'self', ',', 'cmd', ',', 'key_accept', '=', 'False', ',', 'passwd_retries', '=', '3', ')', ':', 'if', 'not', 'cmd', ':', 'return', "''", ',', "'No command or passphrase'", ',', '245', 'term', '=', 'salt', '.', 'utils', '.', 'vt', '.', 'Terminal', '(', 'cmd', ',', 'shell', '=', 'True', ',', 'log_stdout', '=', 'True', ',', 'log_stdout_level', '=', "'trace'", ',', 'log_stderr', '=', 'True', ',', 'log_stderr_level', '=', "'trace'", ',', 'stream_stdout', '=', 'False', ',', 'stream_stderr', '=', 'False', ')', 'sent_passwd', '=', '0', 'send_password', '=', 'True', 'ret_stdout', '=', "''", 'ret_stderr', '=', "''", 'old_stdout', '=', "''", 'try', ':', 'while', 'term', '.', 'has_unread_data', ':', 'stdout', ',', 'stderr', '=', 'term', '.', 'recv', '(', ')', 'if', 'stdout', ':', 'ret_stdout', '+=', 'stdout', 'buff', '=', 'old_stdout', '+', 'stdout', 'else', ':', 'buff', '=', 'stdout', 'if', 'stderr', ':', 'ret_stderr', '+=', 'stderr', 'if', 'buff', 'and', 'RSTR_RE', '.', 'search', '(', 'buff', ')', ':', "# We're getting results back, don't try to send passwords", 'send_password', '=', 'False', 'if', 'buff', 'and', 'SSH_PRIVATE_KEY_PASSWORD_PROMPT_RE', '.', 'search', '(', 'buff', ')', ':', 'if', 'not', 'self', '.', 'priv_passwd', ':', 'return', "''", ',', "'Private key file need passphrase'", ',', '254', 'term', '.', 'sendline', '(', 'self', '.', 'priv_passwd', ')', 'continue', 'if', 'buff', 'and', 'SSH_PASSWORD_PROMPT_RE', '.', 'search', '(', 'buff', ')', 'and', 'send_password', ':', 'if', 'not', 'self', '.', 'passwd', ':', 'return', "''", ',', "'Permission denied, no authentication information'", ',', '254', 'if', 'sent_passwd', '<', 'passwd_retries', ':', 'term', '.', 'sendline', '(', 'self', '.', 'passwd', ')', 'sent_passwd', '+=', '1', 'continue', 'else', ':', "# asking for a password, and we can't seem to send it", 'return', "''", ',', "'Password authentication failed'", ',', '254', 'elif', 'buff', 'and', 'KEY_VALID_RE', '.', 'search', '(', 'buff', ')', ':', 'if', 'key_accept', ':', 'term', '.', 'sendline', '(', "'yes'", ')', 'continue', 'else', ':', 'term', '.', 'sendline', '(', "'no'", ')', 'ret_stdout', '=', '(', "'The host key needs to be accepted, to '", "'auto accept run salt-ssh with the -i '", "'flag:\\n{0}'", ')', '.', 'format', '(', 'stdout', ')', 'return', 'ret_stdout', ',', "''", ',', '254', 'elif', 'buff', 'and', 'buff', '.', 'endswith', '(', "'_||ext_mods||_'", ')', ':', 'mods_raw', '=', 'salt', '.', 'utils', '.', 'json', '.', 'dumps', '(', 'self', '.', 'mods', ',', 'separators', '=', '(', "','", ',', "':'", ')', ')', '+', "'|_E|0|'", 'term', '.', 'sendline', '(', 'mods_raw', ')', 'if', 'stdout', ':', 'old_stdout', '=', 'stdout', 'time', '.', 'sleep', '(', '0.01', ')', 'return', 'ret_stdout', ',', 'ret_stderr', ',', 'term', '.', 'exitstatus', 'finally', ':', 'term', '.', 'close', '(', 'terminate', '=', 'True', ',', 'kill', '=', 'True', ')']
Execute a shell command via VT. This is blocking and assumes that ssh is being run
['Execute', 'a', 'shell', 'command', 'via', 'VT', '.', 'This', 'is', 'blocking', 'and', 'assumes', 'that', 'ssh', 'is', 'being', 'run']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/shell.py#L372-L441
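One detail worth isolating from _run_cmd: a prompt can arrive split across two recv() calls, which is why the loop scans old_stdout + stdout rather than the latest chunk alone. A minimal sketch of that buffering trick, with a hypothetical prompt pattern (not salt's actual regex):

import re

# Hypothetical prompt pattern; the \Z anchor keeps a prompt from being
# counted again when it reappears at the start of the next joined buffer.
PASSWORD_PROMPT_RE = re.compile(r'(?:passphrase|password).*:\s*\Z', re.I)

def count_prompts(chunks, prompt_re):
    old = ''
    hits = 0
    for chunk in chunks:
        buff = old + chunk  # join with the previous chunk, as in _run_cmd
        if prompt_re.search(buff):
            hits += 1
        old = chunk
    return hits

print(count_prompts(['pass', 'word: '], PASSWORD_PROMPT_RE))  # 1, despite the split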
8,125
SmartTeleMax/iktomi
iktomi/templates/__init__.py
BoundTemplate.render
def render(self, template_name, __data=None, **kw):
    '''Given a template name and template data, renders the template and
    returns the result as a string'''
    return self.template.render(template_name, **self._vars(__data, **kw))
python
def render(self, template_name, __data=None, **kw):
    '''Given a template name and template data, renders the template and
    returns the result as a string'''
    return self.template.render(template_name, **self._vars(__data, **kw))
['def', 'render', '(', 'self', ',', 'template_name', ',', '__data', '=', 'None', ',', '*', '*', 'kw', ')', ':', 'return', 'self', '.', 'template', '.', 'render', '(', 'template_name', ',', '*', '*', 'self', '.', '_vars', '(', '__data', ',', '*', '*', 'kw', ')', ')']
Given a template name and template data, renders the template and
returns the result as a string
['Given', 'a', 'template', 'name', 'and', 'template', 'data', 'renders', 'the', 'template', 'and', 'returns', 'the', 'result', 'as', 'a', 'string']
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/templates/__init__.py#L100-L104
8,126
bitesofcode/projexui
projexui/widgets/xscintillaedit/xscintillaedit.py
XScintillaEdit.removeComments
def removeComments( self, comment = None ):
    """
    Removes comments from the editor based on the current selection.\
    If no comment string is supplied, then the comment from the language \
    will be used.

    :param      comment | <str> || None

    :return     <bool> | success
    """
    if ( not comment ):
        lang = self.language()
        if ( lang ):
            comment = lang.lineComment()

    if ( not comment ):
        return False

    startline, startcol, endline, endcol = self.getSelection()
    len_comment = len(comment)
    line, col = self.getCursorPosition()

    for lineno in range(startline, endline+1 ):
        self.setSelection(lineno, 0, lineno, len_comment)
        if ( self.selectedText() == comment ):
            self.removeSelectedText()

    self.setSelection(startline, startcol, endline, endcol)
    self.setCursorPosition(line, col)

    return True
python
def removeComments( self, comment = None ):
    """
    Removes comments from the editor based on the current selection.\
    If no comment string is supplied, then the comment from the language \
    will be used.

    :param      comment | <str> || None

    :return     <bool> | success
    """
    if ( not comment ):
        lang = self.language()
        if ( lang ):
            comment = lang.lineComment()

    if ( not comment ):
        return False

    startline, startcol, endline, endcol = self.getSelection()
    len_comment = len(comment)
    line, col = self.getCursorPosition()

    for lineno in range(startline, endline+1 ):
        self.setSelection(lineno, 0, lineno, len_comment)
        if ( self.selectedText() == comment ):
            self.removeSelectedText()

    self.setSelection(startline, startcol, endline, endcol)
    self.setCursorPosition(line, col)

    return True
['def', 'removeComments', '(', 'self', ',', 'comment', '=', 'None', ')', ':', 'if', '(', 'not', 'comment', ')', ':', 'lang', '=', 'self', '.', 'language', '(', ')', 'if', '(', 'lang', ')', ':', 'comment', '=', 'lang', '.', 'lineComment', '(', ')', 'if', '(', 'not', 'comment', ')', ':', 'return', 'False', 'startline', ',', 'startcol', ',', 'endline', ',', 'endcol', '=', 'self', '.', 'getSelection', '(', ')', 'len_comment', '=', 'len', '(', 'comment', ')', 'line', ',', 'col', '=', 'self', '.', 'getCursorPosition', '(', ')', 'for', 'lineno', 'in', 'range', '(', 'startline', ',', 'endline', '+', '1', ')', ':', 'self', '.', 'setSelection', '(', 'lineno', ',', '0', ',', 'lineno', ',', 'len_comment', ')', 'if', '(', 'self', '.', 'selectedText', '(', ')', '==', 'comment', ')', ':', 'self', '.', 'removeSelectedText', '(', ')', 'self', '.', 'setSelection', '(', 'startline', ',', 'startcol', ',', 'endline', ',', 'endcol', ')', 'self', '.', 'setCursorPosition', '(', 'line', ',', 'col', ')', 'return', 'True']
Removes comments from the editor based on the current selection.\
If no comment string is supplied, then the comment from the language \
will be used.

:param      comment | <str> || None

:return     <bool> | success
['Removes', 'comments', 'from', 'the', 'editor', 'based', 'on', 'the', 'current', 'selection', '.', '\\', 'If', 'no', 'comment', 'string', 'is', 'supplied', 'then', 'the', 'comment', 'from', 'the', 'language', '\\', 'will', 'be', 'used', '.', ':', 'param', 'comment', '|', '<str', '>', '||', 'None', ':', 'return', '<bool', '>', '|', 'success']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xscintillaedit/xscintillaedit.py#L448-L478
8,127
globocom/GloboNetworkAPI-client-python
networkapiclient/Ip.py
Ip.find_ip6_by_network
def find_ip6_by_network(self, id_network): """List IPv6 from network. :param id_network: Network ipv6 identifier. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'ip': {'id': < id >, 'id_vlan': < id_vlan >, 'block1': <block1>, 'block2': <block2>, 'block3': <block3>, 'block4': <block4>, 'block5': <block5>, 'block6': <block6>, 'block7': <block7>, 'block8': <block8>, 'descricao': < description > 'equipamento': [ { all name equipamentos related } ], }} :raise IpNaoExisteError: Network does not have any ips. :raise InvalidParameterError: Network identifier is none or invalid. :raise DataBaseError: Networkapi failed to access the database. """ if not is_valid_int_param(id_network): raise InvalidParameterError( u'Network identifier is invalid or was not informed.') url = 'ip/id_network_ipv6/' + str(id_network) + "/" code, xml = self.submit(None, 'GET', url) key = "ips" return get_list_map(self.response(code, xml, [key]), key)
python
def find_ip6_by_network(self, id_network): """List IPv6 from network. :param id_network: Network ipv6 identifier. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'ip': {'id': < id >, 'id_vlan': < id_vlan >, 'block1': <block1>, 'block2': <block2>, 'block3': <block3>, 'block4': <block4>, 'block5': <block5>, 'block6': <block6>, 'block7': <block7>, 'block8': <block8>, 'descricao': < description > 'equipamento': [ { all name equipamentos related } ], }} :raise IpNaoExisteError: Network does not have any ips. :raise InvalidParameterError: Network identifier is none or invalid. :raise DataBaseError: Networkapi failed to access the database. """ if not is_valid_int_param(id_network): raise InvalidParameterError( u'Network identifier is invalid or was not informed.') url = 'ip/id_network_ipv6/' + str(id_network) + "/" code, xml = self.submit(None, 'GET', url) key = "ips" return get_list_map(self.response(code, xml, [key]), key)
['def', 'find_ip6_by_network', '(', 'self', ',', 'id_network', ')', ':', 'if', 'not', 'is_valid_int_param', '(', 'id_network', ')', ':', 'raise', 'InvalidParameterError', '(', "u'Network identifier is invalid or was not informed.'", ')', 'url', '=', "'ip/id_network_ipv6/'", '+', 'str', '(', 'id_network', ')', '+', '"/"', 'code', ',', 'xml', '=', 'self', '.', 'submit', '(', 'None', ',', "'GET'", ',', 'url', ')', 'key', '=', '"ips"', 'return', 'get_list_map', '(', 'self', '.', 'response', '(', 'code', ',', 'xml', ',', '[', 'key', ']', ')', ',', 'key', ')']
List IPv6 from network. :param id_network: Network ipv6 identifier. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'ip': {'id': < id >, 'id_vlan': < id_vlan >, 'block1': <block1>, 'block2': <block2>, 'block3': <block3>, 'block4': <block4>, 'block5': <block5>, 'block6': <block6>, 'block7': <block7>, 'block8': <block8>, 'descricao': < description > 'equipamento': [ { all name equipamentos related } ], }} :raise IpNaoExisteError: Network does not have any ips. :raise InvalidParameterError: Network identifier is none or invalid. :raise DataBaseError: Networkapi failed to access the database.
['List', 'IPv6', 'from', 'network', '.']
train
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Ip.py#L743-L779
8,128
explosion/thinc
thinc/api.py
split_backward
def split_backward(layers): # pragma: no cover """Separate a sequence of layers' `begin_update` methods into two lists of functions: one that computes the forward values, and the other that completes the backward pass. The backward sequence is only populated after the forward functions have been applied. """ backward = [] forward = [sink_return(op.begin_update, backward.append) for op in layers] return forward, backward
python
def split_backward(layers): # pragma: no cover """Separate a sequence of layers' `begin_update` methods into two lists of functions: one that computes the forward values, and the other that completes the backward pass. The backward sequence is only populated after the forward functions have been applied. """ backward = [] forward = [sink_return(op.begin_update, backward.append) for op in layers] return forward, backward
['def', 'split_backward', '(', 'layers', ')', ':', '# pragma: no cover', 'backward', '=', '[', ']', 'forward', '=', '[', 'sink_return', '(', 'op', '.', 'begin_update', ',', 'backward', '.', 'append', ')', 'for', 'op', 'in', 'layers', ']', 'return', 'forward', ',', 'backward']
Separate a sequence of layers' `begin_update` methods into two lists of functions: one that computes the forward values, and the other that completes the backward pass. The backward sequence is only populated after the forward functions have been applied.
['Separate', 'a', 'sequence', 'of', 'layers', 'begin_update', 'methods', 'into', 'two', 'lists', 'of', 'functions', ':', 'one', 'that', 'computes', 'the', 'forward', 'values', 'and', 'the', 'other', 'that', 'completes', 'the', 'backward', 'pass', '.', 'The', 'backward', 'sequence', 'is', 'only', 'populated', 'after', 'the', 'forward', 'functions', 'have', 'been', 'applied', '.']
train
https://github.com/explosion/thinc/blob/90129be5f0d6c665344245a7c37dbe1b8afceea2/thinc/api.py#L208-L216
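sink_return is not shown in the snippet above; the sketch below assumes it calls the wrapped begin_update, hands the backward callback to the sink, and passes the forward value through. A toy Double layer stands in for a real thinc layer, showing that the backward list fills up only as the forward functions run.

def sink_return(func, sink):
    # Assumed behaviour: split the (output, callback) pair that
    # begin_update returns, diverting the callback into `sink`.
    def wrap(*args, **kwargs):
        output, callback = func(*args, **kwargs)
        sink(callback)
        return output
    return wrap

class Double:
    def begin_update(self, x):
        return x * 2, lambda grad: grad * 2

backward = []
forward = [sink_return(layer.begin_update, backward.append)
           for layer in [Double(), Double()]]

y = forward[1](forward[0](3))
print(y)              # 12
print(len(backward))  # 2 -- populated only after the forward pass

grad = 1.0
for cb in reversed(backward):  # complete the backward pass
    grad = cb(grad)
print(grad)  # 4.0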
8,129
RiotGames/cloud-inquisitor
backend/cloud_inquisitor/plugins/types/resources.py
EC2Instance.update
def update(self, data): """Updates the object information based on live data, if there were any changes made. Any changes will be automatically applied to the object, but will not be automatically persisted. You must manually call `db.session.add(instance)` on the object. Args: data (:obj:): AWS API Resource object fetched from AWS API Returns: True if there were any changes to the object, else false """ # If the instance was terminated, remove it if data.state['Name'] == 'terminated': self.delete(auto_commit=False) return True updated = self.set_property('launch_date', to_utc_date(data.launch_time).isoformat()) updated |= self.set_property('state', data.state['Name']) updated |= self.set_property('instance_type', data.instance_type) updated |= self.set_property('public_ip', data.public_ip_address or None) updated |= self.set_property('public_dns', data.public_dns_name or None) tags = {x['Key']: x['Value'] for x in data.tags or {}} existing_tags = {x.key: x for x in self.tags} # Check for new tags for key, value in list(tags.items()): updated |= self.set_tag(key, value) # Check for updated or removed tags for key in list(existing_tags.keys()): if key not in tags: updated |= self.delete_tag(key) return updated
python
def update(self, data): """Updates the object information based on live data, if there were any changes made. Any changes will be automatically applied to the object, but will not be automatically persisted. You must manually call `db.session.add(instance)` on the object. Args: data (:obj:): AWS API Resource object fetched from AWS API Returns: True if there were any changes to the object, else false """ # If the instance was terminated, remove it if data.state['Name'] == 'terminated': self.delete(auto_commit=False) return True updated = self.set_property('launch_date', to_utc_date(data.launch_time).isoformat()) updated |= self.set_property('state', data.state['Name']) updated |= self.set_property('instance_type', data.instance_type) updated |= self.set_property('public_ip', data.public_ip_address or None) updated |= self.set_property('public_dns', data.public_dns_name or None) tags = {x['Key']: x['Value'] for x in data.tags or {}} existing_tags = {x.key: x for x in self.tags} # Check for new tags for key, value in list(tags.items()): updated |= self.set_tag(key, value) # Check for updated or removed tags for key in list(existing_tags.keys()): if key not in tags: updated |= self.delete_tag(key) return updated
['def', 'update', '(', 'self', ',', 'data', ')', ':', '# If the instance was terminated, remove it', 'if', 'data', '.', 'state', '[', "'Name'", ']', '==', "'terminated'", ':', 'self', '.', 'delete', '(', 'auto_commit', '=', 'False', ')', 'return', 'True', 'updated', '=', 'self', '.', 'set_property', '(', "'launch_date'", ',', 'to_utc_date', '(', 'data', '.', 'launch_time', ')', '.', 'isoformat', '(', ')', ')', 'updated', '|=', 'self', '.', 'set_property', '(', "'state'", ',', 'data', '.', 'state', '[', "'Name'", ']', ')', 'updated', '|=', 'self', '.', 'set_property', '(', "'instance_type'", ',', 'data', '.', 'instance_type', ')', 'updated', '|=', 'self', '.', 'set_property', '(', "'public_ip'", ',', 'data', '.', 'public_ip_address', 'or', 'None', ')', 'updated', '|=', 'self', '.', 'set_property', '(', "'public_dns'", ',', 'data', '.', 'public_dns_name', 'or', 'None', ')', 'tags', '=', '{', 'x', '[', "'Key'", ']', ':', 'x', '[', "'Value'", ']', 'for', 'x', 'in', 'data', '.', 'tags', 'or', '{', '}', '}', 'existing_tags', '=', '{', 'x', '.', 'key', ':', 'x', 'for', 'x', 'in', 'self', '.', 'tags', '}', '# Check for new tags', 'for', 'key', ',', 'value', 'in', 'list', '(', 'tags', '.', 'items', '(', ')', ')', ':', 'updated', '|=', 'self', '.', 'set_tag', '(', 'key', ',', 'value', ')', '# Check for updated or removed tags', 'for', 'key', 'in', 'list', '(', 'existing_tags', '.', 'keys', '(', ')', ')', ':', 'if', 'key', 'not', 'in', 'tags', ':', 'updated', '|=', 'self', '.', 'delete_tag', '(', 'key', ')', 'return', 'updated']
Updates the object information based on live data, if there were any changes made. Any changes will be automatically applied to the object, but will not be automatically persisted. You must manually call `db.session.add(instance)` on the object. Args: data (:obj:): AWS API Resource object fetched from AWS API Returns: True if there were any changes to the object, else false
['Updates', 'the', 'object', 'information', 'based', 'on', 'live', 'data', 'if', 'there', 'were', 'any', 'changes', 'made', '.', 'Any', 'changes', 'will', 'be', 'automatically', 'applied', 'to', 'the', 'object', 'but', 'will', 'not', 'be', 'automatically', 'persisted', '.', 'You', 'must', 'manually', 'call', 'db', '.', 'session', '.', 'add', '(', 'instance', ')', 'on', 'the', 'object', '.']
train
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/types/resources.py#L599-L633
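The tag reconciliation inside update follows a generic diff pattern: set every desired tag, delete every current tag that is no longer desired, and OR the per-operation change flags together. A sketch with illustrative callbacks standing in for set_tag/delete_tag:

def diff_tags(current, desired, set_tag, delete_tag):
    updated = False
    for key, value in desired.items():
        updated |= set_tag(key, value)   # add or update
    for key in list(current):
        if key not in desired:
            updated |= delete_tag(key)   # remove stale tags
    return updated

current = {'env': 'dev', 'team': 'ops'}
desired = {'env': 'prod'}
# The callbacks report whether they changed anything, like set_tag above.
print(diff_tags(current, desired,
                set_tag=lambda k, v: current.get(k) != v,
                delete_tag=lambda k: True))  # True: 'env' changed, 'team' is stale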
8,130
ampl/amplpy
amplpy/dataframe.py
DataFrame.toDict
def toDict(self): """ Return a dictionary with the DataFrame data. """ d = {} nindices = self.getNumIndices() for i in range(self.getNumRows()): row = list(self.getRowByIndex(i)) if nindices > 1: key = tuple(row[:nindices]) elif nindices == 1: key = row[0] else: key = None if len(row) - nindices == 0: d[key] = None elif len(row) - nindices == 1: d[key] = row[nindices] else: d[key] = tuple(row[nindices:]) return d
python
def toDict(self): """ Return a dictionary with the DataFrame data. """ d = {} nindices = self.getNumIndices() for i in range(self.getNumRows()): row = list(self.getRowByIndex(i)) if nindices > 1: key = tuple(row[:nindices]) elif nindices == 1: key = row[0] else: key = None if len(row) - nindices == 0: d[key] = None elif len(row) - nindices == 1: d[key] = row[nindices] else: d[key] = tuple(row[nindices:]) return d
['def', 'toDict', '(', 'self', ')', ':', 'd', '=', '{', '}', 'nindices', '=', 'self', '.', 'getNumIndices', '(', ')', 'for', 'i', 'in', 'range', '(', 'self', '.', 'getNumRows', '(', ')', ')', ':', 'row', '=', 'list', '(', 'self', '.', 'getRowByIndex', '(', 'i', ')', ')', 'if', 'nindices', '>', '1', ':', 'key', '=', 'tuple', '(', 'row', '[', ':', 'nindices', ']', ')', 'elif', 'nindices', '==', '1', ':', 'key', '=', 'row', '[', '0', ']', 'else', ':', 'key', '=', 'None', 'if', 'len', '(', 'row', ')', '-', 'nindices', '==', '0', ':', 'd', '[', 'key', ']', '=', 'None', 'elif', 'len', '(', 'row', ')', '-', 'nindices', '==', '1', ':', 'd', '[', 'key', ']', '=', 'row', '[', 'nindices', ']', 'else', ':', 'd', '[', 'key', ']', '=', 'tuple', '(', 'row', '[', 'nindices', ':', ']', ')', 'return', 'd']
Return a dictionary with the DataFrame data.
['Return', 'a', 'dictionary', 'with', 'the', 'DataFrame', 'data', '.']
train
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/dataframe.py#L276-L296
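toDict's keying convention, written out as a standalone helper for tuple rows whose index columns come first; rows_to_dict is hypothetical, not part of amplpy.

def rows_to_dict(rows, nindices):
    d = {}
    for row in rows:
        # Tuple key for several index columns, scalar for one, None for none.
        if nindices > 1:
            key = tuple(row[:nindices])
        elif nindices == 1:
            key = row[0]
        else:
            key = None
        rest = row[nindices:]
        d[key] = None if not rest else (rest[0] if len(rest) == 1 else tuple(rest))
    return d

print(rows_to_dict([('a', 1, 10.0), ('b', 2, 20.0)], nindices=2))
# {('a', 1): 10.0, ('b', 2): 20.0}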
8,131
fermiPy/fermipy
fermipy/spectrum.py
SpectralFunction.ednde_deriv
def ednde_deriv(self, x, params=None): """Evaluate derivative of E times differential flux with respect to E.""" params = self.params if params is None else params return np.squeeze(self.eval_ednde_deriv(x, params, self.scale, self.extra_params))
python
def ednde_deriv(self, x, params=None): """Evaluate derivative of E times differential flux with respect to E.""" params = self.params if params is None else params return np.squeeze(self.eval_ednde_deriv(x, params, self.scale, self.extra_params))
['def', 'ednde_deriv', '(', 'self', ',', 'x', ',', 'params', '=', 'None', ')', ':', 'params', '=', 'self', '.', 'params', 'if', 'params', 'is', 'None', 'else', 'params', 'return', 'np', '.', 'squeeze', '(', 'self', '.', 'eval_ednde_deriv', '(', 'x', ',', 'params', ',', 'self', '.', 'scale', ',', 'self', '.', 'extra_params', ')', ')']
Evaluate derivative of E times differential flux with respect to E.
['Evaluate', 'derivative', 'of', 'E', 'times', 'differential', 'flux', 'with', 'respect', 'to', 'E', '.']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/spectrum.py#L294-L299
8,132
openstack/horizon
openstack_dashboard/api/neutron.py
FloatingIpManager.allocate
def allocate(self, pool, tenant_id=None, **params): """Allocates a floating IP to the tenant. You must provide a pool name or id for which you would like to allocate a floating IP. :returns: FloatingIp object corresponding to an allocated floating IP """ if not tenant_id: tenant_id = self.request.user.project_id create_dict = {'floating_network_id': pool, 'tenant_id': tenant_id} if 'subnet_id' in params: create_dict['subnet_id'] = params['subnet_id'] if 'floating_ip_address' in params: create_dict['floating_ip_address'] = params['floating_ip_address'] if 'description' in params: create_dict['description'] = params['description'] if 'dns_domain' in params: create_dict['dns_domain'] = params['dns_domain'] if 'dns_name' in params: create_dict['dns_name'] = params['dns_name'] fip = self.client.create_floatingip( {'floatingip': create_dict}).get('floatingip') self._set_instance_info(fip) return FloatingIp(fip)
python
def allocate(self, pool, tenant_id=None, **params): """Allocates a floating IP to the tenant. You must provide a pool name or id for which you would like to allocate a floating IP. :returns: FloatingIp object corresponding to an allocated floating IP """ if not tenant_id: tenant_id = self.request.user.project_id create_dict = {'floating_network_id': pool, 'tenant_id': tenant_id} if 'subnet_id' in params: create_dict['subnet_id'] = params['subnet_id'] if 'floating_ip_address' in params: create_dict['floating_ip_address'] = params['floating_ip_address'] if 'description' in params: create_dict['description'] = params['description'] if 'dns_domain' in params: create_dict['dns_domain'] = params['dns_domain'] if 'dns_name' in params: create_dict['dns_name'] = params['dns_name'] fip = self.client.create_floatingip( {'floatingip': create_dict}).get('floatingip') self._set_instance_info(fip) return FloatingIp(fip)
['def', 'allocate', '(', 'self', ',', 'pool', ',', 'tenant_id', '=', 'None', ',', '*', '*', 'params', ')', ':', 'if', 'not', 'tenant_id', ':', 'tenant_id', '=', 'self', '.', 'request', '.', 'user', '.', 'project_id', 'create_dict', '=', '{', "'floating_network_id'", ':', 'pool', ',', "'tenant_id'", ':', 'tenant_id', '}', 'if', "'subnet_id'", 'in', 'params', ':', 'create_dict', '[', "'subnet_id'", ']', '=', 'params', '[', "'subnet_id'", ']', 'if', "'floating_ip_address'", 'in', 'params', ':', 'create_dict', '[', "'floating_ip_address'", ']', '=', 'params', '[', "'floating_ip_address'", ']', 'if', "'description'", 'in', 'params', ':', 'create_dict', '[', "'description'", ']', '=', 'params', '[', "'description'", ']', 'if', "'dns_domain'", 'in', 'params', ':', 'create_dict', '[', "'dns_domain'", ']', '=', 'params', '[', "'dns_domain'", ']', 'if', "'dns_name'", 'in', 'params', ':', 'create_dict', '[', "'dns_name'", ']', '=', 'params', '[', "'dns_name'", ']', 'fip', '=', 'self', '.', 'client', '.', 'create_floatingip', '(', '{', "'floatingip'", ':', 'create_dict', '}', ')', '.', 'get', '(', "'floatingip'", ')', 'self', '.', '_set_instance_info', '(', 'fip', ')', 'return', 'FloatingIp', '(', 'fip', ')']
Allocates a floating IP to the tenant. You must provide a pool name or id for which you would like to allocate a floating IP. :returns: FloatingIp object corresponding to an allocated floating IP
['Allocates', 'a', 'floating', 'IP', 'to', 'the', 'tenant', '.']
train
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/neutron.py#L615-L640
8,133
horazont/aioopenssl
aioopenssl/__init__.py
STARTTLSTransport.starttls
def starttls(self, ssl_context=None, post_handshake_callback=None): """ Start a TLS stream on top of the socket. This is an invalid operation if the stream is not in RAW_OPEN state. If `ssl_context` is set, it overrides the `ssl_context` passed to the constructor. If `post_handshake_callback` is set, it overrides the `post_handshake_callback` passed to the constructor. .. versionchanged:: 0.4 This method is now a barrier with respect to reads and writes: before the handshake is completed (including the post handshake callback, if any), no data is received or sent. """ if self._state != _State.RAW_OPEN or self._closing: raise self._invalid_state("starttls() called") if ssl_context is not None: self._ssl_context = ssl_context self._extra.update( sslcontext=ssl_context ) else: self._ssl_context = self._ssl_context_factory(self) if post_handshake_callback is not None: self._tls_post_handshake_callback = post_handshake_callback self._waiter = asyncio.Future() self._waiter.add_done_callback(self._waiter_done) self._initiate_tls() try: yield from self._waiter finally: self._waiter = None
python
def starttls(self, ssl_context=None, post_handshake_callback=None): """ Start a TLS stream on top of the socket. This is an invalid operation if the stream is not in RAW_OPEN state. If `ssl_context` is set, it overrides the `ssl_context` passed to the constructor. If `post_handshake_callback` is set, it overrides the `post_handshake_callback` passed to the constructor. .. versionchanged:: 0.4 This method is now a barrier with respect to reads and writes: before the handshake is completed (including the post handshake callback, if any), no data is received or sent. """ if self._state != _State.RAW_OPEN or self._closing: raise self._invalid_state("starttls() called") if ssl_context is not None: self._ssl_context = ssl_context self._extra.update( sslcontext=ssl_context ) else: self._ssl_context = self._ssl_context_factory(self) if post_handshake_callback is not None: self._tls_post_handshake_callback = post_handshake_callback self._waiter = asyncio.Future() self._waiter.add_done_callback(self._waiter_done) self._initiate_tls() try: yield from self._waiter finally: self._waiter = None
['def', 'starttls', '(', 'self', ',', 'ssl_context', '=', 'None', ',', 'post_handshake_callback', '=', 'None', ')', ':', 'if', 'self', '.', '_state', '!=', '_State', '.', 'RAW_OPEN', 'or', 'self', '.', '_closing', ':', 'raise', 'self', '.', '_invalid_state', '(', '"starttls() called"', ')', 'if', 'ssl_context', 'is', 'not', 'None', ':', 'self', '.', '_ssl_context', '=', 'ssl_context', 'self', '.', '_extra', '.', 'update', '(', 'sslcontext', '=', 'ssl_context', ')', 'else', ':', 'self', '.', '_ssl_context', '=', 'self', '.', '_ssl_context_factory', '(', 'self', ')', 'if', 'post_handshake_callback', 'is', 'not', 'None', ':', 'self', '.', '_tls_post_handshake_callback', '=', 'post_handshake_callback', 'self', '.', '_waiter', '=', 'asyncio', '.', 'Future', '(', ')', 'self', '.', '_waiter', '.', 'add_done_callback', '(', 'self', '.', '_waiter_done', ')', 'self', '.', '_initiate_tls', '(', ')', 'try', ':', 'yield', 'from', 'self', '.', '_waiter', 'finally', ':', 'self', '.', '_waiter', '=', 'None']
Start a TLS stream on top of the socket. This is an invalid operation if the stream is not in RAW_OPEN state. If `ssl_context` is set, it overrides the `ssl_context` passed to the constructor. If `post_handshake_callback` is set, it overrides the `post_handshake_callback` passed to the constructor. .. versionchanged:: 0.4 This method is now a barrier with respect to reads and writes: before the handshake is completed (including the post handshake callback, if any), no data is received or sent.
['Start', 'a', 'TLS', 'stream', 'on', 'top', 'of', 'the', 'socket', '.', 'This', 'is', 'an', 'invalid', 'operation', 'if', 'the', 'stream', 'is', 'not', 'in', 'RAW_OPEN', 'state', '.']
train
https://github.com/horazont/aioopenssl/blob/95cb39b5904d6a9702afcef6704181c850371081/aioopenssl/__init__.py#L649-L685
8,134
inasafe/inasafe
safe/common/parameters/default_select_parameter_widget.py
DefaultSelectParameterWidget.enable_radio_button
def enable_radio_button(self): """Enable radio button and custom value input area then set selected radio button to 'Do not report'. """ for button in self.default_input_button_group.buttons(): button.setEnabled(True) self.set_selected_radio_button() self.custom_value.setEnabled(True)
python
def enable_radio_button(self): """Enable radio button and custom value input area then set selected radio button to 'Do not report'. """ for button in self.default_input_button_group.buttons(): button.setEnabled(True) self.set_selected_radio_button() self.custom_value.setEnabled(True)
['def', 'enable_radio_button', '(', 'self', ')', ':', 'for', 'button', 'in', 'self', '.', 'default_input_button_group', '.', 'buttons', '(', ')', ':', 'button', '.', 'setEnabled', '(', 'True', ')', 'self', '.', 'set_selected_radio_button', '(', ')', 'self', '.', 'custom_value', '.', 'setEnabled', '(', 'True', ')']
Enable radio button and custom value input area then set selected radio button to 'Do not report'.
['Enable', 'radio', 'button', 'and', 'custom', 'value', 'input', 'area', 'then', 'set', 'selected', 'radio', 'button', 'to', 'Do', 'not', 'report', '.']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/common/parameters/default_select_parameter_widget.py#L207-L214
8,135
StackStorm/pybind
pybind/slxos/v17r_1_01a/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/__init__.py
af_ipv4_neighbor_addr._set_advertisement_interval
def _set_advertisement_interval(self, v, load=False): """ Setter method for advertisement_interval, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/advertisement_interval (container) If this variable is read-only (config: false) in the source YANG file, then _set_advertisement_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_advertisement_interval() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=advertisement_interval.advertisement_interval, is_container='container', presence=False, yang_name="advertisement-interval", rest_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Minimum interval between sending BGP routing updates', u'cli-compact-syntax': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """advertisement_interval must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=advertisement_interval.advertisement_interval, is_container='container', presence=False, yang_name="advertisement-interval", rest_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Minimum interval between sending BGP routing updates', u'cli-compact-syntax': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""", }) self.__advertisement_interval = t if hasattr(self, '_set'): self._set()
python
def _set_advertisement_interval(self, v, load=False): """ Setter method for advertisement_interval, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/advertisement_interval (container) If this variable is read-only (config: false) in the source YANG file, then _set_advertisement_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_advertisement_interval() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=advertisement_interval.advertisement_interval, is_container='container', presence=False, yang_name="advertisement-interval", rest_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Minimum interval between sending BGP routing updates', u'cli-compact-syntax': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """advertisement_interval must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=advertisement_interval.advertisement_interval, is_container='container', presence=False, yang_name="advertisement-interval", rest_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Minimum interval between sending BGP routing updates', u'cli-compact-syntax': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""", }) self.__advertisement_interval = t if hasattr(self, '_set'): self._set()
['def', '_set_advertisement_interval', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'advertisement_interval', '.', 'advertisement_interval', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"advertisement-interval"', ',', 'rest_name', '=', '"advertisement-interval"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Minimum interval between sending BGP routing updates'", ',', "u'cli-compact-syntax'", ':', 'None', ',', "u'cli-incomplete-command'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-bgp'", ',', 'defining_module', '=', "'brocade-bgp'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""advertisement_interval must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=advertisement_interval.advertisement_interval, is_container=\'container\', presence=False, yang_name="advertisement-interval", rest_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Minimum interval between sending BGP routing updates\', u\'cli-compact-syntax\': None, u\'cli-incomplete-command\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__advertisement_interval', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for advertisement_interval, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/advertisement_interval (container) If this variable is read-only (config: false) in the source YANG file, then _set_advertisement_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_advertisement_interval() directly.
['Setter', 'method', 'for', 'advertisement_interval', 'mapped', 'from', 'YANG', 'variable', '/', 'routing_system', '/', 'router', '/', 'router_bgp', '/', 'address_family', '/', 'ipv4', '/', 'ipv4_unicast', '/', 'af_vrf', '/', 'neighbor', '/', 'af_ipv4_vrf_neighbor_address_holder', '/', 'af_ipv4_neighbor_addr', '/', 'advertisement_interval', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_advertisement_interval', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_advertisement_interval', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/__init__.py#L780-L801
8,136
senaite/senaite.jsonapi
src/senaite/jsonapi/v1/routes/registry.py
get
def get(context, request, key=None): """Return all registry items if key is None, otherwise try to fetch the registry key """ registry_records = api.get_registry_records_by_keyword(key) # Prepare batch size = req.get_batch_size() start = req.get_batch_start() batch = api.make_batch(registry_records, size, start) return { "pagesize": batch.get_pagesize(), "next": batch.make_next_url(), "previous": batch.make_prev_url(), "page": batch.get_pagenumber(), "pages": batch.get_numpages(), "count": batch.get_sequence_length(), "items": [registry_records], "url": api.url_for("senaite.jsonapi.v1.registry", key=key), }
python
def get(context, request, key=None): """Return all registry items if key is None, otherwise try to fetch the registry key """ registry_records = api.get_registry_records_by_keyword(key) # Prepare batch size = req.get_batch_size() start = req.get_batch_start() batch = api.make_batch(registry_records, size, start) return { "pagesize": batch.get_pagesize(), "next": batch.make_next_url(), "previous": batch.make_prev_url(), "page": batch.get_pagenumber(), "pages": batch.get_numpages(), "count": batch.get_sequence_length(), "items": [registry_records], "url": api.url_for("senaite.jsonapi.v1.registry", key=key), }
['def', 'get', '(', 'context', ',', 'request', ',', 'key', '=', 'None', ')', ':', 'registry_records', '=', 'api', '.', 'get_registry_records_by_keyword', '(', 'key', ')', '# Prepare batch', 'size', '=', 'req', '.', 'get_batch_size', '(', ')', 'start', '=', 'req', '.', 'get_batch_start', '(', ')', 'batch', '=', 'api', '.', 'make_batch', '(', 'registry_records', ',', 'size', ',', 'start', ')', 'return', '{', '"pagesize"', ':', 'batch', '.', 'get_pagesize', '(', ')', ',', '"next"', ':', 'batch', '.', 'make_next_url', '(', ')', ',', '"previous"', ':', 'batch', '.', 'make_prev_url', '(', ')', ',', '"page"', ':', 'batch', '.', 'get_pagenumber', '(', ')', ',', '"pages"', ':', 'batch', '.', 'get_numpages', '(', ')', ',', '"count"', ':', 'batch', '.', 'get_sequence_length', '(', ')', ',', '"items"', ':', '[', 'registry_records', ']', ',', '"url"', ':', 'api', '.', 'url_for', '(', '"senaite.jsonapi.v1.registry"', ',', 'key', '=', 'key', ')', ',', '}']
Return all registry items if key is None, otherwise try to fetch the registry key
['Return', 'all', 'registry', 'items', 'if', 'key', 'is', 'None', 'otherwise', 'try', 'to', 'fetch', 'the', 'registry', 'key']
train
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/v1/routes/registry.py#L10-L29
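api.make_batch is not shown in this record; a minimal stand-in that slices the record sequence and derives the paging numbers the route above reports could look like this (the dict layout is illustrative, not the senaite batch API):

def make_batch(items, size, start):
    page = items[start:start + size]
    pages = -(-len(items) // size)  # ceiling division
    return {
        'items': page,
        'page': start // size + 1,
        'pages': pages,
        'count': len(items),
    }

print(make_batch(list(range(25)), size=10, start=20))
# {'items': [20, 21, 22, 23, 24], 'page': 3, 'pages': 3, 'count': 25}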
8,137
honzajavorek/redis-collections
redis_collections/lists.py
List.extend
def extend(self, other): """ Adds the values from the iterable *other* to the end of this collection. """ def extend_trans(pipe): values = list(other.__iter__(pipe)) if use_redis else other len_self = pipe.rpush(self.key, *(self._pickle(v) for v in values)) if self.writeback: for i, v in enumerate(values, len_self - len(values)): self.cache[i] = v if self._same_redis(other, RedisCollection): use_redis = True self._transaction(extend_trans, other.key) else: use_redis = False self._transaction(extend_trans)
python
def extend(self, other): """ Adds the values from the iterable *other* to the end of this collection. """ def extend_trans(pipe): values = list(other.__iter__(pipe)) if use_redis else other len_self = pipe.rpush(self.key, *(self._pickle(v) for v in values)) if self.writeback: for i, v in enumerate(values, len_self - len(values)): self.cache[i] = v if self._same_redis(other, RedisCollection): use_redis = True self._transaction(extend_trans, other.key) else: use_redis = False self._transaction(extend_trans)
['def', 'extend', '(', 'self', ',', 'other', ')', ':', 'def', 'extend_trans', '(', 'pipe', ')', ':', 'values', '=', 'list', '(', 'other', '.', '__iter__', '(', 'pipe', ')', ')', 'if', 'use_redis', 'else', 'other', 'len_self', '=', 'pipe', '.', 'rpush', '(', 'self', '.', 'key', ',', '*', '(', 'self', '.', '_pickle', '(', 'v', ')', 'for', 'v', 'in', 'values', ')', ')', 'if', 'self', '.', 'writeback', ':', 'for', 'i', ',', 'v', 'in', 'enumerate', '(', 'values', ',', 'len_self', '-', 'len', '(', 'values', ')', ')', ':', 'self', '.', 'cache', '[', 'i', ']', '=', 'v', 'if', 'self', '.', '_same_redis', '(', 'other', ',', 'RedisCollection', ')', ':', 'use_redis', '=', 'True', 'self', '.', '_transaction', '(', 'extend_trans', ',', 'other', '.', 'key', ')', 'else', ':', 'use_redis', '=', 'False', 'self', '.', '_transaction', '(', 'extend_trans', ')']
Adds the values from the iterable *other* to the end of this collection.
['Adds', 'the', 'values', 'from', 'the', 'iterable', '*', 'other', '*', 'to', 'the', 'end', 'of', 'this', 'collection', '.']
train
https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/lists.py#L374-L391
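The writeback bookkeeping in extend hinges on RPUSH returning the new length of the Redis list: the freshly appended values occupy indices len_self - len(values) through len_self - 1. A sketch with len_self hard-coded where rpush() would supply it:

values = ['x', 'y', 'z']
cache = {}
len_self = 5  # list length after the push, as returned by rpush()

# Same enumerate() offset as in extend_trans above.
for i, v in enumerate(values, len_self - len(values)):
    cache[i] = v

print(cache)  # {2: 'x', 3: 'y', 4: 'z'}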
8,138
gccxml/pygccxml
pygccxml/parser/directory_cache.py
filename_repository_t.update_id_counter
def update_id_counter(self): """Update the `id_` counter so that it doesn't grow forever. """ if not self.__entries: self.__next_id = 1 else: self.__next_id = max(self.__entries.keys()) + 1
python
def update_id_counter(self): """Update the `id_` counter so that it doesn't grow forever. """ if not self.__entries: self.__next_id = 1 else: self.__next_id = max(self.__entries.keys()) + 1
['def', 'update_id_counter', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '__entries', ':', 'self', '.', '__next_id', '=', '1', 'else', ':', 'self', '.', '__next_id', '=', 'max', '(', 'self', '.', '__entries', '.', 'keys', '(', ')', ')', '+', '1']
Update the `id_` counter so that it doesn't grow forever.
['Update', 'the', 'id_', 'counter', 'so', 'that', 'it', 'doesn', 't', 'grow', 'forever', '.']
train
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/directory_cache.py#L519-L526
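The counter rule reduces to max-plus-one with a floor of 1; next_id is an illustrative standalone version.

def next_id(entries):
    # Restart at 1 when the table is empty, otherwise continue just past
    # the largest key, so the counter falls back after deletions at the
    # top of the range instead of growing forever.
    return 1 if not entries else max(entries) + 1

print(next_id({}))                # 1
print(next_id({1: 'a', 7: 'b'}))  # 8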
8,139
lanius/tinyik
tinyik/optimizer.py
NewtonOptimizer.prepare
def prepare(self, f): """Accept an objective function for optimization.""" self.g = autograd.grad(f) self.h = autograd.hessian(f)
python
def prepare(self, f): """Accept an objective function for optimization.""" self.g = autograd.grad(f) self.h = autograd.hessian(f)
['def', 'prepare', '(', 'self', ',', 'f', ')', ':', 'self', '.', 'g', '=', 'autograd', '.', 'grad', '(', 'f', ')', 'self', '.', 'h', '=', 'autograd', '.', 'hessian', '(', 'f', ')']
Accept an objective function for optimization.
['Accept', 'an', 'objective', 'function', 'for', 'optimization', '.']
train
https://github.com/lanius/tinyik/blob/dffe5031ee044caf43e51746c4b0a6d45922d50e/tinyik/optimizer.py#L16-L19
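With the gradient and Hessian prepared this way, one Newton step is x <- x - H(x)^{-1} g(x). A sketch using the autograd package's grad and hessian on a toy quadratic, which a single step minimizes exactly:

import autograd.numpy as np
from autograd import grad, hessian

def f(x):
    return np.sum(x ** 2) + x[0] * x[1]

g = grad(f)
h = hessian(f)

x = np.array([1.0, -2.0])
x = x - np.linalg.solve(h(x), g(x))  # one Newton step
print(x)  # [0. 0.] up to floating point -- the quadratic's minimizer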
8,140
konstantinstadler/pymrio
pymrio/core/mriosystem.py
IOSystem.report_accounts
def report_accounts(self, path, per_region=True, per_capita=False,
                    pic_size=1000, format='rst', **kwargs):
    """ Generates a report to the given path for all extensions

    This method calls .report_accounts for all extensions

    Notes
    -----
    This looks prettier with the seaborn module (import seaborn before
    calling this method)

    Parameters
    ----------
    path : string
        Root path for the report
    per_region : boolean, optional
        If true, reports the accounts per region
    per_capita : boolean, optional
        If true, reports the accounts per capita
        If per_capita and per_region are False, nothing will be done
    pic_size : int, optional
        size for the figures in px, 1000 by default
    format : string, optional
        file format of the report: 'rst'(default), 'html', 'latex', ...
        except for rst all depend on the module docutils (all
        writer_name from docutils can be used as format)
    ffname : string, optional
        root file name (without extension, per_capita or per_region
        will be attached) and folder names
        If None gets passed (default), self.name will be modified
        to get a valid name for the operating system without blanks
    **kwargs : key word arguments, optional
        This will be passed directly to the pd.DataFrame.plot method
        (through the self.plot_account method)
    """
    for ext in self.get_extensions(data=True):
        ext.report_accounts(path=path,
                            per_region=per_region,
                            per_capita=per_capita,
                            pic_size=pic_size,
                            format=format,
                            **kwargs)
python
def report_accounts(self, path, per_region=True, per_capita=False, pic_size=1000, format='rst', **kwargs): """ Generates a report to the given path for all extension This method calls .report_accounts for all extensions Notes ----- This looks prettier with the seaborn module (import seaborn before calling this method) Parameters ---------- path : string Root path for the report per_region : boolean, optional If true, reports the accounts per region per_capita : boolean, optional If true, reports the accounts per capita If per_capita and per_region are False, nothing will be done pic_size : int, optional size for the figures in px, 1000 by default format : string, optional file format of the report: 'rst'(default), 'html', 'latex', ... except for rst all depend on the module docutils (all writer_name from docutils can be used as format) ffname : string, optional root file name (without extension, per_capita or per_region will be attached) and folder names If None gets passed (default), self.name with be modified to get a valid name for the operation system without blanks **kwargs : key word arguments, optional This will be passed directly to the pd.DataFrame.plot method (through the self.plot_account method) """ for ext in self.get_extensions(data=True): ext.report_accounts(path=path, per_region=per_region, per_capita=per_capita, pic_size=pic_size, format=format, **kwargs)
['def', 'report_accounts', '(', 'self', ',', 'path', ',', 'per_region', '=', 'True', ',', 'per_capita', '=', 'False', ',', 'pic_size', '=', '1000', ',', 'format', '=', "'rst'", ',', '*', '*', 'kwargs', ')', ':', 'for', 'ext', 'in', 'self', '.', 'get_extensions', '(', 'data', '=', 'True', ')', ':', 'ext', '.', 'report_accounts', '(', 'path', '=', 'path', ',', 'per_region', '=', 'per_region', ',', 'per_capita', '=', 'per_capita', ',', 'pic_size', '=', 'pic_size', ',', 'format', '=', 'format', ',', '*', '*', 'kwargs', ')']
Generates a report to the given path for all extensions This method calls .report_accounts for all extensions Notes ----- This looks prettier with the seaborn module (import seaborn before calling this method) Parameters ---------- path : string Root path for the report per_region : boolean, optional If true, reports the accounts per region per_capita : boolean, optional If true, reports the accounts per capita If per_capita and per_region are False, nothing will be done pic_size : int, optional size for the figures in px, 1000 by default format : string, optional file format of the report: 'rst'(default), 'html', 'latex', ... except for rst all depend on the module docutils (all writer_name from docutils can be used as format) ffname : string, optional root file name (without extension, per_capita or per_region will be attached) and folder names If None gets passed (default), self.name will be modified to get a valid name for the operating system without blanks **kwargs : key word arguments, optional This will be passed directly to the pd.DataFrame.plot method (through the self.plot_account method)
['Generates', 'a', 'report', 'to', 'the', 'given', 'path', 'for', 'all', 'extensions']
train
https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1529-L1574
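A hedged usage sketch for the reporting method above, using pymrio's bundled test system (load_test and calc_all are part of pymrio's public API; the output path is illustrative):
import pymrio

io = pymrio.load_test()        # small test MRIO shipped with pymrio
io.calc_all()                  # accounts must exist before reporting
io.report_accounts(path='./report', per_region=True, format='rst')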
8,141
pysathq/pysat
solvers/prepare.py
download_archive
def download_archive(sources): """ Downloads an archive and saves locally (taken from PySMT). """ # last element is expected to be the local archive name save_to = sources[-1] # not downloading the file again if it exists if os.path.exists(save_to): print('not downloading {0} since it exists locally'.format(save_to)) return # try all possible sources one by one for url in sources[:-1]: # make five attempts per source for i in range(5): # first attempt to get a response response = urlopen(url) # handling redirections u = urlopen(response.geturl()) meta = u.info() if meta.get('Content-Length') and len(meta.get('Content-Length')) > 0: filesz = int(meta.get('Content-Length')) if os.path.exists(save_to) and os.path.getsize(save_to) == filesz: print('not downloading {0} since it exists locally'.format(save_to)) return print('downloading: {0} ({1} bytes)...'.format(save_to, filesz), end=' ') with open(save_to, 'wb') as fp: block_sz = 8192 while True: buff = u.read(block_sz) if not buff: break fp.write(buff) print('done') break else: continue break # successfully got the file else: assert 0, 'something went wrong -- cannot download {0}'.format(save_to)
python
def download_archive(sources): """ Downloads an archive and saves locally (taken from PySMT). """ # last element is expected to be the local archive name save_to = sources[-1] # not downloading the file again if it exists if os.path.exists(save_to): print('not downloading {0} since it exists locally'.format(save_to)) return # try all possible sources one by one for url in sources[:-1]: # make five attempts per source for i in range(5): # first attempt to get a response response = urlopen(url) # handling redirections u = urlopen(response.geturl()) meta = u.info() if meta.get('Content-Length') and len(meta.get('Content-Length')) > 0: filesz = int(meta.get('Content-Length')) if os.path.exists(save_to) and os.path.getsize(save_to) == filesz: print('not downloading {0} since it exists locally'.format(save_to)) return print('downloading: {0} ({1} bytes)...'.format(save_to, filesz), end=' ') with open(save_to, 'wb') as fp: block_sz = 8192 while True: buff = u.read(block_sz) if not buff: break fp.write(buff) print('done') break else: continue break # successfully got the file else: assert 0, 'something went wrong -- cannot download {0}'.format(save_to)
['def', 'download_archive', '(', 'sources', ')', ':', '# last element is expected to be the local archive name', 'save_to', '=', 'sources', '[', '-', '1', ']', '# not downloading the file again if it exists', 'if', 'os', '.', 'path', '.', 'exists', '(', 'save_to', ')', ':', 'print', '(', "'not downloading {0} since it exists locally'", '.', 'format', '(', 'save_to', ')', ')', 'return', '# try all possible sources one by one', 'for', 'url', 'in', 'sources', '[', ':', '-', '1', ']', ':', '# make five attempts per source', 'for', 'i', 'in', 'range', '(', '5', ')', ':', '# first attempt to get a response', 'response', '=', 'urlopen', '(', 'url', ')', '# handling redirections', 'u', '=', 'urlopen', '(', 'response', '.', 'geturl', '(', ')', ')', 'meta', '=', 'u', '.', 'info', '(', ')', 'if', 'meta', '.', 'get', '(', "'Content-Length'", ')', 'and', 'len', '(', 'meta', '.', 'get', '(', "'Content-Length'", ')', ')', '>', '0', ':', 'filesz', '=', 'int', '(', 'meta', '.', 'get', '(', "'Content-Length'", ')', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'save_to', ')', 'and', 'os', '.', 'path', '.', 'getsize', '(', 'save_to', ')', '==', 'filesz', ':', 'print', '(', "'not downloading {0} since it exists locally'", '.', 'format', '(', 'save_to', ')', ')', 'return', 'print', '(', "'downloading: {0} ({1} bytes)...'", '.', 'format', '(', 'save_to', ',', 'filesz', ')', ',', 'end', '=', "' '", ')', 'with', 'open', '(', 'save_to', ',', "'wb'", ')', 'as', 'fp', ':', 'block_sz', '=', '8192', 'while', 'True', ':', 'buff', '=', 'u', '.', 'read', '(', 'block_sz', ')', 'if', 'not', 'buff', ':', 'break', 'fp', '.', 'write', '(', 'buff', ')', 'print', '(', "'done'", ')', 'break', 'else', ':', 'continue', 'break', '# successfully got the file', 'else', ':', 'assert', '0', ',', "'something went wrong -- cannot download {0}'", '.', 'format', '(', 'save_to', ')']
Downloads an archive and saves locally (taken from PySMT).
['Downloads', 'an', 'archive', 'and', 'saves', 'locally', '(', 'taken', 'from', 'PySMT', ')', '.']
train
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/solvers/prepare.py#L289-L335
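The sources convention above is easy to miss: every element but the last is a candidate URL, and the final element is the local file name. A hedged call sketch (URLs and file name are placeholders):
sources = [
    'https://example.org/mirror1/solver.tar.gz',  # tried first
    'https://example.org/mirror2/solver.tar.gz',  # fallback mirror
    'solver.tar.gz',                              # local save path (last element)
]
download_archive(sources)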
8,142
kislyuk/aegea
aegea/packages/github3/github.py
GitHub.user
def user(self, login=None): """Returns a User object for the specified login name if provided. If no login name is provided, this will return a User object for the authenticated user. :param str login: (optional) :returns: :class:`User <github3.users.User>` """ if login: url = self._build_url('users', login) else: url = self._build_url('user') json = self._json(self._get(url), 200) return User(json, self._session) if json else None
python
def user(self, login=None): """Returns a User object for the specified login name if provided. If no login name is provided, this will return a User object for the authenticated user. :param str login: (optional) :returns: :class:`User <github3.users.User>` """ if login: url = self._build_url('users', login) else: url = self._build_url('user') json = self._json(self._get(url), 200) return User(json, self._session) if json else None
['def', 'user', '(', 'self', ',', 'login', '=', 'None', ')', ':', 'if', 'login', ':', 'url', '=', 'self', '.', '_build_url', '(', "'users'", ',', 'login', ')', 'else', ':', 'url', '=', 'self', '.', '_build_url', '(', "'user'", ')', 'json', '=', 'self', '.', '_json', '(', 'self', '.', '_get', '(', 'url', ')', ',', '200', ')', 'return', 'User', '(', 'json', ',', 'self', '.', '_session', ')', 'if', 'json', 'else', 'None']
Returns a User object for the specified login name if provided. If no login name is provided, this will return a User object for the authenticated user. :param str login: (optional) :returns: :class:`User <github3.users.User>`
['Returns', 'a', 'User', 'object', 'for', 'the', 'specified', 'login', 'name', 'if', 'provided', '.', 'If', 'no', 'login', 'name', 'is', 'provided', 'this', 'will', 'return', 'a', 'User', 'object', 'for', 'the', 'authenticated', 'user', '.']
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/github.py#L1452-L1466
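A usage sketch, assuming this vendored module behaves like upstream github3.py ('octocat' is just a well-known sample login, and the anonymous client is subject to rate limits):
from github3 import GitHub

gh = GitHub()               # unauthenticated client
u = gh.user('octocat')      # User object for a specific login, or None on failure
print(u.login if u else 'not found')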
8,143
fprimex/zdesk
zdesk/zdesk_api.py
ZendeskAPI.help_center_section_subscription_delete
def help_center_section_subscription_delete(self, section_id, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#delete-section-subscription" api_path = "/api/v2/help_center/sections/{section_id}/subscriptions/{id}.json" api_path = api_path.format(section_id=section_id, id=id) return self.call(api_path, method="DELETE", **kwargs)
python
def help_center_section_subscription_delete(self, section_id, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#delete-section-subscription" api_path = "/api/v2/help_center/sections/{section_id}/subscriptions/{id}.json" api_path = api_path.format(section_id=section_id, id=id) return self.call(api_path, method="DELETE", **kwargs)
['def', 'help_center_section_subscription_delete', '(', 'self', ',', 'section_id', ',', 'id', ',', '*', '*', 'kwargs', ')', ':', 'api_path', '=', '"/api/v2/help_center/sections/{section_id}/subscriptions/{id}.json"', 'api_path', '=', 'api_path', '.', 'format', '(', 'section_id', '=', 'section_id', ',', 'id', '=', 'id', ')', 'return', 'self', '.', 'call', '(', 'api_path', ',', 'method', '=', '"DELETE"', ',', '*', '*', 'kwargs', ')']
https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#delete-section-subscription
['https', ':', '//', 'developer', '.', 'zendesk', '.', 'com', '/', 'rest_api', '/', 'docs', '/', 'help_center', '/', 'subscriptions#delete', '-', 'section', '-', 'subscription']
train
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L1871-L1875
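Given an already-configured Zendesk client (construction and credentials omitted; zd is assumed), the method above maps directly onto the documented endpoint:
# zd = Zendesk(...)  # configured elsewhere with URL and credentials
zd.help_center_section_subscription_delete(section_id=123, id=456)  # placeholder ids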
8,144
dossier/dossier.fc
python/dossier/fc/exceptions.py
uni
def uni(key): '''as a crutch, we allow str-type keys, but they really should be unicode. ''' if isinstance(key, str): logger.warn('assuming utf8 on: %r', key) return unicode(key, 'utf-8') elif isinstance(key, unicode): return key else: raise NonUnicodeKeyError(key)
python
def uni(key): '''as a crutch, we allow str-type keys, but they really should be unicode. ''' if isinstance(key, str): logger.warn('assuming utf8 on: %r', key) return unicode(key, 'utf-8') elif isinstance(key, unicode): return key else: raise NonUnicodeKeyError(key)
['def', 'uni', '(', 'key', ')', ':', 'if', 'isinstance', '(', 'key', ',', 'str', ')', ':', 'logger', '.', 'warn', '(', "'assuming utf8 on: %r'", ',', 'key', ')', 'return', 'unicode', '(', 'key', ',', "'utf-8'", ')', 'elif', 'isinstance', '(', 'key', ',', 'unicode', ')', ':', 'return', 'key', 'else', ':', 'raise', 'NonUnicodeKeyError', '(', 'key', ')']
as a crutch, we allow str-type keys, but they really should be unicode.
['as', 'a', 'crutch', 'we', 'allow', 'str', '-', 'type', 'keys', 'but', 'they', 'really', 'should', 'be', 'unicode', '.']
train
https://github.com/dossier/dossier.fc/blob/3e969d0cb2592fc06afc1c849d2b22283450b5e2/python/dossier/fc/exceptions.py#L54-L65
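The helper above is Python 2 specific (str vs unicode); the same crutch pattern in Python 3 terms would look roughly like this (a sketch, not the library's code):
def uni3(key):
    # Accept bytes as a crutch, but insist on text
    if isinstance(key, bytes):
        return key.decode('utf-8')
    if isinstance(key, str):
        return key
    raise TypeError('expected a text key, got %r' % (key,))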
8,145
buildbot/buildbot
master/buildbot/process/builder.py
Builder.getCollapseRequestsFn
def getCollapseRequestsFn(self): """Helper function to determine which collapseRequests function to use from L{_collapseRequests}, or None for no merging""" # first, seek through builder, global, and the default collapseRequests_fn = self.config.collapseRequests if collapseRequests_fn is None: collapseRequests_fn = self.master.config.collapseRequests if collapseRequests_fn is None: collapseRequests_fn = True # then translate False and True properly if collapseRequests_fn is False: collapseRequests_fn = None elif collapseRequests_fn is True: collapseRequests_fn = self._defaultCollapseRequestFn return collapseRequests_fn
python
def getCollapseRequestsFn(self): """Helper function to determine which collapseRequests function to use from L{_collapseRequests}, or None for no merging""" # first, seek through builder, global, and the default collapseRequests_fn = self.config.collapseRequests if collapseRequests_fn is None: collapseRequests_fn = self.master.config.collapseRequests if collapseRequests_fn is None: collapseRequests_fn = True # then translate False and True properly if collapseRequests_fn is False: collapseRequests_fn = None elif collapseRequests_fn is True: collapseRequests_fn = self._defaultCollapseRequestFn return collapseRequests_fn
['def', 'getCollapseRequestsFn', '(', 'self', ')', ':', '# first, seek through builder, global, and the default', 'collapseRequests_fn', '=', 'self', '.', 'config', '.', 'collapseRequests', 'if', 'collapseRequests_fn', 'is', 'None', ':', 'collapseRequests_fn', '=', 'self', '.', 'master', '.', 'config', '.', 'collapseRequests', 'if', 'collapseRequests_fn', 'is', 'None', ':', 'collapseRequests_fn', '=', 'True', '# then translate False and True properly', 'if', 'collapseRequests_fn', 'is', 'False', ':', 'collapseRequests_fn', '=', 'None', 'elif', 'collapseRequests_fn', 'is', 'True', ':', 'collapseRequests_fn', '=', 'self', '.', '_defaultCollapseRequestFn', 'return', 'collapseRequests_fn']
Helper function to determine which collapseRequests function to use from L{_collapseRequests}, or None for no merging
['Helper', 'function', 'to', 'determine', 'which', 'collapseRequests', 'function', 'to', 'use', 'from', 'L', '{', '_collapseRequests', '}', 'or', 'None', 'for', 'no', 'merging']
train
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/process/builder.py#L441-L457
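The resolution order above (builder config, then master config, then a default, with the booleans translated afterwards) generalizes; a standalone sketch of the cascade:
def resolve_option(builder_val, master_val, default=True):
    val = builder_val
    if val is None:
        val = master_val
    if val is None:
        val = default
    # booleans act as sentinels: False disables, True selects the built-in behaviour
    return val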
8,146
johnnoone/aioconsul
aioconsul/client/kv_endpoint.py
ReadMixin.raw
async def raw(self, key, *, dc=None, watch=None, consistency=None): """Returns the specified key Parameters: key (str): Key to fetch dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency Returns: ObjectMeta: where value is the raw value """ response = await self._read(key, dc=dc, raw=True, watch=watch, consistency=consistency) return consul(response)
python
async def raw(self, key, *, dc=None, watch=None, consistency=None): """Returns the specified key Parameters: key (str): Key to fetch dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency Returns: ObjectMeta: where value is the raw value """ response = await self._read(key, dc=dc, raw=True, watch=watch, consistency=consistency) return consul(response)
['async', 'def', 'raw', '(', 'self', ',', 'key', ',', '*', ',', 'dc', '=', 'None', ',', 'watch', '=', 'None', ',', 'consistency', '=', 'None', ')', ':', 'response', '=', 'await', 'self', '.', '_read', '(', 'key', ',', 'dc', '=', 'dc', ',', 'raw', '=', 'True', ',', 'watch', '=', 'watch', ',', 'consistency', '=', 'consistency', ')', 'return', 'consul', '(', 'response', ')']
Returns the specified key Parameters: key (str): Key to fetch dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency Returns: ObjectMeta: where value is the raw value
['Returns', 'the', 'specified', 'key']
train
https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/client/kv_endpoint.py#L122-L139
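A hedged usage sketch; the client wiring is an assumption (the KV endpoint is assumed to be exposed as client.kv, and 'app/config' is a placeholder key), so check aioconsul's docs before relying on it:
async def fetch_raw(client):
    # returns ObjectMeta whose value is the raw stored value
    meta = await client.kv.raw('app/config')
    return meta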
8,147
MacHu-GWU/single_file_module-project
sfm/iterable.py
flatten_all
def flatten_all(nested_iterable): """Flatten arbitrary depth of nesting. Good for unknown nesting structure iterable object. Example:: >>> list(flatten_all([[1, 2], "abc", [3, ["x", "y", "z"]], 4])) [1, 2, "abc", 3, "x", "y", "z", 4] **中文文档** 将任意维度的列表压平成一维列表。 注: 使用hasattr(i, "__iter__")方法做是否是可循环对象的判断, 性能要高于其他 任何方法, 例如: isinstance(i, collections.Iterable) """ for item in nested_iterable: if hasattr(item, "__iter__") and not isinstance(item, string_types): for i in flatten_all(item): yield i else: yield item
python
def flatten_all(nested_iterable): """Flatten arbitrary depth of nesting. Good for unknown nesting structure iterable object. Example:: >>> list(flatten_all([[1, 2], "abc", [3, ["x", "y", "z"]], 4])) [1, 2, "abc", 3, "x", "y", "z", 4] **中文文档** 将任意维度的列表压平成一维列表。 注: 使用hasattr(i, "__iter__")方法做是否是可循环对象的判断, 性能要高于其他 任何方法, 例如: isinstance(i, collections.Iterable) """ for item in nested_iterable: if hasattr(item, "__iter__") and not isinstance(item, string_types): for i in flatten_all(item): yield i else: yield item
['def', 'flatten_all', '(', 'nested_iterable', ')', ':', 'for', 'item', 'in', 'nested_iterable', ':', 'if', 'hasattr', '(', 'item', ',', '"__iter__"', ')', 'and', 'not', 'isinstance', '(', 'item', ',', 'string_types', ')', ':', 'for', 'i', 'in', 'flatten_all', '(', 'item', ')', ':', 'yield', 'i', 'else', ':', 'yield', 'item']
Flatten arbitrary depth of nesting. Good for iterable objects with unknown nesting structure. Example:: >>> list(flatten_all([[1, 2], "abc", [3, ["x", "y", "z"]], 4])) [1, 2, "abc", 3, "x", "y", "z", 4] **Chinese docs (translated)** Flattens a list of arbitrary nesting depth into a one-dimensional list. Note: using hasattr(i, "__iter__") to test whether an object is iterable performs better than any alternative, e.g. isinstance(i, collections.Iterable)
['Flatten', 'arbitrary', 'depth', 'of', 'nesting', '.', 'Good', 'for', 'unknown', 'nesting', 'structure', 'iterable', 'object', '.']
train
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L41-L62
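The generator above is self-contained apart from string_types (imported from six in the source); a runnable check:
from six import string_types  # dependency flatten_all uses for py2/py3 string detection

nested = [[1, 2], "abc", [3, ["x", "y", "z"]], 4]
print(list(flatten_all(nested)))  # [1, 2, 'abc', 3, 'x', 'y', 'z', 4]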
8,148
matllubos/django-is-core
is_core/utils/decorators.py
short_description
def short_description(description): """ Sets 'short_description' attribute (this attribute is in exports to generate header name). """ def decorator(func): if isinstance(func, property): func = func.fget func.short_description = description return func return decorator
python
def short_description(description): """ Sets 'short_description' attribute (this attribute is in exports to generate header name). """ def decorator(func): if isinstance(func, property): func = func.fget func.short_description = description return func return decorator
['def', 'short_description', '(', 'description', ')', ':', 'def', 'decorator', '(', 'func', ')', ':', 'if', 'isinstance', '(', 'func', ',', 'property', ')', ':', 'func', '=', 'func', '.', 'fget', 'func', '.', 'short_description', '=', 'description', 'return', 'func', 'return', 'decorator']
Sets 'short_description' attribute (this attribute is in exports to generate header name).
['Sets', 'short_description', 'attribute', '(', 'this', 'attribute', 'is', 'in', 'exports', 'to', 'generate', 'header', 'name', ')', '.']
train
https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/utils/decorators.py#L1-L10
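A self-contained usage sketch for the decorator above; the attribute lands on the function object, which exporters can then read:
class UserExport:
    @short_description('Full name')
    def full_name(self):
        return 'Jane Doe'

print(UserExport.full_name.short_description)  # 'Full name'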
8,149
libtcod/python-tcod
tcod/libtcodpy.py
random_get_int_mean
def random_get_int_mean( rnd: Optional[tcod.random.Random], mi: int, ma: int, mean: int ) -> int: """Return a random weighted integer in the range: ``mi`` <= n <= ``ma``. The result is affacted by calls to :any:`random_set_distribution`. Args: rnd (Optional[Random]): A Random instance, or None to use the default. low (int): The lower bound of the random range, inclusive. high (int): The upper bound of the random range, inclusive. mean (int): The mean return value. Returns: int: A random weighted integer in the range ``mi`` <= n <= ``ma``. """ return int( lib.TCOD_random_get_int_mean( rnd.random_c if rnd else ffi.NULL, mi, ma, mean ) )
python
def random_get_int_mean( rnd: Optional[tcod.random.Random], mi: int, ma: int, mean: int ) -> int: """Return a random weighted integer in the range: ``mi`` <= n <= ``ma``. The result is affacted by calls to :any:`random_set_distribution`. Args: rnd (Optional[Random]): A Random instance, or None to use the default. low (int): The lower bound of the random range, inclusive. high (int): The upper bound of the random range, inclusive. mean (int): The mean return value. Returns: int: A random weighted integer in the range ``mi`` <= n <= ``ma``. """ return int( lib.TCOD_random_get_int_mean( rnd.random_c if rnd else ffi.NULL, mi, ma, mean ) )
['def', 'random_get_int_mean', '(', 'rnd', ':', 'Optional', '[', 'tcod', '.', 'random', '.', 'Random', ']', ',', 'mi', ':', 'int', ',', 'ma', ':', 'int', ',', 'mean', ':', 'int', ')', '->', 'int', ':', 'return', 'int', '(', 'lib', '.', 'TCOD_random_get_int_mean', '(', 'rnd', '.', 'random_c', 'if', 'rnd', 'else', 'ffi', '.', 'NULL', ',', 'mi', ',', 'ma', ',', 'mean', ')', ')']
Return a random weighted integer in the range: ``mi`` <= n <= ``ma``. The result is affected by calls to :any:`random_set_distribution`. Args: rnd (Optional[Random]): A Random instance, or None to use the default. mi (int): The lower bound of the random range, inclusive. ma (int): The upper bound of the random range, inclusive. mean (int): The mean return value. Returns: int: A random weighted integer in the range ``mi`` <= n <= ``ma``.
['Return', 'a', 'random', 'weighted', 'integer', 'in', 'the', 'range', ':', 'mi', '<', '=', 'n', '<', '=', 'ma', '.']
train
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L3746-L3766
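A usage sketch, assuming the function is reachable as tcod.random_get_int_mean (the libtcodpy namespace is re-exported by the tcod package):
import tcod

# None selects the default generator; results are weighted towards the mean (25)
value = tcod.random_get_int_mean(None, 0, 100, 25)
print(0 <= value <= 100)  # True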
8,150
tjcsl/cslbot
cslbot/helpers/handler.py
BotHandler.do_part
def do_part(self, cmdargs, nick, target, msgtype, send, c): """Leaves a channel. Prevent user from leaving the primary channel. """ channel = self.config['core']['channel'] botnick = self.config['core']['nick'] if not cmdargs: # don't leave the primary channel if target == channel: send("%s must have a home." % botnick) return else: cmdargs = target if not cmdargs.startswith(('#', '+', '@')): cmdargs = '#' + cmdargs # don't leave the primary channel if cmdargs == channel: send("%s must have a home." % botnick) return # don't leave the control channel if cmdargs == self.config['core']['ctrlchan']: send("%s must remain under control, or bad things will happen." % botnick) return self.send(cmdargs, nick, "Leaving at the request of %s" % nick, msgtype) c.part(cmdargs)
python
def do_part(self, cmdargs, nick, target, msgtype, send, c): """Leaves a channel. Prevent user from leaving the primary channel. """ channel = self.config['core']['channel'] botnick = self.config['core']['nick'] if not cmdargs: # don't leave the primary channel if target == channel: send("%s must have a home." % botnick) return else: cmdargs = target if not cmdargs.startswith(('#', '+', '@')): cmdargs = '#' + cmdargs # don't leave the primary channel if cmdargs == channel: send("%s must have a home." % botnick) return # don't leave the control channel if cmdargs == self.config['core']['ctrlchan']: send("%s must remain under control, or bad things will happen." % botnick) return self.send(cmdargs, nick, "Leaving at the request of %s" % nick, msgtype) c.part(cmdargs)
['def', 'do_part', '(', 'self', ',', 'cmdargs', ',', 'nick', ',', 'target', ',', 'msgtype', ',', 'send', ',', 'c', ')', ':', 'channel', '=', 'self', '.', 'config', '[', "'core'", ']', '[', "'channel'", ']', 'botnick', '=', 'self', '.', 'config', '[', "'core'", ']', '[', "'nick'", ']', 'if', 'not', 'cmdargs', ':', "# don't leave the primary channel", 'if', 'target', '==', 'channel', ':', 'send', '(', '"%s must have a home."', '%', 'botnick', ')', 'return', 'else', ':', 'cmdargs', '=', 'target', 'if', 'not', 'cmdargs', '.', 'startswith', '(', '(', "'#'", ',', "'+'", ',', "'@'", ')', ')', ':', 'cmdargs', '=', "'#'", '+', 'cmdargs', "# don't leave the primary channel", 'if', 'cmdargs', '==', 'channel', ':', 'send', '(', '"%s must have a home."', '%', 'botnick', ')', 'return', "# don't leave the control channel", 'if', 'cmdargs', '==', 'self', '.', 'config', '[', "'core'", ']', '[', "'ctrlchan'", ']', ':', 'send', '(', '"%s must remain under control, or bad things will happen."', '%', 'botnick', ')', 'return', 'self', '.', 'send', '(', 'cmdargs', ',', 'nick', ',', '"Leaving at the request of %s"', '%', 'nick', ',', 'msgtype', ')', 'c', '.', 'part', '(', 'cmdargs', ')']
Leaves a channel. Prevents the user from leaving the primary channel.
['Leaves', 'a', 'channel', '.']
train
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/handler.py#L259-L285
8,151
olsoneric/pedemath
pedemath/matrix.py
Matrix44.rot_from_vectors
def rot_from_vectors(start_vec, end_vec): """Return the rotation matrix to rotate from one vector to another.""" dot = start_vec.dot(end_vec) # TODO: check if dot is a valid number angle = math.acos(dot) # TODO: check if angle is a valid number cross = start_vec.cross(end_vec) cross.normalize() rot_matrix = Matrix44.from_axis_angle(cross, angle) # TODO: catch exception and return identity for invalid numbers return rot_matrix
python
def rot_from_vectors(start_vec, end_vec): """Return the rotation matrix to rotate from one vector to another.""" dot = start_vec.dot(end_vec) # TODO: check if dot is a valid number angle = math.acos(dot) # TODO: check if angle is a valid number cross = start_vec.cross(end_vec) cross.normalize() rot_matrix = Matrix44.from_axis_angle(cross, angle) # TODO: catch exception and return identity for invalid numbers return rot_matrix
['def', 'rot_from_vectors', '(', 'start_vec', ',', 'end_vec', ')', ':', 'dot', '=', 'start_vec', '.', 'dot', '(', 'end_vec', ')', '# TODO: check if dot is a valid number', 'angle', '=', 'math', '.', 'acos', '(', 'dot', ')', '# TODO: check if angle is a valid number', 'cross', '=', 'start_vec', '.', 'cross', '(', 'end_vec', ')', 'cross', '.', 'normalize', '(', ')', 'rot_matrix', '=', 'Matrix44', '.', 'from_axis_angle', '(', 'cross', ',', 'angle', ')', '# TODO: catch exception and return identity for invalid numbers', 'return', 'rot_matrix']
Return the rotation matrix to rotate from one vector to another.
['Return', 'the', 'rotation', 'matrix', 'to', 'rotate', 'from', 'one', 'vector', 'to', 'another', '.']
train
https://github.com/olsoneric/pedemath/blob/4bffcfe7089e421d603eb0a9708b84789c2d16be/pedemath/matrix.py#L299-L311
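The TODOs above hint at the numeric hazards: acos needs its argument in [-1, 1], which only holds for unit vectors and can still drift past the bounds in floating point. A standalone sketch of the angle step with clamping (plain Python, not pedemath API):
import math

def angle_between_unit(a, b):
    # a and b must be unit-length 3-vectors; clamping guards against float drift
    dot = sum(x * y for x, y in zip(a, b))
    return math.acos(max(-1.0, min(1.0, dot)))

print(angle_between_unit((1, 0, 0), (0, 1, 0)))  # ~1.5708 (pi/2)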
8,152
gwpy/gwpy
gwpy/timeseries/statevector.py
StateVector.to_dqflags
def to_dqflags(self, bits=None, minlen=1, dtype=float, round=False): """Convert this `StateVector` into a `~gwpy.segments.DataQualityDict` The `StateTimeSeries` for each bit is converted into a `~gwpy.segments.DataQualityFlag` with the bits combined into a dict. Parameters ---------- minlen : `int`, optional, default: 1 minimum number of consecutive `True` values to identify as a `Segment`. This is useful to ignore single bit flips, for example. bits : `list`, optional a list of bit indices or bit names to select, defaults to `~StateVector.bits` Returns ------- DataQualityFlag list : `list` a list of `~gwpy.segments.flag.DataQualityFlag` reprensentations for each bit in this `StateVector` See Also -------- :meth:`StateTimeSeries.to_dqflag` for details on the segment representation method for `StateVector` bits """ from ..segments import DataQualityDict out = DataQualityDict() bitseries = self.get_bit_series(bits=bits) for bit, sts in bitseries.items(): out[bit] = sts.to_dqflag(name=bit, minlen=minlen, round=round, dtype=dtype, description=self.bits.description[bit]) return out
python
def to_dqflags(self, bits=None, minlen=1, dtype=float, round=False): """Convert this `StateVector` into a `~gwpy.segments.DataQualityDict` The `StateTimeSeries` for each bit is converted into a `~gwpy.segments.DataQualityFlag` with the bits combined into a dict. Parameters ---------- minlen : `int`, optional, default: 1 minimum number of consecutive `True` values to identify as a `Segment`. This is useful to ignore single bit flips, for example. bits : `list`, optional a list of bit indices or bit names to select, defaults to `~StateVector.bits` Returns ------- DataQualityFlag list : `list` a list of `~gwpy.segments.flag.DataQualityFlag` reprensentations for each bit in this `StateVector` See Also -------- :meth:`StateTimeSeries.to_dqflag` for details on the segment representation method for `StateVector` bits """ from ..segments import DataQualityDict out = DataQualityDict() bitseries = self.get_bit_series(bits=bits) for bit, sts in bitseries.items(): out[bit] = sts.to_dqflag(name=bit, minlen=minlen, round=round, dtype=dtype, description=self.bits.description[bit]) return out
['def', 'to_dqflags', '(', 'self', ',', 'bits', '=', 'None', ',', 'minlen', '=', '1', ',', 'dtype', '=', 'float', ',', 'round', '=', 'False', ')', ':', 'from', '.', '.', 'segments', 'import', 'DataQualityDict', 'out', '=', 'DataQualityDict', '(', ')', 'bitseries', '=', 'self', '.', 'get_bit_series', '(', 'bits', '=', 'bits', ')', 'for', 'bit', ',', 'sts', 'in', 'bitseries', '.', 'items', '(', ')', ':', 'out', '[', 'bit', ']', '=', 'sts', '.', 'to_dqflag', '(', 'name', '=', 'bit', ',', 'minlen', '=', 'minlen', ',', 'round', '=', 'round', ',', 'dtype', '=', 'dtype', ',', 'description', '=', 'self', '.', 'bits', '.', 'description', '[', 'bit', ']', ')', 'return', 'out']
Convert this `StateVector` into a `~gwpy.segments.DataQualityDict` The `StateTimeSeries` for each bit is converted into a `~gwpy.segments.DataQualityFlag` with the bits combined into a dict. Parameters ---------- minlen : `int`, optional, default: 1 minimum number of consecutive `True` values to identify as a `Segment`. This is useful to ignore single bit flips, for example. bits : `list`, optional a list of bit indices or bit names to select, defaults to `~StateVector.bits` Returns ------- DataQualityFlag list : `list` a list of `~gwpy.segments.flag.DataQualityFlag` representations for each bit in this `StateVector` See Also -------- :meth:`StateTimeSeries.to_dqflag` for details on the segment representation method for `StateVector` bits
['Convert', 'this', 'StateVector', 'into', 'a', '~gwpy', '.', 'segments', '.', 'DataQualityDict']
train
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L690-L726
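A hedged usage sketch for the conversion above; the StateVector construction kwargs follow gwpy's documented signature, and the sample values are illustrative:
from gwpy.timeseries import StateVector

sv = StateVector([3, 3, 1, 0], bits=['science', 'locked'], sample_rate=1)
flags = sv.to_dqflags(round=True)
print(flags['science'].active)   # segments where the 'science' bit is set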
8,153
zhebrak/raftos
raftos/state.py
Candidate.on_receive_append_entries
def on_receive_append_entries(self, data): """If we discover a Leader with the same term — step down""" if self.storage.term == data['term']: self.state.to_follower()
python
def on_receive_append_entries(self, data): """If we discover a Leader with the same term — step down""" if self.storage.term == data['term']: self.state.to_follower()
['def', 'on_receive_append_entries', '(', 'self', ',', 'data', ')', ':', 'if', 'self', '.', 'storage', '.', 'term', '==', 'data', '[', "'term'", ']', ':', 'self', '.', 'state', '.', 'to_follower', '(', ')']
If we discover a Leader with the same term — step down
['If', 'we', 'discover', 'a', 'Leader', 'with', 'the', 'same', 'term', '—', 'step', 'down']
train
https://github.com/zhebrak/raftos/blob/0d6f9e049b526279b1035f597291a96cf50c9b40/raftos/state.py#L338-L341
8,154
woocommerce/wc-api-python
woocommerce/oauth.py
OAuth.get_oauth_url
def get_oauth_url(self): """ Returns the URL with OAuth params """ params = OrderedDict() if "?" in self.url: url = self.url[:self.url.find("?")] for key, value in parse_qsl(urlparse(self.url).query): params[key] = value else: url = self.url params["oauth_consumer_key"] = self.consumer_key params["oauth_timestamp"] = self.timestamp params["oauth_nonce"] = self.generate_nonce() params["oauth_signature_method"] = "HMAC-SHA256" params["oauth_signature"] = self.generate_oauth_signature(params, url) query_string = urlencode(params) return "%s?%s" % (url, query_string)
python
def get_oauth_url(self): """ Returns the URL with OAuth params """ params = OrderedDict() if "?" in self.url: url = self.url[:self.url.find("?")] for key, value in parse_qsl(urlparse(self.url).query): params[key] = value else: url = self.url params["oauth_consumer_key"] = self.consumer_key params["oauth_timestamp"] = self.timestamp params["oauth_nonce"] = self.generate_nonce() params["oauth_signature_method"] = "HMAC-SHA256" params["oauth_signature"] = self.generate_oauth_signature(params, url) query_string = urlencode(params) return "%s?%s" % (url, query_string)
['def', 'get_oauth_url', '(', 'self', ')', ':', 'params', '=', 'OrderedDict', '(', ')', 'if', '"?"', 'in', 'self', '.', 'url', ':', 'url', '=', 'self', '.', 'url', '[', ':', 'self', '.', 'url', '.', 'find', '(', '"?"', ')', ']', 'for', 'key', ',', 'value', 'in', 'parse_qsl', '(', 'urlparse', '(', 'self', '.', 'url', ')', '.', 'query', ')', ':', 'params', '[', 'key', ']', '=', 'value', 'else', ':', 'url', '=', 'self', '.', 'url', 'params', '[', '"oauth_consumer_key"', ']', '=', 'self', '.', 'consumer_key', 'params', '[', '"oauth_timestamp"', ']', '=', 'self', '.', 'timestamp', 'params', '[', '"oauth_nonce"', ']', '=', 'self', '.', 'generate_nonce', '(', ')', 'params', '[', '"oauth_signature_method"', ']', '=', '"HMAC-SHA256"', 'params', '[', '"oauth_signature"', ']', '=', 'self', '.', 'generate_oauth_signature', '(', 'params', ',', 'url', ')', 'query_string', '=', 'urlencode', '(', 'params', ')', 'return', '"%s?%s"', '%', '(', 'url', ',', 'query_string', ')']
Returns the URL with OAuth params
['Returns', 'the', 'URL', 'with', 'OAuth', 'params']
train
https://github.com/woocommerce/wc-api-python/blob/dee5065eaff2d200ef9883c25799ff605fe5e667/woocommerce/oauth.py#L41-L60
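Given an OAuth instance configured with the store URL and keys (construction omitted; oauth is assumed), the method above yields a signed URL ready for a request:
# oauth = OAuth(...)  # url, consumer key/secret, version, method configured elsewhere
signed_url = oauth.get_oauth_url()
# e.g. https://example.com/wp-json/wc/v3/orders?oauth_consumer_key=...&oauth_signature=...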
8,155
aalireza/SimpleAudioIndexer
SimpleAudioIndexer/__init__.py
SimpleAudioIndexer.search_regexp
def search_regexp(self, pattern, audio_basename=None): """ First joins the words of the word_blocks of timestamps with space, per audio_basename. Then matches `pattern` and calculates the index of the word_block where the first and last word of the matched result appears in. Then presents the output like `search_all` method. Note that the leading and trailing spaces from the matched results would be removed while determining which word_block they belong to. Parameters ---------- pattern : str A regex pattern. audio_basename : str, optional Search only within the given audio_basename. Default is `False`. Returns ------- search_results : {str: {str: [(float, float)]}} A dictionary whose keys are queries and whose values are dictionaries whose keys are all the audiofiles in which the query is present and whose values are a list whose elements are 2-tuples whose first element is the starting second of the query and whose values are the ending second. e.g. {"apple": {"fruits.wav" : [(1.1, 1.12)]}} """ def indexes_in_transcript_to_start_end_second(index_tup, audio_basename): """ Calculates the word block index by having the beginning and ending index of the matched result from the transcription Parameters ---------- index_tup : (int, tup) index_tup is of the form tuple(index_start, index_end) audio_basename : str Retrun ------ [float, float] The time of the output of the matched result. Derived from two separate word blocks belonging to the beginning and the end of the index_start and index_end. """ space_indexes = [i for i, x in enumerate( transcription[audio_basename]) if x == " "] space_indexes.sort(reverse=True) index_start, index_end = index_tup # re.finditer returns the ending index by one more index_end -= 1 while transcription[audio_basename][index_start] == " ": index_start += 1 while transcription[audio_basename][index_end] == " ": index_end -= 1 block_number_start = 0 block_number_end = len(space_indexes) for block_cursor, space_index in enumerate(space_indexes): if index_start > space_index: block_number_start = (len(space_indexes) - block_cursor) break for block_cursor, space_index in enumerate(space_indexes): if index_end > space_index: block_number_end = (len(space_indexes) - block_cursor) break return (timestamps[audio_basename][block_number_start].start, timestamps[audio_basename][block_number_end].end) timestamps = self.get_timestamps() if audio_basename is not None: timestamps = {audio_basename: timestamps[audio_basename]} transcription = { audio_basename: ' '.join( [word_block.word for word_block in timestamps[audio_basename]] ) for audio_basename in timestamps} match_map = map( lambda audio_basename: tuple(( audio_basename, re.finditer(pattern, transcription[audio_basename]))), transcription.keys()) search_results = _PrettyDefaultDict(lambda: _PrettyDefaultDict(list)) for audio_basename, match_iter in match_map: for match in match_iter: search_results[match.group()][audio_basename].append( tuple(indexes_in_transcript_to_start_end_second( match.span(), audio_basename))) return search_results
python
def search_regexp(self, pattern, audio_basename=None): """ First joins the words of the word_blocks of timestamps with space, per audio_basename. Then matches `pattern` and calculates the index of the word_block where the first and last word of the matched result appears in. Then presents the output like `search_all` method. Note that the leading and trailing spaces from the matched results would be removed while determining which word_block they belong to. Parameters ---------- pattern : str A regex pattern. audio_basename : str, optional Search only within the given audio_basename. Default is `False`. Returns ------- search_results : {str: {str: [(float, float)]}} A dictionary whose keys are queries and whose values are dictionaries whose keys are all the audiofiles in which the query is present and whose values are a list whose elements are 2-tuples whose first element is the starting second of the query and whose values are the ending second. e.g. {"apple": {"fruits.wav" : [(1.1, 1.12)]}} """ def indexes_in_transcript_to_start_end_second(index_tup, audio_basename): """ Calculates the word block index by having the beginning and ending index of the matched result from the transcription Parameters ---------- index_tup : (int, tup) index_tup is of the form tuple(index_start, index_end) audio_basename : str Retrun ------ [float, float] The time of the output of the matched result. Derived from two separate word blocks belonging to the beginning and the end of the index_start and index_end. """ space_indexes = [i for i, x in enumerate( transcription[audio_basename]) if x == " "] space_indexes.sort(reverse=True) index_start, index_end = index_tup # re.finditer returns the ending index by one more index_end -= 1 while transcription[audio_basename][index_start] == " ": index_start += 1 while transcription[audio_basename][index_end] == " ": index_end -= 1 block_number_start = 0 block_number_end = len(space_indexes) for block_cursor, space_index in enumerate(space_indexes): if index_start > space_index: block_number_start = (len(space_indexes) - block_cursor) break for block_cursor, space_index in enumerate(space_indexes): if index_end > space_index: block_number_end = (len(space_indexes) - block_cursor) break return (timestamps[audio_basename][block_number_start].start, timestamps[audio_basename][block_number_end].end) timestamps = self.get_timestamps() if audio_basename is not None: timestamps = {audio_basename: timestamps[audio_basename]} transcription = { audio_basename: ' '.join( [word_block.word for word_block in timestamps[audio_basename]] ) for audio_basename in timestamps} match_map = map( lambda audio_basename: tuple(( audio_basename, re.finditer(pattern, transcription[audio_basename]))), transcription.keys()) search_results = _PrettyDefaultDict(lambda: _PrettyDefaultDict(list)) for audio_basename, match_iter in match_map: for match in match_iter: search_results[match.group()][audio_basename].append( tuple(indexes_in_transcript_to_start_end_second( match.span(), audio_basename))) return search_results
['def', 'search_regexp', '(', 'self', ',', 'pattern', ',', 'audio_basename', '=', 'None', ')', ':', 'def', 'indexes_in_transcript_to_start_end_second', '(', 'index_tup', ',', 'audio_basename', ')', ':', '"""\n Calculates the word block index by having the beginning and ending\n index of the matched result from the transcription\n\n Parameters\n ----------\n index_tup : (int, tup)\n index_tup is of the form tuple(index_start, index_end)\n audio_basename : str\n\n Retrun\n ------\n [float, float]\n The time of the output of the matched result. Derived from two\n separate word blocks belonging to the beginning and the end of\n the index_start and index_end.\n """', 'space_indexes', '=', '[', 'i', 'for', 'i', ',', 'x', 'in', 'enumerate', '(', 'transcription', '[', 'audio_basename', ']', ')', 'if', 'x', '==', '" "', ']', 'space_indexes', '.', 'sort', '(', 'reverse', '=', 'True', ')', 'index_start', ',', 'index_end', '=', 'index_tup', '# re.finditer returns the ending index by one more', 'index_end', '-=', '1', 'while', 'transcription', '[', 'audio_basename', ']', '[', 'index_start', ']', '==', '" "', ':', 'index_start', '+=', '1', 'while', 'transcription', '[', 'audio_basename', ']', '[', 'index_end', ']', '==', '" "', ':', 'index_end', '-=', '1', 'block_number_start', '=', '0', 'block_number_end', '=', 'len', '(', 'space_indexes', ')', 'for', 'block_cursor', ',', 'space_index', 'in', 'enumerate', '(', 'space_indexes', ')', ':', 'if', 'index_start', '>', 'space_index', ':', 'block_number_start', '=', '(', 'len', '(', 'space_indexes', ')', '-', 'block_cursor', ')', 'break', 'for', 'block_cursor', ',', 'space_index', 'in', 'enumerate', '(', 'space_indexes', ')', ':', 'if', 'index_end', '>', 'space_index', ':', 'block_number_end', '=', '(', 'len', '(', 'space_indexes', ')', '-', 'block_cursor', ')', 'break', 'return', '(', 'timestamps', '[', 'audio_basename', ']', '[', 'block_number_start', ']', '.', 'start', ',', 'timestamps', '[', 'audio_basename', ']', '[', 'block_number_end', ']', '.', 'end', ')', 'timestamps', '=', 'self', '.', 'get_timestamps', '(', ')', 'if', 'audio_basename', 'is', 'not', 'None', ':', 'timestamps', '=', '{', 'audio_basename', ':', 'timestamps', '[', 'audio_basename', ']', '}', 'transcription', '=', '{', 'audio_basename', ':', "' '", '.', 'join', '(', '[', 'word_block', '.', 'word', 'for', 'word_block', 'in', 'timestamps', '[', 'audio_basename', ']', ']', ')', 'for', 'audio_basename', 'in', 'timestamps', '}', 'match_map', '=', 'map', '(', 'lambda', 'audio_basename', ':', 'tuple', '(', '(', 'audio_basename', ',', 're', '.', 'finditer', '(', 'pattern', ',', 'transcription', '[', 'audio_basename', ']', ')', ')', ')', ',', 'transcription', '.', 'keys', '(', ')', ')', 'search_results', '=', '_PrettyDefaultDict', '(', 'lambda', ':', '_PrettyDefaultDict', '(', 'list', ')', ')', 'for', 'audio_basename', ',', 'match_iter', 'in', 'match_map', ':', 'for', 'match', 'in', 'match_iter', ':', 'search_results', '[', 'match', '.', 'group', '(', ')', ']', '[', 'audio_basename', ']', '.', 'append', '(', 'tuple', '(', 'indexes_in_transcript_to_start_end_second', '(', 'match', '.', 'span', '(', ')', ',', 'audio_basename', ')', ')', ')', 'return', 'search_results']
First joins the words of the word_blocks of timestamps with space, per audio_basename. Then matches `pattern` and calculates the index of the word_block where the first and last word of the matched result appear. Then presents the output like the `search_all` method. Note that the leading and trailing spaces from the matched results would be removed while determining which word_block they belong to. Parameters ---------- pattern : str A regex pattern. audio_basename : str, optional Search only within the given audio_basename. Default is `None`. Returns ------- search_results : {str: {str: [(float, float)]}} A dictionary whose keys are queries and whose values are dictionaries whose keys are all the audiofiles in which the query is present and whose values are a list whose elements are 2-tuples whose first element is the starting second of the query and whose second element is the ending second. e.g. {"apple": {"fruits.wav" : [(1.1, 1.12)]}}
['First', 'joins', 'the', 'words', 'of', 'the', 'word_blocks', 'of', 'timestamps', 'with', 'space', 'per', 'audio_basename', '.', 'Then', 'matches', 'pattern', 'and', 'calculates', 'the', 'index', 'of', 'the', 'word_block', 'where', 'the', 'first', 'and', 'last', 'word', 'of', 'the', 'matched', 'result', 'appears', 'in', '.', 'Then', 'presents', 'the', 'output', 'like', 'search_all', 'method', '.']
train
https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L1603-L1693
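Given an indexer that has already transcribed its audio files (construction and indexing steps omitted; indexer is assumed), the regex search above is used like this:
# indexer = SimpleAudioIndexer(...)  # set up and indexed elsewhere
hits = indexer.search_regexp(r'appl\w*', audio_basename='fruits.wav')
# e.g. {'apples': {'fruits.wav': [(1.1, 1.12)]}}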
8,156
inveniosoftware/invenio-files-rest
invenio_files_rest/serializer.py
MultipartObjectSchema.dump_links
def dump_links(self, o): """Dump links.""" links = { 'self': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, uploadId=o.upload_id, _external=True, ), 'object': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, _external=True, ), } version_id = self.context.get('object_version_id') if version_id: links.update({ 'object_version': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, versionId=version_id, _external=True, ) }) bucket = self.context.get('bucket') if bucket: links.update({ 'bucket': url_for( '.bucket_api', bucket_id=o.bucket_id, _external=True, ) }) return links
python
def dump_links(self, o): """Dump links.""" links = { 'self': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, uploadId=o.upload_id, _external=True, ), 'object': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, _external=True, ), } version_id = self.context.get('object_version_id') if version_id: links.update({ 'object_version': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, versionId=version_id, _external=True, ) }) bucket = self.context.get('bucket') if bucket: links.update({ 'bucket': url_for( '.bucket_api', bucket_id=o.bucket_id, _external=True, ) }) return links
['def', 'dump_links', '(', 'self', ',', 'o', ')', ':', 'links', '=', '{', "'self'", ':', 'url_for', '(', "'.object_api'", ',', 'bucket_id', '=', 'o', '.', 'bucket_id', ',', 'key', '=', 'o', '.', 'key', ',', 'uploadId', '=', 'o', '.', 'upload_id', ',', '_external', '=', 'True', ',', ')', ',', "'object'", ':', 'url_for', '(', "'.object_api'", ',', 'bucket_id', '=', 'o', '.', 'bucket_id', ',', 'key', '=', 'o', '.', 'key', ',', '_external', '=', 'True', ',', ')', ',', '}', 'version_id', '=', 'self', '.', 'context', '.', 'get', '(', "'object_version_id'", ')', 'if', 'version_id', ':', 'links', '.', 'update', '(', '{', "'object_version'", ':', 'url_for', '(', "'.object_api'", ',', 'bucket_id', '=', 'o', '.', 'bucket_id', ',', 'key', '=', 'o', '.', 'key', ',', 'versionId', '=', 'version_id', ',', '_external', '=', 'True', ',', ')', '}', ')', 'bucket', '=', 'self', '.', 'context', '.', 'get', '(', "'bucket'", ')', 'if', 'bucket', ':', 'links', '.', 'update', '(', '{', "'bucket'", ':', 'url_for', '(', "'.bucket_api'", ',', 'bucket_id', '=', 'o', '.', 'bucket_id', ',', '_external', '=', 'True', ',', ')', '}', ')', 'return', 'links']
Dump links.
['Dump', 'links', '.']
train
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/serializer.py#L126-L166
8,157
UDST/urbansim
urbansim/models/dcm.py
MNLDiscreteChoiceModel.to_dict
def to_dict(self): """ Return a dict respresentation of an MNLDiscreteChoiceModel instance. """ return { 'model_type': 'discretechoice', 'model_expression': self.model_expression, 'sample_size': self.sample_size, 'name': self.name, 'probability_mode': self.probability_mode, 'choice_mode': self.choice_mode, 'choosers_fit_filters': self.choosers_fit_filters, 'choosers_predict_filters': self.choosers_predict_filters, 'alts_fit_filters': self.alts_fit_filters, 'alts_predict_filters': self.alts_predict_filters, 'interaction_predict_filters': self.interaction_predict_filters, 'estimation_sample_size': self.estimation_sample_size, 'prediction_sample_size': self.prediction_sample_size, 'choice_column': self.choice_column, 'fitted': self.fitted, 'log_likelihoods': self.log_likelihoods, 'fit_parameters': (yamlio.frame_to_yaml_safe(self.fit_parameters) if self.fitted else None) }
python
def to_dict(self): """ Return a dict respresentation of an MNLDiscreteChoiceModel instance. """ return { 'model_type': 'discretechoice', 'model_expression': self.model_expression, 'sample_size': self.sample_size, 'name': self.name, 'probability_mode': self.probability_mode, 'choice_mode': self.choice_mode, 'choosers_fit_filters': self.choosers_fit_filters, 'choosers_predict_filters': self.choosers_predict_filters, 'alts_fit_filters': self.alts_fit_filters, 'alts_predict_filters': self.alts_predict_filters, 'interaction_predict_filters': self.interaction_predict_filters, 'estimation_sample_size': self.estimation_sample_size, 'prediction_sample_size': self.prediction_sample_size, 'choice_column': self.choice_column, 'fitted': self.fitted, 'log_likelihoods': self.log_likelihoods, 'fit_parameters': (yamlio.frame_to_yaml_safe(self.fit_parameters) if self.fitted else None) }
['def', 'to_dict', '(', 'self', ')', ':', 'return', '{', "'model_type'", ':', "'discretechoice'", ',', "'model_expression'", ':', 'self', '.', 'model_expression', ',', "'sample_size'", ':', 'self', '.', 'sample_size', ',', "'name'", ':', 'self', '.', 'name', ',', "'probability_mode'", ':', 'self', '.', 'probability_mode', ',', "'choice_mode'", ':', 'self', '.', 'choice_mode', ',', "'choosers_fit_filters'", ':', 'self', '.', 'choosers_fit_filters', ',', "'choosers_predict_filters'", ':', 'self', '.', 'choosers_predict_filters', ',', "'alts_fit_filters'", ':', 'self', '.', 'alts_fit_filters', ',', "'alts_predict_filters'", ':', 'self', '.', 'alts_predict_filters', ',', "'interaction_predict_filters'", ':', 'self', '.', 'interaction_predict_filters', ',', "'estimation_sample_size'", ':', 'self', '.', 'estimation_sample_size', ',', "'prediction_sample_size'", ':', 'self', '.', 'prediction_sample_size', ',', "'choice_column'", ':', 'self', '.', 'choice_column', ',', "'fitted'", ':', 'self', '.', 'fitted', ',', "'log_likelihoods'", ':', 'self', '.', 'log_likelihoods', ',', "'fit_parameters'", ':', '(', 'yamlio', '.', 'frame_to_yaml_safe', '(', 'self', '.', 'fit_parameters', ')', 'if', 'self', '.', 'fitted', 'else', 'None', ')', '}']
Return a dict representation of an MNLDiscreteChoiceModel instance.
['Return', 'a', 'dict', 'representation', 'of', 'an', 'MNLDiscreteChoiceModel', 'instance', '.']
train
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L659-L684
8,158
Alignak-monitoring/alignak
alignak/daemons/receiverdaemon.py
Receiver.do_loop_turn
def do_loop_turn(self): """Receiver daemon main loop :return: None """ # Begin to clean modules self.check_and_del_zombie_modules() # Maybe the arbiter pushed a new configuration... if self.watch_for_new_conf(timeout=0.05): logger.info("I got a new configuration...") # Manage the new configuration self.setup_new_conf() # Maybe external modules raised 'objects' # we should get them _t0 = time.time() self.get_objects_from_from_queues() statsmgr.timer('core.get-objects-from-queues', time.time() - _t0) # Get external commands from the arbiters... _t0 = time.time() self.get_external_commands_from_arbiters() statsmgr.timer('external-commands.got.time', time.time() - _t0) statsmgr.gauge('external-commands.got.count', len(self.unprocessed_external_commands)) _t0 = time.time() self.push_external_commands_to_schedulers() statsmgr.timer('external-commands.pushed.time', time.time() - _t0) # Say to modules it's a new tick :) _t0 = time.time() self.hook_point('tick') statsmgr.timer('hook.tick', time.time() - _t0)
python
def do_loop_turn(self): """Receiver daemon main loop :return: None """ # Begin to clean modules self.check_and_del_zombie_modules() # Maybe the arbiter pushed a new configuration... if self.watch_for_new_conf(timeout=0.05): logger.info("I got a new configuration...") # Manage the new configuration self.setup_new_conf() # Maybe external modules raised 'objects' # we should get them _t0 = time.time() self.get_objects_from_from_queues() statsmgr.timer('core.get-objects-from-queues', time.time() - _t0) # Get external commands from the arbiters... _t0 = time.time() self.get_external_commands_from_arbiters() statsmgr.timer('external-commands.got.time', time.time() - _t0) statsmgr.gauge('external-commands.got.count', len(self.unprocessed_external_commands)) _t0 = time.time() self.push_external_commands_to_schedulers() statsmgr.timer('external-commands.pushed.time', time.time() - _t0) # Say to modules it's a new tick :) _t0 = time.time() self.hook_point('tick') statsmgr.timer('hook.tick', time.time() - _t0)
['def', 'do_loop_turn', '(', 'self', ')', ':', '# Begin to clean modules', 'self', '.', 'check_and_del_zombie_modules', '(', ')', '# Maybe the arbiter pushed a new configuration...', 'if', 'self', '.', 'watch_for_new_conf', '(', 'timeout', '=', '0.05', ')', ':', 'logger', '.', 'info', '(', '"I got a new configuration..."', ')', '# Manage the new configuration', 'self', '.', 'setup_new_conf', '(', ')', "# Maybe external modules raised 'objects'", '# we should get them', '_t0', '=', 'time', '.', 'time', '(', ')', 'self', '.', 'get_objects_from_from_queues', '(', ')', 'statsmgr', '.', 'timer', '(', "'core.get-objects-from-queues'", ',', 'time', '.', 'time', '(', ')', '-', '_t0', ')', '# Get external commands from the arbiters...', '_t0', '=', 'time', '.', 'time', '(', ')', 'self', '.', 'get_external_commands_from_arbiters', '(', ')', 'statsmgr', '.', 'timer', '(', "'external-commands.got.time'", ',', 'time', '.', 'time', '(', ')', '-', '_t0', ')', 'statsmgr', '.', 'gauge', '(', "'external-commands.got.count'", ',', 'len', '(', 'self', '.', 'unprocessed_external_commands', ')', ')', '_t0', '=', 'time', '.', 'time', '(', ')', 'self', '.', 'push_external_commands_to_schedulers', '(', ')', 'statsmgr', '.', 'timer', '(', "'external-commands.pushed.time'", ',', 'time', '.', 'time', '(', ')', '-', '_t0', ')', "# Say to modules it's a new tick :)", '_t0', '=', 'time', '.', 'time', '(', ')', 'self', '.', 'hook_point', '(', "'tick'", ')', 'statsmgr', '.', 'timer', '(', "'hook.tick'", ',', 'time', '.', 'time', '(', ')', '-', '_t0', ')']
Receiver daemon main loop :return: None
['Receiver', 'daemon', 'main', 'loop']
train
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/receiverdaemon.py#L307-L341
8,159
enricobacis/wos
wos/client.py
WosClient.connect
def connect(self): """Authenticate to WOS and set the SID cookie.""" if not self._SID: self._SID = self._auth.service.authenticate() print('Authenticated (SID: %s)' % self._SID) self._search.set_options(headers={'Cookie': 'SID="%s"' % self._SID}) self._auth.options.headers.update({'Cookie': 'SID="%s"' % self._SID}) return self._SID
python
def connect(self): """Authenticate to WOS and set the SID cookie.""" if not self._SID: self._SID = self._auth.service.authenticate() print('Authenticated (SID: %s)' % self._SID) self._search.set_options(headers={'Cookie': 'SID="%s"' % self._SID}) self._auth.options.headers.update({'Cookie': 'SID="%s"' % self._SID}) return self._SID
['def', 'connect', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_SID', ':', 'self', '.', '_SID', '=', 'self', '.', '_auth', '.', 'service', '.', 'authenticate', '(', ')', 'print', '(', "'Authenticated (SID: %s)'", '%', 'self', '.', '_SID', ')', 'self', '.', '_search', '.', 'set_options', '(', 'headers', '=', '{', "'Cookie'", ':', '\'SID="%s"\'', '%', 'self', '.', '_SID', '}', ')', 'self', '.', '_auth', '.', 'options', '.', 'headers', '.', 'update', '(', '{', "'Cookie'", ':', '\'SID="%s"\'', '%', 'self', '.', '_SID', '}', ')', 'return', 'self', '.', '_SID']
Authenticate to WOS and set the SID cookie.
['Authenticate', 'to', 'WOS', 'and', 'set', 'the', 'SID', 'cookie', '.']
train
https://github.com/enricobacis/wos/blob/a51f4d1a983c2c7529caac3e09606a432223630d/wos/client.py#L104-L112
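Editor's note: a hedged usage sketch for the WosClient.connect record above. The constructor arguments and import path are assumptions; only connect() and its returned SID come from the record itself:

    # Hypothetical usage; credentials are placeholders.
    from wos import WosClient  # assumed public import path for wos/client.py

    client = WosClient(user='demo', password='secret')
    sid = client.connect()  # authenticates once, then reuses the SID cookie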
8,160
schlamar/cov-core
cov_core.py
DistMaster.finish
def finish(self): """Combines coverage data and sets the list of coverage objects to report on.""" # Combine all the suffix files into the data file. self.cov.stop() self.cov.combine() self.cov.save()
python
def finish(self): """Combines coverage data and sets the list of coverage objects to report on.""" # Combine all the suffix files into the data file. self.cov.stop() self.cov.combine() self.cov.save()
['def', 'finish', '(', 'self', ')', ':', '# Combine all the suffix files into the data file.', 'self', '.', 'cov', '.', 'stop', '(', ')', 'self', '.', 'cov', '.', 'combine', '(', ')', 'self', '.', 'cov', '.', 'save', '(', ')']
Combines coverage data and sets the list of coverage objects to report on.
['Combines', 'coverage', 'data', 'and', 'sets', 'the', 'list', 'of', 'coverage', 'objects', 'to', 'report', 'on', '.']
train
https://github.com/schlamar/cov-core/blob/791b1f6890456ee9e3beec33c89a7c573a382b7b/cov_core.py#L203-L209
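Editor's note: the stop/combine/save sequence in DistMaster.finish above is plain coverage.py API. A standalone sketch without the cov-core wrapper:

    # Standalone sketch of the combine step DistMaster.finish() performs.
    import coverage

    cov = coverage.Coverage(data_suffix=True)  # one suffixed data file per worker
    cov.start()
    # ... exercise the code under test ...
    cov.stop()
    cov.combine()  # merge all suffixed files into the main .coverage file
    cov.save()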
8,161
Unidata/MetPy
metpy/calc/tools.py
get_layer
def get_layer(pressure, *args, **kwargs): r"""Return an atmospheric layer from upper air data with the requested bottom and depth. This function will subset an upper air dataset to contain only the specified layer. The bottom of the layer can be specified with a pressure or height above the surface pressure. The bottom defaults to the surface pressure. The depth of the layer can be specified in terms of pressure or height above the bottom of the layer. If the top and bottom of the layer are not in the data, they are interpolated by default. Parameters ---------- pressure : array-like Atmospheric pressure profile *args : array-like Atmospheric variable(s) measured at the given pressures heights: array-like, optional Atmospheric heights corresponding to the given pressures. Defaults to using heights calculated from ``p`` assuming a standard atmosphere. bottom : `pint.Quantity`, optional The bottom of the layer as a pressure or height above the surface pressure. Defaults to the highest pressure or lowest height given. depth : `pint.Quantity`, optional The thickness of the layer as a pressure or height above the bottom of the layer. Defaults to 100 hPa. interpolate : bool, optional Interpolate the top and bottom points if they are not in the given data. Defaults to True. Returns ------- `pint.Quantity, pint.Quantity` The pressure and data variables of the layer """ # Pop off keyword arguments heights = kwargs.pop('heights', None) bottom = kwargs.pop('bottom', None) depth = kwargs.pop('depth', 100 * units.hPa) interpolate = kwargs.pop('interpolate', True) # If we get the depth kwarg, but it's None, set it to the default as well if depth is None: depth = 100 * units.hPa # Make sure pressure and datavars are the same length for datavar in args: if len(pressure) != len(datavar): raise ValueError('Pressure and data variables must have the same length.') # If the bottom is not specified, make it the surface pressure if bottom is None: bottom = np.nanmax(pressure) * pressure.units bottom_pressure, bottom_height = _get_bound_pressure_height(pressure, bottom, heights=heights, interpolate=interpolate) # Calculate the top if whatever units depth is in if depth.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}: top = bottom_pressure - depth elif depth.dimensionality == {'[length]': 1}: top = bottom_height + depth else: raise ValueError('Depth must be specified in units of length or pressure') top_pressure, _ = _get_bound_pressure_height(pressure, top, heights=heights, interpolate=interpolate) ret = [] # returned data variables in layer # Ensure pressures are sorted in ascending order sort_inds = np.argsort(pressure) pressure = pressure[sort_inds] # Mask based on top and bottom pressure inds = (_less_or_close(pressure, bottom_pressure) & _greater_or_close(pressure, top_pressure)) p_interp = pressure[inds] # Interpolate pressures at bounds if necessary and sort if interpolate: # If we don't have the bottom or top requested, append them if not np.any(np.isclose(top_pressure, p_interp)): p_interp = np.sort(np.append(p_interp, top_pressure)) * pressure.units if not np.any(np.isclose(bottom_pressure, p_interp)): p_interp = np.sort(np.append(p_interp, bottom_pressure)) * pressure.units ret.append(p_interp[::-1]) for datavar in args: # Ensure that things are sorted in ascending order datavar = datavar[sort_inds] if interpolate: # Interpolate for the possibly missing bottom/top values datavar_interp = log_interpolate_1d(p_interp, pressure, datavar) datavar = datavar_interp else: datavar = datavar[inds] ret.append(datavar[::-1]) return ret
python
def get_layer(pressure, *args, **kwargs): r"""Return an atmospheric layer from upper air data with the requested bottom and depth. This function will subset an upper air dataset to contain only the specified layer. The bottom of the layer can be specified with a pressure or height above the surface pressure. The bottom defaults to the surface pressure. The depth of the layer can be specified in terms of pressure or height above the bottom of the layer. If the top and bottom of the layer are not in the data, they are interpolated by default. Parameters ---------- pressure : array-like Atmospheric pressure profile *args : array-like Atmospheric variable(s) measured at the given pressures heights: array-like, optional Atmospheric heights corresponding to the given pressures. Defaults to using heights calculated from ``p`` assuming a standard atmosphere. bottom : `pint.Quantity`, optional The bottom of the layer as a pressure or height above the surface pressure. Defaults to the highest pressure or lowest height given. depth : `pint.Quantity`, optional The thickness of the layer as a pressure or height above the bottom of the layer. Defaults to 100 hPa. interpolate : bool, optional Interpolate the top and bottom points if they are not in the given data. Defaults to True. Returns ------- `pint.Quantity, pint.Quantity` The pressure and data variables of the layer """ # Pop off keyword arguments heights = kwargs.pop('heights', None) bottom = kwargs.pop('bottom', None) depth = kwargs.pop('depth', 100 * units.hPa) interpolate = kwargs.pop('interpolate', True) # If we get the depth kwarg, but it's None, set it to the default as well if depth is None: depth = 100 * units.hPa # Make sure pressure and datavars are the same length for datavar in args: if len(pressure) != len(datavar): raise ValueError('Pressure and data variables must have the same length.') # If the bottom is not specified, make it the surface pressure if bottom is None: bottom = np.nanmax(pressure) * pressure.units bottom_pressure, bottom_height = _get_bound_pressure_height(pressure, bottom, heights=heights, interpolate=interpolate) # Calculate the top if whatever units depth is in if depth.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}: top = bottom_pressure - depth elif depth.dimensionality == {'[length]': 1}: top = bottom_height + depth else: raise ValueError('Depth must be specified in units of length or pressure') top_pressure, _ = _get_bound_pressure_height(pressure, top, heights=heights, interpolate=interpolate) ret = [] # returned data variables in layer # Ensure pressures are sorted in ascending order sort_inds = np.argsort(pressure) pressure = pressure[sort_inds] # Mask based on top and bottom pressure inds = (_less_or_close(pressure, bottom_pressure) & _greater_or_close(pressure, top_pressure)) p_interp = pressure[inds] # Interpolate pressures at bounds if necessary and sort if interpolate: # If we don't have the bottom or top requested, append them if not np.any(np.isclose(top_pressure, p_interp)): p_interp = np.sort(np.append(p_interp, top_pressure)) * pressure.units if not np.any(np.isclose(bottom_pressure, p_interp)): p_interp = np.sort(np.append(p_interp, bottom_pressure)) * pressure.units ret.append(p_interp[::-1]) for datavar in args: # Ensure that things are sorted in ascending order datavar = datavar[sort_inds] if interpolate: # Interpolate for the possibly missing bottom/top values datavar_interp = log_interpolate_1d(p_interp, pressure, datavar) datavar = datavar_interp else: datavar = datavar[inds] ret.append(datavar[::-1]) return ret
['def', 'get_layer', '(', 'pressure', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# Pop off keyword arguments', 'heights', '=', 'kwargs', '.', 'pop', '(', "'heights'", ',', 'None', ')', 'bottom', '=', 'kwargs', '.', 'pop', '(', "'bottom'", ',', 'None', ')', 'depth', '=', 'kwargs', '.', 'pop', '(', "'depth'", ',', '100', '*', 'units', '.', 'hPa', ')', 'interpolate', '=', 'kwargs', '.', 'pop', '(', "'interpolate'", ',', 'True', ')', "# If we get the depth kwarg, but it's None, set it to the default as well", 'if', 'depth', 'is', 'None', ':', 'depth', '=', '100', '*', 'units', '.', 'hPa', '# Make sure pressure and datavars are the same length', 'for', 'datavar', 'in', 'args', ':', 'if', 'len', '(', 'pressure', ')', '!=', 'len', '(', 'datavar', ')', ':', 'raise', 'ValueError', '(', "'Pressure and data variables must have the same length.'", ')', '# If the bottom is not specified, make it the surface pressure', 'if', 'bottom', 'is', 'None', ':', 'bottom', '=', 'np', '.', 'nanmax', '(', 'pressure', ')', '*', 'pressure', '.', 'units', 'bottom_pressure', ',', 'bottom_height', '=', '_get_bound_pressure_height', '(', 'pressure', ',', 'bottom', ',', 'heights', '=', 'heights', ',', 'interpolate', '=', 'interpolate', ')', '# Calculate the top if whatever units depth is in', 'if', 'depth', '.', 'dimensionality', '==', '{', "'[length]'", ':', '-', '1.0', ',', "'[mass]'", ':', '1.0', ',', "'[time]'", ':', '-', '2.0', '}', ':', 'top', '=', 'bottom_pressure', '-', 'depth', 'elif', 'depth', '.', 'dimensionality', '==', '{', "'[length]'", ':', '1', '}', ':', 'top', '=', 'bottom_height', '+', 'depth', 'else', ':', 'raise', 'ValueError', '(', "'Depth must be specified in units of length or pressure'", ')', 'top_pressure', ',', '_', '=', '_get_bound_pressure_height', '(', 'pressure', ',', 'top', ',', 'heights', '=', 'heights', ',', 'interpolate', '=', 'interpolate', ')', 'ret', '=', '[', ']', '# returned data variables in layer', '# Ensure pressures are sorted in ascending order', 'sort_inds', '=', 'np', '.', 'argsort', '(', 'pressure', ')', 'pressure', '=', 'pressure', '[', 'sort_inds', ']', '# Mask based on top and bottom pressure', 'inds', '=', '(', '_less_or_close', '(', 'pressure', ',', 'bottom_pressure', ')', '&', '_greater_or_close', '(', 'pressure', ',', 'top_pressure', ')', ')', 'p_interp', '=', 'pressure', '[', 'inds', ']', '# Interpolate pressures at bounds if necessary and sort', 'if', 'interpolate', ':', "# If we don't have the bottom or top requested, append them", 'if', 'not', 'np', '.', 'any', '(', 'np', '.', 'isclose', '(', 'top_pressure', ',', 'p_interp', ')', ')', ':', 'p_interp', '=', 'np', '.', 'sort', '(', 'np', '.', 'append', '(', 'p_interp', ',', 'top_pressure', ')', ')', '*', 'pressure', '.', 'units', 'if', 'not', 'np', '.', 'any', '(', 'np', '.', 'isclose', '(', 'bottom_pressure', ',', 'p_interp', ')', ')', ':', 'p_interp', '=', 'np', '.', 'sort', '(', 'np', '.', 'append', '(', 'p_interp', ',', 'bottom_pressure', ')', ')', '*', 'pressure', '.', 'units', 'ret', '.', 'append', '(', 'p_interp', '[', ':', ':', '-', '1', ']', ')', 'for', 'datavar', 'in', 'args', ':', '# Ensure that things are sorted in ascending order', 'datavar', '=', 'datavar', '[', 'sort_inds', ']', 'if', 'interpolate', ':', '# Interpolate for the possibly missing bottom/top values', 'datavar_interp', '=', 'log_interpolate_1d', '(', 'p_interp', ',', 'pressure', ',', 'datavar', ')', 'datavar', '=', 'datavar_interp', 'else', ':', 'datavar', '=', 'datavar', '[', 'inds', ']', 'ret', '.', 'append', '(', 'datavar', '[', ':', ':', '-', '1', ']', ')', 'return', 'ret']
r"""Return an atmospheric layer from upper air data with the requested bottom and depth. This function will subset an upper air dataset to contain only the specified layer. The bottom of the layer can be specified with a pressure or height above the surface pressure. The bottom defaults to the surface pressure. The depth of the layer can be specified in terms of pressure or height above the bottom of the layer. If the top and bottom of the layer are not in the data, they are interpolated by default. Parameters ---------- pressure : array-like Atmospheric pressure profile *args : array-like Atmospheric variable(s) measured at the given pressures heights: array-like, optional Atmospheric heights corresponding to the given pressures. Defaults to using heights calculated from ``p`` assuming a standard atmosphere. bottom : `pint.Quantity`, optional The bottom of the layer as a pressure or height above the surface pressure. Defaults to the highest pressure or lowest height given. depth : `pint.Quantity`, optional The thickness of the layer as a pressure or height above the bottom of the layer. Defaults to 100 hPa. interpolate : bool, optional Interpolate the top and bottom points if they are not in the given data. Defaults to True. Returns ------- `pint.Quantity, pint.Quantity` The pressure and data variables of the layer
['r', 'Return', 'an', 'atmospheric', 'layer', 'from', 'upper', 'air', 'data', 'with', 'the', 'requested', 'bottom', 'and', 'depth', '.']
train
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/calc/tools.py#L509-L610
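Editor's note: a hedged call sketch for the get_layer record above. The sounding values are invented; the imports assume MetPy's public calc and units modules:

    # Illustrative get_layer() call; profile values are made up.
    import numpy as np
    from metpy.calc import get_layer
    from metpy.units import units

    p = np.array([1000., 925., 850., 700., 500.]) * units.hPa
    t = np.array([25., 21., 16., 5., -10.]) * units.degC

    # Lowest 100 hPa (the documented default depth); the layer top is
    # interpolated when it is not one of the given levels.
    p_layer, t_layer = get_layer(p, t, depth=100 * units.hPa)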
8,162
lotabout/pymustache
pymustache/mustache.py
delimiters_to_re
def delimiters_to_re(delimiters): """convert delimiters to corresponding regular expressions""" # caching delimiters = tuple(delimiters) if delimiters in re_delimiters: re_tag = re_delimiters[delimiters] else: open_tag, close_tag = delimiters # escape open_tag = ''.join([c if c.isalnum() else '\\' + c for c in open_tag]) close_tag = ''.join([c if c.isalnum() else '\\' + c for c in close_tag]) re_tag = re.compile(open_tag + r'([#^>&{/!=]?)\s*(.*?)\s*([}=]?)' + close_tag, re.DOTALL) re_delimiters[delimiters] = re_tag return re_tag
python
def delimiters_to_re(delimiters): """convert delimiters to corresponding regular expressions""" # caching delimiters = tuple(delimiters) if delimiters in re_delimiters: re_tag = re_delimiters[delimiters] else: open_tag, close_tag = delimiters # escape open_tag = ''.join([c if c.isalnum() else '\\' + c for c in open_tag]) close_tag = ''.join([c if c.isalnum() else '\\' + c for c in close_tag]) re_tag = re.compile(open_tag + r'([#^>&{/!=]?)\s*(.*?)\s*([}=]?)' + close_tag, re.DOTALL) re_delimiters[delimiters] = re_tag return re_tag
['def', 'delimiters_to_re', '(', 'delimiters', ')', ':', '# caching', 'delimiters', '=', 'tuple', '(', 'delimiters', ')', 'if', 'delimiters', 'in', 're_delimiters', ':', 're_tag', '=', 're_delimiters', '[', 'delimiters', ']', 'else', ':', 'open_tag', ',', 'close_tag', '=', 'delimiters', '# escape', 'open_tag', '=', "''", '.', 'join', '(', '[', 'c', 'if', 'c', '.', 'isalnum', '(', ')', 'else', "'\\\\'", '+', 'c', 'for', 'c', 'in', 'open_tag', ']', ')', 'close_tag', '=', "''", '.', 'join', '(', '[', 'c', 'if', 'c', '.', 'isalnum', '(', ')', 'else', "'\\\\'", '+', 'c', 'for', 'c', 'in', 'close_tag', ']', ')', 're_tag', '=', 're', '.', 'compile', '(', 'open_tag', '+', "r'([#^>&{/!=]?)\\s*(.*?)\\s*([}=]?)'", '+', 'close_tag', ',', 're', '.', 'DOTALL', ')', 're_delimiters', '[', 'delimiters', ']', '=', 're_tag', 'return', 're_tag']
convert delimiters to corresponding regular expressions
['convert', 'delimiters', 'to', 'corresponding', 'regular', 'expressions']
train
https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L69-L86
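Editor's note: the escaping logic in the delimiters_to_re record above can be exercised standalone. A sketch with custom ERB-style delimiters (the module-level cache is omitted):

    # Standalone sketch of delimiters_to_re()'s escaping and pattern shape.
    import re

    def esc(tag):
        # escape every non-alphanumeric delimiter character, as above
        return ''.join(c if c.isalnum() else '\\' + c for c in tag)

    open_tag, close_tag = '<%', '%>'  # example custom delimiters
    re_tag = re.compile(esc(open_tag) + r'([#^>&{/!=]?)\s*(.*?)\s*([}=]?)' + esc(close_tag), re.DOTALL)
    print(re_tag.findall('<% name %> and <%# comment %>'))
    # [('', 'name', ''), ('#', 'comment', '')]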
8,163
onelogin/python-saml
src/onelogin/saml2/metadata.py
OneLogin_Saml2_Metadata.builder
def builder(sp, authnsign=False, wsign=False, valid_until=None, cache_duration=None, contacts=None, organization=None): """ Builds the metadata of the SP :param sp: The SP data :type sp: string :param authnsign: authnRequestsSigned attribute :type authnsign: string :param wsign: wantAssertionsSigned attribute :type wsign: string :param valid_until: Metadata's expiry date :type valid_until: string|DateTime|Timestamp :param cache_duration: Duration of the cache in seconds :type cache_duration: int|string :param contacts: Contacts info :type contacts: dict :param organization: Organization info :type organization: dict """ if valid_until is None: valid_until = int(time()) + OneLogin_Saml2_Metadata.TIME_VALID if not isinstance(valid_until, basestring): if isinstance(valid_until, datetime): valid_until_time = valid_until.timetuple() else: valid_until_time = gmtime(valid_until) valid_until_str = strftime(r'%Y-%m-%dT%H:%M:%SZ', valid_until_time) else: valid_until_str = valid_until if cache_duration is None: cache_duration = OneLogin_Saml2_Metadata.TIME_CACHED if not isinstance(cache_duration, basestring): cache_duration_str = 'PT%sS' % cache_duration # 'P'eriod of 'T'ime x 'S'econds else: cache_duration_str = cache_duration if contacts is None: contacts = {} if organization is None: organization = {} str_attribute_consuming_service = '' if 'attributeConsumingService' in sp and len(sp['attributeConsumingService']): attr_cs_desc_str = '' if "serviceDescription" in sp['attributeConsumingService']: attr_cs_desc_str = """ <md:ServiceDescription xml:lang="en">%s</md:ServiceDescription> """ % sp['attributeConsumingService']['serviceDescription'] requested_attribute_data = [] for req_attribs in sp['attributeConsumingService']['requestedAttributes']: req_attr_nameformat_str = req_attr_friendlyname_str = req_attr_isrequired_str = '' req_attr_aux_str = ' />' if 'nameFormat' in req_attribs.keys() and req_attribs['nameFormat']: req_attr_nameformat_str = " NameFormat=\"%s\"" % req_attribs['nameFormat'] if 'friendlyName' in req_attribs.keys() and req_attribs['friendlyName']: req_attr_friendlyname_str = " FriendlyName=\"%s\"" % req_attribs['friendlyName'] if 'isRequired' in req_attribs.keys() and req_attribs['isRequired']: req_attr_isrequired_str = " isRequired=\"%s\"" % 'true' if req_attribs['isRequired'] else 'false' if 'attributeValue' in req_attribs.keys() and req_attribs['attributeValue']: if isinstance(req_attribs['attributeValue'], basestring): req_attribs['attributeValue'] = [req_attribs['attributeValue']] req_attr_aux_str = ">" for attrValue in req_attribs['attributeValue']: req_attr_aux_str += """ <saml:AttributeValue xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion">%(attributeValue)s</saml:AttributeValue>""" % \ { 'attributeValue': attrValue } req_attr_aux_str += """ </md:RequestedAttribute>""" requested_attribute = """ <md:RequestedAttribute Name="%(req_attr_name)s"%(req_attr_nameformat_str)s%(req_attr_friendlyname_str)s%(req_attr_isrequired_str)s%(req_attr_aux_str)s""" % \ { 'req_attr_name': req_attribs['name'], 'req_attr_nameformat_str': req_attr_nameformat_str, 'req_attr_friendlyname_str': req_attr_friendlyname_str, 'req_attr_isrequired_str': req_attr_isrequired_str, 'req_attr_aux_str': req_attr_aux_str } requested_attribute_data.append(requested_attribute) str_attribute_consuming_service = """ <md:AttributeConsumingService index="1"> <md:ServiceName xml:lang="en">%(service_name)s</md:ServiceName> %(attr_cs_desc)s%(requested_attribute_str)s </md:AttributeConsumingService> """ % \ { 'service_name': sp['attributeConsumingService']['serviceName'], 'attr_cs_desc': attr_cs_desc_str, 'requested_attribute_str': '\n'.join(requested_attribute_data) } sls = '' if 'singleLogoutService' in sp and 'url' in sp['singleLogoutService']: sls = """ <md:SingleLogoutService Binding="%(binding)s" Location="%(location)s" />\n""" % \ { 'binding': sp['singleLogoutService']['binding'], 'location': sp['singleLogoutService']['url'], } str_authnsign = 'true' if authnsign else 'false' str_wsign = 'true' if wsign else 'false' str_organization = '' if len(organization) > 0: organization_names = [] organization_displaynames = [] organization_urls = [] for (lang, info) in organization.items(): organization_names.append(""" <md:OrganizationName xml:lang="%s">%s</md:OrganizationName>""" % (lang, info['name'])) organization_displaynames.append(""" <md:OrganizationDisplayName xml:lang="%s">%s</md:OrganizationDisplayName>""" % (lang, info['displayname'])) organization_urls.append(""" <md:OrganizationURL xml:lang="%s">%s</md:OrganizationURL>""" % (lang, info['url'])) org_data = '\n'.join(organization_names) + '\n' + '\n'.join(organization_displaynames) + '\n' + '\n'.join(organization_urls) str_organization = """ <md:Organization> %(org)s </md:Organization>\n""" % {'org': org_data} str_contacts = '' if len(contacts) > 0: contacts_info = [] for (ctype, info) in contacts.items(): contact = """ <md:ContactPerson contactType="%(type)s"> <md:GivenName>%(name)s</md:GivenName> <md:EmailAddress>%(email)s</md:EmailAddress> </md:ContactPerson>""" % \ { 'type': ctype, 'name': info['givenName'], 'email': info['emailAddress'], } contacts_info.append(contact) str_contacts = '\n'.join(contacts_info) + '\n' metadata = u"""<?xml version="1.0"?> <md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" %(valid)s %(cache)s entityID="%(entity_id)s"> <md:SPSSODescriptor AuthnRequestsSigned="%(authnsign)s" WantAssertionsSigned="%(wsign)s" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol"> %(sls)s <md:NameIDFormat>%(name_id_format)s</md:NameIDFormat> <md:AssertionConsumerService Binding="%(binding)s" Location="%(location)s" index="1" /> %(attribute_consuming_service)s </md:SPSSODescriptor> %(organization)s%(contacts)s</md:EntityDescriptor>""" % \ { 'valid': ('validUntil="%s"' % valid_until_str) if valid_until_str else '', 'cache': ('cacheDuration="%s"' % cache_duration_str) if cache_duration_str else '', 'entity_id': sp['entityId'], 'authnsign': str_authnsign, 'wsign': str_wsign, 'name_id_format': sp['NameIDFormat'], 'binding': sp['assertionConsumerService']['binding'], 'location': sp['assertionConsumerService']['url'], 'sls': sls, 'organization': str_organization, 'contacts': str_contacts, 'attribute_consuming_service': str_attribute_consuming_service } return metadata
python
def builder(sp, authnsign=False, wsign=False, valid_until=None, cache_duration=None, contacts=None, organization=None): """ Builds the metadata of the SP :param sp: The SP data :type sp: string :param authnsign: authnRequestsSigned attribute :type authnsign: string :param wsign: wantAssertionsSigned attribute :type wsign: string :param valid_until: Metadata's expiry date :type valid_until: string|DateTime|Timestamp :param cache_duration: Duration of the cache in seconds :type cache_duration: int|string :param contacts: Contacts info :type contacts: dict :param organization: Organization info :type organization: dict """ if valid_until is None: valid_until = int(time()) + OneLogin_Saml2_Metadata.TIME_VALID if not isinstance(valid_until, basestring): if isinstance(valid_until, datetime): valid_until_time = valid_until.timetuple() else: valid_until_time = gmtime(valid_until) valid_until_str = strftime(r'%Y-%m-%dT%H:%M:%SZ', valid_until_time) else: valid_until_str = valid_until if cache_duration is None: cache_duration = OneLogin_Saml2_Metadata.TIME_CACHED if not isinstance(cache_duration, basestring): cache_duration_str = 'PT%sS' % cache_duration # 'P'eriod of 'T'ime x 'S'econds else: cache_duration_str = cache_duration if contacts is None: contacts = {} if organization is None: organization = {} str_attribute_consuming_service = '' if 'attributeConsumingService' in sp and len(sp['attributeConsumingService']): attr_cs_desc_str = '' if "serviceDescription" in sp['attributeConsumingService']: attr_cs_desc_str = """ <md:ServiceDescription xml:lang="en">%s</md:ServiceDescription> """ % sp['attributeConsumingService']['serviceDescription'] requested_attribute_data = [] for req_attribs in sp['attributeConsumingService']['requestedAttributes']: req_attr_nameformat_str = req_attr_friendlyname_str = req_attr_isrequired_str = '' req_attr_aux_str = ' />' if 'nameFormat' in req_attribs.keys() and req_attribs['nameFormat']: req_attr_nameformat_str = " NameFormat=\"%s\"" % req_attribs['nameFormat'] if 'friendlyName' in req_attribs.keys() and req_attribs['friendlyName']: req_attr_friendlyname_str = " FriendlyName=\"%s\"" % req_attribs['friendlyName'] if 'isRequired' in req_attribs.keys() and req_attribs['isRequired']: req_attr_isrequired_str = " isRequired=\"%s\"" % 'true' if req_attribs['isRequired'] else 'false' if 'attributeValue' in req_attribs.keys() and req_attribs['attributeValue']: if isinstance(req_attribs['attributeValue'], basestring): req_attribs['attributeValue'] = [req_attribs['attributeValue']] req_attr_aux_str = ">" for attrValue in req_attribs['attributeValue']: req_attr_aux_str += """ <saml:AttributeValue xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion">%(attributeValue)s</saml:AttributeValue>""" % \ { 'attributeValue': attrValue } req_attr_aux_str += """ </md:RequestedAttribute>""" requested_attribute = """ <md:RequestedAttribute Name="%(req_attr_name)s"%(req_attr_nameformat_str)s%(req_attr_friendlyname_str)s%(req_attr_isrequired_str)s%(req_attr_aux_str)s""" % \ { 'req_attr_name': req_attribs['name'], 'req_attr_nameformat_str': req_attr_nameformat_str, 'req_attr_friendlyname_str': req_attr_friendlyname_str, 'req_attr_isrequired_str': req_attr_isrequired_str, 'req_attr_aux_str': req_attr_aux_str } requested_attribute_data.append(requested_attribute) str_attribute_consuming_service = """ <md:AttributeConsumingService index="1"> <md:ServiceName xml:lang="en">%(service_name)s</md:ServiceName> %(attr_cs_desc)s%(requested_attribute_str)s </md:AttributeConsumingService> """ % \ { 'service_name': sp['attributeConsumingService']['serviceName'], 'attr_cs_desc': attr_cs_desc_str, 'requested_attribute_str': '\n'.join(requested_attribute_data) } sls = '' if 'singleLogoutService' in sp and 'url' in sp['singleLogoutService']: sls = """ <md:SingleLogoutService Binding="%(binding)s" Location="%(location)s" />\n""" % \ { 'binding': sp['singleLogoutService']['binding'], 'location': sp['singleLogoutService']['url'], } str_authnsign = 'true' if authnsign else 'false' str_wsign = 'true' if wsign else 'false' str_organization = '' if len(organization) > 0: organization_names = [] organization_displaynames = [] organization_urls = [] for (lang, info) in organization.items(): organization_names.append(""" <md:OrganizationName xml:lang="%s">%s</md:OrganizationName>""" % (lang, info['name'])) organization_displaynames.append(""" <md:OrganizationDisplayName xml:lang="%s">%s</md:OrganizationDisplayName>""" % (lang, info['displayname'])) organization_urls.append(""" <md:OrganizationURL xml:lang="%s">%s</md:OrganizationURL>""" % (lang, info['url'])) org_data = '\n'.join(organization_names) + '\n' + '\n'.join(organization_displaynames) + '\n' + '\n'.join(organization_urls) str_organization = """ <md:Organization> %(org)s </md:Organization>\n""" % {'org': org_data} str_contacts = '' if len(contacts) > 0: contacts_info = [] for (ctype, info) in contacts.items(): contact = """ <md:ContactPerson contactType="%(type)s"> <md:GivenName>%(name)s</md:GivenName> <md:EmailAddress>%(email)s</md:EmailAddress> </md:ContactPerson>""" % \ { 'type': ctype, 'name': info['givenName'], 'email': info['emailAddress'], } contacts_info.append(contact) str_contacts = '\n'.join(contacts_info) + '\n' metadata = u"""<?xml version="1.0"?> <md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" %(valid)s %(cache)s entityID="%(entity_id)s"> <md:SPSSODescriptor AuthnRequestsSigned="%(authnsign)s" WantAssertionsSigned="%(wsign)s" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol"> %(sls)s <md:NameIDFormat>%(name_id_format)s</md:NameIDFormat> <md:AssertionConsumerService Binding="%(binding)s" Location="%(location)s" index="1" /> %(attribute_consuming_service)s </md:SPSSODescriptor> %(organization)s%(contacts)s</md:EntityDescriptor>""" % \ { 'valid': ('validUntil="%s"' % valid_until_str) if valid_until_str else '', 'cache': ('cacheDuration="%s"' % cache_duration_str) if cache_duration_str else '', 'entity_id': sp['entityId'], 'authnsign': str_authnsign, 'wsign': str_wsign, 'name_id_format': sp['NameIDFormat'], 'binding': sp['assertionConsumerService']['binding'], 'location': sp['assertionConsumerService']['url'], 'sls': sls, 'organization': str_organization, 'contacts': str_contacts, 'attribute_consuming_service': str_attribute_consuming_service } return metadata
['def', 'builder', '(', 'sp', ',', 'authnsign', '=', 'False', ',', 'wsign', '=', 'False', ',', 'valid_until', '=', 'None', ',', 'cache_duration', '=', 'None', ',', 'contacts', '=', 'None', ',', 'organization', '=', 'None', ')', ':', 'if', 'valid_until', 'is', 'None', ':', 'valid_until', '=', 'int', '(', 'time', '(', ')', ')', '+', 'OneLogin_Saml2_Metadata', '.', 'TIME_VALID', 'if', 'not', 'isinstance', '(', 'valid_until', ',', 'basestring', ')', ':', 'if', 'isinstance', '(', 'valid_until', ',', 'datetime', ')', ':', 'valid_until_time', '=', 'valid_until', '.', 'timetuple', '(', ')', 'else', ':', 'valid_until_time', '=', 'gmtime', '(', 'valid_until', ')', 'valid_until_str', '=', 'strftime', '(', "r'%Y-%m-%dT%H:%M:%SZ'", ',', 'valid_until_time', ')', 'else', ':', 'valid_until_str', '=', 'valid_until', 'if', 'cache_duration', 'is', 'None', ':', 'cache_duration', '=', 'OneLogin_Saml2_Metadata', '.', 'TIME_CACHED', 'if', 'not', 'isinstance', '(', 'cache_duration', ',', 'basestring', ')', ':', 'cache_duration_str', '=', "'PT%sS'", '%', 'cache_duration', "# 'P'eriod of 'T'ime x 'S'econds", 'else', ':', 'cache_duration_str', '=', 'cache_duration', 'if', 'contacts', 'is', 'None', ':', 'contacts', '=', '{', '}', 'if', 'organization', 'is', 'None', ':', 'organization', '=', '{', '}', 'str_attribute_consuming_service', '=', "''", 'if', "'attributeConsumingService'", 'in', 'sp', 'and', 'len', '(', 'sp', '[', "'attributeConsumingService'", ']', ')', ':', 'attr_cs_desc_str', '=', "''", 'if', '"serviceDescription"', 'in', 'sp', '[', "'attributeConsumingService'", ']', ':', 'attr_cs_desc_str', '=', '""" <md:ServiceDescription xml:lang="en">%s</md:ServiceDescription>\n"""', '%', 'sp', '[', "'attributeConsumingService'", ']', '[', "'serviceDescription'", ']', 'requested_attribute_data', '=', '[', ']', 'for', 'req_attribs', 'in', 'sp', '[', "'attributeConsumingService'", ']', '[', "'requestedAttributes'", ']', ':', 'req_attr_nameformat_str', '=', 'req_attr_friendlyname_str', '=', 'req_attr_isrequired_str', '=', "''", 'req_attr_aux_str', '=', "' />'", 'if', "'nameFormat'", 'in', 'req_attribs', '.', 'keys', '(', ')', 'and', 'req_attribs', '[', "'nameFormat'", ']', ':', 'req_attr_nameformat_str', '=', '" NameFormat=\\"%s\\""', '%', 'req_attribs', '[', "'nameFormat'", ']', 'if', "'friendlyName'", 'in', 'req_attribs', '.', 'keys', '(', ')', 'and', 'req_attribs', '[', "'friendlyName'", ']', ':', 'req_attr_friendlyname_str', '=', '" FriendlyName=\\"%s\\""', '%', 'req_attribs', '[', "'friendlyName'", ']', 'if', "'isRequired'", 'in', 'req_attribs', '.', 'keys', '(', ')', 'and', 'req_attribs', '[', "'isRequired'", ']', ':', 'req_attr_isrequired_str', '=', '" isRequired=\\"%s\\""', '%', "'true'", 'if', 'req_attribs', '[', "'isRequired'", ']', 'else', "'false'", 'if', "'attributeValue'", 'in', 'req_attribs', '.', 'keys', '(', ')', 'and', 'req_attribs', '[', "'attributeValue'", ']', ':', 'if', 'isinstance', '(', 'req_attribs', '[', "'attributeValue'", ']', ',', 'basestring', ')', ':', 'req_attribs', '[', "'attributeValue'", ']', '=', '[', 'req_attribs', '[', "'attributeValue'", ']', ']', 'req_attr_aux_str', '=', '">"', 'for', 'attrValue', 'in', 'req_attribs', '[', "'attributeValue'", ']', ':', 'req_attr_aux_str', '+=', '"""\n <saml:AttributeValue xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion">%(attributeValue)s</saml:AttributeValue>"""', '%', '{', "'attributeValue'", ':', 'attrValue', '}', 'req_attr_aux_str', '+=', '"""\n </md:RequestedAttribute>"""', 'requested_attribute', '=', '""" <md:RequestedAttribute Name="%(req_attr_name)s"%(req_attr_nameformat_str)s%(req_attr_friendlyname_str)s%(req_attr_isrequired_str)s%(req_attr_aux_str)s"""', '%', '{', "'req_attr_name'", ':', 'req_attribs', '[', "'name'", ']', ',', "'req_attr_nameformat_str'", ':', 'req_attr_nameformat_str', ',', "'req_attr_friendlyname_str'", ':', 'req_attr_friendlyname_str', ',', "'req_attr_isrequired_str'", ':', 'req_attr_isrequired_str', ',', "'req_attr_aux_str'", ':', 'req_attr_aux_str', '}', 'requested_attribute_data', '.', 'append', '(', 'requested_attribute', ')', 'str_attribute_consuming_service', '=', '""" <md:AttributeConsumingService index="1">\n <md:ServiceName xml:lang="en">%(service_name)s</md:ServiceName>\n%(attr_cs_desc)s%(requested_attribute_str)s\n </md:AttributeConsumingService>\n"""', '%', '{', "'service_name'", ':', 'sp', '[', "'attributeConsumingService'", ']', '[', "'serviceName'", ']', ',', "'attr_cs_desc'", ':', 'attr_cs_desc_str', ',', "'requested_attribute_str'", ':', "'\\n'", '.', 'join', '(', 'requested_attribute_data', ')', '}', 'sls', '=', "''", 'if', "'singleLogoutService'", 'in', 'sp', 'and', "'url'", 'in', 'sp', '[', "'singleLogoutService'", ']', ':', 'sls', '=', '""" <md:SingleLogoutService Binding="%(binding)s"\n Location="%(location)s" />\\n"""', '%', '{', "'binding'", ':', 'sp', '[', "'singleLogoutService'", ']', '[', "'binding'", ']', ',', "'location'", ':', 'sp', '[', "'singleLogoutService'", ']', '[', "'url'", ']', ',', '}', 'str_authnsign', '=', "'true'", 'if', 'authnsign', 'else', "'false'", 'str_wsign', '=', "'true'", 'if', 'wsign', 'else', "'false'", 'str_organization', '=', "''", 'if', 'len', '(', 'organization', ')', '>', '0', ':', 'organization_names', '=', '[', ']', 'organization_displaynames', '=', '[', ']', 'organization_urls', '=', '[', ']', 'for', '(', 'lang', ',', 'info', ')', 'in', 'organization', '.', 'items', '(', ')', ':', 'organization_names', '.', 'append', '(', '""" <md:OrganizationName xml:lang="%s">%s</md:OrganizationName>"""', '%', '(', 'lang', ',', 'info', '[', "'name'", ']', ')', ')', 'organization_displaynames', '.', 'append', '(', '""" <md:OrganizationDisplayName xml:lang="%s">%s</md:OrganizationDisplayName>"""', '%', '(', 'lang', ',', 'info', '[', "'displayname'", ']', ')', ')', 'organization_urls', '.', 'append', '(', '""" <md:OrganizationURL xml:lang="%s">%s</md:OrganizationURL>"""', '%', '(', 'lang', ',', 'info', '[', "'url'", ']', ')', ')', 'org_data', '=', "'\\n'", '.', 'join', '(', 'organization_names', ')', '+', "'\\n'", '+', "'\\n'", '.', 'join', '(', 'organization_displaynames', ')', '+', "'\\n'", '+', "'\\n'", '.', 'join', '(', 'organization_urls', ')', 'str_organization', '=', '""" <md:Organization>\n%(org)s\n </md:Organization>\\n"""', '%', '{', "'org'", ':', 'org_data', '}', 'str_contacts', '=', "''", 'if', 'len', '(', 'contacts', ')', '>', '0', ':', 'contacts_info', '=', '[', ']', 'for', '(', 'ctype', ',', 'info', ')', 'in', 'contacts', '.', 'items', '(', ')', ':', 'contact', '=', '""" <md:ContactPerson contactType="%(type)s">\n <md:GivenName>%(name)s</md:GivenName>\n <md:EmailAddress>%(email)s</md:EmailAddress>\n </md:ContactPerson>"""', '%', '{', "'type'", ':', 'ctype', ',', "'name'", ':', 'info', '[', "'givenName'", ']', ',', "'email'", ':', 'info', '[', "'emailAddress'", ']', ',', '}', 'contacts_info', '.', 'append', '(', 'contact', ')', 'str_contacts', '=', "'\\n'", '.', 'join', '(', 'contacts_info', ')', '+', "'\\n'", 'metadata', '=', 'u"""<?xml version="1.0"?>\n<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata"\n %(valid)s\n %(cache)s\n entityID="%(entity_id)s">\n <md:SPSSODescriptor AuthnRequestsSigned="%(authnsign)s" WantAssertionsSigned="%(wsign)s" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">\n%(sls)s <md:NameIDFormat>%(name_id_format)s</md:NameIDFormat>\n <md:AssertionConsumerService Binding="%(binding)s"\n Location="%(location)s"\n index="1" />\n%(attribute_consuming_service)s </md:SPSSODescriptor>\n%(organization)s%(contacts)s</md:EntityDescriptor>"""', '%', '{', "'valid'", ':', '(', '\'validUntil="%s"\'', '%', 'valid_until_str', ')', 'if', 'valid_until_str', 'else', "''", ',', "'cache'", ':', '(', '\'cacheDuration="%s"\'', '%', 'cache_duration_str', ')', 'if', 'cache_duration_str', 'else', "''", ',', "'entity_id'", ':', 'sp', '[', "'entityId'", ']', ',', "'authnsign'", ':', 'str_authnsign', ',', "'wsign'", ':', 'str_wsign', ',', "'name_id_format'", ':', 'sp', '[', "'NameIDFormat'", ']', ',', "'binding'", ':', 'sp', '[', "'assertionConsumerService'", ']', '[', "'binding'", ']', ',', "'location'", ':', 'sp', '[', "'assertionConsumerService'", ']', '[', "'url'", ']', ',', "'sls'", ':', 'sls', ',', "'organization'", ':', 'str_organization', ',', "'contacts'", ':', 'str_contacts', ',', "'attribute_consuming_service'", ':', 'str_attribute_consuming_service', '}', 'return', 'metadata']
Builds the metadata of the SP :param sp: The SP data :type sp: string :param authnsign: authnRequestsSigned attribute :type authnsign: string :param wsign: wantAssertionsSigned attribute :type wsign: string :param valid_until: Metadata's expiry date :type valid_until: string|DateTime|Timestamp :param cache_duration: Duration of the cache in seconds :type cache_duration: int|string :param contacts: Contacts info :type contacts: dict :param organization: Organization info :type organization: dict
['Builds', 'the', 'metadata', 'of', 'the', 'SP']
train
https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/metadata.py#L31-L202
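Editor's note: a hedged call sketch for the builder record above. The sp dict carries only the keys the builder actually reads, with placeholder values, and the optional contacts/organization blocks are omitted; the python-saml package targets Python 2 (note the basestring/u"" usage above):

    # Hypothetical minimal input for OneLogin_Saml2_Metadata.builder();
    # all values below are placeholders.
    from onelogin.saml2.metadata import OneLogin_Saml2_Metadata

    sp = {
        'entityId': 'https://sp.example.com/metadata/',
        'NameIDFormat': 'urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified',
        'assertionConsumerService': {
            'url': 'https://sp.example.com/acs',
            'binding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST',
        },
    }
    xml = OneLogin_Saml2_Metadata.builder(sp, authnsign=True)  # returns metadata XML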
8,164
juju-solutions/charms.reactive
charms/reactive/__init__.py
main
def main(relation_name=None): """ This is the main entry point for the reactive framework. It calls :func:`~bus.discover` to find and load all reactive handlers (e.g., :func:`@when <decorators.when>` decorated blocks), and then :func:`~bus.dispatch` to trigger handlers until the queue settles out. Finally, :meth:`unitdata.kv().flush <charmhelpers.core.unitdata.Storage.flush>` is called to persist the flags and other data. :param str relation_name: Optional name of the relation which is being handled. """ hook_name = hookenv.hook_name() restricted_mode = hook_name in ['meter-status-changed', 'collect-metrics'] hookenv.log('Reactive main running for hook %s' % hookenv.hook_name(), level=hookenv.INFO) if restricted_mode: hookenv.log('Restricted mode.', level=hookenv.INFO) # work-around for https://bugs.launchpad.net/juju-core/+bug/1503039 # ensure that external handlers can tell what hook they're running in if 'JUJU_HOOK_NAME' not in os.environ: os.environ['JUJU_HOOK_NAME'] = hook_name try: bus.discover() if not restricted_mode: # limit what gets run in restricted mode hookenv._run_atstart() bus.dispatch(restricted=restricted_mode) except Exception: tb = traceback.format_exc() hookenv.log('Hook error:\n{}'.format(tb), level=hookenv.ERROR) raise except SystemExit as x: if x.code not in (None, 0): raise if not restricted_mode: # limit what gets run in restricted mode hookenv._run_atexit() unitdata._KV.flush()
python
def main(relation_name=None): """ This is the main entry point for the reactive framework. It calls :func:`~bus.discover` to find and load all reactive handlers (e.g., :func:`@when <decorators.when>` decorated blocks), and then :func:`~bus.dispatch` to trigger handlers until the queue settles out. Finally, :meth:`unitdata.kv().flush <charmhelpers.core.unitdata.Storage.flush>` is called to persist the flags and other data. :param str relation_name: Optional name of the relation which is being handled. """ hook_name = hookenv.hook_name() restricted_mode = hook_name in ['meter-status-changed', 'collect-metrics'] hookenv.log('Reactive main running for hook %s' % hookenv.hook_name(), level=hookenv.INFO) if restricted_mode: hookenv.log('Restricted mode.', level=hookenv.INFO) # work-around for https://bugs.launchpad.net/juju-core/+bug/1503039 # ensure that external handlers can tell what hook they're running in if 'JUJU_HOOK_NAME' not in os.environ: os.environ['JUJU_HOOK_NAME'] = hook_name try: bus.discover() if not restricted_mode: # limit what gets run in restricted mode hookenv._run_atstart() bus.dispatch(restricted=restricted_mode) except Exception: tb = traceback.format_exc() hookenv.log('Hook error:\n{}'.format(tb), level=hookenv.ERROR) raise except SystemExit as x: if x.code not in (None, 0): raise if not restricted_mode: # limit what gets run in restricted mode hookenv._run_atexit() unitdata._KV.flush()
['def', 'main', '(', 'relation_name', '=', 'None', ')', ':', 'hook_name', '=', 'hookenv', '.', 'hook_name', '(', ')', 'restricted_mode', '=', 'hook_name', 'in', '[', "'meter-status-changed'", ',', "'collect-metrics'", ']', 'hookenv', '.', 'log', '(', "'Reactive main running for hook %s'", '%', 'hookenv', '.', 'hook_name', '(', ')', ',', 'level', '=', 'hookenv', '.', 'INFO', ')', 'if', 'restricted_mode', ':', 'hookenv', '.', 'log', '(', "'Restricted mode.'", ',', 'level', '=', 'hookenv', '.', 'INFO', ')', '# work-around for https://bugs.launchpad.net/juju-core/+bug/1503039', "# ensure that external handlers can tell what hook they're running in", 'if', "'JUJU_HOOK_NAME'", 'not', 'in', 'os', '.', 'environ', ':', 'os', '.', 'environ', '[', "'JUJU_HOOK_NAME'", ']', '=', 'hook_name', 'try', ':', 'bus', '.', 'discover', '(', ')', 'if', 'not', 'restricted_mode', ':', '# limit what gets run in restricted mode', 'hookenv', '.', '_run_atstart', '(', ')', 'bus', '.', 'dispatch', '(', 'restricted', '=', 'restricted_mode', ')', 'except', 'Exception', ':', 'tb', '=', 'traceback', '.', 'format_exc', '(', ')', 'hookenv', '.', 'log', '(', "'Hook error:\\n{}'", '.', 'format', '(', 'tb', ')', ',', 'level', '=', 'hookenv', '.', 'ERROR', ')', 'raise', 'except', 'SystemExit', 'as', 'x', ':', 'if', 'x', '.', 'code', 'not', 'in', '(', 'None', ',', '0', ')', ':', 'raise', 'if', 'not', 'restricted_mode', ':', '# limit what gets run in restricted mode', 'hookenv', '.', '_run_atexit', '(', ')', 'unitdata', '.', '_KV', '.', 'flush', '(', ')']
This is the main entry point for the reactive framework. It calls :func:`~bus.discover` to find and load all reactive handlers (e.g., :func:`@when <decorators.when>` decorated blocks), and then :func:`~bus.dispatch` to trigger handlers until the queue settles out. Finally, :meth:`unitdata.kv().flush <charmhelpers.core.unitdata.Storage.flush>` is called to persist the flags and other data. :param str relation_name: Optional name of the relation which is being handled.
['This', 'is', 'the', 'main', 'entry', 'point', 'for', 'the', 'reactive', 'framework', '.', 'It', 'calls', ':', 'func', ':', '~bus', '.', 'discover', 'to', 'find', 'and', 'load', 'all', 'reactive', 'handlers', '(', 'e', '.', 'g', '.', ':', 'func', ':', '@when', '<decorators', '.', 'when', '>', 'decorated', 'blocks', ')', 'and', 'then', ':', 'func', ':', '~bus', '.', 'dispatch', 'to', 'trigger', 'handlers', 'until', 'the', 'queue', 'settles', 'out', '.', 'Finally', ':', 'meth', ':', 'unitdata', '.', 'kv', '()', '.', 'flush', '<charmhelpers', '.', 'core', '.', 'unitdata', '.', 'Storage', '.', 'flush', '>', 'is', 'called', 'to', 'persist', 'the', 'flags', 'and', 'other', 'data', '.']
train
https://github.com/juju-solutions/charms.reactive/blob/e37e781432e77c12b63d2c739bd6cd70d3230c3a/charms/reactive/__init__.py#L46-L84
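Editor's note: in practice each hook file of a reactive charm is a thin shim around the main() entry point in the record above. A sketch of such a shim (hooks/install, hooks/config-changed, etc. are typically symlinks to one file like this):

    #!/usr/bin/env python3
    # Typical reactive hook shim delegating to the framework entry point.
    from charms.reactive import main

    main()  # discover handlers, dispatch until flags settle, flush unitdata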
8,165
uw-it-aca/uw-restclients-sws
uw_sws/term.py
get_term_by_date
def get_term_by_date(date): """ Returns a term for the datetime.date object given. """ year = date.year term = None for quarter in ('autumn', 'summer', 'spring', 'winter'): term = get_term_by_year_and_quarter(year, quarter) if date >= term.first_day_quarter: break # If we're in a year, before the start of winter quarter, we need to go # to the previous year's autumn term: if date < term.first_day_quarter: term = get_term_by_year_and_quarter(year - 1, 'autumn') # Autumn quarter should always last through the end of the year, # with winter of the next year starting in January. But this makes sure # we catch it if not. term_after = get_term_after(term) if term_after.first_day_quarter > date: return term else: return term_after
python
def get_term_by_date(date): """ Returns a term for the datetime.date object given. """ year = date.year term = None for quarter in ('autumn', 'summer', 'spring', 'winter'): term = get_term_by_year_and_quarter(year, quarter) if date >= term.first_day_quarter: break # If we're in a year, before the start of winter quarter, we need to go # to the previous year's autumn term: if date < term.first_day_quarter: term = get_term_by_year_and_quarter(year - 1, 'autumn') # Autumn quarter should always last through the end of the year, # with winter of the next year starting in January. But this makes sure # we catch it if not. term_after = get_term_after(term) if term_after.first_day_quarter > date: return term else: return term_after
['def', 'get_term_by_date', '(', 'date', ')', ':', 'year', '=', 'date', '.', 'year', 'term', '=', 'None', 'for', 'quarter', 'in', '(', "'autumn'", ',', "'summer'", ',', "'spring'", ',', "'winter'", ')', ':', 'term', '=', 'get_term_by_year_and_quarter', '(', 'year', ',', 'quarter', ')', 'if', 'date', '>=', 'term', '.', 'first_day_quarter', ':', 'break', "# If we're in a year, before the start of winter quarter, we need to go", "# to the previous year's autumn term:", 'if', 'date', '<', 'term', '.', 'first_day_quarter', ':', 'term', '=', 'get_term_by_year_and_quarter', '(', 'year', '-', '1', ',', "'autumn'", ')', '# Autumn quarter should always last through the end of the year,', '# with winter of the next year starting in January. But this makes sure', '# we catch it if not.', 'term_after', '=', 'get_term_after', '(', 'term', ')', 'if', 'term_after', '.', 'first_day_quarter', '>', 'date', ':', 'return', 'term', 'else', ':', 'return', 'term_after']
Returns a term for the datetime.date object given.
['Returns', 'a', 'term', 'for', 'the', 'datetime', '.', 'date', 'object', 'given', '.']
train
https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/term.py#L91-L118
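Editor's note: a hedged usage sketch for the get_term_by_date record above. The function queries the SWS term resource, so a configured uw_sws REST client setup is assumed, and the date is arbitrary:

    # Illustrative call; assumes uw_sws REST client configuration is in place.
    from datetime import date
    from uw_sws.term import get_term_by_date

    term = get_term_by_date(date(2019, 1, 2))
    # Early January resolves to winter, or to the prior autumn term when the
    # date still precedes winter's first_day_quarter.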
8,166
jgillick/LendingClub
lendingclub/session.py
Session.get
def get(self, path, query=None, redirects=True): """ GET request wrapper for :func:`request()` """ return self.request('GET', path, query, None, redirects)
python
def get(self, path, query=None, redirects=True): """ GET request wrapper for :func:`request()` """ return self.request('GET', path, query, None, redirects)
['def', 'get', '(', 'self', ',', 'path', ',', 'query', '=', 'None', ',', 'redirects', '=', 'True', ')', ':', 'return', 'self', '.', 'request', '(', "'GET'", ',', 'path', ',', 'query', ',', 'None', ',', 'redirects', ')']
GET request wrapper for :func:`request()`
['GET', 'request', 'wrapper', 'for', ':', 'func', ':', 'request', '()']
train
https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/session.py#L298-L302
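Editor's note: a hedged sketch of the Session.get wrapper in use. The authentication step, path, and query values are assumptions; only the get()/request() relationship comes from the record:

    # Hypothetical call; credentials, path and query are placeholders.
    from lendingclub.session import Session

    session = Session()
    session.authenticate('user@example.com', 'password')  # assumed auth step
    payload = session.get('/summary', query={'page': 1})
    # equivalent to: session.request('GET', '/summary', {'page': 1}, None, True)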
8,167
ltworf/typedload
typedload/dataloader.py
_tupleload
def _tupleload(l: Loader, value, type_) -> Tuple: """ This loads into something like Tuple[int,str] """ if HAS_TUPLEARGS: args = type_.__args__ else: args = type_.__tuple_params__ if len(args) == 2 and args[1] == ...: # Tuple[something, ...] return tuple(l.load(i, args[0]) for i in value) else: # Tuple[something, something, somethingelse] if l.failonextra and len(value) > len(args): raise TypedloadValueError('Value is too long for type %s' % type_, value=value, type_=type_) elif len(value) < len(args): raise TypedloadValueError('Value is too short for type %s' % type_, value=value, type_=type_) return tuple(l.load(v, t, annotation=Annotation(AnnotationType.INDEX, i)) for i, (v, t) in enumerate(zip(value, args)))
python
def _tupleload(l: Loader, value, type_) -> Tuple: """ This loads into something like Tuple[int,str] """ if HAS_TUPLEARGS: args = type_.__args__ else: args = type_.__tuple_params__ if len(args) == 2 and args[1] == ...: # Tuple[something, ...] return tuple(l.load(i, args[0]) for i in value) else: # Tuple[something, something, somethingelse] if l.failonextra and len(value) > len(args): raise TypedloadValueError('Value is too long for type %s' % type_, value=value, type_=type_) elif len(value) < len(args): raise TypedloadValueError('Value is too short for type %s' % type_, value=value, type_=type_) return tuple(l.load(v, t, annotation=Annotation(AnnotationType.INDEX, i)) for i, (v, t) in enumerate(zip(value, args)))
['def', '_tupleload', '(', 'l', ':', 'Loader', ',', 'value', ',', 'type_', ')', '->', 'Tuple', ':', 'if', 'HAS_TUPLEARGS', ':', 'args', '=', 'type_', '.', '__args__', 'else', ':', 'args', '=', 'type_', '.', '__tuple_params__', 'if', 'len', '(', 'args', ')', '==', '2', 'and', 'args', '[', '1', ']', '==', '...', ':', '# Tuple[something, ...]', 'return', 'tuple', '(', 'l', '.', 'load', '(', 'i', ',', 'args', '[', '0', ']', ')', 'for', 'i', 'in', 'value', ')', 'else', ':', '# Tuple[something, something, somethingelse]', 'if', 'l', '.', 'failonextra', 'and', 'len', '(', 'value', ')', '>', 'len', '(', 'args', ')', ':', 'raise', 'TypedloadValueError', '(', "'Value is too long for type %s'", '%', 'type_', ',', 'value', '=', 'value', ',', 'type_', '=', 'type_', ')', 'elif', 'len', '(', 'value', ')', '<', 'len', '(', 'args', ')', ':', 'raise', 'TypedloadValueError', '(', "'Value is too short for type %s'", '%', 'type_', ',', 'value', '=', 'value', ',', 'type_', '=', 'type_', ')', 'return', 'tuple', '(', 'l', '.', 'load', '(', 'v', ',', 't', ',', 'annotation', '=', 'Annotation', '(', 'AnnotationType', '.', 'INDEX', ',', 'i', ')', ')', 'for', 'i', ',', '(', 'v', ',', 't', ')', 'in', 'enumerate', '(', 'zip', '(', 'value', ',', 'args', ')', ')', ')']
This loads into something like Tuple[int,str]
['This', 'loads', 'into', 'something', 'like', 'Tuple', '[', 'int', 'str', ']']
train
https://github.com/ltworf/typedload/blob/7fd130612963bfcec3242698463ef863ca4af927/typedload/dataloader.py#L312-L328
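Editor's note: seen through typedload's public entry point, which dispatches Tuple[...] annotations to the _tupleload handler above; both the fixed-length and variadic branches are exercised:

    # Fixed-length and variadic tuple annotations, as handled by _tupleload.
    from typing import Tuple
    import typedload

    print(typedload.load([1, 'a'], Tuple[int, str]))   # (1, 'a')
    print(typedload.load([1, 2, 3], Tuple[int, ...]))  # (1, 2, 3)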
8,168
myint/unify
unify.py
detect_encoding
def detect_encoding(filename): """Return file encoding.""" try: with open(filename, 'rb') as input_file: from lib2to3.pgen2 import tokenize as lib2to3_tokenize encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0] # Check for correctness of encoding. with open_with_encoding(filename, encoding) as input_file: input_file.read() return encoding except (SyntaxError, LookupError, UnicodeDecodeError): return 'latin-1'
python
def detect_encoding(filename): """Return file encoding.""" try: with open(filename, 'rb') as input_file: from lib2to3.pgen2 import tokenize as lib2to3_tokenize encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0] # Check for correctness of encoding. with open_with_encoding(filename, encoding) as input_file: input_file.read() return encoding except (SyntaxError, LookupError, UnicodeDecodeError): return 'latin-1'
['def', 'detect_encoding', '(', 'filename', ')', ':', 'try', ':', 'with', 'open', '(', 'filename', ',', "'rb'", ')', 'as', 'input_file', ':', 'from', 'lib2to3', '.', 'pgen2', 'import', 'tokenize', 'as', 'lib2to3_tokenize', 'encoding', '=', 'lib2to3_tokenize', '.', 'detect_encoding', '(', 'input_file', '.', 'readline', ')', '[', '0', ']', '# Check for correctness of encoding.', 'with', 'open_with_encoding', '(', 'filename', ',', 'encoding', ')', 'as', 'input_file', ':', 'input_file', '.', 'read', '(', ')', 'return', 'encoding', 'except', '(', 'SyntaxError', ',', 'LookupError', ',', 'UnicodeDecodeError', ')', ':', 'return', "'latin-1'"]
Return file encoding.
['Return', 'file', 'encoding', '.']
train
https://github.com/myint/unify/blob/ae699f5980a715cadc4a2f07bf16d11083c59401/unify.py#L119-L132
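Editor's note: a short usage sketch for the detect_encoding record above; the file path is a placeholder:

    # Hypothetical usage: open a source file with whatever encoding is
    # detected, falling back to 'latin-1' when detection fails (as the
    # function guarantees).
    from unify import detect_encoding

    encoding = detect_encoding('example.py')
    with open('example.py', encoding=encoding) as f:
        source = f.read()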
8,169
mongodb/mongo-python-driver
pymongo/collection.py
Collection.estimated_document_count
def estimated_document_count(self, **kwargs): """Get an estimate of the number of documents in this collection using collection metadata. The :meth:`estimated_document_count` method is **not** supported in a transaction. All optional parameters should be passed as keyword arguments to this method. Valid options include: - `maxTimeMS` (int): The maximum amount of time to allow this operation to run, in milliseconds. :Parameters: - `**kwargs` (optional): See list of options above. .. versionadded:: 3.7 """ if 'session' in kwargs: raise ConfigurationError( 'estimated_document_count does not support sessions') cmd = SON([('count', self.__name)]) cmd.update(kwargs) return self._count(cmd)
python
def estimated_document_count(self, **kwargs): """Get an estimate of the number of documents in this collection using collection metadata. The :meth:`estimated_document_count` method is **not** supported in a transaction. All optional parameters should be passed as keyword arguments to this method. Valid options include: - `maxTimeMS` (int): The maximum amount of time to allow this operation to run, in milliseconds. :Parameters: - `**kwargs` (optional): See list of options above. .. versionadded:: 3.7 """ if 'session' in kwargs: raise ConfigurationError( 'estimated_document_count does not support sessions') cmd = SON([('count', self.__name)]) cmd.update(kwargs) return self._count(cmd)
['def', 'estimated_document_count', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'if', "'session'", 'in', 'kwargs', ':', 'raise', 'ConfigurationError', '(', "'estimated_document_count does not support sessions'", ')', 'cmd', '=', 'SON', '(', '[', '(', "'count'", ',', 'self', '.', '__name', ')', ']', ')', 'cmd', '.', 'update', '(', 'kwargs', ')', 'return', 'self', '.', '_count', '(', 'cmd', ')']
Get an estimate of the number of documents in this collection using collection metadata. The :meth:`estimated_document_count` method is **not** supported in a transaction. All optional parameters should be passed as keyword arguments to this method. Valid options include: - `maxTimeMS` (int): The maximum amount of time to allow this operation to run, in milliseconds. :Parameters: - `**kwargs` (optional): See list of options above. .. versionadded:: 3.7
['Get', 'an', 'estimate', 'of', 'the', 'number', 'of', 'documents', 'in', 'this', 'collection', 'using', 'collection', 'metadata', '.']
train
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/collection.py#L1600-L1623
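Editor's note: a brief pymongo sketch for the record above; database and collection names are placeholders, and maxTimeMS is the option documented in the docstring:

    # Illustrative call; 'testdb' and 'events' are placeholders.
    from pymongo import MongoClient

    client = MongoClient()
    count = client.testdb.events.estimated_document_count(maxTimeMS=500)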
8,170
dmlc/xgboost
python-package/xgboost/rabit.py
broadcast
def broadcast(data, root): """Broadcast object from one node to all other nodes. Parameters ---------- data : any type that can be pickled Input data, if current rank does not equal root, this can be None root : int Rank of the node to broadcast data from. Returns ------- object : int the result of broadcast. """ rank = get_rank() length = ctypes.c_ulong() if root == rank: assert data is not None, 'need to pass in data when broadcasting' s = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL) length.value = len(s) # run first broadcast _LIB.RabitBroadcast(ctypes.byref(length), ctypes.sizeof(ctypes.c_ulong), root) if root != rank: dptr = (ctypes.c_char * length.value)() # run second _LIB.RabitBroadcast(ctypes.cast(dptr, ctypes.c_void_p), length.value, root) data = pickle.loads(dptr.raw) del dptr else: _LIB.RabitBroadcast(ctypes.cast(ctypes.c_char_p(s), ctypes.c_void_p), length.value, root) del s return data
python
def broadcast(data, root): """Broadcast object from one node to all other nodes. Parameters ---------- data : any type that can be pickled Input data, if current rank does not equal root, this can be None root : int Rank of the node to broadcast data from. Returns ------- object : int the result of broadcast. """ rank = get_rank() length = ctypes.c_ulong() if root == rank: assert data is not None, 'need to pass in data when broadcasting' s = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL) length.value = len(s) # run first broadcast _LIB.RabitBroadcast(ctypes.byref(length), ctypes.sizeof(ctypes.c_ulong), root) if root != rank: dptr = (ctypes.c_char * length.value)() # run second _LIB.RabitBroadcast(ctypes.cast(dptr, ctypes.c_void_p), length.value, root) data = pickle.loads(dptr.raw) del dptr else: _LIB.RabitBroadcast(ctypes.cast(ctypes.c_char_p(s), ctypes.c_void_p), length.value, root) del s return data
['def', 'broadcast', '(', 'data', ',', 'root', ')', ':', 'rank', '=', 'get_rank', '(', ')', 'length', '=', 'ctypes', '.', 'c_ulong', '(', ')', 'if', 'root', '==', 'rank', ':', 'assert', 'data', 'is', 'not', 'None', ',', "'need to pass in data when broadcasting'", 's', '=', 'pickle', '.', 'dumps', '(', 'data', ',', 'protocol', '=', 'pickle', '.', 'HIGHEST_PROTOCOL', ')', 'length', '.', 'value', '=', 'len', '(', 's', ')', '# run first broadcast', '_LIB', '.', 'RabitBroadcast', '(', 'ctypes', '.', 'byref', '(', 'length', ')', ',', 'ctypes', '.', 'sizeof', '(', 'ctypes', '.', 'c_ulong', ')', ',', 'root', ')', 'if', 'root', '!=', 'rank', ':', 'dptr', '=', '(', 'ctypes', '.', 'c_char', '*', 'length', '.', 'value', ')', '(', ')', '# run second', '_LIB', '.', 'RabitBroadcast', '(', 'ctypes', '.', 'cast', '(', 'dptr', ',', 'ctypes', '.', 'c_void_p', ')', ',', 'length', '.', 'value', ',', 'root', ')', 'data', '=', 'pickle', '.', 'loads', '(', 'dptr', '.', 'raw', ')', 'del', 'dptr', 'else', ':', '_LIB', '.', 'RabitBroadcast', '(', 'ctypes', '.', 'cast', '(', 'ctypes', '.', 'c_char_p', '(', 's', ')', ',', 'ctypes', '.', 'c_void_p', ')', ',', 'length', '.', 'value', ',', 'root', ')', 'del', 's', 'return', 'data']
Broadcast object from one node to all other nodes. Parameters ---------- data : any type that can be pickled Input data, if current rank does not equal root, this can be None root : int Rank of the node to broadcast data from. Returns ------- object : int the result of broadcast.
['Broadcast', 'object', 'from', 'one', 'node', 'to', 'all', 'other', 'nodes', '.']
train
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/rabit.py#L97-L132
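A usage sketch for broadcast, assuming an older xgboost build that still ships the xgboost.rabit module and that a Rabit tracker launched this script on every worker:

import xgboost.rabit as rabit  # present in older xgboost releases

rabit.init()
# Only the root rank supplies the object; all other ranks pass None.
payload = {'lr': 0.1} if rabit.get_rank() == 0 else None
payload = rabit.broadcast(payload, 0)  # every rank now holds the dict
rabit.finalize()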
8,171
SiLab-Bonn/pyBAR
pybar/fei4/register.py
save_configuration_to_hdf5
def save_configuration_to_hdf5(register, configuration_file, name=''): '''Saving configuration to HDF5 file from register object Parameters ---------- register : pybar.fei4.register object configuration_file : string, file Filename of the HDF5 configuration file or file object. name : string Additional identifier (subgroup). Useful when storing more than one configuration inside a HDF5 file. ''' def save_conf(): logging.info("Saving configuration: %s" % h5_file.filename) register.configuration_file = h5_file.filename try: configuration_group = h5_file.create_group(h5_file.root, "configuration") except tb.NodeError: configuration_group = h5_file.root.configuration if name: try: configuration_group = h5_file.create_group(configuration_group, name) except tb.NodeError: configuration_group = h5_file.root.configuration.name # calibration_parameters try: h5_file.remove_node(configuration_group, name='calibration_parameters') except tb.NodeError: pass calibration_data_table = h5_file.create_table(configuration_group, name='calibration_parameters', description=NameValue, title='calibration_parameters') calibration_data_row = calibration_data_table.row for key, value in register.calibration_parameters.iteritems(): calibration_data_row['name'] = key calibration_data_row['value'] = str(value) calibration_data_row.append() calibration_data_table.flush() # miscellaneous try: h5_file.remove_node(configuration_group, name='miscellaneous') except tb.NodeError: pass miscellaneous_data_table = h5_file.create_table(configuration_group, name='miscellaneous', description=NameValue, title='miscellaneous') miscellaneous_data_row = miscellaneous_data_table.row miscellaneous_data_row['name'] = 'Flavor' miscellaneous_data_row['value'] = register.flavor miscellaneous_data_row.append() miscellaneous_data_row['name'] = 'Chip_ID' miscellaneous_data_row['value'] = register.chip_id miscellaneous_data_row.append() for key, value in register.miscellaneous.iteritems(): miscellaneous_data_row['name'] = key miscellaneous_data_row['value'] = value miscellaneous_data_row.append() miscellaneous_data_table.flush() # global try: h5_file.remove_node(configuration_group, name='global_register') except tb.NodeError: pass global_data_table = h5_file.create_table(configuration_group, name='global_register', description=NameValue, title='global_register') global_data_table_row = global_data_table.row global_regs = register.get_global_register_objects(readonly=False) for global_reg in sorted(global_regs, key=itemgetter('name')): global_data_table_row['name'] = global_reg['name'] global_data_table_row['value'] = global_reg['value'] # TODO: some function that converts to bin, hex global_data_table_row.append() global_data_table.flush() # pixel for pixel_reg in register.pixel_registers.itervalues(): try: h5_file.remove_node(configuration_group, name=pixel_reg['name']) except tb.NodeError: pass data = pixel_reg['value'].T atom = tb.Atom.from_dtype(data.dtype) ds = h5_file.create_carray(configuration_group, name=pixel_reg['name'], atom=atom, shape=data.shape, title=pixel_reg['name']) ds[:] = data if isinstance(configuration_file, tb.file.File): h5_file = configuration_file save_conf() else: with tb.open_file(configuration_file, mode="a", title='') as h5_file: save_conf()
python
def save_configuration_to_hdf5(register, configuration_file, name=''): '''Saving configuration to HDF5 file from register object Parameters ---------- register : pybar.fei4.register object configuration_file : string, file Filename of the HDF5 configuration file or file object. name : string Additional identifier (subgroup). Useful when storing more than one configuration inside a HDF5 file. ''' def save_conf(): logging.info("Saving configuration: %s" % h5_file.filename) register.configuration_file = h5_file.filename try: configuration_group = h5_file.create_group(h5_file.root, "configuration") except tb.NodeError: configuration_group = h5_file.root.configuration if name: try: configuration_group = h5_file.create_group(configuration_group, name) except tb.NodeError: configuration_group = h5_file.root.configuration.name # calibration_parameters try: h5_file.remove_node(configuration_group, name='calibration_parameters') except tb.NodeError: pass calibration_data_table = h5_file.create_table(configuration_group, name='calibration_parameters', description=NameValue, title='calibration_parameters') calibration_data_row = calibration_data_table.row for key, value in register.calibration_parameters.iteritems(): calibration_data_row['name'] = key calibration_data_row['value'] = str(value) calibration_data_row.append() calibration_data_table.flush() # miscellaneous try: h5_file.remove_node(configuration_group, name='miscellaneous') except tb.NodeError: pass miscellaneous_data_table = h5_file.create_table(configuration_group, name='miscellaneous', description=NameValue, title='miscellaneous') miscellaneous_data_row = miscellaneous_data_table.row miscellaneous_data_row['name'] = 'Flavor' miscellaneous_data_row['value'] = register.flavor miscellaneous_data_row.append() miscellaneous_data_row['name'] = 'Chip_ID' miscellaneous_data_row['value'] = register.chip_id miscellaneous_data_row.append() for key, value in register.miscellaneous.iteritems(): miscellaneous_data_row['name'] = key miscellaneous_data_row['value'] = value miscellaneous_data_row.append() miscellaneous_data_table.flush() # global try: h5_file.remove_node(configuration_group, name='global_register') except tb.NodeError: pass global_data_table = h5_file.create_table(configuration_group, name='global_register', description=NameValue, title='global_register') global_data_table_row = global_data_table.row global_regs = register.get_global_register_objects(readonly=False) for global_reg in sorted(global_regs, key=itemgetter('name')): global_data_table_row['name'] = global_reg['name'] global_data_table_row['value'] = global_reg['value'] # TODO: some function that converts to bin, hex global_data_table_row.append() global_data_table.flush() # pixel for pixel_reg in register.pixel_registers.itervalues(): try: h5_file.remove_node(configuration_group, name=pixel_reg['name']) except tb.NodeError: pass data = pixel_reg['value'].T atom = tb.Atom.from_dtype(data.dtype) ds = h5_file.create_carray(configuration_group, name=pixel_reg['name'], atom=atom, shape=data.shape, title=pixel_reg['name']) ds[:] = data if isinstance(configuration_file, tb.file.File): h5_file = configuration_file save_conf() else: with tb.open_file(configuration_file, mode="a", title='') as h5_file: save_conf()
['def', 'save_configuration_to_hdf5', '(', 'register', ',', 'configuration_file', ',', 'name', '=', "''", ')', ':', 'def', 'save_conf', '(', ')', ':', 'logging', '.', 'info', '(', '"Saving configuration: %s"', '%', 'h5_file', '.', 'filename', ')', 'register', '.', 'configuration_file', '=', 'h5_file', '.', 'filename', 'try', ':', 'configuration_group', '=', 'h5_file', '.', 'create_group', '(', 'h5_file', '.', 'root', ',', '"configuration"', ')', 'except', 'tb', '.', 'NodeError', ':', 'configuration_group', '=', 'h5_file', '.', 'root', '.', 'configuration', 'if', 'name', ':', 'try', ':', 'configuration_group', '=', 'h5_file', '.', 'create_group', '(', 'configuration_group', ',', 'name', ')', 'except', 'tb', '.', 'NodeError', ':', 'configuration_group', '=', 'h5_file', '.', 'root', '.', 'configuration', '.', 'name', '# calibration_parameters\r', 'try', ':', 'h5_file', '.', 'remove_node', '(', 'configuration_group', ',', 'name', '=', "'calibration_parameters'", ')', 'except', 'tb', '.', 'NodeError', ':', 'pass', 'calibration_data_table', '=', 'h5_file', '.', 'create_table', '(', 'configuration_group', ',', 'name', '=', "'calibration_parameters'", ',', 'description', '=', 'NameValue', ',', 'title', '=', "'calibration_parameters'", ')', 'calibration_data_row', '=', 'calibration_data_table', '.', 'row', 'for', 'key', ',', 'value', 'in', 'register', '.', 'calibration_parameters', '.', 'iteritems', '(', ')', ':', 'calibration_data_row', '[', "'name'", ']', '=', 'key', 'calibration_data_row', '[', "'value'", ']', '=', 'str', '(', 'value', ')', 'calibration_data_row', '.', 'append', '(', ')', 'calibration_data_table', '.', 'flush', '(', ')', '# miscellaneous\r', 'try', ':', 'h5_file', '.', 'remove_node', '(', 'configuration_group', ',', 'name', '=', "'miscellaneous'", ')', 'except', 'tb', '.', 'NodeError', ':', 'pass', 'miscellaneous_data_table', '=', 'h5_file', '.', 'create_table', '(', 'configuration_group', ',', 'name', '=', "'miscellaneous'", ',', 'description', '=', 'NameValue', ',', 'title', '=', "'miscellaneous'", ')', 'miscellaneous_data_row', '=', 'miscellaneous_data_table', '.', 'row', 'miscellaneous_data_row', '[', "'name'", ']', '=', "'Flavor'", 'miscellaneous_data_row', '[', "'value'", ']', '=', 'register', '.', 'flavor', 'miscellaneous_data_row', '.', 'append', '(', ')', 'miscellaneous_data_row', '[', "'name'", ']', '=', "'Chip_ID'", 'miscellaneous_data_row', '[', "'value'", ']', '=', 'register', '.', 'chip_id', 'miscellaneous_data_row', '.', 'append', '(', ')', 'for', 'key', ',', 'value', 'in', 'register', '.', 'miscellaneous', '.', 'iteritems', '(', ')', ':', 'miscellaneous_data_row', '[', "'name'", ']', '=', 'key', 'miscellaneous_data_row', '[', "'value'", ']', '=', 'value', 'miscellaneous_data_row', '.', 'append', '(', ')', 'miscellaneous_data_table', '.', 'flush', '(', ')', '# global\r', 'try', ':', 'h5_file', '.', 'remove_node', '(', 'configuration_group', ',', 'name', '=', "'global_register'", ')', 'except', 'tb', '.', 'NodeError', ':', 'pass', 'global_data_table', '=', 'h5_file', '.', 'create_table', '(', 'configuration_group', ',', 'name', '=', "'global_register'", ',', 'description', '=', 'NameValue', ',', 'title', '=', "'global_register'", ')', 'global_data_table_row', '=', 'global_data_table', '.', 'row', 'global_regs', '=', 'register', '.', 'get_global_register_objects', '(', 'readonly', '=', 'False', ')', 'for', 'global_reg', 'in', 'sorted', '(', 'global_regs', ',', 'key', '=', 'itemgetter', '(', "'name'", ')', ')', ':', 'global_data_table_row', '[', "'name'", ']', '=', 'global_reg', '[', "'name'", ']', 'global_data_table_row', '[', "'value'", ']', '=', 'global_reg', '[', "'value'", ']', '# TODO: some function that converts to bin, hex\r', 'global_data_table_row', '.', 'append', '(', ')', 'global_data_table', '.', 'flush', '(', ')', '# pixel\r', 'for', 'pixel_reg', 'in', 'register', '.', 'pixel_registers', '.', 'itervalues', '(', ')', ':', 'try', ':', 'h5_file', '.', 'remove_node', '(', 'configuration_group', ',', 'name', '=', 'pixel_reg', '[', "'name'", ']', ')', 'except', 'tb', '.', 'NodeError', ':', 'pass', 'data', '=', 'pixel_reg', '[', "'value'", ']', '.', 'T', 'atom', '=', 'tb', '.', 'Atom', '.', 'from_dtype', '(', 'data', '.', 'dtype', ')', 'ds', '=', 'h5_file', '.', 'create_carray', '(', 'configuration_group', ',', 'name', '=', 'pixel_reg', '[', "'name'", ']', ',', 'atom', '=', 'atom', ',', 'shape', '=', 'data', '.', 'shape', ',', 'title', '=', 'pixel_reg', '[', "'name'", ']', ')', 'ds', '[', ':', ']', '=', 'data', 'if', 'isinstance', '(', 'configuration_file', ',', 'tb', '.', 'file', '.', 'File', ')', ':', 'h5_file', '=', 'configuration_file', 'save_conf', '(', ')', 'else', ':', 'with', 'tb', '.', 'open_file', '(', 'configuration_file', ',', 'mode', '=', '"a"', ',', 'title', '=', "''", ')', 'as', 'h5_file', ':', 'save_conf', '(', ')']
Saving configuration to HDF5 file from register object Parameters ---------- register : pybar.fei4.register object configuration_file : string, file Filename of the HDF5 configuration file or file object. name : string Additional identifier (subgroup). Useful when storing more than one configuration inside a HDF5 file.
['Saving', 'configuration', 'to', 'HDF5', 'file', 'from', 'register', 'object', 'Parameters', '----------', 'register', ':', 'pybar', '.', 'fei4', '.', 'register', 'object', 'configuration_file', ':', 'string', 'file', 'Filename', 'of', 'the', 'HDF5', 'configuration', 'file', 'or', 'file', 'object', '.', 'name', ':', 'string', 'Additional', 'identifier', '(', 'subgroup', ')', '.', 'Useful', 'when', 'storing', 'more', 'than', 'one', 'configuration', 'inside', 'a', 'HDF5', 'file', '.']
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register.py#L933-L1019
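A usage sketch for the helper above; the FEI4Register constructor call and file names are assumptions about pyBAR's configuration layout, not confirmed by this record:

from pybar.fei4.register import FEI4Register, save_configuration_to_hdf5

# Hypothetical: build a register from a text configuration, then persist it
# as a named subgroup inside an HDF5 file.
register = FEI4Register(configuration_file='fei4_defaults.cfg')
save_configuration_to_hdf5(register, 'chip_config.h5', name='run_0')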
8,172
python-tap/tappy
tap/rules.py
Rules._process_plan_lines
def _process_plan_lines(self, final_line_count): """Process plan line rules.""" if not self._lines_seen["plan"]: self._add_error(_("Missing a plan.")) return if len(self._lines_seen["plan"]) > 1: self._add_error(_("Only one plan line is permitted per file.")) return plan, at_line = self._lines_seen["plan"][0] if not self._plan_on_valid_line(at_line, final_line_count): self._add_error( _("A plan must appear at the beginning or end of the file.") ) return if plan.expected_tests != self._lines_seen["test"]: self._add_error( _("Expected {expected_count} tests but only {seen_count} ran.").format( expected_count=plan.expected_tests, seen_count=self._lines_seen["test"], ) )
python
def _process_plan_lines(self, final_line_count): """Process plan line rules.""" if not self._lines_seen["plan"]: self._add_error(_("Missing a plan.")) return if len(self._lines_seen["plan"]) > 1: self._add_error(_("Only one plan line is permitted per file.")) return plan, at_line = self._lines_seen["plan"][0] if not self._plan_on_valid_line(at_line, final_line_count): self._add_error( _("A plan must appear at the beginning or end of the file.") ) return if plan.expected_tests != self._lines_seen["test"]: self._add_error( _("Expected {expected_count} tests but only {seen_count} ran.").format( expected_count=plan.expected_tests, seen_count=self._lines_seen["test"], ) )
['def', '_process_plan_lines', '(', 'self', ',', 'final_line_count', ')', ':', 'if', 'not', 'self', '.', '_lines_seen', '[', '"plan"', ']', ':', 'self', '.', '_add_error', '(', '_', '(', '"Missing a plan."', ')', ')', 'return', 'if', 'len', '(', 'self', '.', '_lines_seen', '[', '"plan"', ']', ')', '>', '1', ':', 'self', '.', '_add_error', '(', '_', '(', '"Only one plan line is permitted per file."', ')', ')', 'return', 'plan', ',', 'at_line', '=', 'self', '.', '_lines_seen', '[', '"plan"', ']', '[', '0', ']', 'if', 'not', 'self', '.', '_plan_on_valid_line', '(', 'at_line', ',', 'final_line_count', ')', ':', 'self', '.', '_add_error', '(', '_', '(', '"A plan must appear at the beginning or end of the file."', ')', ')', 'return', 'if', 'plan', '.', 'expected_tests', '!=', 'self', '.', '_lines_seen', '[', '"test"', ']', ':', 'self', '.', '_add_error', '(', '_', '(', '"Expected {expected_count} tests but only {seen_count} ran."', ')', '.', 'format', '(', 'expected_count', '=', 'plan', '.', 'expected_tests', ',', 'seen_count', '=', 'self', '.', '_lines_seen', '[', '"test"', ']', ',', ')', ')']
Process plan line rules.
['Process', 'plan', 'line', 'rules', '.']
train
https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/rules.py#L28-L51
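The rule enforced above, restated as a standalone sketch (not tappy's public API): exactly one plan line, placed first or last, whose expected count matches the tests seen:

def check_plan(plan_lines, expected, seen, final_line):
    # plan_lines: 1-based line numbers at which a plan line appeared
    if not plan_lines:
        return 'Missing a plan.'
    if len(plan_lines) > 1:
        return 'Only one plan line is permitted per file.'
    if plan_lines[0] not in (1, final_line):
        return 'A plan must appear at the beginning or end of the file.'
    if expected != seen:
        return 'Expected {0} tests but only {1} ran.'.format(expected, seen)
    return None

print(check_plan([1], expected=3, seen=3, final_line=4))  # None -> valid TAP stream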
8,173
pazz/alot
alot/buffers/thread.py
ThreadBuffer.focus_next_sibling
def focus_next_sibling(self): """focus next sibling of currently focussed message in thread tree""" mid = self.get_selected_mid() newpos = self._tree.next_sibling_position(mid) if newpos is not None: newpos = self._sanitize_position((newpos,)) self.body.set_focus(newpos)
python
def focus_next_sibling(self): """focus next sibling of currently focussed message in thread tree""" mid = self.get_selected_mid() newpos = self._tree.next_sibling_position(mid) if newpos is not None: newpos = self._sanitize_position((newpos,)) self.body.set_focus(newpos)
['def', 'focus_next_sibling', '(', 'self', ')', ':', 'mid', '=', 'self', '.', 'get_selected_mid', '(', ')', 'newpos', '=', 'self', '.', '_tree', '.', 'next_sibling_position', '(', 'mid', ')', 'if', 'newpos', 'is', 'not', 'None', ':', 'newpos', '=', 'self', '.', '_sanitize_position', '(', '(', 'newpos', ',', ')', ')', 'self', '.', 'body', '.', 'set_focus', '(', 'newpos', ')']
focus next sibling of currently focussed message in thread tree
['focus', 'next', 'sibling', 'of', 'currently', 'focussed', 'message', 'in', 'thread', 'tree']
train
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/buffers/thread.py#L223-L229
8,174
yyuu/botornado
boto/dynamodb/item.py
Item.delete
def delete(self, expected_value=None, return_values=None): """ Delete the item from DynamoDB. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before they were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. """ return self.table.layer2.delete_item(self, expected_value, return_values)
python
def delete(self, expected_value=None, return_values=None): """ Delete the item from DynamoDB. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before they were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. """ return self.table.layer2.delete_item(self, expected_value, return_values)
['def', 'delete', '(', 'self', ',', 'expected_value', '=', 'None', ',', 'return_values', '=', 'None', ')', ':', 'return', 'self', '.', 'table', '.', 'layer2', '.', 'delete_item', '(', 'self', ',', 'expected_value', ',', 'return_values', ')']
Delete the item from DynamoDB. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before they were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned.
['Delete', 'the', 'item', 'from', 'DynamoDB', '.']
train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/dynamodb/item.py#L133-L152
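A usage sketch against boto2's DynamoDB layer2 API; the table name, hash key, and attribute here are hypothetical:

import boto

conn = boto.connect_dynamodb()          # boto2-era connection
table = conn.get_table('users')         # hypothetical table name
item = table.get_item(hash_key='user-123')
# Conditional delete; 'ALL_OLD' returns the item as it was before deletion.
old = item.delete(expected_value={'status': 'inactive'}, return_values='ALL_OLD')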
8,175
pkgw/pwkit
pwkit/parallel.py
serial_ppmap
def serial_ppmap(func, fixed_arg, var_arg_iter): """A serial implementation of the "partially-pickling map" function returned by the :meth:`ParallelHelper.get_ppmap` interface. Its arguments are: *func* A callable taking three arguments and returning a Pickle-able value. *fixed_arg* Any value, even one that is not pickle-able. *var_arg_iter* An iterable that generates Pickle-able values. The functionality is:: def serial_ppmap(func, fixed_arg, var_arg_iter): return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)] Therefore the arguments to your ``func`` function, which actually does the interesting computations, are: *index* The 0-based index number of the item being processed; often this can be ignored. *fixed_arg* The same *fixed_arg* that was passed to ``ppmap``. *var_arg* The *index*'th item in the *var_arg_iter* iterable passed to ``ppmap``. """ return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)]
python
def serial_ppmap(func, fixed_arg, var_arg_iter): """A serial implementation of the "partially-pickling map" function returned by the :meth:`ParallelHelper.get_ppmap` interface. Its arguments are: *func* A callable taking three arguments and returning a Pickle-able value. *fixed_arg* Any value, even one that is not pickle-able. *var_arg_iter* An iterable that generates Pickle-able values. The functionality is:: def serial_ppmap(func, fixed_arg, var_arg_iter): return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)] Therefore the arguments to your ``func`` function, which actually does the interesting computations, are: *index* The 0-based index number of the item being processed; often this can be ignored. *fixed_arg* The same *fixed_arg* that was passed to ``ppmap``. *var_arg* The *index*'th item in the *var_arg_iter* iterable passed to ``ppmap``. """ return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)]
['def', 'serial_ppmap', '(', 'func', ',', 'fixed_arg', ',', 'var_arg_iter', ')', ':', 'return', '[', 'func', '(', 'i', ',', 'fixed_arg', ',', 'x', ')', 'for', 'i', ',', 'x', 'in', 'enumerate', '(', 'var_arg_iter', ')', ']']
A serial implementation of the "partially-pickling map" function returned by the :meth:`ParallelHelper.get_ppmap` interface. Its arguments are: *func* A callable taking three arguments and returning a Pickle-able value. *fixed_arg* Any value, even one that is not pickle-able. *var_arg_iter* An iterable that generates Pickle-able values. The functionality is:: def serial_ppmap(func, fixed_arg, var_arg_iter): return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)] Therefore the arguments to your ``func`` function, which actually does the interesting computations, are: *index* The 0-based index number of the item being processed; often this can be ignored. *fixed_arg* The same *fixed_arg* that was passed to ``ppmap``. *var_arg* The *index*'th item in the *var_arg_iter* iterable passed to ``ppmap``.
['A', 'serial', 'implementation', 'of', 'the', 'partially', '-', 'pickling', 'map', 'function', 'returned', 'by', 'the', ':', 'meth', ':', 'ParallelHelper', '.', 'get_ppmap', 'interface', '.', 'Its', 'arguments', 'are', ':']
train
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/parallel.py#L231-L260
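A usage sketch of the ppmap calling convention documented above, where the fixed argument may be unpicklable (here, an open file handle):

from pwkit.parallel import serial_ppmap

def work(index, log_file, item):
    # index comes from enumerate(); log_file is the shared fixed argument.
    log_file.write('item %d -> %d\n' % (index, item * item))
    return item * item

with open('ppmap.log', 'w') as log:
    print(serial_ppmap(work, log, [1, 2, 3]))  # [1, 4, 9]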
8,176
zsethna/OLGA
olga/load_model.py
GenerativeModelVDJ.load_and_process_igor_model
def load_and_process_igor_model(self, marginals_file_name): """Set attributes by reading a generative model from IGoR marginal file. Sets attributes PV, PdelV_given_V, PDJ, PdelJ_given_J, PdelDldelDr_given_D, PinsVD, PinsDJ, Rvd, and Rdj. Parameters ---------- marginals_file_name : str File name for a IGoR model marginals file. """ raw_model = read_igor_marginals_txt(marginals_file_name) self.PV = raw_model[0]['v_choice'] self.PinsVD = raw_model[0]['vd_ins'] self.PinsDJ = raw_model[0]['dj_ins'] self.PdelV_given_V = raw_model[0]['v_3_del'].T self.PdelJ_given_J = raw_model[0]['j_5_del'].T #While this class assumes P(V, D, J) factorizes into P(V)*P(D, J), the B cell model #infers allowing for the full correlation. Most of the correlation information is due to #chromosomal correlation of alleles (i.e. what chromosome each allele is found on). #While this information can be interesting for inference purposes, it is discarded here #as generally these models may be use for CDR3s from individuals the models weren't inferred #from (and thus the chromosomal correlations are incorrect). This also equates the T and B cell #models. To reintroduce the chromosomal correlations use V and J usage masks after inferring the #allele identities on each chromosome. if raw_model[1]['d_gene'] == ['j_choice', 'd_gene']: #Factorized P(V, D, J) = P(V)*P(D, J) --- correct for T cell models self.PDJ = np.multiply(raw_model[0]['d_gene'].T, raw_model[0]['j_choice']) elif raw_model[1]['d_gene'] == ['v_choice', 'j_choice', 'd_gene']: #Full P(V, D, J) for B cells --- need to compute the marginal P(D, J) PVJ = np.multiply(raw_model[0]['j_choice'].T, raw_model[0]['v_choice']).T PVDJ = np.zeros([raw_model[0]['d_gene'].shape[0], raw_model[0]['d_gene'].shape[2], raw_model[0]['d_gene'].shape[1]]) for v_in in range(raw_model[0]['d_gene'].shape[0]): for j_in in range(raw_model[0]['d_gene'].shape[1]): PVDJ[v_in, :, j_in] = PVJ[v_in, j_in]*raw_model[0]['d_gene'][v_in, j_in, :] self.PDJ = np.sum(PVDJ, 0) else: print 'Unrecognized model structure -- need to construct P(D, J)' return 0 self.PdelDldelDr_given_D = np.transpose(np.multiply(np.transpose(raw_model[0]['d_3_del'], (2, 0, 1)), raw_model[0]['d_5_del']), (2, 0 , 1)) Rvd_raw = raw_model[0]['vd_dinucl'].reshape((4, 4)).T self.Rvd = np.multiply(Rvd_raw, 1/np.sum(Rvd_raw, axis = 0)) Rdj_raw = raw_model[0]['dj_dinucl'].reshape((4, 4)).T self.Rdj = np.multiply(Rdj_raw, 1/np.sum(Rdj_raw, axis = 0))
python
def load_and_process_igor_model(self, marginals_file_name): """Set attributes by reading a generative model from IGoR marginal file. Sets attributes PV, PdelV_given_V, PDJ, PdelJ_given_J, PdelDldelDr_given_D, PinsVD, PinsDJ, Rvd, and Rdj. Parameters ---------- marginals_file_name : str File name for a IGoR model marginals file. """ raw_model = read_igor_marginals_txt(marginals_file_name) self.PV = raw_model[0]['v_choice'] self.PinsVD = raw_model[0]['vd_ins'] self.PinsDJ = raw_model[0]['dj_ins'] self.PdelV_given_V = raw_model[0]['v_3_del'].T self.PdelJ_given_J = raw_model[0]['j_5_del'].T #While this class assumes P(V, D, J) factorizes into P(V)*P(D, J), the B cell model #infers allowing for the full correlation. Most of the correlation information is due to #chromosomal correlation of alleles (i.e. what chromosome each allele is found on). #While this information can be interesting for inference purposes, it is discarded here #as generally these models may be use for CDR3s from individuals the models weren't inferred #from (and thus the chromosomal correlations are incorrect). This also equates the T and B cell #models. To reintroduce the chromosomal correlations use V and J usage masks after inferring the #allele identities on each chromosome. if raw_model[1]['d_gene'] == ['j_choice', 'd_gene']: #Factorized P(V, D, J) = P(V)*P(D, J) --- correct for T cell models self.PDJ = np.multiply(raw_model[0]['d_gene'].T, raw_model[0]['j_choice']) elif raw_model[1]['d_gene'] == ['v_choice', 'j_choice', 'd_gene']: #Full P(V, D, J) for B cells --- need to compute the marginal P(D, J) PVJ = np.multiply(raw_model[0]['j_choice'].T, raw_model[0]['v_choice']).T PVDJ = np.zeros([raw_model[0]['d_gene'].shape[0], raw_model[0]['d_gene'].shape[2], raw_model[0]['d_gene'].shape[1]]) for v_in in range(raw_model[0]['d_gene'].shape[0]): for j_in in range(raw_model[0]['d_gene'].shape[1]): PVDJ[v_in, :, j_in] = PVJ[v_in, j_in]*raw_model[0]['d_gene'][v_in, j_in, :] self.PDJ = np.sum(PVDJ, 0) else: print 'Unrecognized model structure -- need to construct P(D, J)' return 0 self.PdelDldelDr_given_D = np.transpose(np.multiply(np.transpose(raw_model[0]['d_3_del'], (2, 0, 1)), raw_model[0]['d_5_del']), (2, 0 , 1)) Rvd_raw = raw_model[0]['vd_dinucl'].reshape((4, 4)).T self.Rvd = np.multiply(Rvd_raw, 1/np.sum(Rvd_raw, axis = 0)) Rdj_raw = raw_model[0]['dj_dinucl'].reshape((4, 4)).T self.Rdj = np.multiply(Rdj_raw, 1/np.sum(Rdj_raw, axis = 0))
['def', 'load_and_process_igor_model', '(', 'self', ',', 'marginals_file_name', ')', ':', 'raw_model', '=', 'read_igor_marginals_txt', '(', 'marginals_file_name', ')', 'self', '.', 'PV', '=', 'raw_model', '[', '0', ']', '[', "'v_choice'", ']', 'self', '.', 'PinsVD', '=', 'raw_model', '[', '0', ']', '[', "'vd_ins'", ']', 'self', '.', 'PinsDJ', '=', 'raw_model', '[', '0', ']', '[', "'dj_ins'", ']', 'self', '.', 'PdelV_given_V', '=', 'raw_model', '[', '0', ']', '[', "'v_3_del'", ']', '.', 'T', 'self', '.', 'PdelJ_given_J', '=', 'raw_model', '[', '0', ']', '[', "'j_5_del'", ']', '.', 'T', '#While this class assumes P(V, D, J) factorizes into P(V)*P(D, J), the B cell model', '#infers allowing for the full correlation. Most of the correlation information is due to', '#chromosomal correlation of alleles (i.e. what chromosome each allele is found on).', '#While this information can be interesting for inference purposes, it is discarded here', "#as generally these models may be use for CDR3s from individuals the models weren't inferred", '#from (and thus the chromosomal correlations are incorrect). This also equates the T and B cell', '#models. To reintroduce the chromosomal correlations use V and J usage masks after inferring the ', '#allele identities on each chromosome.', 'if', 'raw_model', '[', '1', ']', '[', "'d_gene'", ']', '==', '[', "'j_choice'", ',', "'d_gene'", ']', ':', '#Factorized P(V, D, J) = P(V)*P(D, J) --- correct for T cell models', 'self', '.', 'PDJ', '=', 'np', '.', 'multiply', '(', 'raw_model', '[', '0', ']', '[', "'d_gene'", ']', '.', 'T', ',', 'raw_model', '[', '0', ']', '[', "'j_choice'", ']', ')', 'elif', 'raw_model', '[', '1', ']', '[', "'d_gene'", ']', '==', '[', "'v_choice'", ',', "'j_choice'", ',', "'d_gene'", ']', ':', '#Full P(V, D, J) for B cells --- need to compute the marginal P(D, J)', 'PVJ', '=', 'np', '.', 'multiply', '(', 'raw_model', '[', '0', ']', '[', "'j_choice'", ']', '.', 'T', ',', 'raw_model', '[', '0', ']', '[', "'v_choice'", ']', ')', '.', 'T', 'PVDJ', '=', 'np', '.', 'zeros', '(', '[', 'raw_model', '[', '0', ']', '[', "'d_gene'", ']', '.', 'shape', '[', '0', ']', ',', 'raw_model', '[', '0', ']', '[', "'d_gene'", ']', '.', 'shape', '[', '2', ']', ',', 'raw_model', '[', '0', ']', '[', "'d_gene'", ']', '.', 'shape', '[', '1', ']', ']', ')', 'for', 'v_in', 'in', 'range', '(', 'raw_model', '[', '0', ']', '[', "'d_gene'", ']', '.', 'shape', '[', '0', ']', ')', ':', 'for', 'j_in', 'in', 'range', '(', 'raw_model', '[', '0', ']', '[', "'d_gene'", ']', '.', 'shape', '[', '1', ']', ')', ':', 'PVDJ', '[', 'v_in', ',', ':', ',', 'j_in', ']', '=', 'PVJ', '[', 'v_in', ',', 'j_in', ']', '*', 'raw_model', '[', '0', ']', '[', "'d_gene'", ']', '[', 'v_in', ',', 'j_in', ',', ':', ']', 'self', '.', 'PDJ', '=', 'np', '.', 'sum', '(', 'PVDJ', ',', '0', ')', 'else', ':', 'print', "'Unrecognized model structure -- need to construct P(D, J)'", 'return', '0', 'self', '.', 'PdelDldelDr_given_D', '=', 'np', '.', 'transpose', '(', 'np', '.', 'multiply', '(', 'np', '.', 'transpose', '(', 'raw_model', '[', '0', ']', '[', "'d_3_del'", ']', ',', '(', '2', ',', '0', ',', '1', ')', ')', ',', 'raw_model', '[', '0', ']', '[', "'d_5_del'", ']', ')', ',', '(', '2', ',', '0', ',', '1', ')', ')', 'Rvd_raw', '=', 'raw_model', '[', '0', ']', '[', "'vd_dinucl'", ']', '.', 'reshape', '(', '(', '4', ',', '4', ')', ')', '.', 'T', 'self', '.', 'Rvd', '=', 'np', '.', 'multiply', '(', 'Rvd_raw', ',', '1', '/', 'np', '.', 'sum', '(', 'Rvd_raw', ',', 'axis', '=', '0', ')', ')', 'Rdj_raw', '=', 'raw_model', '[', '0', ']', '[', "'dj_dinucl'", ']', '.', 'reshape', '(', '(', '4', ',', '4', ')', ')', '.', 'T', 'self', '.', 'Rdj', '=', 'np', '.', 'multiply', '(', 'Rdj_raw', ',', '1', '/', 'np', '.', 'sum', '(', 'Rdj_raw', ',', 'axis', '=', '0', ')', ')']
Set attributes by reading a generative model from IGoR marginal file. Sets attributes PV, PdelV_given_V, PDJ, PdelJ_given_J, PdelDldelDr_given_D, PinsVD, PinsDJ, Rvd, and Rdj. Parameters ---------- marginals_file_name : str File name for a IGoR model marginals file.
['Set', 'attributes', 'by', 'reading', 'a', 'generative', 'model', 'from', 'IGoR', 'marginal', 'file', '.', 'Sets', 'attributes', 'PV', 'PdelV_given_V', 'PDJ', 'PdelJ_given_J', 'PdelDldelDr_given_D', 'PinsVD', 'PinsDJ', 'Rvd', 'and', 'Rdj', '.', 'Parameters', '----------', 'marginals_file_name', ':', 'str', 'File', 'name', 'for', 'a', 'IGoR', 'model', 'marginals', 'file', '.']
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L681-L731
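A loading sketch for the class above; note the mined code is Python 2 era, and the marginals file name is a placeholder for one of OLGA's IGoR model files:

from olga.load_model import GenerativeModelVDJ

model = GenerativeModelVDJ()
model.load_and_process_igor_model('model_marginals.txt')  # hypothetical path
# The processed marginals are now exposed as attributes such as PV and PDJ.
print(model.PV.shape, model.PDJ.shape)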
8,177
zhanglab/psamm
psamm/importer.py
main
def main(importer_class=None, args=None): """Entry point for import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing. """ parser = argparse.ArgumentParser( description='Import from external model formats') parser.add_argument('--source', metavar='path', default='.', help='Source directory or file') parser.add_argument('--dest', metavar='path', default='.', help='Destination directory (default is ".")') parser.add_argument('--no-exchange', action='store_true', help=('Disable importing exchange reactions as' ' exchange compound file.')) parser.add_argument('--split-subsystem', action='store_true', help='Enable splitting reaction files by subsystem') parser.add_argument('--merge-compounds', action='store_true', help=('Merge identical compounds occurring in various' ' compartments.')) parser.add_argument('--force', action='store_true', help='Enable overwriting model files') if importer_class is None: parser.add_argument( 'format', help='Format to import ("list" to see all)') args = parser.parse_args(args) # Set up logging for the command line interface if 'PSAMM_DEBUG' in os.environ: level = getattr(logging, os.environ['PSAMM_DEBUG'].upper(), None) if level is not None: logging.basicConfig(level=level) else: logging.basicConfig( level=logging.INFO, format='%(levelname)s: %(message)s') if importer_class is None: # Discover all available model importers importers = {} for importer_entry in pkg_resources.iter_entry_points( 'psamm.importer'): canonical = importer_entry.name.lower() if canonical not in importers: importers[canonical] = importer_entry else: logger.warning('Importer {} was found more than once!'.format( importer_entry.name)) # Print list of importers if args.format in ('list', 'help'): if len(importers) == 0: logger.error('No importers found!') else: importer_classes = [] for name, entry in iteritems(importers): importer_class = entry.load() title = getattr(importer_class, 'title', None) generic = getattr(importer_class, 'generic', False) if title is not None: importer_classes.append( (title, generic, name, importer_class)) print('Generic importers:') for title, _, name, importer_class in sorted( c for c in importer_classes if c[1]): print('{:<12} {}'.format(name, title)) print() print('Model-specific importers:') for title, _, name, importer_class in sorted( c for c in importer_classes if not c[1]): print('{:<12} {}'.format(name, title)) sys.exit(0) importer_name = args.format.lower() if importer_name not in importers: logger.error('Importer {} not found!'.format(importer_name)) logger.info('Use "list" to see available importers.') sys.exit(-1) importer_class = importers[importer_name].load() importer = importer_class() try: model = importer.import_model(args.source) except ModelLoadError as e: logger.error('Failed to load model!', exc_info=True) importer.help() parser.error(text_type(e)) except ParseError as e: logger.error('Failed to parse model!', exc_info=True) logger.error(text_type(e)) sys.exit(-1) if args.merge_compounds: compounds_before = len(model.compounds) sbml.merge_equivalent_compounds(model) if len(model.compounds) < compounds_before: logger.info( 'Merged {} compound entries into {} entries by' ' removing duplicates in various compartments'.format( compounds_before, len(model.compounds))) print('Model: {}'.format(model.name)) print('- Biomass reaction: {}'.format(model.biomass_reaction)) print('- Compartments: {}'.format(len(model.compartments))) print('- Compounds: {}'.format(len(model.compounds))) print('- Reactions: {}'.format(len(model.reactions))) print('- Genes: {}'.format(count_genes(model))) # Check if dest directory is empty. If we get an error assume that the # directory does not exist. dest_is_empty = False try: dest_is_empty = len(os.listdir(args.dest)) == 0 except OSError: dest_is_empty = True if not dest_is_empty: if not args.force: logger.error('Destination directory is not empty. Use --force' ' option to proceed anyway, overwriting any existing' ' files in {}'.format(args.dest)) return 1 else: logger.warning('Destination directory is not empty, overwriting' ' existing files in {}'.format(args.dest)) # Create destination directory if not exists dest = args.dest mkdir_p(dest) convert_exchange = not args.no_exchange write_yaml_model(model, dest, convert_exchange=convert_exchange, split_subsystem=args.split_subsystem)
python
def main(importer_class=None, args=None): """Entry point for import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing. """ parser = argparse.ArgumentParser( description='Import from external model formats') parser.add_argument('--source', metavar='path', default='.', help='Source directory or file') parser.add_argument('--dest', metavar='path', default='.', help='Destination directory (default is ".")') parser.add_argument('--no-exchange', action='store_true', help=('Disable importing exchange reactions as' ' exchange compound file.')) parser.add_argument('--split-subsystem', action='store_true', help='Enable splitting reaction files by subsystem') parser.add_argument('--merge-compounds', action='store_true', help=('Merge identical compounds occurring in various' ' compartments.')) parser.add_argument('--force', action='store_true', help='Enable overwriting model files') if importer_class is None: parser.add_argument( 'format', help='Format to import ("list" to see all)') args = parser.parse_args(args) # Set up logging for the command line interface if 'PSAMM_DEBUG' in os.environ: level = getattr(logging, os.environ['PSAMM_DEBUG'].upper(), None) if level is not None: logging.basicConfig(level=level) else: logging.basicConfig( level=logging.INFO, format='%(levelname)s: %(message)s') if importer_class is None: # Discover all available model importers importers = {} for importer_entry in pkg_resources.iter_entry_points( 'psamm.importer'): canonical = importer_entry.name.lower() if canonical not in importers: importers[canonical] = importer_entry else: logger.warning('Importer {} was found more than once!'.format( importer_entry.name)) # Print list of importers if args.format in ('list', 'help'): if len(importers) == 0: logger.error('No importers found!') else: importer_classes = [] for name, entry in iteritems(importers): importer_class = entry.load() title = getattr(importer_class, 'title', None) generic = getattr(importer_class, 'generic', False) if title is not None: importer_classes.append( (title, generic, name, importer_class)) print('Generic importers:') for title, _, name, importer_class in sorted( c for c in importer_classes if c[1]): print('{:<12} {}'.format(name, title)) print() print('Model-specific importers:') for title, _, name, importer_class in sorted( c for c in importer_classes if not c[1]): print('{:<12} {}'.format(name, title)) sys.exit(0) importer_name = args.format.lower() if importer_name not in importers: logger.error('Importer {} not found!'.format(importer_name)) logger.info('Use "list" to see available importers.') sys.exit(-1) importer_class = importers[importer_name].load() importer = importer_class() try: model = importer.import_model(args.source) except ModelLoadError as e: logger.error('Failed to load model!', exc_info=True) importer.help() parser.error(text_type(e)) except ParseError as e: logger.error('Failed to parse model!', exc_info=True) logger.error(text_type(e)) sys.exit(-1) if args.merge_compounds: compounds_before = len(model.compounds) sbml.merge_equivalent_compounds(model) if len(model.compounds) < compounds_before: logger.info( 'Merged {} compound entries into {} entries by' ' removing duplicates in various compartments'.format( compounds_before, len(model.compounds))) print('Model: {}'.format(model.name)) print('- Biomass reaction: {}'.format(model.biomass_reaction)) print('- Compartments: {}'.format(len(model.compartments))) print('- Compounds: {}'.format(len(model.compounds))) print('- Reactions: {}'.format(len(model.reactions))) print('- Genes: {}'.format(count_genes(model))) # Check if dest directory is empty. If we get an error assume that the # directory does not exist. dest_is_empty = False try: dest_is_empty = len(os.listdir(args.dest)) == 0 except OSError: dest_is_empty = True if not dest_is_empty: if not args.force: logger.error('Destination directory is not empty. Use --force' ' option to proceed anyway, overwriting any existing' ' files in {}'.format(args.dest)) return 1 else: logger.warning('Destination directory is not empty, overwriting' ' existing files in {}'.format(args.dest)) # Create destination directory if not exists dest = args.dest mkdir_p(dest) convert_exchange = not args.no_exchange write_yaml_model(model, dest, convert_exchange=convert_exchange, split_subsystem=args.split_subsystem)
['def', 'main', '(', 'importer_class', '=', 'None', ',', 'args', '=', 'None', ')', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', "'Import from external model formats'", ')', 'parser', '.', 'add_argument', '(', "'--source'", ',', 'metavar', '=', "'path'", ',', 'default', '=', "'.'", ',', 'help', '=', "'Source directory or file'", ')', 'parser', '.', 'add_argument', '(', "'--dest'", ',', 'metavar', '=', "'path'", ',', 'default', '=', "'.'", ',', 'help', '=', '\'Destination directory (default is ".")\'', ')', 'parser', '.', 'add_argument', '(', "'--no-exchange'", ',', 'action', '=', "'store_true'", ',', 'help', '=', '(', "'Disable importing exchange reactions as'", "' exchange compound file.'", ')', ')', 'parser', '.', 'add_argument', '(', "'--split-subsystem'", ',', 'action', '=', "'store_true'", ',', 'help', '=', "'Enable splitting reaction files by subsystem'", ')', 'parser', '.', 'add_argument', '(', "'--merge-compounds'", ',', 'action', '=', "'store_true'", ',', 'help', '=', '(', "'Merge identical compounds occurring in various'", "' compartments.'", ')', ')', 'parser', '.', 'add_argument', '(', "'--force'", ',', 'action', '=', "'store_true'", ',', 'help', '=', "'Enable overwriting model files'", ')', 'if', 'importer_class', 'is', 'None', ':', 'parser', '.', 'add_argument', '(', "'format'", ',', 'help', '=', '\'Format to import ("list" to see all)\'', ')', 'args', '=', 'parser', '.', 'parse_args', '(', 'args', ')', '# Set up logging for the command line interface', 'if', "'PSAMM_DEBUG'", 'in', 'os', '.', 'environ', ':', 'level', '=', 'getattr', '(', 'logging', ',', 'os', '.', 'environ', '[', "'PSAMM_DEBUG'", ']', '.', 'upper', '(', ')', ',', 'None', ')', 'if', 'level', 'is', 'not', 'None', ':', 'logging', '.', 'basicConfig', '(', 'level', '=', 'level', ')', 'else', ':', 'logging', '.', 'basicConfig', '(', 'level', '=', 'logging', '.', 'INFO', ',', 'format', '=', "'%(levelname)s: %(message)s'", ')', 'if', 'importer_class', 'is', 'None', ':', '# Discover all available model importers', 'importers', '=', '{', '}', 'for', 'importer_entry', 'in', 'pkg_resources', '.', 'iter_entry_points', '(', "'psamm.importer'", ')', ':', 'canonical', '=', 'importer_entry', '.', 'name', '.', 'lower', '(', ')', 'if', 'canonical', 'not', 'in', 'importers', ':', 'importers', '[', 'canonical', ']', '=', 'importer_entry', 'else', ':', 'logger', '.', 'warning', '(', "'Importer {} was found more than once!'", '.', 'format', '(', 'importer_entry', '.', 'name', ')', ')', '# Print list of importers', 'if', 'args', '.', 'format', 'in', '(', "'list'", ',', "'help'", ')', ':', 'if', 'len', '(', 'importers', ')', '==', '0', ':', 'logger', '.', 'error', '(', "'No importers found!'", ')', 'else', ':', 'importer_classes', '=', '[', ']', 'for', 'name', ',', 'entry', 'in', 'iteritems', '(', 'importers', ')', ':', 'importer_class', '=', 'entry', '.', 'load', '(', ')', 'title', '=', 'getattr', '(', 'importer_class', ',', "'title'", ',', 'None', ')', 'generic', '=', 'getattr', '(', 'importer_class', ',', "'generic'", ',', 'False', ')', 'if', 'title', 'is', 'not', 'None', ':', 'importer_classes', '.', 'append', '(', '(', 'title', ',', 'generic', ',', 'name', ',', 'importer_class', ')', ')', 'print', '(', "'Generic importers:'", ')', 'for', 'title', ',', '_', ',', 'name', ',', 'importer_class', 'in', 'sorted', '(', 'c', 'for', 'c', 'in', 'importer_classes', 'if', 'c', '[', '1', ']', ')', ':', 'print', '(', "'{:<12} {}'", '.', 'format', '(', 'name', ',', 'title', ')', ')', 'print', '(', ')', 'print', '(', "'Model-specific importers:'", ')', 'for', 'title', ',', '_', ',', 'name', ',', 'importer_class', 'in', 'sorted', '(', 'c', 'for', 'c', 'in', 'importer_classes', 'if', 'not', 'c', '[', '1', ']', ')', ':', 'print', '(', "'{:<12} {}'", '.', 'format', '(', 'name', ',', 'title', ')', ')', 'sys', '.', 'exit', '(', '0', ')', 'importer_name', '=', 'args', '.', 'format', '.', 'lower', '(', ')', 'if', 'importer_name', 'not', 'in', 'importers', ':', 'logger', '.', 'error', '(', "'Importer {} not found!'", '.', 'format', '(', 'importer_name', ')', ')', 'logger', '.', 'info', '(', '\'Use "list" to see available importers.\'', ')', 'sys', '.', 'exit', '(', '-', '1', ')', 'importer_class', '=', 'importers', '[', 'importer_name', ']', '.', 'load', '(', ')', 'importer', '=', 'importer_class', '(', ')', 'try', ':', 'model', '=', 'importer', '.', 'import_model', '(', 'args', '.', 'source', ')', 'except', 'ModelLoadError', 'as', 'e', ':', 'logger', '.', 'error', '(', "'Failed to load model!'", ',', 'exc_info', '=', 'True', ')', 'importer', '.', 'help', '(', ')', 'parser', '.', 'error', '(', 'text_type', '(', 'e', ')', ')', 'except', 'ParseError', 'as', 'e', ':', 'logger', '.', 'error', '(', "'Failed to parse model!'", ',', 'exc_info', '=', 'True', ')', 'logger', '.', 'error', '(', 'text_type', '(', 'e', ')', ')', 'sys', '.', 'exit', '(', '-', '1', ')', 'if', 'args', '.', 'merge_compounds', ':', 'compounds_before', '=', 'len', '(', 'model', '.', 'compounds', ')', 'sbml', '.', 'merge_equivalent_compounds', '(', 'model', ')', 'if', 'len', '(', 'model', '.', 'compounds', ')', '<', 'compounds_before', ':', 'logger', '.', 'info', '(', "'Merged {} compound entries into {} entries by'", "' removing duplicates in various compartments'", '.', 'format', '(', 'compounds_before', ',', 'len', '(', 'model', '.', 'compounds', ')', ')', ')', 'print', '(', "'Model: {}'", '.', 'format', '(', 'model', '.', 'name', ')', ')', 'print', '(', "'- Biomass reaction: {}'", '.', 'format', '(', 'model', '.', 'biomass_reaction', ')', ')', 'print', '(', "'- Compartments: {}'", '.', 'format', '(', 'len', '(', 'model', '.', 'compartments', ')', ')', ')', 'print', '(', "'- Compounds: {}'", '.', 'format', '(', 'len', '(', 'model', '.', 'compounds', ')', ')', ')', 'print', '(', "'- Reactions: {}'", '.', 'format', '(', 'len', '(', 'model', '.', 'reactions', ')', ')', ')', 'print', '(', "'- Genes: {}'", '.', 'format', '(', 'count_genes', '(', 'model', ')', ')', ')', '# Check if dest directory is empty. If we get an error assume that the', '# directory does not exist.', 'dest_is_empty', '=', 'False', 'try', ':', 'dest_is_empty', '=', 'len', '(', 'os', '.', 'listdir', '(', 'args', '.', 'dest', ')', ')', '==', '0', 'except', 'OSError', ':', 'dest_is_empty', '=', 'True', 'if', 'not', 'dest_is_empty', ':', 'if', 'not', 'args', '.', 'force', ':', 'logger', '.', 'error', '(', "'Destination directory is not empty. Use --force'", "' option to proceed anyway, overwriting any existing'", "' files in {}'", '.', 'format', '(', 'args', '.', 'dest', ')', ')', 'return', '1', 'else', ':', 'logger', '.', 'warning', '(', "'Destination directory is not empty, overwriting'", "' existing files in {}'", '.', 'format', '(', 'args', '.', 'dest', ')', ')', '# Create destination directory if not exists', 'dest', '=', 'args', '.', 'dest', 'mkdir_p', '(', 'dest', ')', 'convert_exchange', '=', 'not', 'args', '.', 'no_exchange', 'write_yaml_model', '(', 'model', ',', 'dest', ',', 'convert_exchange', '=', 'convert_exchange', ',', 'split_subsystem', '=', 'args', '.', 'split_subsystem', ')']
Entry point for import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing.
['Entry', 'point', 'for', 'import', 'program', '.']
train
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/importer.py#L563-L700
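A programmatic invocation sketch, using the args list the docstring recommends for testing; the importer name and paths here are assumptions:

from psamm.importer import main

# Same effect as passing these tokens on the command line; 'sbml' is a
# hypothetical importer name, model.xml/out are placeholder paths.
main(args=['--source', 'model.xml', '--dest', 'out', '--force', 'sbml'])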
8,178
tino/pyFirmata
pyfirmata/pyfirmata.py
Board.add_cmd_handler
def add_cmd_handler(self, cmd, func): """Adds a command handler for a command.""" len_args = len(inspect.getargspec(func)[0]) def add_meta(f): def decorator(*args, **kwargs): f(*args, **kwargs) decorator.bytes_needed = len_args - 1 # exclude self decorator.__name__ = f.__name__ return decorator func = add_meta(func) self._command_handlers[cmd] = func
python
def add_cmd_handler(self, cmd, func): """Adds a command handler for a command.""" len_args = len(inspect.getargspec(func)[0]) def add_meta(f): def decorator(*args, **kwargs): f(*args, **kwargs) decorator.bytes_needed = len_args - 1 # exclude self decorator.__name__ = f.__name__ return decorator func = add_meta(func) self._command_handlers[cmd] = func
['def', 'add_cmd_handler', '(', 'self', ',', 'cmd', ',', 'func', ')', ':', 'len_args', '=', 'len', '(', 'inspect', '.', 'getargspec', '(', 'func', ')', '[', '0', ']', ')', 'def', 'add_meta', '(', 'f', ')', ':', 'def', 'decorator', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'f', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'decorator', '.', 'bytes_needed', '=', 'len_args', '-', '1', '# exclude self', 'decorator', '.', '__name__', '=', 'f', '.', '__name__', 'return', 'decorator', 'func', '=', 'add_meta', '(', 'func', ')', 'self', '.', '_command_handlers', '[', 'cmd', ']', '=', 'func']
Adds a command handler for a command.
['Adds', 'a', 'command', 'handler', 'for', 'a', 'command', '.']
train
https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/pyfirmata.py#L183-L194
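A registration sketch for the handler mechanism above; the serial port is hypothetical, and note that the handler's first parameter is excluded from bytes_needed, mirroring the bound-method convention:

from pyfirmata import Arduino

REPORT_VERSION = 0xF9  # standard Firmata command byte

def on_version(board, major, minor):
    # First parameter is skipped when counting bytes, so two data bytes
    # (major, minor) will be delivered to this handler.
    print('firmware %d.%d' % (major, minor))

board = Arduino('/dev/ttyACM0')  # hypothetical serial port
board.add_cmd_handler(REPORT_VERSION, on_version)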
8,179
apache/spark
python/pyspark/rdd.py
RDD.mapValues
def mapValues(self, f): """ Pass each value in the key-value pair RDD through a map function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])]) >>> def f(x): return len(x) >>> x.mapValues(f).collect() [('a', 3), ('b', 1)] """ map_values_fn = lambda kv: (kv[0], f(kv[1])) return self.map(map_values_fn, preservesPartitioning=True)
python
def mapValues(self, f): """ Pass each value in the key-value pair RDD through a map function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])]) >>> def f(x): return len(x) >>> x.mapValues(f).collect() [('a', 3), ('b', 1)] """ map_values_fn = lambda kv: (kv[0], f(kv[1])) return self.map(map_values_fn, preservesPartitioning=True)
['def', 'mapValues', '(', 'self', ',', 'f', ')', ':', 'map_values_fn', '=', 'lambda', 'kv', ':', '(', 'kv', '[', '0', ']', ',', 'f', '(', 'kv', '[', '1', ']', ')', ')', 'return', 'self', '.', 'map', '(', 'map_values_fn', ',', 'preservesPartitioning', '=', 'True', ')']
Pass each value in the key-value pair RDD through a map function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])]) >>> def f(x): return len(x) >>> x.mapValues(f).collect() [('a', 3), ('b', 1)]
['Pass', 'each', 'value', 'in', 'the', 'key', '-', 'value', 'pair', 'RDD', 'through', 'a', 'map', 'function', 'without', 'changing', 'the', 'keys', ';', 'this', 'also', 'retains', 'the', 'original', 'RDD', 's', 'partitioning', '.']
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1974-L1986
8,180
BernardFW/bernard
src/bernard/engine/fsm.py
FSM._make_register
def _make_register(self) -> BaseRegisterStore: """ Make the register storage. """ s = settings.REGISTER_STORE store_class = import_class(s['class']) return store_class(**s['params'])
python
def _make_register(self) -> BaseRegisterStore: """ Make the register storage. """ s = settings.REGISTER_STORE store_class = import_class(s['class']) return store_class(**s['params'])
['def', '_make_register', '(', 'self', ')', '->', 'BaseRegisterStore', ':', 's', '=', 'settings', '.', 'REGISTER_STORE', 'store_class', '=', 'import_class', '(', 's', '[', "'class'", ']', ')', 'return', 'store_class', '(', '*', '*', 's', '[', "'params'", ']', ')']
Make the register storage.
['Make', 'the', 'register', 'storage', '.']
train
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/fsm.py#L141-L148
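The settings-driven factory pattern above, restated as a standalone sketch; the dotted class path and parameters are assumptions, and import_class is re-implemented locally rather than taken from bernard:

from importlib import import_module

REGISTER_STORE = {
    'class': 'bernard.storage.register.RedisRegisterStore',  # assumed path
    'params': {'host': 'localhost', 'port': 6379},
}

def import_class(path):
    # Resolve 'pkg.module.Class' into the class object.
    module_path, class_name = path.rsplit('.', 1)
    return getattr(import_module(module_path), class_name)

store_class = import_class(REGISTER_STORE['class'])
store = store_class(**REGISTER_STORE['params'])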
8,181
marshmallow-code/marshmallow
src/marshmallow/fields.py
Field.deserialize
def deserialize(self, value, attr=None, data=None, **kwargs): """Deserialize ``value``. :param value: The value to be deserialized. :param str attr: The attribute/key in `data` to be deserialized. :param dict data: The raw input data passed to the `Schema.load`. :param dict kwargs: Field-specific keyword arguments. :raise ValidationError: If an invalid value is passed or if a required value is missing. """ # Validate required fields, deserialize, then validate # deserialized value self._validate_missing(value) if value is missing_: _miss = self.missing return _miss() if callable(_miss) else _miss if getattr(self, 'allow_none', False) is True and value is None: return None output = self._deserialize(value, attr, data, **kwargs) self._validate(output) return output
python
def deserialize(self, value, attr=None, data=None, **kwargs): """Deserialize ``value``. :param value: The value to be deserialized. :param str attr: The attribute/key in `data` to be deserialized. :param dict data: The raw input data passed to the `Schema.load`. :param dict kwargs: Field-specific keyword arguments. :raise ValidationError: If an invalid value is passed or if a required value is missing. """ # Validate required fields, deserialize, then validate # deserialized value self._validate_missing(value) if value is missing_: _miss = self.missing return _miss() if callable(_miss) else _miss if getattr(self, 'allow_none', False) is True and value is None: return None output = self._deserialize(value, attr, data, **kwargs) self._validate(output) return output
['def', 'deserialize', '(', 'self', ',', 'value', ',', 'attr', '=', 'None', ',', 'data', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', '# Validate required fields, deserialize, then validate', '# deserialized value', 'self', '.', '_validate_missing', '(', 'value', ')', 'if', 'value', 'is', 'missing_', ':', '_miss', '=', 'self', '.', 'missing', 'return', '_miss', '(', ')', 'if', 'callable', '(', '_miss', ')', 'else', '_miss', 'if', 'getattr', '(', 'self', ',', "'allow_none'", ',', 'False', ')', 'is', 'True', 'and', 'value', 'is', 'None', ':', 'return', 'None', 'output', '=', 'self', '.', '_deserialize', '(', 'value', ',', 'attr', ',', 'data', ',', '*', '*', 'kwargs', ')', 'self', '.', '_validate', '(', 'output', ')', 'return', 'output']
Deserialize ``value``. :param value: The value to be deserialized. :param str attr: The attribute/key in `data` to be deserialized. :param dict data: The raw input data passed to the `Schema.load`. :param dict kwargs: Field-specific keyword arguments. :raise ValidationError: If an invalid value is passed or if a required value is missing.
['Deserialize', 'value', '.']
train
https://github.com/marshmallow-code/marshmallow/blob/a6b6c4151f1fbf16f3774d4052ca2bddf6903750/src/marshmallow/fields.py#L285-L305
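A usage sketch exercising the missing/allow_none/validator branches described above, matching the API of the marshmallow version mined here:

from marshmallow import fields, validate, ValidationError

f = fields.Integer(missing=0, allow_none=True, validate=validate.Range(min=0))
print(f.deserialize('42'))   # 42 -- coerced, then validated
print(f.deserialize(None))   # None -- allow_none short-circuits validation
try:
    f.deserialize(-1)        # fails the Range validator
except ValidationError as err:
    print(err.messages)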
8,182
mlavin/argyle
argyle/postgres.py
create_db_user
def create_db_user(username, password=None, flags=None): """Create a database user.""" flags = flags or u'-D -A -R' sudo(u'createuser %s %s' % (flags, username), user=u'postgres') if password: change_db_user_password(username, password)
python
def create_db_user(username, password=None, flags=None): """Create a database user.""" flags = flags or u'-D -A -R' sudo(u'createuser %s %s' % (flags, username), user=u'postgres') if password: change_db_user_password(username, password)
['def', 'create_db_user', '(', 'username', ',', 'password', '=', 'None', ',', 'flags', '=', 'None', ')', ':', 'flags', '=', 'flags', 'or', "u'-D -A -R'", 'sudo', '(', "u'createuser %s %s'", '%', '(', 'flags', ',', 'username', ')', ',', 'user', '=', "u'postgres'", ')', 'if', 'password', ':', 'change_db_user_password', '(', 'username', ',', 'password', ')']
Create a database user.
['Create', 'a', 'database', 'user', '.']
train
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L11-L17
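A deployment sketch wrapping the helper above in a Fabric 1.x task; host configuration is omitted and the credentials are hypothetical:

from fabric.api import task
from argyle.postgres import create_db_user

@task
def bootstrap_db():
    # Default flags '-D -A -R': no createdb, not superuser, cannot create roles.
    create_db_user(u'app_user', password=u's3cret')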
8,183
almcc/cinder-data
cinder_data/store.py
Store.find_all
def find_all(self, model_class, params={}):
    """Return a list of models from the API and cache the result.

    Args:
        model_class (:class:`cinder_data.model.CinderModel`): A subclass of
            :class:`cinder_data.model.CinderModel` of your chosen model.
        params (dict, optional): Query parameters appended to the request URL.

    Returns:
        list: A list of instances of your model_class, or an empty list.
    """
    url = '{host}/{namespace}/{model}{params}'.format(
        host=self._host,
        namespace=self._namespace,
        model=self._translate_name(model_class.__name__),
        params=self._build_param_string(params)
    )
    data = self._get_json(url)['data']
    fresh_models = []
    for item in data:
        fresh_model = model_class(item['attributes'])
        fresh_model.id = item['id']
        fresh_model.validate()
        fresh_models.append(fresh_model)
        if self._cache is not None:
            self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model)
    return fresh_models
python
def find_all(self, model_class, params={}):
    """Return a list of models from the API and cache the result.

    Args:
        model_class (:class:`cinder_data.model.CinderModel`): A subclass of
            :class:`cinder_data.model.CinderModel` of your chosen model.
        params (dict, optional): Query parameters appended to the request URL.

    Returns:
        list: A list of instances of your model_class, or an empty list.
    """
    url = '{host}/{namespace}/{model}{params}'.format(
        host=self._host,
        namespace=self._namespace,
        model=self._translate_name(model_class.__name__),
        params=self._build_param_string(params)
    )
    data = self._get_json(url)['data']
    fresh_models = []
    for item in data:
        fresh_model = model_class(item['attributes'])
        fresh_model.id = item['id']
        fresh_model.validate()
        fresh_models.append(fresh_model)
        if self._cache is not None:
            self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model)
    return fresh_models
['def', 'find_all', '(', 'self', ',', 'model_class', ',', 'params', '=', '{', '}', ')', ':', 'url', '=', "'{host}/{namespace}/{model}{params}'", '.', 'format', '(', 'host', '=', 'self', '.', '_host', ',', 'namespace', '=', 'self', '.', '_namespace', ',', 'model', '=', 'self', '.', '_translate_name', '(', 'model_class', '.', '__name__', ')', ',', 'params', '=', 'self', '.', '_build_param_string', '(', 'params', ')', ')', 'data', '=', 'self', '.', '_get_json', '(', 'url', ')', '[', "'data'", ']', 'fresh_models', '=', '[', ']', 'for', 'item', 'in', 'data', ':', 'fresh_model', '=', 'model_class', '(', 'item', '[', "'attributes'", ']', ')', 'fresh_model', '.', 'id', '=', 'item', '[', "'id'", ']', 'fresh_model', '.', 'validate', '(', ')', 'fresh_models', '.', 'append', '(', 'fresh_model', ')', 'if', 'self', '.', '_cache', 'is', 'not', 'None', ':', 'self', '.', '_cache', '.', 'set_record', '(', 'model_class', '.', '__name__', ',', 'fresh_model', '.', 'id', ',', 'fresh_model', ')', 'return', 'fresh_models']
Return a list of models from the API and cache the result.

Args:
    model_class (:class:`cinder_data.model.CinderModel`): A subclass of
        :class:`cinder_data.model.CinderModel` of your chosen model.
    params (dict, optional): Query parameters appended to the request URL.

Returns:
    list: A list of instances of your model_class, or an empty list.
['Return', 'a', 'list', 'of', 'models', 'from', 'the', 'API', 'and', 'cache', 'the', 'result', '.']
train
https://github.com/almcc/cinder-data/blob/4159a5186c4b4fc32354749892e86130530f6ec5/cinder_data/store.py#L57-L83
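A hedged usage sketch; the Store constructor arguments and the Article model are assumptions made for illustration, not taken from the record:

from cinder_data.store import Store
from cinder_data.model import CinderModel

class Article(CinderModel):  # illustrative model
    pass

store = Store('https://api.example.com', 'v1')  # host and namespace are placeholders
articles = store.find_all(Article, params={'page': '1'})
for article in articles:
    print(article.id)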
8,184
westonplatter/fast_arrow
fast_arrow/resources/option.py
Option.in_chain
def in_chain(cls, client, chain_id, expiration_dates=[]):
    """
    fetch all option instruments in an options chain
    - expiration_dates = optionally scope
    """
    request_url = "https://api.robinhood.com/options/instruments/"
    params = {
        "chain_id": chain_id,
        "expiration_dates": ",".join(expiration_dates)
    }
    data = client.get(request_url, params=params)
    results = data['results']

    while data['next']:
        data = client.get(data['next'])
        results.extend(data['results'])
    return results
python
def in_chain(cls, client, chain_id, expiration_dates=[]):
    """
    fetch all option instruments in an options chain
    - expiration_dates = optionally scope
    """
    request_url = "https://api.robinhood.com/options/instruments/"
    params = {
        "chain_id": chain_id,
        "expiration_dates": ",".join(expiration_dates)
    }
    data = client.get(request_url, params=params)
    results = data['results']

    while data['next']:
        data = client.get(data['next'])
        results.extend(data['results'])
    return results
['def', 'in_chain', '(', 'cls', ',', 'client', ',', 'chain_id', ',', 'expiration_dates', '=', '[', ']', ')', ':', 'request_url', '=', '"https://api.robinhood.com/options/instruments/"', 'params', '=', '{', '"chain_id"', ':', 'chain_id', ',', '"expiration_dates"', ':', '","', '.', 'join', '(', 'expiration_dates', ')', '}', 'data', '=', 'client', '.', 'get', '(', 'request_url', ',', 'params', '=', 'params', ')', 'results', '=', 'data', '[', "'results'", ']', 'while', 'data', '[', "'next'", ']', ':', 'data', '=', 'client', '.', 'get', '(', 'data', '[', "'next'", ']', ')', 'results', '.', 'extend', '(', 'data', '[', "'results'", ']', ')', 'return', 'results']
fetch all option instruments in an options chain
- expiration_dates = optionally scope
['fetch', 'all', 'option', 'instruments', 'in', 'an', 'options', 'chain', '-', 'expiration_dates', '=', 'optionally', 'scope']
train
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/resources/option.py#L65-L83
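A hedged usage sketch; an authenticated Robinhood session is assumed, and the credentials and chain id below are placeholders:

from fast_arrow import Client, Option

client = Client(username='user', password='pass')  # placeholder credentials
client.authenticate()

chain_id = 'abc123'  # normally obtained from the option chain endpoints; placeholder
options = Option.in_chain(client, chain_id, expiration_dates=['2019-06-21'])
print(len(options))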
8,185
danidee10/Staticfy
staticfy/staticfy.py
get_elements
def get_elements(html_file, tags):
    """
    Extract all the elements we're interested in.

    Returns a list of tuples with the attribute as the first item and
    the list of elements as the second item.
    """
    with open(html_file) as f:
        document = BeautifulSoup(f, 'html.parser')

    def condition(tag, attr):
        # Don't include external links
        return lambda x: x.name == tag \
            and not x.get(attr, 'http').startswith(('http', '//'))

    all_tags = [(attr, document.find_all(condition(tag, attr)))
                for tag, attr in tags]

    return all_tags
python
def get_elements(html_file, tags):
    """
    Extract all the elements we're interested in.

    Returns a list of tuples with the attribute as the first item and
    the list of elements as the second item.
    """
    with open(html_file) as f:
        document = BeautifulSoup(f, 'html.parser')

    def condition(tag, attr):
        # Don't include external links
        return lambda x: x.name == tag \
            and not x.get(attr, 'http').startswith(('http', '//'))

    all_tags = [(attr, document.find_all(condition(tag, attr)))
                for tag, attr in tags]

    return all_tags
['def', 'get_elements', '(', 'html_file', ',', 'tags', ')', ':', 'with', 'open', '(', 'html_file', ')', 'as', 'f', ':', 'document', '=', 'BeautifulSoup', '(', 'f', ',', "'html.parser'", ')', 'def', 'condition', '(', 'tag', ',', 'attr', ')', ':', "# Don't include external links", 'return', 'lambda', 'x', ':', 'x', '.', 'name', '==', 'tag', 'and', 'not', 'x', '.', 'get', '(', 'attr', ',', "'http'", ')', '.', 'startswith', '(', '(', "'http'", ',', "'//'", ')', ')', 'all_tags', '=', '[', '(', 'attr', ',', 'document', '.', 'find_all', '(', 'condition', '(', 'tag', ',', 'attr', ')', ')', ')', 'for', 'tag', ',', 'attr', 'in', 'tags', ']', 'return', 'all_tags']
Extract all the elements we're interested in.

Returns a list of tuples with the attribute as the first item and
the list of elements as the second item.
['Extract', 'all', 'the', 'elements', 'we', 're', 'interested', 'in', '.']
train
https://github.com/danidee10/Staticfy/blob/ebc555b00377394b0f714e4a173d37833fec90cb/staticfy/staticfy.py#L71-L89
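A minimal usage sketch; assumes bs4 is installed and that index.html exists in the working directory (the tag list is illustrative):

from staticfy.staticfy import get_elements

tags = [('img', 'src'), ('link', 'href'), ('script', 'src')]
for attr, elements in get_elements('index.html', tags):
    for element in elements:
        print(attr, element.get(attr))  # only local (non-http) references match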
8,186
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
KvmManager.infops
def infops(self, uuid):
    """
    Get info per second about a kvm domain by uuid
    :param uuid: uuid of the kvm container (same as the one used in create)
    :return:
    """
    args = {
        'uuid': uuid,
    }
    self._domain_action_chk.check(args)

    return self._client.json('kvm.infops', args)
python
def infops(self, uuid):
    """
    Get info per second about a kvm domain by uuid
    :param uuid: uuid of the kvm container (same as the one used in create)
    :return:
    """
    args = {
        'uuid': uuid,
    }
    self._domain_action_chk.check(args)

    return self._client.json('kvm.infops', args)
['def', 'infops', '(', 'self', ',', 'uuid', ')', ':', 'args', '=', '{', "'uuid'", ':', 'uuid', ',', '}', 'self', '.', '_domain_action_chk', '.', 'check', '(', 'args', ')', 'return', 'self', '.', '_client', '.', 'json', '(', "'kvm.infops'", ',', 'args', ')']
Get info per second about a kvm domain by uuid
:param uuid: uuid of the kvm container (same as the one used in create)
:return:
['Get', 'info', 'per', 'second', 'about', 'a', 'kvm', 'domain', 'by', 'uuid', ':', 'param', 'uuid', ':', 'uuid', 'of', 'the', 'kvm', 'container', '(', 'same', 'as', 'the', 'one', 'used', 'in', 'create', ')', ':', 'return', ':']
train
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L2404-L2415
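A hedged usage sketch; assumes a reachable Zero-OS node and that the Client constructor takes the node address as shown -- the address and uuid are placeholders:

from zeroos.core0.client import Client

cl = Client('10.0.0.1')  # node address is a placeholder
stats = cl.kvm.infops('my-vm-uuid')  # uuid returned by kvm.create; placeholder
print(stats)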
8,187
JelteF/PyLaTeX
pylatex/figure.py
Figure.add_plot
def add_plot(self, *args, extension='pdf', **kwargs):
    """Add the current Matplotlib plot to the figure.

    The plot that gets added is the one that would normally be shown when
    using ``plt.show()``.

    Args
    ----
    args:
        Arguments passed to plt.savefig for displaying the plot.
    extension : str
        extension of image file indicating figure file type
    kwargs:
        Keyword arguments passed to plt.savefig for displaying the plot. In
        case these contain ``width`` or ``placement``, they will be used for
        the same purpose as in the add_image command. Namely the width and
        placement of the generated plot in the LaTeX document.
    """
    add_image_kwargs = {}

    for key in ('width', 'placement'):
        if key in kwargs:
            add_image_kwargs[key] = kwargs.pop(key)

    filename = self._save_plot(*args, extension=extension, **kwargs)

    self.add_image(filename, **add_image_kwargs)
python
def add_plot(self, *args, extension='pdf', **kwargs):
    """Add the current Matplotlib plot to the figure.

    The plot that gets added is the one that would normally be shown when
    using ``plt.show()``.

    Args
    ----
    args:
        Arguments passed to plt.savefig for displaying the plot.
    extension : str
        extension of image file indicating figure file type
    kwargs:
        Keyword arguments passed to plt.savefig for displaying the plot. In
        case these contain ``width`` or ``placement``, they will be used for
        the same purpose as in the add_image command. Namely the width and
        placement of the generated plot in the LaTeX document.
    """
    add_image_kwargs = {}

    for key in ('width', 'placement'):
        if key in kwargs:
            add_image_kwargs[key] = kwargs.pop(key)

    filename = self._save_plot(*args, extension=extension, **kwargs)

    self.add_image(filename, **add_image_kwargs)
['def', 'add_plot', '(', 'self', ',', '*', 'args', ',', 'extension', '=', "'pdf'", ',', '*', '*', 'kwargs', ')', ':', 'add_image_kwargs', '=', '{', '}', 'for', 'key', 'in', '(', "'width'", ',', "'placement'", ')', ':', 'if', 'key', 'in', 'kwargs', ':', 'add_image_kwargs', '[', 'key', ']', '=', 'kwargs', '.', 'pop', '(', 'key', ')', 'filename', '=', 'self', '.', '_save_plot', '(', '*', 'args', ',', 'extension', '=', 'extension', ',', '*', '*', 'kwargs', ')', 'self', '.', 'add_image', '(', 'filename', ',', '*', '*', 'add_image_kwargs', ')']
Add the current Matplotlib plot to the figure.

The plot that gets added is the one that would normally be shown when
using ``plt.show()``.

Args
----
args:
    Arguments passed to plt.savefig for displaying the plot.
extension : str
    extension of image file indicating figure file type
kwargs:
    Keyword arguments passed to plt.savefig for displaying the plot. In
    case these contain ``width`` or ``placement``, they will be used for
    the same purpose as in the add_image command. Namely the width and
    placement of the generated plot in the LaTeX document.
['Add', 'the', 'current', 'Matplotlib', 'plot', 'to', 'the', 'figure', '.']
train
https://github.com/JelteF/PyLaTeX/blob/62d9d9912ce8445e6629cdbcb80ad86143a1ed23/pylatex/figure.py#L64-L91
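A minimal end-to-end sketch; assumes matplotlib and a LaTeX toolchain are installed, and the data and output filename are illustrative:

import matplotlib
matplotlib.use('Agg')  # non-interactive backend, set before importing pyplot
import matplotlib.pyplot as plt

from pylatex import Document, Figure, NoEscape

plt.plot([0, 1, 2, 3], [0, 1, 4, 9])

doc = Document()
with doc.create(Figure(position='htbp')) as fig:
    fig.add_plot(width=NoEscape(r'0.8\textwidth'))  # width is forwarded to add_image
    fig.add_caption('A quadratic curve')
doc.generate_pdf('plot_example', clean_tex=False)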
8,188
erdewit/ib_insync
ib_insync/ib.py
IB.cancelMktDepth
def cancelMktDepth(self, contract: Contract, isSmartDepth=False):
    """
    Unsubscribe from market depth data.

    Args:
        contract: The exact contract object that was used to
            subscribe with.
    """
    ticker = self.ticker(contract)
    reqId = self.wrapper.endTicker(ticker, 'mktDepth')
    if reqId:
        self.client.cancelMktDepth(reqId, isSmartDepth)
    else:
        self._logger.error(
            f'cancelMktDepth: No reqId found for contract {contract}')
python
def cancelMktDepth(self, contract: Contract, isSmartDepth=False):
    """
    Unsubscribe from market depth data.

    Args:
        contract: The exact contract object that was used to
            subscribe with.
    """
    ticker = self.ticker(contract)
    reqId = self.wrapper.endTicker(ticker, 'mktDepth')
    if reqId:
        self.client.cancelMktDepth(reqId, isSmartDepth)
    else:
        self._logger.error(
            f'cancelMktDepth: No reqId found for contract {contract}')
['def', 'cancelMktDepth', '(', 'self', ',', 'contract', ':', 'Contract', ',', 'isSmartDepth', '=', 'False', ')', ':', 'ticker', '=', 'self', '.', 'ticker', '(', 'contract', ')', 'reqId', '=', 'self', '.', 'wrapper', '.', 'endTicker', '(', 'ticker', ',', "'mktDepth'", ')', 'if', 'reqId', ':', 'self', '.', 'client', '.', 'cancelMktDepth', '(', 'reqId', ',', 'isSmartDepth', ')', 'else', ':', 'self', '.', '_logger', '.', 'error', '(', "f'cancelMktDepth: No reqId found for contract {contract}'", ')']
Unsubscribe from market depth data.

Args:
    contract: The exact contract object that was used to
        subscribe with.
['Unsubscribe', 'from', 'market', 'depth', 'data', '.']
train
https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/ib.py#L1242-L1256
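A hedged usage sketch; assumes a TWS/IB Gateway instance is listening on the illustrative host/port and that the account has market-depth permissions:

from ib_insync import IB, Forex

ib = IB()
ib.connect('127.0.0.1', 7497, clientId=1)  # port and clientId are illustrative

contract = Forex('EURUSD')
ticker = ib.reqMktDepth(contract)
ib.sleep(5)  # let a few depth updates arrive
ib.cancelMktDepth(contract)  # must be the same contract object used to subscribe
ib.disconnect()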
8,189
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/tools/MAVExplorer.py
flightmode_menu
def flightmode_menu():
    '''construct flightmode menu'''
    modes = mestate.mlog.flightmode_list()
    ret = []
    idx = 0
    for (mode,t1,t2) in modes:
        modestr = "%s %us" % (mode, (t2-t1))
        ret.append(MPMenuCheckbox(modestr, modestr, 'mode-%u' % idx))
        idx += 1
        mestate.flightmode_selections.append(False)
    return ret
python
def flightmode_menu():
    '''construct flightmode menu'''
    modes = mestate.mlog.flightmode_list()
    ret = []
    idx = 0
    for (mode,t1,t2) in modes:
        modestr = "%s %us" % (mode, (t2-t1))
        ret.append(MPMenuCheckbox(modestr, modestr, 'mode-%u' % idx))
        idx += 1
        mestate.flightmode_selections.append(False)
    return ret
['def', 'flightmode_menu', '(', ')', ':', 'modes', '=', 'mestate', '.', 'mlog', '.', 'flightmode_list', '(', ')', 'ret', '=', '[', ']', 'idx', '=', '0', 'for', '(', 'mode', ',', 't1', ',', 't2', ')', 'in', 'modes', ':', 'modestr', '=', '"%s %us"', '%', '(', 'mode', ',', '(', 't2', '-', 't1', ')', ')', 'ret', '.', 'append', '(', 'MPMenuCheckbox', '(', 'modestr', ',', 'modestr', ',', "'mode-%u'", '%', 'idx', ')', ')', 'idx', '+=', '1', 'mestate', '.', 'flightmode_selections', '.', 'append', '(', 'False', ')', 'return', 'ret']
construct flightmode menu
['construct', 'flightmode', 'menu']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/tools/MAVExplorer.py#L92-L102
8,190
dpgaspar/Flask-AppBuilder
flask_appbuilder/forms.py
GeneralModelConverter._convert_many_to_one
def _convert_many_to_one(self, col_name, label, description,
                         lst_validators, filter_rel_fields, form_props):
    """
        Creates a WTForm field for many to one related fields,
        will use a Select box based on a query. Will only
        work with SQLAlchemy interface.
    """
    query_func = self._get_related_query_func(col_name, filter_rel_fields)
    get_pk_func = self._get_related_pk_func(col_name)
    extra_classes = None
    allow_blank = True
    if not self.datamodel.is_nullable(col_name):
        lst_validators.append(validators.DataRequired())
        allow_blank = False
    else:
        lst_validators.append(validators.Optional())
    form_props[col_name] = \
        QuerySelectField(label,
                         description=description,
                         query_func=query_func,
                         get_pk_func=get_pk_func,
                         allow_blank=allow_blank,
                         validators=lst_validators,
                         widget=Select2Widget(extra_classes=extra_classes))
    return form_props
python
def _convert_many_to_one(self, col_name, label, description,
                         lst_validators, filter_rel_fields, form_props):
    """
        Creates a WTForm field for many to one related fields,
        will use a Select box based on a query. Will only
        work with SQLAlchemy interface.
    """
    query_func = self._get_related_query_func(col_name, filter_rel_fields)
    get_pk_func = self._get_related_pk_func(col_name)
    extra_classes = None
    allow_blank = True
    if not self.datamodel.is_nullable(col_name):
        lst_validators.append(validators.DataRequired())
        allow_blank = False
    else:
        lst_validators.append(validators.Optional())
    form_props[col_name] = \
        QuerySelectField(label,
                         description=description,
                         query_func=query_func,
                         get_pk_func=get_pk_func,
                         allow_blank=allow_blank,
                         validators=lst_validators,
                         widget=Select2Widget(extra_classes=extra_classes))
    return form_props
['def', '_convert_many_to_one', '(', 'self', ',', 'col_name', ',', 'label', ',', 'description', ',', 'lst_validators', ',', 'filter_rel_fields', ',', 'form_props', ')', ':', 'query_func', '=', 'self', '.', '_get_related_query_func', '(', 'col_name', ',', 'filter_rel_fields', ')', 'get_pk_func', '=', 'self', '.', '_get_related_pk_func', '(', 'col_name', ')', 'extra_classes', '=', 'None', 'allow_blank', '=', 'True', 'if', 'not', 'self', '.', 'datamodel', '.', 'is_nullable', '(', 'col_name', ')', ':', 'lst_validators', '.', 'append', '(', 'validators', '.', 'DataRequired', '(', ')', ')', 'allow_blank', '=', 'False', 'else', ':', 'lst_validators', '.', 'append', '(', 'validators', '.', 'Optional', '(', ')', ')', 'form_props', '[', 'col_name', ']', '=', 'QuerySelectField', '(', 'label', ',', 'description', '=', 'description', ',', 'query_func', '=', 'query_func', ',', 'get_pk_func', '=', 'get_pk_func', ',', 'allow_blank', '=', 'allow_blank', ',', 'validators', '=', 'lst_validators', ',', 'widget', '=', 'Select2Widget', '(', 'extra_classes', '=', 'extra_classes', ')', ')', 'return', 'form_props']
Creates a WTForm field for many to one related fields,
will use a Select box based on a query. Will only
work with SQLAlchemy interface.
['Creates', 'a', 'WTForm', 'field', 'for', 'many', 'to', 'one', 'related', 'fields', 'will', 'use', 'a', 'Select', 'box', 'based', 'on', 'a', 'query', '.', 'Will', 'only', 'work', 'with', 'SQLAlchemy', 'interface', '.']
train
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/forms.py#L136-L161
8,191
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/work/work_client.py
WorkClient.create_plan
def create_plan(self, posted_plan, project):
    """CreatePlan.
    Add a new plan for the team
    :param :class:`<CreatePlan> <azure.devops.v5_0.work.models.CreatePlan>` posted_plan: Plan definition
    :param str project: Project ID or project name
    :rtype: :class:`<Plan> <azure.devops.v5_0.work.models.Plan>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    content = self._serialize.body(posted_plan, 'CreatePlan')
    response = self._send(http_method='POST',
                          location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
                          version='5.0',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Plan', response)
python
def create_plan(self, posted_plan, project):
    """CreatePlan.
    Add a new plan for the team
    :param :class:`<CreatePlan> <azure.devops.v5_0.work.models.CreatePlan>` posted_plan: Plan definition
    :param str project: Project ID or project name
    :rtype: :class:`<Plan> <azure.devops.v5_0.work.models.Plan>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    content = self._serialize.body(posted_plan, 'CreatePlan')
    response = self._send(http_method='POST',
                          location_id='0b42cb47-cd73-4810-ac90-19c9ba147453',
                          version='5.0',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Plan', response)
['def', 'create_plan', '(', 'self', ',', 'posted_plan', ',', 'project', ')', ':', 'route_values', '=', '{', '}', 'if', 'project', 'is', 'not', 'None', ':', 'route_values', '[', "'project'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'project'", ',', 'project', ',', "'str'", ')', 'content', '=', 'self', '.', '_serialize', '.', 'body', '(', 'posted_plan', ',', "'CreatePlan'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'POST'", ',', 'location_id', '=', "'0b42cb47-cd73-4810-ac90-19c9ba147453'", ',', 'version', '=', "'5.0'", ',', 'route_values', '=', 'route_values', ',', 'content', '=', 'content', ')', 'return', 'self', '.', '_deserialize', '(', "'Plan'", ',', 'response', ')']
CreatePlan.
Add a new plan for the team
:param :class:`<CreatePlan> <azure.devops.v5_0.work.models.CreatePlan>` posted_plan: Plan definition
:param str project: Project ID or project name
:rtype: :class:`<Plan> <azure.devops.v5_0.work.models.Plan>`
['CreatePlan', '.', 'Add', 'a', 'new', 'plan', 'for', 'the', 'team', ':', 'param', ':', 'class', ':', '<CreatePlan', '>', '<azure', '.', 'devops', '.', 'v5_0', '.', 'work', '.', 'models', '.', 'CreatePlan', '>', 'posted_plan', ':', 'Plan', 'definition', ':', 'param', 'str', 'project', ':', 'Project', 'ID', 'or', 'project', 'name', ':', 'rtype', ':', ':', 'class', ':', '<Plan', '>', '<azure', '.', 'devops', '.', 'v5_0', '.', 'work', '.', 'models', '.', 'Plan', '>']
train
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work/work_client.py#L984-L1000
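A hedged usage sketch; the organization URL, project name, and plan fields are placeholders, and a valid personal access token is assumed:

from azure.devops.connection import Connection
from azure.devops.v5_0.work.models import CreatePlan
from msrest.authentication import BasicAuthentication

credentials = BasicAuthentication('', 'my-pat')  # PAT placeholder
connection = Connection(base_url='https://dev.azure.com/myorg', creds=credentials)
work_client = connection.clients.get_work_client()

posted_plan = CreatePlan(name='Q3 Delivery',
                         type='deliveryTimelineView',
                         properties={})
plan = work_client.create_plan(posted_plan, project='MyProject')
print(plan.id)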
8,192
yeasy/hyperledger-py
hyperledger/api/chaincode.py
ChainCodeApiMixin.chaincode_query
def chaincode_query(self, chaincode_name, type=CHAINCODE_LANG_GO,
                    function="query", args=["a"], id=1,
                    secure_context=None,
                    confidentiality_level=CHAINCODE_CONFIDENTIAL_PUB,
                    metadata=None):
    """
    {
        "jsonrpc": "2.0",
        "method": "query",
        "params": {
            "type": 1,
            "chaincodeID": {
                "name": "52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586"
            },
            "ctorMsg": {
                "function": "query",
                "args": ["a"]
            }
        },
        "id": 3
    }

    :return: json obj of the chaincode instance
    """
    return self._exec_action(method="query", type=type,
                             chaincodeID={"name": chaincode_name},
                             function=function, args=args, id=id,
                             secure_context=secure_context,
                             confidentiality_level=confidentiality_level,
                             metadata=metadata)
python
def chaincode_query(self, chaincode_name, type=CHAINCODE_LANG_GO,
                    function="query", args=["a"], id=1,
                    secure_context=None,
                    confidentiality_level=CHAINCODE_CONFIDENTIAL_PUB,
                    metadata=None):
    """
    {
        "jsonrpc": "2.0",
        "method": "query",
        "params": {
            "type": 1,
            "chaincodeID": {
                "name": "52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586"
            },
            "ctorMsg": {
                "function": "query",
                "args": ["a"]
            }
        },
        "id": 3
    }

    :return: json obj of the chaincode instance
    """
    return self._exec_action(method="query", type=type,
                             chaincodeID={"name": chaincode_name},
                             function=function, args=args, id=id,
                             secure_context=secure_context,
                             confidentiality_level=confidentiality_level,
                             metadata=metadata)
['def', 'chaincode_query', '(', 'self', ',', 'chaincode_name', ',', 'type', '=', 'CHAINCODE_LANG_GO', ',', 'function', '=', '"query"', ',', 'args', '=', '[', '"a"', ']', ',', 'id', '=', '1', ',', 'secure_context', '=', 'None', ',', 'confidentiality_level', '=', 'CHAINCODE_CONFIDENTIAL_PUB', ',', 'metadata', '=', 'None', ')', ':', 'return', 'self', '.', '_exec_action', '(', 'method', '=', '"query"', ',', 'type', '=', 'type', ',', 'chaincodeID', '=', '{', '"name"', ':', 'chaincode_name', '}', ',', 'function', '=', 'function', ',', 'args', '=', 'args', ',', 'id', '=', 'id', ',', 'secure_context', '=', 'secure_context', ',', 'confidentiality_level', '=', 'confidentiality_level', ',', 'metadata', '=', 'metadata', ')']
{ "jsonrpc": "2.0", "method": "query", "params": { "type": 1, "chaincodeID":{ "name":"52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586" }, "ctorMsg": { "function":"query", "args":["a"] } }, "id": 3 } :return: json obj of the chaincode instance
['{', 'jsonrpc', ':', '2', '.', '0', 'method', ':', 'query', 'params', ':', '{', 'type', ':', '1', 'chaincodeID', ':', '{', 'name', ':', '52b0d803fc395b5e34d8d4a7cd69fb6aa00099b8fabed83504ac1c5d61a425aca5b3ad3bf96643ea4fdaac132c417c37b00f88fa800de7ece387d008a76d3586', '}', 'ctorMsg', ':', '{', 'function', ':', 'query', 'args', ':', '[', 'a', ']', '}', '}', 'id', ':', '3', '}', ':', 'return', ':', 'json', 'obj', 'of', 'the', 'chaincode', 'instance']
train
https://github.com/yeasy/hyperledger-py/blob/f24e9cc409b50628b911950466786be6fe74f09f/hyperledger/api/chaincode.py#L153-L182
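A hedged usage sketch; assumes a Hyperledger fabric 0.6-era peer listening on the illustrative URL, and the chaincode name is a placeholder for the long hash returned at deploy time:

from hyperledger.client import Client

c = Client(base_url='http://127.0.0.1:7050')
result = c.chaincode_query(chaincode_name='my-deployed-chaincode-name',
                           function='query',
                           args=['a'])
print(result)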
8,193
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py
brocade_ip_policy.hide_routemap_holder_route_map_content_set_ipv6_next_vrf_next_vrf_list_next_hop
def hide_routemap_holder_route_map_content_set_ipv6_next_vrf_next_vrf_list_next_hop(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(hide_routemap_holder, "route-map")
    name_key = ET.SubElement(route_map, "name")
    name_key.text = kwargs.pop('name')
    action_rm_key = ET.SubElement(route_map, "action-rm")
    action_rm_key.text = kwargs.pop('action_rm')
    instance_key = ET.SubElement(route_map, "instance")
    instance_key.text = kwargs.pop('instance')
    content = ET.SubElement(route_map, "content")
    set = ET.SubElement(content, "set")
    ipv6 = ET.SubElement(set, "ipv6")
    next_vrf = ET.SubElement(ipv6, "next-vrf")
    next_vrf_list = ET.SubElement(next_vrf, "next-vrf-list")
    vrf_key = ET.SubElement(next_vrf_list, "vrf")
    vrf_key.text = kwargs.pop('vrf')
    next_hop = ET.SubElement(next_vrf_list, "next-hop")
    next_hop.text = kwargs.pop('next_hop')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def hide_routemap_holder_route_map_content_set_ipv6_next_vrf_next_vrf_list_next_hop(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(hide_routemap_holder, "route-map")
    name_key = ET.SubElement(route_map, "name")
    name_key.text = kwargs.pop('name')
    action_rm_key = ET.SubElement(route_map, "action-rm")
    action_rm_key.text = kwargs.pop('action_rm')
    instance_key = ET.SubElement(route_map, "instance")
    instance_key.text = kwargs.pop('instance')
    content = ET.SubElement(route_map, "content")
    set = ET.SubElement(content, "set")
    ipv6 = ET.SubElement(set, "ipv6")
    next_vrf = ET.SubElement(ipv6, "next-vrf")
    next_vrf_list = ET.SubElement(next_vrf, "next-vrf-list")
    vrf_key = ET.SubElement(next_vrf_list, "vrf")
    vrf_key.text = kwargs.pop('vrf')
    next_hop = ET.SubElement(next_vrf_list, "next-hop")
    next_hop.text = kwargs.pop('next_hop')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
['def', 'hide_routemap_holder_route_map_content_set_ipv6_next_vrf_next_vrf_list_next_hop', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'hide_routemap_holder', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"hide-routemap-holder"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-ip-policy"', ')', 'route_map', '=', 'ET', '.', 'SubElement', '(', 'hide_routemap_holder', ',', '"route-map"', ')', 'name_key', '=', 'ET', '.', 'SubElement', '(', 'route_map', ',', '"name"', ')', 'name_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'name'", ')', 'action_rm_key', '=', 'ET', '.', 'SubElement', '(', 'route_map', ',', '"action-rm"', ')', 'action_rm_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'action_rm'", ')', 'instance_key', '=', 'ET', '.', 'SubElement', '(', 'route_map', ',', '"instance"', ')', 'instance_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'instance'", ')', 'content', '=', 'ET', '.', 'SubElement', '(', 'route_map', ',', '"content"', ')', 'set', '=', 'ET', '.', 'SubElement', '(', 'content', ',', '"set"', ')', 'ipv6', '=', 'ET', '.', 'SubElement', '(', 'set', ',', '"ipv6"', ')', 'next_vrf', '=', 'ET', '.', 'SubElement', '(', 'ipv6', ',', '"next-vrf"', ')', 'next_vrf_list', '=', 'ET', '.', 'SubElement', '(', 'next_vrf', ',', '"next-vrf-list"', ')', 'vrf_key', '=', 'ET', '.', 'SubElement', '(', 'next_vrf_list', ',', '"vrf"', ')', 'vrf_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'vrf'", ')', 'next_hop', '=', 'ET', '.', 'SubElement', '(', 'next_vrf_list', ',', '"next-hop"', ')', 'next_hop', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'next_hop'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py#L698-L721
8,194
priestc/moneywagon
moneywagon/currency_support.py
CurrencySupport.supported_currencies
def supported_currencies(self, project='moneywagon', level="full"):
    """
    Returns a list of all currencies that are supported by the passed in
    project and support level. Support level can be: "block",
    "transaction", "address" or "full".
    """
    ret = []
    if project == 'multiexplorer-wallet':
        for currency, data in self.sorted_crypto_data:
            if not data.get("bip44_coin_type"):
                continue
            if len(data.get('services', {}).get("push_tx", [])) < 1:
                continue
            if len(data.get('services', {}).get("historical_transactions", [])) < 1:
                continue
            if len(data.get('services', {}).get("single_transaction", [])) < 1:
                continue
            if len(data.get('services', {}).get("unspent_outputs", [])) < 1:
                continue
            ret.append(currency)

        altcore_tx = self.supported_currencies('altcore', level="transaction")
        return [x for x in ret if x in altcore_tx]

    for symbol, data in self.sorted_crypto_data:
        if symbol == '':  # template
            continue
        if self.is_all_supported(data, project, level):
            ret.append(symbol)

    return ret
python
def supported_currencies(self, project='moneywagon', level="full"):
    """
    Returns a list of all currencies that are supported by the passed in
    project and support level. Support level can be: "block",
    "transaction", "address" or "full".
    """
    ret = []
    if project == 'multiexplorer-wallet':
        for currency, data in self.sorted_crypto_data:
            if not data.get("bip44_coin_type"):
                continue
            if len(data.get('services', {}).get("push_tx", [])) < 1:
                continue
            if len(data.get('services', {}).get("historical_transactions", [])) < 1:
                continue
            if len(data.get('services', {}).get("single_transaction", [])) < 1:
                continue
            if len(data.get('services', {}).get("unspent_outputs", [])) < 1:
                continue
            ret.append(currency)

        altcore_tx = self.supported_currencies('altcore', level="transaction")
        return [x for x in ret if x in altcore_tx]

    for symbol, data in self.sorted_crypto_data:
        if symbol == '':  # template
            continue
        if self.is_all_supported(data, project, level):
            ret.append(symbol)

    return ret
['def', 'supported_currencies', '(', 'self', ',', 'project', '=', "'moneywagon'", ',', 'level', '=', '"full"', ')', ':', 'ret', '=', '[', ']', 'if', 'project', '==', "'multiexplorer-wallet'", ':', 'for', 'currency', ',', 'data', 'in', 'self', '.', 'sorted_crypto_data', ':', 'if', 'not', 'data', '.', 'get', '(', '"bip44_coin_type"', ')', ':', 'continue', 'if', 'len', '(', 'data', '.', 'get', '(', "'services'", ',', '{', '}', ')', '.', 'get', '(', '"push_tx"', ',', '[', ']', ')', ')', '<', '1', ':', 'continue', 'if', 'len', '(', 'data', '.', 'get', '(', "'services'", ',', '{', '}', ')', '.', 'get', '(', '"historical_transactions"', ',', '[', ']', ')', ')', '<', '1', ':', 'continue', 'if', 'len', '(', 'data', '.', 'get', '(', "'services'", ',', '{', '}', ')', '.', 'get', '(', '"single_transaction"', ',', '[', ']', ')', ')', '<', '1', ':', 'continue', 'if', 'len', '(', 'data', '.', 'get', '(', "'services'", ',', '{', '}', ')', '.', 'get', '(', '"unspent_outputs"', ',', '[', ']', ')', ')', '<', '1', ':', 'continue', 'ret', '.', 'append', '(', 'currency', ')', 'altcore_tx', '=', 'self', '.', 'supported_currencies', '(', "'altcore'", ',', 'level', '=', '"transaction"', ')', 'return', '[', 'x', 'for', 'x', 'in', 'ret', 'if', 'x', 'in', 'altcore_tx', ']', 'for', 'symbol', ',', 'data', 'in', 'self', '.', 'sorted_crypto_data', ':', 'if', 'symbol', '==', "''", ':', '# template', 'continue', 'if', 'self', '.', 'is_all_supported', '(', 'data', ',', 'project', ',', 'level', ')', ':', 'ret', '.', 'append', '(', 'symbol', ')', 'return', 'ret']
Returns a list of all currencies that are supported by the passed in
project and support level. Support level can be: "block",
"transaction", "address" or "full".
['Returns', 'a', 'list', 'of', 'all', 'currencies', 'that', 'are', 'supported', 'by', 'the', 'passed', 'in', 'project', 'and', 'support', 'level', '.', 'Support', 'level', 'can', 'be', ':', 'block', 'transaction', 'address', 'or', 'full', '.']
train
https://github.com/priestc/moneywagon/blob/00518f1f557dcca8b3031f46d3564c2baa0227a3/moneywagon/currency_support.py#L55-L84
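A hedged usage sketch; it assumes the CurrencySupport constructor takes no required arguments, which is an inference rather than something shown in the record:

from moneywagon.currency_support import CurrencySupport

cs = CurrencySupport()
print(cs.supported_currencies(project='moneywagon', level='address'))
print(cs.supported_currencies(project='multiexplorer-wallet'))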
8,195
bunq/sdk_python
bunq/sdk/model/core.py
SessionServer.get_referenced_user
def get_referenced_user(self):
    """
    :rtype: BunqModel
    """

    if self._user_person is not None:
        return self._user_person

    if self._user_company is not None:
        return self._user_company

    if self._user_api_key is not None:
        return self._user_api_key

    raise BunqException(self._ERROR_ALL_FIELD_IS_NULL)
python
def get_referenced_user(self):
    """
    :rtype: BunqModel
    """

    if self._user_person is not None:
        return self._user_person

    if self._user_company is not None:
        return self._user_company

    if self._user_api_key is not None:
        return self._user_api_key

    raise BunqException(self._ERROR_ALL_FIELD_IS_NULL)
['def', 'get_referenced_user', '(', 'self', ')', ':', 'if', 'self', '.', '_user_person', 'is', 'not', 'None', ':', 'return', 'self', '.', '_user_person', 'if', 'self', '.', '_user_company', 'is', 'not', 'None', ':', 'return', 'self', '.', '_user_company', 'if', 'self', '.', '_user_api_key', 'is', 'not', 'None', ':', 'return', 'self', '.', '_user_api_key', 'raise', 'BunqException', '(', 'self', '.', '_ERROR_ALL_FIELD_IS_NULL', ')']
:rtype: BunqModel
[':', 'rtype', ':', 'BunqModel']
train
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/core.py#L507-L521
8,196
awslabs/serverless-application-model
samtranslator/translator/logical_id_generator.py
LogicalIdGenerator.get_hash
def get_hash(self, length=HASH_LENGTH):
    """
    Generate and return a hash of data that can be used as suffix of logicalId

    :return: Hash of data if it was present
    :rtype: string
    """

    data_hash = ""
    if not self.data_str:
        return data_hash

    encoded_data_str = self.data_str
    if sys.version_info.major == 2:
        # In Py2, only unicode needs to be encoded.
        if isinstance(self.data_str, unicode):
            encoded_data_str = self.data_str.encode('utf-8')
    else:
        # data_str should always be unicode on python 3
        encoded_data_str = self.data_str.encode('utf-8')

    data_hash = hashlib.sha1(encoded_data_str).hexdigest()

    return data_hash[:length]
python
def get_hash(self, length=HASH_LENGTH):
    """
    Generate and return a hash of data that can be used as suffix of logicalId

    :return: Hash of data if it was present
    :rtype: string
    """

    data_hash = ""
    if not self.data_str:
        return data_hash

    encoded_data_str = self.data_str
    if sys.version_info.major == 2:
        # In Py2, only unicode needs to be encoded.
        if isinstance(self.data_str, unicode):
            encoded_data_str = self.data_str.encode('utf-8')
    else:
        # data_str should always be unicode on python 3
        encoded_data_str = self.data_str.encode('utf-8')

    data_hash = hashlib.sha1(encoded_data_str).hexdigest()

    return data_hash[:length]
['def', 'get_hash', '(', 'self', ',', 'length', '=', 'HASH_LENGTH', ')', ':', 'data_hash', '=', '""', 'if', 'not', 'self', '.', 'data_str', ':', 'return', 'data_hash', 'encoded_data_str', '=', 'self', '.', 'data_str', 'if', 'sys', '.', 'version_info', '.', 'major', '==', '2', ':', '# In Py2, only unicode needs to be encoded.', 'if', 'isinstance', '(', 'self', '.', 'data_str', ',', 'unicode', ')', ':', 'encoded_data_str', '=', 'self', '.', 'data_str', '.', 'encode', '(', "'utf-8'", ')', 'else', ':', '# data_str should always be unicode on python 3', 'encoded_data_str', '=', 'self', '.', 'data_str', '.', 'encode', '(', "'utf-8'", ')', 'data_hash', '=', 'hashlib', '.', 'sha1', '(', 'encoded_data_str', ')', '.', 'hexdigest', '(', ')', 'return', 'data_hash', '[', ':', 'length', ']']
Generate and return a hash of data that can be used as suffix of logicalId

:return: Hash of data if it was present
:rtype: string
['Generate', 'and', 'return', 'a', 'hash', 'of', 'data', 'that', 'can', 'be', 'used', 'as', 'suffix', 'of', 'logicalId']
train
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/translator/logical_id_generator.py#L49-L72
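A hedged usage sketch; the prefix and data object are illustrative, and the (prefix, data_obj) constructor shape is assumed from the SAM translator source rather than shown in this record:

from samtranslator.translator.logical_id_generator import LogicalIdGenerator

gen = LogicalIdGenerator('MyFunction', data_obj={'Runtime': 'python3.7'})
print(gen.get_hash())  # short sha1 prefix of the serialized data (HASH_LENGTH chars)
print(gen.gen())       # logical id with the hash suffix appended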
8,197
Gjum/agarnet
agarnet/client.py
Client.send_respawn
def send_respawn(self):
    """
    Respawns the player.
    """
    nick = self.player.nick
    self.send_struct('<B%iH' % len(nick), 0, *map(ord, nick))
python
def send_respawn(self):
    """
    Respawns the player.
    """
    nick = self.player.nick
    self.send_struct('<B%iH' % len(nick), 0, *map(ord, nick))
['def', 'send_respawn', '(', 'self', ')', ':', 'nick', '=', 'self', '.', 'player', '.', 'nick', 'self', '.', 'send_struct', '(', "'<B%iH'", '%', 'len', '(', 'nick', ')', ',', '0', ',', '*', 'map', '(', 'ord', ',', 'nick', ')', ')']
Respawns the player.
['Respawns', 'the', 'player', '.']
train
https://github.com/Gjum/agarnet/blob/63365ba32aa31c23a6d61438b556ceb8ed65631f/agarnet/client.py#L406-L411
8,198
sdispater/poetry
poetry/masonry/api.py
build_sdist
def build_sdist(sdist_directory, config_settings=None):
    """Builds an sdist, places it in sdist_directory"""
    poetry = Poetry.create(".")

    path = SdistBuilder(poetry, SystemEnv(Path(sys.prefix)), NullIO()).build(
        Path(sdist_directory)
    )

    return unicode(path.name)
python
def build_sdist(sdist_directory, config_settings=None):
    """Builds an sdist, places it in sdist_directory"""
    poetry = Poetry.create(".")

    path = SdistBuilder(poetry, SystemEnv(Path(sys.prefix)), NullIO()).build(
        Path(sdist_directory)
    )

    return unicode(path.name)
['def', 'build_sdist', '(', 'sdist_directory', ',', 'config_settings', '=', 'None', ')', ':', 'poetry', '=', 'Poetry', '.', 'create', '(', '"."', ')', 'path', '=', 'SdistBuilder', '(', 'poetry', ',', 'SystemEnv', '(', 'Path', '(', 'sys', '.', 'prefix', ')', ')', ',', 'NullIO', '(', ')', ')', '.', 'build', '(', 'Path', '(', 'sdist_directory', ')', ')', 'return', 'unicode', '(', 'path', '.', 'name', ')']
Builds an sdist, places it in sdist_directory
['Builds', 'an', 'sdist', 'places', 'it', 'in', 'sdist_directory']
train
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/masonry/api.py#L65-L73
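A hedged sketch of how a PEP 517 frontend would drive this hook; normally pip or `poetry build` calls it, and invoking it by hand assumes the working directory holds a valid pyproject.toml:

from poetry.masonry.api import build_sdist

archive_name = build_sdist('dist')  # writes dist/<name>-<version>.tar.gz
print(archive_name)  # the hook returns just the archive's filename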
8,199
jobovy/galpy
galpy/df/evolveddiskdf.py
evolveddiskdf.meanvR
def meanvR(self, R, t=0., nsigma=None, deg=False, phi=0.,
           epsrel=1.e-02, epsabs=1.e-05,
           grid=None, gridpoints=101, returnGrid=False,
           surfacemass=None,
           hierarchgrid=False, nlevels=2, integrate_method='dopr54_c'):
    """
    NAME:

       meanvR

    PURPOSE:

       calculate the mean vR of the velocity distribution at (R,phi)

    INPUT:

       R - radius at which to calculate the moment (/ro) (can be Quantity)
       phi= azimuth (rad unless deg=True; can be Quantity)
       t= time at which to evaluate the DF (can be a list or ndarray; if this
          is the case, list needs to be in descending order and equally
          spaced) (can be Quantity)
       surfacemass= if set use this pre-calculated surfacemass
       nsigma - number of sigma to integrate the velocities over (based on
          an estimate, so be generous)
       deg= azimuth is in degree (default=False); do not set this when
          giving phi as a Quantity
       epsrel, epsabs - scipy.integrate keywords (the integration calculates
          the ratio of this vmoment to that of the initial DF)
       grid= if set to True, build a grid and use that to evaluate
          integrals; if set to a grid-object (such as returned by this
          procedure), use this grid
       gridpoints= number of points to use for the grid in 1D (default=101)
       returnGrid= if True, return the grid object (default=False)
       hierarchgrid= if True, use a hierarchical grid (default=False)
       nlevels= number of hierarchical levels for the hierarchical grid
       integrate_method= orbit.integrate method argument

    OUTPUT:

       mean vR

    HISTORY:

       2011-03-31 - Written - Bovy (NYU)

    """
    if isinstance(grid, evolveddiskdfGrid) or \
            isinstance(grid, evolveddiskdfHierarchicalGrid):
        grido = grid
        vmomentR = self.vmomentsurfacemass(R, 1, 0, deg=deg, t=t, phi=phi,
                                           nsigma=nsigma, epsrel=epsrel,
                                           epsabs=epsabs, grid=grid,
                                           gridpoints=gridpoints,
                                           returnGrid=False,
                                           hierarchgrid=hierarchgrid,
                                           nlevels=nlevels,
                                           integrate_method=integrate_method)
    elif isinstance(grid, bool) and grid:
        #Precalculate the grid
        (vmomentR, grido) = self.vmomentsurfacemass(R, 1, 0, deg=deg, t=t,
                                                    nsigma=nsigma, phi=phi,
                                                    epsrel=epsrel,
                                                    epsabs=epsabs, grid=grid,
                                                    gridpoints=gridpoints,
                                                    returnGrid=True,
                                                    hierarchgrid=hierarchgrid,
                                                    nlevels=nlevels,
                                                    integrate_method=integrate_method)
    else:
        grido = False
        vmomentR = self.vmomentsurfacemass(R, 1, 0, deg=deg, t=t, phi=phi,
                                           nsigma=nsigma, epsrel=epsrel,
                                           epsabs=epsabs, grid=grid,
                                           gridpoints=gridpoints,
                                           returnGrid=False,
                                           hierarchgrid=hierarchgrid,
                                           nlevels=nlevels,
                                           integrate_method=integrate_method)
    if surfacemass is None:
        surfacemass = self.vmomentsurfacemass(R, 0, 0, deg=deg, t=t, phi=phi,
                                              nsigma=nsigma, epsrel=epsrel,
                                              epsabs=epsabs, grid=grido,
                                              gridpoints=gridpoints,
                                              returnGrid=False,
                                              hierarchgrid=hierarchgrid,
                                              nlevels=nlevels,
                                              integrate_method=integrate_method)
    out = vmomentR / surfacemass
    if returnGrid and ((isinstance(grid, bool) and grid) or
                       isinstance(grid, evolveddiskdfGrid) or
                       isinstance(grid, evolveddiskdfHierarchicalGrid)):
        return (out, grido)
    else:
        return out
python
def meanvR(self, R, t=0., nsigma=None, deg=False, phi=0.,
           epsrel=1.e-02, epsabs=1.e-05,
           grid=None, gridpoints=101, returnGrid=False,
           surfacemass=None,
           hierarchgrid=False, nlevels=2, integrate_method='dopr54_c'):
    """
    NAME:

       meanvR

    PURPOSE:

       calculate the mean vR of the velocity distribution at (R,phi)

    INPUT:

       R - radius at which to calculate the moment (/ro) (can be Quantity)
       phi= azimuth (rad unless deg=True; can be Quantity)
       t= time at which to evaluate the DF (can be a list or ndarray; if this
          is the case, list needs to be in descending order and equally
          spaced) (can be Quantity)
       surfacemass= if set use this pre-calculated surfacemass
       nsigma - number of sigma to integrate the velocities over (based on
          an estimate, so be generous)
       deg= azimuth is in degree (default=False); do not set this when
          giving phi as a Quantity
       epsrel, epsabs - scipy.integrate keywords (the integration calculates
          the ratio of this vmoment to that of the initial DF)
       grid= if set to True, build a grid and use that to evaluate
          integrals; if set to a grid-object (such as returned by this
          procedure), use this grid
       gridpoints= number of points to use for the grid in 1D (default=101)
       returnGrid= if True, return the grid object (default=False)
       hierarchgrid= if True, use a hierarchical grid (default=False)
       nlevels= number of hierarchical levels for the hierarchical grid
       integrate_method= orbit.integrate method argument

    OUTPUT:

       mean vR

    HISTORY:

       2011-03-31 - Written - Bovy (NYU)

    """
    if isinstance(grid, evolveddiskdfGrid) or \
            isinstance(grid, evolveddiskdfHierarchicalGrid):
        grido = grid
        vmomentR = self.vmomentsurfacemass(R, 1, 0, deg=deg, t=t, phi=phi,
                                           nsigma=nsigma, epsrel=epsrel,
                                           epsabs=epsabs, grid=grid,
                                           gridpoints=gridpoints,
                                           returnGrid=False,
                                           hierarchgrid=hierarchgrid,
                                           nlevels=nlevels,
                                           integrate_method=integrate_method)
    elif isinstance(grid, bool) and grid:
        #Precalculate the grid
        (vmomentR, grido) = self.vmomentsurfacemass(R, 1, 0, deg=deg, t=t,
                                                    nsigma=nsigma, phi=phi,
                                                    epsrel=epsrel,
                                                    epsabs=epsabs, grid=grid,
                                                    gridpoints=gridpoints,
                                                    returnGrid=True,
                                                    hierarchgrid=hierarchgrid,
                                                    nlevels=nlevels,
                                                    integrate_method=integrate_method)
    else:
        grido = False
        vmomentR = self.vmomentsurfacemass(R, 1, 0, deg=deg, t=t, phi=phi,
                                           nsigma=nsigma, epsrel=epsrel,
                                           epsabs=epsabs, grid=grid,
                                           gridpoints=gridpoints,
                                           returnGrid=False,
                                           hierarchgrid=hierarchgrid,
                                           nlevels=nlevels,
                                           integrate_method=integrate_method)
    if surfacemass is None:
        surfacemass = self.vmomentsurfacemass(R, 0, 0, deg=deg, t=t, phi=phi,
                                              nsigma=nsigma, epsrel=epsrel,
                                              epsabs=epsabs, grid=grido,
                                              gridpoints=gridpoints,
                                              returnGrid=False,
                                              hierarchgrid=hierarchgrid,
                                              nlevels=nlevels,
                                              integrate_method=integrate_method)
    out = vmomentR / surfacemass
    if returnGrid and ((isinstance(grid, bool) and grid) or
                       isinstance(grid, evolveddiskdfGrid) or
                       isinstance(grid, evolveddiskdfHierarchicalGrid)):
        return (out, grido)
    else:
        return out
['def', 'meanvR', '(', 'self', ',', 'R', ',', 't', '=', '0.', ',', 'nsigma', '=', 'None', ',', 'deg', '=', 'False', ',', 'phi', '=', '0.', ',', 'epsrel', '=', '1.e-02', ',', 'epsabs', '=', '1.e-05', ',', 'grid', '=', 'None', ',', 'gridpoints', '=', '101', ',', 'returnGrid', '=', 'False', ',', 'surfacemass', '=', 'None', ',', 'hierarchgrid', '=', 'False', ',', 'nlevels', '=', '2', ',', 'integrate_method', '=', "'dopr54_c'", ')', ':', 'if', 'isinstance', '(', 'grid', ',', 'evolveddiskdfGrid', ')', 'or', 'isinstance', '(', 'grid', ',', 'evolveddiskdfHierarchicalGrid', ')', ':', 'grido', '=', 'grid', 'vmomentR', '=', 'self', '.', 'vmomentsurfacemass', '(', 'R', ',', '1', ',', '0', ',', 'deg', '=', 'deg', ',', 't', '=', 't', ',', 'phi', '=', 'phi', ',', 'nsigma', '=', 'nsigma', ',', 'epsrel', '=', 'epsrel', ',', 'epsabs', '=', 'epsabs', ',', 'grid', '=', 'grid', ',', 'gridpoints', '=', 'gridpoints', ',', 'returnGrid', '=', 'False', ',', 'hierarchgrid', '=', 'hierarchgrid', ',', 'nlevels', '=', 'nlevels', ',', 'integrate_method', '=', 'integrate_method', ')', 'elif', 'isinstance', '(', 'grid', ',', 'bool', ')', 'and', 'grid', ':', '#Precalculate the grid', '(', 'vmomentR', ',', 'grido', ')', '=', 'self', '.', 'vmomentsurfacemass', '(', 'R', ',', '1', ',', '0', ',', 'deg', '=', 'deg', ',', 't', '=', 't', ',', 'nsigma', '=', 'nsigma', ',', 'phi', '=', 'phi', ',', 'epsrel', '=', 'epsrel', ',', 'epsabs', '=', 'epsabs', ',', 'grid', '=', 'grid', ',', 'gridpoints', '=', 'gridpoints', ',', 'returnGrid', '=', 'True', ',', 'hierarchgrid', '=', 'hierarchgrid', ',', 'nlevels', '=', 'nlevels', ',', 'integrate_method', '=', 'integrate_method', ')', 'else', ':', 'grido', '=', 'False', 'vmomentR', '=', 'self', '.', 'vmomentsurfacemass', '(', 'R', ',', '1', ',', '0', ',', 'deg', '=', 'deg', ',', 't', '=', 't', ',', 'phi', '=', 'phi', ',', 'nsigma', '=', 'nsigma', ',', 'epsrel', '=', 'epsrel', ',', 'epsabs', '=', 'epsabs', ',', 'grid', '=', 'grid', ',', 'gridpoints', '=', 'gridpoints', ',', 'returnGrid', '=', 'False', ',', 'hierarchgrid', '=', 'hierarchgrid', ',', 'nlevels', '=', 'nlevels', ',', 'integrate_method', '=', 'integrate_method', ')', 'if', 'surfacemass', 'is', 'None', ':', 'surfacemass', '=', 'self', '.', 'vmomentsurfacemass', '(', 'R', ',', '0', ',', '0', ',', 'deg', '=', 'deg', ',', 't', '=', 't', ',', 'phi', '=', 'phi', ',', 'nsigma', '=', 'nsigma', ',', 'epsrel', '=', 'epsrel', ',', 'epsabs', '=', 'epsabs', ',', 'grid', '=', 'grido', ',', 'gridpoints', '=', 'gridpoints', ',', 'returnGrid', '=', 'False', ',', 'hierarchgrid', '=', 'hierarchgrid', ',', 'nlevels', '=', 'nlevels', ',', 'integrate_method', '=', 'integrate_method', ')', 'out', '=', 'vmomentR', '/', 'surfacemass', 'if', 'returnGrid', 'and', '(', '(', 'isinstance', '(', 'grid', ',', 'bool', ')', 'and', 'grid', ')', 'or', 'isinstance', '(', 'grid', ',', 'evolveddiskdfGrid', ')', 'or', 'isinstance', '(', 'grid', ',', 'evolveddiskdfHierarchicalGrid', ')', ')', ':', 'return', '(', 'out', ',', 'grido', ')', 'else', ':', 'return', 'out']
NAME:

   meanvR

PURPOSE:

   calculate the mean vR of the velocity distribution at (R,phi)

INPUT:

   R - radius at which to calculate the moment (/ro) (can be Quantity)
   phi= azimuth (rad unless deg=True; can be Quantity)
   t= time at which to evaluate the DF (can be a list or ndarray; if this
      is the case, list needs to be in descending order and equally
      spaced) (can be Quantity)
   surfacemass= if set use this pre-calculated surfacemass
   nsigma - number of sigma to integrate the velocities over (based on
      an estimate, so be generous)
   deg= azimuth is in degree (default=False); do not set this when
      giving phi as a Quantity
   epsrel, epsabs - scipy.integrate keywords (the integration calculates
      the ratio of this vmoment to that of the initial DF)
   grid= if set to True, build a grid and use that to evaluate integrals;
      if set to a grid-object (such as returned by this procedure), use
      this grid
   gridpoints= number of points to use for the grid in 1D (default=101)
   returnGrid= if True, return the grid object (default=False)
   hierarchgrid= if True, use a hierarchical grid (default=False)
   nlevels= number of hierarchical levels for the hierarchical grid
   integrate_method= orbit.integrate method argument

OUTPUT:

   mean vR

HISTORY:

   2011-03-31 - Written - Bovy (NYU)
['NAME', ':']
train
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/evolveddiskdf.py#L597-L695
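A hedged usage sketch following galpy's documented evolveddiskdf pattern; the potential mix, start time, and evaluation point are illustrative:

from galpy.df import dehnendf, evolveddiskdf
from galpy.potential import LogarithmicHaloPotential, DehnenBarPotential

idf = dehnendf(beta=0.)  # initial axisymmetric disk DF
pot = [LogarithmicHaloPotential(normalize=1.), DehnenBarPotential()]
edf = evolveddiskdf(idf, pot, to=-40.)  # evolve from an illustrative t=-40

# Mean radial velocity at R=0.9, phi=0.3 rad, using a precomputed grid
mvr, grid = edf.meanvR(0.9, phi=0.3, grid=True, gridpoints=51, returnGrid=True)
print(mvr)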