Dataset columns:
  repo              string (7 to 54 chars)
  path              string (4 to 192 chars)
  url               string (87 to 284 chars)
  code              string (78 to 104k chars)
  code_tokens       list
  docstring         string (1 to 46.9k chars)
  docstring_tokens  list
  language          string (1 distinct value)
  partition         string (3 distinct values)
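Each record below carries these nine fields. For orientation, joining a record's code_tokens with spaces gives a rough single-line rendering of its code field. A minimal sketch (the row dict is a hypothetical stand-in for one record of this dataset):

    # Hypothetical row: reassemble a rough code string from the token list.
    row = {
        "repo": "remram44/rpaths",
        "code_tokens": ["def", "matches", "(", "self", ",", "path", ")", ":"],
        "partition": "train",
    }
    print(" ".join(row["code_tokens"]))  # def matches ( self , path ) :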
remram44/rpaths
rpaths.py
https://github.com/remram44/rpaths/blob/e4ff55d985c4d643d9fd214539d45af39ae5a7cd/rpaths.py#L1078-L1085
def matches(self, path):
    """Tests if the given path matches the pattern.

    Note that the unicode translation of the path is matched, so
    replacement characters might have been added.
    """
    path = self._prepare_path(path)
    return self.full_regex.search(path) is not None
[ "def", "matches", "(", "self", ",", "path", ")", ":", "path", "=", "self", ".", "_prepare_path", "(", "path", ")", "return", "self", ".", "full_regex", ".", "search", "(", "path", ")", "is", "not", "None" ]
Tests if the given path matches the pattern. Note that the unicode translation of the path is matched, so replacement characters might have been added.
[ "Tests", "if", "the", "given", "path", "matches", "the", "pattern", "." ]
python
train
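The matches record above tests a precompiled full_regex against a prepared path. A standalone approximation with the standard library, assuming a simple glob pattern (rpaths' real translation also handles unicode replacement characters):

    import fnmatch
    import re

    # Translate a glob into a regex and test a path against it, as matches()
    # does with its precompiled full_regex.
    full_regex = re.compile(fnmatch.translate('*.py'))
    print(full_regex.search('rpaths.py') is not None)  # True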
wright-group/WrightTools
WrightTools/data/_data.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_data.py#L153-L160
def shape(self) -> tuple:
    """Shape."""
    try:
        assert self._shape is not None
    except (AssertionError, AttributeError):
        self._shape = wt_kit.joint_shape(*self.variables)
    finally:
        return self._shape
[ "def", "shape", "(", "self", ")", "->", "tuple", ":", "try", ":", "assert", "self", ".", "_shape", "is", "not", "None", "except", "(", "AssertionError", ",", "AttributeError", ")", ":", "self", ".", "_shape", "=", "wt_kit", ".", "joint_shape", "(", "*", "self", ".", "variables", ")", "finally", ":", "return", "self", ".", "_shape" ]
Shape.
[ "Shape", "." ]
python
train
Fizzadar/pyinfra
pyinfra/modules/server.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/server.py#L316-L453
def user(
    state, host, name,
    present=True, home=None, shell=None, group=None, groups=None,
    public_keys=None, delete_keys=False, ensure_home=True,
    system=False, uid=None,
):
    '''
    Add/remove/update system users & their ssh `authorized_keys`.

    + name: name of the user to ensure
    + present: whether this user should exist
    + home: the user's home directory
    + shell: the user's shell
    + group: the user's primary group
    + groups: the user's secondary groups
    + public_keys: list of public keys to attach to this user, ``home`` must be specified
    + delete_keys: whether to remove any keys not specified in ``public_keys``
    + ensure_home: whether to ensure the ``home`` directory exists
    + system: whether to create a system account

    Home directory:
        When ``ensure_home`` or ``public_keys`` are provided, ``home`` defaults
        to ``/home/{name}``.
    '''
    users = host.fact.users or {}
    user = users.get(name)

    if groups is None:
        groups = []

    if home is None:
        home = '/home/{0}'.format(name)

    # User not wanted?
    if not present:
        if user:
            yield 'userdel {0}'.format(name)
        return

    # User doesn't exist but we want them?
    if present and user is None:
        # Create the user w/home/shell
        args = []

        if home:
            args.append('-d {0}'.format(home))

        if shell:
            args.append('-s {0}'.format(shell))

        if group:
            args.append('-g {0}'.format(group))

        if groups:
            args.append('-G {0}'.format(','.join(groups)))

        if system and host.fact.os not in ('OpenBSD', 'NetBSD'):
            args.append('-r')

        if uid:
            args.append('--uid {0}'.format(uid))

        yield 'useradd {0} {1}'.format(' '.join(args), name)

    # User exists and we want them, check home/shell/keys
    else:
        args = []

        # Check homedir
        if home and user['home'] != home:
            args.append('-d {0}'.format(home))

        # Check shell
        if shell and user['shell'] != shell:
            args.append('-s {0}'.format(shell))

        # Check primary group
        if group and user['group'] != group:
            args.append('-g {0}'.format(group))

        # Check secondary groups, if defined
        if groups and set(user['groups']) != set(groups):
            args.append('-G {0}'.format(','.join(groups)))

        # Need to mod the user?
        if args:
            yield 'usermod {0} {1}'.format(' '.join(args), name)

    # Ensure home directory ownership
    if ensure_home:
        yield files.directory(
            state, host, home,
            user=name, group=name,
        )

    # Add SSH keys
    if public_keys is not None:
        # Ensure .ssh directory
        # note that this always outputs commands unless the SSH user has access to the
        # authorized_keys file, ie the SSH user is the user defined in this function
        yield files.directory(
            state, host,
            '{0}/.ssh'.format(home),
            user=name, group=name,
            mode=700,
        )

        filename = '{0}/.ssh/authorized_keys'.format(home)

        if delete_keys:
            # Create a whole new authorized_keys file
            keys_file = six.StringIO('{0}\n'.format(
                '\n'.join(public_keys),
            ))

            # And ensure it exists
            yield files.put(
                state, host,
                keys_file, filename,
                user=name, group=name,
                mode=600,
            )

        else:
            # Ensure authorized_keys exists
            yield files.file(
                state, host, filename,
                user=name, group=name,
                mode=600,
            )

            # And every public key is present
            for key in public_keys:
                yield files.line(
                    state, host,
                    filename, key,
                )
[ "def", "user", "(", "state", ",", "host", ",", "name", ",", "present", "=", "True", ",", "home", "=", "None", ",", "shell", "=", "None", ",", "group", "=", "None", ",", "groups", "=", "None", ",", "public_keys", "=", "None", ",", "delete_keys", "=", "False", ",", "ensure_home", "=", "True", ",", "system", "=", "False", ",", "uid", "=", "None", ",", ")", ":", "users", "=", "host", ".", "fact", ".", "users", "or", "{", "}", "user", "=", "users", ".", "get", "(", "name", ")", "if", "groups", "is", "None", ":", "groups", "=", "[", "]", "if", "home", "is", "None", ":", "home", "=", "'/home/{0}'", ".", "format", "(", "name", ")", "# User not wanted?", "if", "not", "present", ":", "if", "user", ":", "yield", "'userdel {0}'", ".", "format", "(", "name", ")", "return", "# User doesn't exist but we want them?", "if", "present", "and", "user", "is", "None", ":", "# Create the user w/home/shell", "args", "=", "[", "]", "if", "home", ":", "args", ".", "append", "(", "'-d {0}'", ".", "format", "(", "home", ")", ")", "if", "shell", ":", "args", ".", "append", "(", "'-s {0}'", ".", "format", "(", "shell", ")", ")", "if", "group", ":", "args", ".", "append", "(", "'-g {0}'", ".", "format", "(", "group", ")", ")", "if", "groups", ":", "args", ".", "append", "(", "'-G {0}'", ".", "format", "(", "','", ".", "join", "(", "groups", ")", ")", ")", "if", "system", "and", "host", ".", "fact", ".", "os", "not", "in", "(", "'OpenBSD'", ",", "'NetBSD'", ")", ":", "args", ".", "append", "(", "'-r'", ")", "if", "uid", ":", "args", ".", "append", "(", "'--uid {0}'", ".", "format", "(", "uid", ")", ")", "yield", "'useradd {0} {1}'", ".", "format", "(", "' '", ".", "join", "(", "args", ")", ",", "name", ")", "# User exists and we want them, check home/shell/keys", "else", ":", "args", "=", "[", "]", "# Check homedir", "if", "home", "and", "user", "[", "'home'", "]", "!=", "home", ":", "args", ".", "append", "(", "'-d {0}'", ".", "format", "(", "home", ")", ")", "# Check shell", "if", "shell", "and", "user", "[", "'shell'", "]", "!=", "shell", ":", "args", ".", "append", "(", "'-s {0}'", ".", "format", "(", "shell", ")", ")", "# Check primary group", "if", "group", "and", "user", "[", "'group'", "]", "!=", "group", ":", "args", ".", "append", "(", "'-g {0}'", ".", "format", "(", "group", ")", ")", "# Check secondary groups, if defined", "if", "groups", "and", "set", "(", "user", "[", "'groups'", "]", ")", "!=", "set", "(", "groups", ")", ":", "args", ".", "append", "(", "'-G {0}'", ".", "format", "(", "','", ".", "join", "(", "groups", ")", ")", ")", "# Need to mod the user?", "if", "args", ":", "yield", "'usermod {0} {1}'", ".", "format", "(", "' '", ".", "join", "(", "args", ")", ",", "name", ")", "# Ensure home directory ownership", "if", "ensure_home", ":", "yield", "files", ".", "directory", "(", "state", ",", "host", ",", "home", ",", "user", "=", "name", ",", "group", "=", "name", ",", ")", "# Add SSH keys", "if", "public_keys", "is", "not", "None", ":", "# Ensure .ssh directory", "# note that this always outputs commands unless the SSH user has access to the", "# authorized_keys file, ie the SSH user is the user defined in this function", "yield", "files", ".", "directory", "(", "state", ",", "host", ",", "'{0}/.ssh'", ".", "format", "(", "home", ")", ",", "user", "=", "name", ",", "group", "=", "name", ",", "mode", "=", "700", ",", ")", "filename", "=", "'{0}/.ssh/authorized_keys'", ".", "format", "(", "home", ")", "if", "delete_keys", ":", "# Create a whole new authorized_keys file", "keys_file", "=", "six", 
".", "StringIO", "(", "'{0}\\n'", ".", "format", "(", "'\\n'", ".", "join", "(", "public_keys", ")", ",", ")", ")", "# And ensure it exists", "yield", "files", ".", "put", "(", "state", ",", "host", ",", "keys_file", ",", "filename", ",", "user", "=", "name", ",", "group", "=", "name", ",", "mode", "=", "600", ",", ")", "else", ":", "# Ensure authorized_keys exists", "yield", "files", ".", "file", "(", "state", ",", "host", ",", "filename", ",", "user", "=", "name", ",", "group", "=", "name", ",", "mode", "=", "600", ",", ")", "# And every public key is present", "for", "key", "in", "public_keys", ":", "yield", "files", ".", "line", "(", "state", ",", "host", ",", "filename", ",", "key", ",", ")" ]
Add/remove/update system users & their ssh `authorized_keys`. + name: name of the user to ensure + present: whether this user should exist + home: the user's home directory + shell: the user's shell + group: the user's primary group + groups: the user's secondary groups + public_keys: list of public keys to attach to this user, ``home`` must be specified + delete_keys: whether to remove any keys not specified in ``public_keys`` + ensure_home: whether to ensure the ``home`` directory exists + system: whether to create a system account Home directory: When ``ensure_home`` or ``public_keys`` are provided, ``home`` defaults to ``/home/{name}``.
[ "Add", "/", "remove", "/", "update", "system", "users", "&", "their", "ssh", "authorized_keys", "." ]
python
train
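In a pyinfra 0.x deploy file, the user operation above would be invoked without the state/host arguments, which the framework injects. A minimal hedged sketch (the account name and key are hypothetical; the import path follows the 0.x layout shown in the record):

    # deploy.py - sketch of calling the operation above under pyinfra 0.x
    from pyinfra.modules import server

    server.user(
        'deploy',                     # hypothetical account name
        shell='/bin/bash',
        groups=['sudo'],
        public_keys=['ssh-ed25519 AAAA... demo@host'],  # hypothetical key
    )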
LinkCareServices/period
period/main.py
https://github.com/LinkCareServices/period/blob/014f3c766940658904c52547d8cf8c12d4895e07/period/main.py#L149-L203
def flatten(self, lst=None):
    """syntax.flatten(token_stream) - compile period tokens

    This turns a stream of tokens into p-code for the trivial stack
    machine that evaluates period expressions in in_period.
    """
    tree = []
    uops = []      # accumulated unary operations
    s = Stack()
    group_len = 0  # in current precedence group
    for item in lst:
        if type(item) == type([]):
            # Subexpression.
            tree = tree + self.flatten(item)
            group_len = group_len + 1
            # Unary ops dump, for things like: '!(Monday|Wednesday)'
            for uop in uops:
                tree.append(uop)
            uops = []
        elif item in self.ops and item not in self.uops:
            # Operator.
            if not s.empty():
                prev_op = s.pop()
                # If the precedence of the previous operation is
                # higher, then dump out everything so far, ensuring the
                # order of evaluation.
                if _precedence[prev_op] > _precedence[item]:
                    s.push(prev_op)  # put it back
                    for i in range(group_len - 1):
                        tree.append(s.pop())
                    group_len = 0
                else:
                    s.push(prev_op)
                s.push(item)
            else:
                s.push(item)
        elif item in self.uops:
            uops.append(item)
        else:
            # Token of some sort.
            tree.append(item)
            group_len = group_len + 1
    # Dump any unary operations.
    for uop in uops:
        tree.append(uop)
    uops = []
    while not s.empty():
        tree.append(s.pop())
    # Drop any remaining unary operations.
    for uop in uops:
        tree.append(uop)
    return tree
[ "def", "flatten", "(", "self", ",", "lst", "=", "None", ")", ":", "tree", "=", "[", "]", "uops", "=", "[", "]", "# accumulated unary operations", "s", "=", "Stack", "(", ")", "group_len", "=", "0", "# in current precendence group", "for", "item", "in", "lst", ":", "if", "type", "(", "item", ")", "==", "type", "(", "[", "]", ")", ":", "# Subexpression.", "tree", "=", "tree", "+", "self", ".", "flatten", "(", "item", ")", "group_len", "=", "group_len", "+", "1", "# Unary ops dump, for things like: '!(Monday|Wednesday)'", "for", "uop", "in", "uops", ":", "tree", ".", "append", "(", "uop", ")", "uops", "=", "[", "]", "elif", "item", "in", "self", ".", "ops", "and", "item", "not", "in", "self", ".", "uops", ":", "# Operator.", "if", "not", "s", ".", "empty", "(", ")", ":", "prev_op", "=", "s", ".", "pop", "(", ")", "# If the precendence of the previous operation is", "# higher then dump out everything so far, ensuring the", "# order of evaluation.", "if", "_precedence", "[", "prev_op", "]", ">", "_precedence", "[", "item", "]", ":", "s", ".", "push", "(", "prev_op", ")", "# put it back", "for", "i", "in", "range", "(", "group_len", "-", "1", ")", ":", "tree", ".", "append", "(", "s", ".", "pop", "(", ")", ")", "group_len", "=", "0", "else", ":", "s", ".", "push", "(", "prev_op", ")", "s", ".", "push", "(", "item", ")", "else", ":", "s", ".", "push", "(", "item", ")", "elif", "item", "in", "self", ".", "uops", ":", "uops", ".", "append", "(", "item", ")", "else", ":", "# Token of some sort.", "tree", ".", "append", "(", "item", ")", "group_len", "=", "group_len", "+", "1", "# Dump any unary operations.", "for", "uop", "in", "uops", ":", "tree", ".", "append", "(", "uop", ")", "uops", "=", "[", "]", "while", "not", "s", ".", "empty", "(", ")", ":", "tree", ".", "append", "(", "s", ".", "pop", "(", ")", ")", "# Drop any remaining unary operations.", "for", "uop", "in", "uops", ":", "tree", ".", "append", "(", "uop", ")", "return", "tree" ]
syntax.flatten(token_stream) - compile period tokens This turns a stream of tokens into p-code for the trivial stack machine that evaluates period expressions in in_period.
[ "syntax", ".", "flatten", "(", "token_stream", ")", "-", "compile", "period", "tokens" ]
python
train
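flatten is essentially a shunting-yard pass: operands go straight to the output, operators wait on a stack until a lower-precedence operator forces them out. A self-contained illustration of that core idea (not period's actual grammar):

    # Infix -> postfix ("p-code") with a precedence table, as flatten() does.
    PRECEDENCE = {'|': 1, '&': 2}

    def to_postfix(tokens):
        out, ops = [], []
        for tok in tokens:
            if tok in PRECEDENCE:
                # Higher-precedence operators on the stack must fire first.
                while ops and PRECEDENCE[ops[-1]] >= PRECEDENCE[tok]:
                    out.append(ops.pop())
                ops.append(tok)
            else:
                out.append(tok)
        while ops:
            out.append(ops.pop())
        return out

    print(to_postfix(['Mon', '|', 'Tue', '&', 'Wed']))
    # ['Mon', 'Tue', 'Wed', '&', '|']  i.e. Mon | (Tue & Wed)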
wecatch/app-turbo
turbo/register.py
https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/register.py#L14-L25
def register_app(app_name, app_setting, web_application_setting, mainfile, package_space):
    """insert current project root path into sys path
    """
    from turbo import log
    app_config.app_name = app_name
    app_config.app_setting = app_setting
    app_config.project_name = os.path.basename(get_base_dir(mainfile, 2))
    app_config.web_application_setting.update(web_application_setting)
    if app_setting.get('session_config'):
        app_config.session_config.update(app_setting['session_config'])
    log.getLogger(**app_setting.log)
    _install_app(package_space)
[ "def", "register_app", "(", "app_name", ",", "app_setting", ",", "web_application_setting", ",", "mainfile", ",", "package_space", ")", ":", "from", "turbo", "import", "log", "app_config", ".", "app_name", "=", "app_name", "app_config", ".", "app_setting", "=", "app_setting", "app_config", ".", "project_name", "=", "os", ".", "path", ".", "basename", "(", "get_base_dir", "(", "mainfile", ",", "2", ")", ")", "app_config", ".", "web_application_setting", ".", "update", "(", "web_application_setting", ")", "if", "app_setting", ".", "get", "(", "'session_config'", ")", ":", "app_config", ".", "session_config", ".", "update", "(", "app_setting", "[", "'session_config'", "]", ")", "log", ".", "getLogger", "(", "*", "*", "app_setting", ".", "log", ")", "_install_app", "(", "package_space", ")" ]
insert current project root path into sys path
[ "insert", "current", "project", "root", "path", "into", "sys", "path" ]
python
train
MycroftAI/mycroft-skills-manager
msm/skill_entry.py
https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/skill_entry.py#L136-L142
def attach(self, remote_entry):
    """Attach a remote entry to a local entry"""
    self.name = remote_entry.name
    self.sha = remote_entry.sha
    self.url = remote_entry.url
    self.author = remote_entry.author
    return self
[ "def", "attach", "(", "self", ",", "remote_entry", ")", ":", "self", ".", "name", "=", "remote_entry", ".", "name", "self", ".", "sha", "=", "remote_entry", ".", "sha", "self", ".", "url", "=", "remote_entry", ".", "url", "self", ".", "author", "=", "remote_entry", ".", "author", "return", "self" ]
Attach a remote entry to a local entry
[ "Attach", "a", "remote", "entry", "to", "a", "local", "entry" ]
python
train
DLR-RM/RAFCON
source/rafcon/core/states/execution_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/execution_state.py#L132-L173
def run(self):
    """ This defines the sequence of actions that are taken when the execution state is executed

    :return:
    """
    if self.is_root_state:
        self.execution_history.push_call_history_item(self, CallType.EXECUTE, None, self.input_data)

    logger.debug("Running {0}{1}".format(self, " (backwards)" if self.backward_execution else ""))
    if self.backward_execution:
        self.setup_backward_run()
    else:
        self.setup_run()

    try:
        outcome = self._execute(self.input_data, self.output_data, self.backward_execution)
        self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE

        if self.backward_execution:
            # outcome handling is not required as we are in backward mode and the execution order is fixed
            result = self.finalize()
        else:
            # check output data
            self.check_output_data_type()
            result = self.finalize(outcome)

        if self.is_root_state:
            self.execution_history.push_return_history_item(self, CallType.EXECUTE, None, self.output_data)

        return result

    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        formatted_exc = traceback.format_exception(exc_type, exc_value, exc_traceback)
        truncated_exc = []
        for line in formatted_exc:
            if os.path.join("rafcon", "core") not in line:
                truncated_exc.append(line)
        logger.error("{0} had an internal error: {1}: {2}\n{3}".format(
            self, type(e).__name__, e, ''.join(truncated_exc)))
        # write error to the output_data of the state
        self.output_data["error"] = e
        self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE
        return self.finalize(Outcome(-1, "aborted"))
[ "def", "run", "(", "self", ")", ":", "if", "self", ".", "is_root_state", ":", "self", ".", "execution_history", ".", "push_call_history_item", "(", "self", ",", "CallType", ".", "EXECUTE", ",", "None", ",", "self", ".", "input_data", ")", "logger", ".", "debug", "(", "\"Running {0}{1}\"", ".", "format", "(", "self", ",", "\" (backwards)\"", "if", "self", ".", "backward_execution", "else", "\"\"", ")", ")", "if", "self", ".", "backward_execution", ":", "self", ".", "setup_backward_run", "(", ")", "else", ":", "self", ".", "setup_run", "(", ")", "try", ":", "outcome", "=", "self", ".", "_execute", "(", "self", ".", "input_data", ",", "self", ".", "output_data", ",", "self", ".", "backward_execution", ")", "self", ".", "state_execution_status", "=", "StateExecutionStatus", ".", "WAIT_FOR_NEXT_STATE", "if", "self", ".", "backward_execution", ":", "# outcome handling is not required as we are in backward mode and the execution order is fixed", "result", "=", "self", ".", "finalize", "(", ")", "else", ":", "# check output data", "self", ".", "check_output_data_type", "(", ")", "result", "=", "self", ".", "finalize", "(", "outcome", ")", "if", "self", ".", "is_root_state", ":", "self", ".", "execution_history", ".", "push_return_history_item", "(", "self", ",", "CallType", ".", "EXECUTE", ",", "None", ",", "self", ".", "output_data", ")", "return", "result", "except", "Exception", "as", "e", ":", "exc_type", ",", "exc_value", ",", "exc_traceback", "=", "sys", ".", "exc_info", "(", ")", "formatted_exc", "=", "traceback", ".", "format_exception", "(", "exc_type", ",", "exc_value", ",", "exc_traceback", ")", "truncated_exc", "=", "[", "]", "for", "line", "in", "formatted_exc", ":", "if", "os", ".", "path", ".", "join", "(", "\"rafcon\"", ",", "\"core\"", ")", "not", "in", "line", ":", "truncated_exc", ".", "append", "(", "line", ")", "logger", ".", "error", "(", "\"{0} had an internal error: {1}: {2}\\n{3}\"", ".", "format", "(", "self", ",", "type", "(", "e", ")", ".", "__name__", ",", "e", ",", "''", ".", "join", "(", "truncated_exc", ")", ")", ")", "# write error to the output_data of the state", "self", ".", "output_data", "[", "\"error\"", "]", "=", "e", "self", ".", "state_execution_status", "=", "StateExecutionStatus", ".", "WAIT_FOR_NEXT_STATE", "return", "self", ".", "finalize", "(", "Outcome", "(", "-", "1", ",", "\"aborted\"", ")", ")" ]
This defines the sequence of actions that are taken when the execution state is executed :return:
[ "This", "defines", "the", "sequence", "of", "actions", "that", "are", "taken", "when", "the", "execution", "state", "is", "executed" ]
python
train
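The except branch above keeps only traceback lines that do not come from rafcon/core, so reported errors point at the state's own script rather than framework internals. The filtering step in isolation:

    import os
    import sys
    import traceback

    try:
        raise ValueError("boom")
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        formatted = traceback.format_exception(exc_type, exc_value, exc_tb)
        # Drop frames from the framework's own package path.
        kept = [line for line in formatted if os.path.join("rafcon", "core") not in line]
        print(''.join(kept), end='')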
cloudant/python-cloudant
src/cloudant/database.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L1378-L1401
def unshare_database(self, username):
    """
    Removes all sharing with the named user for the current remote
    database.  This will remove the entry for the user from the security
    document.  To modify permissions, use the
    :func:`~cloudant.database.CloudantDatabase.share_database` method
    instead.

    :param str username: Cloudant user to unshare the database from.

    :returns: Unshare database status in JSON format
    """
    doc = self.security_document()
    data = doc.get('cloudant', {})
    if username in data:
        del data[username]
    doc['cloudant'] = data
    resp = self.r_session.put(
        self.security_url,
        data=json.dumps(doc, cls=self.client.encoder),
        headers={'Content-Type': 'application/json'}
    )
    resp.raise_for_status()
    return response_to_json_dict(resp)
[ "def", "unshare_database", "(", "self", ",", "username", ")", ":", "doc", "=", "self", ".", "security_document", "(", ")", "data", "=", "doc", ".", "get", "(", "'cloudant'", ",", "{", "}", ")", "if", "username", "in", "data", ":", "del", "data", "[", "username", "]", "doc", "[", "'cloudant'", "]", "=", "data", "resp", "=", "self", ".", "r_session", ".", "put", "(", "self", ".", "security_url", ",", "data", "=", "json", ".", "dumps", "(", "doc", ",", "cls", "=", "self", ".", "client", ".", "encoder", ")", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", ")", "resp", ".", "raise_for_status", "(", ")", "return", "response_to_json_dict", "(", "resp", ")" ]
Removes all sharing with the named user for the current remote database. This will remove the entry for the user from the security document. To modify permissions, use the :func:`~cloudant.database.CloudantDatabase.share_database` method instead. :param str username: Cloudant user to unshare the database from. :returns: Unshare database status in JSON format
[ "Removes", "all", "sharing", "with", "the", "named", "user", "for", "the", "current", "remote", "database", ".", "This", "will", "remove", "the", "entry", "for", "the", "user", "from", "the", "security", "document", ".", "To", "modify", "permissions", "use", "the", ":", "func", ":", "~cloudant", ".", "database", ".", "CloudantDatabase", ".", "share_database", "method", "instead", "." ]
python
train
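unshare_database is a read-modify-write on the security document: fetch it, drop the user's entry from the 'cloudant' section, and PUT the result back. The document manipulation on its own, with a hypothetical security document:

    import json

    doc = {"cloudant": {"alice": ["_reader"], "bob": ["_writer"]}}
    username = "bob"
    data = doc.get("cloudant", {})
    if username in data:
        del data[username]
    doc["cloudant"] = data
    print(json.dumps(doc))  # {"cloudant": {"alice": ["_reader"]}}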
box/genty
genty/genty.py
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty.py#L338-L366
def _build_dataset_method(method, dataset):
    """
    Return a fabricated method that marshals the dataset into parameters
    for given 'method'

    :param method: The underlying test method.
    :type method: `callable`
    :param dataset: Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset: `tuple` or :class:`GentyArgs`
    :return: Return an unbound function that will become a test method
    :rtype: `function`
    """
    if isinstance(dataset, GentyArgs):
        test_method = lambda my_self: method(
            my_self,
            *dataset.args,
            **dataset.kwargs
        )
    else:
        test_method = lambda my_self: method(
            my_self,
            *dataset
        )
    return test_method
[ "def", "_build_dataset_method", "(", "method", ",", "dataset", ")", ":", "if", "isinstance", "(", "dataset", ",", "GentyArgs", ")", ":", "test_method", "=", "lambda", "my_self", ":", "method", "(", "my_self", ",", "*", "dataset", ".", "args", ",", "*", "*", "dataset", ".", "kwargs", ")", "else", ":", "test_method", "=", "lambda", "my_self", ":", "method", "(", "my_self", ",", "*", "dataset", ")", "return", "test_method" ]
Return a fabricated method that marshals the dataset into parameters for given 'method' :param method: The underlying test method. :type method: `callable` :param dataset: Tuple or GentyArgs instance containing the args of the dataset. :type dataset: `tuple` or :class:`GentyArgs` :return: Return an unbound function that will become a test method :rtype: `function`
[ "Return", "a", "fabricated", "method", "that", "marshals", "the", "dataset", "into", "parameters", "for", "given", "method", ":", "param", "method", ":", "The", "underlying", "test", "method", ".", ":", "type", "method", ":", "callable", ":", "param", "dataset", ":", "Tuple", "or", "GentyArgs", "instance", "containing", "the", "args", "of", "the", "dataset", ".", ":", "type", "dataset", ":", "tuple", "or", ":", "class", ":", "GentyArgs", ":", "return", ":", "Return", "an", "unbound", "function", "that", "will", "become", "a", "test", "method", ":", "rtype", ":", "function" ]
python
train
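The builder returns a fresh lambda per dataset, so each generated test closes over its own arguments instead of sharing a loop variable. A tiny demonstration of that closure behaviour:

    # Each call to build() captures its own `dataset`.
    def build(method, dataset):
        return lambda self: method(self, *dataset)

    calls = [build(lambda self, *args: args, d) for d in [(1,), (2,)]]
    print(calls[0](None), calls[1](None))  # (1,) (2,)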
pantsbuild/pants
src/python/pants/build_graph/injectables_mixin.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/injectables_mixin.py#L58-L70
def injectables_spec_for_key(self, key):
    """Given a key, yield a singular spec representing that key.

    :API: public
    """
    specs = self.injectables_specs_for_key(key)
    specs_len = len(specs)
    if specs_len == 0:
        return None
    if specs_len != 1:
        raise self.TooManySpecsForKey('injectables spec mapping for key included {} elements, '
                                      'expected 1'.format(specs_len))
    return specs[0]
[ "def", "injectables_spec_for_key", "(", "self", ",", "key", ")", ":", "specs", "=", "self", ".", "injectables_specs_for_key", "(", "key", ")", "specs_len", "=", "len", "(", "specs", ")", "if", "specs_len", "==", "0", ":", "return", "None", "if", "specs_len", "!=", "1", ":", "raise", "self", ".", "TooManySpecsForKey", "(", "'injectables spec mapping for key included {} elements, '", "'expected 1'", ".", "format", "(", "specs_len", ")", ")", "return", "specs", "[", "0", "]" ]
Given a key, yield a singular spec representing that key. :API: public
[ "Given", "a", "key", "yield", "a", "singular", "spec", "representing", "that", "key", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_editor/outcomes.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_editor/outcomes.py#L125-L171
def on_to_state_edited(self, renderer, path, new_state_identifier):
    """Connects the outcome with a transition to the newly set state

    :param Gtk.CellRendererText renderer: The cell renderer that was edited
    :param str path: The path string of the renderer
    :param str new_state_identifier: An identifier for the new state that was selected
    """
    def do_self_transition_check(t_id, new_state_identifier):
        # add self transition meta data
        if 'self' in new_state_identifier.split('.'):
            insert_self_transition_meta_data(self.model, t_id, 'outcomes_widget', combined_action=True)

    outcome_id = self.list_store[path][self.ID_STORAGE_ID]
    if outcome_id in self.dict_to_other_state or outcome_id in self.dict_to_other_outcome:
        transition_parent_state = self.model.parent.state
        if outcome_id in self.dict_to_other_state:
            t_id = self.dict_to_other_state[outcome_id][2]
        else:
            t_id = self.dict_to_other_outcome[outcome_id][2]
        if new_state_identifier is not None:
            to_state_id = new_state_identifier.split('.')[1]
            if not transition_parent_state.transitions[t_id].to_state == to_state_id:
                try:
                    transition_parent_state.transitions[t_id].modify_target(to_state=to_state_id)
                    do_self_transition_check(t_id, new_state_identifier)
                except ValueError as e:
                    logger.warning("The target of transition couldn't be modified: {0}".format(e))
        else:
            try:
                transition_parent_state.remove_transition(t_id)
            except AttributeError as e:
                logger.warning("The transition couldn't be removed: {0}".format(e))
    else:
        # there is no transition till now
        if new_state_identifier is not None and not self.model.state.is_root_state:
            transition_parent_state = self.model.parent.state
            to_state_id = new_state_identifier.split('.')[1]
            try:
                t_id = transition_parent_state.add_transition(from_state_id=self.model.state.state_id,
                                                              from_outcome=outcome_id,
                                                              to_state_id=to_state_id,
                                                              to_outcome=None,
                                                              transition_id=None)
                do_self_transition_check(t_id, new_state_identifier)
            except (ValueError, TypeError) as e:
                logger.warning("The transition couldn't be added: {0}".format(e))
                return
        else:
            logger.debug("outcome-editor got None in to_state-combo-change no transition is added")
[ "def", "on_to_state_edited", "(", "self", ",", "renderer", ",", "path", ",", "new_state_identifier", ")", ":", "def", "do_self_transition_check", "(", "t_id", ",", "new_state_identifier", ")", ":", "# add self transition meta data", "if", "'self'", "in", "new_state_identifier", ".", "split", "(", "'.'", ")", ":", "insert_self_transition_meta_data", "(", "self", ".", "model", ",", "t_id", ",", "'outcomes_widget'", ",", "combined_action", "=", "True", ")", "outcome_id", "=", "self", ".", "list_store", "[", "path", "]", "[", "self", ".", "ID_STORAGE_ID", "]", "if", "outcome_id", "in", "self", ".", "dict_to_other_state", "or", "outcome_id", "in", "self", ".", "dict_to_other_outcome", ":", "transition_parent_state", "=", "self", ".", "model", ".", "parent", ".", "state", "if", "outcome_id", "in", "self", ".", "dict_to_other_state", ":", "t_id", "=", "self", ".", "dict_to_other_state", "[", "outcome_id", "]", "[", "2", "]", "else", ":", "t_id", "=", "self", ".", "dict_to_other_outcome", "[", "outcome_id", "]", "[", "2", "]", "if", "new_state_identifier", "is", "not", "None", ":", "to_state_id", "=", "new_state_identifier", ".", "split", "(", "'.'", ")", "[", "1", "]", "if", "not", "transition_parent_state", ".", "transitions", "[", "t_id", "]", ".", "to_state", "==", "to_state_id", ":", "try", ":", "transition_parent_state", ".", "transitions", "[", "t_id", "]", ".", "modify_target", "(", "to_state", "=", "to_state_id", ")", "do_self_transition_check", "(", "t_id", ",", "new_state_identifier", ")", "except", "ValueError", "as", "e", ":", "logger", ".", "warning", "(", "\"The target of transition couldn't be modified: {0}\"", ".", "format", "(", "e", ")", ")", "else", ":", "try", ":", "transition_parent_state", ".", "remove_transition", "(", "t_id", ")", "except", "AttributeError", "as", "e", ":", "logger", ".", "warning", "(", "\"The transition couldn't be removed: {0}\"", ".", "format", "(", "e", ")", ")", "else", ":", "# there is no transition till now", "if", "new_state_identifier", "is", "not", "None", "and", "not", "self", ".", "model", ".", "state", ".", "is_root_state", ":", "transition_parent_state", "=", "self", ".", "model", ".", "parent", ".", "state", "to_state_id", "=", "new_state_identifier", ".", "split", "(", "'.'", ")", "[", "1", "]", "try", ":", "t_id", "=", "transition_parent_state", ".", "add_transition", "(", "from_state_id", "=", "self", ".", "model", ".", "state", ".", "state_id", ",", "from_outcome", "=", "outcome_id", ",", "to_state_id", "=", "to_state_id", ",", "to_outcome", "=", "None", ",", "transition_id", "=", "None", ")", "do_self_transition_check", "(", "t_id", ",", "new_state_identifier", ")", "except", "(", "ValueError", ",", "TypeError", ")", "as", "e", ":", "logger", ".", "warning", "(", "\"The transition couldn't be added: {0}\"", ".", "format", "(", "e", ")", ")", "return", "else", ":", "logger", ".", "debug", "(", "\"outcome-editor got None in to_state-combo-change no transition is added\"", ")" ]
Connects the outcome with a transition to the newly set state :param Gtk.CellRendererText renderer: The cell renderer that was edited :param str path: The path string of the renderer :param str new_state_identifier: An identifier for the new state that was selected
[ "Connects", "the", "outcome", "with", "a", "transition", "to", "the", "newly", "set", "state" ]
python
train
agramian/subprocess-manager
subprocess_manager/nbstream_readerwriter.py
https://github.com/agramian/subprocess-manager/blob/fff9ff2ddab644a86f96e1ccf5df142c482a8247/subprocess_manager/nbstream_readerwriter.py#L54-L61
def readline(self, timeout=0.1):
    """Try to read a line from the stream queue.
    """
    try:
        return self._q.get(block=timeout is not None,
                           timeout=timeout)
    except Empty:
        return None
[ "def", "readline", "(", "self", ",", "timeout", "=", "0.1", ")", ":", "try", ":", "return", "self", ".", "_q", ".", "get", "(", "block", "=", "timeout", "is", "not", "None", ",", "timeout", "=", "timeout", ")", "except", "Empty", ":", "return", "None" ]
Try to read a line from the stream queue.
[ "Try", "to", "read", "a", "line", "from", "the", "stream", "queue", "." ]
python
train
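readline assumes a background thread is feeding self._q; the consumer polls with a timeout and treats an empty queue as "no line yet". A self-contained sketch of that producer/consumer pattern:

    import io
    import threading
    from queue import Queue, Empty

    def pump(stream, q):
        # Producer: push lines onto the queue as they arrive.
        for line in iter(stream.readline, ''):
            q.put(line)

    q = Queue()
    threading.Thread(target=pump, args=(io.StringIO('one\ntwo\n'), q), daemon=True).start()

    try:
        print(q.get(block=True, timeout=0.1), end='')  # one
    except Empty:
        print(None)  # no line arrived within the timeout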
secdev/scapy
scapy/modules/krack/automaton.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/modules/krack/automaton.py#L72-L155
def parse_args(self, ap_mac, ssid, passphrase,
               channel=None,
               # KRACK attack options
               double_3handshake=True,
               encrypt_3handshake=True,
               wait_3handshake=0,
               double_gtk_refresh=True,
               arp_target_ip=None,
               arp_source_ip=None,
               wait_gtk=10,
               **kwargs):
    """
    Mandatory arguments:
    @iface: interface to use (must be in monitor mode)
    @ap_mac: AP's MAC
    @ssid: AP's SSID
    @passphrase: AP's Passphrase (min 8 char.)

    Optional arguments:
    @channel: used by the interface. Default 6, autodetected on windows

    Krack attacks options:

     - Msg 3/4 handshake replay:
       double_3handshake: double the 3/4 handshake message
       encrypt_3handshake: encrypt the second 3/4 handshake message
       wait_3handshake: time to wait (in sec.) before sending the second 3/4
     - double GTK rekeying:
       double_gtk_refresh: double the 1/2 GTK rekeying message
       wait_gtk: time to wait (in sec.) before sending the GTK rekeying
       arp_target_ip: Client IP to use in ARP req. (to detect attack success)
                      If None, use a DHCP server
       arp_source_ip: Server IP to use in ARP req. (to detect attack success)
                      If None, use the DHCP server gateway address
    """
    super(KrackAP, self).parse_args(**kwargs)

    # Main AP options
    self.mac = ap_mac
    self.ssid = ssid
    self.passphrase = passphrase
    if channel is None:
        if WINDOWS:
            try:
                channel = kwargs.get("iface", conf.iface).channel()
            except (Scapy_Exception, AttributeError):
                channel = 6
        else:
            channel = 6
    self.channel = channel

    # Internal structures
    self.last_iv = None
    self.client = None
    self.seq_num = count()
    self.replay_counter = count()
    self.time_handshake_end = None
    self.dhcp_server = DHCPOverWPA(send_func=self.send_ether_over_wpa,
                                   pool=Net("192.168.42.128/25"),
                                   network="192.168.42.0/24",
                                   gw="192.168.42.1")
    self.arp_sent = []
    self.arp_to_send = 0
    self.arp_retry = 0

    # Bit 0: 3way handshake sent
    # Bit 1: GTK rekeying sent
    # Bit 2: ARP response obtained
    self.krack_state = 0

    # Krack options
    self.double_3handshake = double_3handshake
    self.encrypt_3handshake = encrypt_3handshake
    self.wait_3handshake = wait_3handshake
    self.double_gtk_refresh = double_gtk_refresh
    self.arp_target_ip = arp_target_ip
    if arp_source_ip is None:
        # Use the DHCP server Gateway address
        arp_source_ip = self.dhcp_server.gw
    self.arp_source_ip = arp_source_ip
    self.wait_gtk = wait_gtk

    # May take several seconds
    self.install_PMK()
[ "def", "parse_args", "(", "self", ",", "ap_mac", ",", "ssid", ",", "passphrase", ",", "channel", "=", "None", ",", "# KRACK attack options", "double_3handshake", "=", "True", ",", "encrypt_3handshake", "=", "True", ",", "wait_3handshake", "=", "0", ",", "double_gtk_refresh", "=", "True", ",", "arp_target_ip", "=", "None", ",", "arp_source_ip", "=", "None", ",", "wait_gtk", "=", "10", ",", "*", "*", "kwargs", ")", ":", "super", "(", "KrackAP", ",", "self", ")", ".", "parse_args", "(", "*", "*", "kwargs", ")", "# Main AP options", "self", ".", "mac", "=", "ap_mac", "self", ".", "ssid", "=", "ssid", "self", ".", "passphrase", "=", "passphrase", "if", "channel", "is", "None", ":", "if", "WINDOWS", ":", "try", ":", "channel", "=", "kwargs", ".", "get", "(", "\"iface\"", ",", "conf", ".", "iface", ")", ".", "channel", "(", ")", "except", "(", "Scapy_Exception", ",", "AttributeError", ")", ":", "channel", "=", "6", "else", ":", "channel", "=", "6", "self", ".", "channel", "=", "channel", "# Internal structures", "self", ".", "last_iv", "=", "None", "self", ".", "client", "=", "None", "self", ".", "seq_num", "=", "count", "(", ")", "self", ".", "replay_counter", "=", "count", "(", ")", "self", ".", "time_handshake_end", "=", "None", "self", ".", "dhcp_server", "=", "DHCPOverWPA", "(", "send_func", "=", "self", ".", "send_ether_over_wpa", ",", "pool", "=", "Net", "(", "\"192.168.42.128/25\"", ")", ",", "network", "=", "\"192.168.42.0/24\"", ",", "gw", "=", "\"192.168.42.1\"", ")", "self", ".", "arp_sent", "=", "[", "]", "self", ".", "arp_to_send", "=", "0", "self", ".", "arp_retry", "=", "0", "# Bit 0: 3way handshake sent", "# Bit 1: GTK rekeying sent", "# Bit 2: ARP response obtained", "self", ".", "krack_state", "=", "0", "# Krack options", "self", ".", "double_3handshake", "=", "double_3handshake", "self", ".", "encrypt_3handshake", "=", "encrypt_3handshake", "self", ".", "wait_3handshake", "=", "wait_3handshake", "self", ".", "double_gtk_refresh", "=", "double_gtk_refresh", "self", ".", "arp_target_ip", "=", "arp_target_ip", "if", "arp_source_ip", "is", "None", ":", "# Use the DHCP server Gateway address", "arp_source_ip", "=", "self", ".", "dhcp_server", ".", "gw", "self", ".", "arp_source_ip", "=", "arp_source_ip", "self", ".", "wait_gtk", "=", "wait_gtk", "# May take several seconds", "self", ".", "install_PMK", "(", ")" ]
Mandatory arguments: @iface: interface to use (must be in monitor mode) @ap_mac: AP's MAC @ssid: AP's SSID @passphrase: AP's Passphrase (min 8 char.) Optional arguments: @channel: used by the interface. Default 6, autodetected on windows Krack attacks options: - Msg 3/4 handshake replay: double_3handshake: double the 3/4 handshake message encrypt_3handshake: encrypt the second 3/4 handshake message wait_3handshake: time to wait (in sec.) before sending the second 3/4 - double GTK rekeying: double_gtk_refresh: double the 1/2 GTK rekeying message wait_gtk: time to wait (in sec.) before sending the GTK rekeying arp_target_ip: Client IP to use in ARP req. (to detect attack success) If None, use a DHCP server arp_source_ip: Server IP to use in ARP req. (to detect attack success) If None, use the DHCP server gateway address
[ "Mandatory", "arguments", ":", "@iface", ":", "interface", "to", "use", "(", "must", "be", "in", "monitor", "mode", ")", "@ap_mac", ":", "AP", "s", "MAC", "@ssid", ":", "AP", "s", "SSID", "@passphrase", ":", "AP", "s", "Passphrase", "(", "min", "8", "char", ".", ")" ]
python
train
pymc-devs/pymc
pymc/StepMethods.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1488-L1496
def trace2array(self, sl):
    """Return an array with the trace of all stochastics, sliced by sl."""
    chain = []
    for stochastic in self.stochastics:
        tr = stochastic.trace.gettrace(slicing=sl)
        if tr is None:
            raise AttributeError
        chain.append(tr)
    return np.hstack(chain)
[ "def", "trace2array", "(", "self", ",", "sl", ")", ":", "chain", "=", "[", "]", "for", "stochastic", "in", "self", ".", "stochastics", ":", "tr", "=", "stochastic", ".", "trace", ".", "gettrace", "(", "slicing", "=", "sl", ")", "if", "tr", "is", "None", ":", "raise", "AttributeError", "chain", ".", "append", "(", "tr", ")", "return", "np", ".", "hstack", "(", "chain", ")" ]
Return an array with the trace of all stochastics, sliced by sl.
[ "Return", "an", "array", "with", "the", "trace", "of", "all", "stochastics", "sliced", "by", "sl", "." ]
python
train
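np.hstack joins the per-stochastic traces column-wise, so each row of the result is one joint sample across all stochastics. The stacking behaviour in isolation:

    import numpy as np

    a = np.arange(6).reshape(3, 2)   # trace of a 2-dim stochastic, 3 samples
    b = np.arange(3).reshape(3, 1)   # trace of a scalar stochastic
    print(np.hstack([a, b]).shape)   # (3, 3)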
jrief/djangocms-cascade
cmsplugin_cascade/apps.py
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/apps.py#L97-L110
def revoke_permissions(self, ctype):
    """
    Remove all permissions for the content type to be removed
    """
    ContentType = apps.get_model('contenttypes', 'ContentType')
    try:
        Permission = apps.get_model('auth', 'Permission')
    except LookupError:
        return
    codenames = ['{0}_{1}'.format(perm, ctype) for perm in self.default_permissions]
    cascade_element = apps.get_model(self.label, 'cascadeelement')
    element_ctype = ContentType.objects.get_for_model(cascade_element)
    Permission.objects.filter(content_type=element_ctype, codename__in=codenames).delete()
[ "def", "revoke_permissions", "(", "self", ",", "ctype", ")", ":", "ContentType", "=", "apps", ".", "get_model", "(", "'contenttypes'", ",", "'ContentType'", ")", "try", ":", "Permission", "=", "apps", ".", "get_model", "(", "'auth'", ",", "'Permission'", ")", "except", "LookupError", ":", "return", "codenames", "=", "[", "'{0}_{1}'", ".", "format", "(", "perm", ",", "ctype", ")", "for", "perm", "in", "self", ".", "default_permissions", "]", "cascade_element", "=", "apps", ".", "get_model", "(", "self", ".", "label", ",", "'cascadeelement'", ")", "element_ctype", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "cascade_element", ")", "Permission", ".", "objects", ".", "filter", "(", "content_type", "=", "element_ctype", ",", "codename__in", "=", "codenames", ")", ".", "delete", "(", ")" ]
Remove all permissions for the content type to be removed
[ "Remove", "all", "permissions", "for", "the", "content", "type", "to", "be", "removed" ]
python
train
qacafe/cdrouter.py
cdrouter/users.py
https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/users.py#L188-L195
def bulk_copy(self, ids):
    """Bulk copy a set of users.

    :param ids: Int list of user IDs.
    :return: :class:`users.User <users.User>` list
    """
    schema = UserSchema()
    return self.service.bulk_copy(self.base, self.RESOURCE, ids, schema)
[ "def", "bulk_copy", "(", "self", ",", "ids", ")", ":", "schema", "=", "UserSchema", "(", ")", "return", "self", ".", "service", ".", "bulk_copy", "(", "self", ".", "base", ",", "self", ".", "RESOURCE", ",", "ids", ",", "schema", ")" ]
Bulk copy a set of users. :param ids: Int list of user IDs. :return: :class:`users.User <users.User>` list
[ "Bulk", "copy", "a", "set", "of", "users", "." ]
python
train
nicferrier/md
src/mdlib/client.py
https://github.com/nicferrier/md/blob/302ca8882dae060fb15bd5ae470d8e661fb67ec4/src/mdlib/client.py#L187-L197
def _get(self, msgid):
    """Yields the message header against each part from the message."""
    foldername, msgkey = msgid.split(SEPERATOR)
    folder = self.folder if foldername == "INBOX" else self._getfolder(foldername)
    # Now look up the message
    msg = folder[msgkey]
    msg.is_seen = True
    hdr = list(msg.items())
    for p in msg.walk():
        yield hdr, p
    return
[ "def", "_get", "(", "self", ",", "msgid", ")", ":", "foldername", ",", "msgkey", "=", "msgid", ".", "split", "(", "SEPERATOR", ")", "folder", "=", "self", ".", "folder", "if", "foldername", "==", "\"INBOX\"", "else", "self", ".", "_getfolder", "(", "foldername", ")", "# Now look up the message", "msg", "=", "folder", "[", "msgkey", "]", "msg", ".", "is_seen", "=", "True", "hdr", "=", "list", "(", "msg", ".", "items", "(", ")", ")", "for", "p", "in", "msg", ".", "walk", "(", ")", ":", "yield", "hdr", ",", "p", "return" ]
Yields the message header against each part from the message.
[ "Yields", "the", "message", "header", "against", "each", "part", "from", "the", "message", "." ]
python
train
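The generator pairs the message's header list with every MIME part that walk() yields. The same iteration with a plain stdlib message (single-part, so walk() yields just the message itself):

    from email import message_from_string

    msg = message_from_string("Subject: hi\n\nbody\n")
    hdr = list(msg.items())
    for part in msg.walk():
        print(hdr, part.get_content_type())  # [('Subject', 'hi')] text/plain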
Gandi/gandi.cli
gandi/cli/modules/domain.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/domain.py#L80-L101
def renew(cls, fqdn, duration, background):
    """Renew a domain."""
    fqdn = fqdn.lower()
    if not background and not cls.intty():
        background = True

    domain_info = cls.info(fqdn)
    current_year = domain_info['date_registry_end'].year

    domain_params = {
        'duration': duration,
        'current_year': current_year,
    }

    result = cls.call('domain.renew', fqdn, domain_params)
    if background:
        return result

    # interactive mode, run a progress bar
    cls.echo('Renewing your domain.')
    cls.display_progress(result)
    cls.echo('Your domain %s has been renewed.' % fqdn)
[ "def", "renew", "(", "cls", ",", "fqdn", ",", "duration", ",", "background", ")", ":", "fqdn", "=", "fqdn", ".", "lower", "(", ")", "if", "not", "background", "and", "not", "cls", ".", "intty", "(", ")", ":", "background", "=", "True", "domain_info", "=", "cls", ".", "info", "(", "fqdn", ")", "current_year", "=", "domain_info", "[", "'date_registry_end'", "]", ".", "year", "domain_params", "=", "{", "'duration'", ":", "duration", ",", "'current_year'", ":", "current_year", ",", "}", "result", "=", "cls", ".", "call", "(", "'domain.renew'", ",", "fqdn", ",", "domain_params", ")", "if", "background", ":", "return", "result", "# interactive mode, run a progress bar", "cls", ".", "echo", "(", "'Renewing your domain.'", ")", "cls", ".", "display_progress", "(", "result", ")", "cls", ".", "echo", "(", "'Your domain %s has been renewed.'", "%", "fqdn", ")" ]
Renew a domain.
[ "Renew", "a", "domain", "." ]
python
train
wreckage/django-happenings
happenings/utils/handlers.py
https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/utils/handlers.py#L244-L272
def _handle_weekly_repeat_out(self):
    """
    Handles repeating an event weekly (or biweekly) if the current
    year and month are outside of its start year and month.
    It takes care of cases 3 and 4 in _handle_weekly_repeat_in()
    comments.
    """
    start_d = _first_weekday(
        self.event.l_start_date.weekday(),
        date(self.year, self.month, 1)
    )
    self.day = start_d.day
    self.count_first = True
    if self.event.repeats('BIWEEKLY'):
        self._biweekly_helper()
    elif self.event.repeats('WEEKLY'):
        # Note count_first=True b/c although the start date isn't this
        # month, the event does begin repeating this month and start_date
        # has not yet been counted.
        # Also note we start from start_d.day and not
        # event.l_start_date.day
        self.repeat()
        if self.event.is_chunk():
            diff = self.event.start_end_diff
            self.count = _chunk_fill_out_first_week(
                self.year, self.month, self.count, self.event, diff
            )
            for i in xrange(diff):
                # count the chunk days, then repeat them
                self.day = start_d.day + i + 1
                self.repeat()
[ "def", "_handle_weekly_repeat_out", "(", "self", ")", ":", "start_d", "=", "_first_weekday", "(", "self", ".", "event", ".", "l_start_date", ".", "weekday", "(", ")", ",", "date", "(", "self", ".", "year", ",", "self", ".", "month", ",", "1", ")", ")", "self", ".", "day", "=", "start_d", ".", "day", "self", ".", "count_first", "=", "True", "if", "self", ".", "event", ".", "repeats", "(", "'BIWEEKLY'", ")", ":", "self", ".", "_biweekly_helper", "(", ")", "elif", "self", ".", "event", ".", "repeats", "(", "'WEEKLY'", ")", ":", "# Note count_first=True b/c although the start date isn't this", "# month, the event does begin repeating this month and start_date", "# has not yet been counted.", "# Also note we start from start_d.day and not", "# event.l_start_date.day", "self", ".", "repeat", "(", ")", "if", "self", ".", "event", ".", "is_chunk", "(", ")", ":", "diff", "=", "self", ".", "event", ".", "start_end_diff", "self", ".", "count", "=", "_chunk_fill_out_first_week", "(", "self", ".", "year", ",", "self", ".", "month", ",", "self", ".", "count", ",", "self", ".", "event", ",", "diff", ")", "for", "i", "in", "xrange", "(", "diff", ")", ":", "# count the chunk days, then repeat them", "self", ".", "day", "=", "start_d", ".", "day", "+", "i", "+", "1", "self", ".", "repeat", "(", ")" ]
Handles repeating an event weekly (or biweekly) if the current year and month are outside of its start year and month. It takes care of cases 3 and 4 in _handle_weekly_repeat_in() comments.
[ "Handles", "repeating", "an", "event", "weekly", "(", "or", "biweekly", ")", "if", "the", "current", "year", "and", "month", "are", "outside", "of", "its", "start", "year", "and", "month", ".", "It", "takes", "care", "of", "cases", "3", "and", "4", "in", "_handle_weekly_repeat_in", "()", "comments", "." ]
python
test
libvips/pyvips
pyvips/error.py
https://github.com/libvips/pyvips/blob/f4d9334d2e3085b4b058129f14ac17a7872b109b/pyvips/error.py#L33-L47
def _to_string(x):
    """Convert to a unicode string.

    If x is a byte string, assume it is utf-8 and decode to a Python
    unicode string. You must call this on text strings you get back from
    libvips.
    """
    if x == ffi.NULL:
        x = 'NULL'
    else:
        x = ffi.string(x)
        if isinstance(x, byte_type):
            x = x.decode('utf-8')
    return x
[ "def", "_to_string", "(", "x", ")", ":", "if", "x", "==", "ffi", ".", "NULL", ":", "x", "=", "'NULL'", "else", ":", "x", "=", "ffi", ".", "string", "(", "x", ")", "if", "isinstance", "(", "x", ",", "byte_type", ")", ":", "x", "=", "x", ".", "decode", "(", "'utf-8'", ")", "return", "x" ]
Convert to a unicode string. If x is a byte string, assume it is utf-8 and decode to a Python unicode string. You must call this on text strings you get back from libvips.
[ "Convert", "to", "a", "unicode", "string", "." ]
python
train
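The same defensive decode can be shown without cffi; in this sketch None stands in for ffi.NULL, and byte strings are assumed to be utf-8:

    def to_text(x):
        if x is None:  # stands in for ffi.NULL in this sketch
            return 'NULL'
        if isinstance(x, bytes):
            return x.decode('utf-8')
        return x

    print(to_text(b'banana'), to_text(None))  # banana NULL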
stevearc/dql
dql/grammar/common.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/grammar/common.py#L27-L44
def function(name, *args, **kwargs):
    """ Construct a parser for a standard function format """
    if kwargs.get("caseless"):
        name = upkey(name)
    else:
        name = Word(name)
    fxn_args = None
    for i, arg in enumerate(args):
        if i == 0:
            fxn_args = arg
        else:
            fxn_args += Suppress(",") + arg
    if fxn_args is None:
        return name + Suppress("(") + Suppress(")")
    if kwargs.get("optparen"):
        return name + ((Suppress("(") + fxn_args + Suppress(")")) | fxn_args)
    else:
        return name + Suppress("(") + fxn_args + Suppress(")")
[ "def", "function", "(", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "\"caseless\"", ")", ":", "name", "=", "upkey", "(", "name", ")", "else", ":", "name", "=", "Word", "(", "name", ")", "fxn_args", "=", "None", "for", "i", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "if", "i", "==", "0", ":", "fxn_args", "=", "arg", "else", ":", "fxn_args", "+=", "Suppress", "(", "\",\"", ")", "+", "arg", "if", "fxn_args", "is", "None", ":", "return", "name", "+", "Suppress", "(", "\"(\"", ")", "+", "Suppress", "(", "\")\"", ")", "if", "kwargs", ".", "get", "(", "\"optparen\"", ")", ":", "return", "name", "+", "(", "(", "Suppress", "(", "\"(\"", ")", "+", "fxn_args", "+", "Suppress", "(", "\")\"", ")", ")", "|", "fxn_args", ")", "else", ":", "return", "name", "+", "Suppress", "(", "\"(\"", ")", "+", "fxn_args", "+", "Suppress", "(", "\")\"", ")" ]
Construct a parser for a standard function format
[ "Construct", "a", "parser", "for", "a", "standard", "function", "format" ]
python
train
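The helper composes pyparsing elements: with two argument expressions it should accept NAME(arg1,arg2) and suppress the punctuation. A hedged check of that output shape with plain pyparsing (assuming it is installed; this bypasses dql's upkey/caseless handling):

    from pyparsing import Suppress, Word, alphas

    ident = Word(alphas)
    count_fn = Word('COUNT') + Suppress('(') + ident + Suppress(',') + ident + Suppress(')')
    print(count_fn.parseString('COUNT(foo,bar)').asList())  # ['COUNT', 'foo', 'bar']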
Netflix-Skunkworks/swag-client
swag_client/backends/s3.py
https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backends/s3.py#L126-L132
def get_all(self):
    """Gets all items in file."""
    logger.debug('Fetching items. Path: {data_file}'.format(
        data_file=self.data_file
    ))
    return load_file(self.client, self.bucket_name, self.data_file)
[ "def", "get_all", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Fetching items. Path: {data_file}'", ".", "format", "(", "data_file", "=", "self", ".", "data_file", ")", ")", "return", "load_file", "(", "self", ".", "client", ",", "self", ".", "bucket_name", ",", "self", ".", "data_file", ")" ]
Gets all items in file.
[ "Gets", "all", "items", "in", "file", "." ]
python
train
ynop/audiomate
audiomate/formats/audacity.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/formats/audacity.py#L45-L73
def read_label_file(path):
    """
    Read the labels from an audacity label file.

    Args:
        path (str): Path to the label file.

    Returns:
        list: List of labels (start [sec], end [sec], label)

    Example::

        >>> read_label_file('/path/to/label/file.txt')
        [
            [0.0, 0.2, 'sie'],
            [0.2, 2.2, 'hallo']
        ]
    """
    labels = []

    for record in textfile.read_separated_lines_generator(path, separator='\t', max_columns=3):
        value = ''

        if len(record) > 2:
            value = str(record[2])

        labels.append([float(_clean_time(record[0])), float(_clean_time(record[1])), value])

    return labels
[ "def", "read_label_file", "(", "path", ")", ":", "labels", "=", "[", "]", "for", "record", "in", "textfile", ".", "read_separated_lines_generator", "(", "path", ",", "separator", "=", "'\\t'", ",", "max_columns", "=", "3", ")", ":", "value", "=", "''", "if", "len", "(", "record", ")", ">", "2", ":", "value", "=", "str", "(", "record", "[", "2", "]", ")", "labels", ".", "append", "(", "[", "float", "(", "_clean_time", "(", "record", "[", "0", "]", ")", ")", ",", "float", "(", "_clean_time", "(", "record", "[", "1", "]", ")", ")", ",", "value", "]", ")", "return", "labels" ]
Read the labels from an audacity label file. Args: path (str): Path to the label file. Returns: list: List of labels (start [sec], end [sec], label) Example:: >>> read_label_file('/path/to/label/file.txt') [ [0.0, 0.2, 'sie'], [0.2, 2.2, 'hallo'] ]
[ "Read", "the", "labels", "from", "an", "audacity", "label", "file", "." ]
python
train
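The underlying file format is one "start<TAB>end<TAB>text" record per line, with times in seconds. A standalone parser of the same format, without audiomate's helpers:

    def parse_labels(text):
        labels = []
        for line in text.strip().splitlines():
            parts = line.split('\t', 2)
            value = parts[2] if len(parts) > 2 else ''
            labels.append([float(parts[0]), float(parts[1]), value])
        return labels

    print(parse_labels('0.0\t0.2\tsie\n0.2\t2.2\thallo'))
    # [[0.0, 0.2, 'sie'], [0.2, 2.2, 'hallo']]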
limpyd/redis-limpyd
limpyd/indexes.py
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/indexes.py#L1324-L1332
def unstore(self, key, pk, value):
    """Remove the value/pk from the sorted set index

    For the parameters, see BaseRangeIndex.store

    We simply remove the pk as a member from the sorted set
    """
    self.connection.zrem(key, pk)
[ "def", "unstore", "(", "self", ",", "key", ",", "pk", ",", "value", ")", ":", "self", ".", "connection", ".", "zrem", "(", "key", ",", "pk", ")" ]
Remove the value/pk from the sorted set index For the parameters, see BaseRangeIndex.store We simply remove the pk as a member from the sorted set
[ "Remove", "the", "value", "/", "pk", "from", "the", "sorted", "set", "index" ]
python
train
dcos/shakedown
shakedown/dcos/docker.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/docker.py#L12-L39
def docker_version(host=None, component='server'):
    """ Return the version of Docker [Server]

    :param host: host or IP of the machine Docker is running on
    :type host: str
    :param component: Docker component
    :type component: str

    :return: Docker version
    :rtype: str
    """
    if component.lower() == 'client':
        component = 'Client'
    else:
        component = 'Server'

    # sudo is required for non-coreOS installs
    command = 'sudo docker version -f {{.{}.Version}}'.format(component)

    if host is None:
        success, output = shakedown.run_command_on_master(command, None, None, False)
    else:
        success, output = shakedown.run_command_on_host(host, command, None, None, False)

    if success:
        return output
    else:
        return 'unknown'
[ "def", "docker_version", "(", "host", "=", "None", ",", "component", "=", "'server'", ")", ":", "if", "component", ".", "lower", "(", ")", "==", "'client'", ":", "component", "=", "'Client'", "else", ":", "component", "=", "'Server'", "# sudo is required for non-coreOS installs", "command", "=", "'sudo docker version -f {{.{}.Version}}'", ".", "format", "(", "component", ")", "if", "host", "is", "None", ":", "success", ",", "output", "=", "shakedown", ".", "run_command_on_master", "(", "command", ",", "None", ",", "None", ",", "False", ")", "else", ":", "success", ",", "output", "=", "shakedown", ".", "run_command_on_host", "(", "host", ",", "command", ",", "None", ",", "None", ",", "False", ")", "if", "success", ":", "return", "output", "else", ":", "return", "'unknown'" ]
Return the version of Docker [Server] :param host: host or IP of the machine Docker is running on :type host: str :param component: Docker component :type component: str :return: Docker version :rtype: str
[ "Return", "the", "version", "of", "Docker", "[", "Server", "]" ]
python
train
Capitains/Nautilus
capitains_nautilus/flask_ext.py
https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/flask_ext.py#L160-L183
def init_blueprint(self, app):
    """ Properly generates the blueprint, registering routes and
    filters and connecting the app and the blueprint

    :return: Blueprint of the extension
    :rtype: Blueprint
    """
    self.blueprint = Blueprint(
        self.name,
        self.name,
        template_folder=resource_filename("capitains_nautilus", "data/templates"),
        url_prefix=self.prefix
    )

    # Register routes
    for url, name, methods, extension_name in self.ROUTES:
        self.blueprint.add_url_rule(
            url,
            view_func=self.view(name, extension_name),
            endpoint=name[2:],
            methods=methods
        )

    app.register_blueprint(self.blueprint)
    return self.blueprint
[ "def", "init_blueprint", "(", "self", ",", "app", ")", ":", "self", ".", "blueprint", "=", "Blueprint", "(", "self", ".", "name", ",", "self", ".", "name", ",", "template_folder", "=", "resource_filename", "(", "\"capitains_nautilus\"", ",", "\"data/templates\"", ")", ",", "url_prefix", "=", "self", ".", "prefix", ")", "# Register routes", "for", "url", ",", "name", ",", "methods", ",", "extension_name", "in", "self", ".", "ROUTES", ":", "self", ".", "blueprint", ".", "add_url_rule", "(", "url", ",", "view_func", "=", "self", ".", "view", "(", "name", ",", "extension_name", ")", ",", "endpoint", "=", "name", "[", "2", ":", "]", ",", "methods", "=", "methods", ")", "app", ".", "register_blueprint", "(", "self", ".", "blueprint", ")", "return", "self", ".", "blueprint" ]
Properly generates the blueprint, registering routes and filters and connecting the app and the blueprint :return: Blueprint of the extension :rtype: Blueprint
[ "Properly", "generates", "the", "blueprint", "registering", "routes", "and", "filters", "and", "connecting", "the", "app", "and", "the", "blueprint" ]
python
train
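A minimal, self-contained sketch of the same pattern: registering a table of routes on a Blueprint and attaching it to an app. The ROUTES entries and view factory here are hypothetical placeholders, not Nautilus' real route table; the name[2:] endpoint slice mirrors the original.
from flask import Blueprint, Flask

ROUTES = [("/ping", "r_ping", ["GET"])]  # hypothetical route table

def make_view(name):
    # stand-in for self.view(): returns a trivial view function
    def view():
        return name
    return view

app = Flask(__name__)
bp = Blueprint("demo", __name__, url_prefix="/api")
for url, name, methods in ROUTES:
    bp.add_url_rule(url, view_func=make_view(name), endpoint=name[2:], methods=methods)
app.register_blueprint(bp)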
jilljenn/tryalgo
tryalgo/gale_shapley.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/gale_shapley.py#L12-L40
def gale_shapley(men, women): """Stable matching by Gale-Shapley :param men: table of size n, men[i] is preference list of women for men i :param women: similar :returns: matching table, from women to men :complexity: :math:`O(n^2)` """ n = len(men) assert n == len(women) current_suitor = [0] * n spouse = [None] * n rank = [[0] * n for j in range(n)] # build rank for j in range(n): for r in range(n): rank[j][women[j][r]] = r singles = deque(range(n)) # all men are single and get in the queue while singles: i = singles.popleft() j = men[i][current_suitor[i]] current_suitor[i] += 1 if spouse[j] is None: spouse[j] = i elif rank[j][spouse[j]] < rank[j][i]: singles.append(i) else: singles.append(spouse[j]) # sorry for spouse[j] spouse[j] = i return spouse
[ "def", "gale_shapley", "(", "men", ",", "women", ")", ":", "n", "=", "len", "(", "men", ")", "assert", "n", "==", "len", "(", "women", ")", "current_suitor", "=", "[", "0", "]", "*", "n", "spouse", "=", "[", "None", "]", "*", "n", "rank", "=", "[", "[", "0", "]", "*", "n", "for", "j", "in", "range", "(", "n", ")", "]", "# build rank", "for", "j", "in", "range", "(", "n", ")", ":", "for", "r", "in", "range", "(", "n", ")", ":", "rank", "[", "j", "]", "[", "women", "[", "j", "]", "[", "r", "]", "]", "=", "r", "singles", "=", "deque", "(", "range", "(", "n", ")", ")", "# all men are single and get in the queue", "while", "singles", ":", "i", "=", "singles", ".", "popleft", "(", ")", "j", "=", "men", "[", "i", "]", "[", "current_suitor", "[", "i", "]", "]", "current_suitor", "[", "i", "]", "+=", "1", "if", "spouse", "[", "j", "]", "is", "None", ":", "spouse", "[", "j", "]", "=", "i", "elif", "rank", "[", "j", "]", "[", "spouse", "[", "j", "]", "]", "<", "rank", "[", "j", "]", "[", "i", "]", ":", "singles", ".", "append", "(", "i", ")", "else", ":", "singles", ".", "put", "(", "spouse", "[", "j", "]", ")", "# sorry for spouse[j]", "spouse", "[", "j", "]", "=", "i", "return", "spouse" ]
Stable matching by Gale-Shapley :param men: table of size n, men[i] is preference list of women for men i :param women: similar :returns: matching table, from women to men :complexity: :math:`O(n^2)`
[ "Stable", "matching", "by", "Gale", "-", "Shapley" ]
python
train
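A toy run of the algorithm above, with made-up preference lists for 3 men and 3 women. Note that singles is a deque, so re-queuing the displaced suitor uses append; the import path matches this entry's module.
from tryalgo.gale_shapley import gale_shapley

men = [[0, 1, 2],
       [1, 0, 2],
       [0, 2, 1]]
women = [[1, 0, 2],
         [0, 1, 2],
         [2, 1, 0]]
spouse = gale_shapley(men, women)  # spouse[j] = man matched to woman j
print(spouse)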
SpriteLink/NIPAP
nipap-www/nipapwww/controllers/prefix.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap-www/nipapwww/controllers/prefix.py#L51-L104
def edit(self, id): """ Edit a prefix. """ # find prefix c.prefix = Prefix.get(int(id)) # we got a HTTP POST - edit object if request.method == 'POST': c.prefix.prefix = request.params['prefix_prefix'] c.prefix.description = request.params['prefix_description'] if request.params['prefix_node'].strip() == '': c.prefix.node = None else: c.prefix.node = request.params['prefix_node'] if request.params['prefix_country'].strip() == '': c.prefix.country = None else: c.prefix.country = request.params['prefix_country'] if request.params['prefix_comment'].strip() == '': c.prefix.comment = None else: c.prefix.comment = request.params['prefix_comment'] if request.params['prefix_order_id'].strip() == '': c.prefix.order_id = None else: c.prefix.order_id = request.params['prefix_order_id'] if request.params['prefix_customer_id'].strip() == '': c.prefix.customer_id = None else: c.prefix.customer_id = request.params['prefix_customer_id'] if request.params['prefix_vrf'].strip() == '': c.prefix.vrf = None else: # TODO: handle non-existent VRF... c.prefix.vrf = VRF.list({ 'rt': request.params['prefix_vrf'] })[0] if request.params.get('prefix_monitor') is not None: c.prefix.monitor = True else: c.prefix.monitor = False c.prefix.alarm_priority = request.params['prefix_alarm_priority'] c.prefix.save() redirect(url(controller='prefix', action='list')) return render('/prefix_edit.html')
[ "def", "edit", "(", "self", ",", "id", ")", ":", "# find prefix", "c", ".", "prefix", "=", "Prefix", ".", "get", "(", "int", "(", "id", ")", ")", "# we got a HTTP POST - edit object", "if", "request", ".", "method", "==", "'POST'", ":", "c", ".", "prefix", ".", "prefix", "=", "request", ".", "params", "[", "'prefix_prefix'", "]", "c", ".", "prefix", ".", "description", "=", "request", ".", "params", "[", "'prefix_description'", "]", "if", "request", ".", "params", "[", "'prefix_node'", "]", ".", "strip", "(", ")", "==", "''", ":", "c", ".", "prefix", ".", "node", "=", "None", "else", ":", "c", ".", "prefix", ".", "node", "=", "request", ".", "params", "[", "'prefix_node'", "]", "if", "request", ".", "params", "[", "'prefix_country'", "]", ".", "strip", "(", ")", "==", "''", ":", "c", ".", "prefix", ".", "country", "=", "None", "else", ":", "c", ".", "prefix", ".", "country", "=", "request", ".", "params", "[", "'prefix_country'", "]", "if", "request", ".", "params", "[", "'prefix_comment'", "]", ".", "strip", "(", ")", "==", "''", ":", "c", ".", "prefix", ".", "comment", "=", "None", "else", ":", "c", ".", "prefix", ".", "comment", "=", "request", ".", "params", "[", "'prefix_comment'", "]", "if", "request", ".", "params", "[", "'prefix_order_id'", "]", ".", "strip", "(", ")", "==", "''", ":", "c", ".", "prefix", ".", "order_id", "=", "None", "else", ":", "c", ".", "prefix", ".", "order_id", "=", "request", ".", "params", "[", "'prefix_order_id'", "]", "if", "request", ".", "params", "[", "'prefix_customer_id'", "]", ".", "strip", "(", ")", "==", "''", ":", "c", ".", "prefix", ".", "customer_id", "=", "None", "else", ":", "c", ".", "prefix", ".", "customer_id", "=", "request", ".", "params", "[", "'prefix_customer_id'", "]", "if", "request", ".", "params", "[", "'prefix_vrf'", "]", ".", "strip", "(", ")", "==", "''", ":", "c", ".", "prefix", ".", "vrf", "=", "None", "else", ":", "# TODO: handle non-existent VRF...", "c", ".", "prefix", ".", "vrf", "=", "VRF", ".", "list", "(", "{", "'rt'", ":", "request", ".", "params", "[", "'prefix_vrf'", "]", "}", ")", "[", "0", "]", "if", "request", ".", "params", ".", "get", "(", "'prefix_monitor'", ")", "is", "not", "None", ":", "c", ".", "prefix", ".", "monitor", "=", "True", "else", ":", "c", ".", "prefix", ".", "monitor", "=", "False", "c", ".", "prefix", ".", "alarm_priority", "=", "request", ".", "params", "[", "'prefix_alarm_priority'", "]", "c", ".", "prefix", ".", "save", "(", ")", "redirect", "(", "url", "(", "controller", "=", "'prefix'", ",", "action", "=", "'list'", ")", ")", "return", "render", "(", "'/prefix_edit.html'", ")" ]
Edit a prefix.
[ "Edit", "a", "prefix", "." ]
python
train
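The controller above repeats the same normalisation eight times: a blank or whitespace-only form field means None. A hypothetical helper (not actual NIPAP code) that expresses the pattern once:
def empty_to_none(value):
    # blank form fields are treated as "unset"
    return None if value.strip() == '' else value

assert empty_to_none('   ') is None
assert empty_to_none('core-router-1') == 'core-router-1'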
aio-libs/aioftp
ftpbench.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/ftpbench.py#L159-L174
def human2bytes(s): """ >>> human2bytes('1M') 1048576 >>> human2bytes('1G') 1073741824 """ symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') letter = s[-1:].strip().upper() num = s[:-1] assert num.isdigit() and letter in symbols, s num = float(num) prefix = {symbols[0]: 1} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 return int(num * prefix[letter])
[ "def", "human2bytes", "(", "s", ")", ":", "symbols", "=", "(", "'B'", ",", "'K'", ",", "'M'", ",", "'G'", ",", "'T'", ",", "'P'", ",", "'E'", ",", "'Z'", ",", "'Y'", ")", "letter", "=", "s", "[", "-", "1", ":", "]", ".", "strip", "(", ")", ".", "upper", "(", ")", "num", "=", "s", "[", ":", "-", "1", "]", "assert", "num", ".", "isdigit", "(", ")", "and", "letter", "in", "symbols", ",", "s", "num", "=", "float", "(", "num", ")", "prefix", "=", "{", "symbols", "[", "0", "]", ":", "1", "}", "for", "i", ",", "s", "in", "enumerate", "(", "symbols", "[", "1", ":", "]", ")", ":", "prefix", "[", "s", "]", "=", "1", "<<", "(", "i", "+", "1", ")", "*", "10", "return", "int", "(", "num", "*", "prefix", "[", "letter", "]", ")" ]
>>> human2bytes('1M') 1048576 >>> human2bytes('1G') 1073741824
[ ">>>", "human2bytes", "(", "1M", ")", "1048576", ">>>", "human2bytes", "(", "1G", ")", "1073741824" ]
python
valid
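The prefix table built by human2bytes, spelled out as a standalone check: each symbol is another factor of 2**10, which is what the 1 << (i + 1) * 10 shift computes.
symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
    prefix[s] = 1 << (i + 1) * 10
assert prefix['K'] == 1024
assert prefix['M'] == 1048576      # matches the '1M' doctest above
assert prefix['G'] == 1073741824   # matches the '1G' doctest above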
ynop/audiomate
audiomate/corpus/corpus.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/corpus.py#L550-L566
def merge_corpora(cls, corpora): """ Merge a list of corpora into one. Args: corpora (Iterable): An iterable of :py:class:`audiomate.corpus.CorpusView`. Returns: Corpus: A corpus with the data from all given corpora merged into one. """ ds = Corpus() for merging_corpus in corpora: ds.merge_corpus(merging_corpus) return ds
[ "def", "merge_corpora", "(", "cls", ",", "corpora", ")", ":", "ds", "=", "Corpus", "(", ")", "for", "merging_corpus", "in", "corpora", ":", "ds", ".", "merge_corpus", "(", "merging_corpus", ")", "return", "ds" ]
Merge a list of corpora into one. Args: corpora (Iterable): An iterable of :py:class:`audiomate.corpus.CorpusView`. Returns: Corpus: A corpus with the data from all given corpora merged into one.
[ "Merge", "a", "list", "of", "corpora", "into", "one", "." ]
python
train
tanghaibao/jcvi
jcvi/apps/gmap.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/gmap.py#L104-L119
def index(args): """ %prog index database.fasta Wrapper for `gmap_build`. Same interface. """ p = OptionParser(index.__doc__) p.add_option("--supercat", default=False, action="store_true", help="Concatenate reference to speed up alignment") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) dbfile, = args check_index(dbfile, supercat=opts.supercat)
[ "def", "index", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "index", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--supercat\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Concatenate reference to speed up alignment\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "dbfile", ",", "=", "args", "check_index", "(", "dbfile", ",", "supercat", "=", "opts", ".", "supercat", ")" ]
%prog index database.fasta Wrapper for `gmap_build`. Same interface.
[ "%prog", "index", "database", ".", "fasta", "Wrapper", "for", "gmap_build", ".", "Same", "interface", "." ]
python
train
softlayer/softlayer-python
SoftLayer/managers/account.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/account.py#L78-L92
def get_event(self, event_id): """Gets details about a maintenance event :param int event_id: Notification_Occurrence_Event ID :return: Notification_Occurrence_Event """ mask = """mask[ acknowledgedFlag, attachments, impactedResources, statusCode, updates, notificationOccurrenceEventType] """ return self.client.call('Notification_Occurrence_Event', 'getObject', id=event_id, mask=mask)
[ "def", "get_event", "(", "self", ",", "event_id", ")", ":", "mask", "=", "\"\"\"mask[\n acknowledgedFlag,\n attachments,\n impactedResources,\n statusCode,\n updates,\n notificationOccurrenceEventType]\n \"\"\"", "return", "self", ".", "client", ".", "call", "(", "'Notification_Occurrence_Event'", ",", "'getObject'", ",", "id", "=", "event_id", ",", "mask", "=", "mask", ")" ]
Gets details about a maintenance event :param int event_id: Notification_Occurrence_Event ID :return: Notification_Occurrence_Event
[ "Gets", "details", "about", "a", "maintenance", "event" ]
python
train
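A hedged usage sketch for the manager method above: it assumes SoftLayer credentials are configured in the environment and that 12345 is a real Notification_Occurrence_Event id (both are placeholders).
import SoftLayer
from SoftLayer.managers.account import AccountManager

client = SoftLayer.create_client_from_env()
manager = AccountManager(client)
event = manager.get_event(12345)        # 12345 is a placeholder id
print(event['statusCode'], len(event['updates']))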
Clinical-Genomics/scout
scout/adapter/mongo/event.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/event.py#L74-L135
def events(self, institute, case=None, variant_id=None, level=None, comments=False, panel=None): """Fetch events from the database. Args: institute (dict): A institute case (dict): A case variant_id (str, optional): global variant id level (str, optional): restrict comments to 'specific' or 'global' comments (bool, optional): restrict events to include only comments panel (str): A panel name Returns: pymongo.Cursor: Query result """ query = {} if variant_id: if comments: # If it's comment-related event collect global and variant-specific comment events LOG.debug("Fetching all comments for institute {0} case {1} variant {2}".format( institute['_id'], case['_id'], variant_id)) query = { '$or': [ { 'category' : 'variant', 'variant_id' : variant_id, 'verb' : 'comment', 'level' : 'global' }, { 'category' : 'variant', 'variant_id' : variant_id, 'institute' : institute['_id'], 'case' : case['_id'], 'verb' : 'comment', 'level' : 'specific' } ] } else: # Collect other variant-specific events which are not comments query['institute'] = institute['_id'] query['category'] = 'variant' query['variant_id'] = variant_id query['case'] = case['_id'] else: query['institute'] = institute['_id'] if panel: query['panel'] = panel # If no variant_id or panel we know that it is a case level comment else: query['category'] = 'case' if case: query['case'] = case['_id'] if comments: query['verb'] = 'comment' return self.event_collection.find(query).sort('created_at', pymongo.DESCENDING)
[ "def", "events", "(", "self", ",", "institute", ",", "case", "=", "None", ",", "variant_id", "=", "None", ",", "level", "=", "None", ",", "comments", "=", "False", ",", "panel", "=", "None", ")", ":", "query", "=", "{", "}", "if", "variant_id", ":", "if", "comments", ":", "# If it's comment-related event collect global and variant-specific comment events", "LOG", ".", "debug", "(", "\"Fetching all comments for institute {0} case {1} variant {2}\"", ".", "format", "(", "institute", "[", "'_id'", "]", ",", "case", "[", "'_id'", "]", ",", "variant_id", ")", ")", "query", "=", "{", "'$or'", ":", "[", "{", "'category'", ":", "'variant'", ",", "'variant_id'", ":", "variant_id", ",", "'verb'", ":", "'comment'", ",", "'level'", ":", "'global'", "}", ",", "{", "'category'", ":", "'variant'", ",", "'variant_id'", ":", "variant_id", ",", "'institute'", ":", "institute", "[", "'_id'", "]", ",", "'case'", ":", "case", "[", "'_id'", "]", ",", "'verb'", ":", "'comment'", ",", "'level'", ":", "'specific'", "}", "]", "}", "else", ":", "# Collect other variant-specific events which are not comments", "query", "[", "'institute'", "]", "=", "institute", "[", "'_id'", "]", "query", "[", "'category'", "]", "=", "'variant'", "query", "[", "'variant_id'", "]", "=", "variant_id", "query", "[", "'case'", "]", "=", "case", "[", "'_id'", "]", "else", ":", "query", "[", "'institute'", "]", "=", "institute", "[", "'_id'", "]", "if", "panel", ":", "query", "[", "'panel'", "]", "=", "panel", "# If no variant_id or panel we know that it is a case level comment", "else", ":", "query", "[", "'category'", "]", "=", "'case'", "if", "case", ":", "query", "[", "'case'", "]", "=", "case", "[", "'_id'", "]", "if", "comments", ":", "query", "[", "'verb'", "]", "=", "'comment'", "return", "self", ".", "event_collection", ".", "find", "(", "query", ")", ".", "sort", "(", "'created_at'", ",", "pymongo", ".", "DESCENDING", ")" ]
Fetch events from the database. Args: institute (dict): A institute case (dict): A case variant_id (str, optional): global variant id level (str, optional): restrict comments to 'specific' or 'global' comments (bool, optional): restrict events to include only comments panel (str): A panel name Returns: pymongo.Cursor: Query result
[ "Fetch", "events", "from", "the", "database", "." ]
python
test
googledatalab/pydatalab
datalab/data/_sql_statement.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/_sql_statement.py#L69-L120
def _find_recursive_dependencies(sql, values, code, resolved_vars, resolving_vars=None): """ Recursive helper method for expanding variables including transitive dependencies. Placeholders in SQL are represented as $<name>. If '$' must appear within the SQL statement literally, then it can be escaped as '$$'. Args: sql: the raw SQL statement with named placeholders. values: the user-supplied dictionary of name/value pairs to use for placeholder values. code: an array of referenced UDFs found during expansion. resolved_vars: a ref parameter for the variable references completely resolved so far. resolving_vars: a ref parameter for the variable(s) we are currently resolving; if we see a dependency again that is in this set we know we have a circular reference. Returns: The formatted SQL statement with placeholders replaced with their values. Raises: Exception if a placeholder was found in the SQL statement, but did not have a corresponding argument value. """ # Get the set of $var references in this SQL. dependencies = SqlStatement._get_dependencies(sql) for dependency in dependencies: # Now we check each dependency. If it is in complete - i.e., we have an expansion # for it already - we just continue. if dependency in resolved_vars: continue # Look it up in our resolution namespace dictionary. dep = datalab.utils.get_item(values, dependency) # If it is a SQL module, get the main/last query from the module, so users can refer # to $module. Useful especially if final query in module has no DEFINE QUERY <name> part. if isinstance(dep, types.ModuleType): dep = _utils.get_default_query_from_module(dep) # If we can't resolve the $name, give up. if dep is None: raise Exception("Unsatisfied dependency $%s" % dependency) # If it is a SqlStatement, it may have its own $ references in turn; check to make # sure we don't have circular references, and if not, recursively expand it and add # it to the set of complete dependencies. if isinstance(dep, SqlStatement): if resolving_vars is None: resolving_vars = [] elif dependency in resolving_vars: # Circular dependency raise Exception("Circular dependency in $%s" % dependency) resolving_vars.append(dependency) SqlStatement._find_recursive_dependencies(dep._sql, values, code, resolved_vars, resolving_vars) resolving_vars.pop() resolved_vars[dependency] = SqlStatement(dep._sql) else: resolved_vars[dependency] = dep
[ "def", "_find_recursive_dependencies", "(", "sql", ",", "values", ",", "code", ",", "resolved_vars", ",", "resolving_vars", "=", "None", ")", ":", "# Get the set of $var references in this SQL.", "dependencies", "=", "SqlStatement", ".", "_get_dependencies", "(", "sql", ")", "for", "dependency", "in", "dependencies", ":", "# Now we check each dependency. If it is in complete - i.e., we have an expansion", "# for it already - we just continue.", "if", "dependency", "in", "resolved_vars", ":", "continue", "# Look it up in our resolution namespace dictionary.", "dep", "=", "datalab", ".", "utils", ".", "get_item", "(", "values", ",", "dependency", ")", "# If it is a SQL module, get the main/last query from the module, so users can refer", "# to $module. Useful especially if final query in module has no DEFINE QUERY <name> part.", "if", "isinstance", "(", "dep", ",", "types", ".", "ModuleType", ")", ":", "dep", "=", "_utils", ".", "get_default_query_from_module", "(", "dep", ")", "# If we can't resolve the $name, give up.", "if", "dep", "is", "None", ":", "raise", "Exception", "(", "\"Unsatisfied dependency $%s\"", "%", "dependency", ")", "# If it is a SqlStatement, it may have its own $ references in turn; check to make", "# sure we don't have circular references, and if not, recursively expand it and add", "# it to the set of complete dependencies.", "if", "isinstance", "(", "dep", ",", "SqlStatement", ")", ":", "if", "resolving_vars", "is", "None", ":", "resolving_vars", "=", "[", "]", "elif", "dependency", "in", "resolving_vars", ":", "# Circular dependency", "raise", "Exception", "(", "\"Circular dependency in $%s\"", "%", "dependency", ")", "resolving_vars", ".", "append", "(", "dependency", ")", "SqlStatement", ".", "_find_recursive_dependencies", "(", "dep", ".", "_sql", ",", "values", ",", "code", ",", "resolved_vars", ",", "resolving_vars", ")", "resolving_vars", ".", "pop", "(", ")", "resolved_vars", "[", "dependency", "]", "=", "SqlStatement", "(", "dep", ".", "_sql", ")", "else", ":", "resolved_vars", "[", "dependency", "]", "=", "dep" ]
Recursive helper method for expanding variables including transitive dependencies. Placeholders in SQL are represented as $<name>. If '$' must appear within the SQL statement literally, then it can be escaped as '$$'. Args: sql: the raw SQL statement with named placeholders. values: the user-supplied dictionary of name/value pairs to use for placeholder values. code: an array of referenced UDFs found during expansion. resolved_vars: a ref parameter for the variable references completely resolved so far. resolving_vars: a ref parameter for the variable(s) we are currently resolving; if we see a dependency again that is in this set we know we have a circular reference. Returns: The formatted SQL statement with placeholders replaced with their values. Raises: Exception if a placeholder was found in the SQL statement, but did not have a corresponding argument value.
[ "Recursive", "helper", "method", "for", "expanding", "variables", "including", "transitive", "dependencies", "." ]
python
train
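The resolving_vars stack is what turns infinite recursion into a clean error. A self-contained sketch of the same guard, with a plain dict standing in for the SQL dependency graph (an illustration, not datalab code):
def resolve(name, graph, resolved, resolving=None):
    # names on the `resolving` stack are still being expanded; seeing one
    # again means the graph has a cycle
    if name in resolved:
        return
    resolving = resolving or []
    if name in resolving:
        raise Exception("Circular dependency $%s" % name)
    resolving.append(name)
    for dep in graph.get(name, []):
        resolve(dep, graph, resolved, resolving)
    resolving.pop()
    resolved[name] = True

graph = {'a': ['b'], 'b': ['c'], 'c': ['a']}  # a -> b -> c -> a
try:
    resolve('a', graph, {})
except Exception as e:
    print(e)  # Circular dependency $a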
manns/pyspread
pyspread/src/lib/fileio.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/fileio.py#L88-L101
def next(self): """Next that shows progress in statusbar for each <freq> cells""" self.progress_status() # Check aborted state and raise StopIteration if aborted if self.aborted: statustext = _("File loading aborted.") post_command_event(self.main_window, self.main_window.StatusBarMsg, text=statustext) raise StopIteration return self.parent_cls.next(self)
[ "def", "next", "(", "self", ")", ":", "self", ".", "progress_status", "(", ")", "# Check abortes state and raise StopIteration if aborted", "if", "self", ".", "aborted", ":", "statustext", "=", "_", "(", "\"File loading aborted.\"", ")", "post_command_event", "(", "self", ".", "main_window", ",", "self", ".", "main_window", ".", "StatusBarMsg", ",", "text", "=", "statustext", ")", "raise", "StopIteration", "return", "self", ".", "parent_cls", ".", "next", "(", "self", ")" ]
Next that shows progress in statusbar for each <freq> cells
[ "Next", "that", "shows", "progress", "in", "statusbar", "for", "each", "<freq", ">", "cells" ]
python
train
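A GUI-free sketch of the same idea (no wx, no pyspread internals): wrap an iterator so progress is reported every freq items and iteration stops early once an abort flag is set.
class ProgressIterator:
    def __init__(self, iterable, freq=1000):
        self.it = iter(iterable)
        self.freq = freq
        self.count = 0
        self.aborted = False  # a caller (e.g. a UI handler) may set this

    def __iter__(self):
        return self

    def __next__(self):
        if self.aborted:
            raise StopIteration
        self.count += 1
        if self.count % self.freq == 0:
            print("processed", self.count, "cells")
        return next(self.it)

cells = ProgressIterator(range(3000), freq=1000)
total = sum(cells)  # prints progress twice, then finishes normally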
anthok/overwatch-api
overwatch_api/core.py
https://github.com/anthok/overwatch-api/blob/aba976a3c07c4932de13f4236d924b2901b149b9/overwatch_api/core.py#L226-L235
async def _async_get(self, session: aiohttp.ClientSession, *args, _async_timeout_seconds: int = 5, **kwargs): """Uses aiohttp to make a get request asynchronously. Will raise asyncio.TimeoutError if the request could not be completed within _async_timeout_seconds (default 5) seconds.""" # Taken almost directly from the aiohttp tutorial with async_timeout.timeout(_async_timeout_seconds): async with session.get(*args, **kwargs) as response: return await response.json(), response.status
[ "async", "def", "_async_get", "(", "self", ",", "session", ":", "aiohttp", ".", "ClientSession", ",", "*", "args", ",", "_async_timeout_seconds", ":", "int", "=", "5", ",", "*", "*", "kwargs", ")", ":", "# Taken almost directly from the aiohttp tutorial", "with", "async_timeout", ".", "timeout", "(", "_async_timeout_seconds", ")", ":", "async", "with", "session", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", "as", "response", ":", "return", "await", "response", ".", "json", "(", ")", ",", "response", ".", "status" ]
Uses aiohttp to make a get request asynchronously. Will raise asyncio.TimeoutError if the request could not be completed within _async_timeout_seconds (default 5) seconds.
[ "Uses", "aiohttp", "to", "make", "a", "get", "request", "asynchronously", ".", "Will", "raise", "asyncio", ".", "TimeoutError", "if", "the", "request", "could", "not", "be", "completed", "within", "_async_timeout_seconds", "(", "default", "5", ")", "seconds", "." ]
python
train
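A runnable sketch of the same pattern using asyncio.wait_for instead of async_timeout (same effect: asyncio.TimeoutError once the deadline passes). The URL is a placeholder.
import asyncio
import aiohttp

async def fetch_json(url, timeout_seconds=5):
    async with aiohttp.ClientSession() as session:
        async def _get():
            async with session.get(url) as response:
                return await response.json(), response.status
        # raises asyncio.TimeoutError if _get() takes too long
        return await asyncio.wait_for(_get(), timeout_seconds)

# asyncio.run(fetch_json("https://example.com/api"))  # placeholder endpoint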
spulec/moto
moto/packages/httpretty/core.py
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/packages/httpretty/core.py#L1089-L1116
def httprettified(test): "A decorator for tests that use HTTPretty" def decorate_class(klass): for attr in dir(klass): if not attr.startswith('test_'): continue attr_value = getattr(klass, attr) if not hasattr(attr_value, "__call__"): continue setattr(klass, attr, decorate_callable(attr_value)) return klass def decorate_callable(test): @functools.wraps(test) def wrapper(*args, **kw): httpretty.reset() httpretty.enable() try: return test(*args, **kw) finally: httpretty.disable() return wrapper if isinstance(test, ClassTypes): return decorate_class(test) return decorate_callable(test)
[ "def", "httprettified", "(", "test", ")", ":", "def", "decorate_class", "(", "klass", ")", ":", "for", "attr", "in", "dir", "(", "klass", ")", ":", "if", "not", "attr", ".", "startswith", "(", "'test_'", ")", ":", "continue", "attr_value", "=", "getattr", "(", "klass", ",", "attr", ")", "if", "not", "hasattr", "(", "attr_value", ",", "\"__call__\"", ")", ":", "continue", "setattr", "(", "klass", ",", "attr", ",", "decorate_callable", "(", "attr_value", ")", ")", "return", "klass", "def", "decorate_callable", "(", "test", ")", ":", "@", "functools", ".", "wraps", "(", "test", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "httpretty", ".", "reset", "(", ")", "httpretty", ".", "enable", "(", ")", "try", ":", "return", "test", "(", "*", "args", ",", "*", "*", "kw", ")", "finally", ":", "httpretty", ".", "disable", "(", ")", "return", "wrapper", "if", "isinstance", "(", "test", ",", "ClassTypes", ")", ":", "return", "decorate_class", "(", "test", ")", "return", "decorate_callable", "(", "test", ")" ]
A decorator for tests that use HTTPretty
[ "A", "decorator", "tests", "that", "use", "HTTPretty" ]
python
train
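The isinstance dispatch above lets one decorator wrap either a test-case class (every test_ method) or a single function. A minimal generic version of that shape; the setup/teardown prints are placeholders, not httpretty's:
import functools
import inspect

def with_fixture(obj):
    def decorate_callable(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kw):
            print("setup")       # stands in for httpretty.reset()/enable()
            try:
                return fn(*args, **kw)
            finally:
                print("teardown")  # stands in for httpretty.disable()
        return wrapper

    if inspect.isclass(obj):
        for attr in dir(obj):
            if attr.startswith('test_') and callable(getattr(obj, attr)):
                setattr(obj, attr, decorate_callable(getattr(obj, attr)))
        return obj
    return decorate_callable(obj)

@with_fixture
def test_something():
    print("body")

test_something()  # setup / body / teardown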
dopefishh/pympi
pympi/Elan.py
https://github.com/dopefishh/pympi/blob/79c747cde45b5ba203ed93154d8c123ac9c3ef56/pympi/Elan.py#L582-L598
def get_annotation_data_before_time(self, id_tier, time): """Give the annotation before a given time. When the tier contains reference annotations this will be returned, check :func:`get_ref_annotation_data_before_time` for the format. If an annotation overlaps with ``time`` that annotation will be returned. :param str id_tier: Name of the tier. :param int time: Time to get the annotation before. :raises KeyError: If the tier is non existent. """ if self.tiers[id_tier][1]: return self.get_ref_annotation_before_time(id_tier, time) befores = self.get_annotation_data_between_times(id_tier, 0, time) if befores: return [max(befores, key=lambda x: x[0])] else: return []
[ "def", "get_annotation_data_before_time", "(", "self", ",", "id_tier", ",", "time", ")", ":", "if", "self", ".", "tiers", "[", "id_tier", "]", "[", "1", "]", ":", "return", "self", ".", "get_ref_annotation_before_time", "(", "id_tier", ",", "time", ")", "befores", "=", "self", ".", "get_annotation_data_between_times", "(", "id_tier", ",", "0", ",", "time", ")", "if", "befores", ":", "return", "[", "max", "(", "befores", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "]", "else", ":", "return", "[", "]" ]
Give the annotation before a given time. When the tier contains reference annotations this will be returned, check :func:`get_ref_annotation_data_before_time` for the format. If an annotation overlaps with ``time`` that annotation will be returned. :param str id_tier: Name of the tier. :param int time: Time to get the annotation before. :raises KeyError: If the tier is non existent.
[ "Give", "the", "annotation", "before", "a", "given", "time", ".", "When", "the", "tier", "contains", "reference", "annotations", "this", "will", "be", "returned", "check", ":", "func", ":", "get_ref_annotation_data_before_time", "for", "the", "format", ".", "If", "an", "annotation", "overlaps", "with", "time", "that", "annotation", "will", "be", "returned", "." ]
python
test
tgbugs/pyontutils
pyontutils/combinators.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/combinators.py#L17-L26
def make_predicate_object_combinator(function, p, o): """ Combinator to hold predicate object pairs until a subject is supplied and then call a function that accepts a subject, predicate, and object. Create a combinator to defer production of a triple until the missing pieces are supplied. Note that the naming here tells you what is stored IN the combinator. The argument to the combinator is the piece that is missing. """ def predicate_object_combinator(subject): return function(subject, p, o) return predicate_object_combinator
[ "def", "make_predicate_object_combinator", "(", "function", ",", "p", ",", "o", ")", ":", "def", "predicate_object_combinator", "(", "subject", ")", ":", "return", "function", "(", "subject", ",", "p", ",", "o", ")", "return", "predicate_object_combinator" ]
Combinator to hold predicate object pairs until a subject is supplied and then call a function that accepts a subject, predicate, and object. Create a combinator to defer production of a triple until the missing pieces are supplied. Note that the naming here tells you what is stored IN the combinator. The argument to the combinator is the piece that is missing.
[ "Combinator", "to", "hold", "predicate", "object", "pairs", "until", "a", "subject", "is", "supplied", "and", "then", "call", "a", "function", "that", "accepts", "a", "subject", "predicate", "and", "object", "." ]
python
train
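A self-contained illustration of the combinator above: (p, o) are captured in the closure, and the subject arrives later. The triple "function" here is just tuple construction, not a real RDF library call.
make_triple = lambda s, p, o: (s, p, o)  # stand-in for a real triple factory

def make_predicate_object_combinator(function, p, o):
    def predicate_object_combinator(subject):
        return function(subject, p, o)
    return predicate_object_combinator

knows_alice = make_predicate_object_combinator(make_triple, 'knows', 'alice')
assert knows_alice('bob') == ('bob', 'knows', 'alice')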
pandas-dev/pandas
pandas/core/internals/managers.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1792-L1803
def _simple_blockify(tuples, dtype): """ return a single array of a block that has a single dtype; if dtype is not None, coerce to this dtype """ values, placement = _stack_arrays(tuples, dtype) # CHECK DTYPE? if dtype is not None and values.dtype != dtype: # pragma: no cover values = values.astype(dtype) block = make_block(values, placement=placement) return [block]
[ "def", "_simple_blockify", "(", "tuples", ",", "dtype", ")", ":", "values", ",", "placement", "=", "_stack_arrays", "(", "tuples", ",", "dtype", ")", "# CHECK DTYPE?", "if", "dtype", "is", "not", "None", "and", "values", ".", "dtype", "!=", "dtype", ":", "# pragma: no cover", "values", "=", "values", ".", "astype", "(", "dtype", ")", "block", "=", "make_block", "(", "values", ",", "placement", "=", "placement", ")", "return", "[", "block", "]" ]
return a single array of a block that has a single dtype; if dtype is not None, coerce to this dtype
[ "return", "a", "single", "array", "of", "a", "block", "that", "has", "a", "single", "dtype", ";", "if", "dtype", "is", "not", "None", "coerce", "to", "this", "dtype" ]
python
train
tadashi-aikawa/owlmixin
owlmixin/transformers.py
https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/transformers.py#L304-L312
def to_yamlf(self, fpath: str, encoding: str='utf8', ignore_none: bool=True, ignore_empty: bool=False) -> str: """From instance to yaml file :param ignore_none: Properties which are None are excluded if True :param fpath: Yaml file path :param encoding: Yaml file encoding :return: Yaml file path """ return util.save_yamlf(traverse(self, ignore_none, force_value=True, ignore_empty=ignore_empty), fpath, encoding)
[ "def", "to_yamlf", "(", "self", ",", "fpath", ":", "str", ",", "encoding", ":", "str", "=", "'utf8'", ",", "ignore_none", ":", "bool", "=", "True", ",", "ignore_empty", ":", "bool", "=", "False", ")", "->", "str", ":", "return", "util", ".", "save_yamlf", "(", "traverse", "(", "self", ",", "ignore_none", ",", "force_value", "=", "True", ",", "ignore_empty", "=", "ignore_empty", ")", ",", "fpath", ",", "encoding", ")" ]
From instance to yaml file :param ignore_none: Properties which are None are excluded if True :param fpath: Yaml file path :param encoding: Yaml file encoding :return: Yaml file path
[ "From", "instance", "to", "yaml", "file" ]
python
train
ggravlingen/pytradfri
pytradfri/api/libcoap_api.py
https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/api/libcoap_api.py#L198-L210
def retry_timeout(api, retries=3): """Retry API call when a timeout occurs.""" @wraps(api) def retry_api(*args, **kwargs): """Retrying API.""" for i in range(1, retries + 1): try: return api(*args, **kwargs) except RequestTimeout: if i == retries: raise return retry_api
[ "def", "retry_timeout", "(", "api", ",", "retries", "=", "3", ")", ":", "@", "wraps", "(", "api", ")", "def", "retry_api", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Retrying API.\"\"\"", "for", "i", "in", "range", "(", "1", ",", "retries", "+", "1", ")", ":", "try", ":", "return", "api", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "RequestTimeout", ":", "if", "i", "==", "retries", ":", "raise", "return", "retry_api" ]
Retry API call when a timeout occurs.
[ "Retry", "API", "call", "when", "a", "timeout", "occurs", "." ]
python
train
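A standalone demo of the retry wrapper above: a local stand-in exception plays the role of pytradfri's RequestTimeout, and a stub API fails twice before succeeding on the third and final attempt.
from functools import wraps

class RequestTimeout(Exception):
    pass  # stand-in for pytradfri's RequestTimeout

def retry_timeout(api, retries=3):
    @wraps(api)
    def retry_api(*args, **kwargs):
        for i in range(1, retries + 1):
            try:
                return api(*args, **kwargs)
            except RequestTimeout:
                if i == retries:
                    raise
    return retry_api

calls = {'n': 0}
def flaky():
    calls['n'] += 1
    if calls['n'] < 3:
        raise RequestTimeout()
    return 'ok'

assert retry_timeout(flaky)() == 'ok' and calls['n'] == 3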
gawel/irc3
irc3/dec.py
https://github.com/gawel/irc3/blob/cd27840a5809a1f803dc620860fe75d83d2a2ec8/irc3/dec.py#L85-L89
def dcc_event(regexp, callback=None, iotype='in', venusian_category='irc3.dcc'): """Work like :class:`~irc3.dec.event` but occurs during DCC CHATs""" return event(regexp, callback=callback, iotype='dcc_' + iotype, venusian_category=venusian_category)
[ "def", "dcc_event", "(", "regexp", ",", "callback", "=", "None", ",", "iotype", "=", "'in'", ",", "venusian_category", "=", "'irc3.dcc'", ")", ":", "return", "event", "(", "regexp", ",", "callback", "=", "callback", ",", "iotype", "=", "'dcc_'", "+", "iotype", ",", "venusian_category", "=", "venusian_category", ")" ]
Work like :class:`~irc3.dec.event` but occurs during DCC CHATs
[ "Work", "like", ":", "class", ":", "~irc3", ".", "dec", ".", "event", "but", "occurs", "during", "DCC", "CHATs" ]
python
train
seb-m/tss
tss.py
https://github.com/seb-m/tss/blob/ab45176b8585ba6bbbcaeffd21ec0c63f615dce0/tss.py#L53-L63
def encode(value, encoding='utf-8', encoding_errors='strict'): """ Return a bytestring representation of the value. """ if isinstance(value, bytes): return value if not isinstance(value, basestring): value = str(value) if isinstance(value, unicode): value = value.encode(encoding, encoding_errors) return value
[ "def", "encode", "(", "value", ",", "encoding", "=", "'utf-8'", ",", "encoding_errors", "=", "'strict'", ")", ":", "if", "isinstance", "(", "value", ",", "bytes", ")", ":", "return", "value", "if", "not", "isinstance", "(", "value", ",", "basestring", ")", ":", "value", "=", "str", "(", "value", ")", "if", "isinstance", "(", "value", ",", "unicode", ")", ":", "value", "=", "value", ".", "encode", "(", "encoding", ",", "encoding_errors", ")", "return", "value" ]
Return a bytestring representation of the value.
[ "Return", "a", "bytestring", "representation", "of", "the", "value", "." ]
python
train
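The function above targets Python 2 (basestring and unicode do not exist on Python 3). A hedged Python 3 equivalent of the same coercion, offered as a sketch rather than part of tss:
def encode3(value, encoding='utf-8', encoding_errors='strict'):
    # bytes pass through; str is encoded; anything else is stringified first
    if isinstance(value, bytes):
        return value
    if not isinstance(value, str):
        value = str(value)
    return value.encode(encoding, encoding_errors)

assert encode3(b'abc') == b'abc'
assert encode3('héllo') == b'h\xc3\xa9llo'
assert encode3(42) == b'42'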
tensorflow/tensor2tensor
tensor2tensor/models/research/vqa_self_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_self_attention.py#L560-L651
def decoder(decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, name="decoder", save_weights_to=None, make_image_summary=True,): """A stack of transformer layers. Args: decoder_input: a Tensor encoder_output: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: y: a Tensors """ x = decoder_input with tf.variable_scope(name): for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers): layer_name = "layer_%d" % layer with tf.variable_scope(layer_name): with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, make_image_summary=make_image_summary, ) utils.collect_named_outputs("norms", "decoder_self_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs("norms", "decoder_self_attention_post_%d"%(layer), tf.norm(x, axis=-1)) if encoder_output is not None: with tf.variable_scope("encdec_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), encoder_output, encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, save_weights_to=save_weights_to, make_image_summary=make_image_summary, ) utils.collect_named_outputs( "norms", "decoder_encoder_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "decoder_encoder_attention_post_%d"%(layer), tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, ) utils.collect_named_outputs("norms", "decoder_ffn_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs("norms", "decoder_ffn_post_%d"%(layer), tf.norm(x, axis=-1)) # if normalization is done in layer_preprocess, then it should also be done # on the output, since the output can grow very large, being the sum of # a whole stack of unnormalized layer outputs. return common_layers.layer_preprocess(x, hparams)
[ "def", "decoder", "(", "decoder_input", ",", "encoder_output", ",", "decoder_self_attention_bias", ",", "encoder_decoder_attention_bias", ",", "hparams", ",", "name", "=", "\"decoder\"", ",", "save_weights_to", "=", "None", ",", "make_image_summary", "=", "True", ",", ")", ":", "x", "=", "decoder_input", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "for", "layer", "in", "range", "(", "hparams", ".", "num_decoder_layers", "or", "hparams", ".", "num_hidden_layers", ")", ":", "layer_name", "=", "\"layer_%d\"", "%", "layer", "with", "tf", ".", "variable_scope", "(", "layer_name", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"self_attention\"", ")", ":", "y", "=", "common_attention", ".", "multihead_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "None", ",", "decoder_self_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ",", "attention_type", "=", "hparams", ".", "self_attention_type", ",", "save_weights_to", "=", "save_weights_to", ",", "make_image_summary", "=", "make_image_summary", ",", ")", "utils", ".", "collect_named_outputs", "(", "\"norms\"", ",", "\"decoder_self_attention_%d\"", "%", "(", "layer", ")", ",", "tf", ".", "norm", "(", "y", ",", "axis", "=", "-", "1", ")", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "utils", ".", "collect_named_outputs", "(", "\"norms\"", ",", "\"decoder_self_attention_post_%d\"", "%", "(", "layer", ")", ",", "tf", ".", "norm", "(", "x", ",", "axis", "=", "-", "1", ")", ")", "if", "encoder_output", "is", "not", "None", ":", "with", "tf", ".", "variable_scope", "(", "\"encdec_attention\"", ")", ":", "y", "=", "common_attention", ".", "multihead_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "encoder_output", ",", "encoder_decoder_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ",", "save_weights_to", "=", "save_weights_to", ",", "make_image_summary", "=", "make_image_summary", ",", ")", "utils", ".", "collect_named_outputs", "(", "\"norms\"", ",", "\"decoder_encoder_attention_%d\"", "%", "(", "layer", ")", ",", "tf", ".", "norm", "(", "y", ",", "axis", "=", "-", "1", ")", ")", "x", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "utils", ".", "collect_named_outputs", "(", "\"norms\"", ",", "\"decoder_encoder_attention_post_%d\"", "%", "(", "layer", ")", ",", "tf", ".", "norm", "(", "x", ",", "axis", "=", "-", "1", ")", ")", "with", "tf", ".", "variable_scope", "(", "\"ffn\"", ")", ":", "y", "=", "common_layers", ".", "dense_relu_dense", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ".", "filter_size", ",", "hparams", ".", "hidden_size", ",", "dropout", "=", "hparams", ".", "relu_dropout", ",", ")", "utils", ".", "collect_named_outputs", "(", "\"norms\"", ",", "\"decoder_ffn_%d\"", "%", "(", "layer", ")", ",", "tf", ".", "norm", "(", "y", ",", "axis", "=", "-", "1", ")", ")", "x", "=", "common_layers", 
".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "utils", ".", "collect_named_outputs", "(", "\"norms\"", ",", "\"decoder_ffn_post_%d\"", "%", "(", "layer", ")", ",", "tf", ".", "norm", "(", "x", ",", "axis", "=", "-", "1", ")", ")", "# if normalization is done in layer_preprocess, then it should also be done", "# on the output, since the output can grow very large, being the sum of", "# a whole stack of unnormalized layer outputs.", "return", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")" ]
A stack of transformer layers. Args: decoder_input: a Tensor encoder_output: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. Returns: y: a Tensors
[ "A", "stack", "of", "transformer", "layers", "." ]
python
train
bambinos/bambi
bambi/models.py
https://github.com/bambinos/bambi/blob/b4a0ced917968bb99ca20915317417d708387946/bambi/models.py#L374-L496
def _add(self, fixed=None, random=None, priors=None, family='gaussian', link=None, categorical=None, append=True): '''Internal version of add(), with the same arguments. Runs during Model.build() ''' # use cleaned data with NAs removed (if user requested) data = self.clean_data # alter this pandas flag to avoid false positive SettingWithCopyWarnings data._is_copy = False # Explicitly convert columns to category if desired--though this # can also be done within the formula using C(). if categorical is not None: data = data.copy() cats = listify(categorical) data[cats] = data[cats].apply(lambda x: x.astype('category')) if fixed is not None: if '~' in fixed: # check to see if formula is using the 'y[event] ~ x' syntax # (for bernoulli models). If so, chop it into groups: # 1 = 'y[event]', 2 = 'y', 3 = 'event', 4 = 'x' # If this syntax is not being used, event = None event = re.match(r'^((\S+)\[(\S+)\])\s*~(.*)$', fixed) if event is not None: fixed = '{}~{}'.format(event.group(2), event.group(4)) y, X = dmatrices(fixed, data=data, NA_action='raise') y_label = y.design_info.term_names[0] if event is not None: # pass in new Y data that has 1 if y=event and 0 otherwise y_data = y[:, y.design_info.column_names.index(event.group(1))] y_data = pd.DataFrame({event.group(3): y_data}) self._add_y(y_label, family=family, link=link, data=y_data) else: # use Y as-is self._add_y(y_label, family=family, link=link) else: X = dmatrix(fixed, data=data, NA_action='raise') # Loop over predictor terms for _name, _slice in X.design_info.term_name_slices.items(): cols = X.design_info.column_names[_slice] term_data = pd.DataFrame(X[:, _slice], columns=cols) prior = priors.pop(_name, priors.get('fixed', None)) self.terms[_name] = Term(_name, term_data, prior=prior) # Random effects if random is not None: random = listify(random) for f in random: f = f.strip() # Split specification into intercept, predictor, and grouper patt = r'^([01]+)*[\s\+]*([^\|]+)*\|(.*)' intcpt, pred, grpr = re.search(patt, f).groups() label = '{}|{}'.format(pred, grpr) if pred else grpr prior = priors.pop(label, priors.get('random', None)) # Treat all grouping variables as categoricals, regardless of # their dtype and what the user may have specified in the # 'categorical' argument. var_names = re.findall('(\w+)', grpr) for v in var_names: if v in data.columns: data.loc[:, v] = data.loc[:, v].astype('category') self.clean_data.loc[:, v] = data.loc[:, v] # Default to including random intercepts intcpt = 1 if intcpt is None else int(intcpt) grpr_df = dmatrix('0+%s' % grpr, data, return_type='dataframe', NA_action='raise') # If there's no predictor, we must be adding random intercepts if not pred and grpr not in self.terms: name = '1|' + grpr pred = np.ones((len(grpr_df), 1)) term = RandomTerm(name, grpr_df, pred, grpr_df.values, categorical=True, prior=prior) self.terms[name] = term else: pred_df = dmatrix('%s+%s' % (intcpt, pred), data, return_type='dataframe', NA_action='raise') # determine value of the 'constant' attribute const = np.atleast_2d(pred_df.T).T.sum(1).var() == 0 for col, i in pred_df.design_info.column_name_indexes.items(): pred_data = pred_df.iloc[:, i] lev_data = grpr_df.multiply(pred_data, axis=0) # Also rename intercepts and skip if already added. # This can happen if user specifies something like # random=['1|school', 'student|school']. 
if col == 'Intercept': if grpr in self.terms: continue label = '1|%s' % grpr else: label = col + '|' + grpr prior = priors.pop(label, priors.get('random', None)) # Categorical or continuous is determined from data ld = lev_data.values if ((ld == 0) | (ld == 1)).all(): lev_data = lev_data.astype(int) cat = True else: cat = False pred_data = pred_data[:, None] # Must be 2D later term = RandomTerm(label, lev_data, pred_data, grpr_df.values, categorical=cat, constant=const if const else None, prior=prior) self.terms[label] = term
[ "def", "_add", "(", "self", ",", "fixed", "=", "None", ",", "random", "=", "None", ",", "priors", "=", "None", ",", "family", "=", "'gaussian'", ",", "link", "=", "None", ",", "categorical", "=", "None", ",", "append", "=", "True", ")", ":", "# use cleaned data with NAs removed (if user requested)", "data", "=", "self", ".", "clean_data", "# alter this pandas flag to avoid false positive SettingWithCopyWarnings", "data", ".", "_is_copy", "=", "False", "# Explicitly convert columns to category if desired--though this", "# can also be done within the formula using C().", "if", "categorical", "is", "not", "None", ":", "data", "=", "data", ".", "copy", "(", ")", "cats", "=", "listify", "(", "categorical", ")", "data", "[", "cats", "]", "=", "data", "[", "cats", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "astype", "(", "'category'", ")", ")", "if", "fixed", "is", "not", "None", ":", "if", "'~'", "in", "fixed", ":", "# check to see if formula is using the 'y[event] ~ x' syntax", "# (for bernoulli models). If so, chop it into groups:", "# 1 = 'y[event]', 2 = 'y', 3 = 'event', 4 = 'x'", "# If this syntax is not being used, event = None", "event", "=", "re", ".", "match", "(", "r'^((\\S+)\\[(\\S+)\\])\\s*~(.*)$'", ",", "fixed", ")", "if", "event", "is", "not", "None", ":", "fixed", "=", "'{}~{}'", ".", "format", "(", "event", ".", "group", "(", "2", ")", ",", "event", ".", "group", "(", "4", ")", ")", "y", ",", "X", "=", "dmatrices", "(", "fixed", ",", "data", "=", "data", ",", "NA_action", "=", "'raise'", ")", "y_label", "=", "y", ".", "design_info", ".", "term_names", "[", "0", "]", "if", "event", "is", "not", "None", ":", "# pass in new Y data that has 1 if y=event and 0 otherwise", "y_data", "=", "y", "[", ":", ",", "y", ".", "design_info", ".", "column_names", ".", "index", "(", "event", ".", "group", "(", "1", ")", ")", "]", "y_data", "=", "pd", ".", "DataFrame", "(", "{", "event", ".", "group", "(", "3", ")", ":", "y_data", "}", ")", "self", ".", "_add_y", "(", "y_label", ",", "family", "=", "family", ",", "link", "=", "link", ",", "data", "=", "y_data", ")", "else", ":", "# use Y as-is", "self", ".", "_add_y", "(", "y_label", ",", "family", "=", "family", ",", "link", "=", "link", ")", "else", ":", "X", "=", "dmatrix", "(", "fixed", ",", "data", "=", "data", ",", "NA_action", "=", "'raise'", ")", "# Loop over predictor terms", "for", "_name", ",", "_slice", "in", "X", ".", "design_info", ".", "term_name_slices", ".", "items", "(", ")", ":", "cols", "=", "X", ".", "design_info", ".", "column_names", "[", "_slice", "]", "term_data", "=", "pd", ".", "DataFrame", "(", "X", "[", ":", ",", "_slice", "]", ",", "columns", "=", "cols", ")", "prior", "=", "priors", ".", "pop", "(", "_name", ",", "priors", ".", "get", "(", "'fixed'", ",", "None", ")", ")", "self", ".", "terms", "[", "_name", "]", "=", "Term", "(", "_name", ",", "term_data", ",", "prior", "=", "prior", ")", "# Random effects", "if", "random", "is", "not", "None", ":", "random", "=", "listify", "(", "random", ")", "for", "f", "in", "random", ":", "f", "=", "f", ".", "strip", "(", ")", "# Split specification into intercept, predictor, and grouper", "patt", "=", "r'^([01]+)*[\\s\\+]*([^\\|]+)*\\|(.*)'", "intcpt", ",", "pred", ",", "grpr", "=", "re", ".", "search", "(", "patt", ",", "f", ")", ".", "groups", "(", ")", "label", "=", "'{}|{}'", ".", "format", "(", "pred", ",", "grpr", ")", "if", "pred", "else", "grpr", "prior", "=", "priors", ".", "pop", "(", "label", ",", "priors", ".", "get", "(", "'random'", ",", "None", ")", 
")", "# Treat all grouping variables as categoricals, regardless of", "# their dtype and what the user may have specified in the", "# 'categorical' argument.", "var_names", "=", "re", ".", "findall", "(", "'(\\w+)'", ",", "grpr", ")", "for", "v", "in", "var_names", ":", "if", "v", "in", "data", ".", "columns", ":", "data", ".", "loc", "[", ":", ",", "v", "]", "=", "data", ".", "loc", "[", ":", ",", "v", "]", ".", "astype", "(", "'category'", ")", "self", ".", "clean_data", ".", "loc", "[", ":", ",", "v", "]", "=", "data", ".", "loc", "[", ":", ",", "v", "]", "# Default to including random intercepts", "intcpt", "=", "1", "if", "intcpt", "is", "None", "else", "int", "(", "intcpt", ")", "grpr_df", "=", "dmatrix", "(", "'0+%s'", "%", "grpr", ",", "data", ",", "return_type", "=", "'dataframe'", ",", "NA_action", "=", "'raise'", ")", "# If there's no predictor, we must be adding random intercepts", "if", "not", "pred", "and", "grpr", "not", "in", "self", ".", "terms", ":", "name", "=", "'1|'", "+", "grpr", "pred", "=", "np", ".", "ones", "(", "(", "len", "(", "grpr_df", ")", ",", "1", ")", ")", "term", "=", "RandomTerm", "(", "name", ",", "grpr_df", ",", "pred", ",", "grpr_df", ".", "values", ",", "categorical", "=", "True", ",", "prior", "=", "prior", ")", "self", ".", "terms", "[", "name", "]", "=", "term", "else", ":", "pred_df", "=", "dmatrix", "(", "'%s+%s'", "%", "(", "intcpt", ",", "pred", ")", ",", "data", ",", "return_type", "=", "'dataframe'", ",", "NA_action", "=", "'raise'", ")", "# determine value of the 'constant' attribute", "const", "=", "np", ".", "atleast_2d", "(", "pred_df", ".", "T", ")", ".", "T", ".", "sum", "(", "1", ")", ".", "var", "(", ")", "==", "0", "for", "col", ",", "i", "in", "pred_df", ".", "design_info", ".", "column_name_indexes", ".", "items", "(", ")", ":", "pred_data", "=", "pred_df", ".", "iloc", "[", ":", ",", "i", "]", "lev_data", "=", "grpr_df", ".", "multiply", "(", "pred_data", ",", "axis", "=", "0", ")", "# Also rename intercepts and skip if already added.", "# This can happen if user specifies something like", "# random=['1|school', 'student|school'].", "if", "col", "==", "'Intercept'", ":", "if", "grpr", "in", "self", ".", "terms", ":", "continue", "label", "=", "'1|%s'", "%", "grpr", "else", ":", "label", "=", "col", "+", "'|'", "+", "grpr", "prior", "=", "priors", ".", "pop", "(", "label", ",", "priors", ".", "get", "(", "'random'", ",", "None", ")", ")", "# Categorical or continuous is determined from data", "ld", "=", "lev_data", ".", "values", "if", "(", "(", "ld", "==", "0", ")", "|", "(", "ld", "==", "1", ")", ")", ".", "all", "(", ")", ":", "lev_data", "=", "lev_data", ".", "astype", "(", "int", ")", "cat", "=", "True", "else", ":", "cat", "=", "False", "pred_data", "=", "pred_data", "[", ":", ",", "None", "]", "# Must be 2D later", "term", "=", "RandomTerm", "(", "label", ",", "lev_data", ",", "pred_data", ",", "grpr_df", ".", "values", ",", "categorical", "=", "cat", ",", "constant", "=", "const", "if", "const", "else", "None", ",", "prior", "=", "prior", ")", "self", ".", "terms", "[", "label", "]", "=", "term" ]
Internal version of add(), with the same arguments. Runs during Model.build()
[ "Internal", "version", "of", "add", "()", "with", "the", "same", "arguments", "." ]
python
train
CamDavidsonPilon/lifelines
lifelines/utils/__init__.py
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/utils/__init__.py#L737-L770
def _additive_estimate(events, timeline, _additive_f, _additive_var, reverse): """ Called to compute the Kaplan Meier and Nelson-Aalen estimates. """ if reverse: events = events.sort_index(ascending=False) at_risk = events["entrance"].sum() - events["removed"].cumsum().shift(1).fillna(0) deaths = events["observed"] estimate_ = np.cumsum(_additive_f(at_risk, deaths)).sort_index().shift(-1).fillna(0) var_ = np.cumsum(_additive_var(at_risk, deaths)).sort_index().shift(-1).fillna(0) else: deaths = events["observed"] # Why subtract entrants like this? see https://github.com/CamDavidsonPilon/lifelines/issues/497 # specifically, we kill people, compute the ratio, and then "add" the entrants. This means that # the population should not have the late entrants. The only exception to this rule # is the first period, where entrants happen _prior_ to deaths. entrances = events["entrance"].copy() entrances.iloc[0] = 0 population = events["at_risk"] - entrances estimate_ = np.cumsum(_additive_f(population, deaths)) var_ = np.cumsum(_additive_var(population, deaths)) timeline = sorted(timeline) estimate_ = estimate_.reindex(timeline, method="pad").fillna(0) var_ = var_.reindex(timeline, method="pad") var_.index.name = "timeline" estimate_.index.name = "timeline" return estimate_, var_
[ "def", "_additive_estimate", "(", "events", ",", "timeline", ",", "_additive_f", ",", "_additive_var", ",", "reverse", ")", ":", "if", "reverse", ":", "events", "=", "events", ".", "sort_index", "(", "ascending", "=", "False", ")", "at_risk", "=", "events", "[", "\"entrance\"", "]", ".", "sum", "(", ")", "-", "events", "[", "\"removed\"", "]", ".", "cumsum", "(", ")", ".", "shift", "(", "1", ")", ".", "fillna", "(", "0", ")", "deaths", "=", "events", "[", "\"observed\"", "]", "estimate_", "=", "np", ".", "cumsum", "(", "_additive_f", "(", "at_risk", ",", "deaths", ")", ")", ".", "sort_index", "(", ")", ".", "shift", "(", "-", "1", ")", ".", "fillna", "(", "0", ")", "var_", "=", "np", ".", "cumsum", "(", "_additive_var", "(", "at_risk", ",", "deaths", ")", ")", ".", "sort_index", "(", ")", ".", "shift", "(", "-", "1", ")", ".", "fillna", "(", "0", ")", "else", ":", "deaths", "=", "events", "[", "\"observed\"", "]", "# Why subtract entrants like this? see https://github.com/CamDavidsonPilon/lifelines/issues/497", "# specifically, we kill people, compute the ratio, and then \"add\" the entrants. This means that", "# the population should not have the late entrants. The only exception to this rule", "# is the first period, where entrants happen _prior_ to deaths.", "entrances", "=", "events", "[", "\"entrance\"", "]", ".", "copy", "(", ")", "entrances", ".", "iloc", "[", "0", "]", "=", "0", "population", "=", "events", "[", "\"at_risk\"", "]", "-", "entrances", "estimate_", "=", "np", ".", "cumsum", "(", "_additive_f", "(", "population", ",", "deaths", ")", ")", "var_", "=", "np", ".", "cumsum", "(", "_additive_var", "(", "population", ",", "deaths", ")", ")", "timeline", "=", "sorted", "(", "timeline", ")", "estimate_", "=", "estimate_", ".", "reindex", "(", "timeline", ",", "method", "=", "\"pad\"", ")", ".", "fillna", "(", "0", ")", "var_", "=", "var_", ".", "reindex", "(", "timeline", ",", "method", "=", "\"pad\"", ")", "var_", ".", "index", ".", "name", "=", "\"timeline\"", "estimate_", ".", "index", ".", "name", "=", "\"timeline\"", "return", "estimate_", ",", "var_" ]
Called to compute the Kaplan Meier and Nelson-Aalen estimates.
[ "Called", "to", "compute", "the", "Kaplan", "Meier", "and", "Nelson", "-", "Aalen", "estimates", "." ]
python
train
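A quick, self-contained sketch of what the `else` branch computes when the plugged-in `_additive_f` is a Nelson-Aalen-style increment deaths/population. The event-table columns ("observed", "entrance", "at_risk") mirror what the function expects, but the exact callables lifelines passes in are not shown in this record, so treat the increment form as an assumption.

import numpy as np
import pandas as pd

events = pd.DataFrame(
    {"observed": [1, 2, 1], "entrance": [5, 0, 0], "at_risk": [5, 4, 2]},
    index=[1.0, 2.0, 3.0],  # ordered event times
)

def additive_f(population, deaths):
    return deaths / population  # Nelson-Aalen hazard increment (assumed form)

entrances = events["entrance"].copy()
entrances.iloc[0] = 0  # first-period entrants arrive before deaths
population = events["at_risk"] - entrances

cumulative_hazard = np.cumsum(additive_f(population, events["observed"]))
print(cumulative_hazard.tolist())  # [0.2, 0.7, 1.2]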
gmr/rejected
rejected/mcp.py
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/mcp.py#L305-L328
def kill_processes(self):
    """Gets called on shutdown by the timer when too much time has gone
    by, calling the terminate method instead of nicely asking for the
    consumers to stop.

    """
    LOGGER.critical('Max shutdown exceeded, forcibly exiting')
    processes = self.active_processes(False)
    while processes:
        for proc in self.active_processes(False):
            if int(proc.pid) != int(os.getpid()):
                LOGGER.warning('Killing %s (%s)', proc.name, proc.pid)
                try:
                    os.kill(int(proc.pid), signal.SIGKILL)
                except OSError:
                    pass
            else:
                LOGGER.warning('Cowardly refusing kill self (%s, %s)',
                               proc.pid, os.getpid())
        time.sleep(0.5)
        processes = self.active_processes(False)
    LOGGER.info('Killed all children')
    return self.set_state(self.STATE_STOPPED)
[ "def", "kill_processes", "(", "self", ")", ":", "LOGGER", ".", "critical", "(", "'Max shutdown exceeded, forcibly exiting'", ")", "processes", "=", "self", ".", "active_processes", "(", "False", ")", "while", "processes", ":", "for", "proc", "in", "self", ".", "active_processes", "(", "False", ")", ":", "if", "int", "(", "proc", ".", "pid", ")", "!=", "int", "(", "os", ".", "getpid", "(", ")", ")", ":", "LOGGER", ".", "warning", "(", "'Killing %s (%s)'", ",", "proc", ".", "name", ",", "proc", ".", "pid", ")", "try", ":", "os", ".", "kill", "(", "int", "(", "proc", ".", "pid", ")", ",", "signal", ".", "SIGKILL", ")", "except", "OSError", ":", "pass", "else", ":", "LOGGER", ".", "warning", "(", "'Cowardly refusing kill self (%s, %s)'", ",", "proc", ".", "pid", ",", "os", ".", "getpid", "(", ")", ")", "time", ".", "sleep", "(", "0.5", ")", "processes", "=", "self", ".", "active_processes", "(", "False", ")", "LOGGER", ".", "info", "(", "'Killed all children'", ")", "return", "self", ".", "set_state", "(", "self", ".", "STATE_STOPPED", ")" ]
Gets called on shutdown by the timer when too much time has gone by, calling the terminate method instead of nicely asking for the consumers to stop.
[ "Gets", "called", "on", "shutdown", "by", "the", "timer", "when", "too", "much", "time", "has", "gone", "by", "calling", "the", "terminate", "method", "instead", "of", "nicely", "asking", "for", "the", "consumers", "to", "stop", "." ]
python
train
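The same force-kill pattern can be reproduced with only the standard library; this sketch swaps rejected's internal process bookkeeping for multiprocessing.active_children() and assumes a POSIX system (SIGKILL is not available on Windows).

import multiprocessing
import os
import signal
import time

def kill_children(poll_interval=0.5):
    # Loop until no children remain, re-checking after each sweep.
    while multiprocessing.active_children():
        for proc in multiprocessing.active_children():
            if proc.pid != os.getpid():  # never signal ourselves
                try:
                    os.kill(proc.pid, signal.SIGKILL)
                except OSError:
                    pass  # process already exited
        time.sleep(poll_interval)  # give the OS time to reap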
lxc/python2-lxc
lxc/__init__.py
https://github.com/lxc/python2-lxc/blob/b7ec757d2bea1e5787c3e65b1359b8893491ef90/lxc/__init__.py#L452-L465
def attach_run_command(cmd):
    """
    Run a command when attaching

    Please do not call directly, this will execvp the command.
    This is to be used in conjunction with the attach method
    of a container.
    """
    if isinstance(cmd, tuple):
        return _lxc.attach_run_command(cmd)
    elif isinstance(cmd, list):
        return _lxc.attach_run_command((cmd[0], cmd))
    else:
        return _lxc.attach_run_command((cmd, [cmd]))
[ "def", "attach_run_command", "(", "cmd", ")", ":", "if", "isinstance", "(", "cmd", ",", "tuple", ")", ":", "return", "_lxc", ".", "attach_run_command", "(", "cmd", ")", "elif", "isinstance", "(", "cmd", ",", "list", ")", ":", "return", "_lxc", ".", "attach_run_command", "(", "(", "cmd", "[", "0", "]", ",", "cmd", ")", ")", "else", ":", "return", "_lxc", ".", "attach_run_command", "(", "(", "cmd", ",", "[", "cmd", "]", ")", ")" ]
Run a command when attaching

Please do not call directly, this will execvp the command.
This is to be used in conjunction with the attach method
of a container.
[ "Run", "a", "command", "when", "attaching" ]
python
train
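In practice the helper is handed to a container's attach method rather than called directly; the snippet below follows the usage shown in python2-lxc's examples, with the container name being a placeholder.

import lxc

c = lxc.Container("my-container")  # placeholder name
# string, list and tuple command forms are all normalized by attach_run_command:
c.attach_wait(lxc.attach_run_command, ["uname", "-a"])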
SpikeInterface/spikeextractors
spikeextractors/extractors/biocamrecordingextractor/biocamrecordingextractor.py
https://github.com/SpikeInterface/spikeextractors/blob/cbe3b8778a215f0bbd743af8b306856a87e438e1/spikeextractors/extractors/biocamrecordingextractor/biocamrecordingextractor.py#L94-L143
def openBiocamFile(filename, verbose=False):
    """Open a Biocam hdf5 file, read and return the recording info, pick
    the correct method to access raw data, and return this to the caller."""
    rf = h5py.File(filename, 'r')
    # Read recording variables
    recVars = rf.require_group('3BRecInfo/3BRecVars/')
    # bitDepth = recVars['BitDepth'].value[0]
    # maxV = recVars['MaxVolt'].value[0]
    # minV = recVars['MinVolt'].value[0]
    nFrames = recVars['NRecFrames'][0]
    samplingRate = recVars['SamplingRate'][0]
    signalInv = recVars['SignalInversion'][0]
    # Read chip variables
    chipVars = rf.require_group('3BRecInfo/3BMeaChip/')
    nCols = chipVars['NCols'][0]
    # Get the actual number of channels used in the recording
    file_format = rf['3BData'].attrs.get('Version')
    if file_format == 100:
        nRecCh = len(rf['3BData/Raw'][0])
    elif file_format == 101:
        nRecCh = int(1. * rf['3BData/Raw'].shape[0] / nFrames)
    else:
        raise Exception('Unknown data file format.')
    if verbose:
        print('# 3Brain data format:', file_format, 'signal inversion', signalInv)
        print('# signal range: ', recVars['MinVolt'][0], '- ', recVars['MaxVolt'][0])
        print('# channels: ', nRecCh)
        print('# frames: ', nFrames)
        print('# sampling rate: ', samplingRate)
    # get channel locations
    r = rf['3BRecInfo/3BMeaStreams/Raw/Chs'][()]['Row']
    c = rf['3BRecInfo/3BMeaStreams/Raw/Chs'][()]['Col']
    rawIndices = np.vstack((r, c)).T
    # assign channel numbers
    chIndices = np.array([(x - 1) + (y - 1) * nCols for (y, x) in rawIndices])
    # determine correct function to read data
    if verbose:
        print("# Signal inversion looks like " + str(signalInv) + ", guessing correct method for data access.")
        print("# If your results look wrong, signal polarity is may be wrong.")
    if file_format == 100:
        if signalInv == -1:
            read_function = readHDF5t_100
        else:
            read_function = readHDF5t_100_i
    else:
        if signalInv == -1:
            read_function = readHDF5t_101_i
        else:
            read_function = readHDF5t_101
    return (rf, nFrames, samplingRate, nRecCh, chIndices, file_format, signalInv, rawIndices, read_function)
[ "def", "openBiocamFile", "(", "filename", ",", "verbose", "=", "False", ")", ":", "rf", "=", "h5py", ".", "File", "(", "filename", ",", "'r'", ")", "# Read recording variables", "recVars", "=", "rf", ".", "require_group", "(", "'3BRecInfo/3BRecVars/'", ")", "# bitDepth = recVars['BitDepth'].value[0]", "# maxV = recVars['MaxVolt'].value[0]", "# minV = recVars['MinVolt'].value[0]", "nFrames", "=", "recVars", "[", "'NRecFrames'", "]", "[", "0", "]", "samplingRate", "=", "recVars", "[", "'SamplingRate'", "]", "[", "0", "]", "signalInv", "=", "recVars", "[", "'SignalInversion'", "]", "[", "0", "]", "# Read chip variables", "chipVars", "=", "rf", ".", "require_group", "(", "'3BRecInfo/3BMeaChip/'", ")", "nCols", "=", "chipVars", "[", "'NCols'", "]", "[", "0", "]", "# Get the actual number of channels used in the recording", "file_format", "=", "rf", "[", "'3BData'", "]", ".", "attrs", ".", "get", "(", "'Version'", ")", "if", "file_format", "==", "100", ":", "nRecCh", "=", "len", "(", "rf", "[", "'3BData/Raw'", "]", "[", "0", "]", ")", "elif", "file_format", "==", "101", ":", "nRecCh", "=", "int", "(", "1.", "*", "rf", "[", "'3BData/Raw'", "]", ".", "shape", "[", "0", "]", "/", "nFrames", ")", "else", ":", "raise", "Exception", "(", "'Unknown data file format.'", ")", "if", "verbose", ":", "print", "(", "'# 3Brain data format:'", ",", "file_format", ",", "'signal inversion'", ",", "signalInv", ")", "print", "(", "'# signal range: '", ",", "recVars", "[", "'MinVolt'", "]", "[", "0", "]", ",", "'- '", ",", "recVars", "[", "'MaxVolt'", "]", "[", "0", "]", ")", "print", "(", "'# channels: '", ",", "nRecCh", ")", "print", "(", "'# frames: '", ",", "nFrames", ")", "print", "(", "'# sampling rate: '", ",", "samplingRate", ")", "# get channel locations", "r", "=", "rf", "[", "'3BRecInfo/3BMeaStreams/Raw/Chs'", "]", "[", "(", ")", "]", "[", "'Row'", "]", "c", "=", "rf", "[", "'3BRecInfo/3BMeaStreams/Raw/Chs'", "]", "[", "(", ")", "]", "[", "'Col'", "]", "rawIndices", "=", "np", ".", "vstack", "(", "(", "r", ",", "c", ")", ")", ".", "T", "# assign channel numbers", "chIndices", "=", "np", ".", "array", "(", "[", "(", "x", "-", "1", ")", "+", "(", "y", "-", "1", ")", "*", "nCols", "for", "(", "y", ",", "x", ")", "in", "rawIndices", "]", ")", "# determine correct function to read data", "if", "verbose", ":", "print", "(", "\"# Signal inversion looks like \"", "+", "str", "(", "signalInv", ")", "+", "\", guessing correct method for data access.\"", ")", "print", "(", "\"# If your results look wrong, signal polarity is may be wrong.\"", ")", "if", "file_format", "==", "100", ":", "if", "signalInv", "==", "-", "1", ":", "read_function", "=", "readHDF5t_100", "else", ":", "read_function", "=", "readHDF5t_100_i", "else", ":", "if", "signalInv", "==", "-", "1", ":", "read_function", "=", "readHDF5t_101_i", "else", ":", "read_function", "=", "readHDF5t_101", "return", "(", "rf", ",", "nFrames", ",", "samplingRate", ",", "nRecCh", ",", "chIndices", ",", "file_format", ",", "signalInv", ",", "rawIndices", ",", "read_function", ")" ]
Open a Biocam hdf5 file, read and return the recording info, pick the correct method to access raw data, and return this to the caller.
[ "Open", "a", "Biocam", "hdf5", "file", "read", "and", "return", "the", "recording", "info", "pick", "te", "correct", "method", "to", "access", "raw", "data", "and", "return", "this", "to", "the", "caller", "." ]
python
train
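The recording metadata the function pulls out can be inspected directly with h5py using the same group paths as above; 'recording.brw' is a placeholder filename.

import h5py

with h5py.File('recording.brw', 'r') as rf:
    rec_vars = rf['3BRecInfo/3BRecVars']
    n_frames = rec_vars['NRecFrames'][0]
    sampling_rate = rec_vars['SamplingRate'][0]
    file_format = rf['3BData'].attrs.get('Version')  # 100 or 101
    print(file_format, n_frames, sampling_rate)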
silver-castle/mach9
mach9/response.py
https://github.com/silver-castle/mach9/blob/7a623aab3c70d89d36ade6901b6307e115400c5e/mach9/response.py#L325-L335
def raw(body, status=200, headers=None,
        content_type='application/octet-stream'):
    '''
    Returns response object without encoding the body.

    :param body: Response data.
    :param status: Response code.
    :param headers: Custom Headers.
    :param content_type: the content type (string) of the response.
    '''
    return HTTPResponse(body_bytes=body, status=status, headers=headers,
                        content_type=content_type)
[ "def", "raw", "(", "body", ",", "status", "=", "200", ",", "headers", "=", "None", ",", "content_type", "=", "'application/octet-stream'", ")", ":", "return", "HTTPResponse", "(", "body_bytes", "=", "body", ",", "status", "=", "status", ",", "headers", "=", "headers", ",", "content_type", "=", "content_type", ")" ]
Returns response object without encoding the body.

:param body: Response data.
:param status: Response code.
:param headers: Custom Headers.
:param content_type: the content type (string) of the response.
[ "Returns", "response", "object", "without", "encoding", "the", "body", ".", ":", "param", "body", ":", "Response", "data", ".", ":", "param", "status", ":", "Response", "code", ".", ":", "param", "headers", ":", "Custom", "Headers", ".", ":", "param", "content_type", ":", "the", "content", "type", "(", "string", ")", "of", "the", "response", "." ]
python
train
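A hypothetical handler using the helper; mach9 follows Sanic's routing conventions, so the app setup below is an assumption, but the raw() call itself matches the signature above.

from mach9 import Mach9
from mach9.response import raw

app = Mach9(__name__)  # assumed constructor, mirroring Sanic

@app.route('/blob')
async def blob(request):
    # body is passed through as bytes, no encoding applied
    return raw(b'\x00\x01\x02')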
onelogin/python-saml
src/onelogin/saml2/settings.py
https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/settings.py#L389-L495
def check_sp_settings(self, settings):
    """
    Checks the SP settings info.

    :param settings: Dict with settings data
    :type settings: dict

    :returns: Errors found on the SP settings data
    :rtype: list
    """
    assert isinstance(settings, dict)

    errors = []
    if not isinstance(settings, dict) or not settings:
        errors.append('invalid_syntax')
    else:
        if not settings.get('sp'):
            errors.append('sp_not_found')
        else:
            # check_sp_certs uses self.__sp so I add it
            old_sp = self.__sp
            self.__sp = settings['sp']

            sp = settings['sp']
            security = settings.get('security', {})

            if not sp.get('entityId'):
                errors.append('sp_entityId_not_found')

            if not sp.get('assertionConsumerService', {}).get('url'):
                errors.append('sp_acs_not_found')
            elif not validate_url(sp['assertionConsumerService']['url']):
                errors.append('sp_acs_url_invalid')

            if sp.get('attributeConsumingService'):
                attributeConsumingService = sp['attributeConsumingService']
                if 'serviceName' not in attributeConsumingService:
                    errors.append('sp_attributeConsumingService_serviceName_not_found')
                elif not isinstance(attributeConsumingService['serviceName'], basestring):
                    errors.append('sp_attributeConsumingService_serviceName_type_invalid')

                if 'requestedAttributes' not in attributeConsumingService:
                    errors.append('sp_attributeConsumingService_requestedAttributes_not_found')
                elif not isinstance(attributeConsumingService['requestedAttributes'], list):
                    errors.append('sp_attributeConsumingService_serviceName_type_invalid')
                else:
                    for req_attrib in attributeConsumingService['requestedAttributes']:
                        if 'name' not in req_attrib:
                            errors.append('sp_attributeConsumingService_requestedAttributes_name_not_found')
                        if 'name' in req_attrib and not req_attrib['name'].strip():
                            errors.append('sp_attributeConsumingService_requestedAttributes_name_invalid')
                        if 'attributeValue' in req_attrib and type(req_attrib['attributeValue']) != list:
                            errors.append('sp_attributeConsumingService_requestedAttributes_attributeValue_type_invalid')
                        if 'isRequired' in req_attrib and type(req_attrib['isRequired']) != bool:
                            errors.append('sp_attributeConsumingService_requestedAttributes_isRequired_type_invalid')

                if "serviceDescription" in attributeConsumingService and not isinstance(attributeConsumingService['serviceDescription'], basestring):
                    errors.append('sp_attributeConsumingService_serviceDescription_type_invalid')

            slo_url = sp.get('singleLogoutService', {}).get('url')
            if slo_url and not validate_url(slo_url):
                errors.append('sp_sls_url_invalid')

            if 'signMetadata' in security and isinstance(security['signMetadata'], dict):
                if 'keyFileName' not in security['signMetadata'] or \
                        'certFileName' not in security['signMetadata']:
                    errors.append('sp_signMetadata_invalid')

            authn_sign = bool(security.get('authnRequestsSigned'))
            logout_req_sign = bool(security.get('logoutRequestSigned'))
            logout_res_sign = bool(security.get('logoutResponseSigned'))
            want_assert_enc = bool(security.get('wantAssertionsEncrypted'))
            want_nameid_enc = bool(security.get('wantNameIdEncrypted'))

            if not self.check_sp_certs():
                if authn_sign or logout_req_sign or logout_res_sign or \
                        want_assert_enc or want_nameid_enc:
                    errors.append('sp_cert_not_found_and_required')

        if 'contactPerson' in settings:
            types = settings['contactPerson'].keys()
            valid_types = ['technical', 'support', 'administrative', 'billing', 'other']
            for c_type in types:
                if c_type not in valid_types:
                    errors.append('contact_type_invalid')
                    break

            for c_type in settings['contactPerson']:
                contact = settings['contactPerson'][c_type]
                if ('givenName' not in contact or len(contact['givenName']) == 0) or \
                        ('emailAddress' not in contact or len(contact['emailAddress']) == 0):
                    errors.append('contact_not_enought_data')
                    break

        if 'organization' in settings:
            for org in settings['organization']:
                organization = settings['organization'][org]
                if ('name' not in organization or len(organization['name']) == 0) or \
                        ('displayname' not in organization or len(organization['displayname']) == 0) or \
                        ('url' not in organization or len(organization['url']) == 0):
                    errors.append('organization_not_enought_data')
                    break

    # Restores the value that had the self.__sp
    if 'old_sp' in locals():
        self.__sp = old_sp

    return errors
[ "def", "check_sp_settings", "(", "self", ",", "settings", ")", ":", "assert", "isinstance", "(", "settings", ",", "dict", ")", "errors", "=", "[", "]", "if", "not", "isinstance", "(", "settings", ",", "dict", ")", "or", "not", "settings", ":", "errors", ".", "append", "(", "'invalid_syntax'", ")", "else", ":", "if", "not", "settings", ".", "get", "(", "'sp'", ")", ":", "errors", ".", "append", "(", "'sp_not_found'", ")", "else", ":", "# check_sp_certs uses self.__sp so I add it", "old_sp", "=", "self", ".", "__sp", "self", ".", "__sp", "=", "settings", "[", "'sp'", "]", "sp", "=", "settings", "[", "'sp'", "]", "security", "=", "settings", ".", "get", "(", "'security'", ",", "{", "}", ")", "if", "not", "sp", ".", "get", "(", "'entityId'", ")", ":", "errors", ".", "append", "(", "'sp_entityId_not_found'", ")", "if", "not", "sp", ".", "get", "(", "'assertionConsumerService'", ",", "{", "}", ")", ".", "get", "(", "'url'", ")", ":", "errors", ".", "append", "(", "'sp_acs_not_found'", ")", "elif", "not", "validate_url", "(", "sp", "[", "'assertionConsumerService'", "]", "[", "'url'", "]", ")", ":", "errors", ".", "append", "(", "'sp_acs_url_invalid'", ")", "if", "sp", ".", "get", "(", "'attributeConsumingService'", ")", ":", "attributeConsumingService", "=", "sp", "[", "'attributeConsumingService'", "]", "if", "'serviceName'", "not", "in", "attributeConsumingService", ":", "errors", ".", "append", "(", "'sp_attributeConsumingService_serviceName_not_found'", ")", "elif", "not", "isinstance", "(", "attributeConsumingService", "[", "'serviceName'", "]", ",", "basestring", ")", ":", "errors", ".", "append", "(", "'sp_attributeConsumingService_serviceName_type_invalid'", ")", "if", "'requestedAttributes'", "not", "in", "attributeConsumingService", ":", "errors", ".", "append", "(", "'sp_attributeConsumingService_requestedAttributes_not_found'", ")", "elif", "not", "isinstance", "(", "attributeConsumingService", "[", "'requestedAttributes'", "]", ",", "list", ")", ":", "errors", ".", "append", "(", "'sp_attributeConsumingService_serviceName_type_invalid'", ")", "else", ":", "for", "req_attrib", "in", "attributeConsumingService", "[", "'requestedAttributes'", "]", ":", "if", "'name'", "not", "in", "req_attrib", ":", "errors", ".", "append", "(", "'sp_attributeConsumingService_requestedAttributes_name_not_found'", ")", "if", "'name'", "in", "req_attrib", "and", "not", "req_attrib", "[", "'name'", "]", ".", "strip", "(", ")", ":", "errors", ".", "append", "(", "'sp_attributeConsumingService_requestedAttributes_name_invalid'", ")", "if", "'attributeValue'", "in", "req_attrib", "and", "type", "(", "req_attrib", "[", "'attributeValue'", "]", ")", "!=", "list", ":", "errors", ".", "append", "(", "'sp_attributeConsumingService_requestedAttributes_attributeValue_type_invalid'", ")", "if", "'isRequired'", "in", "req_attrib", "and", "type", "(", "req_attrib", "[", "'isRequired'", "]", ")", "!=", "bool", ":", "errors", ".", "append", "(", "'sp_attributeConsumingService_requestedAttributes_isRequired_type_invalid'", ")", "if", "\"serviceDescription\"", "in", "attributeConsumingService", "and", "not", "isinstance", "(", "attributeConsumingService", "[", "'serviceDescription'", "]", ",", "basestring", ")", ":", "errors", ".", "append", "(", "'sp_attributeConsumingService_serviceDescription_type_invalid'", ")", "slo_url", "=", "sp", ".", "get", "(", "'singleLogoutService'", ",", "{", "}", ")", ".", "get", "(", "'url'", ")", "if", "slo_url", "and", "not", "validate_url", "(", "slo_url", ")", ":", "errors", ".", 
"append", "(", "'sp_sls_url_invalid'", ")", "if", "'signMetadata'", "in", "security", "and", "isinstance", "(", "security", "[", "'signMetadata'", "]", ",", "dict", ")", ":", "if", "'keyFileName'", "not", "in", "security", "[", "'signMetadata'", "]", "or", "'certFileName'", "not", "in", "security", "[", "'signMetadata'", "]", ":", "errors", ".", "append", "(", "'sp_signMetadata_invalid'", ")", "authn_sign", "=", "bool", "(", "security", ".", "get", "(", "'authnRequestsSigned'", ")", ")", "logout_req_sign", "=", "bool", "(", "security", ".", "get", "(", "'logoutRequestSigned'", ")", ")", "logout_res_sign", "=", "bool", "(", "security", ".", "get", "(", "'logoutResponseSigned'", ")", ")", "want_assert_enc", "=", "bool", "(", "security", ".", "get", "(", "'wantAssertionsEncrypted'", ")", ")", "want_nameid_enc", "=", "bool", "(", "security", ".", "get", "(", "'wantNameIdEncrypted'", ")", ")", "if", "not", "self", ".", "check_sp_certs", "(", ")", ":", "if", "authn_sign", "or", "logout_req_sign", "or", "logout_res_sign", "or", "want_assert_enc", "or", "want_nameid_enc", ":", "errors", ".", "append", "(", "'sp_cert_not_found_and_required'", ")", "if", "'contactPerson'", "in", "settings", ":", "types", "=", "settings", "[", "'contactPerson'", "]", ".", "keys", "(", ")", "valid_types", "=", "[", "'technical'", ",", "'support'", ",", "'administrative'", ",", "'billing'", ",", "'other'", "]", "for", "c_type", "in", "types", ":", "if", "c_type", "not", "in", "valid_types", ":", "errors", ".", "append", "(", "'contact_type_invalid'", ")", "break", "for", "c_type", "in", "settings", "[", "'contactPerson'", "]", ":", "contact", "=", "settings", "[", "'contactPerson'", "]", "[", "c_type", "]", "if", "(", "'givenName'", "not", "in", "contact", "or", "len", "(", "contact", "[", "'givenName'", "]", ")", "==", "0", ")", "or", "(", "'emailAddress'", "not", "in", "contact", "or", "len", "(", "contact", "[", "'emailAddress'", "]", ")", "==", "0", ")", ":", "errors", ".", "append", "(", "'contact_not_enought_data'", ")", "break", "if", "'organization'", "in", "settings", ":", "for", "org", "in", "settings", "[", "'organization'", "]", ":", "organization", "=", "settings", "[", "'organization'", "]", "[", "org", "]", "if", "(", "'name'", "not", "in", "organization", "or", "len", "(", "organization", "[", "'name'", "]", ")", "==", "0", ")", "or", "(", "'displayname'", "not", "in", "organization", "or", "len", "(", "organization", "[", "'displayname'", "]", ")", "==", "0", ")", "or", "(", "'url'", "not", "in", "organization", "or", "len", "(", "organization", "[", "'url'", "]", ")", "==", "0", ")", ":", "errors", ".", "append", "(", "'organization_not_enought_data'", ")", "break", "# Restores the value that had the self.__sp", "if", "'old_sp'", "in", "locals", "(", ")", ":", "self", ".", "__sp", "=", "old_sp", "return", "errors" ]
Checks the SP settings info.

:param settings: Dict with settings data
:type settings: dict

:returns: Errors found on the SP settings data
:rtype: list
[ "Checks", "the", "SP", "settings", "info", "." ]
python
train
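A minimal SP settings dict that passes the checks above: entityId present, ACS URL present and well-formed, and no signing/encryption flags set, so missing certificates are not an error. All values are illustrative only.

settings = {
    'sp': {
        'entityId': 'https://sp.example.com/metadata/',
        'assertionConsumerService': {
            'url': 'https://sp.example.com/?acs',
        },
        'singleLogoutService': {
            'url': 'https://sp.example.com/?sls',
        },
    },
    'security': {},
}
# errors = settings_object.check_sp_settings(settings)
# an empty errors list means the SP block validated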
astropy/photutils
photutils/isophote/geometry.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/isophote/geometry.py#L275-L344
def initialize_sector_geometry(self, phi):
    """
    Initialize geometry attributes associated with an elliptical
    sector at the given polar angle ``phi``.

    This function computes:

    * the four vertices that define the elliptical sector on the
      pixel array.
    * the sector area (saved in the ``sector_area`` attribute)
    * the sector angular width (saved in ``sector_angular_width``
      attribute)

    Parameters
    ----------
    phi : float
        The polar angle (radians) where the sector is located.

    Returns
    -------
    x, y : 1D `~numpy.ndarray`
        The x and y coordinates of each vertex as 1D arrays.
    """
    # These polar radii bound the region between the inner
    # and outer ellipses that define the sector.
    sma1, sma2 = self.bounding_ellipses()
    eps_ = 1. - self.eps

    # polar vector at one side of the elliptical sector
    self._phi1 = phi - self.sector_angular_width / 2.
    r1 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2 +
                                  (math.sin(self._phi1))**2))
    r2 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2 +
                                  (math.sin(self._phi1))**2))

    # polar vector at the other side of the elliptical sector
    self._phi2 = phi + self.sector_angular_width / 2.
    r3 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2 +
                                  (math.sin(self._phi2))**2))
    r4 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2 +
                                  (math.sin(self._phi2))**2))

    # sector area
    sa1 = _area(sma1, self.eps, self._phi1, r1)
    sa2 = _area(sma2, self.eps, self._phi1, r2)
    sa3 = _area(sma2, self.eps, self._phi2, r3)
    sa4 = _area(sma1, self.eps, self._phi2, r4)
    self.sector_area = abs((sa3 - sa2) - (sa4 - sa1))

    # angular width of sector. It is calculated such that the sectors
    # come out with roughly constant area along the ellipse.
    self.sector_angular_width = max(min((self._area_factor / (r3 - r4)
                                         / r4), self._phi_max),
                                    self._phi_min)

    # compute the 4 vertices that define the elliptical sector.
    vertex_x = np.zeros(shape=4, dtype=float)
    vertex_y = np.zeros(shape=4, dtype=float)

    # vertices are labelled in counterclockwise sequence
    vertex_x[0:2] = np.array([r1, r2]) * math.cos(self._phi1 + self.pa)
    vertex_x[2:4] = np.array([r4, r3]) * math.cos(self._phi2 + self.pa)
    vertex_y[0:2] = np.array([r1, r2]) * math.sin(self._phi1 + self.pa)
    vertex_y[2:4] = np.array([r4, r3]) * math.sin(self._phi2 + self.pa)
    vertex_x += self.x0
    vertex_y += self.y0

    return vertex_x, vertex_y
[ "def", "initialize_sector_geometry", "(", "self", ",", "phi", ")", ":", "# These polar radii bound the region between the inner", "# and outer ellipses that define the sector.", "sma1", ",", "sma2", "=", "self", ".", "bounding_ellipses", "(", ")", "eps_", "=", "1.", "-", "self", ".", "eps", "# polar vector at one side of the elliptical sector", "self", ".", "_phi1", "=", "phi", "-", "self", ".", "sector_angular_width", "/", "2.", "r1", "=", "(", "sma1", "*", "eps_", "/", "math", ".", "sqrt", "(", "(", "eps_", "*", "math", ".", "cos", "(", "self", ".", "_phi1", ")", ")", "**", "2", "+", "(", "math", ".", "sin", "(", "self", ".", "_phi1", ")", ")", "**", "2", ")", ")", "r2", "=", "(", "sma2", "*", "eps_", "/", "math", ".", "sqrt", "(", "(", "eps_", "*", "math", ".", "cos", "(", "self", ".", "_phi1", ")", ")", "**", "2", "+", "(", "math", ".", "sin", "(", "self", ".", "_phi1", ")", ")", "**", "2", ")", ")", "# polar vector at the other side of the elliptical sector", "self", ".", "_phi2", "=", "phi", "+", "self", ".", "sector_angular_width", "/", "2.", "r3", "=", "(", "sma2", "*", "eps_", "/", "math", ".", "sqrt", "(", "(", "eps_", "*", "math", ".", "cos", "(", "self", ".", "_phi2", ")", ")", "**", "2", "+", "(", "math", ".", "sin", "(", "self", ".", "_phi2", ")", ")", "**", "2", ")", ")", "r4", "=", "(", "sma1", "*", "eps_", "/", "math", ".", "sqrt", "(", "(", "eps_", "*", "math", ".", "cos", "(", "self", ".", "_phi2", ")", ")", "**", "2", "+", "(", "math", ".", "sin", "(", "self", ".", "_phi2", ")", ")", "**", "2", ")", ")", "# sector area", "sa1", "=", "_area", "(", "sma1", ",", "self", ".", "eps", ",", "self", ".", "_phi1", ",", "r1", ")", "sa2", "=", "_area", "(", "sma2", ",", "self", ".", "eps", ",", "self", ".", "_phi1", ",", "r2", ")", "sa3", "=", "_area", "(", "sma2", ",", "self", ".", "eps", ",", "self", ".", "_phi2", ",", "r3", ")", "sa4", "=", "_area", "(", "sma1", ",", "self", ".", "eps", ",", "self", ".", "_phi2", ",", "r4", ")", "self", ".", "sector_area", "=", "abs", "(", "(", "sa3", "-", "sa2", ")", "-", "(", "sa4", "-", "sa1", ")", ")", "# angular width of sector. 
It is calculated such that the sectors", "# come out with roughly constant area along the ellipse.", "self", ".", "sector_angular_width", "=", "max", "(", "min", "(", "(", "self", ".", "_area_factor", "/", "(", "r3", "-", "r4", ")", "/", "r4", ")", ",", "self", ".", "_phi_max", ")", ",", "self", ".", "_phi_min", ")", "# compute the 4 vertices that define the elliptical sector.", "vertex_x", "=", "np", ".", "zeros", "(", "shape", "=", "4", ",", "dtype", "=", "float", ")", "vertex_y", "=", "np", ".", "zeros", "(", "shape", "=", "4", ",", "dtype", "=", "float", ")", "# vertices are labelled in counterclockwise sequence", "vertex_x", "[", "0", ":", "2", "]", "=", "np", ".", "array", "(", "[", "r1", ",", "r2", "]", ")", "*", "math", ".", "cos", "(", "self", ".", "_phi1", "+", "self", ".", "pa", ")", "vertex_x", "[", "2", ":", "4", "]", "=", "np", ".", "array", "(", "[", "r4", ",", "r3", "]", ")", "*", "math", ".", "cos", "(", "self", ".", "_phi2", "+", "self", ".", "pa", ")", "vertex_y", "[", "0", ":", "2", "]", "=", "np", ".", "array", "(", "[", "r1", ",", "r2", "]", ")", "*", "math", ".", "sin", "(", "self", ".", "_phi1", "+", "self", ".", "pa", ")", "vertex_y", "[", "2", ":", "4", "]", "=", "np", ".", "array", "(", "[", "r4", ",", "r3", "]", ")", "*", "math", ".", "sin", "(", "self", ".", "_phi2", "+", "self", ".", "pa", ")", "vertex_x", "+=", "self", ".", "x0", "vertex_y", "+=", "self", ".", "y0", "return", "vertex_x", ",", "vertex_y" ]
Initialize geometry attributes associated with an elliptical sector at the given polar angle ``phi``.

This function computes:

* the four vertices that define the elliptical sector on the pixel array.
* the sector area (saved in the ``sector_area`` attribute)
* the sector angular width (saved in ``sector_angular_width`` attribute)

Parameters
----------
phi : float
    The polar angle (radians) where the sector is located.

Returns
-------
x, y : 1D `~numpy.ndarray`
    The x and y coordinates of each vertex as 1D arrays.
[ "Initialize", "geometry", "attributes", "associated", "with", "an", "elliptical", "sector", "at", "the", "given", "polar", "angle", "phi", "." ]
python
train
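A quick numeric check of the polar-radius expression used for r1 through r4, r(phi) = sma*(1-eps) / sqrt(((1-eps)*cos(phi))**2 + sin(phi)**2): it returns the semimajor axis at phi = 0 and the semiminor axis sma*(1-eps) at phi = pi/2.

import math

def polar_radius(sma, eps, phi):
    eps_ = 1. - eps
    return sma * eps_ / math.sqrt((eps_ * math.cos(phi))**2 + math.sin(phi)**2)

print(polar_radius(10.0, 0.4, 0.0))          # 10.0 (semimajor axis)
print(polar_radius(10.0, 0.4, math.pi / 2))  # 6.0  (semiminor axis)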
sighingnow/parsec.py
src/parsec/__init__.py
https://github.com/sighingnow/parsec.py/blob/ed50e1e259142757470b925f8d20dfe5ad223af0/src/parsec/__init__.py#L231-L243
def mark(self):
    '''Mark the line and column information of the result of this parser.'''
    def pos(text, index):
        return ParseError.loc_info(text, index)

    @Parser
    def mark_parser(text, index):
        res = self(text, index)
        if res.status:
            return Value.success(res.index,
                                 (pos(text, index), res.value, pos(text, res.index)))
        else:
            return res  # failed.
    return mark_parser
[ "def", "mark", "(", "self", ")", ":", "def", "pos", "(", "text", ",", "index", ")", ":", "return", "ParseError", ".", "loc_info", "(", "text", ",", "index", ")", "@", "Parser", "def", "mark_parser", "(", "text", ",", "index", ")", ":", "res", "=", "self", "(", "text", ",", "index", ")", "if", "res", ".", "status", ":", "return", "Value", ".", "success", "(", "res", ".", "index", ",", "(", "pos", "(", "text", ",", "index", ")", ",", "res", ".", "value", ",", "pos", "(", "text", ",", "res", ".", "index", ")", ")", ")", "else", ":", "return", "res", "# failed.", "return", "mark_parser" ]
Mark the line and column information of the result of this parser.
[ "Mark", "the", "line", "and", "column", "information", "of", "the", "result", "of", "this", "parser", "." ]
python
train
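Hedged usage sketch: wrapping a parser with .mark() yields (start, value, end), where the positions come from ParseError.loc_info as (line, column) pairs; the exact zero-based output shown below is an assumption about that helper.

from parsec import string

greeting = string('hello').mark()
print(greeting.parse('hello'))
# expected shape: ((0, 0), 'hello', (0, 5))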
senaite/senaite.core
bika/lims/browser/srtemplate/artemplates.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/srtemplate/artemplates.py#L141-L169
def _buildFromPerPartition(self, item, partition):
    """
    This function will get the partition info and then it'll write the
    container and preservation data to the dictionary 'item'
    :param item: a dict which contains the ARTemplate data columns
    :param partition: a dict with some partition info
    :returns: the item dict with the partition's data
    """
    uc = getToolByName(self, 'uid_catalog')
    container = uc(UID=partition.get('container_uid', ''))
    preservation = uc(UID=partition.get('preservation_uid', ''))
    if container:
        container = container[0].getObject()
        item['ContainerTitle'] = container.title
        item['replace']['ContainerTitle'] = "<a href='%s'>%s</a>" % \
            (container.absolute_url(), item['ContainerTitle'])
        item['ContainerVolume'] = container.getCapacity()
    else:
        item['ContainerTitle'] = ''
        item['ContainerVolume'] = ''
    if preservation:
        preservation = preservation[0].getObject()
        item['Preservation'] = preservation.title
        item['replace']['Preservation'] = "<a href='%s'>%s</a>" % \
            (preservation.absolute_url(), item['Preservation'])
    else:
        item['Preservation'] = ''
    item['PreparationMethod'] = ''
    return item
[ "def", "_buildFromPerPartition", "(", "self", ",", "item", ",", "partition", ")", ":", "uc", "=", "getToolByName", "(", "self", ",", "'uid_catalog'", ")", "container", "=", "uc", "(", "UID", "=", "partition", ".", "get", "(", "'container_uid'", ",", "''", ")", ")", "preservation", "=", "uc", "(", "UID", "=", "partition", ".", "get", "(", "'preservation_uid'", ",", "''", ")", ")", "if", "container", ":", "container", "=", "container", "[", "0", "]", ".", "getObject", "(", ")", "item", "[", "'ContainerTitle'", "]", "=", "container", ".", "title", "item", "[", "'replace'", "]", "[", "'ContainerTitle'", "]", "=", "\"<a href='%s'>%s</a>\"", "%", "(", "container", ".", "absolute_url", "(", ")", ",", "item", "[", "'ContainerTitle'", "]", ")", "item", "[", "'ContainerVolume'", "]", "=", "container", ".", "getCapacity", "(", ")", "else", ":", "item", "[", "'ContainerTitle'", "]", "=", "''", "item", "[", "'ContainerVolume'", "]", "=", "''", "if", "preservation", ":", "preservation", "=", "preservation", "[", "0", "]", ".", "getObject", "(", ")", "item", "[", "'Preservation'", "]", "=", "preservation", ".", "title", "item", "[", "'replace'", "]", "[", "'Preservation'", "]", "=", "\"<a href='%s'>%s</a>\"", "%", "(", "preservation", ".", "absolute_url", "(", ")", ",", "item", "[", "'Preservation'", "]", ")", "else", ":", "item", "[", "'Preservation'", "]", "=", "''", "item", "[", "'PreparationMethod'", "]", "=", "''", "return", "item" ]
This function will get the partition info and then it'll write the
container and preservation data to the dictionary 'item'

:param item: a dict which contains the ARTemplate data columns
:param partition: a dict with some partition info
:returns: the item dict with the partition's data
[ "This", "function", "will", "get", "the", "partition", "info", "and", "then", "it", "ll", "write", "the", "container", "and", "preservation", "data", "to", "the", "dictionary", "item", ":", "param", "item", ":", "a", "dict", "which", "contains", "the", "ARTeplate", "data", "columns", ":", "param", "partition", ":", "a", "dict", "with", "some", "partition", "info", ":", "returns", ":", "the", "item", "dict", "with", "the", "partition", "s", "data" ]
python
train
randomir/plucky
plucky/structural.py
https://github.com/randomir/plucky/blob/16b7b59aa19d619d8e619dc15dc7eeffc9fe078a/plucky/structural.py#L99-L107
def _filtered_list(self, selector):
    """Iterate over `self.obj` list, extracting `selector` from each
    element. The `selector` can be a simple integer index, or any
    valid key (hashable object).
    """
    res = []
    for elem in self.obj:
        self._append(elem, selector, res)
    return res
[ "def", "_filtered_list", "(", "self", ",", "selector", ")", ":", "res", "=", "[", "]", "for", "elem", "in", "self", ".", "obj", ":", "self", ".", "_append", "(", "elem", ",", "selector", ",", "res", ")", "return", "res" ]
Iterate over `self.obj` list, extracting `selector` from each element. The `selector` can be a simple integer index, or any valid key (hashable object).
[ "Iterate", "over", "self", ".", "obj", "list", "extracting", "selector", "from", "each", "element", ".", "The", "selector", "can", "be", "a", "simple", "integer", "index", "or", "any", "valid", "key", "(", "hashable", "object", ")", "." ]
python
train
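In spirit, the filter behaves like the loop below; the real _append helper's exact skipping rules are not shown in this record, so the except clause is an assumption.

data = [{'name': 'a'}, {'name': 'b'}, {'other': 'c'}]
res = []
for elem in data:
    try:
        res.append(elem['name'])  # selector = 'name'
    except (KeyError, IndexError, TypeError):
        pass  # elements without the selector are skipped
print(res)  # ['a', 'b']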
tropo/tropo-webapi-python
build/lib/tropo.py
https://github.com/tropo/tropo-webapi-python/blob/f87772644a6b45066a4c5218f0c1f6467b64ab3c/build/lib/tropo.py#L654-L664
def getInterpretation(self):
    """
    Get the value of the previously POSTed Tropo action.
    """
    actions = self._actions

    if (type (actions) is list):
        dict = actions[0]
    else:
        dict = actions
    return dict['interpretation']
[ "def", "getInterpretation", "(", "self", ")", ":", "actions", "=", "self", ".", "_actions", "if", "(", "type", "(", "actions", ")", "is", "list", ")", ":", "dict", "=", "actions", "[", "0", "]", "else", ":", "dict", "=", "actions", "return", "dict", "[", "'interpretation'", "]" ]
Get the value of the previously POSTed Tropo action.
[ "Get", "the", "value", "of", "the", "previously", "POSTed", "Tropo", "action", "." ]
python
train
jayclassless/tidypy
src/tidypy/progress.py
https://github.com/jayclassless/tidypy/blob/3c3497ca377fbbe937103b77b02b326c860c748f/src/tidypy/progress.py#L41-L52
def on_tool_finish(self, tool):
    """
    Called when an individual tool completes execution.

    :param tool: the name of the tool that completed
    :type tool: str
    """
    with self._lock:
        if tool in self.current_tools:
            self.current_tools.remove(tool)
            self.completed_tools.append(tool)
[ "def", "on_tool_finish", "(", "self", ",", "tool", ")", ":", "with", "self", ".", "_lock", ":", "if", "tool", "in", "self", ".", "current_tools", ":", "self", ".", "current_tools", ".", "remove", "(", "tool", ")", "self", ".", "completed_tools", ".", "append", "(", "tool", ")" ]
Called when an individual tool completes execution.

:param tool: the name of the tool that completed
:type tool: str
[ "Called", "when", "an", "individual", "tool", "completes", "execution", "." ]
python
valid
Duke-GCB/DukeDSClient
ddsc/core/ddsapi.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/ddsapi.py#L620-L635
def get_users(self, full_name=None, email=None, username=None):
    """
    Send GET request to /users for users with optional full_name, email,
    and/or username filtering.
    :param full_name: str name of the user we are searching for
    :param email: str: optional email to filter by
    :param username: str: optional username to filter by
    :return: requests.Response containing the successful result
    """
    data = {}
    if full_name:
        data['full_name_contains'] = full_name
    if email:
        data['email'] = email
    if username:
        data['username'] = username
    return self._get_collection('/users', data)
[ "def", "get_users", "(", "self", ",", "full_name", "=", "None", ",", "email", "=", "None", ",", "username", "=", "None", ")", ":", "data", "=", "{", "}", "if", "full_name", ":", "data", "[", "'full_name_contains'", "]", "=", "full_name", "if", "email", ":", "data", "[", "'email'", "]", "=", "email", "if", "username", ":", "data", "[", "'username'", "]", "=", "username", "return", "self", ".", "_get_collection", "(", "'/users'", ",", "data", ")" ]
Send GET request to /users for users with optional full_name, email, and/or username filtering.

:param full_name: str name of the user we are searching for
:param email: str: optional email to filter by
:param username: str: optional username to filter by
:return: requests.Response containing the successful result
[ "Send", "GET", "request", "to", "/", "users", "for", "users", "with", "optional", "full_name", "email", "and", "/", "or", "username", "filtering", ".", ":", "param", "full_name", ":", "str", "name", "of", "the", "user", "we", "are", "searching", "for", ":", "param", "email", ":", "str", ":", "optional", "email", "to", "filter", "by", ":", "param", "username", ":", "str", ":", "optional", "username", "to", "filter", "by", ":", "return", ":", "requests", ".", "Response", "containing", "the", "successful", "result" ]
python
train
Miserlou/SoundScrape
soundscrape/soundscrape.py
https://github.com/Miserlou/SoundScrape/blob/efc63b99ce7e78b352e2ba22d5e51f83445546d7/soundscrape/soundscrape.py#L654-L703
def get_bandcamp_metadata(url):
    """
    Read information from the Bandcamp JavaScript object.
    The method may return a list of URLs (indicating this is probably
    a "main" page which links to one or more albums), or a JSON if we
    can already parse album/track info from the given url.
    The JSON is "sloppy". The native python JSON parser often can't
    deal, so we use the more tolerant demjson instead.
    """
    request = requests.get(url)
    try:
        sloppy_json = request.text.split("var TralbumData = ")
        sloppy_json = sloppy_json[1].replace('" + "', "")
        sloppy_json = sloppy_json.replace("'", "\'")
        sloppy_json = sloppy_json.split("};")[0] + "};"
        sloppy_json = sloppy_json.replace("};", "}")
        output = demjson.decode(sloppy_json)
    # if the JSON parser failed, we should consider it's a "/music" page,
    # so we generate a list of albums/tracks and return it immediately
    except Exception as e:
        regex_all_albums = r'<a href="(/(?:album|track)/[^>]+)">'
        all_albums = re.findall(regex_all_albums, request.text, re.MULTILINE)
        album_url_list = list()
        for album in all_albums:
            album_url = re.sub(r'music/?$', '', url) + album
            album_url_list.append(album_url)
        return album_url_list

    # if the JSON parser was successful, use a regex to get all tags
    # from this album/track, join them and set it as the "genre"
    regex_tags = r'<a class="tag" href[^>]+>([^<]+)</a>'
    tags = re.findall(regex_tags, request.text, re.MULTILINE)
    # make sure we treat integers correctly with join()
    # according to http://stackoverflow.com/a/7323861
    # (very unlikely, but better safe than sorry!)
    output['genre'] = ' '.join(s for s in tags)
    # make sure we always get the correct album name, even if this is a
    # track URL (unless this track does not belong to any album, in which
    # case the album name remains set as None.
    output['album_name'] = None
    regex_album_name = r'album_title\s*:\s*"([^"]+)"\s*,'
    match = re.search(regex_album_name, request.text, re.MULTILINE)
    if match:
        output['album_name'] = match.group(1)

    try:
        artUrl = request.text.split("\"tralbumArt\">")[1].split("\">")[0].split("href=\"")[1]
        output['artFullsizeUrl'] = artUrl
    except:
        puts_safe(colored.red("Couldn't get full artwork") + "")
        output['artFullsizeUrl'] = None

    return output
[ "def", "get_bandcamp_metadata", "(", "url", ")", ":", "request", "=", "requests", ".", "get", "(", "url", ")", "try", ":", "sloppy_json", "=", "request", ".", "text", ".", "split", "(", "\"var TralbumData = \"", ")", "sloppy_json", "=", "sloppy_json", "[", "1", "]", ".", "replace", "(", "'\" + \"'", ",", "\"\"", ")", "sloppy_json", "=", "sloppy_json", ".", "replace", "(", "\"'\"", ",", "\"\\'\"", ")", "sloppy_json", "=", "sloppy_json", ".", "split", "(", "\"};\"", ")", "[", "0", "]", "+", "\"};\"", "sloppy_json", "=", "sloppy_json", ".", "replace", "(", "\"};\"", ",", "\"}\"", ")", "output", "=", "demjson", ".", "decode", "(", "sloppy_json", ")", "# if the JSON parser failed, we should consider it's a \"/music\" page,", "# so we generate a list of albums/tracks and return it immediately", "except", "Exception", "as", "e", ":", "regex_all_albums", "=", "r'<a href=\"(/(?:album|track)/[^>]+)\">'", "all_albums", "=", "re", ".", "findall", "(", "regex_all_albums", ",", "request", ".", "text", ",", "re", ".", "MULTILINE", ")", "album_url_list", "=", "list", "(", ")", "for", "album", "in", "all_albums", ":", "album_url", "=", "re", ".", "sub", "(", "r'music/?$'", ",", "''", ",", "url", ")", "+", "album", "album_url_list", ".", "append", "(", "album_url", ")", "return", "album_url_list", "# if the JSON parser was successful, use a regex to get all tags", "# from this album/track, join them and set it as the \"genre\"", "regex_tags", "=", "r'<a class=\"tag\" href[^>]+>([^<]+)</a>'", "tags", "=", "re", ".", "findall", "(", "regex_tags", ",", "request", ".", "text", ",", "re", ".", "MULTILINE", ")", "# make sure we treat integers correctly with join()", "# according to http://stackoverflow.com/a/7323861", "# (very unlikely, but better safe than sorry!)", "output", "[", "'genre'", "]", "=", "' '", ".", "join", "(", "s", "for", "s", "in", "tags", ")", "# make sure we always get the correct album name, even if this is a", "# track URL (unless this track does not belong to any album, in which", "# case the album name remains set as None.", "output", "[", "'album_name'", "]", "=", "None", "regex_album_name", "=", "r'album_title\\s*:\\s*\"([^\"]+)\"\\s*,'", "match", "=", "re", ".", "search", "(", "regex_album_name", ",", "request", ".", "text", ",", "re", ".", "MULTILINE", ")", "if", "match", ":", "output", "[", "'album_name'", "]", "=", "match", ".", "group", "(", "1", ")", "try", ":", "artUrl", "=", "request", ".", "text", ".", "split", "(", "\"\\\"tralbumArt\\\">\"", ")", "[", "1", "]", ".", "split", "(", "\"\\\">\"", ")", "[", "0", "]", ".", "split", "(", "\"href=\\\"\"", ")", "[", "1", "]", "output", "[", "'artFullsizeUrl'", "]", "=", "artUrl", "except", ":", "puts_safe", "(", "colored", ".", "red", "(", "\"Couldn't get full artwork\"", ")", "+", "\"\"", ")", "output", "[", "'artFullsizeUrl'", "]", "=", "None", "return", "output" ]
Read information from the Bandcamp JavaScript object. The method may return a list of URLs (indicating this is probably a "main" page which links to one or more albums), or a JSON if we can already parse album/track info from the given url. The JSON is "sloppy". The native python JSON parser often can't deal, so we use the more tolerant demjson instead.
[ "Read", "information", "from", "the", "Bandcamp", "JavaScript", "object", ".", "The", "method", "may", "return", "a", "list", "of", "URLs", "(", "indicating", "this", "is", "probably", "a", "main", "page", "which", "links", "to", "one", "or", "more", "albums", ")", "or", "a", "JSON", "if", "we", "can", "already", "parse", "album", "/", "track", "info", "from", "the", "given", "url", ".", "The", "JSON", "is", "sloppy", ".", "The", "native", "python", "JSON", "parser", "often", "can", "t", "deal", "so", "we", "use", "the", "more", "tolerant", "demjson", "instead", "." ]
python
train
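Why demjson: the scraped TralbumData blob is JavaScript object-literal syntax (unquoted keys, single-quoted strings) that the stdlib json module rejects; demjson's non-strict decoder accepts it. A minimal illustration:

import demjson

sloppy = "{ current: {title: 'A Song'}, item_type: 'album' }"
print(demjson.decode(sloppy))
# {'current': {'title': 'A Song'}, 'item_type': 'album'}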
teaearlgraycold/puni
puni/decorators.py
https://github.com/teaearlgraycold/puni/blob/f6d0bfde99942b29a6f91273e48abcd2d7a94c93/puni/decorators.py#L21-L46
def update_cache(func):
    """Decorate functions that modify the internally stored usernotes JSON.

    Ensures that updates are mirrored onto reddit.

    Arguments:
        func: the function being decorated
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        """The wrapper function."""
        lazy = kwargs.get('lazy', False)
        kwargs.pop('lazy', None)

        if not lazy:
            self.get_json()

        ret = func(self, *args, **kwargs)

        # If returning a string assume it is an update message
        if isinstance(ret, str) and not lazy:
            self.set_json(ret)
        else:
            return ret

    return wrapper
[ "def", "update_cache", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"The wrapper function.\"\"\"", "lazy", "=", "kwargs", ".", "get", "(", "'lazy'", ",", "False", ")", "kwargs", ".", "pop", "(", "'lazy'", ",", "None", ")", "if", "not", "lazy", ":", "self", ".", "get_json", "(", ")", "ret", "=", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# If returning a string assume it is an update message", "if", "isinstance", "(", "ret", ",", "str", ")", "and", "not", "lazy", ":", "self", ".", "set_json", "(", "ret", ")", "else", ":", "return", "ret", "return", "wrapper" ]
Decorate functions that modify the internally stored usernotes JSON.

Ensures that updates are mirrored onto reddit.

Arguments:
    func: the function being decorated
[ "Decorate", "functions", "that", "modify", "the", "internally", "stored", "usernotes", "JSON", "." ]
python
train
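A stand-in class showing the contract of the decorator defined above: get_json() is pulled before the call, a string return value is pushed back via set_json(), and lazy=True skips both round-trips. The class here is hypothetical; only the get_json/set_json interface is required.

class FakeNotes(object):
    def get_json(self):
        print('pulled notes')

    def set_json(self, reason):
        print('pushed notes: ' + reason)

    @update_cache
    def add_note(self, user):
        return 'Added note for ' + user  # string => treated as update message

FakeNotes().add_note('spez')             # pulled notes, then pushed notes: ...
FakeNotes().add_note('spez', lazy=True)  # no pull, no push, returns the string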
lappis-unb/salic-ml
src/salicml/metrics/base.py
https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/src/salicml/metrics/base.py#L5-L11
def get_info(df, group, info=['mean', 'std']):
    """
    Aggregate mean and std with the given group.
    """
    agg = df.groupby(group).agg(info)
    agg.columns = agg.columns.droplevel(0)
    return agg
[ "def", "get_info", "(", "df", ",", "group", ",", "info", "=", "[", "'mean'", ",", "'std'", "]", ")", ":", "agg", "=", "df", ".", "groupby", "(", "group", ")", ".", "agg", "(", "info", ")", "agg", ".", "columns", "=", "agg", ".", "columns", ".", "droplevel", "(", "0", ")", "return", "agg" ]
Aggregate mean and std with the given group.
[ "Aggregate", "mean", "and", "std", "with", "the", "given", "group", "." ]
python
train
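On a toy frame, the helper reduces to the two pandas calls it wraps: group, aggregate, then drop the top column level so the columns read 'mean' and 'std' directly.

import pandas as pd

df = pd.DataFrame({'uf': ['SP', 'SP', 'RJ'], 'value': [10.0, 20.0, 30.0]})
agg = df.groupby('uf').agg(['mean', 'std'])
agg.columns = agg.columns.droplevel(0)
print(agg)
#     mean       std
# uf
# RJ  30.0       NaN
# SP  15.0  7.071068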
urbn/Caesium
caesium/handler.py
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L167-L180
def arg_as_array(self, arg, split_char="|"):
    """Turns an argument into an array, split by the splitChar

    :param str arg: The name of the query param you want to turn into an array based on the value
    :param str split_char: The character the value should be split on.
    :returns: A list of values
    :rtype: list
    """
    valuesString = self.get_argument(arg, default=None)
    if valuesString:
        valuesArray = valuesString.split(split_char)
        return valuesArray

    return None
[ "def", "arg_as_array", "(", "self", ",", "arg", ",", "split_char", "=", "\"|\"", ")", ":", "valuesString", "=", "self", ".", "get_argument", "(", "arg", ",", "default", "=", "None", ")", "if", "valuesString", ":", "valuesArray", "=", "valuesString", ".", "split", "(", "split_char", ")", "return", "valuesArray", "return", "None" ]
Turns an argument into an array, split by the splitChar

:param str arg: The name of the query param you want to turn into an array based on the value
:param str split_char: The character the value should be split on.
:returns: A list of values
:rtype: list
[ "Turns", "an", "argument", "into", "an", "array", "split", "by", "the", "splitChar" ]
python
train
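Hedged usage in a handler built on the class above; for GET /things?tags=red|green|blue the helper returns the split list, and a missing parameter comes back as None rather than raising. BaseHandler stands in for the caesium handler class (assumed name).

class ThingsHandler(BaseHandler):
    def get(self):
        tags = self.arg_as_array('tags')     # ['red', 'green', 'blue']
        ids = self.arg_as_array('ids', ',')  # comma-separated variant
        self.write({'tags': tags, 'ids': ids})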
juicer/juicer
juicer/utils/__init__.py
https://github.com/juicer/juicer/blob/0c9f0fd59e293d45df6b46e81f675d33221c600d/juicer/utils/__init__.py#L270-L281
def flatten(x):
    """
    Flatten an arbitrary depth nested list.
    """
    # Lifted from: http://stackoverflow.com/a/406822/263969
    result = []
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, basestring):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result
[ "def", "flatten", "(", "x", ")", ":", "# Lifted from: http://stackoverflow.com/a/406822/263969", "result", "=", "[", "]", "for", "el", "in", "x", ":", "if", "hasattr", "(", "el", ",", "\"__iter__\"", ")", "and", "not", "isinstance", "(", "el", ",", "basestring", ")", ":", "result", ".", "extend", "(", "flatten", "(", "el", ")", ")", "else", ":", "result", ".", "append", "(", "el", ")", "return", "result" ]
Flatten an arbitrary depth nested list.
[ "Flatten", "an", "arbitrary", "depth", "nested", "list", "." ]
python
train
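Behavior on a ragged list, using the function defined above; the isinstance(el, basestring) guard marks this as Python 2 code and keeps strings whole instead of exploding them into characters.

print(flatten([1, [2, [3, [4, 'five']]], 6]))
# [1, 2, 3, 4, 'five', 6]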
apache/airflow
airflow/contrib/hooks/qubole_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/qubole_hook.py#L164-L190
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
    """
    Get results (or just s3 locations) of a command from Qubole and save into a file

    :param ti: Task Instance of the dag, used to determine the Quboles command id
    :param fp: Optional file pointer, will create one and return if None passed
    :param inline: True to download actual results, False to get s3 locations only
    :param delim: Replaces the CTL-A chars with the given delim, defaults to ','
    :param fetch: when inline is True, get results directly from s3 (if large)
    :return: file location containing actual results or s3 locations of results
    """
    if fp is None:
        iso = datetime.datetime.utcnow().isoformat()
        logpath = os.path.expanduser(
            configuration.conf.get('core', 'BASE_LOG_FOLDER')
        )
        resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
        configuration.mkdir_p(resultpath)
        fp = open(resultpath + '/' + iso, 'wb')

    if self.cmd is None:
        cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
        self.cmd = self.cls.find(cmd_id)

    self.cmd.get_results(fp, inline, delim, fetch)
    fp.flush()
    fp.close()
    return fp.name
[ "def", "get_results", "(", "self", ",", "ti", "=", "None", ",", "fp", "=", "None", ",", "inline", "=", "True", ",", "delim", "=", "None", ",", "fetch", "=", "True", ")", ":", "if", "fp", "is", "None", ":", "iso", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", ")", "logpath", "=", "os", ".", "path", ".", "expanduser", "(", "configuration", ".", "conf", ".", "get", "(", "'core'", ",", "'BASE_LOG_FOLDER'", ")", ")", "resultpath", "=", "logpath", "+", "'/'", "+", "self", ".", "dag_id", "+", "'/'", "+", "self", ".", "task_id", "+", "'/results'", "configuration", ".", "mkdir_p", "(", "resultpath", ")", "fp", "=", "open", "(", "resultpath", "+", "'/'", "+", "iso", ",", "'wb'", ")", "if", "self", ".", "cmd", "is", "None", ":", "cmd_id", "=", "ti", ".", "xcom_pull", "(", "key", "=", "\"qbol_cmd_id\"", ",", "task_ids", "=", "self", ".", "task_id", ")", "self", ".", "cmd", "=", "self", ".", "cls", ".", "find", "(", "cmd_id", ")", "self", ".", "cmd", ".", "get_results", "(", "fp", ",", "inline", ",", "delim", ",", "fetch", ")", "fp", ".", "flush", "(", ")", "fp", ".", "close", "(", ")", "return", "fp", ".", "name" ]
Get results (or just s3 locations) of a command from Qubole and save into a file

:param ti: Task Instance of the dag, used to determine the Quboles command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
[ "Get", "results", "(", "or", "just", "s3", "locations", ")", "of", "a", "command", "from", "Qubole", "and", "save", "into", "a", "file", ":", "param", "ti", ":", "Task", "Instance", "of", "the", "dag", "used", "to", "determine", "the", "Quboles", "command", "id", ":", "param", "fp", ":", "Optional", "file", "pointer", "will", "create", "one", "and", "return", "if", "None", "passed", ":", "param", "inline", ":", "True", "to", "download", "actual", "results", "False", "to", "get", "s3", "locations", "only", ":", "param", "delim", ":", "Replaces", "the", "CTL", "-", "A", "chars", "with", "the", "given", "delim", "defaults", "to", ":", "param", "fetch", ":", "when", "inline", "is", "True", "get", "results", "directly", "from", "s3", "(", "if", "large", ")", ":", "return", ":", "file", "location", "containing", "actual", "results", "or", "s3", "locations", "of", "results" ]
python
test
fossasia/knittingpattern
knittingpattern/Loader.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/Loader.py#L122-L136
def relative_file(self, module, file):
    """Load a file relative to a module.

    :param str module: can be

      - a path to a folder
      - a path to a file
      - a module name

    :param str file: the path of a file relative to :paramref:`module`
    :return: the result of the processing
    """
    path = self._relative_to_absolute(module, file)
    return self.path(path)
[ "def", "relative_file", "(", "self", ",", "module", ",", "file", ")", ":", "path", "=", "self", ".", "_relative_to_absolute", "(", "module", ",", "file", ")", "return", "self", ".", "path", "(", "path", ")" ]
Load a file relative to a module.

:param str module: can be

  - a path to a folder
  - a path to a file
  - a module name

:param str file: the path of a file relative to :paramref:`module`
:return: the result of the processing
[ "Load", "a", "file", "relative", "to", "a", "module", "." ]
python
valid
GetmeUK/MongoFrames
snippets/comparable.py
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/snippets/comparable.py#L73-L82
def is_diff(self):
    """Return True if there are any differences logged"""
    if not isinstance(self.details, dict):
        return False

    for key in ['additions', 'updates', 'deletions']:
        if self.details.get(key, None):
            return True

    return False
[ "def", "is_diff", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "details", ",", "dict", ")", ":", "return", "False", "for", "key", "in", "[", "'additions'", ",", "'updates'", ",", "'deletions'", "]", ":", "if", "self", ".", "details", ".", "get", "(", "key", ",", "None", ")", ":", "return", "True", "return", "False" ]
Return True if there are any differences logged
[ "Return", "True", "if", "there", "are", "any", "differences", "logged" ]
python
train
ingolemo/python-lenses
examples/robots.py
https://github.com/ingolemo/python-lenses/blob/a3a6ed0a31f6674451e542e7380a8aa16e6f8edf/examples/robots.py#L113-L125
def advance_robots(self):
    '''Produces a new game state in which the robots have advanced
    towards the player by one step. Handles the robots crashing into
    one another too.'''
    # move the robots towards the player
    self = lens.robots.Each().call_step_towards(self.player)(self)
    # robots in the same place are crashes
    self = lens.crashes.call_union(duplicates(self.robots))(self)
    # remove crashed robots
    self = lens.robots.modify(lambda r: list(set(r) - self.crashes))(self)
    return self
[ "def", "advance_robots", "(", "self", ")", ":", "# move the robots towards the player", "self", "=", "lens", ".", "robots", ".", "Each", "(", ")", ".", "call_step_towards", "(", "self", ".", "player", ")", "(", "self", ")", "# robots in the same place are crashes", "self", "=", "lens", ".", "crashes", ".", "call_union", "(", "duplicates", "(", "self", ".", "robots", ")", ")", "(", "self", ")", "# remove crashed robots", "self", "=", "lens", ".", "robots", ".", "modify", "(", "lambda", "r", ":", "list", "(", "set", "(", "r", ")", "-", "self", ".", "crashes", ")", ")", "(", "self", ")", "return", "self" ]
Produces a new game state in which the robots have advanced towards the player by one step. Handles the robots crashing into one another too.
[ "Produces", "a", "new", "game", "state", "in", "which", "the", "robots", "have", "advanced", "towards", "the", "player", "by", "one", "step", ".", "Handles", "the", "robots", "crashing", "into", "one", "another", "too", "." ]
python
test
MonashBI/arcana
arcana/study/base.py
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/base.py#L636-L651
def unhandled_branch(self, name):
    """
    Convenient method for raising exception if a pipeline doesn't
    handle a particular switch value

    Parameters
    ----------
    name : str
        Name of the switch
    value : str
        Value of the switch which hasn't been handled
    """
    raise ArcanaDesignError(
        "'{}' value of '{}' switch in {} is not handled"
        .format(self._get_parameter(name), name,
                self._param_error_location))
[ "def", "unhandled_branch", "(", "self", ",", "name", ")", ":", "raise", "ArcanaDesignError", "(", "\"'{}' value of '{}' switch in {} is not handled\"", ".", "format", "(", "self", ".", "_get_parameter", "(", "name", ")", ",", "name", ",", "self", ".", "_param_error_location", ")", ")" ]
Convenient method for raising an exception if a pipeline doesn't
handle a particular switch value

Parameters
----------
name : str
    Name of the switch
value : str
    Value of the switch which hasn't been handled
[ "Convenient", "method", "for", "raising", "exception", "if", "a", "pipeline", "doesn", "t", "handle", "a", "particular", "switch", "value" ]
python
train
jbaiter/gphoto2-cffi
gphoto2cffi/gphoto2.py
https://github.com/jbaiter/gphoto2-cffi/blob/2876d15a58174bd24613cd4106a3ef0cefd48050/gphoto2cffi/gphoto2.py#L369-L388
def iter_data(self, chunk_size=2**16, ftype='normal'):
    """ Get an iterator that yields chunks of the file content.

    :param chunk_size: Size of yielded chunks in bytes
    :type chunk_size:  int
    :param ftype:      Select 'view' on file.
    :type ftype:       str
    :return:           Iterator
    """
    self._check_type_supported(ftype)
    buf_p = ffi.new("char[{0}]".format(chunk_size))
    size_p = ffi.new("uint64_t*")
    offset_p = ffi.new("uint64_t*")
    for chunk_idx in range(int(math.ceil(self.size/chunk_size))):
        size_p[0] = chunk_size
        lib.gp_camera_file_read(
            self._cam._cam, self.directory.path.encode(),
            self.name.encode(), backend.FILE_TYPES[ftype],
            offset_p[0], buf_p, size_p, self._cam._ctx)
        yield ffi.buffer(buf_p, size_p[0])[:]
[ "def", "iter_data", "(", "self", ",", "chunk_size", "=", "2", "**", "16", ",", "ftype", "=", "'normal'", ")", ":", "self", ".", "_check_type_supported", "(", "ftype", ")", "buf_p", "=", "ffi", ".", "new", "(", "\"char[{0}]\"", ".", "format", "(", "chunk_size", ")", ")", "size_p", "=", "ffi", ".", "new", "(", "\"uint64_t*\"", ")", "offset_p", "=", "ffi", ".", "new", "(", "\"uint64_t*\"", ")", "for", "chunk_idx", "in", "range", "(", "int", "(", "math", ".", "ceil", "(", "self", ".", "size", "/", "chunk_size", ")", ")", ")", ":", "size_p", "[", "0", "]", "=", "chunk_size", "lib", ".", "gp_camera_file_read", "(", "self", ".", "_cam", ".", "_cam", ",", "self", ".", "directory", ".", "path", ".", "encode", "(", ")", ",", "self", ".", "name", ".", "encode", "(", ")", ",", "backend", ".", "FILE_TYPES", "[", "ftype", "]", ",", "offset_p", "[", "0", "]", ",", "buf_p", ",", "size_p", ",", "self", ".", "_cam", ".", "_ctx", ")", "yield", "ffi", ".", "buffer", "(", "buf_p", ",", "size_p", "[", "0", "]", ")", "[", ":", "]" ]
Get an iterator that yields chunks of the file content.

:param chunk_size: Size of yielded chunks in bytes
:type chunk_size:  int
:param ftype:      Select 'view' on file.
:type ftype:       str
:return:           Iterator
[ "Get", "an", "iterator", "that", "yields", "chunks", "of", "the", "file", "content", "." ]
python
train
pedrotgn/pyactor
pyactor/context.py
https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L178-L233
def spawn(self, aid, klass, *param, **kparam):
    '''
    This method creates an actor attached to this host. It will be an
    instance of the class *klass* and it will be assigned an ID that
    identifies it among the host.

    This method can be called remotely synchronously.

    :param str. aid: identifier for the spawning actor. Unique within
        the host.
    :param class klass: class type of the spawning actor. If you are
        spawning remotely and the class is not in the server module,
        you must specify here the path to that class in the form
        'module.py/Class' so the server can import the class and
        create the instance.
    :param param-kparam: arguments for the init function of the
        spawning actor class.
    :return: :class:`~.Proxy` of the actor spawned.
    :raises: :class:`AlreadyExistsError`, if the ID specified is
        already in use.
    :raises: :class:`HostDownError` if the host is not initiated.
    '''
    if param is None:
        param = []
    if not self.alive:
        raise HostDownError()
    if isinstance(klass, basestring):
        module, klass = klass.split('/')
        module_ = __import__(module, globals(), locals(), [klass], -1)
        klass_ = getattr(module_, klass)
    elif isinstance(klass, (types.TypeType, types.ClassType)):
        klass_ = klass
    url = '%s://%s/%s' % (self.transport, self.host_url.netloc, aid)
    if url in self.actors.keys():
        raise AlreadyExistsError(url)
    else:
        obj = klass_(*param, **kparam)
        obj.id = aid
        obj.url = url
        if self.running:
            obj.host = self.proxy
        # else:
        #     obj.host = Exception("Host is not an active actor. \
        #         Use 'init_host' to make it alive.")
        if hasattr(klass_, '_parallel') and klass_._parallel:
            new_actor = parallels.ActorParallel(url, klass_, obj)
            lock = new_actor.get_lock()
            self.locks[url] = lock
        else:
            new_actor = actor.Actor(url, klass_, obj)
        obj.proxy = Proxy(new_actor)
        self.launch_actor(url, new_actor)
        return Proxy(new_actor)
[ "def", "spawn", "(", "self", ",", "aid", ",", "klass", ",", "*", "param", ",", "*", "*", "kparam", ")", ":", "if", "param", "is", "None", ":", "param", "=", "[", "]", "if", "not", "self", ".", "alive", ":", "raise", "HostDownError", "(", ")", "if", "isinstance", "(", "klass", ",", "basestring", ")", ":", "module", ",", "klass", "=", "klass", ".", "split", "(", "'/'", ")", "module_", "=", "__import__", "(", "module", ",", "globals", "(", ")", ",", "locals", "(", ")", ",", "[", "klass", "]", ",", "-", "1", ")", "klass_", "=", "getattr", "(", "module_", ",", "klass", ")", "elif", "isinstance", "(", "klass", ",", "(", "types", ".", "TypeType", ",", "types", ".", "ClassType", ")", ")", ":", "klass_", "=", "klass", "url", "=", "'%s://%s/%s'", "%", "(", "self", ".", "transport", ",", "self", ".", "host_url", ".", "netloc", ",", "aid", ")", "if", "url", "in", "self", ".", "actors", ".", "keys", "(", ")", ":", "raise", "AlreadyExistsError", "(", "url", ")", "else", ":", "obj", "=", "klass_", "(", "*", "param", ",", "*", "*", "kparam", ")", "obj", ".", "id", "=", "aid", "obj", ".", "url", "=", "url", "if", "self", ".", "running", ":", "obj", ".", "host", "=", "self", ".", "proxy", "# else:", "# obj.host = Exception(\"Host is not an active actor. \\", "# Use 'init_host' to make it alive.\")", "if", "hasattr", "(", "klass_", ",", "'_parallel'", ")", "and", "klass_", ".", "_parallel", ":", "new_actor", "=", "parallels", ".", "ActorParallel", "(", "url", ",", "klass_", ",", "obj", ")", "lock", "=", "new_actor", ".", "get_lock", "(", ")", "self", ".", "locks", "[", "url", "]", "=", "lock", "else", ":", "new_actor", "=", "actor", ".", "Actor", "(", "url", ",", "klass_", ",", "obj", ")", "obj", ".", "proxy", "=", "Proxy", "(", "new_actor", ")", "self", ".", "launch_actor", "(", "url", ",", "new_actor", ")", "return", "Proxy", "(", "new_actor", ")" ]
This method creates an actor attached to this host. It will be an
instance of the class *klass* and it will be assigned an ID that
identifies it among the host.

This method can be called remotely synchronously.

:param str. aid: identifier for the spawning actor. Unique within the host.
:param class klass: class type of the spawning actor. If you are spawning
    remotely and the class is not in the server module, you must specify
    here the path to that class in the form 'module.py/Class' so the
    server can import the class and create the instance.
:param param-kparam: arguments for the init function of the spawning
    actor class.
:return: :class:`~.Proxy` of the actor spawned.
:raises: :class:`AlreadyExistsError`, if the ID specified is already in use.
:raises: :class:`HostDownError` if the host is not initiated.
[ "This", "method", "creates", "an", "actor", "attached", "to", "this", "host", ".", "It", "will", "be", "an", "instance", "of", "the", "class", "*", "klass", "*", "and", "it", "will", "be", "assigned", "an", "ID", "that", "identifies", "it", "among", "the", "host", "." ]
python
train
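A usage sketch for the spawn record above, assuming pyactor is installed and its host machinery is running; the Echo actor class and its _tell list are illustrative:

# Illustrative only: requires pyactor; `Echo` is a made-up actor class.
from pyactor.context import set_context, create_host, shutdown

class Echo(object):
    _tell = ['say']               # methods invoked asynchronously

    def say(self, text):
        print(text)

set_context()
host = create_host()              # its spawn() is the method shown above
echo = host.spawn('echo1', Echo)  # id must be unique within the host
echo.say('hello')                 # call through the returned Proxy
shutdown()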
luckydonald/pytgbot
code_generation/output/pytgbot/api_types/receivable/media.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/api_types/receivable/media.py#L1225-L1242
def to_array(self):
    """
    Serializes this VideoNote to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(VideoNote, self).to_array()
    array['file_id'] = u(self.file_id)  # py2: type unicode, py3: type str
    array['length'] = int(self.length)  # type int
    array['duration'] = int(self.duration)  # type int
    if self.thumb is not None:
        array['thumb'] = self.thumb.to_array()  # type PhotoSize
    if self.file_size is not None:
        array['file_size'] = int(self.file_size)  # type int
    return array
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "VideoNote", ",", "self", ")", ".", "to_array", "(", ")", "array", "[", "'file_id'", "]", "=", "u", "(", "self", ".", "file_id", ")", "# py2: type unicode, py3: type str", "array", "[", "'length'", "]", "=", "int", "(", "self", ".", "length", ")", "# type int", "array", "[", "'duration'", "]", "=", "int", "(", "self", ".", "duration", ")", "# type int", "if", "self", ".", "thumb", "is", "not", "None", ":", "array", "[", "'thumb'", "]", "=", "self", ".", "thumb", ".", "to_array", "(", ")", "# type PhotoSize", "if", "self", ".", "file_size", "is", "not", "None", ":", "array", "[", "'file_size'", "]", "=", "int", "(", "self", ".", "file_size", ")", "# type int", "return", "array" ]
Serializes this VideoNote to a dictionary. :return: dictionary representation of this object. :rtype: dict
[ "Serializes", "this", "VideoNote", "to", "a", "dictionary", "." ]
python
train
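A sketch of round-tripping the serializer above; it assumes pytgbot is installed and that this generated VideoNote accepts these constructor arguments, with made-up field values:

# Assumes pytgbot is installed; the field values are made up.
from pytgbot.api_types.receivable.media import VideoNote

note = VideoNote(file_id='AAQC', length=240, duration=12, file_size=2048)
assert note.to_array() == {
    'file_id': 'AAQC', 'length': 240, 'duration': 12, 'file_size': 2048,
}  # 'thumb' is omitted because it is None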
bambinos/bambi
bambi/models.py
https://github.com/bambinos/bambi/blob/b4a0ced917968bb99ca20915317417d708387946/bambi/models.py#L83-L93
def reset(self):
    '''
    Reset list of terms and y-variable.
    '''
    self.terms = OrderedDict()
    self.y = None
    self.backend = None
    self.added_terms = []
    self._added_priors = {}
    self.completes = []
    self.clean_data = None
[ "def", "reset", "(", "self", ")", ":", "self", ".", "terms", "=", "OrderedDict", "(", ")", "self", ".", "y", "=", "None", "self", ".", "backend", "=", "None", "self", ".", "added_terms", "=", "[", "]", "self", ".", "_added_priors", "=", "{", "}", "self", ".", "completes", "=", "[", "]", "self", ".", "clean_data", "=", "None" ]
Reset list of terms and y-variable.
[ "Reset", "list", "of", "terms", "and", "y", "-", "variable", "." ]
python
train
pantsbuild/pants
src/python/pants/java/distribution/distribution.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/java/distribution/distribution.py#L500-L534
def _locate(self, minimum_version=None, maximum_version=None, jdk=False):
    """Finds a java distribution that meets any given constraints and returns it.

    :param minimum_version: minimum jvm version to look for (eg, 1.7).
    :param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
    :param bool jdk: whether the found java distribution is required to have a jdk.
    :return: the located Distribution.
    :rtype: :class:`Distribution`
    :raises: :class:`Distribution.Error` if no suitable java distribution could be found.
    """
    for location in itertools.chain(self._distribution_environment.jvm_locations):
        try:
            dist = Distribution(home_path=location.home_path,
                                bin_path=location.bin_path,
                                minimum_version=minimum_version,
                                maximum_version=maximum_version,
                                jdk=jdk)
            dist.validate()
            logger.debug('Located {} for constraints: minimum_version {}, maximum_version {}, jdk {}'
                         .format(dist, minimum_version, maximum_version, jdk))
            return dist
        except (ValueError, Distribution.Error) as e:
            logger.debug('{} is not a valid distribution because: {}'
                         .format(location.home_path, str(e)))
            pass
    if (minimum_version is not None and maximum_version is not None and
            maximum_version < minimum_version):
        error_format = ('Pants configuration/options led to impossible constraints for {} '
                        'distribution: minimum_version {}, maximum_version {}')
    else:
        error_format = ('Failed to locate a {} distribution with minimum_version {}, '
                        'maximum_version {}')
    raise self.Error(error_format.format('JDK' if jdk else 'JRE',
                                         minimum_version, maximum_version))
[ "def", "_locate", "(", "self", ",", "minimum_version", "=", "None", ",", "maximum_version", "=", "None", ",", "jdk", "=", "False", ")", ":", "for", "location", "in", "itertools", ".", "chain", "(", "self", ".", "_distribution_environment", ".", "jvm_locations", ")", ":", "try", ":", "dist", "=", "Distribution", "(", "home_path", "=", "location", ".", "home_path", ",", "bin_path", "=", "location", ".", "bin_path", ",", "minimum_version", "=", "minimum_version", ",", "maximum_version", "=", "maximum_version", ",", "jdk", "=", "jdk", ")", "dist", ".", "validate", "(", ")", "logger", ".", "debug", "(", "'Located {} for constraints: minimum_version {}, maximum_version {}, jdk {}'", ".", "format", "(", "dist", ",", "minimum_version", ",", "maximum_version", ",", "jdk", ")", ")", "return", "dist", "except", "(", "ValueError", ",", "Distribution", ".", "Error", ")", "as", "e", ":", "logger", ".", "debug", "(", "'{} is not a valid distribution because: {}'", ".", "format", "(", "location", ".", "home_path", ",", "str", "(", "e", ")", ")", ")", "pass", "if", "(", "minimum_version", "is", "not", "None", "and", "maximum_version", "is", "not", "None", "and", "maximum_version", "<", "minimum_version", ")", ":", "error_format", "=", "(", "'Pants configuration/options led to impossible constraints for {} '", "'distribution: minimum_version {}, maximum_version {}'", ")", "else", ":", "error_format", "=", "(", "'Failed to locate a {} distribution with minimum_version {}, '", "'maximum_version {}'", ")", "raise", "self", ".", "Error", "(", "error_format", ".", "format", "(", "'JDK'", "if", "jdk", "else", "'JRE'", ",", "minimum_version", ",", "maximum_version", ")", ")" ]
Finds a java distribution that meets any given constraints and returns it.

:param minimum_version: minimum jvm version to look for (eg, 1.7).
:param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
:param bool jdk: whether the found java distribution is required to have a jdk.
:return: the located Distribution.
:rtype: :class:`Distribution`
:raises: :class:`Distribution.Error` if no suitable java distribution could be found.
[ "Finds", "a", "java", "distribution", "that", "meets", "any", "given", "constraints", "and", "returns", "it", "." ]
python
train
robhowley/nhlscrapi
nhlscrapi/games/events.py
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/events.py#L167-L183
def Create(event_type):
    """
    Factory method creates objects derived from :py:class`.Event` with
    class name matching the :py:class`.EventType`.

    :param event_type: number for type of event
    :returns: constructed event corresponding to ``event_type``
    :rtype: :py:class:`.Event`
    """
    if event_type in EventType.Name:
        # unknown event type gets base class
        if EventType.Name[event_type] == Event.__name__:
            return Event()
        else:
            # instantiate Event subclass with same name as EventType name
            return [t for t in EventFactory.event_list
                    if t.__name__ == EventType.Name[event_type]][0]()
    else:
        raise TypeError("EventFactory.Create: Invalid EventType")
[ "def", "Create", "(", "event_type", ")", ":", "if", "event_type", "in", "EventType", ".", "Name", ":", "# unknown event type gets base class", "if", "EventType", ".", "Name", "[", "event_type", "]", "==", "Event", ".", "__name__", ":", "return", "Event", "(", ")", "else", ":", "# instantiate Event subclass with same name as EventType name", "return", "[", "t", "for", "t", "in", "EventFactory", ".", "event_list", "if", "t", ".", "__name__", "==", "EventType", ".", "Name", "[", "event_type", "]", "]", "[", "0", "]", "(", ")", "else", ":", "raise", "TypeError", "(", "\"EventFactory.Create: Invalid EventType\"", ")" ]
Factory method creates objects derived from :py:class`.Event` with class
name matching the :py:class`.EventType`.

:param event_type: number for type of event
:returns: constructed event corresponding to ``event_type``
:rtype: :py:class:`.Event`
[ "Factory", "method", "creates", "objects", "derived", "from", ":", "py", ":", "class", ".", "Event", "with", "class", "name", "matching", "the", ":", "py", ":", "class", ".", "EventType", ".", ":", "param", "event_type", ":", "number", "for", "type", "of", "event", ":", "returns", ":", "constructed", "event", "corresponding", "to", "event_type", ":", "rtype", ":", ":", "py", ":", "class", ":", ".", "Event" ]
python
train
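The name-matched factory pattern in Create can be illustrated without nhlscrapi; the Shot and Goal classes below are made-up stand-ins for its event types:

# Self-contained sketch of the same name-matched factory pattern.
class Event(object): pass
class Shot(Event): pass
class Goal(Event): pass

class EventType(object):
    Name = {0: 'Event', 1: 'Shot', 2: 'Goal'}  # type code -> class name

event_list = [Shot, Goal]

def create(event_type):
    if event_type not in EventType.Name:
        raise TypeError('Invalid EventType')
    if EventType.Name[event_type] == Event.__name__:
        return Event()                         # unknown type -> base class
    return [t for t in event_list
            if t.__name__ == EventType.Name[event_type]][0]()

assert isinstance(create(1), Shot)
assert type(create(0)) is Event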
lemieuxl/pyGenClean
pyGenClean/PlinkUtils/__init__.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/PlinkUtils/__init__.py#L58-L101
def get_plink_version():
    """Gets the Plink version from the binary.

    :returns: the version of the Plink software
    :rtype: str

    This function uses :py:class:`subprocess.Popen` to gather the version
    of the Plink binary. Since executing the software to gather the
    version creates an output file, it is deleted.

    .. warning::
        This function only works as long as the version is returned as
        ``| PLINK! | NNN |`` (where ``NNN`` is the version), since we use
        a regular expression to extract the version number from the
        standard output of the software.
    """
    # Running the command
    tmp_fn = None
    with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
        tmp_fn = tmpfile.name + "_pyGenClean"

    # The command to run
    command = ["plink", "--noweb", "--out", tmp_fn]
    output = None
    try:
        proc = Popen(command, stdout=PIPE, stderr=PIPE)
        output = proc.communicate()[0].decode()
    except OSError:
        raise ProgramError("plink: command not found")

    # Deleting the output file automatically created by Plink
    if os.path.isfile(tmp_fn + ".log"):
        os.remove(tmp_fn + ".log")

    # Finding the version
    version = re.search(r"\|\s+PLINK!\s+\|\s+(\S+)\s+\|", output)
    if version is None:
        version = "unknown"
    else:
        version = version.group(1)

    return version
[ "def", "get_plink_version", "(", ")", ":", "# Running the command", "tmp_fn", "=", "None", "with", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "as", "tmpfile", ":", "tmp_fn", "=", "tmpfile", ".", "name", "+", "\"_pyGenClean\"", "# The command to run", "command", "=", "[", "\"plink\"", ",", "\"--noweb\"", ",", "\"--out\"", ",", "tmp_fn", "]", "output", "=", "None", "try", ":", "proc", "=", "Popen", "(", "command", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "output", "=", "proc", ".", "communicate", "(", ")", "[", "0", "]", ".", "decode", "(", ")", "except", "OSError", ":", "raise", "ProgramError", "(", "\"plink: command not found\"", ")", "# Deleting the output file automatically created by Plink", "if", "os", ".", "path", ".", "isfile", "(", "tmp_fn", "+", "\".log\"", ")", ":", "os", ".", "remove", "(", "tmp_fn", "+", "\".log\"", ")", "# Finding the version", "version", "=", "re", ".", "search", "(", "r\"\\|\\s+PLINK!\\s+\\|\\s+(\\S+)\\s+\\|\"", ",", "output", ")", "if", "version", "is", "None", ":", "version", "=", "\"unknown\"", "else", ":", "version", "=", "version", ".", "group", "(", "1", ")", "return", "version" ]
Gets the Plink version from the binary.

:returns: the version of the Plink software
:rtype: str

This function uses :py:class:`subprocess.Popen` to gather the version of
the Plink binary. Since executing the software to gather the version
creates an output file, it is deleted.

.. warning::
    This function only works as long as the version is returned as
    ``| PLINK! | NNN |`` (where ``NNN`` is the version), since we use a
    regular expression to extract the version number from the standard
    output of the software.
[ "Gets", "the", "Plink", "version", "from", "the", "binary", "." ]
python
train
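Calling the helper is a one-liner; this sketch assumes pyGenClean is installed, that ProgramError is importable from the same module, and that the plink binary is on PATH (the function itself degrades to 'unknown' if the banner format changes):

# Assumes pyGenClean is installed; requires the `plink` binary on PATH.
from pyGenClean.PlinkUtils import get_plink_version, ProgramError

try:
    print(get_plink_version())  # e.g. 'v1.07', or 'unknown'
except ProgramError as err:     # raised when plink is missing
    print('plink not available:', err)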
zetaops/zengine
zengine/current.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/current.py#L116-L132
def set_message(self, title, msg, typ, url=None):
    """
    Sets user notification message.

    Args:
        title: Msg. title
        msg:  Msg. text
        typ: Msg. type
        url: Additional URL (if exists)

    Returns:
        Message ID.
    """
    return self.user.send_notification(title=title,
                                       message=msg,
                                       typ=typ,
                                       url=url)
[ "def", "set_message", "(", "self", ",", "title", ",", "msg", ",", "typ", ",", "url", "=", "None", ")", ":", "return", "self", ".", "user", ".", "send_notification", "(", "title", "=", "title", ",", "message", "=", "msg", ",", "typ", "=", "typ", ",", "url", "=", "url", ")" ]
Sets user notification message.

Args:
    title: Msg. title
    msg:  Msg. text
    typ: Msg. type
    url: Additional URL (if exists)

Returns:
    Message ID.
[ "Sets", "user", "notification", "message", "." ]
python
train
log2timeline/dfvfs
dfvfs/helpers/source_scanner.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/helpers/source_scanner.py#L290-L303
def LockScanNode(self, path_spec):
    """Marks a scan node as locked.

    Args:
      path_spec (PathSpec): path specification.

    Raises:
      KeyError: if the scan node does not exist.
    """
    scan_node = self._scan_nodes.get(path_spec, None)
    if not scan_node:
        raise KeyError('Scan node does not exist.')

    self._locked_scan_nodes[path_spec] = scan_node
[ "def", "LockScanNode", "(", "self", ",", "path_spec", ")", ":", "scan_node", "=", "self", ".", "_scan_nodes", ".", "get", "(", "path_spec", ",", "None", ")", "if", "not", "scan_node", ":", "raise", "KeyError", "(", "'Scan node does not exist.'", ")", "self", ".", "_locked_scan_nodes", "[", "path_spec", "]", "=", "scan_node" ]
Marks a scan node as locked.

Args:
  path_spec (PathSpec): path specification.

Raises:
  KeyError: if the scan node does not exist.
[ "Marks", "a", "scan", "node", "as", "locked", "." ]
python
train
datadesk/python-documentcloud
documentcloud/__init__.py
https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L866-L876
def get_small_image_url(self, page=1):
    """
    Returns the URL for the small sized image of a single page.

    The page kwarg specifies which page to return. One is the default.
    """
    template = self.resources.page.get('image')
    return template.replace(
        "{page}", str(page)
    ).replace("{size}", "small")
[ "def", "get_small_image_url", "(", "self", ",", "page", "=", "1", ")", ":", "template", "=", "self", ".", "resources", ".", "page", ".", "get", "(", "'image'", ")", "return", "template", ".", "replace", "(", "\"{page}\"", ",", "str", "(", "page", ")", ")", ".", "replace", "(", "\"{size}\"", ",", "\"small\"", ")" ]
Returns the URL for the small sized image of a single page. The page kwarg specifies which page to return. One is the default.
[ "Returns", "the", "URL", "for", "the", "small", "sized", "image", "of", "a", "single", "page", "." ]
python
train
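The method is a plain template substitution; a self-contained sketch of the same pattern (the URL is illustrative, not a real DocumentCloud endpoint):

# Stand-alone illustration of the {page}/{size} template substitution.
template = 'https://assets.example.com/docs/1-report/pages/{page}-{size}.gif'

def image_url(template, page=1, size='small'):
    return template.replace('{page}', str(page)).replace('{size}', size)

assert image_url(template, page=3) == (
    'https://assets.example.com/docs/1-report/pages/3-small.gif')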
wangwenpei/fantasy
fantasy/utils.py
https://github.com/wangwenpei/fantasy/blob/0fe92059bd868f14da84235beb05b217b1d46e4a/fantasy/utils.py#L14-L26
def get_config(app, prefix='hive_'):
    """Conveniently get the configuration for the specified application
    without the annoying 'HIVE_' prefix.

    :param app: The application to inspect
    """
    items = app.config.items()
    prefix = prefix.upper()

    def strip_prefix(tup):
        return (tup[0].replace(prefix, ''), tup[1])

    return dict([strip_prefix(i) for i in items if i[0].startswith(prefix)])
[ "def", "get_config", "(", "app", ",", "prefix", "=", "'hive_'", ")", ":", "items", "=", "app", ".", "config", ".", "items", "(", ")", "prefix", "=", "prefix", ".", "upper", "(", ")", "def", "strip_prefix", "(", "tup", ")", ":", "return", "(", "tup", "[", "0", "]", ".", "replace", "(", "prefix", ",", "''", ")", ",", "tup", "[", "1", "]", ")", "return", "dict", "(", "[", "strip_prefix", "(", "i", ")", "for", "i", "in", "items", "if", "i", "[", "0", "]", ".", "startswith", "(", "prefix", ")", "]", ")" ]
Conveniently get the configuration for the specified application without
the annoying 'HIVE_' prefix.

:param app: The application to inspect
[ "Conveniently", "get", "the", "security", "configuration", "for", "the", "specified", "application", "without", "the", "annoying", "SECURITY_", "prefix", "." ]
python
test
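A runnable sketch of the prefix stripping, using a made-up stand-in for the Flask-style app object:

# `FakeApp` stands in for a Flask app; only its `config` dict is needed.
class FakeApp(object):
    config = {'HIVE_HOST': 'db1', 'HIVE_PORT': 5432, 'DEBUG': True}

def get_config(app, prefix='hive_'):
    items = app.config.items()
    prefix = prefix.upper()

    def strip_prefix(tup):
        return (tup[0].replace(prefix, ''), tup[1])

    return dict([strip_prefix(i) for i in items if i[0].startswith(prefix)])

assert get_config(FakeApp()) == {'HOST': 'db1', 'PORT': 5432}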
saltstack/salt
salt/modules/azurearm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_network.py#L155-L199
def default_security_rule_get(name, security_group, resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Get details about a default security rule within a security group.

    :param name: The name of the security rule to query.

    :param security_group: The network security group containing the
        security rule.

    :param resource_group: The resource group name assigned to the
        network security group.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.default_security_rule_get DenyAllOutBound testnsg testgroup

    '''
    result = {}

    default_rules = default_security_rules_list(
        security_group=security_group,
        resource_group=resource_group,
        **kwargs
    )
    if isinstance(default_rules, dict) and 'error' in default_rules:
        return default_rules

    try:
        for default_rule in default_rules:
            if default_rule['name'] == name:
                result = default_rule
        if not result:
            result = {
                'error': 'Unable to find {0} in {1}!'.format(name, security_group)
            }
    except KeyError as exc:
        log.error('Unable to find %s in %s!', name, security_group)
        result = {'error': str(exc)}

    return result
[ "def", "default_security_rule_get", "(", "name", ",", "security_group", ",", "resource_group", ",", "*", "*", "kwargs", ")", ":", "result", "=", "{", "}", "default_rules", "=", "default_security_rules_list", "(", "security_group", "=", "security_group", ",", "resource_group", "=", "resource_group", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "default_rules", ",", "dict", ")", "and", "'error'", "in", "default_rules", ":", "return", "default_rules", "try", ":", "for", "default_rule", "in", "default_rules", ":", "if", "default_rule", "[", "'name'", "]", "==", "name", ":", "result", "=", "default_rule", "if", "not", "result", ":", "result", "=", "{", "'error'", ":", "'Unable to find {0} in {1}!'", ".", "format", "(", "name", ",", "security_group", ")", "}", "except", "KeyError", "as", "exc", ":", "log", ".", "error", "(", "'Unable to find %s in %s!'", ",", "name", ",", "security_group", ")", "result", "=", "{", "'error'", ":", "str", "(", "exc", ")", "}", "return", "result" ]
.. versionadded:: 2019.2.0

Get details about a default security rule within a security group.

:param name: The name of the security rule to query.

:param security_group: The network security group containing the
    security rule.

:param resource_group: The resource group name assigned to the
    network security group.

CLI Example:

.. code-block:: bash

    salt-call azurearm_network.default_security_rule_get DenyAllOutBound testnsg testgroup
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
CivicSpleen/ambry
ambry/metadata/proptree.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/metadata/proptree.py#L304-L317
def register_members(self):
    """Collect the names of the class members and convert them to object
    members.

    Unlike Terms, the Group class members are converted into object
    members, so the configuration data
    """
    self._members = {
        name: attr for name, attr in iteritems(type(self).__dict__)
        if isinstance(attr, Group)}

    for name, m in iteritems(self._members):
        m.init_descriptor(name, self)
[ "def", "register_members", "(", "self", ")", ":", "self", ".", "_members", "=", "{", "name", ":", "attr", "for", "name", ",", "attr", "in", "iteritems", "(", "type", "(", "self", ")", ".", "__dict__", ")", "if", "isinstance", "(", "attr", ",", "Group", ")", "}", "for", "name", ",", "m", "in", "iteritems", "(", "self", ".", "_members", ")", ":", "m", ".", "init_descriptor", "(", "name", ",", "self", ")" ]
Collect the names of the class members and convert them to object members.

Unlike Terms, the Group class members are converted into object members,
so the configuration data
[ "Collect", "the", "names", "of", "the", "class", "member", "and", "convert", "them", "to", "object", "members", "." ]
python
train
latchset/custodia
src/custodia/server/__init__.py
https://github.com/latchset/custodia/blob/5ad4cd7a2f40babc6b8b5d16215b7e27ca993b6d/src/custodia/server/__init__.py#L34-L57
def _load_plugin_class(menu, name):
    """Load Custodia plugin

    Entry points are preferred over dotted import path.
    """
    group = 'custodia.{}'.format(menu)
    eps = list(pkg_resources.iter_entry_points(group, name))
    if len(eps) > 1:
        raise ValueError(
            "Multiple entry points for {} {}: {}".format(menu, name, eps))
    elif len(eps) == 1:
        # backwards compatibility with old setuptools
        ep = eps[0]
        if hasattr(ep, 'resolve'):
            return ep.resolve()
        else:
            return ep.load(require=False)
    elif '.' in name:
        # fall back to old style dotted name
        module, classname = name.rsplit('.', 1)
        m = importlib.import_module(module)
        return getattr(m, classname)
    else:
        raise ValueError("{}: {} not found".format(menu, name))
[ "def", "_load_plugin_class", "(", "menu", ",", "name", ")", ":", "group", "=", "'custodia.{}'", ".", "format", "(", "menu", ")", "eps", "=", "list", "(", "pkg_resources", ".", "iter_entry_points", "(", "group", ",", "name", ")", ")", "if", "len", "(", "eps", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Multiple entry points for {} {}: {}\"", ".", "format", "(", "menu", ",", "name", ",", "eps", ")", ")", "elif", "len", "(", "eps", ")", "==", "1", ":", "# backwards compatibility with old setuptools", "ep", "=", "eps", "[", "0", "]", "if", "hasattr", "(", "ep", ",", "'resolve'", ")", ":", "return", "ep", ".", "resolve", "(", ")", "else", ":", "return", "ep", ".", "load", "(", "require", "=", "False", ")", "elif", "'.'", "in", "name", ":", "# fall back to old style dotted name", "module", ",", "classname", "=", "name", ".", "rsplit", "(", "'.'", ",", "1", ")", "m", "=", "importlib", ".", "import_module", "(", "module", ")", "return", "getattr", "(", "m", ",", "classname", ")", "else", ":", "raise", "ValueError", "(", "\"{}: {} not found\"", ".", "format", "(", "menu", ",", "name", ")", ")" ]
Load Custodia plugin Entry points are preferred over dotted import path.
[ "Load", "Custodia", "plugin" ]
python
train
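The dotted-name fallback branch can be exercised with nothing but the standard library; the entry-point branch would additionally need pkg_resources and an installed distribution:

# Demonstrates only the dotted-name branch of the loader above.
import importlib

def load_dotted(name):
    module, classname = name.rsplit('.', 1)
    m = importlib.import_module(module)
    return getattr(m, classname)

assert load_dotted('json.JSONEncoder').__name__ == 'JSONEncoder'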
jonathanslenders/textfsm
jtextfsm.py
https://github.com/jonathanslenders/textfsm/blob/cca5084512d14bc367205aceb34c938ac1c65daf/jtextfsm.py#L251-L291
def Parse(self, value):
    """Parse a 'Value' declaration.

    Args:
      value: String line from a template file, must begin with 'Value '.

    Raises:
      TextFSMTemplateError: Value declaration contains an error.
    """
    value_line = value.split(' ')
    if len(value_line) < 3:
        raise TextFSMTemplateError('Expect at least 3 tokens on line.')

    if not value_line[2].startswith('('):
        # Options are present
        options = value_line[1]
        for option in options.split(','):
            self._AddOption(option)
        # Call option OnCreateOptions callbacks
        [option.OnCreateOptions() for option in self.options]

        self.name = value_line[2]
        self.regex = ' '.join(value_line[3:])
    else:
        # There were no valid options, so there are no options.
        # Treat this argument as the name.
        self.name = value_line[1]
        self.regex = ' '.join(value_line[2:])

    if len(self.name) > self.max_name_len:
        raise TextFSMTemplateError(
            "Invalid Value name '%s' or name too long." % self.name)

    if (not re.match(r'^\(.*\)$', self.regex) or
            self.regex.count('(') != self.regex.count(')')):
        raise TextFSMTemplateError(
            "Value '%s' must be contained within a '()' pair." % self.regex)

    self.template = re.sub(r'^\(', '(?P<%s>' % self.name, self.regex)
[ "def", "Parse", "(", "self", ",", "value", ")", ":", "value_line", "=", "value", ".", "split", "(", "' '", ")", "if", "len", "(", "value_line", ")", "<", "3", ":", "raise", "TextFSMTemplateError", "(", "'Expect at least 3 tokens on line.'", ")", "if", "not", "value_line", "[", "2", "]", ".", "startswith", "(", "'('", ")", ":", "# Options are present", "options", "=", "value_line", "[", "1", "]", "for", "option", "in", "options", ".", "split", "(", "','", ")", ":", "self", ".", "_AddOption", "(", "option", ")", "# Call option OnCreateOptions callbacks", "[", "option", ".", "OnCreateOptions", "(", ")", "for", "option", "in", "self", ".", "options", "]", "self", ".", "name", "=", "value_line", "[", "2", "]", "self", ".", "regex", "=", "' '", ".", "join", "(", "value_line", "[", "3", ":", "]", ")", "else", ":", "# There were no valid options, so there are no options.", "# Treat this argument as the name.", "self", ".", "name", "=", "value_line", "[", "1", "]", "self", ".", "regex", "=", "' '", ".", "join", "(", "value_line", "[", "2", ":", "]", ")", "if", "len", "(", "self", ".", "name", ")", ">", "self", ".", "max_name_len", ":", "raise", "TextFSMTemplateError", "(", "\"Invalid Value name '%s' or name too long.\"", "%", "self", ".", "name", ")", "if", "(", "not", "re", ".", "match", "(", "r'^\\(.*\\)$'", ",", "self", ".", "regex", ")", "or", "self", ".", "regex", ".", "count", "(", "'('", ")", "!=", "self", ".", "regex", ".", "count", "(", "')'", ")", ")", ":", "raise", "TextFSMTemplateError", "(", "\"Value '%s' must be contained within a '()' pair.\"", "%", "self", ".", "regex", ")", "self", ".", "template", "=", "re", ".", "sub", "(", "r'^\\('", ",", "'(?P<%s>'", "%", "self", ".", "name", ",", "self", ".", "regex", ")" ]
Parse a 'Value' declaration.

Args:
  value: String line from a template file, must begin with 'Value '.

Raises:
  TextFSMTemplateError: Value declaration contains an error.
[ "Parse", "a", "Value", "declaration", "." ]
python
train
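The heart of the declaration parser is the final regex-to-named-group rewrite; a reduced, runnable sketch of that step:

# Reduced illustration of turning a Value regex into a named capture group.
import re

name, regex = 'interface', r'(\S+)'
assert re.match(r'^\(.*\)$', regex)                 # must be a (...) pair
template = re.sub(r'^\(', '(?P<%s>' % name, regex)  # -> (?P<interface>\S+)

match = re.search('Interface ' + template, 'Interface GigabitEthernet0/1')
assert match.group('interface') == 'GigabitEthernet0/1'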
tensorflow/tensor2tensor
tensor2tensor/models/research/transformer_nat.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_nat.py#L31-L48
def init_vq_bottleneck(bottleneck_size, hidden_size):
    """Get lookup table for VQ bottleneck."""
    means = tf.get_variable(
        name="means",
        shape=[bottleneck_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())
    ema_count = tf.get_variable(
        name="ema_count",
        shape=[bottleneck_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    with tf.colocate_with(means):
        ema_means = tf.get_variable(
            name="ema_means", initializer=means.initialized_value(),
            trainable=False)

    return means, ema_means, ema_count
[ "def", "init_vq_bottleneck", "(", "bottleneck_size", ",", "hidden_size", ")", ":", "means", "=", "tf", ".", "get_variable", "(", "name", "=", "\"means\"", ",", "shape", "=", "[", "bottleneck_size", ",", "hidden_size", "]", ",", "initializer", "=", "tf", ".", "uniform_unit_scaling_initializer", "(", ")", ")", "ema_count", "=", "tf", ".", "get_variable", "(", "name", "=", "\"ema_count\"", ",", "shape", "=", "[", "bottleneck_size", "]", ",", "initializer", "=", "tf", ".", "constant_initializer", "(", "0", ")", ",", "trainable", "=", "False", ")", "with", "tf", ".", "colocate_with", "(", "means", ")", ":", "ema_means", "=", "tf", ".", "get_variable", "(", "name", "=", "\"ema_means\"", ",", "initializer", "=", "means", ".", "initialized_value", "(", ")", ",", "trainable", "=", "False", ")", "return", "means", ",", "ema_means", ",", "ema_count" ]
Get lookup table for VQ bottleneck.
[ "Get", "lookup", "table", "for", "VQ", "bottleneck", "." ]
python
train
widdowquinn/pyADHoRe
pyadhore/iadhore.py
https://github.com/widdowquinn/pyADHoRe/blob/b2ebbf6ae9c6afe9262eb0e3d9cf395970e38533/pyadhore/iadhore.py#L333-L336
def db_file(self, value):
    """ Setter for _db_file attribute """
    assert not os.path.isfile(value), "%s already exists" % value
    self._db_file = value
[ "def", "db_file", "(", "self", ",", "value", ")", ":", "assert", "not", "os", ".", "path", ".", "isfile", "(", "value", ")", ",", "\"%s already exists\"", "%", "value", "self", ".", "_db_file", "=", "value" ]
Setter for _db_file attribute
[ "Setter", "for", "_db_file", "attribute" ]
python
train
StyXman/ayrton
ayrton/parser/error.py
https://github.com/StyXman/ayrton/blob/e1eed5c7ef230e3c2340a1f0bf44c72bbdc0debb/ayrton/parser/error.py#L14-L17
def strerror(errno):
    """Translate an error code to a message string."""
    from pypy.module._codecs.locale import str_decode_locale_surrogateescape
    return str_decode_locale_surrogateescape(os.strerror(errno))
[ "def", "strerror", "(", "errno", ")", ":", "from", "pypy", ".", "module", ".", "_codecs", ".", "locale", "import", "str_decode_locale_surrogateescape", "return", "str_decode_locale_surrogateescape", "(", "os", ".", "strerror", "(", "errno", ")", ")" ]
Translate an error code to a message string.
[ "Translate", "an", "error", "code", "to", "a", "message", "string", "." ]
python
train
fooelisa/pyiosxr
pyIOSXR/iosxr.py
https://github.com/fooelisa/pyiosxr/blob/2bc11797013f1c29d2d338c32edb95068ebdf524/pyIOSXR/iosxr.py#L154-L175
def open(self):
    """
    Open a connection to an IOS-XR device.

    Connects to the device using SSH and drops into XML mode.
    """
    try:
        self.device = ConnectHandler(device_type='cisco_xr',
                                     ip=self.hostname,
                                     port=self.port,
                                     username=self.username,
                                     password=self.password,
                                     **self.netmiko_kwargs)
        self.device.timeout = self.timeout
        self._xml_agent_alive = True  # successfully open thus alive
    except NetMikoTimeoutException as t_err:
        raise ConnectError(t_err.args[0])
    except NetMikoAuthenticationException as au_err:
        raise ConnectError(au_err.args[0])

    self._cli_prompt = self.device.find_prompt()  # get the prompt
    self._enter_xml_mode()
[ "def", "open", "(", "self", ")", ":", "try", ":", "self", ".", "device", "=", "ConnectHandler", "(", "device_type", "=", "'cisco_xr'", ",", "ip", "=", "self", ".", "hostname", ",", "port", "=", "self", ".", "port", ",", "username", "=", "self", ".", "username", ",", "password", "=", "self", ".", "password", ",", "*", "*", "self", ".", "netmiko_kwargs", ")", "self", ".", "device", ".", "timeout", "=", "self", ".", "timeout", "self", ".", "_xml_agent_alive", "=", "True", "# successfully open thus alive", "except", "NetMikoTimeoutException", "as", "t_err", ":", "raise", "ConnectError", "(", "t_err", ".", "args", "[", "0", "]", ")", "except", "NetMikoAuthenticationException", "as", "au_err", ":", "raise", "ConnectError", "(", "au_err", ".", "args", "[", "0", "]", ")", "self", ".", "_cli_prompt", "=", "self", ".", "device", ".", "find_prompt", "(", ")", "# get the prompt", "self", ".", "_enter_xml_mode", "(", ")" ]
Open a connection to an IOS-XR device. Connects to the device using SSH and drops into XML mode.
[ "Open", "a", "connection", "to", "an", "IOS", "-", "XR", "device", "." ]
python
train
hsolbrig/PyShEx
pyshex/sparql11_query/p17_1_operand_data_types.py
https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/sparql11_query/p17_1_operand_data_types.py#L20-L22
def is_simple_literal(n: Node) -> bool:
    """ simple literal denotes a plain literal with no language tag. """
    return is_typed_literal(n) and cast(Literal, n).datatype is None and cast(Literal, n).language is None
[ "def", "is_simple_literal", "(", "n", ":", "Node", ")", "->", "bool", ":", "return", "is_typed_literal", "(", "n", ")", "and", "cast", "(", "Literal", ",", "n", ")", ".", "datatype", "is", "None", "and", "cast", "(", "Literal", ",", "n", ")", ".", "language", "is", "None" ]
simple literal denotes a plain literal with no language tag.
[ "simple", "literal", "denotes", "a", "plain", "literal", "with", "no", "language", "tag", "." ]
python
train
buriburisuri/sugartensor
sugartensor/sg_logging.py
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_logging.py#L59-L78
def sg_summary_gradient(tensor, gradient, prefix=None, name=None):
    r"""Register `tensor` to summary report as `gradient`

    Args:
      tensor: A `Tensor` to log as gradient
      gradient: A 0-D `Tensor`. A gradient to log
      prefix: A `string`. A prefix to display in the tensor board web UI.
      name: A `string`. A name to display in the tensor board web UI.

    Returns:
      None
    """
    # defaults
    prefix = '' if prefix is None else prefix + '/'
    # summary name
    name = prefix + _pretty_name(tensor) if name is None else prefix + name
    # summary statistics
    # noinspection PyBroadException
    _scalar(name + '/grad', tf.reduce_mean(tf.abs(gradient)))
    _histogram(name + '/grad-h', tf.abs(gradient))
[ "def", "sg_summary_gradient", "(", "tensor", ",", "gradient", ",", "prefix", "=", "None", ",", "name", "=", "None", ")", ":", "# defaults", "prefix", "=", "''", "if", "prefix", "is", "None", "else", "prefix", "+", "'/'", "# summary name", "name", "=", "prefix", "+", "_pretty_name", "(", "tensor", ")", "if", "name", "is", "None", "else", "prefix", "+", "name", "# summary statistics", "# noinspection PyBroadException", "_scalar", "(", "name", "+", "'/grad'", ",", "tf", ".", "reduce_mean", "(", "tf", ".", "abs", "(", "gradient", ")", ")", ")", "_histogram", "(", "name", "+", "'/grad-h'", ",", "tf", ".", "abs", "(", "gradient", ")", ")" ]
r"""Register `tensor` to summary report as `gradient` Args: tensor: A `Tensor` to log as gradient gradient: A 0-D `Tensor`. A gradient to log prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None
[ "r", "Register", "tensor", "to", "summary", "report", "as", "gradient" ]
python
train
nickstenning/tagalog
tagalog/shipper.py
https://github.com/nickstenning/tagalog/blob/c6847a957dc4f96836a5cf13c4eb664fccafaac2/tagalog/shipper.py#L136-L145
def purge(self, connection):
    """Remove the connection from rotation"""
    self._checkpid()
    if connection.pid == self.pid:
        idx = connection._pattern_idx
        if connection in self._in_use_connections[idx]:
            self._in_use_connections[idx].remove(connection)
        else:
            self._available_connections[idx].remove(connection)
        connection.disconnect()
[ "def", "purge", "(", "self", ",", "connection", ")", ":", "self", ".", "_checkpid", "(", ")", "if", "connection", ".", "pid", "==", "self", ".", "pid", ":", "idx", "=", "connection", ".", "_pattern_idx", "if", "connection", "in", "self", ".", "_in_use_connections", "[", "idx", "]", ":", "self", ".", "_in_use_connections", "[", "idx", "]", ".", "remove", "(", "connection", ")", "else", ":", "self", ".", "_available_connections", "[", "idx", "]", ".", "remove", "(", "connection", ")", "connection", ".", "disconnect", "(", ")" ]
Remove the connection from rotation
[ "Remove", "the", "connection", "from", "rotation" ]
python
train
apache/spark
python/pyspark/mllib/linalg/distributed.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L811-L828
def entries(self):
    """
    Entries of the CoordinateMatrix stored as an RDD of MatrixEntries.

    >>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2),
    ...                                        MatrixEntry(6, 4, 2.1)]))
    >>> entries = mat.entries
    >>> entries.first()
    MatrixEntry(0, 0, 1.2)
    """
    # We use DataFrames for serialization of MatrixEntry entries
    # from Java, so we first convert the RDD of entries to a
    # DataFrame on the Scala/Java side. Then we map each Row in
    # the DataFrame back to a MatrixEntry on this side.
    entries_df = callMLlibFunc("getMatrixEntries", self._java_matrix_wrapper._java_model)
    entries = entries_df.rdd.map(lambda row: MatrixEntry(row[0], row[1], row[2]))
    return entries
[ "def", "entries", "(", "self", ")", ":", "# We use DataFrames for serialization of MatrixEntry entries", "# from Java, so we first convert the RDD of entries to a", "# DataFrame on the Scala/Java side. Then we map each Row in", "# the DataFrame back to a MatrixEntry on this side.", "entries_df", "=", "callMLlibFunc", "(", "\"getMatrixEntries\"", ",", "self", ".", "_java_matrix_wrapper", ".", "_java_model", ")", "entries", "=", "entries_df", ".", "rdd", ".", "map", "(", "lambda", "row", ":", "MatrixEntry", "(", "row", "[", "0", "]", ",", "row", "[", "1", "]", ",", "row", "[", "2", "]", ")", ")", "return", "entries" ]
Entries of the CoordinateMatrix stored as an RDD of MatrixEntries.

>>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2),
...                                        MatrixEntry(6, 4, 2.1)]))
>>> entries = mat.entries
>>> entries.first()
MatrixEntry(0, 0, 1.2)
[ "Entries", "of", "the", "CoordinateMatrix", "stored", "as", "an", "RDD", "of", "MatrixEntries", "." ]
python
train
scivision/sciencedates
sciencedates/findnearest.py
https://github.com/scivision/sciencedates/blob/a713389e027b42d26875cf227450a5d7c6696000/sciencedates/findnearest.py#L6-L42
def find_nearest(x, x0) -> Tuple[int, Any]:
    """
    This find_nearest function does NOT assume sorted input

    inputs:
    x: array (float, int, datetime, h5py.Dataset) within which to search for x0
    x0: singleton or array of values to search for in x

    outputs:
    idx: index of flattened x nearest to x0 (i.e. works with higher than 1-D arrays also)
    xidx: x[idx]

    Observe how bisect.bisect() gives the incorrect result!

    idea based on:
    http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    """
    x = np.asanyarray(x)  # for indexing upon return
    x0 = np.atleast_1d(x0)
    # %%
    if x.size == 0 or x0.size == 0:
        raise ValueError('empty input(s)')
    if x0.ndim not in (0, 1):
        raise ValueError('2-D x0 not handled yet')
    # %%
    ind = np.empty_like(x0, dtype=int)
    # NOTE: not trapping IndexError (all-nan) because returning None can surprise with slice indexing
    for i, xi in enumerate(x0):
        if xi is not None and (isinstance(xi, (datetime.datetime, datetime.date, np.datetime64)) or
                               np.isfinite(xi)):
            ind[i] = np.nanargmin(abs(x-xi))
        else:
            raise ValueError('x0 must NOT be None or NaN to avoid surprising None return value')

    return ind.squeeze()[()], x[ind].squeeze()[()]
[ "def", "find_nearest", "(", "x", ",", "x0", ")", "->", "Tuple", "[", "int", ",", "Any", "]", ":", "x", "=", "np", ".", "asanyarray", "(", "x", ")", "# for indexing upon return", "x0", "=", "np", ".", "atleast_1d", "(", "x0", ")", "# %%", "if", "x", ".", "size", "==", "0", "or", "x0", ".", "size", "==", "0", ":", "raise", "ValueError", "(", "'empty input(s)'", ")", "if", "x0", ".", "ndim", "not", "in", "(", "0", ",", "1", ")", ":", "raise", "ValueError", "(", "'2-D x0 not handled yet'", ")", "# %%", "ind", "=", "np", ".", "empty_like", "(", "x0", ",", "dtype", "=", "int", ")", "# NOTE: not trapping IndexError (all-nan) becaues returning None can surprise with slice indexing", "for", "i", ",", "xi", "in", "enumerate", "(", "x0", ")", ":", "if", "xi", "is", "not", "None", "and", "(", "isinstance", "(", "xi", ",", "(", "datetime", ".", "datetime", ",", "datetime", ".", "date", ",", "np", ".", "datetime64", ")", ")", "or", "np", ".", "isfinite", "(", "xi", ")", ")", ":", "ind", "[", "i", "]", "=", "np", ".", "nanargmin", "(", "abs", "(", "x", "-", "xi", ")", ")", "else", ":", "raise", "ValueError", "(", "'x0 must NOT be None or NaN to avoid surprising None return value'", ")", "return", "ind", ".", "squeeze", "(", ")", "[", "(", ")", "]", ",", "x", "[", "ind", "]", ".", "squeeze", "(", ")", "[", "(", ")", "]" ]
This find_nearest function does NOT assume sorted input

inputs:
x: array (float, int, datetime, h5py.Dataset) within which to search for x0
x0: singleton or array of values to search for in x

outputs:
idx: index of flattened x nearest to x0 (i.e. works with higher than 1-D arrays also)
xidx: x[idx]

Observe how bisect.bisect() gives the incorrect result!

idea based on:
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
[ "This", "find_nearest", "function", "does", "NOT", "assume", "sorted", "input" ]
python
train
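A quick check of the unsorted-input behaviour, assuming sciencedates is installed (the values are made up):

# Assumes sciencedates is installed; input is deliberately unsorted.
import numpy as np
from sciencedates.findnearest import find_nearest

idx, val = find_nearest([10.0, 1.0, 7.0, 3.0], 6.5)
assert (idx, val) == (2, 7.0)

idx, val = find_nearest(np.array([10.0, 1.0, 7.0]), [0.9, 9.0])
assert list(idx) == [1, 0] and list(val) == [1.0, 10.0]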
libtcod/python-tcod
build_libtcod.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/build_libtcod.py#L145-L157
def fix_header(filepath):
    """Removes leading whitespace from a MacOS header file.

    This whitespace is causing issues with directives on some platforms.
    """
    with open(filepath, "r+") as f:
        current = f.read()
        fixed = "\n".join(line.strip() for line in current.split("\n"))
        if current == fixed:
            return
        f.seek(0)
        f.truncate()
        f.write(fixed)
[ "def", "fix_header", "(", "filepath", ")", ":", "with", "open", "(", "filepath", ",", "\"r+\"", ")", "as", "f", ":", "current", "=", "f", ".", "read", "(", ")", "fixed", "=", "\"\\n\"", ".", "join", "(", "line", ".", "strip", "(", ")", "for", "line", "in", "current", ".", "split", "(", "\"\\n\"", ")", ")", "if", "current", "==", "fixed", ":", "return", "f", ".", "seek", "(", "0", ")", "f", ".", "truncate", "(", ")", "f", ".", "write", "(", "fixed", ")" ]
Removes leading whitespace from a MacOS header file. This whitespace is causing issues with directives on some platforms.
[ "Removes", "leading", "whitespace", "from", "a", "MacOS", "header", "file", "." ]
python
train
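A self-contained demonstration of the whitespace stripping, using a temporary file in place of a MacOS header:

# Stand-alone demo; reuses the same stripping logic on a temp file.
import os
import tempfile

def fix_header(filepath):
    with open(filepath, "r+") as f:
        current = f.read()
        fixed = "\n".join(line.strip() for line in current.split("\n"))
        if current == fixed:
            return
        f.seek(0)
        f.truncate()
        f.write(fixed)

fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
    f.write('  #include <stdio.h>\n\t#define X 1\n')
fix_header(path)
assert open(path).read() == '#include <stdio.h>\n#define X 1\n'
os.remove(path)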
Xion/taipan
taipan/strings.py
https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/strings.py#L188-L245
def join(delimiter, iterable, **kwargs):
    """Returns a string which is a concatenation of strings in ``iterable``,
    separated by given ``delimiter``.

    :param delimiter: Delimiter to put between strings
    :param iterable: Iterable to join

    Optional keyword arguments control the exact joining strategy:

    :param errors: What to do with erroneous non-strings in the input.
                   Possible values include:

                     * ``'ignore'`` (or ``None``)
                     * ``'cast'`` (or ``False``) -- convert non-strings to strings
                     * ``'raise'`` (or ``True``) -- raise exception for any non-strings
                     * ``'replace'`` -- replace non-strings with alternative value
    :param with_: Replacement used when ``errors == 'replace'``.
                  This can be a string, or a callable taking erroneous value
                  and returning a string replacement.

    .. versionadded:: 0.0.3
       Allow to specify error handling policy through ``errors`` parameter
    """
    ensure_string(delimiter)
    ensure_iterable(iterable)

    ensure_keyword_args(kwargs, optional=('errors', 'with_'))
    errors = kwargs.get('errors', True)

    if errors in ('raise', True):
        iterable = imap(ensure_string, iterable)
    elif errors in ('ignore', None):
        iterable = ifilter(is_string, iterable)
    elif errors in ('cast', False):
        iterable = imap(delimiter.__class__, iterable)
    elif errors == 'replace':
        if 'with_' not in kwargs:
            raise ValueError("'replace' error policy requires specifying "
                             "replacement through with_=")
        with_ = kwargs['with_']
        if is_string(with_):
            replacement = lambda x: with_
        elif callable(with_):
            replacement = with_
        else:
            raise TypeError("error replacement must be a string or function, "
                            "got %s" % type(with_).__name__)
        iterable = (x if is_string(x) else ensure_string(replacement(x))
                    for x in iterable)
    else:
        raise TypeError(
            "%r is not a valid error handling policy for join()" % (errors,))

    return delimiter.join(iterable)
[ "def", "join", "(", "delimiter", ",", "iterable", ",", "*", "*", "kwargs", ")", ":", "ensure_string", "(", "delimiter", ")", "ensure_iterable", "(", "iterable", ")", "ensure_keyword_args", "(", "kwargs", ",", "optional", "=", "(", "'errors'", ",", "'with_'", ")", ")", "errors", "=", "kwargs", ".", "get", "(", "'errors'", ",", "True", ")", "if", "errors", "in", "(", "'raise'", ",", "True", ")", ":", "iterable", "=", "imap", "(", "ensure_string", ",", "iterable", ")", "elif", "errors", "in", "(", "'ignore'", ",", "None", ")", ":", "iterable", "=", "ifilter", "(", "is_string", ",", "iterable", ")", "elif", "errors", "in", "(", "'cast'", ",", "False", ")", ":", "iterable", "=", "imap", "(", "delimiter", ".", "__class__", ",", "iterable", ")", "elif", "errors", "==", "'replace'", ":", "if", "'with_'", "not", "in", "kwargs", ":", "raise", "ValueError", "(", "\"'replace' error policy requires specifying \"", "\"replacement through with_=\"", ")", "with_", "=", "kwargs", "[", "'with_'", "]", "if", "is_string", "(", "with_", ")", ":", "replacement", "=", "lambda", "x", ":", "with_", "elif", "callable", "(", "with_", ")", ":", "replacement", "=", "with_", "else", ":", "raise", "TypeError", "(", "\"error replacement must be a string or function, \"", "\"got %s\"", "%", "type", "(", "with_", ")", ".", "__name__", ")", "iterable", "=", "(", "x", "if", "is_string", "(", "x", ")", "else", "ensure_string", "(", "replacement", "(", "x", ")", ")", "for", "x", "in", "iterable", ")", "else", ":", "raise", "TypeError", "(", "\"%r is not a valid error handling policy for join()\"", "%", "(", "errors", ",", ")", ")", "return", "delimiter", ".", "join", "(", "iterable", ")" ]
Returns a string which is a concatenation of strings in ``iterable``,
separated by given ``delimiter``.

:param delimiter: Delimiter to put between strings
:param iterable: Iterable to join

Optional keyword arguments control the exact joining strategy:

:param errors: What to do with erroneous non-strings in the input.
               Possible values include:

                 * ``'ignore'`` (or ``None``)
                 * ``'cast'`` (or ``False``) -- convert non-strings to strings
                 * ``'raise'`` (or ``True``) -- raise exception for any non-strings
                 * ``'replace'`` -- replace non-strings with alternative value
:param with_: Replacement used when ``errors == 'replace'``.
              This can be a string, or a callable taking erroneous value
              and returning a string replacement.

.. versionadded:: 0.0.3
   Allow to specify error handling policy through ``errors`` parameter
[ "Returns", "a", "string", "which", "is", "a", "concatenation", "of", "strings", "in", "iterable", "separated", "by", "given", "delimiter", "." ]
python
train
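A sketch of the four error policies, assuming taipan is installed (the exception type on the default policy is an assumption based on taipan's ensure_string convention):

# Assumes taipan is installed; data values are illustrative.
from taipan.strings import join

data = ['a', 1, 'b']
assert join(',', data, errors='ignore') == 'a,b'
assert join(',', data, errors='cast') == 'a,1,b'
assert join(',', data, errors='replace', with_='?') == 'a,?,b'
try:
    join(',', data)   # default policy: raise on the first non-string
except TypeError:     # assumed: ensure_string signals with TypeError
    pass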
FulcrumTechnologies/pyconfluence
pyconfluence/actions.py
https://github.com/FulcrumTechnologies/pyconfluence/blob/a999726dbc1cbdd3d9062234698eeae799ce84ce/pyconfluence/actions.py#L281-L294
def delete_page_full(id):
    """Delete a page from Confluence, along with its children.

    Parameters:
    - id: id of a Confluence page.

    Notes:
    - Getting a 204 error is expected! It means the page can no
      longer be found.
    """
    children = _json.loads(get_page_children(id))

    for i in children["results"]:
        delete_page_full(i["id"])

    return delete_page(id)
[ "def", "delete_page_full", "(", "id", ")", ":", "children", "=", "_json", ".", "loads", "(", "get_page_children", "(", "id", ")", ")", "for", "i", "in", "children", "[", "\"results\"", "]", ":", "delete_page_full", "(", "i", "[", "\"id\"", "]", ")", "return", "delete_page", "(", "id", ")" ]
Delete a page from Confluence, along with its children.

Parameters:
- id: id of a Confluence page.

Notes:
- Getting a 204 error is expected! It means the page can no longer be found.
[ "Delete", "a", "page", "from", "Confluence", "along", "with", "its", "children", ".", "Parameters", ":", "-", "id", ":", "id", "of", "a", "Confluence", "page", ".", "Notes", ":", "-", "Getting", "a", "204", "error", "is", "expected!", "It", "means", "the", "page", "can", "no", "longer", "be", "found", "." ]
python
train
pygobject/pgi
pgi/overrides/Gtk.py
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/overrides/Gtk.py#L2178-L2193
def get_selected(self):
    """
    :returns:
        :model: the :obj:`Gtk.TreeModel`
        :iter: The :obj:`Gtk.TreeIter` or :obj:`None`
    :rtype: (**model**: :obj:`Gtk.TreeModel`, **iter**: :obj:`Gtk.TreeIter` or :obj:`None`)

    {{ docs }}
    """
    success, model, aiter = super(TreeSelection, self).get_selected()
    if success:
        return (model, aiter)
    else:
        return (model, None)
[ "def", "get_selected", "(", "self", ")", ":", "success", ",", "model", ",", "aiter", "=", "super", "(", "TreeSelection", ",", "self", ")", ".", "get_selected", "(", ")", "if", "success", ":", "return", "(", "model", ",", "aiter", ")", "else", ":", "return", "(", "model", ",", "None", ")" ]
:returns:
    :model: the :obj:`Gtk.TreeModel`
    :iter: The :obj:`Gtk.TreeIter` or :obj:`None`
:rtype: (**model**: :obj:`Gtk.TreeModel`, **iter**: :obj:`Gtk.TreeIter` or :obj:`None`)

{{ docs }}
[ ":", "returns", ":", ":", "model", ":", "the", ":", "obj", ":", "Gtk", ".", "TreeModel", ":", "iter", ":", "The", ":", "obj", ":", "Gtk", ".", "TreeIter", "or", ":", "obj", ":", "None" ]
python
train
cocoakekeyu/cancan
cancan/ability.py
https://github.com/cocoakekeyu/cancan/blob/f198d560e6e008e6c5580ba55581a939a5d544ed/cancan/ability.py#L37-L41
def addnot(self, action=None, subject=None, **conditions):
    """
    Defines an ability which cannot be done.
    """
    self.add_rule(Rule(False, action, subject, **conditions))
[ "def", "addnot", "(", "self", ",", "action", "=", "None", ",", "subject", "=", "None", ",", "*", "*", "conditions", ")", ":", "self", ".", "add_rule", "(", "Rule", "(", "False", ",", "action", ",", "subject", ",", "*", "*", "conditions", ")", ")" ]
Defines an ability which cannot be done.
[ "Defines", "an", "ability", "which", "cannot", "be", "done", "." ]
python
train
NASA-AMMOS/AIT-Core
ait/core/table.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/table.py#L593-L596
def add(self, defn):
    """Adds the given Command Definition to this Command Dictionary."""
    self[defn.name] = defn
    self.colnames[defn.name] = defn
[ "def", "add", "(", "self", ",", "defn", ")", ":", "self", "[", "defn", ".", "name", "]", "=", "defn", "self", ".", "colnames", "[", "defn", ".", "name", "]", "=", "defn" ]
Adds the given Command Definition to this Command Dictionary.
[ "Adds", "the", "given", "Command", "Definition", "to", "this", "Command", "Dictionary", "." ]
python
train
globality-corp/microcosm
microcosm/loaders/environment.py
https://github.com/globality-corp/microcosm/blob/6856200ca295da4269c8c1c9de7db0b97c1f4523/microcosm/loaders/environment.py#L13-L43
def _load_from_environ(metadata, value_func=None):
    """
    Load configuration from environment variables.

    Any environment variable prefixed with the metadata's name will be
    used to recursively set dictionary keys, splitting on '__'.

    :param value_func: a mutator for the envvar's value (if any)

    """
    # We'll match the envvar name against the metadata's name. The envvar
    # name must be uppercase and hyphens in names converted to underscores.
    #
    # | envar       | name    | matches? |
    # +-------------+---------+----------+
    # | FOO_BAR     | foo     | yes      |
    # | FOO_BAR     | bar     | no       |
    # | foo_bar     | bar     | no       |
    # | FOO_BAR_BAZ | foo_bar | yes      |
    # | FOO_BAR_BAZ | foo-bar | yes      |
    # +-------------+---------+----------+
    prefix = metadata.name.upper().replace("-", "_")

    return expand_config(
        environ,
        separator="__",
        skip_to=1,
        key_parts_filter=lambda key_parts: len(key_parts) > 1 and key_parts[0] == prefix,
        value_func=lambda value: value_func(value) if value_func else value,
    )
[ "def", "_load_from_environ", "(", "metadata", ",", "value_func", "=", "None", ")", ":", "# We'll match the ennvar name against the metadata's name. The ennvar", "# name must be uppercase and hyphens in names converted to underscores.", "#", "# | envar | name | matches? |", "# +-------------+---------+----------+", "# | FOO_BAR | foo | yes |", "# | FOO_BAR | bar | no |", "# | foo_bar | bar | no |", "# | FOO_BAR_BAZ | foo_bar | yes |", "# | FOO_BAR_BAZ | foo-bar | yes |", "# +-------------+---------+----------+", "prefix", "=", "metadata", ".", "name", ".", "upper", "(", ")", ".", "replace", "(", "\"-\"", ",", "\"_\"", ")", "return", "expand_config", "(", "environ", ",", "separator", "=", "\"__\"", ",", "skip_to", "=", "1", ",", "key_parts_filter", "=", "lambda", "key_parts", ":", "len", "(", "key_parts", ")", ">", "1", "and", "key_parts", "[", "0", "]", "==", "prefix", ",", "value_func", "=", "lambda", "value", ":", "value_func", "(", "value", ")", "if", "value_func", "else", "value", ",", ")" ]
Load configuration from environment variables.

Any environment variable prefixed with the metadata's name will be used
to recursively set dictionary keys, splitting on '__'.

:param value_func: a mutator for the envvar's value (if any)
[ "Load", "configuration", "from", "environment", "variables", "." ]
python
train
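A pure-python sketch of the '__' expansion that expand_config performs for the loader above; lowercasing the keys is an assumption made for readability:

# Stand-alone illustration of FOO__BAR__BAZ -> nested-dict expansion.
environ = {'MYAPP__DB__HOST': 'db1', 'MYAPP__DB__PORT': '5432', 'PATH': '/bin'}
prefix = 'MYAPP'

config = {}
for key, value in environ.items():
    parts = key.split('__')
    if len(parts) <= 1 or parts[0] != prefix:
        continue                  # mirrors key_parts_filter above
    node = config
    for part in parts[1:-1]:      # skip_to=1 drops the prefix
        node = node.setdefault(part.lower(), {})
    node[parts[-1].lower()] = value

assert config == {'db': {'host': 'db1', 'port': '5432'}}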