column            type           range / classes
repo              stringlengths  7 - 54
path              stringlengths  4 - 192
url               stringlengths  87 - 284
code              stringlengths  78 - 104k
code_tokens       list
docstring         stringlengths  1 - 46.9k
docstring_tokens  list
language          stringclasses  1 value (python)
partition         stringclasses  3 values (train / valid / test)
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/uri_parser.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/uri_parser.py#L261-L356
def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False):
    """Parse and validate a MongoDB URI.

    Returns a dict of the form::

        {
            'nodelist': <list of (host, port) tuples>,
            'username': <username> or None,
            'password': <password> or None,
            'database': <database name> or None,
            'collection': <collection name> or None,
            'options': <dict of MongoDB URI options>
        }

    :Parameters:
        - `uri`: The MongoDB URI to parse.
        - `default_port`: The port number to use when one wasn't specified
          for a host in the URI.
        - `validate`: If ``True`` (the default), validate and normalize all
          options.
        - `warn` (optional): When validating, if ``True`` then will warn the
          user then ignore any invalid options or values. If ``False``,
          validation will error when options are unsupported or values are
          invalid.

    .. versionchanged:: 3.5
        Return the original value of the ``readPreference`` MongoDB URI option
        instead of the validated read preference mode.

    .. versionchanged:: 3.1
        ``warn`` added so invalid options can be ignored.
    """
    if not uri.startswith(SCHEME):
        raise InvalidURI("Invalid URI scheme: URI "
                         "must begin with '%s'" % (SCHEME,))

    scheme_free = uri[SCHEME_LEN:]

    if not scheme_free:
        raise InvalidURI("Must provide at least one hostname or IP.")

    user = None
    passwd = None
    dbase = None
    collection = None
    options = {}

    host_part, _, path_part = _partition(scheme_free, '/')
    if not host_part:
        host_part = path_part
        path_part = ""

    if not path_part and '?' in host_part:
        raise InvalidURI("A '/' is required between "
                         "the host list and any options.")

    if '@' in host_part:
        userinfo, _, hosts = _rpartition(host_part, '@')
        user, passwd = parse_userinfo(userinfo)
    else:
        hosts = host_part

    if '/' in hosts:
        raise InvalidURI("Any '/' in a unix domain socket must be"
                         " percent-encoded: %s" % host_part)

    hosts = unquote_plus(hosts)
    nodes = split_hosts(hosts, default_port=default_port)

    if path_part:
        if path_part[0] == '?':
            opts = unquote_plus(path_part[1:])
        else:
            dbase, _, opts = map(unquote_plus, _partition(path_part, '?'))
            if '.' in dbase:
                dbase, collection = dbase.split('.', 1)
            if _BAD_DB_CHARS.search(dbase):
                raise InvalidURI('Bad database name "%s"' % dbase)

        if opts:
            options = split_options(opts, validate, warn)

    if dbase is not None:
        dbase = unquote_plus(dbase)
    if collection is not None:
        collection = unquote_plus(collection)

    return {
        'nodelist': nodes,
        'username': user,
        'password': passwd,
        'database': dbase,
        'collection': collection,
        'options': options
    }
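A brief usage sketch for the function above (the URI is illustrative; SCHEME, DEFAULT_PORT and the helper functions are module-level names in the vendored pymongo uri_parser module):

parsed = parse_uri('mongodb://user:pass@localhost:27017/mydb.coll?replicaSet=rs0')
parsed['nodelist']    # [('localhost', 27017)]
parsed['username']    # 'user'
parsed['database']    # 'mydb'
parsed['collection']  # 'coll'
parsed['options']     # dict of the parsed (and, by default, validated) URI options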
python
train
farshidce/touchworks-python
touchworks/api/http.py
https://github.com/farshidce/touchworks-python/blob/ea8f93a0f4273de1317a318e945a571f5038ba62/touchworks/api/http.py#L312-L329
def find_document_type_by_name(self, entity_name, active='Y', match_case=True):
    """
    search document types by name and active(Y/N) status
    :param entity_name: entity name
    :return:
    """
    all_types = self.get_dictionary('Document_Type_DE')
    if match_case:
        filtered = filter(
            lambda x: x['Active'] == active and
            x['EntryName'].find(entity_name) >= 0, all_types)
    else:
        token = entity_name.lower()
        filtered = filter(
            lambda x: x['Active'] == active and
            x['EntryName'].lower().find(token) >= 0, all_types)
    return filtered
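One note for callers: under Python 3, filter returns a lazy iterator, so materialize it when a list is needed. A hedged sketch, where api stands for an already-constructed client object:

matches = list(api.find_document_type_by_name('Referral', active='Y', match_case=False))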
python
train
saltstack/salt
salt/modules/aliases.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aliases.py#L141-L158
def has_target(alias, target):
    '''
    Return true if the alias/target is set

    CLI Example:

    .. code-block:: bash

        salt '*' aliases.has_target alias target
    '''
    if target == '':
        raise SaltInvocationError('target can not be an empty string')
    aliases = list_aliases()
    if alias not in aliases:
        return False
    if isinstance(target, list):
        target = ', '.join(target)
    return target == aliases[alias]
python
train
DomainTools/python_api
domaintools/api.py
https://github.com/DomainTools/python_api/blob/17be85fd4913fbe14d7660a4f4829242f1663e60/domaintools/api.py#L189-L197
def phisheye_term_list(self, include_inactive=False, **kwargs):
    """Provides a list of terms that are set up for this account.
    This call is not charged against your API usage limit.

    NOTE: The terms must be configured in the PhishEye web interface:
    https://research.domaintools.com/phisheye.
    There is no API call to set up the terms.
    """
    return self._results('phisheye_term_list', '/v1/phisheye/term-list',
                         include_inactive=include_inactive,
                         items_path=('terms', ), **kwargs)
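A hedged usage sketch with placeholder credentials; because of items_path=('terms',), the returned results object should iterate over the individual terms:

from domaintools import API

api = API('a_username', 'an_api_key')
for term in api.phisheye_term_list(include_inactive=True):
    print(term)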
python
train
OCA/openupgradelib
openupgradelib/openupgrade.py
https://github.com/OCA/openupgradelib/blob/b220b6498075d62c1b64073cc934513a465cfd85/openupgradelib/openupgrade.py#L397-L502
def rename_fields(env, field_spec, no_deep=False):
    """Rename fields. Typically called in the pre script.
    WARNING: If using this on the base module, pass the argument ``no_deep``
    with True value to avoid using the environment (which is not yet
    loaded).

    This, in contrast to ``rename_columns``, performs all the steps needed
    to completely rename a field from one name to another, with all its
    side features: translations, filters, exports...

    Call this method whenever you are not performing a pure SQL column
    renaming for other purposes (preserving a value, for example).

    This method also performs the SQL column renaming, so only one call is
    needed.

    :param env: Environment/pool variable. The database cursor is the only
      thing needed, but added in prevision of TODO tasks for not breaking
      API later.
    :param field_spec: a list of tuples with the following elements:
      * Model name. The name of the Odoo model
      * Table name. The name of the SQL table for the model.
      * Old field name. The name of the old field.
      * New field name. The name of the new field.
    :param no_deep: If True, avoids performing any operation that involves
      the environment. Not used for now.
    """
    cr = env.cr
    for model, table, old_field, new_field in field_spec:
        if column_exists(cr, table, old_field):
            rename_columns(cr, {table: [(old_field, new_field)]})
        # Rename corresponding field entry
        cr.execute("""
            UPDATE ir_model_fields
            SET name = %s
            WHERE name = %s
                AND model = %s
            """, (new_field, old_field, model),
        )
        # Rename translations
        # (parameter order matches SET name = <new> WHERE name = <old>)
        cr.execute("""
            UPDATE ir_translation
            SET name = %s
            WHERE name = %s
                AND type = 'model'
            """, (
                "%s,%s" % (model, new_field),
                "%s,%s" % (model, old_field),
            ),
        )
        # Rename appearances on export profiles
        # TODO: Rename when the field is part of a submodel (ex. m2one.field)
        cr.execute("""
            UPDATE ir_exports_line
            SET name = %s
            WHERE name = %s
            """, (new_field, old_field),
        )
        # Rename appearances on filters
        # Example of replaced domain: [['field', '=', self], ...]
        # TODO: Rename when the field is part of a submodel (ex. m2one.field)
        cr.execute("""
            UPDATE ir_filters
            SET domain = replace(domain, %(old_pattern)s, %(new_pattern)s)
            WHERE model_id = %%s
                AND domain ~ %(old_pattern)s
            """ % {
                'old_pattern': "$$'%s'$$" % old_field,
                'new_pattern': "$$'%s'$$" % new_field,
            }, (model, ),
        )
        # Examples of replaced contexts:
        # {'group_by': ['field', 'other_field'], 'other_key':value}
        # {'group_by': ['date_field:month']}
        # {'other_key': value, 'group_by': ['other_field', 'field']}
        # {'group_by': ['other_field'],'col_group_by': ['field']}
        cr.execute(r"""
            UPDATE ir_filters
            SET context = regexp_replace(
                context, %(old_pattern)s, %(new_pattern)s
            )
            WHERE model_id = %%s
                AND context ~ %(old_pattern)s
            """ % {
                'old_pattern': (
                    r"$$('group_by'|'col_group_by'):([^\]]*)"
                    r"'%s(:day|:week|:month|:year){0,1}'(.*?\])$$"
                ) % old_field,
                'new_pattern': r"$$\1:\2'%s\3'\4$$" % new_field,
            }, (model, ),
        )
        if table_exists(env.cr, 'mail_alias'):
            # Rename appearances on mail alias
            cr.execute("""
                UPDATE mail_alias ma
                SET alias_defaults =
                    replace(alias_defaults, %(old_pattern)s, %(new_pattern)s)
                FROM ir_model im
                WHERE ma.alias_model_id = im.id
                    AND im.model = %%s
                    AND ma.alias_defaults ~ %(old_pattern)s
                """ % {
                    'old_pattern': "$$'%s'$$" % old_field,
                    'new_pattern': "$$'%s'$$" % new_field,
                }, (model, ),
            )
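A hedged sketch of a pre-migration script calling this helper (the model, table and field names are illustrative):

from openupgradelib import openupgrade

@openupgrade.migrate(use_env=True)
def migrate(env, version):
    openupgrade.rename_fields(
        env, [('res.partner', 'res_partner', 'old_name', 'new_name')])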
python
train
Synerty/peek-plugin-base
peek_plugin_base/server/PeekPlatformServerHttpHookABC.py
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/server/PeekPlatformServerHttpHookABC.py#L30-L41
def addServerResource(self, pluginSubPath: bytes, resource: BasicResource) -> None:
    """ Add Server Resource

    Add a custom implementation of a served http resource.

    :param pluginSubPath: The resource path where you want to serve this resource.
    :param resource: The resource to serve.
    :return: None
    """
    pluginSubPath = pluginSubPath.strip(b'/')
    self.__rootServerResource.putChild(pluginSubPath, resource)
python
train
Erotemic/utool
utool/util_cache.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L989-L993
def delete_global_cache(appname='default'):
    """ Deletes the global cache shelf file for the given appname """
    #close_global_shelf(appname)
    shelf_fpath = get_global_shelf_fpath(appname)
    util_path.remove_file(shelf_fpath, verbose=True, dryrun=False)
python
train
ASMfreaK/habitipy
habitipy/util.py
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/util.py#L166-L169
def get_translation_functions(package_name: str,
                              names: Tuple[str, ...] = ('gettext',)):
    """finds and installs translation functions for package"""
    translation = get_translation_for(package_name)
    return [getattr(translation, x) for x in names]
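A hedged usage sketch: bind the catalog's gettext/ngettext methods (both exist on standard gettext translation objects) to short local names:

_, ngettext = get_translation_functions('habitipy', names=('gettext', 'ngettext'))
print(_('Hello'))
print(ngettext('{} task', '{} tasks', 2).format(2))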
python
train
buildbot/buildbot
master/buildbot/db/pool.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/db/pool.py#L152-L159
def shutdown(self):
    """Manually stop the pool.  This is only necessary from tests, as the
    pool will stop itself when the reactor stops under normal
    circumstances."""
    if not self._stop_evt:
        return  # pool is already stopped
    self.reactor.removeSystemEventTrigger(self._stop_evt)
    self._stop()
python
train
goose3/goose3
goose3/extractors/content.py
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/content.py#L198-L225
def get_siblings_content(self, current_sibling, baselinescore_siblings_para):
    """
    adds any siblings that may have a decent score to this node
    """
    if current_sibling.tag == 'p' and self.parser.getText(current_sibling):
        tmp = current_sibling
        if tmp.tail:
            tmp = deepcopy(tmp)
            tmp.tail = ''
        return [tmp]
    else:
        potential_paragraphs = self.parser.getElementsByTag(current_sibling, tag='p')
        if potential_paragraphs is None:
            return None

        paragraphs = list()
        for first_paragraph in potential_paragraphs:
            text = self.parser.getText(first_paragraph)
            if text:  # no len(text) > 0
                word_stats = self.stopwords_class(
                    language=self.get_language()).get_stopword_count(text)
                paragraph_score = word_stats.get_stopword_count()
                sibling_baseline_score = float(.30)
                high_link_density = self.is_highlink_density(first_paragraph)
                score = float(baselinescore_siblings_para * sibling_baseline_score)
                if score < paragraph_score and not high_link_density:
                    para = self.parser.createElement(tag='p', text=text, tail=None)
                    paragraphs.append(para)
        return paragraphs
python
valid
bw2/ConfigArgParse
configargparse.py
https://github.com/bw2/ConfigArgParse/blob/8bbc7de67f884184068d62af7f78e723d01c0081/configargparse.py#L701-L754
def _open_config_files(self, command_line_args):
    """Tries to parse config file path(s) from within command_line_args.
    Returns a list of opened config files, including files specified on the
    commandline as well as any default_config_files specified in the
    constructor that are present on disk.

    Args:
        command_line_args: List of all args (already split on spaces)
    """
    # open any default config files
    config_files = [open(f) for files in map(
        glob.glob, map(os.path.expanduser, self._default_config_files))
        for f in files]

    # list actions with is_config_file_arg=True. Its possible there is more
    # than one such arg.
    user_config_file_arg_actions = [
        a for a in self._actions if getattr(a, "is_config_file_arg", False)]

    if not user_config_file_arg_actions:
        return config_files

    for action in user_config_file_arg_actions:
        # try to parse out the config file path by using a clean new
        # ArgumentParser that only knows this one arg/action.
        arg_parser = argparse.ArgumentParser(
            prefix_chars=self.prefix_chars,
            add_help=False)
        arg_parser._add_action(action)

        # make parser not exit on error by replacing its error method.
        # Otherwise it sys.exits(..) if, for example, config file
        # is_required=True and user doesn't provide it.
        def error_method(self, message):
            pass
        arg_parser.error = types.MethodType(error_method, arg_parser)

        # check whether the user provided a value
        parsed_arg = arg_parser.parse_known_args(args=command_line_args)
        if not parsed_arg:
            continue
        namespace, _ = parsed_arg
        user_config_file = getattr(namespace, action.dest, None)

        if not user_config_file:
            continue
        # validate the user-provided config file path
        user_config_file = os.path.expanduser(user_config_file)
        if not os.path.isfile(user_config_file):
            self.error('File not found: %s' % user_config_file)

        config_files += [open(user_config_file)]

    return config_files
python
train
buruzaemon/natto-py
natto/environment.py
https://github.com/buruzaemon/natto-py/blob/018fe004c47c45c66bdf2e03fe24e981ae089b76/natto/environment.py#L164-L199
def __regkey_value(self, path, name='', start_key=None):
    r'''Return the data of value mecabrc at MeCab HKEY node.

    On Windows, the path to the mecabrc as set in the Windows Registry is
    used to deduce the path to libmecab.dll.

    Returns:
        The full path to the mecabrc on Windows.

    Raises:
        WindowsError: A problem was encountered in trying to locate the
            value mecabrc at HKEY_CURRENT_USER\Software\MeCab.
    '''
    if sys.version < '3':
        import _winreg as reg
    else:
        import winreg as reg

    def _fn(path, name='', start_key=None):
        if isinstance(path, str):
            path = path.split('\\')
            if start_key is None:
                start_key = getattr(reg, path[0])
            return _fn(path[1:], name, start_key)
        else:
            subkey = path.pop(0)
            with reg.OpenKey(start_key, subkey) as handle:
                if path:
                    return _fn(path, name, handle)
                else:
                    desc, i = None, 0
                    while not desc or desc[0] != name:
                        desc = reg.EnumValue(handle, i)
                        i += 1
                    return desc[1]
    return _fn(path, name, start_key)
python
train
katerina7479/pypdflite
pypdflite/pdflite.py
https://github.com/katerina7479/pypdflite/blob/ac2501f30d6619eae9dea5644717575ca9263d0a/pypdflite/pdflite.py#L313-L326
def _text_to_string(self, text):
    """ Provides for escape characters and converting to pdf text object
        (pdf strings are in parentheses). Mainly for use in the information
        block here, this functionality is also present in the text object.
    """
    if text:
        for i, j in [("\\", "\\\\"), (")", "\\)"), ("(", "\\(")]:
            text = text.replace(i, j)
        text = "(%s)" % text
    else:
        text = 'None'
    return text
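A worked example of the escaping (a hypothetical call on a writer instance named doc):

doc._text_to_string('A (nested) title')  # returns '(A \\(nested\\) title)'
doc._text_to_string('')                  # empty/falsy input returns 'None'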
python
test
pudo/dataset
dataset/util.py
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/util.py#L111-L120
def pad_chunk_columns(chunk):
    """Given a set of items to be inserted, make sure they all have the
    same columns by padding columns with None if they are missing."""
    columns = set()
    for record in chunk:
        columns.update(record.keys())
    for record in chunk:
        for column in columns:
            record.setdefault(column, None)
    return chunk
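A worked example, grounded in the code above: the second record gains an explicit None for its missing column.

rows = [{'name': 'ada', 'age': 36}, {'name': 'grace'}]
pad_chunk_columns(rows)
# -> [{'name': 'ada', 'age': 36}, {'name': 'grace', 'age': None}]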
python
train
SoCo/SoCo
soco/data_structures.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/data_structures.py#L54-L78
def to_didl_string(*args):
    """Convert any number of `DidlObjects <DidlObject>` to a unicode xml
    string.

    Args:
        *args (DidlObject): One or more `DidlObject` (or subclass) instances.

    Returns:
        str: A unicode string representation of DIDL-Lite XML in the form
        ``'<DIDL-Lite ...>...</DIDL-Lite>'``.
    """
    didl = XML.Element(
        'DIDL-Lite',
        {
            'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
            'xmlns:dc': "http://purl.org/dc/elements/1.1/",
            'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
            'xmlns:r': "urn:schemas-rinconnetworks-com:metadata-1-0/"
        })
    for arg in args:
        didl.append(arg.to_element())
    if sys.version_info[0] == 2:
        return XML.tostring(didl)
    else:
        return XML.tostring(didl, encoding='unicode')
python
train
prompt-toolkit/pyvim
pyvim/commands/commands.py
https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/commands/commands.py#L302-L306
def quit_all(editor, force=False):
    """
    Quit all.
    """
    quit(editor, all_=True, force=force)
python
train
marl/jams
jams/display.py
https://github.com/marl/jams/blob/b16778399b9528efbd71434842a079f7691a7a66/jams/display.py#L71-L76
def hierarchy(annotation, **kwargs):
    '''Plotting wrapper for hierarchical segmentations'''
    htimes, hlabels = hierarchy_flatten(annotation)
    htimes = [np.asarray(_) for _ in htimes]
    return mir_eval.display.hierarchy(htimes, hlabels, **kwargs)
python
valid
python-cmd2/cmd2
cmd2/cmd2.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L2261-L2284
def alias_create(self, args: argparse.Namespace) -> None:
    """Create or overwrite an alias"""
    # Validate the alias name
    valid, errmsg = self.statement_parser.is_valid_command(args.name)
    if not valid:
        self.perror("Invalid alias name: {}".format(errmsg), traceback_war=False)
        return

    if args.name in self.macros:
        self.perror("Alias cannot have the same name as a macro", traceback_war=False)
        return

    utils.unquote_redirection_tokens(args.command_args)

    # Build the alias value string
    value = args.command
    if args.command_args:
        value += ' ' + ' '.join(args.command_args)

    # Set the alias
    result = "overwritten" if args.name in self.aliases else "created"
    self.aliases[args.name] = value
    self.poutput("Alias '{}' {}".format(args.name, result))
python
train
PolyJIT/benchbuild
benchbuild/likwid.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L110-L123
def read_tables(fstream):
    """
    Read all tables from likwid's file stream.

    Args:
        fstream: Likwid's output file stream.

    Returns:
        A generator that can be used to iterate over all tables in the
        fstream.
    """
    table = read_table(fstream)
    while table is not None:
        yield table
        table = read_table(fstream)
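A hedged usage sketch (the file name is illustrative and handle_table is a placeholder for the caller's own processing):

with open('likwid-output.txt') as fstream:
    for table in read_tables(fstream):
        handle_table(table)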
python
train
bspaans/python-mingus
mingus/extra/tablature.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/extra/tablature.py#L433-L440
def _get_width(maxwidth):
    """Return the width of a single bar, when width of the page is given."""
    width = maxwidth / 3
    if maxwidth <= 60:
        width = maxwidth
    elif 60 < maxwidth <= 120:
        width = maxwidth / 2
    return width
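Worked examples of the three branches (exact numeric types depend on Python 2 vs 3 division):

_get_width(60)   # -> 60, one bar spans the page
_get_width(100)  # -> 50, two bars per line
_get_width(180)  # -> 60, three bars per line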
python
train
b3j0f/annotation
b3j0f/annotation/async.py
https://github.com/b3j0f/annotation/blob/738035a974e4092696d9dc1bbd149faa21c8c51f/b3j0f/annotation/async.py#L279-L287
def notify_observers(self, joinpoint, post=False):
    """Notify observers with parameter calls and information about
    pre/post call.
    """
    _observers = tuple(self.observers)

    for observer in _observers:
        observer.notify(joinpoint=joinpoint, post=post)
python
train
noahbenson/pimms
pimms/immutable.py
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/immutable.py#L482-L499
def require(f):
    '''
    The @require decorator, usable in an immutable class (see immutable),
    specifies that the following function is actually a validation check on
    the immutable class. These functions will appear as static members of the
    class and get called automatically when the relevant data change. Daughter
    classes can overload requirements to change them, or may add new
    requirements with different function names.
    '''
    (args, varargs, kwargs, dflts) = getargspec_py27like(f)
    if varargs is not None or kwargs is not None or dflts:
        raise ValueError(
            'Requirements may not accept variable, variadic keyword, or default arguments')
    f._pimms_immutable_data_ = {}
    f._pimms_immutable_data_['is_check'] = True
    f._pimms_immutable_data_['inputs'] = args
    f._pimms_immutable_data_['name'] = f.__name__
    f = staticmethod(f)
    return f
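A hedged sketch of the decorator in use, following the pimms immutable pattern the docstring describes (the class and its parameter names are illustrative):

import pimms

@pimms.immutable
class Interval(object):
    def __init__(self, lo, hi):
        self.lo = lo
        self.hi = hi
    @pimms.param
    def lo(l):
        return l
    @pimms.param
    def hi(h):
        return h
    @pimms.require
    def ordered(lo, hi):
        # runs automatically whenever lo or hi changes
        if lo > hi:
            raise ValueError('lo must not exceed hi')
        return True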
python
train
juju-solutions/charms.reactive
charms/reactive/bus.py
https://github.com/juju-solutions/charms.reactive/blob/e37e781432e77c12b63d2c739bd6cd70d3230c3a/charms/reactive/bus.py#L185-L199
def register_flags(self, flags):
    """
    Register flags as being relevant to this handler.

    Relevant flags will be used to determine if the handler should
    be re-invoked due to changes in the set of active flags.  If this
    handler has already been invoked during this :func:`dispatch` run
    and none of its relevant flags have been set or removed since then,
    then the handler will be skipped.

    This is also used for linting and composition purposes, to determine
    if a layer has unhandled flags.
    """
    self._CONSUMED_FLAGS.update(flags)
    self._flags.update(flags)
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/extract_text.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L478-L505
def gen_xml_files_from_docx(fp: BinaryIO) -> Iterator[str]:
    """
    Generate XML files (as strings) from a DOCX file.

    Args:
        fp: :class:`BinaryIO` object for reading the ``.DOCX`` file

    Yields:
        the string contents of each individual XML file within the ``.DOCX``
        file

    Raises:
        zipfile.BadZipFile: if the zip is unreadable (encrypted?)
    """
    try:
        z = zipfile.ZipFile(fp)
        filelist = z.namelist()
        for filename in filelist:
            if DOCX_HEADER_FILE_REGEX.match(filename):
                yield z.read(filename).decode("utf8")
        yield z.read(DOCX_DOC_FILE)
        for filename in filelist:
            if DOCX_FOOTER_FILE_REGEX.match(filename):
                yield z.read(filename).decode("utf8")
    except zipfile.BadZipFile:
        # Clarify the error:
        raise zipfile.BadZipFile("File is not a zip file - encrypted DOCX?")
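A hedged usage sketch (the file name is illustrative); note the file must be opened in binary mode:

with open('report.docx', 'rb') as fp:
    for xml_doc in gen_xml_files_from_docx(fp):
        print(xml_doc[:80])  # start of each header/body/footer part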
python
train
neo4j-drivers/neobolt
neobolt/impl/python/direct.py
https://github.com/neo4j-drivers/neobolt/blob/724569d76e85777c4f5e30e8d0a18116bda4d8cd/neobolt/impl/python/direct.py#L300-L310
def reset(self):
    """ Add a RESET message to the outgoing queue, send
    it and consume all remaining messages.
    """
    def fail(metadata):
        raise ProtocolError("RESET failed %r" % metadata)

    log_debug("[#%04X]  C: RESET", self.local_port)
    self._append(b"\x0F", response=Response(self, on_failure=fail))
    self.sync()
python
train
Vagrants/blackbird
blackbird/utils/configread.py
https://github.com/Vagrants/blackbird/blob/3b38cd5650caae362e0668dbd38bf8f88233e079/blackbird/utils/configread.py#L150-L166
def _merge_includes(self):
    """
    If "include" option exists in "default.cfg",
    read the file (glob-match) in the directory.
    """
    raw_include_path = self.get_global_include()
    if raw_include_path:
        abs_include_path = self._get_global_include_abs_path(
            raw_include_path
        )
        self._validate_global_include(abs_include_path)
        self.set_global_include(abs_include_path)

        for infile in glob.glob(abs_include_path):
            self.config.merge(
                self._configobj_factory(infile=infile)
            )
python
train
numenta/nupic
src/nupic/swarming/hypersearch_worker.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch_worker.py#L127-L231
def _processUpdatedModels(self, cjDAO):
    """ For all models that modified their results since last time this
    method was called, send their latest results to the Hypersearch
    implementation.
    """
    # Get the latest update counters. This returns a list of tuples:
    #  (modelID, updateCounter)
    curModelIDCtrList = cjDAO.modelsGetUpdateCounters(self._options.jobID)
    if len(curModelIDCtrList) == 0:
      return

    self.logger.debug("current modelID/updateCounters: %s" \
                      % (str(curModelIDCtrList)))
    self.logger.debug("last modelID/updateCounters: %s" \
                      % (str(self._modelIDCtrList)))

    # --------------------------------------------------------------------
    # Find out which ones have changed update counters. Since these are models
    # that the Hypersearch implementation already knows about, we don't need to
    # send params or paramsHash
    curModelIDCtrList = sorted(curModelIDCtrList)
    numItems = len(curModelIDCtrList)

    # Each item in the list we are filtering contains:
    #  (idxIntoModelIDCtrList, (modelID, curCtr), (modelID, oldCtr))
    # We only want to keep the ones where the oldCtr != curCtr
    changedEntries = filter(lambda x: x[1][1] != x[2][1],
                            itertools.izip(xrange(numItems),
                                           curModelIDCtrList,
                                           self._modelIDCtrList))

    if len(changedEntries) > 0:
      # Update values in our cache
      self.logger.debug("changedEntries: %s", str(changedEntries))
      for entry in changedEntries:
        (idx, (modelID, curCtr), (_, oldCtr)) = entry
        self._modelIDCtrDict[modelID] = curCtr
        assert (self._modelIDCtrList[idx][0] == modelID)
        assert (curCtr != oldCtr)
        self._modelIDCtrList[idx][1] = curCtr

      # Tell Hypersearch implementation of the updated results for each model
      changedModelIDs = [x[1][0] for x in changedEntries]
      modelResults = cjDAO.modelsGetResultAndStatus(changedModelIDs)
      for mResult in modelResults:
        results = mResult.results
        if results is not None:
          results = json.loads(results)
        self._hs.recordModelProgress(modelID=mResult.modelId,
                     modelParams=None,
                     modelParamsHash=mResult.engParamsHash,
                     results=results,
                     completed=(mResult.status == cjDAO.STATUS_COMPLETED),
                     completionReason=mResult.completionReason,
                     matured=mResult.engMatured,
                     numRecords=mResult.numRecords)

    # --------------------------------------------------------------------
    # Figure out which ones are newly arrived and add them to our
    # cache
    curModelIDSet = set([x[0] for x in curModelIDCtrList])
    newModelIDs = curModelIDSet.difference(self._modelIDSet)
    if len(newModelIDs) > 0:

      # Add new modelID and counters to our cache
      self._modelIDSet.update(newModelIDs)
      curModelIDCtrDict = dict(curModelIDCtrList)

      # Get the results for each of these models and send them to the
      # Hypersearch implementation.
      modelInfos = cjDAO.modelsGetResultAndStatus(newModelIDs)
      modelInfos.sort()
      modelParamsAndHashs = cjDAO.modelsGetParams(newModelIDs)
      modelParamsAndHashs.sort()

      for (mResult, mParamsAndHash) in itertools.izip(modelInfos,
                                                      modelParamsAndHashs):
        modelID = mResult.modelId
        assert (modelID == mParamsAndHash.modelId)

        # Update our cache of IDs and update counters
        self._modelIDCtrDict[modelID] = curModelIDCtrDict[modelID]
        self._modelIDCtrList.append([modelID, curModelIDCtrDict[modelID]])

        # Tell the Hypersearch implementation of the new model
        results = mResult.results
        if results is not None:
          results = json.loads(mResult.results)

        self._hs.recordModelProgress(modelID=modelID,
            modelParams=json.loads(mParamsAndHash.params),
            modelParamsHash=mParamsAndHash.engParamsHash,
            results=results,
            completed=(mResult.status == cjDAO.STATUS_COMPLETED),
            completionReason=(mResult.completionReason),
            matured=mResult.engMatured,
            numRecords=mResult.numRecords)

      # Keep our list sorted
      self._modelIDCtrList.sort()
python
valid
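A minimal Python 3 sketch of the changed-counter filter in _processUpdatedModels above; the original is Python 2 (itertools.izip, xrange, list-returning filter), and the model IDs and counters here are illustrative:

# Both lists hold sorted (modelID, updateCounter) pairs; an entry has
# "changed" when its cached counter no longer matches the current one.
current = [(1, 5), (2, 3), (3, 7)]
cached = [(1, 5), (2, 2), (3, 7)]

changed = [
    (idx, cur, old)
    for idx, (cur, old) in enumerate(zip(current, cached))
    if cur[1] != old[1]
]
print(changed)  # [(1, (2, 3), (2, 2))] -- model 2's counter moved from 2 to 3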
riverrun/drat
drat/analysis.py
https://github.com/riverrun/drat/blob/50cbbf69c022b6ca6641cd55386813b0695c21f5/drat/analysis.py#L60-L72
def run_check(self, data): """Check for uncommon words and difficult words in file.""" if not data: sys.exit(1) data, sentences, chars, num_words = self.pre_check(data) w_dict = Counter(data) uniq_len, uncommon, uncom_len = self.gsl(w_dict) non_dchall_set = Counter({word: count for word, count in w_dict.items() if word and word not in self.dale_chall_words}) diff_count = sum(non_dchall_set.values()) dc_score = round(self.dale_chall(diff_count, num_words, sentences), 1) cli_score = round(self.coleman_liau(chars, num_words, sentences), 1) return uncommon, uncom_len, uniq_len, dc_score, cli_score
[ "def", "run_check", "(", "self", ",", "data", ")", ":", "if", "not", "data", ":", "sys", ".", "exit", "(", "1", ")", "data", ",", "sentences", ",", "chars", ",", "num_words", "=", "self", ".", "pre_check", "(", "data", ")", "w_dict", "=", "Counter", "(", "data", ")", "uniq_len", ",", "uncommon", ",", "uncom_len", "=", "self", ".", "gsl", "(", "w_dict", ")", "non_dchall_set", "=", "Counter", "(", "{", "word", ":", "count", "for", "word", ",", "count", "in", "w_dict", ".", "items", "(", ")", "if", "word", "and", "word", "not", "in", "self", ".", "dale_chall_words", "}", ")", "diff_count", "=", "sum", "(", "non_dchall_set", ".", "values", "(", ")", ")", "dc_score", "=", "round", "(", "self", ".", "dale_chall", "(", "diff_count", ",", "num_words", ",", "sentences", ")", ",", "1", ")", "cli_score", "=", "round", "(", "self", ".", "coleman_liau", "(", "chars", ",", "num_words", ",", "sentences", ")", ",", "1", ")", "return", "uncommon", ",", "uncom_len", ",", "uniq_len", ",", "dc_score", ",", "cli_score" ]
Check for uncommon words and difficult words in file.
[ "Check", "for", "uncommon", "words", "and", "difficult", "words", "in", "file", "." ]
python
train
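run_check blends a word-frequency check with two readability formulas. The exact helpers (dale_chall, coleman_liau) are not shown, so here is a hedged, self-contained sketch of the Coleman-Liau index using the textbook coefficients; the sample counts are illustrative:

def coleman_liau(chars, num_words, sentences):
    # L = letters per 100 words, S = sentences per 100 words
    L = chars / num_words * 100
    S = sentences / num_words * 100
    return 0.0588 * L - 0.296 * S - 15.8

print(round(coleman_liau(chars=730, num_words=150, sentences=8), 1))  # 11.2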
synw/goerr
goerr/messages.py
https://github.com/synw/goerr/blob/08b3809d6715bffe26899a769d96fa5de8573faf/goerr/messages.py#L18-L25
def error(self, i: int=None) -> str: """ Returns an error message """ head = "[" + colors.red("error") + "]" if i is not None: head = str(i) + " " + head return head
[ "def", "error", "(", "self", ",", "i", ":", "int", "=", "None", ")", "->", "str", ":", "head", "=", "\"[\"", "+", "colors", ".", "red", "(", "\"error\"", ")", "+", "\"]\"", "if", "i", "is", "not", "None", ":", "head", "=", "str", "(", "i", ")", "+", "\" \"", "+", "head", "return", "head" ]
Returns an error message
[ "Returns", "an", "error", "message" ]
python
train
peerplays-network/python-peerplays
peerplays/cli/bookie.py
https://github.com/peerplays-network/python-peerplays/blob/188f04238e7e21d5f73e9b01099eea44289ef6b7/peerplays/cli/bookie.py#L62-L68
def bmgs(ctx, event): """ [bookie] List betting market groups for an event :param str event: Event id """ eg = Event(event, peerplays_instance=ctx.peerplays) click.echo(pretty_print(eg.bettingmarketgroups, ctx=ctx))
[ "def", "bmgs", "(", "ctx", ",", "event", ")", ":", "eg", "=", "Event", "(", "event", ",", "peerplays_instance", "=", "ctx", ".", "peerplays", ")", "click", ".", "echo", "(", "pretty_print", "(", "eg", ".", "bettingmarketgroups", ",", "ctx", "=", "ctx", ")", ")" ]
[bookie] List betting market groups for an event :param str event: Event id
[ "[", "bookie", "]", "List", "betting", "market", "groups", "for", "an", "event" ]
python
train
Turbo87/aerofiles
aerofiles/igc/writer.py
https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L552-L607
def write_task_point(self, latitude=None, longitude=None, text='', distance_min=None, distance_max=None, bearing1=None, bearing2=None): """ Write a task declaration point:: writer.write_task_point( latitude=(51 + 7.345 / 60.), longitude=(6 + 24.765 / 60.), text='Meiersberg', ) # -> C5107345N00624765EMeiersberg If no ``latitude`` or ``longitude`` is passed, the fields will be filled with zeros (i.e. unknown coordinates). This however should only be used for ``TAKEOFF`` and ``LANDING`` points. For area tasks there are some additional parameters that can be used to specify the relevant areas:: writer.write_task_point( -(12 + 32.112 / 60.), -(178 + .001 / 60.), 'TURN AREA', distance_min=12.0, distance_max=32.0, bearing1=122.0, bearing2=182.0, ) # -> C1232112S17800001W00120000032000122000182000TURN AREA :param latitude: latitude of the point (between -90 and 90 degrees) :param longitude: longitude of the point (between -180 and 180 degrees) :param text: type and/or name of the waypoint (e.g. ``TAKEOFF``, ``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``) """ latitude = self.format_latitude(latitude) longitude = self.format_longitude(longitude) record = latitude + longitude if None not in [distance_min, distance_max, bearing1, bearing2]: record += '%04d' % int(distance_min) record += '%03d' % int((distance_min - int(distance_min)) * 1000) record += '%04d' % int(distance_max) record += '%03d' % int((distance_max - int(distance_max)) * 1000) record += '%03d' % int(bearing1) record += '%03d' % int((bearing1 - int(bearing1)) * 1000) record += '%03d' % int(bearing2) record += '%03d' % int((bearing2 - int(bearing2)) * 1000) if text: record += text self.write_record('C', record)
[ "def", "write_task_point", "(", "self", ",", "latitude", "=", "None", ",", "longitude", "=", "None", ",", "text", "=", "''", ",", "distance_min", "=", "None", ",", "distance_max", "=", "None", ",", "bearing1", "=", "None", ",", "bearing2", "=", "None", ")", ":", "latitude", "=", "self", ".", "format_latitude", "(", "latitude", ")", "longitude", "=", "self", ".", "format_longitude", "(", "longitude", ")", "record", "=", "latitude", "+", "longitude", "if", "None", "not", "in", "[", "distance_min", ",", "distance_max", ",", "bearing1", ",", "bearing2", "]", ":", "record", "+=", "'%04d'", "%", "int", "(", "distance_min", ")", "record", "+=", "'%03d'", "%", "int", "(", "(", "distance_min", "-", "int", "(", "distance_min", ")", ")", "*", "1000", ")", "record", "+=", "'%04d'", "%", "int", "(", "distance_max", ")", "record", "+=", "'%03d'", "%", "int", "(", "(", "distance_max", "-", "int", "(", "distance_max", ")", ")", "*", "1000", ")", "record", "+=", "'%03d'", "%", "int", "(", "bearing1", ")", "record", "+=", "'%03d'", "%", "int", "(", "(", "bearing1", "-", "int", "(", "bearing1", ")", ")", "*", "1000", ")", "record", "+=", "'%03d'", "%", "int", "(", "bearing2", ")", "record", "+=", "'%03d'", "%", "int", "(", "(", "bearing2", "-", "int", "(", "bearing2", ")", ")", "*", "1000", ")", "if", "text", ":", "record", "+=", "text", "self", ".", "write_record", "(", "'C'", ",", "record", ")" ]
Write a task declaration point:: writer.write_task_point( latitude=(51 + 7.345 / 60.), longitude=(6 + 24.765 / 60.), text='Meiersberg', ) # -> C5107345N00624765EMeiersberg If no ``latitude`` or ``longitude`` is passed, the fields will be filled with zeros (i.e. unknown coordinates). This however should only be used for ``TAKEOFF`` and ``LANDING`` points. For area tasks there are some additional parameters that can be used to specify the relevant areas:: writer.write_task_point( -(12 + 32.112 / 60.), -(178 + .001 / 60.), 'TURN AREA', distance_min=12.0, distance_max=32.0, bearing1=122.0, bearing2=182.0, ) # -> C1232112S17800001W00120000032000122000182000TURN AREA :param latitude: latitude of the point (between -90 and 90 degrees) :param longitude: longitude of the point (between -180 and 180 degrees) :param text: type and/or name of the waypoint (e.g. ``TAKEOFF``, ``START``, ``TURN 1``, ``TURN 2``, ``FINISH`` or ``LANDING``)
[ "Write", "a", "task", "declaration", "point", "::" ]
python
train
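The C-record examples imply a fixed-width coordinate encoding. A hypothetical stand-in for the writer's format_latitude helper (the real implementation may differ) that reproduces the 5107345N fragment from the docstring:

def format_latitude(latitude):
    # DDMMmmmH: 2-digit degrees, decimal minutes x 1000 (5 digits), hemisphere
    if latitude is None:
        return '0000000N'  # unknown coordinate, as the docstring describes
    hemisphere = 'N' if latitude >= 0 else 'S'
    latitude = abs(latitude)
    degrees = int(latitude)
    thousandths = round((latitude - degrees) * 60 * 1000)
    return '%02d%05d%s' % (degrees, thousandths, hemisphere)

print(format_latitude(51 + 7.345 / 60.))  # 5107345N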
rocky/python3-trepan
trepan/interfaces/server.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/interfaces/server.py#L61-L78
def confirm(self, prompt, default): """ Called when a dangerous action is about to be done to make sure it's okay. `prompt' is printed; user response is returned.""" while True: try: self.write_confirm(prompt, default) reply = self.readline('').strip().lower() except EOFError: return default if reply in ('y', 'yes'): return True elif reply in ('n', 'no'): return False else: self.msg("Please answer y or n.") pass pass return default
[ "def", "confirm", "(", "self", ",", "prompt", ",", "default", ")", ":", "while", "True", ":", "try", ":", "self", ".", "write_confirm", "(", "prompt", ",", "default", ")", "reply", "=", "self", ".", "readline", "(", "''", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "except", "EOFError", ":", "return", "default", "if", "reply", "in", "(", "'y'", ",", "'yes'", ")", ":", "return", "True", "elif", "reply", "in", "(", "'n'", ",", "'no'", ")", ":", "return", "False", "else", ":", "self", ".", "msg", "(", "\"Please answer y or n.\"", ")", "pass", "pass", "return", "default" ]
Called when a dangerous action is about to be done to make sure it's okay. `prompt' is printed; user response is returned.
[ "Called", "when", "a", "dangerous", "action", "is", "about", "to", "be", "done", "to", "make", "sure", "it", "s", "okay", ".", "prompt", "is", "printed", ";", "user", "response", "is", "returned", "." ]
python
test
dev-pipeline/dev-pipeline-core
lib/devpipeline_core/command.py
https://github.com/dev-pipeline/dev-pipeline-core/blob/fa40c050a56202485070b0300bb8695e9388c34f/lib/devpipeline_core/command.py#L53-L66
def set_version(self, version): """ Add the --version string with appropriate output. Arguments: version - the version of whatever provides the command """ self.parser.add_argument( "--version", action="version", version="%(prog)s {} (core {})".format( version, devpipeline_core.version.STRING ), )
[ "def", "set_version", "(", "self", ",", "version", ")", ":", "self", ".", "parser", ".", "add_argument", "(", "\"--version\"", ",", "action", "=", "\"version\"", ",", "version", "=", "\"%(prog)s {} (core {})\"", ".", "format", "(", "version", ",", "devpipeline_core", ".", "version", ".", "STRING", ")", ",", ")" ]
Add the --version string with appropriate output. Arguments: version - the version of whatever provides the command
[ "Add", "the", "--", "version", "string", "with", "appropriate", "output", "." ]
python
train
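set_version simply registers argparse's built-in version action. A standalone sketch of the same pattern; the program name and version strings are illustrative:

import argparse

parser = argparse.ArgumentParser(prog='dev-pipeline')
parser.add_argument(
    '--version',
    action='version',
    version='%(prog)s 0.5.0 (core 0.5.0)',
)
# invoking with --version prints 'dev-pipeline 0.5.0 (core 0.5.0)' and exits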
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewprofiletoolbar.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewprofiletoolbar.py#L218-L237
def exportProfile(self, profile, filename=None): """ Exports this toolbar to the given filename. :param profile | <XViewProfile> filename | <str> || None """ if not filename: filename = QFileDialog.getSaveFileName(self, 'Export Profile', '', 'XML Files (*.xml)') if type(filename) == tuple: filename = nativestring(filename[0]) if not filename: return False profile.save(filename) return True
[ "def", "exportProfile", "(", "self", ",", "profile", ",", "filename", "=", "None", ")", ":", "if", "not", "filename", ":", "filename", "=", "QFileDialog", ".", "getSaveFileName", "(", "self", ",", "'Export Profile'", ",", "''", ",", "'XML Files (*.xml)'", ")", "if", "type", "(", "filename", ")", "==", "tuple", ":", "filename", "=", "nativestring", "(", "filename", "[", "0", "]", ")", "if", "not", "filename", ":", "return", "False", "profile", ".", "save", "(", "filename", ")", "return", "True" ]
Exports this toolbar to the given filename. :param profile | <XViewProfile> filename | <str> || None
[ "Exports", "this", "toolbar", "to", "the", "given", "filename", ".", ":", "param", "profile", "|", "<XViewProfile", ">", "filename", "|", "<str", ">", "||", "None" ]
python
train
BlueBrain/NeuroM
neurom/core/tree.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/core/tree.py#L66-L72
def ipreorder(self): '''Depth-first pre-order iteration of tree nodes''' children = deque((self, )) while children: cur_node = children.pop() children.extend(reversed(cur_node.children)) yield cur_node
[ "def", "ipreorder", "(", "self", ")", ":", "children", "=", "deque", "(", "(", "self", ",", ")", ")", "while", "children", ":", "cur_node", "=", "children", ".", "pop", "(", ")", "children", ".", "extend", "(", "reversed", "(", "cur_node", ".", "children", ")", ")", "yield", "cur_node" ]
Depth-first pre-order iteration of tree nodes
[ "Depth", "-", "first", "pre", "-", "order", "iteration", "of", "tree", "nodes" ]
python
train
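The deque-based scheme above generalises to any tree: pop from the right, push children reversed so the leftmost child is visited first. A self-contained sketch with a toy node type:

from collections import deque

class Node:
    def __init__(self, value, children=()):
        self.value = value
        self.children = list(children)

def ipreorder(root):
    # depth-first pre-order, same traversal as the method above
    stack = deque((root,))
    while stack:
        node = stack.pop()
        stack.extend(reversed(node.children))
        yield node

tree = Node(1, [Node(2, [Node(4)]), Node(3)])
print([n.value for n in ipreorder(tree)])  # [1, 2, 4, 3]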
python-diamond/Diamond
src/collectors/jolokia/jolokia.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/jolokia/jolokia.py#L213-L238
def _list_request(self): """Returns a dictionary with JMX domain names as keys""" try: # https://jolokia.org/reference/html/protocol.html # # A maxDepth of 1 restricts the return value to a map with the JMX # domains as keys. The values of the maps don't have any meaning # and are dummy values. # # maxCollectionSize=0 means "unlimited". This works around an issue # prior to Jolokia 1.3 where results were truncated at 1000 # url = "http://%s:%s/%s%s?maxDepth=1&maxCollectionSize=0" % ( self.config['host'], self.config['port'], self.jolokia_path, self.LIST_URL) # need some time to process the downloaded metrics, so that's why # timeout is lower than the interval. timeout = max(2, float(self.config['interval']) * 2 / 3) with closing(urllib2.urlopen(self._create_request(url), timeout=timeout)) as response: return self._read_json(response) except (urllib2.HTTPError, ValueError) as e: self.log.error('Unable to read JSON response: %s', str(e)) return {}
[ "def", "_list_request", "(", "self", ")", ":", "try", ":", "# https://jolokia.org/reference/html/protocol.html", "#", "# A maxDepth of 1 restricts the return value to a map with the JMX", "# domains as keys. The values of the maps don't have any meaning", "# and are dummy values.", "#", "# maxCollectionSize=0 means \"unlimited\". This works around an issue", "# prior to Jolokia 1.3 where results were truncated at 1000", "#", "url", "=", "\"http://%s:%s/%s%s?maxDepth=1&maxCollectionSize=0\"", "%", "(", "self", ".", "config", "[", "'host'", "]", ",", "self", ".", "config", "[", "'port'", "]", ",", "self", ".", "jolokia_path", ",", "self", ".", "LIST_URL", ")", "# need some time to process the downloaded metrics, so that's why", "# timeout is lower than the interval.", "timeout", "=", "max", "(", "2", ",", "float", "(", "self", ".", "config", "[", "'interval'", "]", ")", "*", "2", "/", "3", ")", "with", "closing", "(", "urllib2", ".", "urlopen", "(", "self", ".", "_create_request", "(", "url", ")", ",", "timeout", "=", "timeout", ")", ")", "as", "response", ":", "return", "self", ".", "_read_json", "(", "response", ")", "except", "(", "urllib2", ".", "HTTPError", ",", "ValueError", ")", "as", "e", ":", "self", ".", "log", ".", "error", "(", "'Unable to read JSON response: %s'", ",", "str", "(", "e", ")", ")", "return", "{", "}" ]
Returns a dictionary with JMX domain names as keys
[ "Returns", "a", "dictionary", "with", "JMX", "domain", "names", "as", "keys" ]
python
train
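_list_request targets Python 2 (urllib2). A hedged Python 3 translation of the same request pattern; the host, port, and path values are illustrative:

import json
from contextlib import closing
from urllib.error import HTTPError
from urllib.request import urlopen

def list_jmx_domains(host, port, path='jolokia/', interval=30.0):
    # maxDepth=1 keeps only domain keys; maxCollectionSize=0 means unlimited
    url = 'http://%s:%s/%slist?maxDepth=1&maxCollectionSize=0' % (host, port, path)
    # leave a third of the interval free to process the downloaded metrics
    timeout = max(2, float(interval) * 2 / 3)
    try:
        with closing(urlopen(url, timeout=timeout)) as response:
            return json.loads(response.read().decode('utf-8'))
    except (HTTPError, ValueError) as exc:
        print('Unable to read JSON response: %s' % exc)
        return {}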
SBRG/ssbio
ssbio/protein/sequence/properties/scratch.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/scratch.py#L147-L150
def accpro_results(self): """Parse the ACCpro output file and return a dict of secondary structure compositions. """ return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_accpro)
[ "def", "accpro_results", "(", "self", ")", ":", "return", "ssbio", ".", "protein", ".", "sequence", ".", "utils", ".", "fasta", ".", "load_fasta_file_as_dict_of_seqs", "(", "self", ".", "out_accpro", ")" ]
Parse the ACCpro output file and return a dict of secondary structure compositions.
[ "Parse", "the", "ACCpro", "output", "file", "and", "return", "a", "dict", "of", "secondary", "structure", "compositions", "." ]
python
train
koordinates/python-client
koordinates/layers.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L179-L184
def is_draft_version(self): """ Return if this version is the draft version of a layer """ pub_ver = getattr(self, 'published_version', None) latest_ver = getattr(self, 'latest_version', None) this_ver = getattr(self, 'this_version', None) return this_ver and latest_ver and (this_ver == latest_ver) and (latest_ver != pub_ver)
[ "def", "is_draft_version", "(", "self", ")", ":", "pub_ver", "=", "getattr", "(", "self", ",", "'published_version'", ",", "None", ")", "latest_ver", "=", "getattr", "(", "self", ",", "'latest_version'", ",", "None", ")", "this_ver", "=", "getattr", "(", "self", ",", "'this_version'", ",", "None", ")", "return", "this_ver", "and", "latest_ver", "and", "(", "this_ver", "==", "latest_ver", ")", "and", "(", "latest_ver", "!=", "pub_ver", ")" ]
Return whether this version is the draft version of a layer
[ "Return", "if", "this", "version", "is", "the", "draft", "version", "of", "a", "layer" ]
python
train
improbable-research/keanu
keanu-python/keanu/vertex/generated.py
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/vertex/generated.py#L512-L520
def Slice(input_vertex: vertex_constructor_param_types, dimension: int, index: int, label: Optional[str]=None) -> Vertex: """ Takes the slice along a given dimension and index of a vertex :param input_vertex: the input vertex :param dimension: the dimension to extract along :param index: the index of extraction """ return Double(context.jvm_view().SliceVertex, label, cast_to_double_vertex(input_vertex), cast_to_integer(dimension), cast_to_integer(index))
[ "def", "Slice", "(", "input_vertex", ":", "vertex_constructor_param_types", ",", "dimension", ":", "int", ",", "index", ":", "int", ",", "label", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Vertex", ":", "return", "Double", "(", "context", ".", "jvm_view", "(", ")", ".", "SliceVertex", ",", "label", ",", "cast_to_double_vertex", "(", "input_vertex", ")", ",", "cast_to_integer", "(", "dimension", ")", ",", "cast_to_integer", "(", "index", ")", ")" ]
Takes the slice along a given dimension and index of a vertex :param input_vertex: the input vertex :param dimension: the dimension to extract along :param index: the index of extraction
[ "Takes", "the", "slice", "along", "a", "given", "dimension", "and", "index", "of", "a", "vertex", ":", "param", "input_vertex", ":", "the", "input", "vertex", ":", "param", "dimension", ":", "the", "dimension", "to", "extract", "along", ":", "param", "index", ":", "the", "index", "of", "extraction" ]
python
train
hugapi/hug
examples/static_serve.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/examples/static_serve.py#L12-L48
def setup(api=None): """Sets up and fills test directory for serving. Using different filetypes to see how they are dealt with. The tempoary directory will clean itself up. """ global tmp_dir_object tmp_dir_object = tempfile.TemporaryDirectory() dir_name = tmp_dir_object.name dir_a = os.path.join(dir_name, "a") os.mkdir(dir_a) dir_b = os.path.join(dir_name, "b") os.mkdir(dir_b) # populate directory a with text files file_list = [ ["hi.txt", """Hi World!"""], ["hi.html", """<strong>Hi World!</strong>"""], ["hello.html", """ <img src='/static/b/smile.png'</img> pop-up <script src='/static/a/hi.js'></script>"""], ["hi.js", """alert('Hi World')""" ] ] for f in file_list: with open(os.path.join(dir_a, f[0]), mode="wt") as fo: fo.write(f[1]) # populate directory b with binary file image = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\n\x00\x00\x00\n\x08\x02\x00\x00\x00\x02PX\xea\x00\x00\x006IDAT\x18\xd3c\xfc\xff\xff?\x03n\xc0\xc4\x80\x170100022222\xc2\x85\x90\xb9\x04t3\x92`7\xb2\x15D\xeb\xc6\xe34\xa8n4c\xe1F\x120\x1c\x00\xc6z\x12\x1c\x8cT\xf2\x1e\x00\x00\x00\x00IEND\xaeB`\x82' with open(os.path.join(dir_b, "smile.png"), mode="wb") as fo: fo.write(image)
[ "def", "setup", "(", "api", "=", "None", ")", ":", "global", "tmp_dir_object", "tmp_dir_object", "=", "tempfile", ".", "TemporaryDirectory", "(", ")", "dir_name", "=", "tmp_dir_object", ".", "name", "dir_a", "=", "os", ".", "path", ".", "join", "(", "dir_name", ",", "\"a\"", ")", "os", ".", "mkdir", "(", "dir_a", ")", "dir_b", "=", "os", ".", "path", ".", "join", "(", "dir_name", ",", "\"b\"", ")", "os", ".", "mkdir", "(", "dir_b", ")", "# populate directory a with text files", "file_list", "=", "[", "[", "\"hi.txt\"", ",", "\"\"\"Hi World!\"\"\"", "]", ",", "[", "\"hi.html\"", ",", "\"\"\"<strong>Hi World!</strong>\"\"\"", "]", ",", "[", "\"hello.html\"", ",", "\"\"\"\n <img src='/static/b/smile.png'</img>\n pop-up\n <script src='/static/a/hi.js'></script>\"\"\"", "]", ",", "[", "\"hi.js\"", ",", "\"\"\"alert('Hi World')\"\"\"", "]", "]", "for", "f", "in", "file_list", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dir_a", ",", "f", "[", "0", "]", ")", ",", "mode", "=", "\"wt\"", ")", "as", "fo", ":", "fo", ".", "write", "(", "f", "[", "1", "]", ")", "# populate directory b with binary file", "image", "=", "b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\n\\x00\\x00\\x00\\n\\x08\\x02\\x00\\x00\\x00\\x02PX\\xea\\x00\\x00\\x006IDAT\\x18\\xd3c\\xfc\\xff\\xff?\\x03n\\xc0\\xc4\\x80\\x170100022222\\xc2\\x85\\x90\\xb9\\x04t3\\x92`7\\xb2\\x15D\\xeb\\xc6\\xe34\\xa8n4c\\xe1F\\x120\\x1c\\x00\\xc6z\\x12\\x1c\\x8cT\\xf2\\x1e\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82'", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dir_b", ",", "\"smile.png\"", ")", ",", "mode", "=", "\"wb\"", ")", "as", "fo", ":", "fo", ".", "write", "(", "image", ")" ]
Sets up and fills test directory for serving. Using different filetypes to see how they are dealt with. The temporary directory will clean itself up.
[ "Sets", "up", "and", "fills", "test", "directory", "for", "serving", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/ldp/ldp_holder/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/ldp/ldp_holder/__init__.py#L214-L235
def _set_ldp_hello_timeout_basic(self, v, load=False): """ Setter method for ldp_hello_timeout_basic, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/ldp/ldp_holder/ldp_hello_timeout_basic (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_ldp_hello_timeout_basic is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ldp_hello_timeout_basic() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(15), is_leaf=True, yang_name="ldp-hello-timeout-basic", rest_name="hello-timeout-link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'In seconds (2-65535, default 15)', u'cli-full-no': None, u'alt-name': u'hello-timeout-link'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ldp_hello_timeout_basic must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(15), is_leaf=True, yang_name="ldp-hello-timeout-basic", rest_name="hello-timeout-link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'In seconds (2-65535, default 15)', u'cli-full-no': None, u'alt-name': u'hello-timeout-link'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""", }) self.__ldp_hello_timeout_basic = t if hasattr(self, '_set'): self._set()
[ "def", "_set_ldp_hello_timeout_basic", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "RestrictedClassType", "(", "base_type", "=", "long", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "'0..4294967295'", "]", "}", ",", "int_size", "=", "32", ")", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "u'2..65535'", "]", "}", ")", ",", "default", "=", "RestrictedClassType", "(", "base_type", "=", "long", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "'0..4294967295'", "]", "}", ",", "int_size", "=", "32", ")", "(", "15", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"ldp-hello-timeout-basic\"", ",", "rest_name", "=", "\"hello-timeout-link\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-full-command'", ":", "None", ",", "u'info'", ":", "u'In seconds (2-65535, default 15)'", ",", "u'cli-full-no'", ":", "None", ",", "u'alt-name'", ":", "u'hello-timeout-link'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mpls'", ",", "defining_module", "=", "'brocade-mpls'", ",", "yang_type", "=", "'uint32'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"ldp_hello_timeout_basic must be of a type compatible with uint32\"\"\"", ",", "'defined-type'", ":", "\"uint32\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(15), is_leaf=True, yang_name=\"ldp-hello-timeout-basic\", rest_name=\"hello-timeout-link\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'In seconds (2-65535, default 15)', u'cli-full-no': None, u'alt-name': u'hello-timeout-link'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__ldp_hello_timeout_basic", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for ldp_hello_timeout_basic, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/ldp/ldp_holder/ldp_hello_timeout_basic (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_ldp_hello_timeout_basic is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ldp_hello_timeout_basic() directly.
[ "Setter", "method", "for", "ldp_hello_timeout_basic", "mapped", "from", "YANG", "variable", "/", "mpls_config", "/", "router", "/", "mpls", "/", "mpls_cmds_holder", "/", "ldp", "/", "ldp_holder", "/", "ldp_hello_timeout_basic", "(", "uint32", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_ldp_hello_timeout_basic", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_ldp_hello_timeout_basic", "()", "directly", "." ]
python
train
juju/charm-helpers
charmhelpers/core/hookenv.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/hookenv.py#L616-L619
def metadata(): """Get the current charm metadata.yaml contents as a python object""" with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: return yaml.safe_load(md)
[ "def", "metadata", "(", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "charm_dir", "(", ")", ",", "'metadata.yaml'", ")", ")", "as", "md", ":", "return", "yaml", ".", "safe_load", "(", "md", ")" ]
Get the current charm metadata.yaml contents as a python object
[ "Get", "the", "current", "charm", "metadata", ".", "yaml", "contents", "as", "a", "python", "object" ]
python
train
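The helper is a one-liner, but the choice of yaml.safe_load is the point: it constructs plain data only, refusing the arbitrary-object tags an unsafe loader would instantiate. A minimal sketch:

import yaml

def read_metadata(path):
    # safe_load parses scalars, lists, and maps; it rejects !!python/object tags
    with open(path) as handle:
        return yaml.safe_load(handle)

# e.g. read_metadata('metadata.yaml') -> dict of charm metadata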
pypa/pipenv
pipenv/vendor/requests/sessions.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/sessions.py#L276-L315
def rebuild_proxies(self, prepared_request, proxies): """This method re-evaluates the proxy configuration by considering the environment variables. If we are redirected to a URL covered by NO_PROXY, we strip the proxy configuration. Otherwise, we set missing proxy keys for this URL (in case they were stripped by a previous redirect). This method also replaces the Proxy-Authorization header where necessary. :rtype: dict """ proxies = proxies if proxies is not None else {} headers = prepared_request.headers url = prepared_request.url scheme = urlparse(url).scheme new_proxies = proxies.copy() no_proxy = proxies.get('no_proxy') bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy) if self.trust_env and not bypass_proxy: environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) proxy = environ_proxies.get(scheme, environ_proxies.get('all')) if proxy: new_proxies.setdefault(scheme, proxy) if 'Proxy-Authorization' in headers: del headers['Proxy-Authorization'] try: username, password = get_auth_from_url(new_proxies[scheme]) except KeyError: username, password = None, None if username and password: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return new_proxies
[ "def", "rebuild_proxies", "(", "self", ",", "prepared_request", ",", "proxies", ")", ":", "proxies", "=", "proxies", "if", "proxies", "is", "not", "None", "else", "{", "}", "headers", "=", "prepared_request", ".", "headers", "url", "=", "prepared_request", ".", "url", "scheme", "=", "urlparse", "(", "url", ")", ".", "scheme", "new_proxies", "=", "proxies", ".", "copy", "(", ")", "no_proxy", "=", "proxies", ".", "get", "(", "'no_proxy'", ")", "bypass_proxy", "=", "should_bypass_proxies", "(", "url", ",", "no_proxy", "=", "no_proxy", ")", "if", "self", ".", "trust_env", "and", "not", "bypass_proxy", ":", "environ_proxies", "=", "get_environ_proxies", "(", "url", ",", "no_proxy", "=", "no_proxy", ")", "proxy", "=", "environ_proxies", ".", "get", "(", "scheme", ",", "environ_proxies", ".", "get", "(", "'all'", ")", ")", "if", "proxy", ":", "new_proxies", ".", "setdefault", "(", "scheme", ",", "proxy", ")", "if", "'Proxy-Authorization'", "in", "headers", ":", "del", "headers", "[", "'Proxy-Authorization'", "]", "try", ":", "username", ",", "password", "=", "get_auth_from_url", "(", "new_proxies", "[", "scheme", "]", ")", "except", "KeyError", ":", "username", ",", "password", "=", "None", ",", "None", "if", "username", "and", "password", ":", "headers", "[", "'Proxy-Authorization'", "]", "=", "_basic_auth_str", "(", "username", ",", "password", ")", "return", "new_proxies" ]
This method re-evaluates the proxy configuration by considering the environment variables. If we are redirected to a URL covered by NO_PROXY, we strip the proxy configuration. Otherwise, we set missing proxy keys for this URL (in case they were stripped by a previous redirect). This method also replaces the Proxy-Authorization header where necessary. :rtype: dict
[ "This", "method", "re", "-", "evaluates", "the", "proxy", "configuration", "by", "considering", "the", "environment", "variables", ".", "If", "we", "are", "redirected", "to", "a", "URL", "covered", "by", "NO_PROXY", "we", "strip", "the", "proxy", "configuration", ".", "Otherwise", "we", "set", "missing", "proxy", "keys", "for", "this", "URL", "(", "in", "case", "they", "were", "stripped", "by", "a", "previous", "redirect", ")", "." ]
python
train
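The Proxy-Authorization header that rebuild_proxies re-adds is plain HTTP Basic auth. A hypothetical stand-in for the _basic_auth_str helper (the real one lives in requests.auth and handles more encoding edge cases):

from base64 import b64encode

def basic_auth_str(username, password):
    # 'Basic ' + base64('user:pass'), the header value set above
    token = b64encode(('%s:%s' % (username, password)).encode('latin1'))
    return 'Basic ' + token.decode('ascii')

print(basic_auth_str('user', 'pass'))  # Basic dXNlcjpwYXNz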
ethereum/py-trie
trie/smt.py
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L144-L186
def update(self, key: bytes, value: bytes, node_updates: Sequence[Hash32]): """ Merge an update for another key with the one we are tracking internally. :param key: keypath of the update we are processing :param value: value of the update we are processing :param node_updates: sequence of sibling nodes (in root->leaf order) must be at least as large as the first diverging key in the keypath """ validate_is_bytes(key) validate_length(key, self._key_size) # Path diff is the logical XOR of the updated key and this account path_diff = (to_int(self.key) ^ to_int(key)) # Same key (diff of 0), update the tracked value if path_diff == 0: self._value = value # No need to update branch else: # Find the first mismatched bit between keypaths. This is # where the branch point occurs, and we should update the # sibling node in the source branch at the branch point. # NOTE: Keys are in MSB->LSB (root->leaf) order. # Node lists are in root->leaf order. # Be sure to convert between them effectively. for bit in reversed(range(self._branch_size)): if path_diff & (1 << bit) > 0: branch_point = (self._branch_size - 1) - bit break # NOTE: node_updates only has to be as long as necessary # to obtain the update. This allows an optimization # of pruning updates to the maximum possible depth # that would be required to update, which may be # significantly smaller than the tree depth. if len(node_updates) <= branch_point: raise ValidationError("Updated node list is not deep enough") # Update sibling node in the branch where our key differs from the update self._branch[branch_point] = node_updates[branch_point]
[ "def", "update", "(", "self", ",", "key", ":", "bytes", ",", "value", ":", "bytes", ",", "node_updates", ":", "Sequence", "[", "Hash32", "]", ")", ":", "validate_is_bytes", "(", "key", ")", "validate_length", "(", "key", ",", "self", ".", "_key_size", ")", "# Path diff is the logical XOR of the updated key and this account", "path_diff", "=", "(", "to_int", "(", "self", ".", "key", ")", "^", "to_int", "(", "key", ")", ")", "# Same key (diff of 0), update the tracked value", "if", "path_diff", "==", "0", ":", "self", ".", "_value", "=", "value", "# No need to update branch", "else", ":", "# Find the first mismatched bit between keypaths. This is", "# where the branch point occurs, and we should update the", "# sibling node in the source branch at the branch point.", "# NOTE: Keys are in MSB->LSB (root->leaf) order.", "# Node lists are in root->leaf order.", "# Be sure to convert between them effectively.", "for", "bit", "in", "reversed", "(", "range", "(", "self", ".", "_branch_size", ")", ")", ":", "if", "path_diff", "&", "(", "1", "<<", "bit", ")", ">", "0", ":", "branch_point", "=", "(", "self", ".", "_branch_size", "-", "1", ")", "-", "bit", "break", "# NOTE: node_updates only has to be as long as necessary", "# to obtain the update. This allows an optimization", "# of pruning updates to the maximum possible depth", "# that would be required to update, which may be", "# significantly smaller than the tree depth.", "if", "len", "(", "node_updates", ")", "<=", "branch_point", ":", "raise", "ValidationError", "(", "\"Updated node list is not deep enough\"", ")", "# Update sibling node in the branch where our key differs from the update", "self", ".", "_branch", "[", "branch_point", "]", "=", "node_updates", "[", "branch_point", "]" ]
Merge an update for another key with the one we are tracking internally. :param key: keypath of the update we are processing :param value: value of the update we are processing :param node_updates: sequence of sibling nodes (in root->leaf order) must be at least as large as the first diverging key in the keypath
[ "Merge", "an", "update", "for", "another", "key", "with", "the", "one", "we", "are", "tracking", "internally", "." ]
python
train
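The branch point is the first differing bit of the XOR of the two keypaths, counted from the root (MSB). The scan in isolation, using 1-byte keys for illustration:

def branch_point(key_a: bytes, key_b: bytes) -> int:
    # root->leaf index of the first bit where the two keypaths diverge
    branch_size = len(key_a) * 8
    diff = int.from_bytes(key_a, 'big') ^ int.from_bytes(key_b, 'big')
    for bit in reversed(range(branch_size)):
        if diff & (1 << bit):
            return (branch_size - 1) - bit
    raise ValueError('keys are identical')

print(branch_point(b'\x80', b'\x00'))  # 0: differ at the very first bit
print(branch_point(b'\x01', b'\x00'))  # 7: differ only at the last bit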
CitrineInformatics/pif-dft
dfttopif/parsers/base.py
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/base.py#L296-L304
def get_number_of_atoms(self): """Get the number of atoms in the calculated structure. Returns: Property, where number of atoms is a scalar. """ strc = self.get_output_structure() if not strc: return None return Property(scalars=[Scalar(value=len(strc))], units="/unit cell")
[ "def", "get_number_of_atoms", "(", "self", ")", ":", "strc", "=", "self", ".", "get_output_structure", "(", ")", "if", "not", "strc", ":", "return", "None", "return", "Property", "(", "scalars", "=", "[", "Scalar", "(", "value", "=", "len", "(", "strc", ")", ")", "]", ",", "units", "=", "\"/unit cell\"", ")" ]
Get the number of atoms in the calculated structure. Returns: Property, where number of atoms is a scalar.
[ "Get", "the", "number", "of", "atoms", "in", "the", "calculated", "structure", "." ]
python
train
santoshphilip/eppy
eppy/geometry/surface.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/geometry/surface.py#L89-L98
def angle2vecs(vec1, vec2): """angle between two vectors""" # vector a * vector b = |a|*|b|* cos(angle between vector a and vector b) dot = np.dot(vec1, vec2) vec1_modulus = np.sqrt(np.multiply(vec1, vec1).sum()) vec2_modulus = np.sqrt(np.multiply(vec2, vec2).sum()) if (vec1_modulus * vec2_modulus) == 0: cos_angle = 1 else: cos_angle = dot / (vec1_modulus * vec2_modulus) return math.degrees(acos(cos_angle))
[ "def", "angle2vecs", "(", "vec1", ",", "vec2", ")", ":", "# vector a * vector b = |a|*|b|* cos(angle between vector a and vector b)", "dot", "=", "np", ".", "dot", "(", "vec1", ",", "vec2", ")", "vec1_modulus", "=", "np", ".", "sqrt", "(", "np", ".", "multiply", "(", "vec1", ",", "vec1", ")", ".", "sum", "(", ")", ")", "vec2_modulus", "=", "np", ".", "sqrt", "(", "np", ".", "multiply", "(", "vec2", ",", "vec2", ")", ".", "sum", "(", ")", ")", "if", "(", "vec1_modulus", "*", "vec2_modulus", ")", "==", "0", ":", "cos_angle", "=", "1", "else", ":", "cos_angle", "=", "dot", "/", "(", "vec1_modulus", "*", "vec2_modulus", ")", "return", "math", ".", "degrees", "(", "acos", "(", "cos_angle", ")", ")" ]
angle between two vectors
[ "angle", "between", "two", "vectors" ]
python
train
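A compact numpy restatement of the same computation; the one behavioural addition is clipping the cosine so floating-point round-off cannot push acos outside its domain:

import math
import numpy as np

def angle2vecs(vec1, vec2):
    # angle = acos(a . b / (|a| |b|)), in degrees
    dot = np.dot(vec1, vec2)
    norms = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    cos_angle = 1.0 if norms == 0 else dot / norms
    return math.degrees(math.acos(np.clip(cos_angle, -1.0, 1.0)))

print(angle2vecs([1, 0], [0, 1]))  # 90.0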
ambitioninc/django-query-builder
querybuilder/query.py
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L1583-L1680
def select(self, return_models=False, nest=False, bypass_safe_limit=False, sql=None, sql_args=None): """ Executes the SELECT statement and returns the rows as a list of dictionaries or a list of model instances :type return_models: bool :param return_models: Set to True to return a list of models instead of a list of dictionaries. Defaults to False :type nest: bool :param nest: Set to True to treat all double underscores in keynames as nested data. This will convert all keys with double underscores to dictionaries keyed off of the left side of the underscores. Ex: {"id": 1", "account__id": 1, "account__name": "Name"} becomes {"id": 1, "account": {"id": 1, "name": "Name"}} :type bypass_safe_limit: bool :param bypass_safe_limit: Ignores the safe_limit option even if the safe_limit is enabled :type sql: str or None :param sql: The sql to execute in the SELECT statement. If one is not specified, then the query will use ``self.get_sql()`` :type sql_args: str or None :param sql_args: The sql args to be used in the SELECT statement. If none are specified, then the query wil use ``self.get_args()`` :rtype: list of dict :return: list of dictionaries of the rows """ # Check if we need to set a safe limit if bypass_safe_limit is False: if Query.enable_safe_limit: if self.count() > Query.safe_limit: self.limit(Query.safe_limit) # determine which sql to use if sql is None: sql = self.get_sql() # determine which sql args to use if sql_args is None: sql_args = self.get_args() # get the cursor to execute the query cursor = self.get_cursor() # execute the query cursor.execute(sql, sql_args) # get the results as a list of dictionaries rows = self._fetch_all_as_dict(cursor) # check if models should be returned instead of dictionaries if return_models: # set nesting to true, so the nested models can easily load the data nest = True # build model map of map name to model model_map = {} for join_item in self.joins: model_map[join_item.right_table.field_prefix] = join_item.right_table.model # check if results should be nested if nest: # convert keys with double underscores to dictionaries for row in rows: _row = row.copy() for key, value in _row.items(): set_value_for_keypath(row, key, value, True, '__') if '__' in key: row.pop(key) # create models if needed if return_models: model_class = self.tables[0].model new_rows = [] for row in rows: model = model_class() # assign all non-model keys first because django 1.5 requires # that the model has an id set before setting a property that is # a foreign key for key, value in row.items(): if key not in model_map: setattr(model, key, value) # assign all model instances for key, value in row.items(): if key in model_map: child_model = model_map[key]() for child_key, child_value in value.items(): setattr(child_model, child_key, child_value) value = child_model setattr(model, key, value) new_rows.append(model) rows = new_rows return rows
[ "def", "select", "(", "self", ",", "return_models", "=", "False", ",", "nest", "=", "False", ",", "bypass_safe_limit", "=", "False", ",", "sql", "=", "None", ",", "sql_args", "=", "None", ")", ":", "# Check if we need to set a safe limit", "if", "bypass_safe_limit", "is", "False", ":", "if", "Query", ".", "enable_safe_limit", ":", "if", "self", ".", "count", "(", ")", ">", "Query", ".", "safe_limit", ":", "self", ".", "limit", "(", "Query", ".", "safe_limit", ")", "# determine which sql to use", "if", "sql", "is", "None", ":", "sql", "=", "self", ".", "get_sql", "(", ")", "# determine which sql args to use", "if", "sql_args", "is", "None", ":", "sql_args", "=", "self", ".", "get_args", "(", ")", "# get the cursor to execute the query", "cursor", "=", "self", ".", "get_cursor", "(", ")", "# execute the query", "cursor", ".", "execute", "(", "sql", ",", "sql_args", ")", "# get the results as a list of dictionaries", "rows", "=", "self", ".", "_fetch_all_as_dict", "(", "cursor", ")", "# check if models should be returned instead of dictionaries", "if", "return_models", ":", "# set nesting to true, so the nested models can easily load the data", "nest", "=", "True", "# build model map of map name to model", "model_map", "=", "{", "}", "for", "join_item", "in", "self", ".", "joins", ":", "model_map", "[", "join_item", ".", "right_table", ".", "field_prefix", "]", "=", "join_item", ".", "right_table", ".", "model", "# check if results should be nested", "if", "nest", ":", "# convert keys with double underscores to dictionaries", "for", "row", "in", "rows", ":", "_row", "=", "row", ".", "copy", "(", ")", "for", "key", ",", "value", "in", "_row", ".", "items", "(", ")", ":", "set_value_for_keypath", "(", "row", ",", "key", ",", "value", ",", "True", ",", "'__'", ")", "if", "'__'", "in", "key", ":", "row", ".", "pop", "(", "key", ")", "# create models if needed", "if", "return_models", ":", "model_class", "=", "self", ".", "tables", "[", "0", "]", ".", "model", "new_rows", "=", "[", "]", "for", "row", "in", "rows", ":", "model", "=", "model_class", "(", ")", "# assign all non-model keys first because django 1.5 requires", "# that the model has an id set before setting a property that is", "# a foreign key", "for", "key", ",", "value", "in", "row", ".", "items", "(", ")", ":", "if", "key", "not", "in", "model_map", ":", "setattr", "(", "model", ",", "key", ",", "value", ")", "# assign all model instances", "for", "key", ",", "value", "in", "row", ".", "items", "(", ")", ":", "if", "key", "in", "model_map", ":", "child_model", "=", "model_map", "[", "key", "]", "(", ")", "for", "child_key", ",", "child_value", "in", "value", ".", "items", "(", ")", ":", "setattr", "(", "child_model", ",", "child_key", ",", "child_value", ")", "value", "=", "child_model", "setattr", "(", "model", ",", "key", ",", "value", ")", "new_rows", ".", "append", "(", "model", ")", "rows", "=", "new_rows", "return", "rows" ]
Executes the SELECT statement and returns the rows as a list of dictionaries or a list of model instances :type return_models: bool :param return_models: Set to True to return a list of models instead of a list of dictionaries. Defaults to False :type nest: bool :param nest: Set to True to treat all double underscores in keynames as nested data. This will convert all keys with double underscores to dictionaries keyed off of the left side of the underscores. Ex: {"id": 1, "account__id": 1, "account__name": "Name"} becomes {"id": 1, "account": {"id": 1, "name": "Name"}} :type bypass_safe_limit: bool :param bypass_safe_limit: Ignores the safe_limit option even if the safe_limit is enabled :type sql: str or None :param sql: The sql to execute in the SELECT statement. If one is not specified, then the query will use ``self.get_sql()`` :type sql_args: str or None :param sql_args: The sql args to be used in the SELECT statement. If none are specified, then the query will use ``self.get_args()`` :rtype: list of dict :return: list of dictionaries of the rows
[ "Executes", "the", "SELECT", "statement", "and", "returns", "the", "rows", "as", "a", "list", "of", "dictionaries", "or", "a", "list", "of", "model", "instances" ]
python
train
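The nesting behaviour from the docstring (double underscores become nested dicts) can be sketched without the querybuilder internals; set_value_for_keypath is not shown above, so this is an illustrative equivalent:

def nest_row(row, separator='__'):
    nested = {}
    for key, value in row.items():
        target = nested
        parts = key.split(separator)
        for part in parts[:-1]:
            # walk/create intermediate dicts for each keypath segment
            target = target.setdefault(part, {})
        target[parts[-1]] = value
    return nested

row = {'id': 1, 'account__id': 1, 'account__name': 'Name'}
print(nest_row(row))
# {'id': 1, 'account': {'id': 1, 'name': 'Name'}}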
AshleySetter/optoanalysis
optoanalysis/optoanalysis/optoanalysis.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L770-L827
def calc_gamma_from_energy_autocorrelation_fit(self, GammaGuess=None, silent=False, MakeFig=True, show_fig=True): """ Calculates the total damping, i.e. Gamma, by calculating the energy each point in time. This energy array is then used for the autocorrleation. The autocorrelation is fitted with an exponential relaxation function and the function returns the parameters with errors. Parameters ---------- GammaGuess : float, optional Inital guess for BigGamma (in radians) silent : bool, optional Whether it prints the values fitted or is silent. MakeFig : bool, optional Whether to construct and return the figure object showing the fitting. defaults to True show_fig : bool, optional Whether to show the figure object when it has been created. defaults to True Returns ------- Gamma : ufloat Big Gamma, the total damping in radians fig : matplotlib.figure.Figure object The figure object created showing the autocorrelation of the data with the fit ax : matplotlib.axes.Axes object The axes object created showing the autocorrelation of the data with the fit """ autocorrelation = calc_autocorrelation(self.voltage[:-1]**2*self.OmegaTrap.n**2+(_np.diff(self.voltage)*self.SampleFreq)**2) time = self.time.get_array()[:len(autocorrelation)] if GammaGuess==None: Gamma_Initial = (time[4]-time[0])/(autocorrelation[0]-autocorrelation[4]) else: Gamma_Initial = GammaGuess if MakeFig == True: Params, ParamsErr, fig, ax = fit_autocorrelation( autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig) else: Params, ParamsErr, _ , _ = fit_autocorrelation( autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig) if silent == False: print("\n") print( "Big Gamma: {} +- {}% ".format(Params[0], ParamsErr[0] / Params[0] * 100)) Gamma = _uncertainties.ufloat(Params[0], ParamsErr[0]) if MakeFig == True: return Gamma, fig, ax else: return Gamma, None, None
[ "def", "calc_gamma_from_energy_autocorrelation_fit", "(", "self", ",", "GammaGuess", "=", "None", ",", "silent", "=", "False", ",", "MakeFig", "=", "True", ",", "show_fig", "=", "True", ")", ":", "autocorrelation", "=", "calc_autocorrelation", "(", "self", ".", "voltage", "[", ":", "-", "1", "]", "**", "2", "*", "self", ".", "OmegaTrap", ".", "n", "**", "2", "+", "(", "_np", ".", "diff", "(", "self", ".", "voltage", ")", "*", "self", ".", "SampleFreq", ")", "**", "2", ")", "time", "=", "self", ".", "time", ".", "get_array", "(", ")", "[", ":", "len", "(", "autocorrelation", ")", "]", "if", "GammaGuess", "==", "None", ":", "Gamma_Initial", "=", "(", "time", "[", "4", "]", "-", "time", "[", "0", "]", ")", "/", "(", "autocorrelation", "[", "0", "]", "-", "autocorrelation", "[", "4", "]", ")", "else", ":", "Gamma_Initial", "=", "GammaGuess", "if", "MakeFig", "==", "True", ":", "Params", ",", "ParamsErr", ",", "fig", ",", "ax", "=", "fit_autocorrelation", "(", "autocorrelation", ",", "time", ",", "Gamma_Initial", ",", "MakeFig", "=", "MakeFig", ",", "show_fig", "=", "show_fig", ")", "else", ":", "Params", ",", "ParamsErr", ",", "_", ",", "_", "=", "fit_autocorrelation", "(", "autocorrelation", ",", "time", ",", "Gamma_Initial", ",", "MakeFig", "=", "MakeFig", ",", "show_fig", "=", "show_fig", ")", "if", "silent", "==", "False", ":", "print", "(", "\"\\n\"", ")", "print", "(", "\"Big Gamma: {} +- {}% \"", ".", "format", "(", "Params", "[", "0", "]", ",", "ParamsErr", "[", "0", "]", "/", "Params", "[", "0", "]", "*", "100", ")", ")", "Gamma", "=", "_uncertainties", ".", "ufloat", "(", "Params", "[", "0", "]", ",", "ParamsErr", "[", "0", "]", ")", "if", "MakeFig", "==", "True", ":", "return", "Gamma", ",", "fig", ",", "ax", "else", ":", "return", "Gamma", ",", "None", ",", "None" ]
Calculates the total damping, i.e. Gamma, by calculating the energy at each point in time. This energy array is then used for the autocorrelation. The autocorrelation is fitted with an exponential relaxation function and the function returns the parameters with errors. Parameters ---------- GammaGuess : float, optional Initial guess for BigGamma (in radians) silent : bool, optional Whether it prints the values fitted or is silent. MakeFig : bool, optional Whether to construct and return the figure object showing the fitting. defaults to True show_fig : bool, optional Whether to show the figure object when it has been created. defaults to True Returns ------- Gamma : ufloat Big Gamma, the total damping in radians fig : matplotlib.figure.Figure object The figure object created showing the autocorrelation of the data with the fit ax : matplotlib.axes.Axes object The axes object created showing the autocorrelation of the data with the fit
[ "Calculates", "the", "total", "damping", "i", ".", "e", ".", "Gamma", "by", "calculating", "the", "energy", "each", "point", "in", "time", ".", "This", "energy", "array", "is", "then", "used", "for", "the", "autocorrleation", ".", "The", "autocorrelation", "is", "fitted", "with", "an", "exponential", "relaxation", "function", "and", "the", "function", "returns", "the", "parameters", "with", "errors", "." ]
python
train
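fit_autocorrelation itself is not shown above. A hedged sketch of the underlying idea — fit an exponential relaxation exp(-Gamma*t) to the normalised autocorrelation and read the uncertainty off the covariance — using scipy:

import numpy as np
from scipy.optimize import curve_fit

def fit_exp_relaxation(autocorrelation, time, gamma_guess):
    def model(t, gamma):
        return np.exp(-gamma * t)
    normalised = autocorrelation / autocorrelation[0]
    popt, pcov = curve_fit(model, time, normalised, p0=[gamma_guess])
    return popt, np.sqrt(np.diag(pcov))  # parameters and 1-sigma errors

t = np.linspace(0, 1, 200)
params, errors = fit_exp_relaxation(np.exp(-5.0 * t), t, gamma_guess=1.0)
print(params[0])  # ~5.0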
apache/incubator-heron
heron/tools/admin/src/python/standalone.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/admin/src/python/standalone.py#L377-L387
def add_additional_args(parsers): ''' add additional parameters to parser ''' for parser in parsers: cli_args.add_verbose(parser) cli_args.add_config(parser) parser.add_argument( '--heron-dir', default=config.get_heron_dir(), help='Path to Heron home directory')
[ "def", "add_additional_args", "(", "parsers", ")", ":", "for", "parser", "in", "parsers", ":", "cli_args", ".", "add_verbose", "(", "parser", ")", "cli_args", ".", "add_config", "(", "parser", ")", "parser", ".", "add_argument", "(", "'--heron-dir'", ",", "default", "=", "config", ".", "get_heron_dir", "(", ")", ",", "help", "=", "'Path to Heron home directory'", ")" ]
add additional parameters to parser
[ "add", "additional", "parameters", "to", "parser" ]
python
valid
fukuball/fuku-ml
FukuML/Utility.py
https://github.com/fukuball/fuku-ml/blob/0da15ad7af76adf344b5a6b3f3dbabbbab3446b0/FukuML/Utility.py#L251-L269
def kernel_matrix_xX(svm_model, original_x, original_X): if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'): K = (svm_model.zeta + svm_model.gamma * np.dot(original_x, original_X.T)) ** svm_model.Q elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'): K = np.exp(-svm_model.gamma * (cdist(original_X, np.atleast_2d(original_x), 'euclidean').T ** 2)).ravel() ''' K = np.zeros((svm_model.data_num, svm_model.data_num)) for i in range(svm_model.data_num): for j in range(svm_model.data_num): if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'): K[i, j] = Kernel.polynomial_kernel(svm_model, original_x, original_X[j]) elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'): K[i, j] = Kernel.gaussian_kernel(svm_model, original_x, original_X[j]) ''' return K
[ "def", "kernel_matrix_xX", "(", "svm_model", ",", "original_x", ",", "original_X", ")", ":", "if", "(", "svm_model", ".", "svm_kernel", "==", "'polynomial_kernel'", "or", "svm_model", ".", "svm_kernel", "==", "'soft_polynomial_kernel'", ")", ":", "K", "=", "(", "svm_model", ".", "zeta", "+", "svm_model", ".", "gamma", "*", "np", ".", "dot", "(", "original_x", ",", "original_X", ".", "T", ")", ")", "**", "svm_model", ".", "Q", "elif", "(", "svm_model", ".", "svm_kernel", "==", "'gaussian_kernel'", "or", "svm_model", ".", "svm_kernel", "==", "'soft_gaussian_kernel'", ")", ":", "K", "=", "np", ".", "exp", "(", "-", "svm_model", ".", "gamma", "*", "(", "cdist", "(", "original_X", ",", "np", ".", "atleast_2d", "(", "original_x", ")", ",", "'euclidean'", ")", ".", "T", "**", "2", ")", ")", ".", "ravel", "(", ")", "return", "K" ]
K = np.zeros((svm_model.data_num, svm_model.data_num)) for i in range(svm_model.data_num): for j in range(svm_model.data_num): if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'): K[i, j] = Kernel.polynomial_kernel(svm_model, original_x, original_X[j]) elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'): K[i, j] = Kernel.gaussian_kernel(svm_model, original_x, original_X[j])
[ "K", "=", "np", ".", "zeros", "((", "svm_model", ".", "data_num", "svm_model", ".", "data_num", "))" ]
python
test
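To see what the vectorized Gaussian branch above computes, here is a self-contained check that the cdist expression equals exp(-gamma * ||x - X_j||^2) row by row (gamma and the arrays are illustrative values, not fuku-ml defaults):

    import numpy as np
    from scipy.spatial.distance import cdist

    gamma = 0.5
    x = np.array([1.0, 2.0])                # single query point
    X = np.array([[1.0, 2.0], [3.0, 4.0]])  # training points, one per row

    # vectorized form used in kernel_matrix_xX
    K = np.exp(-gamma * (cdist(X, np.atleast_2d(x), 'euclidean').T ** 2)).ravel()

    # naive form from the commented-out block, for comparison
    K_naive = np.array([np.exp(-gamma * np.sum((x - Xj) ** 2)) for Xj in X])
    assert np.allclose(K, K_naive)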
VingtCinq/python-mailchimp
mailchimp3/entities/campaignfolders.py
https://github.com/VingtCinq/python-mailchimp/blob/1b472f1b64fdde974732ac4b7ed48908bb707260/mailchimp3/entities/campaignfolders.py#L97-L106
def delete(self, folder_id):
    """
    Delete a specific campaign folder, and mark all the campaigns in the
    folder as ‘unfiled’.

    :param folder_id: The unique id for the campaign folder.
    :type folder_id: :py:class:`str`
    """
    self.folder_id = folder_id
    return self._mc_client._delete(url=self._build_path(folder_id))
[ "def", "delete", "(", "self", ",", "folder_id", ")", ":", "self", ".", "folder_id", "=", "folder_id", "return", "self", ".", "_mc_client", ".", "_delete", "(", "url", "=", "self", ".", "_build_path", "(", "folder_id", ")", ")" ]
Delete a specific campaign folder, and mark all the campaigns in the
folder as ‘unfiled’.

:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
[ "Delete", "a", "specific", "campaign", "folder", "and", "mark", "all", "the", "campaigns", "in", "the", "folder", "as", "‘unfiled’", "." ]
python
valid
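A sketch of the client-side call for this endpoint, assuming the mailchimp3 client exposes the entity as `campaign_folders` (the API key and folder id are placeholders):

    from mailchimp3 import MailChimp

    client = MailChimp(mc_api='YOUR-API-KEY-us1')       # placeholder key
    client.campaign_folders.delete(folder_id='abc123')  # illustrative id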
saltstack/salt
salt/states/file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/file.py#L5355-L5456
def comment(name, regex, char='#', backup='.bak'):
    '''
    Comment out specified lines in a file.

    name
        The full path to the file to be edited
    regex
        A regular expression used to find the lines that are to be
        commented; this pattern will be wrapped in parenthesis and will move
        any preceding/trailing ``^`` or ``$`` characters outside the
        parenthesis (e.g., the pattern ``^foo$`` will be rewritten as
        ``^(foo)$``)
        Note that you _need_ the leading ^, otherwise each time you run
        highstate, another comment char will be inserted.
    char : ``#``
        The character to be inserted at the beginning of a line in order
        to comment it out
    backup : ``.bak``
        The file will be backed up before edit with this file extension

        .. warning::

            This backup will be overwritten each time ``sed`` / ``comment`` /
            ``uncomment`` is called. Meaning the backup will only be useful
            after the first invocation.

        Set to False/None to not keep a backup.

    Usage:

    .. code-block:: yaml

        /etc/fstab:
          file.comment:
            - regex: ^bind 127.0.0.1

    .. versionadded:: 0.9.5
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.comment')

    check_res, check_msg = _check_file(name)
    if not check_res:
        return _error(ret, check_msg)

    # remove (?i)-like flags, ^ and $
    unanchor_regex = re.sub(r'^(\(\?[iLmsux]\))?\^?(.*?)\$?$', r'\2', regex)

    comment_regex = char + unanchor_regex

    # Make sure the pattern appears in the file before continuing
    if not __salt__['file.search'](name, regex, multiline=True):
        if __salt__['file.search'](name, comment_regex, multiline=True):
            ret['comment'] = 'Pattern already commented'
            ret['result'] = True
            return ret
        else:
            return _error(ret, '{0}: Pattern not found'.format(unanchor_regex))

    if __opts__['test']:
        ret['changes'][name] = 'updated'
        ret['comment'] = 'File {0} is set to be updated'.format(name)
        ret['result'] = None
        return ret
    with salt.utils.files.fopen(name, 'rb') as fp_:
        slines = fp_.read()
        if six.PY3:
            slines = slines.decode(__salt_system_encoding__)
        slines = slines.splitlines(True)

    # Perform the edit
    __salt__['file.comment_line'](name, regex, char, True, backup)

    with salt.utils.files.fopen(name, 'rb') as fp_:
        nlines = fp_.read()
        if six.PY3:
            nlines = nlines.decode(__salt_system_encoding__)
        nlines = nlines.splitlines(True)

    # Check the result
    ret['result'] = __salt__['file.search'](name, unanchor_regex, multiline=True)

    if slines != nlines:
        if not __utils__['files.is_text'](name):
            ret['changes']['diff'] = 'Replace binary file'
        else:
            # Changes happened, add them
            ret['changes']['diff'] = (
                ''.join(difflib.unified_diff(slines, nlines))
            )

    if ret['result']:
        ret['comment'] = 'Commented lines successfully'
    else:
        ret['comment'] = 'Expected commented lines not found'

    return ret
[ "def", "comment", "(", "name", ",", "regex", ",", "char", "=", "'#'", ",", "backup", "=", "'.bak'", ")", ":", "name", "=", "os", ".", "path", ".", "expanduser", "(", "name", ")", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", "}", "if", "not", "name", ":", "return", "_error", "(", "ret", ",", "'Must provide name to file.comment'", ")", "check_res", ",", "check_msg", "=", "_check_file", "(", "name", ")", "if", "not", "check_res", ":", "return", "_error", "(", "ret", ",", "check_msg", ")", "# remove (?i)-like flags, ^ and $", "unanchor_regex", "=", "re", ".", "sub", "(", "r'^(\\(\\?[iLmsux]\\))?\\^?(.*?)\\$?$'", ",", "r'\\2'", ",", "regex", ")", "comment_regex", "=", "char", "+", "unanchor_regex", "# Make sure the pattern appears in the file before continuing", "if", "not", "__salt__", "[", "'file.search'", "]", "(", "name", ",", "regex", ",", "multiline", "=", "True", ")", ":", "if", "__salt__", "[", "'file.search'", "]", "(", "name", ",", "comment_regex", ",", "multiline", "=", "True", ")", ":", "ret", "[", "'comment'", "]", "=", "'Pattern already commented'", "ret", "[", "'result'", "]", "=", "True", "return", "ret", "else", ":", "return", "_error", "(", "ret", ",", "'{0}: Pattern not found'", ".", "format", "(", "unanchor_regex", ")", ")", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "'updated'", "ret", "[", "'comment'", "]", "=", "'File {0} is set to be updated'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "name", ",", "'rb'", ")", "as", "fp_", ":", "slines", "=", "fp_", ".", "read", "(", ")", "if", "six", ".", "PY3", ":", "slines", "=", "slines", ".", "decode", "(", "__salt_system_encoding__", ")", "slines", "=", "slines", ".", "splitlines", "(", "True", ")", "# Perform the edit", "__salt__", "[", "'file.comment_line'", "]", "(", "name", ",", "regex", ",", "char", ",", "True", ",", "backup", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "name", ",", "'rb'", ")", "as", "fp_", ":", "nlines", "=", "fp_", ".", "read", "(", ")", "if", "six", ".", "PY3", ":", "nlines", "=", "nlines", ".", "decode", "(", "__salt_system_encoding__", ")", "nlines", "=", "nlines", ".", "splitlines", "(", "True", ")", "# Check the result", "ret", "[", "'result'", "]", "=", "__salt__", "[", "'file.search'", "]", "(", "name", ",", "unanchor_regex", ",", "multiline", "=", "True", ")", "if", "slines", "!=", "nlines", ":", "if", "not", "__utils__", "[", "'files.is_text'", "]", "(", "name", ")", ":", "ret", "[", "'changes'", "]", "[", "'diff'", "]", "=", "'Replace binary file'", "else", ":", "# Changes happened, add them", "ret", "[", "'changes'", "]", "[", "'diff'", "]", "=", "(", "''", ".", "join", "(", "difflib", ".", "unified_diff", "(", "slines", ",", "nlines", ")", ")", ")", "if", "ret", "[", "'result'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Commented lines successfully'", "else", ":", "ret", "[", "'comment'", "]", "=", "'Expected commented lines not found'", "return", "ret" ]
Comment out specified lines in a file.

name
    The full path to the file to be edited
regex
    A regular expression used to find the lines that are to be
    commented; this pattern will be wrapped in parenthesis and will move
    any preceding/trailing ``^`` or ``$`` characters outside the
    parenthesis (e.g., the pattern ``^foo$`` will be rewritten as
    ``^(foo)$``)
    Note that you _need_ the leading ^, otherwise each time you run
    highstate, another comment char will be inserted.
char : ``#``
    The character to be inserted at the beginning of a line in order
    to comment it out
backup : ``.bak``
    The file will be backed up before edit with this file extension

    .. warning::

        This backup will be overwritten each time ``sed`` / ``comment`` /
        ``uncomment`` is called. Meaning the backup will only be useful
        after the first invocation.

    Set to False/None to not keep a backup.

Usage:

.. code-block:: yaml

    /etc/fstab:
      file.comment:
        - regex: ^bind 127.0.0.1

.. versionadded:: 0.9.5
[ "Comment", "out", "specified", "lines", "in", "a", "file", "." ]
python
train
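The subtle step in this state is the un-anchoring re.sub; a standalone demonstration of what it does to a user-supplied pattern before the commented variant is searched for:

    import re

    def unanchor(regex):
        # strip an optional (?i)-style flag plus surrounding ^ and $,
        # exactly as file.comment does before prefixing the comment char
        return re.sub(r'^(\(\?[iLmsux]\))?\^?(.*?)\$?$', r'\2', regex)

    print(unanchor(r'^bind 127.0.0.1$'))  # -> bind 127.0.0.1
    print(unanchor(r'(?i)^Foo$'))         # -> Foo
    comment_regex = '#' + unanchor(r'^bind 127.0.0.1$')  # -> #bind 127.0.0.1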
grycap/RADL
radl/radl.py
https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl.py#L966-L969
def setUserPasswdCredentials(self, username, password):
    """Set username and password in ``disk.0.os.credentials``."""
    self.setCredentialValues(username=username, password=password)
[ "def", "setUserPasswdCredentials", "(", "self", ",", "username", ",", "password", ")", ":", "self", ".", "setCredentialValues", "(", "username", "=", "username", ",", "password", "=", "password", ")" ]
Set username and password in ``disk.0.os.credentials``.
[ "Set", "username", "and", "password", "in", "disk", ".", "0", ".", "os", ".", "credentials", "." ]
python
train
micheles/decorator
src/decorator.py
https://github.com/micheles/decorator/blob/7495513ee24deffbf5060860eb69b224fe1d0fe4/src/decorator.py#L259-L293
def decorator(caller, _func=None):
    """decorator(caller) converts a caller function into a decorator"""
    if _func is not None:  # return a decorated function
        # this is obsolete behavior; you should use decorate instead
        return decorate(_func, caller)
    # else return a decorator function
    defaultargs, defaults = '', ()
    if inspect.isclass(caller):
        name = caller.__name__.lower()
        doc = 'decorator(%s) converts functions/generators into ' \
              'factories of %s objects' % (caller.__name__, caller.__name__)
    elif inspect.isfunction(caller):
        if caller.__name__ == '<lambda>':
            name = '_lambda_'
        else:
            name = caller.__name__
        doc = caller.__doc__
        nargs = caller.__code__.co_argcount
        ndefs = len(caller.__defaults__ or ())
        defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs])
        if defaultargs:
            defaultargs += ','
        defaults = caller.__defaults__
    else:  # assume caller is an object with a __call__ method
        name = caller.__class__.__name__.lower()
        doc = caller.__call__.__doc__
    evaldict = dict(_call=caller, _decorate_=decorate)
    dec = FunctionMaker.create(
        '%s(func, %s)' % (name, defaultargs),
        'if func is None: return lambda func: _decorate_(func, _call, (%s))\n'
        'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs),
        evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)
    if defaults:
        dec.__defaults__ = (None,) + defaults
    return dec
[ "def", "decorator", "(", "caller", ",", "_func", "=", "None", ")", ":", "if", "_func", "is", "not", "None", ":", "# return a decorated function", "# this is obsolete behavior; you should use decorate instead", "return", "decorate", "(", "_func", ",", "caller", ")", "# else return a decorator function", "defaultargs", ",", "defaults", "=", "''", ",", "(", ")", "if", "inspect", ".", "isclass", "(", "caller", ")", ":", "name", "=", "caller", ".", "__name__", ".", "lower", "(", ")", "doc", "=", "'decorator(%s) converts functions/generators into '", "'factories of %s objects'", "%", "(", "caller", ".", "__name__", ",", "caller", ".", "__name__", ")", "elif", "inspect", ".", "isfunction", "(", "caller", ")", ":", "if", "caller", ".", "__name__", "==", "'<lambda>'", ":", "name", "=", "'_lambda_'", "else", ":", "name", "=", "caller", ".", "__name__", "doc", "=", "caller", ".", "__doc__", "nargs", "=", "caller", ".", "__code__", ".", "co_argcount", "ndefs", "=", "len", "(", "caller", ".", "__defaults__", "or", "(", ")", ")", "defaultargs", "=", "', '", ".", "join", "(", "caller", ".", "__code__", ".", "co_varnames", "[", "nargs", "-", "ndefs", ":", "nargs", "]", ")", "if", "defaultargs", ":", "defaultargs", "+=", "','", "defaults", "=", "caller", ".", "__defaults__", "else", ":", "# assume caller is an object with a __call__ method", "name", "=", "caller", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "doc", "=", "caller", ".", "__call__", ".", "__doc__", "evaldict", "=", "dict", "(", "_call", "=", "caller", ",", "_decorate_", "=", "decorate", ")", "dec", "=", "FunctionMaker", ".", "create", "(", "'%s(func, %s)'", "%", "(", "name", ",", "defaultargs", ")", ",", "'if func is None: return lambda func: _decorate_(func, _call, (%s))\\n'", "'return _decorate_(func, _call, (%s))'", "%", "(", "defaultargs", ",", "defaultargs", ")", ",", "evaldict", ",", "doc", "=", "doc", ",", "module", "=", "caller", ".", "__module__", ",", "__wrapped__", "=", "caller", ")", "if", "defaults", ":", "dec", ".", "__defaults__", "=", "(", "None", ",", ")", "+", "defaults", "return", "dec" ]
decorator(caller) converts a caller function into a decorator
[ "decorator", "(", "caller", ")", "converts", "a", "caller", "function", "into", "a", "decorator" ]
python
train
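For context, the canonical way this function is consumed, mirroring the usage shown in the decorator package's own documentation:

    from decorator import decorator

    @decorator
    def trace(f, *args, **kw):
        # the caller receives the decorated function plus its call arguments
        print('calling %s with args %s, %s' % (f.__name__, args, kw))
        return f(*args, **kw)

    @trace
    def add(x, y):
        return x + y

    add(1, 2)  # prints the trace line, then returns 3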
ewels/MultiQC
multiqc/modules/damageprofiler/damageprofiler.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/damageprofiler/damageprofiler.py#L174-L208
def addSummaryMetrics(self, dict_to_plot):
    """ Take the parsed stats from the DamageProfiler and add it to the
    basic stats table at the top of the report """

    headers = OrderedDict()
    headers['std'] = {
        'title': 'Read length std. dev.',
        'description': 'Read length std. dev.',
        'suffix': 'bp',
        'scale': 'PuBu',
        'format': '{:,.2f}',
        'shared_key': 'read_length',
        'hidden': True
    }
    headers['median'] = {
        'title': 'Median read length',
        'description': 'Median read length',
        'suffix': 'bp',
        'scale': 'YlGnBu',
        'format': '{:,.2f}',
        'shared_key': 'read_length'
    }
    headers['mean_readlength'] = {
        'title': 'Mean read length',
        'description': 'Mean read length',
        'suffix': 'bp',
        'scale': 'PuBuGn',
        'format': '{:,.2f}',
        'shared_key': 'read_length',
        'hidden': True
    }
    self.general_stats_addcols(dict_to_plot, headers)
[ "def", "addSummaryMetrics", "(", "self", ",", "dict_to_plot", ")", ":", "headers", "=", "OrderedDict", "(", ")", "headers", "[", "'std'", "]", "=", "{", "'title'", ":", "'Read length std. dev.'", ",", "'description'", ":", "'Read length std. dev.'", ",", "'suffix'", ":", "'bp'", ",", "'scale'", ":", "'PuBu'", ",", "'format'", ":", "'{:,.2f}'", ",", "'shared_key'", ":", "'read_length'", ",", "'hidden'", ":", "True", "}", "headers", "[", "'median'", "]", "=", "{", "'title'", ":", "'Median read length'", ",", "'description'", ":", "'Median read length'", ",", "'suffix'", ":", "'bp'", ",", "'scale'", ":", "'YlGnBu'", ",", "'format'", ":", "'{:,.2f}'", ",", "'shared_key'", ":", "'read_length'", "}", "headers", "[", "'mean_readlength'", "]", "=", "{", "'title'", ":", "'Mean read length'", ",", "'description'", ":", "'Mean read length'", ",", "'suffix'", ":", "'bp'", ",", "'scale'", ":", "'PuBuGn'", ",", "'format'", ":", "'{:,.2f}'", ",", "'shared_key'", ":", "'read_length'", ",", "'hidden'", ":", "True", "}", "self", ".", "general_stats_addcols", "(", "dict_to_plot", ",", "headers", ")" ]
Take the parsed stats from the DamageProfiler and add it to the basic stats table at the top of the report
[ "Take", "the", "parsed", "stats", "from", "the", "DamageProfiler", "and", "add", "it", "to", "the", "basic", "stats", "table", "at", "the", "top", "of", "the", "report" ]
python
train
gwpy/gwpy
gwpy/plot/axes.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/axes.py#L149-L162
def set_epoch(self, epoch):
    """Set the epoch for the current GPS scale.

    This method will fail if the current X-axis scale isn't one of
    the GPS scales. See :ref:`gwpy-plot-gps` for more details.

    Parameters
    ----------
    epoch : `float`, `str`
        GPS-compatible time or date object, anything parseable by
        :func:`~gwpy.time.to_gps` is fine.
    """
    scale = self.get_xscale()
    return self.set_xscale(scale, epoch=epoch)
[ "def", "set_epoch", "(", "self", ",", "epoch", ")", ":", "scale", "=", "self", ".", "get_xscale", "(", ")", "return", "self", ".", "set_xscale", "(", "scale", ",", "epoch", "=", "epoch", ")" ]
Set the epoch for the current GPS scale.

This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.

Parameters
----------
epoch : `float`, `str`
    GPS-compatible time or date object, anything parseable by
    :func:`~gwpy.time.to_gps` is fine.
[ "Set", "the", "epoch", "for", "the", "current", "GPS", "scale", "." ]
python
train
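A usage sketch for this method; the plotting calls around set_epoch are assumptions about the gwpy API rather than verified calls, and the GPS time is illustrative:

    from gwpy.plot import Plot

    plot = Plot()
    ax = plot.gca()
    ax.set_xscale('auto-gps')  # assumed: one of gwpy's registered GPS scales
    ax.set_epoch(1126259462)   # reference all ticks to this GPS epoch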
hyperledger/indy-plenum
stp_core/loop/looper.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/loop/looper.py#L204-L217
async def runOnceNicely(self):
    """
    Execute `runOnce` with a small tolerance of 0.01 seconds so that the
    Prodables can complete their other asynchronous tasks not running on
    the event-loop.
    """
    start = time.perf_counter()
    msgsProcessed = await self.prodAllOnce()
    if msgsProcessed == 0:
        # if no let other stuff run
        await asyncio.sleep(0.01, loop=self.loop)
    dur = time.perf_counter() - start
    if dur >= 15:
        logger.info("it took {:.3f} seconds to run once nicely".
                    format(dur), extra={"cli": False})
[ "async", "def", "runOnceNicely", "(", "self", ")", ":", "start", "=", "time", ".", "perf_counter", "(", ")", "msgsProcessed", "=", "await", "self", ".", "prodAllOnce", "(", ")", "if", "msgsProcessed", "==", "0", ":", "# if no let other stuff run", "await", "asyncio", ".", "sleep", "(", "0.01", ",", "loop", "=", "self", ".", "loop", ")", "dur", "=", "time", ".", "perf_counter", "(", ")", "-", "start", "if", "dur", ">=", "15", ":", "logger", ".", "info", "(", "\"it took {:.3f} seconds to run once nicely\"", ".", "format", "(", "dur", ")", ",", "extra", "=", "{", "\"cli\"", ":", "False", "}", ")" ]
Execute `runOnce` with a small tolerance of 0.01 seconds so that the Prodables can complete their other asynchronous tasks not running on the event-loop.
[ "Execute", "runOnce", "with", "a", "small", "tolerance", "of", "0", ".", "01", "seconds", "so", "that", "the", "Prodables", "can", "complete", "their", "other", "asynchronous", "tasks", "not", "running", "on", "the", "event", "-", "loop", "." ]
python
train
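The record above implements a common cooperative-loop idiom: do a unit of work, and yield briefly when idle so other coroutines can run. A stripped-down sketch of the same idea (names are local to this example):

    import asyncio
    import time

    async def run_once_nicely(work_queue):
        start = time.perf_counter()
        processed = 0
        while work_queue:
            work_queue.pop()()  # run one pending callable
            processed += 1
        if processed == 0:
            # nothing to do: sleep briefly so other tasks get the loop
            await asyncio.sleep(0.01)
        return time.perf_counter() - start

    print(asyncio.run(run_once_nicely([lambda: None])))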
aio-libs/aioodbc
aioodbc/cursor.py
https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L124-L132
def executemany(self, sql, *params):
    """Prepare a database query or command and then execute it against
    all parameter sequences found in the sequence seq_of_params.

    :param sql: the SQL statement to execute with optional ? parameters
    :param params: sequence parameters for the markers in the SQL.
    """
    fut = self._run_operation(self._impl.executemany, sql, *params)
    return fut
[ "def", "executemany", "(", "self", ",", "sql", ",", "*", "params", ")", ":", "fut", "=", "self", ".", "_run_operation", "(", "self", ".", "_impl", ".", "executemany", ",", "sql", ",", "*", "params", ")", "return", "fut" ]
Prepare a database query or command and then execute it against
all parameter sequences found in the sequence seq_of_params.

:param sql: the SQL statement to execute with optional ? parameters
:param params: sequence parameters for the markers in the SQL.
[ "Prepare", "a", "database", "query", "or", "command", "and", "then", "execute", "it", "against", "all", "parameter", "sequences", "found", "in", "the", "sequence", "seq_of_params", "." ]
python
train
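A sketch of driving executemany from a coroutine; the DSN is a placeholder and assumes a working ODBC driver on the machine:

    import asyncio
    import aioodbc

    async def main():
        dsn = 'Driver=SQLite3;Database=example.db'  # placeholder DSN
        conn = await aioodbc.connect(dsn=dsn)
        cur = await conn.cursor()
        await cur.execute('CREATE TABLE IF NOT EXISTS t (a INT, b INT)')
        # one parameter tuple per row; the statement runs once per tuple
        await cur.executemany('INSERT INTO t VALUES (?, ?)', [(1, 2), (3, 4)])
        await cur.close()
        await conn.close()

    asyncio.run(main())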
StagPython/StagPy
stagpy/time_series.py
https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/time_series.py#L96-L122
def plot_time_series(sdat, lovs):
    """Plot requested time series.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        lovs (nested list of str): nested list of series names such as
            the one produced by :func:`stagpy.misc.list_of_vars`.

    Other Parameters:
        conf.time.tstart: the starting time.
        conf.time.tend: the ending time.
    """
    sovs = misc.set_of_vars(lovs)
    tseries = {}
    times = {}
    metas = {}
    for tvar in sovs:
        series, time, meta = get_time_series(
            sdat, tvar, conf.time.tstart, conf.time.tend)
        tseries[tvar] = series
        metas[tvar] = meta
        if time is not None:
            times[tvar] = time
    tseries['t'] = get_time_series(
        sdat, 't', conf.time.tstart, conf.time.tend)[0]
    _plot_time_list(sdat, lovs, tseries, metas, times)
[ "def", "plot_time_series", "(", "sdat", ",", "lovs", ")", ":", "sovs", "=", "misc", ".", "set_of_vars", "(", "lovs", ")", "tseries", "=", "{", "}", "times", "=", "{", "}", "metas", "=", "{", "}", "for", "tvar", "in", "sovs", ":", "series", ",", "time", ",", "meta", "=", "get_time_series", "(", "sdat", ",", "tvar", ",", "conf", ".", "time", ".", "tstart", ",", "conf", ".", "time", ".", "tend", ")", "tseries", "[", "tvar", "]", "=", "series", "metas", "[", "tvar", "]", "=", "meta", "if", "time", "is", "not", "None", ":", "times", "[", "tvar", "]", "=", "time", "tseries", "[", "'t'", "]", "=", "get_time_series", "(", "sdat", ",", "'t'", ",", "conf", ".", "time", ".", "tstart", ",", "conf", ".", "time", ".", "tend", ")", "[", "0", "]", "_plot_time_list", "(", "sdat", ",", "lovs", ",", "tseries", ",", "metas", ",", "times", ")" ]
Plot requested time series.

Args:
    sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
    lovs (nested list of str): nested list of series names such as
        the one produced by :func:`stagpy.misc.list_of_vars`.

Other Parameters:
    conf.time.tstart: the starting time.
    conf.time.tend: the ending time.
[ "Plot", "requested", "time", "series", "." ]
python
train
apache/airflow
airflow/contrib/hooks/bigquery_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L923-L994
def run_copy(self,
             source_project_dataset_tables,
             destination_project_dataset_table,
             write_disposition='WRITE_EMPTY',
             create_disposition='CREATE_IF_NEEDED',
             labels=None):
    """
    Executes a BigQuery copy command to copy data from one BigQuery table
    to another. See here:

    https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy

    For more details about these parameters.

    :param source_project_dataset_tables: One or more dotted
        ``(project:|project.)<dataset>.<table>``
        BigQuery tables to use as the source data. Use a list if there are
        multiple source tables.
        If ``<project>`` is not included, project will be the project defined
        in the connection json.
    :type source_project_dataset_tables: list|string
    :param destination_project_dataset_table: The destination BigQuery
        table. Format is: ``(project:|project.)<dataset>.<table>``
    :type destination_project_dataset_table: str
    :param write_disposition: The write disposition if the table already exists.
    :type write_disposition: str
    :param create_disposition: The create disposition if the table doesn't exist.
    :type create_disposition: str
    :param labels: a dictionary containing labels for the job/query,
        passed to BigQuery
    :type labels: dict
    """
    source_project_dataset_tables = ([
        source_project_dataset_tables
    ] if not isinstance(source_project_dataset_tables, list) else
        source_project_dataset_tables)

    source_project_dataset_tables_fixup = []
    for source_project_dataset_table in source_project_dataset_tables:
        source_project, source_dataset, source_table = \
            _split_tablename(table_input=source_project_dataset_table,
                             default_project_id=self.project_id,
                             var_name='source_project_dataset_table')
        source_project_dataset_tables_fixup.append({
            'projectId': source_project,
            'datasetId': source_dataset,
            'tableId': source_table
        })

    destination_project, destination_dataset, destination_table = \
        _split_tablename(table_input=destination_project_dataset_table,
                         default_project_id=self.project_id)
    configuration = {
        'copy': {
            'createDisposition': create_disposition,
            'writeDisposition': write_disposition,
            'sourceTables': source_project_dataset_tables_fixup,
            'destinationTable': {
                'projectId': destination_project,
                'datasetId': destination_dataset,
                'tableId': destination_table
            }
        }
    }

    if labels:
        configuration['labels'] = labels

    return self.run_with_configuration(configuration)
[ "def", "run_copy", "(", "self", ",", "source_project_dataset_tables", ",", "destination_project_dataset_table", ",", "write_disposition", "=", "'WRITE_EMPTY'", ",", "create_disposition", "=", "'CREATE_IF_NEEDED'", ",", "labels", "=", "None", ")", ":", "source_project_dataset_tables", "=", "(", "[", "source_project_dataset_tables", "]", "if", "not", "isinstance", "(", "source_project_dataset_tables", ",", "list", ")", "else", "source_project_dataset_tables", ")", "source_project_dataset_tables_fixup", "=", "[", "]", "for", "source_project_dataset_table", "in", "source_project_dataset_tables", ":", "source_project", ",", "source_dataset", ",", "source_table", "=", "_split_tablename", "(", "table_input", "=", "source_project_dataset_table", ",", "default_project_id", "=", "self", ".", "project_id", ",", "var_name", "=", "'source_project_dataset_table'", ")", "source_project_dataset_tables_fixup", ".", "append", "(", "{", "'projectId'", ":", "source_project", ",", "'datasetId'", ":", "source_dataset", ",", "'tableId'", ":", "source_table", "}", ")", "destination_project", ",", "destination_dataset", ",", "destination_table", "=", "_split_tablename", "(", "table_input", "=", "destination_project_dataset_table", ",", "default_project_id", "=", "self", ".", "project_id", ")", "configuration", "=", "{", "'copy'", ":", "{", "'createDisposition'", ":", "create_disposition", ",", "'writeDisposition'", ":", "write_disposition", ",", "'sourceTables'", ":", "source_project_dataset_tables_fixup", ",", "'destinationTable'", ":", "{", "'projectId'", ":", "destination_project", ",", "'datasetId'", ":", "destination_dataset", ",", "'tableId'", ":", "destination_table", "}", "}", "}", "if", "labels", ":", "configuration", "[", "'labels'", "]", "=", "labels", "return", "self", ".", "run_with_configuration", "(", "configuration", ")" ]
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:

https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy

For more details about these parameters.

:param source_project_dataset_tables: One or more dotted
    ``(project:|project.)<dataset>.<table>``
    BigQuery tables to use as the source data. Use a list if there are
    multiple source tables.
    If ``<project>`` is not included, project will be the project defined
    in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
    table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
    passed to BigQuery
:type labels: dict
[ "Executes", "a", "BigQuery", "copy", "command", "to", "copy", "data", "from", "one", "BigQuery", "table", "to", "another", ".", "See", "here", ":" ]
python
test
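How this method is typically reached from the hook; the connection id and table names are placeholders, and the cursor route reflects the contrib-era API where run_copy lives on the BigQuery cursor:

    from airflow.contrib.hooks.bigquery_hook import BigQueryHook

    hook = BigQueryHook(bigquery_conn_id='bigquery_default')  # assumed conn id
    cursor = hook.get_conn().cursor()
    cursor.run_copy(
        source_project_dataset_tables='my-project.staging.events',
        destination_project_dataset_table='my-project.prod.events',
        write_disposition='WRITE_TRUNCATE',
    )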
google/grr
grr/server/grr_response_server/aff4.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L339-L361
def SetAttributes(self,
                  urn,
                  attributes,
                  to_delete,
                  add_child_index=True,
                  mutation_pool=None):
  """Sets the attributes in the data store."""
  attributes[AFF4Object.SchemaCls.LAST] = [
      rdfvalue.RDFDatetime.Now().SerializeToDataStore()
  ]
  to_delete.add(AFF4Object.SchemaCls.LAST)
  if mutation_pool:
    pool = mutation_pool
  else:
    pool = data_store.DB.GetMutationPool()

  pool.MultiSet(urn, attributes, replace=False, to_delete=to_delete)
  if add_child_index:
    self._UpdateChildIndex(urn, pool)
  if mutation_pool is None:
    pool.Flush()
[ "def", "SetAttributes", "(", "self", ",", "urn", ",", "attributes", ",", "to_delete", ",", "add_child_index", "=", "True", ",", "mutation_pool", "=", "None", ")", ":", "attributes", "[", "AFF4Object", ".", "SchemaCls", ".", "LAST", "]", "=", "[", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", ".", "SerializeToDataStore", "(", ")", "]", "to_delete", ".", "add", "(", "AFF4Object", ".", "SchemaCls", ".", "LAST", ")", "if", "mutation_pool", ":", "pool", "=", "mutation_pool", "else", ":", "pool", "=", "data_store", ".", "DB", ".", "GetMutationPool", "(", ")", "pool", ".", "MultiSet", "(", "urn", ",", "attributes", ",", "replace", "=", "False", ",", "to_delete", "=", "to_delete", ")", "if", "add_child_index", ":", "self", ".", "_UpdateChildIndex", "(", "urn", ",", "pool", ")", "if", "mutation_pool", "is", "None", ":", "pool", ".", "Flush", "(", ")" ]
Sets the attributes in the data store.
[ "Sets", "the", "attributes", "in", "the", "data", "store", "." ]
python
train
mitsei/dlkit
dlkit/handcar/relationship/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/relationship/sessions.py#L1002-L1029
def get_relationship_form_for_update(self, relationship_id=None):
    """Gets the relationship form for updating an existing relationship.

    A new relationship form should be requested for each update
    transaction.

    arg:    relationship_id (osid.id.Id): the ``Id`` of the ``Relationship``
    return: (osid.relationship.RelationshipForm) - the relationship form
    raise:  NotFound - ``relationship_id`` is not found
    raise:  NullArgument - ``relationship_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    if relationship_id is None:
        raise NullArgument()
    try:
        url_path = ('/handcar/services/relationship/families/' +
                    self._catalog_idstr + '/relationships/' +
                    str(relationship_id))
        relationship = objects.Relationship(self._get_request(url_path))
    except Exception:
        raise
    relationship_form = objects.RelationshipForm(relationship._my_map)
    self._forms[relationship_form.get_id().get_identifier()] = not UPDATED
    return relationship_form
[ "def", "get_relationship_form_for_update", "(", "self", ",", "relationship_id", "=", "None", ")", ":", "if", "relationship_id", "is", "None", ":", "raise", "NullArgument", "(", ")", "try", ":", "url_path", "=", "(", "'/handcar/services/relationship/families/'", "+", "self", ".", "_catalog_idstr", "+", "'/relationships/'", "+", "str", "(", "relationship_id", ")", ")", "relationship", "=", "objects", ".", "Relationship", "(", "self", ".", "_get_request", "(", "url_path", ")", ")", "except", "Exception", ":", "raise", "relationship_form", "=", "objects", ".", "RelationshipForm", "(", "relationship", ".", "_my_map", ")", "self", ".", "_forms", "[", "relationship_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "not", "UPDATED", "return", "relationship_form" ]
Gets the relationship form for updating an existing relationship.

A new relationship form should be requested for each update
transaction.

arg:    relationship_id (osid.id.Id): the ``Id`` of the ``Relationship``
return: (osid.relationship.RelationshipForm) - the relationship form
raise:  NotFound - ``relationship_id`` is not found
raise:  NullArgument - ``relationship_id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "relationship", "form", "for", "updating", "an", "existing", "relationship", "." ]
python
train
YosaiProject/yosai
yosai/web/session/session.py
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/web/session/session.py#L132-L139
def on_expiration(self, session, ese=None, session_key=None):
    """
    :type session: session_abcs.Session
    :type ese: ExpiredSessionException
    :type session_key: session_abcs.SessionKey
    """
    super().on_expiration(session, ese, session_key)
    self.on_invalidation(session_key)
[ "def", "on_expiration", "(", "self", ",", "session", ",", "ese", "=", "None", ",", "session_key", "=", "None", ")", ":", "super", "(", ")", ".", "on_expiration", "(", "session", ",", "ese", ",", "session_key", ")", "self", ".", "on_invalidation", "(", "session_key", ")" ]
:type session: session_abcs.Session
:type ese: ExpiredSessionException
:type session_key: session_abcs.SessionKey
[ ":", "type", "session", ":", "session_abcs", ".", "Session", ":", "type", "ese", ":", "ExpiredSessionException", ":", "type", "session_key", ":", "session_abcs", ".", "SessionKey" ]
python
train
mandiant/ioc_writer
ioc_writer/ioc_common.py
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_common.py#L743-L755
def make_processitem_path(path, condition='contains', negate=False, preserve_case=False):
    """
    Create a node for ProcessItem/path

    :return: A IndicatorItem represented as an Element node
    """
    document = 'ProcessItem'
    search = 'ProcessItem/path'
    content_type = 'string'
    content = path

    ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
                                              negate=negate, preserve_case=preserve_case)
    return ii_node
[ "def", "make_processitem_path", "(", "path", ",", "condition", "=", "'contains'", ",", "negate", "=", "False", ",", "preserve_case", "=", "False", ")", ":", "document", "=", "'ProcessItem'", "search", "=", "'ProcessItem/path'", "content_type", "=", "'string'", "content", "=", "path", "ii_node", "=", "ioc_api", ".", "make_indicatoritem_node", "(", "condition", ",", "document", ",", "search", ",", "content_type", ",", "content", ",", "negate", "=", "negate", ",", "preserve_case", "=", "preserve_case", ")", "return", "ii_node" ]
Create a node for ProcessItem/path

:return: A IndicatorItem represented as an Element node
[ "Create", "a", "node", "for", "ProcessItem", "/", "path", ":", "return", ":", "A", "IndicatorItem", "represented", "as", "an", "Element", "node" ]
python
train
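A heavily hedged sketch of how such a helper slots into IOC assembly; the IOC constructor, the top_level_indicator attribute, and the write_ioc_to_file call are assumptions about ioc_writer's ioc_api module, not verified here:

    from ioc_writer import ioc_api, ioc_common

    ioc = ioc_api.IOC(name='suspicious-process')            # assumed signature
    node = ioc_common.make_processitem_path(r'C:\Windows\Temp\evil.exe')
    ioc.top_level_indicator.append(node)                    # assumed attribute
    ioc.write_ioc_to_file(output_dir='.')                   # assumed method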
jobovy/galpy
galpy/actionAngle/actionAngleAxi.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleAxi.py#L330-L347
def calcELAxi(R,vR,vT,pot,vc=1.,ro=1.):
    """
    NAME:
       calcELAxi
    PURPOSE:
       calculate the energy and angular momentum
    INPUT:
       R - Galactocentric radius (/ro)
       vR - radial part of the velocity (/vc)
       vT - azimuthal part of the velocity (/vc)
       vc - circular velocity
       ro - reference radius
    OUTPUT:
       (E,L)
    HISTORY:
       2010-11-30 - Written - Bovy (NYU)
    """
    return (potentialAxi(R,pot)+vR**2./2.+vT**2./2.,R*vT)
[ "def", "calcELAxi", "(", "R", ",", "vR", ",", "vT", ",", "pot", ",", "vc", "=", "1.", ",", "ro", "=", "1.", ")", ":", "return", "(", "potentialAxi", "(", "R", ",", "pot", ")", "+", "vR", "**", "2.", "/", "2.", "+", "vT", "**", "2.", "/", "2.", ",", "R", "*", "vT", ")" ]
NAME:
   calcELAxi
PURPOSE:
   calculate the energy and angular momentum
INPUT:
   R - Galactocentric radius (/ro)
   vR - radial part of the velocity (/vc)
   vT - azimuthal part of the velocity (/vc)
   vc - circular velocity
   ro - reference radius
OUTPUT:
   (E,L)
HISTORY:
   2010-11-30 - Written - Bovy (NYU)
[ "NAME", ":", "calcELAxi", "PURPOSE", ":", "calculate", "the", "energy", "and", "angular", "momentum", "INPUT", ":", "R", "-", "Galactocentric", "radius", "(", "/", "ro", ")", "vR", "-", "radial", "part", "of", "the", "velocity", "(", "/", "vc", ")", "vT", "-", "azimuthal", "part", "of", "the", "velocity", "(", "/", "vc", ")", "vc", "-", "circular", "velocity", "ro", "-", "reference", "radius", "OUTPUT", ":", "(", "E", "L", ")", "HISTORY", ":", "2010", "-", "11", "-", "30", "-", "Written", "-", "Bovy", "(", "NYU", ")" ]
python
train
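The returned pair follows directly from E = Phi(R) + vR^2/2 + vT^2/2 and L = R*vT. A quick numeric check with a logarithmic potential Phi(R) = ln(R), used here only for illustration in place of potentialAxi:

    import numpy as np

    def calcEL_log(R, vR, vT):
        # same formulas as calcELAxi, with potentialAxi swapped for ln(R)
        E = np.log(R) + vR**2 / 2. + vT**2 / 2.
        L = R * vT
        return E, L

    print(calcEL_log(1.0, 0.0, 1.0))  # circular orbit at R=1: (0.5, 1.0)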
secynic/ipwhois
ipwhois/scripts/ipwhois_cli.py
https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L406-L426
def generate_output_header(self, query_type='RDAP'):
    """
    The function for generating the CLI output header.

    Args:
        query_type (:obj:`str`): The IPWhois query type. Defaults to
            'RDAP'.

    Returns:
        str: The generated output.
    """
    output = '\n{0}{1}{2} query for {3}:{4}\n\n'.format(
        ANSI['ul'],
        ANSI['b'],
        query_type,
        self.obj.address_str,
        ANSI['end']
    )

    return output
[ "def", "generate_output_header", "(", "self", ",", "query_type", "=", "'RDAP'", ")", ":", "output", "=", "'\\n{0}{1}{2} query for {3}:{4}\\n\\n'", ".", "format", "(", "ANSI", "[", "'ul'", "]", ",", "ANSI", "[", "'b'", "]", ",", "query_type", ",", "self", ".", "obj", ".", "address_str", ",", "ANSI", "[", "'end'", "]", ")", "return", "output" ]
The function for generating the CLI output header.

Args:
    query_type (:obj:`str`): The IPWhois query type. Defaults to
        'RDAP'.

Returns:
    str: The generated output.
[ "The", "function", "for", "generating", "the", "CLI", "output", "header", "." ]
python
train
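A minimal stand-in for the ANSI mapping this function relies on, showing the header format end to end (the escape codes are standard; the dict is local to this sketch):

    ANSI = {'ul': '\033[4m', 'b': '\033[1m', 'end': '\033[0m'}

    def header(query_type, address):
        return '\n{0}{1}{2} query for {3}:{4}\n\n'.format(
            ANSI['ul'], ANSI['b'], query_type, address, ANSI['end'])

    print(header('RDAP', '74.125.225.229'))  # underlined, bold header line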
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Taskmaster.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Taskmaster.py#L760-L950
def _find_next_ready_node(self):
    """
    Finds the next node that is ready to be built.

    This is *the* main guts of the DAG walk.  We loop through the
    list of candidates, looking for something that has no un-built
    children (i.e., that is a leaf Node or has dependencies that are
    all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
    (both the target Node itself and its sources, which are always
    scanned in the context of a given target) to discover implicit
    dependencies.  A Node that must wait for some children to be
    built will be put back on the candidates list after the children
    have finished building.  A Node that has been put back on the
    candidates list in this way may have itself (or its sources)
    re-scanned, in order to handle generated header files (e.g.) and
    the implicit dependencies therein.

    Note that this method does not do any signature calculation or
    up-to-date check itself.  All of that is handled by the Task
    class.  This is purely concerned with the dependency graph walk.
    """
    self.ready_exc = None

    T = self.trace
    if T: T.write(SCons.Util.UnicodeType('\n') + self.trace_message('Looking for a node to evaluate'))

    while True:
        node = self.next_candidate()
        if node is None:
            if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
            return None

        node = node.disambiguate()
        state = node.get_state()

        # For debugging only:
        #
        # try:
        #     self._validate_pending_children()
        # except:
        #     self.ready_exc = sys.exc_info()
        #     return node

        if CollectStats:
            if not hasattr(node.attributes, 'stats'):
                node.attributes.stats = Stats()
                StatsNodes.append(node)
            S = node.attributes.stats
            S.considered = S.considered + 1
        else:
            S = None

        if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))

        if state == NODE_NO_STATE:
            # Mark this node as being on the execution stack:
            node.set_state(NODE_PENDING)
        elif state > NODE_PENDING:
            # Skip this node if it has already been evaluated:
            if S: S.already_handled = S.already_handled + 1
            if T: T.write(self.trace_message(u'       already handled (executed)'))
            continue

        executor = node.get_executor()

        try:
            children = executor.get_all_children()
        except SystemExit:
            exc_value = sys.exc_info()[1]
            e = SCons.Errors.ExplicitExit(node, exc_value.code)
            self.ready_exc = (SCons.Errors.ExplicitExit, e)
            if T: T.write(self.trace_message('       SystemExit'))
            return node
        except Exception as e:
            # We had a problem just trying to figure out the
            # children (like a child couldn't be linked in to a
            # VariantDir, or a Scanner threw something).  Arrange to
            # raise the exception when the Task is "executed."
            self.ready_exc = sys.exc_info()
            if S: S.problem = S.problem + 1
            if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
            return node

        children_not_visited = []
        children_pending = set()
        children_not_ready = []
        children_failed = False

        for child in chain(executor.get_all_prerequisites(), children):
            childstate = child.get_state()

            if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))

            if childstate == NODE_NO_STATE:
                children_not_visited.append(child)
            elif childstate == NODE_PENDING:
                children_pending.add(child)
            elif childstate == NODE_FAILED:
                children_failed = True

            if childstate <= NODE_EXECUTING:
                children_not_ready.append(child)

        # These nodes have not even been visited yet.  Add
        # them to the list so that on some next pass we can
        # take a stab at evaluating them (or their children).
        children_not_visited.reverse()
        self.candidates.extend(self.order(children_not_visited))

        # if T and children_not_visited:
        #     T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
        #     T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

        # Skip this node if any of its children have failed.
        #
        # This catches the case where we're descending a top-level
        # target and one of our children failed while trying to be
        # built by a *previous* descent of an earlier top-level
        # target.
        #
        # It can also occur if a node is reused in multiple
        # targets.  One first descends though the one of the
        # target, the next time occurs through the other target.
        #
        # Note that we can only have failed_children if the
        # --keep-going flag was used, because without it the build
        # will stop before diving in the other branch.
        #
        # Note that even if one of the children fails, we still
        # added the other children to the list of candidate nodes
        # to keep on building (--keep-going).
        if children_failed:
            for n in executor.get_action_targets():
                n.set_state(NODE_FAILED)

            if S: S.child_failed = S.child_failed + 1
            if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
            continue

        if children_not_ready:
            for child in children_not_ready:
                # We're waiting on one or more derived targets
                # that have not yet finished building.
                if S: S.not_built = S.not_built + 1

                # Add this node to the waiting parents lists of
                # anything we're waiting on, with a reference
                # count so we can be put back on the list for
                # re-evaluation when they've all finished.
                node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
                if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
                              (self.trace_node(node), repr(str(child)))))

            if T:
                for pc in children_pending:
                    T.write(self.trace_message('       adding %s to the pending children set\n' %
                            self.trace_node(pc)))
            self.pending_children = self.pending_children | children_pending

            continue

        # Skip this node if it has side-effects that are
        # currently being built:
        wait_side_effects = False
        for se in executor.get_action_side_effects():
            if se.get_state() == NODE_EXECUTING:
                se.add_to_waiting_s_e(node)
                wait_side_effects = True

        if wait_side_effects:
            if S: S.side_effects = S.side_effects + 1
            continue

        # The default when we've gotten through all of the checks above:
        # this node is ready to be built.
        if S: S.build = S.build + 1
        if T: T.write(self.trace_message(u'Evaluating %s\n' %
                                         self.trace_node(node)))

        # For debugging only:
        #
        # try:
        #     self._validate_pending_children()
        # except:
        #     self.ready_exc = sys.exc_info()
        #     return node

        return node

    return None
[ "def", "_find_next_ready_node", "(", "self", ")", ":", "self", ".", "ready_exc", "=", "None", "T", "=", "self", ".", "trace", "if", "T", ":", "T", ".", "write", "(", "SCons", ".", "Util", ".", "UnicodeType", "(", "'\\n'", ")", "+", "self", ".", "trace_message", "(", "'Looking for a node to evaluate'", ")", ")", "while", "True", ":", "node", "=", "self", ".", "next_candidate", "(", ")", "if", "node", "is", "None", ":", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "'No candidate anymore.'", ")", "+", "u'\\n'", ")", "return", "None", "node", "=", "node", ".", "disambiguate", "(", ")", "state", "=", "node", ".", "get_state", "(", ")", "# For debugging only:", "#", "# try:", "# self._validate_pending_children()", "# except:", "# self.ready_exc = sys.exc_info()", "# return node", "if", "CollectStats", ":", "if", "not", "hasattr", "(", "node", ".", "attributes", ",", "'stats'", ")", ":", "node", ".", "attributes", ".", "stats", "=", "Stats", "(", ")", "StatsNodes", ".", "append", "(", "node", ")", "S", "=", "node", ".", "attributes", ".", "stats", "S", ".", "considered", "=", "S", ".", "considered", "+", "1", "else", ":", "S", "=", "None", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "u' Considering node %s and its children:'", "%", "self", ".", "trace_node", "(", "node", ")", ")", ")", "if", "state", "==", "NODE_NO_STATE", ":", "# Mark this node as being on the execution stack:", "node", ".", "set_state", "(", "NODE_PENDING", ")", "elif", "state", ">", "NODE_PENDING", ":", "# Skip this node if it has already been evaluated:", "if", "S", ":", "S", ".", "already_handled", "=", "S", ".", "already_handled", "+", "1", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "u' already handled (executed)'", ")", ")", "continue", "executor", "=", "node", ".", "get_executor", "(", ")", "try", ":", "children", "=", "executor", ".", "get_all_children", "(", ")", "except", "SystemExit", ":", "exc_value", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "e", "=", "SCons", ".", "Errors", ".", "ExplicitExit", "(", "node", ",", "exc_value", ".", "code", ")", "self", ".", "ready_exc", "=", "(", "SCons", ".", "Errors", ".", "ExplicitExit", ",", "e", ")", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "' SystemExit'", ")", ")", "return", "node", "except", "Exception", "as", "e", ":", "# We had a problem just trying to figure out the", "# children (like a child couldn't be linked in to a", "# VariantDir, or a Scanner threw something). 
Arrange to", "# raise the exception when the Task is \"executed.\"", "self", ".", "ready_exc", "=", "sys", ".", "exc_info", "(", ")", "if", "S", ":", "S", ".", "problem", "=", "S", ".", "problem", "+", "1", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "' exception %s while scanning children.\\n'", "%", "e", ")", ")", "return", "node", "children_not_visited", "=", "[", "]", "children_pending", "=", "set", "(", ")", "children_not_ready", "=", "[", "]", "children_failed", "=", "False", "for", "child", "in", "chain", "(", "executor", ".", "get_all_prerequisites", "(", ")", ",", "children", ")", ":", "childstate", "=", "child", ".", "get_state", "(", ")", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "u' '", "+", "self", ".", "trace_node", "(", "child", ")", ")", ")", "if", "childstate", "==", "NODE_NO_STATE", ":", "children_not_visited", ".", "append", "(", "child", ")", "elif", "childstate", "==", "NODE_PENDING", ":", "children_pending", ".", "add", "(", "child", ")", "elif", "childstate", "==", "NODE_FAILED", ":", "children_failed", "=", "True", "if", "childstate", "<=", "NODE_EXECUTING", ":", "children_not_ready", ".", "append", "(", "child", ")", "# These nodes have not even been visited yet. Add", "# them to the list so that on some next pass we can", "# take a stab at evaluating them (or their children).", "children_not_visited", ".", "reverse", "(", ")", "self", ".", "candidates", ".", "extend", "(", "self", ".", "order", "(", "children_not_visited", ")", ")", "# if T and children_not_visited:", "# T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited)))", "# T.write(self.trace_message(' candidates now: %s\\n' % map(str, self.candidates)))", "# Skip this node if any of its children have failed.", "#", "# This catches the case where we're descending a top-level", "# target and one of our children failed while trying to be", "# built by a *previous* descent of an earlier top-level", "# target.", "#", "# It can also occur if a node is reused in multiple", "# targets. 
One first descends though the one of the", "# target, the next time occurs through the other target.", "#", "# Note that we can only have failed_children if the", "# --keep-going flag was used, because without it the build", "# will stop before diving in the other branch.", "#", "# Note that even if one of the children fails, we still", "# added the other children to the list of candidate nodes", "# to keep on building (--keep-going).", "if", "children_failed", ":", "for", "n", "in", "executor", ".", "get_action_targets", "(", ")", ":", "n", ".", "set_state", "(", "NODE_FAILED", ")", "if", "S", ":", "S", ".", "child_failed", "=", "S", ".", "child_failed", "+", "1", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "'****** %s\\n'", "%", "self", ".", "trace_node", "(", "node", ")", ")", ")", "continue", "if", "children_not_ready", ":", "for", "child", "in", "children_not_ready", ":", "# We're waiting on one or more derived targets", "# that have not yet finished building.", "if", "S", ":", "S", ".", "not_built", "=", "S", ".", "not_built", "+", "1", "# Add this node to the waiting parents lists of", "# anything we're waiting on, with a reference", "# count so we can be put back on the list for", "# re-evaluation when they've all finished.", "node", ".", "ref_count", "=", "node", ".", "ref_count", "+", "child", ".", "add_to_waiting_parents", "(", "node", ")", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "u' adjusted ref count: %s, child %s'", "%", "(", "self", ".", "trace_node", "(", "node", ")", ",", "repr", "(", "str", "(", "child", ")", ")", ")", ")", ")", "if", "T", ":", "for", "pc", "in", "children_pending", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "' adding %s to the pending children set\\n'", "%", "self", ".", "trace_node", "(", "pc", ")", ")", ")", "self", ".", "pending_children", "=", "self", ".", "pending_children", "|", "children_pending", "continue", "# Skip this node if it has side-effects that are", "# currently being built:", "wait_side_effects", "=", "False", "for", "se", "in", "executor", ".", "get_action_side_effects", "(", ")", ":", "if", "se", ".", "get_state", "(", ")", "==", "NODE_EXECUTING", ":", "se", ".", "add_to_waiting_s_e", "(", "node", ")", "wait_side_effects", "=", "True", "if", "wait_side_effects", ":", "if", "S", ":", "S", ".", "side_effects", "=", "S", ".", "side_effects", "+", "1", "continue", "# The default when we've gotten through all of the checks above:", "# this node is ready to be built.", "if", "S", ":", "S", ".", "build", "=", "S", ".", "build", "+", "1", "if", "T", ":", "T", ".", "write", "(", "self", ".", "trace_message", "(", "u'Evaluating %s\\n'", "%", "self", ".", "trace_node", "(", "node", ")", ")", ")", "# For debugging only:", "#", "# try:", "# self._validate_pending_children()", "# except:", "# self.ready_exc = sys.exc_info()", "# return node", "return", "node", "return", "None" ]
Finds the next node that is ready to be built.

This is *the* main guts of the DAG walk.  We loop through the
list of candidates, looking for something that has no un-built
children (i.e., that is a leaf Node or has dependencies that are
all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
(both the target Node itself and its sources, which are always
scanned in the context of a given target) to discover implicit
dependencies.  A Node that must wait for some children to be
built will be put back on the candidates list after the children
have finished building.  A Node that has been put back on the
candidates list in this way may have itself (or its sources)
re-scanned, in order to handle generated header files (e.g.) and
the implicit dependencies therein.

Note that this method does not do any signature calculation or
up-to-date check itself.  All of that is handled by the Task
class.  This is purely concerned with the dependency graph walk.
[ "Finds", "the", "next", "node", "that", "is", "ready", "to", "be", "built", "." ]
python
train
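The heart of this walk is the ref-count rule: a node becomes ready only once it has no pending children. In miniature (the data structures are illustrative, not SCons's):

    def find_ready(nodes, pending_children):
        # a node is ready when its set of unbuilt children is empty
        return [n for n in nodes if not pending_children[n]]

    pending_children = {'app': {'lib'}, 'lib': set()}
    print(find_ready(['app', 'lib'], pending_children))  # ['lib']; 'app' waits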
Netflix-Skunkworks/historical
historical/common/dynamodb.py
https://github.com/Netflix-Skunkworks/historical/blob/c3ebaa8388a3fe67e23a6c9c6b04c3e618497c4a/historical/common/dynamodb.py#L254-L275
def deserialize_durable_record_to_current_model(record, current_model):
    """
    Utility function that will take a Durable Dynamo event record and turn
    it into the proper Current Dynamo object.

    This will properly deserialize the ugly Dynamo datatypes away.
    :param record:
    :param current_model:
    :return:
    """
    # Was the item in question too big for SNS? If so, then we need to fetch the item from the current Dynamo table:
    if record.get(EVENT_TOO_BIG_FLAG):
        # Try to get the data from the current table vs. grabbing the data from the Durable table:
        return get_full_current_object(record['dynamodb']['Keys']['arn']['S'], current_model)

    new_image = remove_durable_specific_fields(record['dynamodb']['NewImage'])
    data = {}

    for item, value in new_image.items():
        # This could end up as loss of precision
        data[item] = DESER.deserialize(value)

    return current_model(**data)
[ "def", "deserialize_durable_record_to_current_model", "(", "record", ",", "current_model", ")", ":", "# Was the item in question too big for SNS? If so, then we need to fetch the item from the current Dynamo table:", "if", "record", ".", "get", "(", "EVENT_TOO_BIG_FLAG", ")", ":", "# Try to get the data from the current table vs. grabbing the data from the Durable table:", "return", "get_full_current_object", "(", "record", "[", "'dynamodb'", "]", "[", "'Keys'", "]", "[", "'arn'", "]", "[", "'S'", "]", ",", "current_model", ")", "new_image", "=", "remove_durable_specific_fields", "(", "record", "[", "'dynamodb'", "]", "[", "'NewImage'", "]", ")", "data", "=", "{", "}", "for", "item", ",", "value", "in", "new_image", ".", "items", "(", ")", ":", "# This could end up as loss of precision", "data", "[", "item", "]", "=", "DESER", ".", "deserialize", "(", "value", ")", "return", "current_model", "(", "*", "*", "data", ")" ]
Utility function that will take a Durable Dynamo event record and turn it
into the proper Current Dynamo object.

This will properly deserialize the ugly Dynamo datatypes away.

:param record:
:param current_model:
:return:
[ "Utility", "function", "that", "will", "take", "a", "Durable", "Dynamo", "event", "record", "and", "turn", "it", "into", "the", "proper", "Current", "Dynamo", "object", "." ]
python
train
bitesofcode/projex
projex/plugin.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/plugin.py#L683-L727
def fromFile(cls, filepath):
    """
    Creates a proxy instance from the inputted registry file.

    :param      filepath | <str>

    :return     <PluginProxy> || None
    """
    xdata = ElementTree.parse(nstr(filepath))
    xroot = xdata.getroot()

    # collect variable information
    name = xroot.get('name')
    ver = float(xroot.get('version', '1.0'))
    if not name:
        name = os.path.basename(filepath).split('.')
        if name == '__init__':
            name = os.path.normpath(filepath).split(os.path.sep)[-2]
        name = projex.text.pretty(name)

    icon = xroot.get('icon', './icon.png')

    ximport = xroot.find('import')
    if ximport is not None:
        importpath = ximport.get('path', './__init__.py')
    else:
        importpath = './__init__.py'

    params = {'description': '', 'author': '', 'email': '', 'url': ''}
    for param, default in params.items():
        xdata = xroot.find(param)
        if xdata is not None:
            params[param] = xdata.text

    # generate the proxy information
    proxy = PluginProxy(cls, name, ver)
    proxy.setImportPath(importpath)
    proxy.setDescription(params['description'])
    proxy.setAuthor(params['author'])
    proxy.setEmail(params['email'])
    proxy.setUrl(params['url'])
    proxy.setFilepath(filepath)

    return proxy
[ "def", "fromFile", "(", "cls", ",", "filepath", ")", ":", "xdata", "=", "ElementTree", ".", "parse", "(", "nstr", "(", "filepath", ")", ")", "xroot", "=", "xdata", ".", "getroot", "(", ")", "# collect variable information", "name", "=", "xroot", ".", "get", "(", "'name'", ")", "ver", "=", "float", "(", "xroot", ".", "get", "(", "'version'", ",", "'1.0'", ")", ")", "if", "not", "name", ":", "name", "=", "os", ".", "path", ".", "basename", "(", "filepath", ")", ".", "split", "(", "'.'", ")", "if", "name", "==", "'__init__'", ":", "name", "=", "os", ".", "path", ".", "normpath", "(", "filepath", ")", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "[", "-", "2", "]", "name", "=", "projex", ".", "text", ".", "pretty", "(", "name", ")", "icon", "=", "xroot", ".", "get", "(", "'icon'", ",", "'./icon.png'", ")", "ximport", "=", "xroot", ".", "find", "(", "'import'", ")", "if", "ximport", "is", "not", "None", ":", "importpath", "=", "ximport", ".", "get", "(", "'path'", ",", "'./__init__.py'", ")", "else", ":", "importpath", "=", "'./__init__.py'", "params", "=", "{", "'description'", ":", "''", ",", "'author'", ":", "''", ",", "'email'", ":", "''", ",", "'url'", ":", "''", "}", "for", "param", ",", "default", "in", "params", ".", "items", "(", ")", ":", "xdata", "=", "xroot", ".", "find", "(", "param", ")", "if", "xdata", "is", "not", "None", ":", "params", "[", "param", "]", "=", "xdata", ".", "text", "# generate the proxy information", "proxy", "=", "PluginProxy", "(", "cls", ",", "name", ",", "ver", ")", "proxy", ".", "setImportPath", "(", "importpath", ")", "proxy", ".", "setDescription", "(", "params", "[", "'description'", "]", ")", "proxy", ".", "setAuthor", "(", "params", "[", "'author'", "]", ")", "proxy", ".", "setEmail", "(", "params", "[", "'email'", "]", ")", "proxy", ".", "setUrl", "(", "params", "[", "'url'", "]", ")", "proxy", ".", "setFilepath", "(", "filepath", ")", "return", "proxy" ]
Creates a proxy instance from the inputted registry file. :param filepath | <str> :return <PluginProxy> || None
[ "Creates", "a", "proxy", "instance", "from", "the", "inputted", "registry", "file", ".", ":", "param", "filepath", "|", "<str", ">", ":", "return", "<PluginProxy", ">", "||", "None" ]
python
train
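The fromFile parser above reads a small XML registry format. A standalone sketch of that format and of the same attribute lookups, using only the standard library; the sample file contents below are invented, and projex-specific helpers (projex.text.pretty, PluginProxy) are left out:

import xml.etree.ElementTree as ElementTree

REGISTRY = '''<plugin name="Demo" version="1.2" icon="./icon.png">
    <import path="./plugin/__init__.py"/>
    <description>Example plugin</description>
    <author>Jane Doe</author>
</plugin>'''

xroot = ElementTree.fromstring(REGISTRY)
print(xroot.get('name'))                    # Demo
print(float(xroot.get('version', '1.0')))   # 1.2
ximport = xroot.find('import')
importpath = ximport.get('path', './__init__.py') if ximport is not None else './__init__.py'
print(importpath)                           # ./plugin/__init__.py
xdesc = xroot.find('description')
print(xdesc.text if xdesc is not None else '')  # Example plugin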
saltstack/salt
salt/modules/boto_elbv2.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elbv2.py#L166-L206
def delete_target_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete target group. name (string) - Target Group Name or Amazon Resource Name (ARN). returns (bool) - True on success, False on failure. CLI example: .. code-block:: bash salt myminion boto_elbv2.delete_target_group arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not target_group_exists(name, region, key, keyid, profile): return True try: if name.startswith('arn:aws:elasticloadbalancing'): conn.delete_target_group(TargetGroupArn=name) log.info('Deleted target group %s', name) else: tg_info = conn.describe_target_groups(Names=[name]) if len(tg_info['TargetGroups']) != 1: return False arn = tg_info['TargetGroups'][0]['TargetGroupArn'] conn.delete_target_group(TargetGroupArn=arn) log.info('Deleted target group %s ARN %s', name, arn) return True except ClientError as error: log.error('Failed to delete target group %s', name, exc_info_on_loglevel=logging.DEBUG) return False
[ "def", "delete_target_group", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "target_group_exists", "(", "name", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", ":", "return", "True", "try", ":", "if", "name", ".", "startswith", "(", "'arn:aws:elasticloadbalancing'", ")", ":", "conn", ".", "delete_target_group", "(", "TargetGroupArn", "=", "name", ")", "log", ".", "info", "(", "'Deleted target group %s'", ",", "name", ")", "else", ":", "tg_info", "=", "conn", ".", "describe_target_groups", "(", "Names", "=", "[", "name", "]", ")", "if", "len", "(", "tg_info", "[", "'TargetGroups'", "]", ")", "!=", "1", ":", "return", "False", "arn", "=", "tg_info", "[", "'TargetGroups'", "]", "[", "0", "]", "[", "'TargetGroupArn'", "]", "conn", ".", "delete_target_group", "(", "TargetGroupArn", "=", "arn", ")", "log", ".", "info", "(", "'Deleted target group %s ARN %s'", ",", "name", ",", "arn", ")", "return", "True", "except", "ClientError", "as", "error", ":", "log", ".", "error", "(", "'Failed to delete target group %s'", ",", "name", ",", "exc_info_on_loglevel", "=", "logging", ".", "DEBUG", ")", "return", "False" ]
Delete target group. name (string) - Target Group Name or Amazon Resource Name (ARN). returns (bool) - True on success, False on failure. CLI example: .. code-block:: bash salt myminion boto_elbv2.delete_target_group arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163
[ "Delete", "target", "group", "." ]
python
train
CZ-NIC/yangson
yangson/schemadata.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/schemadata.py#L368-L393
def path2route(path: SchemaPath) -> SchemaRoute: """Translate a schema/data path to a schema/data route. Args: path: Schema path. Raises: InvalidSchemaPath: Invalid path. """ if path == "/" or path == "": return [] nlist = path.split("/") prevns = None res = [] for n in (nlist[1:] if path[0] == "/" else nlist): p, s, loc = n.partition(":") if s: if p == prevns: raise InvalidSchemaPath(path) res.append((loc, p)) prevns = p elif prevns: res.append((p, prevns)) else: raise InvalidSchemaPath(path) return res
[ "def", "path2route", "(", "path", ":", "SchemaPath", ")", "->", "SchemaRoute", ":", "if", "path", "==", "\"/\"", "or", "path", "==", "\"\"", ":", "return", "[", "]", "nlist", "=", "path", ".", "split", "(", "\"/\"", ")", "prevns", "=", "None", "res", "=", "[", "]", "for", "n", "in", "(", "nlist", "[", "1", ":", "]", "if", "path", "[", "0", "]", "==", "\"/\"", "else", "nlist", ")", ":", "p", ",", "s", ",", "loc", "=", "n", ".", "partition", "(", "\":\"", ")", "if", "s", ":", "if", "p", "==", "prevns", ":", "raise", "InvalidSchemaPath", "(", "path", ")", "res", ".", "append", "(", "(", "loc", ",", "p", ")", ")", "prevns", "=", "p", "elif", "prevns", ":", "res", ".", "append", "(", "(", "p", ",", "prevns", ")", ")", "else", ":", "raise", "InvalidSchemaPath", "(", "path", ")", "return", "res" ]
Translate a schema/data path to a schema/data route. Args: path: Schema path. Raises: InvalidSchemaPath: Invalid path.
[ "Translate", "a", "schema", "/", "data", "path", "to", "a", "schema", "/", "data", "route", "." ]
python
train
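path2route is pure string handling, so its behaviour can be checked with a standalone copy of the same logic; in this sketch InvalidSchemaPath is swapped for ValueError so it runs without yangson installed, and the module name is only an example:

# Standalone sketch of the route-splitting logic shown above.
def path2route(path):
    if path in ("/", ""):
        return []
    nlist = path.split("/")
    prevns, res = None, []
    for n in (nlist[1:] if path[0] == "/" else nlist):
        p, s, loc = n.partition(":")
        if s:
            if p == prevns:
                raise ValueError(path)  # repeated explicit prefix is invalid
            res.append((loc, p))
            prevns = p
        elif prevns:
            res.append((p, prevns))  # unprefixed step inherits the namespace
        else:
            raise ValueError(path)  # first step must carry a module prefix

    return res

print(path2route("/ietf-interfaces:interfaces/interface"))
# [('interfaces', 'ietf-interfaces'), ('interface', 'ietf-interfaces')]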
pazz/urwidtrees
urwidtrees/widgets.py
https://github.com/pazz/urwidtrees/blob/d1fa38ce4f37db00bdfc574b856023b5db4c7ead/urwidtrees/widgets.py#L156-L165
def collapse_focussed(self): """ Collapse currently focussed position; works only if the underlying tree allows it. """ if implementsCollapseAPI(self._tree): w, focuspos = self.get_focus() self._tree.collapse(focuspos) self._walker.clear_cache() self.refresh()
[ "def", "collapse_focussed", "(", "self", ")", ":", "if", "implementsCollapseAPI", "(", "self", ".", "_tree", ")", ":", "w", ",", "focuspos", "=", "self", ".", "get_focus", "(", ")", "self", ".", "_tree", ".", "collapse", "(", "focuspos", ")", "self", ".", "_walker", ".", "clear_cache", "(", ")", "self", ".", "refresh", "(", ")" ]
Collapse currently focussed position; works only if the underlying tree allows it.
[ "Collapse", "currently", "focussed", "position", ";", "works", "only", "if", "the", "underlying", "tree", "allows", "it", "." ]
python
train
KyleWpppd/css-audit
cssaudit/parser.py
https://github.com/KyleWpppd/css-audit/blob/cab4d4204cf30d54bc1881deee6ad92ae6aacc56/cssaudit/parser.py#L213-L236
def extract_leftmost_selector(selector_list):
    """
    Because we aren't building a DOM tree to traverse, the only way
    to get the most general selectors is to take the leftmost.
    For example with `div.outer div.inner`, we can't tell if `div.inner`
    has been used in context without building a tree.
    """
    classes = set()
    ids = set()
    elements = set()
    # print "Selector list: %s \n\n\n\n\n\n" % selector_list
    for selector in selector_list:
        selector = selector.split()[0]
        if selector[0] == '.':
            classes.add(selector)
        elif selector[0] == '#':
            ids.add(selector)
        else:
            elements.add(selector)
    return {
        'classes':classes,
        'ids':ids,
        'elements':elements,
        }
[ "def", "extract_leftmost_selector", "(", "selector_list", ")", ":", "classes", "=", "set", "(", ")", "ids", "=", "set", "(", ")", "elements", "=", "set", "(", ")", "# print \"Selector list: %s \\n\\n\\n\\n\\n\\n\" % selector_list", "for", "selector", "in", "selector_list", ":", "selector", "=", "selector", ".", "split", "(", ")", "[", "0", "]", "if", "selector", "[", "0", "]", "==", "'.'", ":", "classes", ".", "add", "(", "selector", ")", "elif", "selector", "[", "0", "]", "==", "'#'", ":", "ids", ".", "add", "(", "selector", ")", "else", ":", "elements", ".", "add", "(", "selector", ")", "return", "{", "'classes'", ":", "classes", ",", "'ids'", ":", "ids", ",", "'elements'", ":", "elements", ",", "}" ]
Because we aren't building a DOM tree to traverse, the only way to get the most general selectors is to take the leftmost. For example with `div.outer div.inner`, we can't tell if `div.inner` has been used in context without building a tree.
[ "Because", "we", "aren", "t", "building", "a", "DOM", "tree", "to", "transverse", "the", "only", "way", "to", "get", "the", "most", "general", "selectors", "is", "to", "take", "the", "leftmost", ".", "For", "example", "with", "div", ".", "outer", "div", ".", "inner", "we", "can", "t", "tell", "if", "div", ".", "inner", "has", "been", "used", "in", "context", "without", "building", "a", "tree", "." ]
python
train
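Since extract_leftmost_selector has no external dependencies, a quick run (assuming the function as defined above is in scope) shows the leftmost-only behaviour; note that a compound leftmost selector such as 'div.outer' lands in elements, because only the first character is inspected:

result = extract_leftmost_selector([
    'div.outer div.inner',
    '#main p',
    '.btn',
    'ul li.item',
])
print(result['classes'])    # {'.btn'}
print(result['ids'])        # {'#main'}
print(result['elements'])   # {'div.outer', 'ul'}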
iotile/coretools
iotileemulate/iotile/emulate/reference/controller_features/tile_manager.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/reference/controller_features/tile_manager.py#L108-L120
def insert_tile(self, tile_info): """Add or replace an entry in the tile cache. Args: tile_info (TileInfo): The newly registered tile. """ for i, tile in enumerate(self.registered_tiles): if tile.slot == tile_info.slot: self.registered_tiles[i] = tile_info return self.registered_tiles.append(tile_info)
[ "def", "insert_tile", "(", "self", ",", "tile_info", ")", ":", "for", "i", ",", "tile", "in", "enumerate", "(", "self", ".", "registered_tiles", ")", ":", "if", "tile", ".", "slot", "==", "tile_info", ".", "slot", ":", "self", ".", "registered_tiles", "[", "i", "]", "=", "tile_info", "return", "self", ".", "registered_tiles", ".", "append", "(", "tile_info", ")" ]
Add or replace an entry in the tile cache. Args: tile_info (TileInfo): The newly registered tile.
[ "Add", "or", "replace", "an", "entry", "in", "the", "tile", "cache", "." ]
python
train
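The replace-or-append pattern in insert_tile is easy to exercise with a stand-in container; the TileInfo namedtuple here is a hypothetical stand-in, not the emulator's real class:

from collections import namedtuple

TileInfo = namedtuple('TileInfo', ['slot', 'name'])

class Cache:
    def __init__(self):
        self.registered_tiles = []

    def insert_tile(self, tile_info):
        # Replace an existing entry for the same slot, else append.
        for i, tile in enumerate(self.registered_tiles):
            if tile.slot == tile_info.slot:
                self.registered_tiles[i] = tile_info
                return
        self.registered_tiles.append(tile_info)

c = Cache()
c.insert_tile(TileInfo(1, 'accel'))
c.insert_tile(TileInfo(2, 'gps'))
c.insert_tile(TileInfo(1, 'accel-v2'))  # replaces slot 1 in place
print(c.registered_tiles)
# [TileInfo(slot=1, name='accel-v2'), TileInfo(slot=2, name='gps')]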
ProjetPP/PPP-Core
ppp_core/__init__.py
https://github.com/ProjetPP/PPP-Core/blob/49ee5b16325aa7134e2e423cf75e7b2609df96a0/ppp_core/__init__.py#L6-L9
def app(environ, start_response): """Function called by the WSGI server.""" r = HttpRequestHandler(environ, start_response, Router).dispatch() return r
[ "def", "app", "(", "environ", ",", "start_response", ")", ":", "r", "=", "HttpRequestHandler", "(", "environ", ",", "start_response", ",", "Router", ")", ".", "dispatch", "(", ")", "return", "r" ]
Function called by the WSGI server.
[ "Function", "called", "by", "the", "WSGI", "server", "." ]
python
train
influxdata/influxdb-python
influxdb/client.py
https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/client.py#L976-L1020
def create_continuous_query(self, name, select, database=None, resample_opts=None): r"""Create a continuous query for a database. :param name: the name of continuous query to create :type name: str :param select: select statement for the continuous query :type select: str :param database: the database for which the continuous query is created. Defaults to current client's database :type database: str :param resample_opts: resample options :type resample_opts: str :Example: :: >> select_clause = 'SELECT mean("value") INTO "cpu_mean" ' \ ... 'FROM "cpu" GROUP BY time(1m)' >> client.create_continuous_query( ... 'cpu_mean', select_clause, 'db_name', 'EVERY 10s FOR 2m' ... ) >> client.get_list_continuous_queries() [ { 'db_name': [ { 'name': 'cpu_mean', 'query': 'CREATE CONTINUOUS QUERY "cpu_mean" ' 'ON "db_name" ' 'RESAMPLE EVERY 10s FOR 2m ' 'BEGIN SELECT mean("value") ' 'INTO "cpu_mean" FROM "cpu" ' 'GROUP BY time(1m) END' } ] } ] """ query_string = ( "CREATE CONTINUOUS QUERY {0} ON {1}{2} BEGIN {3} END" ).format(quote_ident(name), quote_ident(database or self._database), ' RESAMPLE ' + resample_opts if resample_opts else '', select) self.query(query_string)
[ "def", "create_continuous_query", "(", "self", ",", "name", ",", "select", ",", "database", "=", "None", ",", "resample_opts", "=", "None", ")", ":", "query_string", "=", "(", "\"CREATE CONTINUOUS QUERY {0} ON {1}{2} BEGIN {3} END\"", ")", ".", "format", "(", "quote_ident", "(", "name", ")", ",", "quote_ident", "(", "database", "or", "self", ".", "_database", ")", ",", "' RESAMPLE '", "+", "resample_opts", "if", "resample_opts", "else", "''", ",", "select", ")", "self", ".", "query", "(", "query_string", ")" ]
r"""Create a continuous query for a database. :param name: the name of continuous query to create :type name: str :param select: select statement for the continuous query :type select: str :param database: the database for which the continuous query is created. Defaults to current client's database :type database: str :param resample_opts: resample options :type resample_opts: str :Example: :: >> select_clause = 'SELECT mean("value") INTO "cpu_mean" ' \ ... 'FROM "cpu" GROUP BY time(1m)' >> client.create_continuous_query( ... 'cpu_mean', select_clause, 'db_name', 'EVERY 10s FOR 2m' ... ) >> client.get_list_continuous_queries() [ { 'db_name': [ { 'name': 'cpu_mean', 'query': 'CREATE CONTINUOUS QUERY "cpu_mean" ' 'ON "db_name" ' 'RESAMPLE EVERY 10s FOR 2m ' 'BEGIN SELECT mean("value") ' 'INTO "cpu_mean" FROM "cpu" ' 'GROUP BY time(1m) END' } ] } ]
[ "r", "Create", "a", "continuous", "query", "for", "a", "database", "." ]
python
train
BrianHicks/emit
emit/router/core.py
https://github.com/BrianHicks/emit/blob/19a86c2392b136c9e857000798ccaa525aa0ed84/emit/router/core.py#L386-L399
def get_name(self, func): ''' Get the name to reference a function by :param func: function to get the name of :type func: callable ''' if hasattr(func, 'name'): return func.name return '%s.%s' % ( func.__module__, func.__name__ )
[ "def", "get_name", "(", "self", ",", "func", ")", ":", "if", "hasattr", "(", "func", ",", "'name'", ")", ":", "return", "func", ".", "name", "return", "'%s.%s'", "%", "(", "func", ".", "__module__", ",", "func", ".", "__name__", ")" ]
Get the name to reference a function by :param func: function to get the name of :type func: callable
[ "Get", "the", "name", "to", "reference", "a", "function", "by" ]
python
train
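The fallback branch of get_name builds a dotted module-qualified name; a self-contained copy run against a standard-library function shows the shape of the result (the 'name' attribute branch exists for callables that carry an explicit name, such as emit's decorated tasks):

import json

def get_name(func):
    # Same logic as the method above, lifted out as a free function.
    if hasattr(func, 'name'):
        return func.name
    return '%s.%s' % (func.__module__, func.__name__)

# json.dumps has no 'name' attribute, so the fallback branch fires.
print(get_name(json.dumps))  # json.dumps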
earlye/nephele
nephele/AwsProcessor.py
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsProcessor.py#L140-L161
def do_mfa(self, args): """ Enter a 6-digit MFA token. Nephele will execute the appropriate `aws` command line to authenticate that token. mfa -h for more details """ parser = CommandArgumentParser("mfa") parser.add_argument(dest='token',help='MFA token value'); parser.add_argument("-p","--profile",dest='awsProfile',default=AwsConnectionFactory.instance.getProfile(),help='MFA token value'); args = vars(parser.parse_args(args)) token = args['token'] awsProfile = args['awsProfile'] arn = AwsConnectionFactory.instance.load_arn(awsProfile) credentials_command = ["aws","--profile",awsProfile,"--output","json","sts","get-session-token","--serial-number",arn,"--token-code",token] output = run_cmd(credentials_command) # Throws on non-zero exit :yey: credentials = json.loads("\n".join(output.stdout))['Credentials'] AwsConnectionFactory.instance.setMfaCredentials(credentials,awsProfile)
[ "def", "do_mfa", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"mfa\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "'token'", ",", "help", "=", "'MFA token value'", ")", "parser", ".", "add_argument", "(", "\"-p\"", ",", "\"--profile\"", ",", "dest", "=", "'awsProfile'", ",", "default", "=", "AwsConnectionFactory", ".", "instance", ".", "getProfile", "(", ")", ",", "help", "=", "'MFA token value'", ")", "args", "=", "vars", "(", "parser", ".", "parse_args", "(", "args", ")", ")", "token", "=", "args", "[", "'token'", "]", "awsProfile", "=", "args", "[", "'awsProfile'", "]", "arn", "=", "AwsConnectionFactory", ".", "instance", ".", "load_arn", "(", "awsProfile", ")", "credentials_command", "=", "[", "\"aws\"", ",", "\"--profile\"", ",", "awsProfile", ",", "\"--output\"", ",", "\"json\"", ",", "\"sts\"", ",", "\"get-session-token\"", ",", "\"--serial-number\"", ",", "arn", ",", "\"--token-code\"", ",", "token", "]", "output", "=", "run_cmd", "(", "credentials_command", ")", "# Throws on non-zero exit :yey:", "credentials", "=", "json", ".", "loads", "(", "\"\\n\"", ".", "join", "(", "output", ".", "stdout", ")", ")", "[", "'Credentials'", "]", "AwsConnectionFactory", ".", "instance", ".", "setMfaCredentials", "(", "credentials", ",", "awsProfile", ")" ]
Enter a 6-digit MFA token. Nephele will execute the appropriate `aws` command line to authenticate that token. mfa -h for more details
[ "Enter", "a", "6", "-", "digit", "MFA", "token", ".", "Nephele", "will", "execute", "the", "appropriate", "aws", "command", "line", "to", "authenticate", "that", "token", "." ]
python
train
tjcsl/ion
intranet/apps/users/models.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/users/models.py#L113-L133
def get_teachers_sorted(self): """Get teachers sorted by last name. This is used for the announcement request page. """ teachers = self.get_teachers() teachers = [(u.last_name, u.first_name, u.id) for u in teachers] for t in teachers: if t is None or t[0] is None or t[1] is None or t[2] is None: teachers.remove(t) for t in teachers: if t[0] is None or len(t[0]) <= 1: teachers.remove(t) teachers.sort(key=lambda u: (u[0], u[1])) # Hack to return QuerySet in given order id_list = [t[2] for t in teachers] clauses = ' '.join(['WHEN id=%s THEN %s' % (pk, i) for i, pk in enumerate(id_list)]) ordering = 'CASE %s END' % clauses queryset = User.objects.filter(id__in=id_list).extra(select={'ordering': ordering}, order_by=('ordering',)) return queryset
[ "def", "get_teachers_sorted", "(", "self", ")", ":", "teachers", "=", "self", ".", "get_teachers", "(", ")", "teachers", "=", "[", "(", "u", ".", "last_name", ",", "u", ".", "first_name", ",", "u", ".", "id", ")", "for", "u", "in", "teachers", "]", "for", "t", "in", "teachers", ":", "if", "t", "is", "None", "or", "t", "[", "0", "]", "is", "None", "or", "t", "[", "1", "]", "is", "None", "or", "t", "[", "2", "]", "is", "None", ":", "teachers", ".", "remove", "(", "t", ")", "for", "t", "in", "teachers", ":", "if", "t", "[", "0", "]", "is", "None", "or", "len", "(", "t", "[", "0", "]", ")", "<=", "1", ":", "teachers", ".", "remove", "(", "t", ")", "teachers", ".", "sort", "(", "key", "=", "lambda", "u", ":", "(", "u", "[", "0", "]", ",", "u", "[", "1", "]", ")", ")", "# Hack to return QuerySet in given order", "id_list", "=", "[", "t", "[", "2", "]", "for", "t", "in", "teachers", "]", "clauses", "=", "' '", ".", "join", "(", "[", "'WHEN id=%s THEN %s'", "%", "(", "pk", ",", "i", ")", "for", "i", ",", "pk", "in", "enumerate", "(", "id_list", ")", "]", ")", "ordering", "=", "'CASE %s END'", "%", "clauses", "queryset", "=", "User", ".", "objects", ".", "filter", "(", "id__in", "=", "id_list", ")", ".", "extra", "(", "select", "=", "{", "'ordering'", ":", "ordering", "}", ",", "order_by", "=", "(", "'ordering'", ",", ")", ")", "return", "queryset" ]
Get teachers sorted by last name. This is used for the announcement request page.
[ "Get", "teachers", "sorted", "by", "last", "name", "." ]
python
train
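The interesting piece in get_teachers_sorted is the CASE WHEN hack that makes the database return rows in an arbitrary Python-side order. The clause assembly is plain string handling and can be previewed without Django (the ids below are invented); newer Django versions expose Case/When expressions for the same effect:

# Sketch of the ordering clause built above.
id_list = [42, 7, 19]
clauses = ' '.join('WHEN id=%s THEN %s' % (pk, i) for i, pk in enumerate(id_list))
ordering = 'CASE %s END' % clauses
print(ordering)
# CASE WHEN id=42 THEN 0 WHEN id=7 THEN 1 WHEN id=19 THEN 2 END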
saltstack/salt
salt/modules/tuned.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/tuned.py#L94-L109
def profile(profile_name): ''' Activate specified profile CLI Example: .. code-block:: bash salt '*' tuned.profile virtual-guest ''' # run tuned-adm with the profile specified result = __salt__['cmd.retcode']('tuned-adm profile {0}'.format(profile_name)) if int(result) != 0: return False return '{0}'.format(profile_name)
[ "def", "profile", "(", "profile_name", ")", ":", "# run tuned-adm with the profile specified", "result", "=", "__salt__", "[", "'cmd.retcode'", "]", "(", "'tuned-adm profile {0}'", ".", "format", "(", "profile_name", ")", ")", "if", "int", "(", "result", ")", "!=", "0", ":", "return", "False", "return", "'{0}'", ".", "format", "(", "profile_name", ")" ]
Activate specified profile CLI Example: .. code-block:: bash salt '*' tuned.profile virtual-guest
[ "Activate", "specified", "profile" ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/discovery_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L8126-L8135
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'sentiment') and self.sentiment is not None: _dict['sentiment'] = self.sentiment if hasattr(self, 'emotion') and self.emotion is not None: _dict['emotion'] = self.emotion if hasattr(self, 'limit') and self.limit is not None: _dict['limit'] = self.limit return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'sentiment'", ")", "and", "self", ".", "sentiment", "is", "not", "None", ":", "_dict", "[", "'sentiment'", "]", "=", "self", ".", "sentiment", "if", "hasattr", "(", "self", ",", "'emotion'", ")", "and", "self", ".", "emotion", "is", "not", "None", ":", "_dict", "[", "'emotion'", "]", "=", "self", ".", "emotion", "if", "hasattr", "(", "self", ",", "'limit'", ")", "and", "self", ".", "limit", "is", "not", "None", ":", "_dict", "[", "'limit'", "]", "=", "self", ".", "limit", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
worstcase/blockade
blockade/net.py
https://github.com/worstcase/blockade/blob/3dc6ad803f0b0d56586dec9542a6a06aa06cf569/blockade/net.py#L235-L251
def insert_rule(self, chain, src=None, dest=None, target=None): """Insert a new rule in the chain """ if not chain: raise ValueError("Invalid chain") if not target: raise ValueError("Invalid target") if not (src or dest): raise ValueError("Need src, dest, or both") args = ["-I", chain] if src: args += ["-s", src] if dest: args += ["-d", dest] args += ["-j", target] self.call(*args)
[ "def", "insert_rule", "(", "self", ",", "chain", ",", "src", "=", "None", ",", "dest", "=", "None", ",", "target", "=", "None", ")", ":", "if", "not", "chain", ":", "raise", "ValueError", "(", "\"Invalid chain\"", ")", "if", "not", "target", ":", "raise", "ValueError", "(", "\"Invalid target\"", ")", "if", "not", "(", "src", "or", "dest", ")", ":", "raise", "ValueError", "(", "\"Need src, dest, or both\"", ")", "args", "=", "[", "\"-I\"", ",", "chain", "]", "if", "src", ":", "args", "+=", "[", "\"-s\"", ",", "src", "]", "if", "dest", ":", "args", "+=", "[", "\"-d\"", ",", "dest", "]", "args", "+=", "[", "\"-j\"", ",", "target", "]", "self", ".", "call", "(", "*", "args", ")" ]
Insert a new rule in the chain
[ "Insert", "a", "new", "rule", "in", "the", "chain" ]
python
valid
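insert_rule only assembles iptables arguments before delegating to self.call, so the argument list can be previewed by repeating the same assembly standalone; the chain name and address below are made up:

def build_args(chain, src=None, dest=None, target=None):
    # Same assembly as insert_rule() above, minus the validation and the call.
    args = ["-I", chain]
    if src:
        args += ["-s", src]
    if dest:
        args += ["-d", dest]
    args += ["-j", target]
    return args

print(build_args("blockade-aa43", src="172.17.0.2", target="DROP"))
# ['-I', 'blockade-aa43', '-s', '172.17.0.2', '-j', 'DROP']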
JensAstrup/pyOutlook
pyOutlook/core/message.py
https://github.com/JensAstrup/pyOutlook/blob/f4ca9d4a8629c0a41f78102ce84fab702a841167/pyOutlook/core/message.py#L343-L356
def reply(self, reply_comment): """Reply to the Message. Notes: HTML can be inserted in the string and will be interpreted properly by Outlook. Args: reply_comment: String message to send with email. """ payload = '{ "Comment": "' + reply_comment + '"}' endpoint = 'https://outlook.office.com/api/v2.0/me/messages/' + self.message_id + '/reply' self._make_api_call('post', endpoint, data=payload)
[ "def", "reply", "(", "self", ",", "reply_comment", ")", ":", "payload", "=", "'{ \"Comment\": \"'", "+", "reply_comment", "+", "'\"}'", "endpoint", "=", "'https://outlook.office.com/api/v2.0/me/messages/'", "+", "self", ".", "message_id", "+", "'/reply'", "self", ".", "_make_api_call", "(", "'post'", ",", "endpoint", ",", "data", "=", "payload", ")" ]
Reply to the Message. Notes: HTML can be inserted in the string and will be interpreted properly by Outlook. Args: reply_comment: String message to send with email.
[ "Reply", "to", "the", "Message", "." ]
python
train
gdestuynder/simple_bugzilla
bugzilla.py
https://github.com/gdestuynder/simple_bugzilla/blob/c69766a81fa7960a8f2b22287968fa4787f1bcfe/bugzilla.py#L98-L108
def post_bug(self, bug): '''http://bugzilla.readthedocs.org/en/latest/api/core/v1/bug.html#create-bug''' assert type(bug) is DotDict assert 'product' in bug assert 'component' in bug assert 'summary' in bug if (not 'version' in bug): bug.version = 'other' if (not 'op_sys' in bug): bug.op_sys = 'All' if (not 'platform' in bug): bug.platform = 'All' return self._post('bug', json.dumps(bug))
[ "def", "post_bug", "(", "self", ",", "bug", ")", ":", "assert", "type", "(", "bug", ")", "is", "DotDict", "assert", "'product'", "in", "bug", "assert", "'component'", "in", "bug", "assert", "'summary'", "in", "bug", "if", "(", "not", "'version'", "in", "bug", ")", ":", "bug", ".", "version", "=", "'other'", "if", "(", "not", "'op_sys'", "in", "bug", ")", ":", "bug", ".", "op_sys", "=", "'All'", "if", "(", "not", "'platform'", "in", "bug", ")", ":", "bug", ".", "platform", "=", "'All'", "return", "self", ".", "_post", "(", "'bug'", ",", "json", ".", "dumps", "(", "bug", ")", ")" ]
http://bugzilla.readthedocs.org/en/latest/api/core/v1/bug.html#create-bug
[ "http", ":", "//", "bugzilla", ".", "readthedocs", ".", "org", "/", "en", "/", "latest", "/", "api", "/", "core", "/", "v1", "/", "bug", ".", "html#create", "-", "bug" ]
python
train
mattimck/python-exist
exist/auth.py
https://github.com/mattimck/python-exist/blob/2c4be9d176d8e8007c4e020ee7cd6263a2096abb/exist/auth.py#L75-L82
def authorize_url(self): """ Build the authorization url and save the state. Return the authorization url """ url, self.state = self.oauth.authorization_url( '%sauthorize' % OAUTH_URL) return url
[ "def", "authorize_url", "(", "self", ")", ":", "url", ",", "self", ".", "state", "=", "self", ".", "oauth", ".", "authorization_url", "(", "'%sauthorize'", "%", "OAUTH_URL", ")", "return", "url" ]
Build the authorization url and save the state. Return the authorization url
[ "Build", "the", "authorization", "url", "and", "save", "the", "state", ".", "Return", "the", "authorization", "url" ]
python
train
hazelcast/hazelcast-python-client
hazelcast/serialization/base.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/serialization/base.py#L169-L180
def serializer_by_type_id(self, type_id): """ Find and return the serializer for the type-id :param type_id: type-id the serializer :return: the serializer """ if type_id <= 0: indx = index_for_default_type(type_id) serializer = self._constant_type_ids.get(indx, None) if serializer is not None: return serializer return self._id_dic.get(type_id, None)
[ "def", "serializer_by_type_id", "(", "self", ",", "type_id", ")", ":", "if", "type_id", "<=", "0", ":", "indx", "=", "index_for_default_type", "(", "type_id", ")", "serializer", "=", "self", ".", "_constant_type_ids", ".", "get", "(", "indx", ",", "None", ")", "if", "serializer", "is", "not", "None", ":", "return", "serializer", "return", "self", ".", "_id_dic", ".", "get", "(", "type_id", ",", "None", ")" ]
Find and return the serializer for the type-id :param type_id: type-id the serializer :return: the serializer
[ "Find", "and", "return", "the", "serializer", "for", "the", "type", "-", "id", ":", "param", "type_id", ":", "type", "-", "id", "the", "serializer", ":", "return", ":", "the", "serializer" ]
python
train
santoshphilip/eppy
eppy/modeleditor.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L321-L333
def equalfield(bunchdt, data, commdct, idfobj1, idfobj2, fieldname, places=7): """returns true if the two fields are equal will test for retaincase places is used if the field is float/real""" # TODO test if both objects are of same type key1 = idfobj1.obj[0].upper() key2 = idfobj2.obj[0].upper() if key1 != key2: raise NotSameObjectError vee2 = idfobj2[fieldname] return isfieldvalue( bunchdt, data, commdct, idfobj1, fieldname, vee2, places=places)
[ "def", "equalfield", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobj1", ",", "idfobj2", ",", "fieldname", ",", "places", "=", "7", ")", ":", "# TODO test if both objects are of same type", "key1", "=", "idfobj1", ".", "obj", "[", "0", "]", ".", "upper", "(", ")", "key2", "=", "idfobj2", ".", "obj", "[", "0", "]", ".", "upper", "(", ")", "if", "key1", "!=", "key2", ":", "raise", "NotSameObjectError", "vee2", "=", "idfobj2", "[", "fieldname", "]", "return", "isfieldvalue", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobj1", ",", "fieldname", ",", "vee2", ",", "places", "=", "places", ")" ]
returns true if the two fields are equal will test for retaincase places is used if the field is float/real
[ "returns", "true", "if", "the", "two", "fields", "are", "equal", "will", "test", "for", "retaincase", "places", "is", "used", "if", "the", "field", "is", "float", "/", "real" ]
python
train
roclark/sportsreference
sportsreference/nhl/roster.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nhl/roster.py#L548-L556
def team_abbreviation(self): """ Returns a ``string`` of the team's abbreviation, such as 'DET' for the Detroit Red Wings. """ # For career stats, skip the team abbreviation. if self._season[self._index].lower() == 'career': return None return self._team_abbreviation[self._index]
[ "def", "team_abbreviation", "(", "self", ")", ":", "# For career stats, skip the team abbreviation.", "if", "self", ".", "_season", "[", "self", ".", "_index", "]", ".", "lower", "(", ")", "==", "'career'", ":", "return", "None", "return", "self", ".", "_team_abbreviation", "[", "self", ".", "_index", "]" ]
Returns a ``string`` of the team's abbreviation, such as 'DET' for the Detroit Red Wings.
[ "Returns", "a", "string", "of", "the", "team", "s", "abbreviation", "such", "as", "DET", "for", "the", "Detroit", "Red", "Wings", "." ]
python
train
Robpol86/flake8-pydocstyle
flake8_pydocstyle.py
https://github.com/Robpol86/flake8-pydocstyle/blob/657425541e1d868a6a5241a83c3a16a9a715d6b5/flake8_pydocstyle.py#L87-L112
def parse_options(cls, options): """Read parsed options from flake8. :param options: Options to add to flake8's command line options. """ # Handle flake8 options. cls.options['explain'] = bool(options.show_pydocstyle) cls.options['ignore'] = options.ignore # Handle pydocstyle options. config = pydocstyle.RawConfigParser() for file_name in pydocstyle.ConfigurationParser.PROJECT_CONFIG_FILES: if config.read(os.path.join(os.path.abspath('.'), file_name)): break if not config.has_section('pydocstyle'): return native_options = dict() for option in config.options('pydocstyle'): if option == 'ignore': native_options['ignore'] = config.get('pydocstyle', option) if option in ('explain', 'source'): native_options[option] = config.getboolean('pydocstyle', option) native_options['show-source'] = native_options.pop('source', None) if native_options.get('ignore'): native_options['ignore'] = native_options['ignore'].split(',') cls.options.update(dict((k, v) for k, v in native_options.items() if v))
[ "def", "parse_options", "(", "cls", ",", "options", ")", ":", "# Handle flake8 options.", "cls", ".", "options", "[", "'explain'", "]", "=", "bool", "(", "options", ".", "show_pydocstyle", ")", "cls", ".", "options", "[", "'ignore'", "]", "=", "options", ".", "ignore", "# Handle pydocstyle options.", "config", "=", "pydocstyle", ".", "RawConfigParser", "(", ")", "for", "file_name", "in", "pydocstyle", ".", "ConfigurationParser", ".", "PROJECT_CONFIG_FILES", ":", "if", "config", ".", "read", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "'.'", ")", ",", "file_name", ")", ")", ":", "break", "if", "not", "config", ".", "has_section", "(", "'pydocstyle'", ")", ":", "return", "native_options", "=", "dict", "(", ")", "for", "option", "in", "config", ".", "options", "(", "'pydocstyle'", ")", ":", "if", "option", "==", "'ignore'", ":", "native_options", "[", "'ignore'", "]", "=", "config", ".", "get", "(", "'pydocstyle'", ",", "option", ")", "if", "option", "in", "(", "'explain'", ",", "'source'", ")", ":", "native_options", "[", "option", "]", "=", "config", ".", "getboolean", "(", "'pydocstyle'", ",", "option", ")", "native_options", "[", "'show-source'", "]", "=", "native_options", ".", "pop", "(", "'source'", ",", "None", ")", "if", "native_options", ".", "get", "(", "'ignore'", ")", ":", "native_options", "[", "'ignore'", "]", "=", "native_options", "[", "'ignore'", "]", ".", "split", "(", "','", ")", "cls", ".", "options", ".", "update", "(", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "native_options", ".", "items", "(", ")", "if", "v", ")", ")" ]
Read parsed options from flake8. :param options: Options to add to flake8's command line options.
[ "Read", "parsed", "options", "from", "flake8", "." ]
python
train
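The pydocstyle section handling above follows stock configparser semantics (pydocstyle's RawConfigParser is a thin wrapper around it). A self-contained sketch with an in-memory config shows which keys survive the filtering; the option values are invented:

import configparser

cfg = configparser.RawConfigParser()
cfg.read_string("""
[pydocstyle]
ignore = D100,D203
explain = true
source = false
""")

native = {}
for option in cfg.options('pydocstyle'):
    if option == 'ignore':
        native['ignore'] = cfg.get('pydocstyle', option)
    if option in ('explain', 'source'):
        native[option] = cfg.getboolean('pydocstyle', option)
native['show-source'] = native.pop('source', None)
if native.get('ignore'):
    native['ignore'] = native['ignore'].split(',')

print({k: v for k, v in native.items() if v})
# {'ignore': ['D100', 'D203'], 'explain': True}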
juju/python-libjuju
juju/client/connection.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/connection.py#L250-L278
async def _pinger(self): ''' A Controller can time us out if we are silent for too long. This is especially true in JaaS, which has a fairly strict timeout. To prevent timing out, we send a ping every ten seconds. ''' async def _do_ping(): try: await pinger_facade.Ping() await asyncio.sleep(10, loop=self.loop) except CancelledError: pass pinger_facade = client.PingerFacade.from_connection(self) try: while True: await utils.run_with_interrupt( _do_ping(), self.monitor.close_called, loop=self.loop) if self.monitor.close_called.is_set(): break except websockets.exceptions.ConnectionClosed: # The connection has closed - we can't do anything # more until the connection is restarted. log.debug('ping failed because of closed connection') pass
[ "async", "def", "_pinger", "(", "self", ")", ":", "async", "def", "_do_ping", "(", ")", ":", "try", ":", "await", "pinger_facade", ".", "Ping", "(", ")", "await", "asyncio", ".", "sleep", "(", "10", ",", "loop", "=", "self", ".", "loop", ")", "except", "CancelledError", ":", "pass", "pinger_facade", "=", "client", ".", "PingerFacade", ".", "from_connection", "(", "self", ")", "try", ":", "while", "True", ":", "await", "utils", ".", "run_with_interrupt", "(", "_do_ping", "(", ")", ",", "self", ".", "monitor", ".", "close_called", ",", "loop", "=", "self", ".", "loop", ")", "if", "self", ".", "monitor", ".", "close_called", ".", "is_set", "(", ")", ":", "break", "except", "websockets", ".", "exceptions", ".", "ConnectionClosed", ":", "# The connection has closed - we can't do anything", "# more until the connection is restarted.", "log", ".", "debug", "(", "'ping failed because of closed connection'", ")", "pass" ]
A Controller can time us out if we are silent for too long. This is especially true in JaaS, which has a fairly strict timeout. To prevent timing out, we send a ping every ten seconds.
[ "A", "Controller", "can", "time", "us", "out", "if", "we", "are", "silent", "for", "too", "long", ".", "This", "is", "especially", "true", "in", "JaaS", "which", "has", "a", "fairly", "strict", "timeout", "." ]
python
train
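Stripped of the juju facade and helper, _pinger is a periodic keepalive that a close event can cut short. A generic asyncio sketch of that pattern; send_ping is a stand-in for the real facade call, and the short interval is only to keep the demo fast:

import asyncio

async def send_ping():
    print('ping')  # stand-in for the real keepalive request

async def pinger(close_called: asyncio.Event, interval: float = 10.0):
    # Ping, then sleep for `interval`, but wake immediately if close is set.
    while not close_called.is_set():
        await send_ping()
        try:
            await asyncio.wait_for(close_called.wait(), timeout=interval)
        except asyncio.TimeoutError:
            pass  # interval elapsed with no close request; ping again

async def main():
    close = asyncio.Event()
    task = asyncio.create_task(pinger(close, interval=0.1))
    await asyncio.sleep(0.35)
    close.set()   # request shutdown; pinger exits at once
    await task

asyncio.run(main())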
payu-org/payu
payu/laboratory.py
https://github.com/payu-org/payu/blob/1442a9a226012eff248b8097cc1eaabc3e224867/payu/laboratory.py#L51-L69
def get_default_lab_path(self, config): """Generate a default laboratory path based on user environment.""" # Default path settings # Append project name if present (NCI-specific) default_project = os.environ.get('PROJECT', '') default_short_path = os.path.join('/short', default_project) default_user = pwd.getpwuid(os.getuid()).pw_name short_path = config.get('shortpath', default_short_path) lab_name = config.get('laboratory', self.model_type) if os.path.isabs(lab_name): lab_path = lab_name else: user_name = config.get('user', default_user) lab_path = os.path.join(short_path, user_name, lab_name) return lab_path
[ "def", "get_default_lab_path", "(", "self", ",", "config", ")", ":", "# Default path settings", "# Append project name if present (NCI-specific)", "default_project", "=", "os", ".", "environ", ".", "get", "(", "'PROJECT'", ",", "''", ")", "default_short_path", "=", "os", ".", "path", ".", "join", "(", "'/short'", ",", "default_project", ")", "default_user", "=", "pwd", ".", "getpwuid", "(", "os", ".", "getuid", "(", ")", ")", ".", "pw_name", "short_path", "=", "config", ".", "get", "(", "'shortpath'", ",", "default_short_path", ")", "lab_name", "=", "config", ".", "get", "(", "'laboratory'", ",", "self", ".", "model_type", ")", "if", "os", ".", "path", ".", "isabs", "(", "lab_name", ")", ":", "lab_path", "=", "lab_name", "else", ":", "user_name", "=", "config", ".", "get", "(", "'user'", ",", "default_user", ")", "lab_path", "=", "os", ".", "path", ".", "join", "(", "short_path", ",", "user_name", ",", "lab_name", ")", "return", "lab_path" ]
Generate a default laboratory path based on user environment.
[ "Generate", "a", "default", "laboratory", "path", "based", "on", "user", "environment", "." ]
python
train
pypa/pipenv
pipenv/vendor/requirementslib/models/utils.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requirementslib/models/utils.py#L752-L788
def make_install_requirement(name, version, extras, markers, constraint=False): """ Generates an :class:`~pip._internal.req.req_install.InstallRequirement`. Create an InstallRequirement from the supplied metadata. :param name: The requirement's name. :type name: str :param version: The requirement version (must be pinned). :type version: str. :param extras: The desired extras. :type extras: list[str] :param markers: The desired markers, without a preceding semicolon. :type markers: str :param constraint: Whether to flag the requirement as a constraint, defaults to False. :param constraint: bool, optional :return: A generated InstallRequirement :rtype: :class:`~pip._internal.req.req_install.InstallRequirement` """ # If no extras are specified, the extras string is blank from pip_shims.shims import install_req_from_line extras_string = "" if extras: # Sort extras for stability extras_string = "[{}]".format(",".join(sorted(extras))) if not markers: return install_req_from_line( str("{}{}=={}".format(name, extras_string, version)), constraint=constraint ) else: return install_req_from_line( str("{}{}=={}; {}".format(name, extras_string, version, str(markers))), constraint=constraint, )
[ "def", "make_install_requirement", "(", "name", ",", "version", ",", "extras", ",", "markers", ",", "constraint", "=", "False", ")", ":", "# If no extras are specified, the extras string is blank", "from", "pip_shims", ".", "shims", "import", "install_req_from_line", "extras_string", "=", "\"\"", "if", "extras", ":", "# Sort extras for stability", "extras_string", "=", "\"[{}]\"", ".", "format", "(", "\",\"", ".", "join", "(", "sorted", "(", "extras", ")", ")", ")", "if", "not", "markers", ":", "return", "install_req_from_line", "(", "str", "(", "\"{}{}=={}\"", ".", "format", "(", "name", ",", "extras_string", ",", "version", ")", ")", ",", "constraint", "=", "constraint", ")", "else", ":", "return", "install_req_from_line", "(", "str", "(", "\"{}{}=={}; {}\"", ".", "format", "(", "name", ",", "extras_string", ",", "version", ",", "str", "(", "markers", ")", ")", ")", ",", "constraint", "=", "constraint", ",", ")" ]
Generates an :class:`~pip._internal.req.req_install.InstallRequirement`. Create an InstallRequirement from the supplied metadata. :param name: The requirement's name. :type name: str :param version: The requirement version (must be pinned). :type version: str. :param extras: The desired extras. :type extras: list[str] :param markers: The desired markers, without a preceding semicolon. :type markers: str :param constraint: Whether to flag the requirement as a constraint, defaults to False. :param constraint: bool, optional :return: A generated InstallRequirement :rtype: :class:`~pip._internal.req.req_install.InstallRequirement`
[ "Generates", "an", ":", "class", ":", "~pip", ".", "_internal", ".", "req", ".", "req_install", ".", "InstallRequirement", "." ]
python
train
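The line handed to install_req_from_line is a PEP 508 requirement string; the string assembly can be previewed without importing pip internals (the package name, extras, and marker below are invented):

name, version = 'requests', '2.19.1'
extras, markers = ['socks', 'security'], 'python_version < "3.0"'

extras_string = '[{}]'.format(','.join(sorted(extras))) if extras else ''
print('{}{}=={}'.format(name, extras_string, version))
# requests[security,socks]==2.19.1
print('{}{}=={}; {}'.format(name, extras_string, version, markers))
# requests[security,socks]==2.19.1; python_version < "3.0"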
intel-analytics/BigDL
pyspark/bigdl/transform/vision/image.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/transform/vision/image.py#L87-L92
def get_label(self): """ get label as ndarray from ImageFeature """ label = callBigDlFunc(self.bigdl_type, "imageFeatureToLabelTensor", self.value) return label.to_ndarray()
[ "def", "get_label", "(", "self", ")", ":", "label", "=", "callBigDlFunc", "(", "self", ".", "bigdl_type", ",", "\"imageFeatureToLabelTensor\"", ",", "self", ".", "value", ")", "return", "label", ".", "to_ndarray", "(", ")" ]
get label as ndarray from ImageFeature
[ "get", "label", "as", "ndarray", "from", "ImageFeature" ]
python
test
pip-services3-python/pip-services3-commons-python
pip_services3_commons/convert/BooleanConverter.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/convert/BooleanConverter.py#L63-L74
def to_boolean_with_default(value, default_value): """ Converts value into boolean or returns default value when conversion is not possible :param value: the value to convert. :param default_value: the default value :return: boolean value or default when conversion is not supported. """ result = BooleanConverter.to_nullable_boolean(value) return result if result != None else default_value
[ "def", "to_boolean_with_default", "(", "value", ",", "default_value", ")", ":", "result", "=", "BooleanConverter", ".", "to_nullable_boolean", "(", "value", ")", "return", "result", "if", "result", "!=", "None", "else", "default_value" ]
Converts value into boolean or returns default value when conversion is not possible :param value: the value to convert. :param default_value: the default value :return: boolean value or default when conversion is not supported.
[ "Converts", "value", "into", "boolean", "or", "returns", "default", "value", "when", "conversion", "is", "not", "possible" ]
python
train
michael-lazar/rtv
rtv/packages/praw/__init__.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L2380-L2395
def rename_multireddit(self, current_name, new_name, *args, **kwargs): """Rename a Multireddit. :param current_name: The name of the multireddit to rename :param new_name: The new name to assign to this multireddit The additional parameters are passed directly into :meth:`~praw.__init__.BaseReddit.request_json` """ current_path = self.MULTI_PATH.format(self.user.name, current_name) new_path = self.MULTI_PATH.format(self.user.name, new_name) data = {'from': current_path, 'to': new_path} return self.request_json(self.config['multireddit_rename'], data=data, *args, **kwargs)
[ "def", "rename_multireddit", "(", "self", ",", "current_name", ",", "new_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "current_path", "=", "self", ".", "MULTI_PATH", ".", "format", "(", "self", ".", "user", ".", "name", ",", "current_name", ")", "new_path", "=", "self", ".", "MULTI_PATH", ".", "format", "(", "self", ".", "user", ".", "name", ",", "new_name", ")", "data", "=", "{", "'from'", ":", "current_path", ",", "'to'", ":", "new_path", "}", "return", "self", ".", "request_json", "(", "self", ".", "config", "[", "'multireddit_rename'", "]", ",", "data", "=", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Rename a Multireddit. :param current_name: The name of the multireddit to rename :param new_name: The new name to assign to this multireddit The additional parameters are passed directly into :meth:`~praw.__init__.BaseReddit.request_json`
[ "Rename", "a", "Multireddit", "." ]
python
train
clalancette/pycdlib
pycdlib/pycdlib.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L5217-L5385
def add_symlink(self, symlink_path, rr_symlink_name=None, rr_path=None,
                joliet_path=None, udf_symlink_path=None, udf_target=None):
    # type: (str, Optional[str], Optional[str], Optional[str], Optional[str], Optional[str]) -> None
    '''
    Add a symlink from rr_symlink_name to the rr_path. The ISO must have
    either Rock Ridge or UDF support (or both).

    Parameters:
     symlink_path - The ISO9660 name of the symlink itself on the ISO.
     rr_symlink_name - The Rock Ridge name of the symlink itself on the ISO.
     rr_path - The Rock Ridge name of the entry on the ISO that the symlink
               points to.
     joliet_path - The Joliet name of the symlink (if this ISO has Joliet).
     udf_symlink_path - The UDF path of the symlink itself on the ISO.
     udf_target - The UDF name of the entry on the ISO that the symlink
                  points to.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    # There are actually quite a few combinations and rules to think about
    # here. Rules:
    #
    # 1. All symlinks must have an ISO9660 name.
    # 2. If the ISO is Rock Ridge, it must have a Rock Ridge name for the
    #    ISO9660 Directory Record (rr_symlink_name).
    # 3. Conversely, rr_symlink_name must not be provided for a
    #    non-Rock Ridge ISO.
    # 4. rr_path is the optional target for the symlink; if it is provided,
    #    then the ISO must be a Rock Ridge one.
    # 5. udf_symlink_path is the optional UDF name for the symlink; if it
    #    is provided, then this must be a UDF ISO and udf_target must also
    #    be provided.
    # 6. Conversely, if this is a non-UDF ISO, udf_symlink_path must not
    #    be provided.
    # 7. udf_target is the optional UDF target for the symlink; if it is
    #    provided, then this must be a UDF ISO and udf_symlink_path must
    #    also be provided.
    # 8. Conversely, if this is a non-UDF ISO, udf_target must not be
    #    provided.
    # 9. joliet_path is the optional path on the Joliet filesystem; if it
    #    is provided, the ISO must be a Joliet one.
    # 10. Conversely, if this is a non-Joliet ISO, joliet_path must not be
    #     provided.
    # 11. At least one of rr_path and the pair of
    #     udf_symlink_path, udf_target must be provided.

    if self.rock_ridge:
        # Rule 2
        if rr_symlink_name is None:
            raise pycdlibexception.PyCdlibInvalidInput('A Rock Ridge name must be passed for a Rock Ridge ISO')
    else:
        # Rule 3
        if rr_symlink_name is not None:
            raise pycdlibexception.PyCdlibInvalidInput('A Rock Ridge name can only be passed for a Rock Ridge ISO')

    if rr_path is not None:
        # Rule 4
        if not self.rock_ridge:
            raise pycdlibexception.PyCdlibInvalidInput('Can only add a symlink to a Rock Ridge or UDF ISO')

    if udf_symlink_path is not None:
        # Rule 5/6/7/8
        if not self._has_udf:
            raise pycdlibexception.PyCdlibInvalidInput('Can only add a UDF symlink to a UDF ISO')
        if udf_target is None:
            raise pycdlibexception.PyCdlibInvalidInput('A udf_target must be supplied along with a udf_symlink_path')

    if joliet_path is not None:
        # Rule 9/10
        if self.joliet_vd is None:
            raise pycdlibexception.PyCdlibInvalidInput('A Joliet path can only be specified for a Joliet ISO')

    if rr_path is None and udf_symlink_path is None:
        # Rule 11
        raise pycdlibexception.PyCdlibInvalidInput('At least one of a Rock Ridge or a UDF target must be specified')

    symlink_path_bytes = utils.normpath(symlink_path)
    (name, parent) = self._iso_name_and_parent_from_path(symlink_path_bytes)

    log_block_size = self.pvd.logical_block_size()

    # The ISO9660 directory record; this will be added in all cases.
    rec = dr.DirectoryRecord()

    num_bytes_to_add = 0

    if rr_path is not None and rr_symlink_name is not None:
        # We specifically do *not* normalize rr_path here, since that
        # potentially changes the meaning of what the user wanted.
        rr_symlink_name_bytes = rr_symlink_name.encode('utf-8')
        rec.new_symlink(self.pvd, name, parent, rr_path.encode('utf-8'),
                        self.pvd.sequence_number(), self.rock_ridge,
                        rr_symlink_name_bytes, self.xa)
        num_bytes_to_add += self._add_child_to_dr(rec, log_block_size)

        num_bytes_to_add += self._update_rr_ce_entry(rec)

    if udf_symlink_path is not None and udf_target is not None:
        # If we aren't making a Rock Ridge symlink at the same time, we need
        # to add a new zero-byte file to the ISO.
        if rr_path is None:
            rrname = name.decode('utf-8')
            if not self.rock_ridge:
                rrname = ''
            tmp_joliet_path = joliet_path
            if tmp_joliet_path is None:
                tmp_joliet_path = ''
            num_bytes_to_add += self._add_fp(None, 0, False, symlink_path,
                                             rrname, tmp_joliet_path, '',
                                             None, False)

        udf_symlink_path_bytes = utils.normpath(udf_symlink_path)

        # We specifically do *not* normalize udf_target here, since that
        # potentially changes the meaning of what the user wanted.
        (udf_name, udf_parent) = self._udf_name_and_parent_from_path(udf_symlink_path_bytes)

        file_ident = udfmod.UDFFileIdentifierDescriptor()
        file_ident.new(False, False, udf_name, udf_parent)
        num_new_extents = udf_parent.add_file_ident_desc(file_ident, log_block_size)
        num_bytes_to_add += num_new_extents * log_block_size

        # Generate the bytearray representing the symlink
        symlink_bytearray = udfmod.symlink_to_bytes(udf_target)

        # The UDF File Entry
        file_entry = udfmod.UDFFileEntry()
        file_entry.new(len(symlink_bytearray), 'symlink', udf_parent,
                       log_block_size)
        file_ident.file_entry = file_entry
        file_entry.file_ident = file_ident
        num_bytes_to_add += log_block_size
        num_bytes_to_add += file_entry.info_len

        # The inode for the symlink array.
        ino = inode.Inode()
        ino.new(len(symlink_bytearray), BytesIO(symlink_bytearray), False, 0)
        ino.linked_records.append(file_entry)
        ino.num_udf += 1
        file_entry.inode = ino
        self.inodes.append(ino)

        self.udf_logical_volume_integrity.logical_volume_impl_use.num_files += 1

        # Note that we explicitly do *not* link this record to the ISO9660
        # record; that's because there is no way to correlate them during
        # parse time. Instead, we treat them as individual entries, which
        # has the knock-on effect of requiring two operations to remove;
        # rm_file() to remove the ISO9660 record, and rm_hard_link() to
        # remove the UDF record.

    if joliet_path is not None:
        if self.joliet_vd is None:
            raise pycdlibexception.PyCdlibInternalError('Tried to add a Joliet path to a non-Joliet ISO')
        joliet_path_bytes = self._normalize_joliet_path(joliet_path)
        (joliet_name, joliet_parent) = self._joliet_name_and_parent_from_path(joliet_path_bytes)

        # Add in a "fake" symlink entry for Joliet.
        joliet_rec = dr.DirectoryRecord()
        joliet_rec.new_file(self.joliet_vd, 0, joliet_name, joliet_parent,
                            self.joliet_vd.sequence_number(), '', b'',
                            self.xa, -1)
        num_bytes_to_add += self._add_child_to_dr(joliet_rec,
                                                  self.joliet_vd.logical_block_size())

    self._finish_add(0, num_bytes_to_add)
[ "def", "add_symlink", "(", "self", ",", "symlink_path", ",", "rr_symlink_name", "=", "None", ",", "rr_path", "=", "None", ",", "joliet_path", "=", "None", ",", "udf_symlink_path", "=", "None", ",", "udf_target", "=", "None", ")", ":", "# type: (str, Optional[str], Optional[str], Optional[str], Optional[str], Optional[str]) -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'This object is not yet initialized; call either open() or new() to create an ISO'", ")", "# There are actually quite a few combinations and rules to think about", "# here. Rules:", "#", "# 1. All symlinks must have an ISO9660 name.", "# 2. If the ISO is Rock Ridge, it must have a Rock Ridge name for the", "# ISO9660 Directory Record (rr_symlink_name).", "# 3. Conversely, rr_symlink_name must not be provided for a", "# non-Rock Ridge ISO.", "# 4. rr_path is the optional target for the symlink; if it is provided,", "# then the ISO must be a Rock Ridge one.", "# 5. udf_symlink_path is the optional UDF name for the symlink; if it", "# is provided, then this must be a UDF ISO and udf_target must also", "# be provided.", "# 6. Conversely, if this is a non-UDF ISO, udf_symlink_path must not", "# be provided.", "# 7. udf_target is the optional UDF target for the symlink; if it is", "# provided, then this must be a UDF ISO and udf_symlink_path must", "# also be provided.", "# 8. Conversely, if this is a non-UDF ISO, udf_target must not be", "# provided.", "# 9. joliet_path is the optional path on the Joliet filesystem; if it", "# is provided, the ISO must be a Joliet one.", "# 10. Conversely, if this is a non-Joliet ISO, joliet_path must not be", "# provided.", "# 11. At least one of rr_path and the pair of", "# udf_symlink_path, udf_target must be provided.", "if", "self", ".", "rock_ridge", ":", "# Rule 2", "if", "rr_symlink_name", "is", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'A Rock Ridge name must be passed for a Rock Ridge ISO'", ")", "else", ":", "# Rule 3", "if", "rr_symlink_name", "is", "not", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'A Rock Ridge name can only be passed for a Rock Ridge ISO'", ")", "if", "rr_path", "is", "not", "None", ":", "# Rule 4", "if", "not", "self", ".", "rock_ridge", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'Can only add a symlink to a Rock Ridge or UDF ISO'", ")", "if", "udf_symlink_path", "is", "not", "None", ":", "# Rule 5/6/7/8", "if", "not", "self", ".", "_has_udf", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'Can only add a UDF symlink to a UDF ISO'", ")", "if", "udf_target", "is", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'A udf_target must be supplied along with a udf_symlink_path'", ")", "if", "joliet_path", "is", "not", "None", ":", "# Rule 9/10", "if", "self", ".", "joliet_vd", "is", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'A Joliet path can only be specified for a Joliet ISO'", ")", "if", "rr_path", "is", "None", "and", "udf_symlink_path", "is", "None", ":", "# Rule 11", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'At least one of a Rock Ridge or a UDF target must be specified'", ")", "symlink_path_bytes", "=", "utils", ".", "normpath", "(", "symlink_path", ")", "(", "name", ",", "parent", ")", "=", "self", ".", "_iso_name_and_parent_from_path", "(", "symlink_path_bytes", ")", "log_block_size", "=", "self", ".", "pvd", ".", "logical_block_size", "(", ")", "# The ISO9660 directory record; this will be added in all cases.", "rec", "=", "dr", ".", "DirectoryRecord", "(", ")", "num_bytes_to_add", "=", "0", "if", "rr_path", "is", "not", "None", "and", "rr_symlink_name", "is", "not", "None", ":", "# We specifically do *not* normalize rr_path here, since that", "# potentially changes the meaning of what the user wanted.", "rr_symlink_name_bytes", "=", "rr_symlink_name", ".", "encode", "(", "'utf-8'", ")", "rec", ".", "new_symlink", "(", "self", ".", "pvd", ",", "name", ",", "parent", ",", "rr_path", ".", "encode", "(", "'utf-8'", ")", ",", "self", ".", "pvd", ".", "sequence_number", "(", ")", ",", "self", ".", "rock_ridge", ",", "rr_symlink_name_bytes", ",", "self", ".", "xa", ")", "num_bytes_to_add", "+=", "self", ".", "_add_child_to_dr", "(", "rec", ",", "log_block_size", ")", "num_bytes_to_add", "+=", "self", ".", "_update_rr_ce_entry", "(", "rec", ")", "if", "udf_symlink_path", "is", "not", "None", "and", "udf_target", "is", "not", "None", ":", "# If we aren't making a Rock Ridge symlink at the same time, we need", "# to add a new zero-byte file to the ISO.", "if", "rr_path", "is", "None", ":", "rrname", "=", "name", ".", "decode", "(", "'utf-8'", ")", "if", "not", "self", ".", "rock_ridge", ":", "rrname", "=", "''", "tmp_joliet_path", "=", "joliet_path", "if", "tmp_joliet_path", "is", "None", ":", "tmp_joliet_path", "=", "''", "num_bytes_to_add", "+=", "self", ".", "_add_fp", "(", "None", ",", "0", ",", "False", ",", "symlink_path", ",", "rrname", ",", "tmp_joliet_path", ",", "''", ",", "None", ",", "False", ")", "udf_symlink_path_bytes", "=", "utils", ".", "normpath", "(", "udf_symlink_path", ")", "# We specifically do *not* normalize udf_target here, since that", "# potentially changes the meaning of what the user wanted.", "(", "udf_name", ",", "udf_parent", ")", "=", "self", ".", "_udf_name_and_parent_from_path", "(", "udf_symlink_path_bytes", ")", "file_ident", "=", "udfmod", ".", "UDFFileIdentifierDescriptor", "(", ")", "file_ident", ".", "new", "(", "False", ",", "False", ",", "udf_name", ",", "udf_parent", ")", "num_new_extents", "=", "udf_parent", ".", "add_file_ident_desc", "(", "file_ident", ",", "log_block_size", ")", "num_bytes_to_add", "+=", "num_new_extents", "*", "log_block_size", "# Generate the bytearray representing the symlink", "symlink_bytearray", "=", "udfmod", ".", "symlink_to_bytes", "(", "udf_target", ")", "# The UDF File Entry", "file_entry", "=", "udfmod", ".", "UDFFileEntry", "(", ")", "file_entry", ".", "new", "(", "len", "(", "symlink_bytearray", ")", ",", "'symlink'", ",", "udf_parent", ",", "log_block_size", ")", "file_ident", ".", "file_entry", "=", "file_entry", "file_entry", ".", "file_ident", "=", "file_ident", "num_bytes_to_add", "+=", "log_block_size", "num_bytes_to_add", "+=", "file_entry", ".", "info_len", "# The inode for the symlink array.", "ino", "=", "inode", ".", "Inode", "(", ")", "ino", ".", "new", "(", "len", "(", "symlink_bytearray", ")", ",", "BytesIO", "(", "symlink_bytearray", ")", ",", "False", ",", "0", ")", "ino", ".", "linked_records", ".", "append", "(", "file_entry", ")", "ino", ".", "num_udf", "+=", "1", "file_entry", ".", "inode", "=", "ino", "self", ".", "inodes", ".", "append", "(", "ino", ")", "self", ".", "udf_logical_volume_integrity", ".", "logical_volume_impl_use", ".", "num_files", "+=", "1", "# Note that we explicitly do *not* link this record to the ISO9660", "# record; that's because there is no way to correlate them during", "# parse time. Instead, we treat them as individual entries, which", "# has the knock-on effect of requiring two operations to remove;", "# rm_file() to remove the ISO9660 record, and rm_hard_link() to", "# remove the UDF record.", "if", "joliet_path", "is", "not", "None", ":", "if", "self", ".", "joliet_vd", "is", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'Tried to add a Joliet path to a non-Joliet ISO'", ")", "joliet_path_bytes", "=", "self", ".", "_normalize_joliet_path", "(", "joliet_path", ")", "(", "joliet_name", ",", "joliet_parent", ")", "=", "self", ".", "_joliet_name_and_parent_from_path", "(", "joliet_path_bytes", ")", "# Add in a \"fake\" symlink entry for Joliet.", "joliet_rec", "=", "dr", ".", "DirectoryRecord", "(", ")", "joliet_rec", ".", "new_file", "(", "self", ".", "joliet_vd", ",", "0", ",", "joliet_name", ",", "joliet_parent", ",", "self", ".", "joliet_vd", ".", "sequence_number", "(", ")", ",", "''", ",", "b''", ",", "self", ".", "xa", ",", "-", "1", ")", "num_bytes_to_add", "+=", "self", ".", "_add_child_to_dr", "(", "joliet_rec", ",", "self", ".", "joliet_vd", ".", "logical_block_size", "(", ")", ")", "self", ".", "_finish_add", "(", "0", ",", "num_bytes_to_add", ")" ]
Add a symlink from rr_symlink_name to the rr_path. The ISO must have
either Rock Ridge or UDF support (or both).

Parameters:
 symlink_path - The ISO9660 name of the symlink itself on the ISO.
 rr_symlink_name - The Rock Ridge name of the symlink itself on the ISO.
 rr_path - The Rock Ridge name of the entry on the ISO that the symlink
           points to.
 joliet_path - The Joliet name of the symlink (if this ISO has Joliet).
 udf_symlink_path - The UDF path of the symlink itself on the ISO.
 udf_target - The UDF name of the entry on the ISO that the symlink
              points to.
Returns:
 Nothing.
[ "Add", "a", "symlink", "from", "rr_symlink_name", "to", "the", "rr_path", ".", "The", "ISO", "must", "have", "either", "Rock", "Ridge", "or", "UDF", "support", "(", "or", "both", ")", "." ]
python
train
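A minimal usage sketch for the add_symlink record above, assuming the pycdlib package is installed. The call names follow pycdlib's public API; the file and symlink names are made up for illustration.

# Sketch: create a Rock Ridge ISO containing one file and a symlink to it.
# Assumes `pip install pycdlib`; all paths/names here are hypothetical.
from io import BytesIO

import pycdlib

iso = pycdlib.PyCdlib()
iso.new(rock_ridge='1.09')  # rr_symlink_name/rr_path require a Rock Ridge ISO

# The symlink needs something to point at, so add a real file first.
iso.add_fp(BytesIO(b'hello\n'), 6, '/FOO.;1', rr_name='foo')

# ISO9660 name '/SYM.;1', Rock Ridge name 'sym', pointing at 'foo'.
iso.add_symlink('/SYM.;1', rr_symlink_name='sym', rr_path='foo')

iso.write('symlink.iso')
iso.close()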
mozilla/socorrolib
socorrolib/lib/ooid.py
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/ooid.py#L14-L26
def createNewOoid(timestamp=None, depth=None):
    """Create a new Ooid for a given time, to be stored at a given depth
    timestamp: the year-month-day is encoded in the ooid. If None, use current day
    depth: the expected storage depth is encoded in the ooid. If None, use the defaultDepth
    returns a new opaque id string holding 24 random hex digits and encoded date and depth info
    """
    if not timestamp:
        timestamp = utc_now().date()
    if not depth:
        depth = defaultDepth
    assert depth <= 4 and depth >= 1
    uuid = str(uu.uuid4())
    return "%s%d%02d%02d%02d" % (uuid[:-7], depth, timestamp.year % 100, timestamp.month, timestamp.day)
[ "def", "createNewOoid", "(", "timestamp", "=", "None", ",", "depth", "=", "None", ")", ":", "if", "not", "timestamp", ":", "timestamp", "=", "utc_now", "(", ")", ".", "date", "(", ")", "if", "not", "depth", ":", "depth", "=", "defaultDepth", "assert", "depth", "<=", "4", "and", "depth", ">=", "1", "uuid", "=", "str", "(", "uu", ".", "uuid4", "(", ")", ")", "return", "\"%s%d%02d%02d%02d\"", "%", "(", "uuid", "[", ":", "-", "7", "]", ",", "depth", ",", "timestamp", ".", "year", "%", "100", ",", "timestamp", ".", "month", ",", "timestamp", ".", "day", ")" ]
Create a new Ooid for a given time, to be stored at a given depth
timestamp: the year-month-day is encoded in the ooid. If None, use current day
depth: the expected storage depth is encoded in the ooid. If None, use the defaultDepth
returns a new opaque id string holding 24 random hex digits and encoded date and depth info
[ "Create", "a", "new", "Ooid", "for", "a", "given", "time", "to", "be", "stored", "at", "a", "given", "depth", "timestamp", ":", "the", "year", "-", "month", "-", "day", "is", "encoded", "in", "the", "ooid", ".", "If", "none", "use", "current", "day", "depth", ":", "the", "expected", "storage", "depth", "is", "encoded", "in", "the", "ooid", ".", "If", "non", "use", "the", "defaultDepth", "returns", "a", "new", "opaque", "id", "string", "holding", "24", "random", "hex", "digits", "and", "encoded", "date", "and", "depth", "info" ]
python
train
couchbase/couchbase-python-client
couchbase/exceptions.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/exceptions.py#L754-L767
def exc_from_rc(rc, msg=None, obj=None): """ .. warning:: INTERNAL For those rare cases when an exception needs to be thrown from Python using a libcouchbase error code. :param rc: The error code :param msg: Message (description) :param obj: Context :return: a raisable exception """ newcls = CouchbaseError.rc_to_exctype(rc) return newcls(params={'rc': rc, 'objextra': obj, 'message': msg})
[ "def", "exc_from_rc", "(", "rc", ",", "msg", "=", "None", ",", "obj", "=", "None", ")", ":", "newcls", "=", "CouchbaseError", ".", "rc_to_exctype", "(", "rc", ")", "return", "newcls", "(", "params", "=", "{", "'rc'", ":", "rc", ",", "'objextra'", ":", "obj", ",", "'message'", ":", "msg", "}", ")" ]
.. warning:: INTERNAL

For those rare cases when an exception needs to be thrown from
Python using a libcouchbase error code.

:param rc: The error code
:param msg: Message (description)
:param obj: Context
:return: a raisable exception
[ "..", "warning", "::", "INTERNAL" ]
python
train
ajenti/jadi
jadi/jadi.py
https://github.com/ajenti/jadi/blob/db76e1c5330672d282f03787fedcd702c04b007f/jadi/jadi.py#L141-L175
def component(iface): ''' Marks the decorated class as a component implementing the given ``iface`` :param iface: the interface to implement :type iface: :func:`interface` ''' def decorator(cls): if not cls: return None # Run custom verificator if any if hasattr(cls, '__verify__'): if not cls.__verify__(): return None if not hasattr(iface, 'implementations'): log.error('%s is not an @interface', iface) log.debug( 'Registering [%s] (implementation of [%s])' % ( get_fqdn(cls), get_fqdn(iface) ) ) iface.implementations.append(cls) def get(cls, context): return context.get_component(cls) cls.get = get.__get__(cls) return cls return decorator
[ "def", "component", "(", "iface", ")", ":", "def", "decorator", "(", "cls", ")", ":", "if", "not", "cls", ":", "return", "None", "# Run custom verificator if any", "if", "hasattr", "(", "cls", ",", "'__verify__'", ")", ":", "if", "not", "cls", ".", "__verify__", "(", ")", ":", "return", "None", "if", "not", "hasattr", "(", "iface", ",", "'implementations'", ")", ":", "log", ".", "error", "(", "'%s is not an @interface'", ",", "iface", ")", "log", ".", "debug", "(", "'Registering [%s] (implementation of [%s])'", "%", "(", "get_fqdn", "(", "cls", ")", ",", "get_fqdn", "(", "iface", ")", ")", ")", "iface", ".", "implementations", ".", "append", "(", "cls", ")", "def", "get", "(", "cls", ",", "context", ")", ":", "return", "context", ".", "get_component", "(", "cls", ")", "cls", ".", "get", "=", "get", ".", "__get__", "(", "cls", ")", "return", "cls", "return", "decorator" ]
Marks the decorated class as a component implementing the given ``iface``

:param iface: the interface to implement
:type iface: :func:`interface`
[ "Marks", "the", "decorated", "class", "as", "a", "component", "implementing", "the", "given", "iface" ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/bson/json_util.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/json_util.py#L634-L639
def _parse_canonical_code(doc): """Decode a JSON code to bson.code.Code.""" for key in doc: if key not in ('$code', '$scope'): raise TypeError('Bad $code, extra field(s): %s' % (doc,)) return Code(doc['$code'], scope=doc.get('$scope'))
[ "def", "_parse_canonical_code", "(", "doc", ")", ":", "for", "key", "in", "doc", ":", "if", "key", "not", "in", "(", "'$code'", ",", "'$scope'", ")", ":", "raise", "TypeError", "(", "'Bad $code, extra field(s): %s'", "%", "(", "doc", ",", ")", ")", "return", "Code", "(", "doc", "[", "'$code'", "]", ",", "scope", "=", "doc", ".", "get", "(", "'$scope'", ")", ")" ]
Decode a JSON code to bson.code.Code.
[ "Decode", "a", "JSON", "code", "to", "bson", ".", "code", ".", "Code", "." ]
python
train
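A hedged sketch exercising the helper above via upstream pymongo's bson package (the origin of this vendored copy). _parse_canonical_code is private, so calling it directly is illustrative rather than supported usage.

# Sketch: decode a canonical extended-JSON $code document into bson.code.Code.
# Assumes upstream pymongo (>= 3.5) is installed.
from bson.code import Code
from bson.json_util import _parse_canonical_code

code = _parse_canonical_code({'$code': 'function() { return x; }',
                              '$scope': {'x': 1}})
assert isinstance(code, Code)
assert code.scope == {'x': 1}

# Any key other than $code/$scope makes it a bad document and raises TypeError.
try:
    _parse_canonical_code({'$code': 'function() {}', 'bogus': 1})
except TypeError as exc:
    print(exc)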