Dataset schema (one field per line in each row below):
  repo              stringlengths 7–54
  path              stringlengths 4–192
  url               stringlengths 87–284
  code              stringlengths 78–104k
  code_tokens       list
  docstring         stringlengths 1–46.9k
  docstring_tokens  list
  language          stringclasses (1 value)
  partition         stringclasses (3 values)
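For orientation only, here is a minimal sketch of how rows with this schema might be read. The field names come from the schema above; the JSON Lines layout and the filename are assumptions for illustration, not part of the dataset dump itself.

import json

# Hypothetical reader: assumes one JSON object per line with the fields
# listed in the schema (repo, path, url, code, code_tokens, docstring,
# docstring_tokens, language, partition). The filename is illustrative.
with open("rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # Each row pairs a function's source ('code') with its 'docstring',
        # plus provenance ('repo', 'path', 'url') and a train/valid/test split.
        print(row["repo"], row["path"], row["partition"])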
sampottinger/pycotracer
pycotracer/mongo_aggregator.py
https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/mongo_aggregator.py#L200-L213
def insert_loan_entries(database, entries): """Insert a set of records of a loan report in the provided database. Insert a set of new records into the provided database without checking for conflicting entries. @param db: The MongoDB database to operate on. The loans collection will be used from this database. @type db: pymongo.database.Database @param entries: The entries to insert into the database. @type entries: dict """ entries = map(clean_entry, entries) database.loans.insert(entries, continue_on_error=True)
[ "def", "insert_loan_entries", "(", "database", ",", "entries", ")", ":", "entries", "=", "map", "(", "clean_entry", ",", "entries", ")", "database", ".", "loans", ".", "insert", "(", "entries", ",", "continue_on_error", "=", "True", ")" ]
Insert a set of records of a loan report in the provided database. Insert a set of new records into the provided database without checking for conflicting entries. @param db: The MongoDB database to operate on. The loans collection will be used from this database. @type db: pymongo.database.Database @param entries: The entries to insert into the database. @type entries: dict
[ "Insert", "a", "set", "of", "records", "of", "a", "loan", "report", "in", "the", "provided", "database", "." ]
python
train
Miserlou/Zappa
zappa/core.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L2076-L2096
def delete_stack(self, name, wait=False): """ Delete the CF stack managed by Zappa. """ try: stack = self.cf_client.describe_stacks(StackName=name)['Stacks'][0] except: # pragma: no cover print('No Zappa stack named {0}'.format(name)) return False tags = {x['Key']:x['Value'] for x in stack['Tags']} if tags.get('ZappaProject') == name: self.cf_client.delete_stack(StackName=name) if wait: waiter = self.cf_client.get_waiter('stack_delete_complete') print('Waiting for stack {0} to be deleted..'.format(name)) waiter.wait(StackName=name) return True else: print('ZappaProject tag not found on {0}, doing nothing'.format(name)) return False
[ "def", "delete_stack", "(", "self", ",", "name", ",", "wait", "=", "False", ")", ":", "try", ":", "stack", "=", "self", ".", "cf_client", ".", "describe_stacks", "(", "StackName", "=", "name", ")", "[", "'Stacks'", "]", "[", "0", "]", "except", ":", "# pragma: no cover", "print", "(", "'No Zappa stack named {0}'", ".", "format", "(", "name", ")", ")", "return", "False", "tags", "=", "{", "x", "[", "'Key'", "]", ":", "x", "[", "'Value'", "]", "for", "x", "in", "stack", "[", "'Tags'", "]", "}", "if", "tags", ".", "get", "(", "'ZappaProject'", ")", "==", "name", ":", "self", ".", "cf_client", ".", "delete_stack", "(", "StackName", "=", "name", ")", "if", "wait", ":", "waiter", "=", "self", ".", "cf_client", ".", "get_waiter", "(", "'stack_delete_complete'", ")", "print", "(", "'Waiting for stack {0} to be deleted..'", ".", "format", "(", "name", ")", ")", "waiter", ".", "wait", "(", "StackName", "=", "name", ")", "return", "True", "else", ":", "print", "(", "'ZappaProject tag not found on {0}, doing nothing'", ".", "format", "(", "name", ")", ")", "return", "False" ]
Delete the CF stack managed by Zappa.
[ "Delete", "the", "CF", "stack", "managed", "by", "Zappa", "." ]
python
train
IAMconsortium/pyam
pyam/core.py
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1334-L1338
def _aggregate(df, by): """Aggregate `df` by specified column(s), return indexed `pd.Series`""" by = [by] if isstr(by) else by cols = [c for c in list(df.columns) if c not in ['value'] + by] return df.groupby(cols).sum()['value']
[ "def", "_aggregate", "(", "df", ",", "by", ")", ":", "by", "=", "[", "by", "]", "if", "isstr", "(", "by", ")", "else", "by", "cols", "=", "[", "c", "for", "c", "in", "list", "(", "df", ".", "columns", ")", "if", "c", "not", "in", "[", "'value'", "]", "+", "by", "]", "return", "df", ".", "groupby", "(", "cols", ")", ".", "sum", "(", ")", "[", "'value'", "]" ]
Aggregate `df` by specified column(s), return indexed `pd.Series`
[ "Aggregate", "df", "by", "specified", "column", "(", "s", ")", "return", "indexed", "pd", ".", "Series" ]
python
train
UCL-INGI/INGInious
inginious/frontend/template_helper.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/template_helper.py#L148-L151
def _generic_hook(self, name, **kwargs): """ A generic hook that links the TemplateHelper with PluginManager """ entries = [entry for entry in self._plugin_manager.call_hook(name, **kwargs) if entry is not None] return "\n".join(entries)
[ "def", "_generic_hook", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "entries", "=", "[", "entry", "for", "entry", "in", "self", ".", "_plugin_manager", ".", "call_hook", "(", "name", ",", "*", "*", "kwargs", ")", "if", "entry", "is", "not", "None", "]", "return", "\"\\n\"", ".", "join", "(", "entries", ")" ]
A generic hook that links the TemplateHelper with PluginManager
[ "A", "generic", "hook", "that", "links", "the", "TemplateHelper", "with", "PluginManager" ]
python
train
jsommers/switchyard
switchyard/lib/packet/packet.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/packet/packet.py#L29-L40
def to_bytes(self): ''' Returns serialized bytes object representing all headers/ payloads in this packet''' rawlist = [] i = len(self._headers)-1 while i >= 0: self._headers[i].pre_serialize(b''.join(rawlist), self, i) rawlist.insert(0, self._headers[i].to_bytes()) i -= 1 self._raw = b''.join(rawlist) return self._raw
[ "def", "to_bytes", "(", "self", ")", ":", "rawlist", "=", "[", "]", "i", "=", "len", "(", "self", ".", "_headers", ")", "-", "1", "while", "i", ">=", "0", ":", "self", ".", "_headers", "[", "i", "]", ".", "pre_serialize", "(", "b''", ".", "join", "(", "rawlist", ")", ",", "self", ",", "i", ")", "rawlist", ".", "insert", "(", "0", ",", "self", ".", "_headers", "[", "i", "]", ".", "to_bytes", "(", ")", ")", "i", "-=", "1", "self", ".", "_raw", "=", "b''", ".", "join", "(", "rawlist", ")", "return", "self", ".", "_raw" ]
Returns serialized bytes object representing all headers/ payloads in this packet
[ "Returns", "serialized", "bytes", "object", "representing", "all", "headers", "/", "payloads", "in", "this", "packet" ]
python
train
jmoiron/johnny-cache
johnny/cache.py
https://github.com/jmoiron/johnny-cache/blob/d96ea94c5dfcde517ff8f65d6ba4e435d8a0168c/johnny/cache.py#L178-L183
def gen_multi_key(self, values, db='default'): """Takes a list of generations (not table keys) and returns a key.""" db = settings.DB_CACHE_KEYS[db] if db and len(db) > 100: db = db[0:68] + self.gen_key(db[68:]) return '%s_%s_multi_%s' % (self.prefix, db, self.gen_key(*values))
[ "def", "gen_multi_key", "(", "self", ",", "values", ",", "db", "=", "'default'", ")", ":", "db", "=", "settings", ".", "DB_CACHE_KEYS", "[", "db", "]", "if", "db", "and", "len", "(", "db", ")", ">", "100", ":", "db", "=", "db", "[", "0", ":", "68", "]", "+", "self", ".", "gen_key", "(", "db", "[", "68", ":", "]", ")", "return", "'%s_%s_multi_%s'", "%", "(", "self", ".", "prefix", ",", "db", ",", "self", ".", "gen_key", "(", "*", "values", ")", ")" ]
Takes a list of generations (not table keys) and returns a key.
[ "Takes", "a", "list", "of", "generations", "(", "not", "table", "keys", ")", "and", "returns", "a", "key", "." ]
python
train
NASA-AMMOS/AIT-Core
ait/core/val.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/val.py#L420-L507
def content_val(self, ymldata=None, messages=None): """Validates the Command Dictionary to ensure the contents for each of the fields meets specific criteria regarding the expected types, byte ranges, etc.""" self._ymlproc = YAMLProcessor(self._ymlfile, False) # Turn off the YAML Processor log.debug("BEGIN: Content-based validation of Command dictionary") if ymldata is not None: cmddict = ymldata elif ymldata is None and self._ymlproc.loaded: cmddict = self._ymlproc.data elif not self._ymlproc.loaded: raise util.YAMLError("YAML failed to load.") try: # instantiate the document number. this will increment in order to # track the line numbers and section where validation fails docnum = 0 # boolean to hold argument validity argsvalid = True # list of rules to validate against rules = [] ### set the command rules # # set uniqueness rule for command names rules.append(UniquenessRule('name', "Duplicate command name: %s", messages)) # set uniqueness rule for opcodes rules.append(UniquenessRule('opcode', "Duplicate opcode: %s", messages)) # ### for cmdcnt, cmddefn in enumerate(cmddict[0]): # check the command rules for rule in rules: rule.check(cmddefn) # list of argument rules to validate against argrules = [] ### set rules for command arguments # # set uniqueness rule for opcodes argrules.append(UniquenessRule('name', "Duplicate argument name: " + cmddefn.name + ".%s", messages)) # set type rule for arg.type argrules.append(TypeRule('type', "Invalid argument type for argument: " + cmddefn.name + ".%s", messages)) # set argument size rule for arg.type.nbytes argrules.append(TypeSizeRule('nbytes', "Invalid argument size for argument: " + cmddefn.name + ".%s", messages)) # set argument enumerations rule to check no enumerations contain un-quoted YAML special variables argrules.append(EnumRule('enum', "Invalid enum value for argument: " + cmddefn.name + ".%s", messages)) # set byte order rule to ensure proper ordering of aruguments argrules.append(ByteOrderRule('bytes', "Invalid byte order for argument: " + cmddefn.name + ".%s", messages)) # ### argdefns = cmddefn.argdefns for arg in argdefns: # check argument rules for rule in argrules: rule.check(arg) # check if argument rule failed, if so set the validity to False if not all(r.valid is True for r in argrules): argsvalid = False log.debug("END: Content-based validation complete for '%s'", self._ymlfile) # check validity of all command rules and argument validity return all(rule.valid is True for rule in rules) and argsvalid except util.YAMLValidationError, e: # Display the error message if messages is not None: if len(e.message) < 128: msg = "Validation Failed for YAML file '" + self._ymlfile + "': '" + str(e.message) + "'" else: msg = "Validation Failed for YAML file '" + self._ymlfile + "'" log.error(msg) self.ehandler.process(docnum, self.ehandler.doclines, e, messages) return False
[ "def", "content_val", "(", "self", ",", "ymldata", "=", "None", ",", "messages", "=", "None", ")", ":", "self", ".", "_ymlproc", "=", "YAMLProcessor", "(", "self", ".", "_ymlfile", ",", "False", ")", "# Turn off the YAML Processor", "log", ".", "debug", "(", "\"BEGIN: Content-based validation of Command dictionary\"", ")", "if", "ymldata", "is", "not", "None", ":", "cmddict", "=", "ymldata", "elif", "ymldata", "is", "None", "and", "self", ".", "_ymlproc", ".", "loaded", ":", "cmddict", "=", "self", ".", "_ymlproc", ".", "data", "elif", "not", "self", ".", "_ymlproc", ".", "loaded", ":", "raise", "util", ".", "YAMLError", "(", "\"YAML failed to load.\"", ")", "try", ":", "# instantiate the document number. this will increment in order to", "# track the line numbers and section where validation fails", "docnum", "=", "0", "# boolean to hold argument validity", "argsvalid", "=", "True", "# list of rules to validate against", "rules", "=", "[", "]", "### set the command rules", "#", "# set uniqueness rule for command names", "rules", ".", "append", "(", "UniquenessRule", "(", "'name'", ",", "\"Duplicate command name: %s\"", ",", "messages", ")", ")", "# set uniqueness rule for opcodes", "rules", ".", "append", "(", "UniquenessRule", "(", "'opcode'", ",", "\"Duplicate opcode: %s\"", ",", "messages", ")", ")", "#", "###", "for", "cmdcnt", ",", "cmddefn", "in", "enumerate", "(", "cmddict", "[", "0", "]", ")", ":", "# check the command rules", "for", "rule", "in", "rules", ":", "rule", ".", "check", "(", "cmddefn", ")", "# list of argument rules to validate against", "argrules", "=", "[", "]", "### set rules for command arguments", "#", "# set uniqueness rule for opcodes", "argrules", ".", "append", "(", "UniquenessRule", "(", "'name'", ",", "\"Duplicate argument name: \"", "+", "cmddefn", ".", "name", "+", "\".%s\"", ",", "messages", ")", ")", "# set type rule for arg.type", "argrules", ".", "append", "(", "TypeRule", "(", "'type'", ",", "\"Invalid argument type for argument: \"", "+", "cmddefn", ".", "name", "+", "\".%s\"", ",", "messages", ")", ")", "# set argument size rule for arg.type.nbytes", "argrules", ".", "append", "(", "TypeSizeRule", "(", "'nbytes'", ",", "\"Invalid argument size for argument: \"", "+", "cmddefn", ".", "name", "+", "\".%s\"", ",", "messages", ")", ")", "# set argument enumerations rule to check no enumerations contain un-quoted YAML special variables", "argrules", ".", "append", "(", "EnumRule", "(", "'enum'", ",", "\"Invalid enum value for argument: \"", "+", "cmddefn", ".", "name", "+", "\".%s\"", ",", "messages", ")", ")", "# set byte order rule to ensure proper ordering of aruguments", "argrules", ".", "append", "(", "ByteOrderRule", "(", "'bytes'", ",", "\"Invalid byte order for argument: \"", "+", "cmddefn", ".", "name", "+", "\".%s\"", ",", "messages", ")", ")", "#", "###", "argdefns", "=", "cmddefn", ".", "argdefns", "for", "arg", "in", "argdefns", ":", "# check argument rules", "for", "rule", "in", "argrules", ":", "rule", ".", "check", "(", "arg", ")", "# check if argument rule failed, if so set the validity to False", "if", "not", "all", "(", "r", ".", "valid", "is", "True", "for", "r", "in", "argrules", ")", ":", "argsvalid", "=", "False", "log", ".", "debug", "(", "\"END: Content-based validation complete for '%s'\"", ",", "self", ".", "_ymlfile", ")", "# check validity of all command rules and argument validity", "return", "all", "(", "rule", ".", "valid", "is", "True", "for", "rule", "in", "rules", ")", "and", "argsvalid", "except", "util", ".", 
"YAMLValidationError", ",", "e", ":", "# Display the error message", "if", "messages", "is", "not", "None", ":", "if", "len", "(", "e", ".", "message", ")", "<", "128", ":", "msg", "=", "\"Validation Failed for YAML file '\"", "+", "self", ".", "_ymlfile", "+", "\"': '\"", "+", "str", "(", "e", ".", "message", ")", "+", "\"'\"", "else", ":", "msg", "=", "\"Validation Failed for YAML file '\"", "+", "self", ".", "_ymlfile", "+", "\"'\"", "log", ".", "error", "(", "msg", ")", "self", ".", "ehandler", ".", "process", "(", "docnum", ",", "self", ".", "ehandler", ".", "doclines", ",", "e", ",", "messages", ")", "return", "False" ]
Validates the Command Dictionary to ensure the contents for each of the fields meets specific criteria regarding the expected types, byte ranges, etc.
[ "Validates", "the", "Command", "Dictionary", "to", "ensure", "the", "contents", "for", "each", "of", "the", "fields", "meets", "specific", "criteria", "regarding", "the", "expected", "types", "byte", "ranges", "etc", "." ]
python
train
pyvisa/pyvisa
pyvisa/rname.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/rname.py#L416-L488
def filter(resources, query): """Filter a list of resources according to a query expression. The search criteria specified in the query parameter has two parts: 1. a VISA regular expression over a resource string. 2. optional logical expression over attribute values (not implemented in this function, see below). .. note: The VISA regular expression syntax is not the same as the Python regular expression syntax. (see below) The regular expression is matched against the resource strings of resources known to the VISA Resource Manager. If the resource string matches the regular expression, the attribute values of the resource are then matched against the expression over attribute values. If the match is successful, the resource has met the search criteria and gets added to the list of resources found. By using the optional attribute expression, you can construct flexible and powerful expressions with the use of logical ANDs (&&), ORs(||), and NOTs (!). You can use equal (==) and unequal (!=) comparators to compare attributes of any type, and other inequality comparators (>, <, >=, <=) to compare attributes of numeric type. Use only global attributes in the attribute expression. Local attributes are not allowed in the logical expression part of the expr parameter. Symbol Meaning ---------- ---------- ? Matches any one character. \ Makes the character that follows it an ordinary character instead of special character. For example, when a question mark follows a backslash (\?), it matches the ? character instead of any one character. [list] Matches any one character from the enclosed list. You can use a hyphen to match a range of characters. [^list] Matches any character not in the enclosed list. You can use a hyphen to match a range of characters. * Matches 0 or more occurrences of the preceding character or expression. + Matches 1 or more occurrences of the preceding character or expression. Exp|exp Matches either the preceding or following expression. The or operator | matches the entire expression that precedes or follows it and not just the character that precedes or follows it. For example, VXI|GPIB means (VXI)|(GPIB), not VX(I|G)PIB. (exp) Grouping characters or expressions. :param resources: iterable of resources. :param query: query expression. """ if '{' in query: query, _ = query.split('{') logger.warning('optional part of the query expression not supported. ' 'See filter2') try: query = query.replace('?', '.') matcher = re.compile(query, re.IGNORECASE) except re.error: raise errors.VisaIOError(constants.VI_ERROR_INV_EXPR) return tuple(res for res in resources if matcher.match(res))
[ "def", "filter", "(", "resources", ",", "query", ")", ":", "if", "'{'", "in", "query", ":", "query", ",", "_", "=", "query", ".", "split", "(", "'{'", ")", "logger", ".", "warning", "(", "'optional part of the query expression not supported. '", "'See filter2'", ")", "try", ":", "query", "=", "query", ".", "replace", "(", "'?'", ",", "'.'", ")", "matcher", "=", "re", ".", "compile", "(", "query", ",", "re", ".", "IGNORECASE", ")", "except", "re", ".", "error", ":", "raise", "errors", ".", "VisaIOError", "(", "constants", ".", "VI_ERROR_INV_EXPR", ")", "return", "tuple", "(", "res", "for", "res", "in", "resources", "if", "matcher", ".", "match", "(", "res", ")", ")" ]
Filter a list of resources according to a query expression. The search criteria specified in the query parameter has two parts: 1. a VISA regular expression over a resource string. 2. optional logical expression over attribute values (not implemented in this function, see below). .. note: The VISA regular expression syntax is not the same as the Python regular expression syntax. (see below) The regular expression is matched against the resource strings of resources known to the VISA Resource Manager. If the resource string matches the regular expression, the attribute values of the resource are then matched against the expression over attribute values. If the match is successful, the resource has met the search criteria and gets added to the list of resources found. By using the optional attribute expression, you can construct flexible and powerful expressions with the use of logical ANDs (&&), ORs(||), and NOTs (!). You can use equal (==) and unequal (!=) comparators to compare attributes of any type, and other inequality comparators (>, <, >=, <=) to compare attributes of numeric type. Use only global attributes in the attribute expression. Local attributes are not allowed in the logical expression part of the expr parameter. Symbol Meaning ---------- ---------- ? Matches any one character. \ Makes the character that follows it an ordinary character instead of special character. For example, when a question mark follows a backslash (\?), it matches the ? character instead of any one character. [list] Matches any one character from the enclosed list. You can use a hyphen to match a range of characters. [^list] Matches any character not in the enclosed list. You can use a hyphen to match a range of characters. * Matches 0 or more occurrences of the preceding character or expression. + Matches 1 or more occurrences of the preceding character or expression. Exp|exp Matches either the preceding or following expression. The or operator | matches the entire expression that precedes or follows it and not just the character that precedes or follows it. For example, VXI|GPIB means (VXI)|(GPIB), not VX(I|G)PIB. (exp) Grouping characters or expressions. :param resources: iterable of resources. :param query: query expression.
[ "Filter", "a", "list", "of", "resources", "according", "to", "a", "query", "expression", "." ]
python
train
Stewori/pytypes
pytypes/type_util.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/type_util.py#L533-L536
def is_builtin_type(tp): """Checks if the given type is a builtin one. """ return hasattr(__builtins__, tp.__name__) and tp is getattr(__builtins__, tp.__name__)
[ "def", "is_builtin_type", "(", "tp", ")", ":", "return", "hasattr", "(", "__builtins__", ",", "tp", ".", "__name__", ")", "and", "tp", "is", "getattr", "(", "__builtins__", ",", "tp", ".", "__name__", ")" ]
Checks if the given type is a builtin one.
[ "Checks", "if", "the", "given", "type", "is", "a", "builtin", "one", "." ]
python
train
inasafe/inasafe
safe/gui/tools/wizard/step_kw47_default_inasafe_fields.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw47_default_inasafe_fields.py#L223-L229
def clear(self): """Clear current state.""" # Adapted from http://stackoverflow.com/a/13103617/1198772 for i in reversed(list(range(self.kwExtraKeywordsGridLayout.count()))): self.kwExtraKeywordsGridLayout.itemAt(i).widget().setParent(None) self.parameters = [] self.parameter_container = ParameterContainer()
[ "def", "clear", "(", "self", ")", ":", "# Adapted from http://stackoverflow.com/a/13103617/1198772", "for", "i", "in", "reversed", "(", "list", "(", "range", "(", "self", ".", "kwExtraKeywordsGridLayout", ".", "count", "(", ")", ")", ")", ")", ":", "self", ".", "kwExtraKeywordsGridLayout", ".", "itemAt", "(", "i", ")", ".", "widget", "(", ")", ".", "setParent", "(", "None", ")", "self", ".", "parameters", "=", "[", "]", "self", ".", "parameter_container", "=", "ParameterContainer", "(", ")" ]
Clear current state.
[ "Clear", "current", "state", "." ]
python
train
python-diamond/Diamond
src/diamond/handler/logentries_diamond.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/logentries_diamond.py#L70-L83
def _send(self): """ Convert message to a json object and send to Lognetries """ while len(self.queue) > 0: metric = self.queue.popleft() topic, value, timestamp = str(metric).split() msg = json.dumps({"event": {topic: value}}) req = urllib2.Request("https://js.logentries.com/v1/logs/" + self.log_token, msg) try: urllib2.urlopen(req) except urllib2.URLError as e: logging.error("Can't send log message to Logentries %s", e)
[ "def", "_send", "(", "self", ")", ":", "while", "len", "(", "self", ".", "queue", ")", ">", "0", ":", "metric", "=", "self", ".", "queue", ".", "popleft", "(", ")", "topic", ",", "value", ",", "timestamp", "=", "str", "(", "metric", ")", ".", "split", "(", ")", "msg", "=", "json", ".", "dumps", "(", "{", "\"event\"", ":", "{", "topic", ":", "value", "}", "}", ")", "req", "=", "urllib2", ".", "Request", "(", "\"https://js.logentries.com/v1/logs/\"", "+", "self", ".", "log_token", ",", "msg", ")", "try", ":", "urllib2", ".", "urlopen", "(", "req", ")", "except", "urllib2", ".", "URLError", "as", "e", ":", "logging", ".", "error", "(", "\"Can't send log message to Logentries %s\"", ",", "e", ")" ]
Convert message to a json object and send to Lognetries
[ "Convert", "message", "to", "a", "json", "object", "and", "send", "to", "Lognetries" ]
python
train
nccgroup/opinel
opinel/utils/credentials.py
https://github.com/nccgroup/opinel/blob/2d4f5b96e0a1f9cb0356629f4f87e4ed99ce2606/opinel/utils/credentials.py#L311-L319
def read_profile_from_environment_variables(): """ Read profiles from env :return: """ role_arn = os.environ.get('AWS_ROLE_ARN', None) external_id = os.environ.get('AWS_EXTERNAL_ID', None) return role_arn, external_id
[ "def", "read_profile_from_environment_variables", "(", ")", ":", "role_arn", "=", "os", ".", "environ", ".", "get", "(", "'AWS_ROLE_ARN'", ",", "None", ")", "external_id", "=", "os", ".", "environ", ".", "get", "(", "'AWS_EXTERNAL_ID'", ",", "None", ")", "return", "role_arn", ",", "external_id" ]
Read profiles from env :return:
[ "Read", "profiles", "from", "env" ]
python
train
SeattleTestbed/seash
seash_modules.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/seash_modules.py#L450-L478
def enable_modules_from_last_session(seashcommanddict): """ Enable every module that isn't marked as disabled in the modules folder. This function is meant to be called when seash is initializing and nowhere else. A module is marked as disabled when there is a modulename.disabled file. """ successfully_enabled_modules = [] modules_to_enable = get_enabled_modules() for modulename in modules_to_enable: # There are no bad side effects to seash's state when we do this # The only thing that should happen is that the modulename.disabled file # gets created (temporarily) disable(seashcommanddict, modulename) try: enable(seashcommanddict, modulename) successfully_enabled_modules.append(modulename) except seash_exceptions.ModuleConflictError, e: print "Failed to enable the '"+modulename+"' module due to the following conflicting command:" print str(e) # We mark this module as disabled by adding a modulename.disabled file. open(MODULES_FOLDER_PATH + os.sep + modulename + ".disabled", 'w') except seash_exceptions.InitializeError, e: print "Failed to enable the '"+modulename+"' module." disable(seashcommanddict, modulename) successfully_enabled_modules.sort() print 'Enabled modules:', ', '.join(successfully_enabled_modules), '\n'
[ "def", "enable_modules_from_last_session", "(", "seashcommanddict", ")", ":", "successfully_enabled_modules", "=", "[", "]", "modules_to_enable", "=", "get_enabled_modules", "(", ")", "for", "modulename", "in", "modules_to_enable", ":", "# There are no bad side effects to seash's state when we do this", "# The only thing that should happen is that the modulename.disabled file", "# gets created (temporarily)", "disable", "(", "seashcommanddict", ",", "modulename", ")", "try", ":", "enable", "(", "seashcommanddict", ",", "modulename", ")", "successfully_enabled_modules", ".", "append", "(", "modulename", ")", "except", "seash_exceptions", ".", "ModuleConflictError", ",", "e", ":", "print", "\"Failed to enable the '\"", "+", "modulename", "+", "\"' module due to the following conflicting command:\"", "print", "str", "(", "e", ")", "# We mark this module as disabled by adding a modulename.disabled file.", "open", "(", "MODULES_FOLDER_PATH", "+", "os", ".", "sep", "+", "modulename", "+", "\".disabled\"", ",", "'w'", ")", "except", "seash_exceptions", ".", "InitializeError", ",", "e", ":", "print", "\"Failed to enable the '\"", "+", "modulename", "+", "\"' module.\"", "disable", "(", "seashcommanddict", ",", "modulename", ")", "successfully_enabled_modules", ".", "sort", "(", ")", "print", "'Enabled modules:'", ",", "', '", ".", "join", "(", "successfully_enabled_modules", ")", ",", "'\\n'" ]
Enable every module that isn't marked as disabled in the modules folder. This function is meant to be called when seash is initializing and nowhere else. A module is marked as disabled when there is a modulename.disabled file.
[ "Enable", "every", "module", "that", "isn", "t", "marked", "as", "disabled", "in", "the", "modules", "folder", ".", "This", "function", "is", "meant", "to", "be", "called", "when", "seash", "is", "initializing", "and", "nowhere", "else", ".", "A", "module", "is", "marked", "as", "disabled", "when", "there", "is", "a", "modulename", ".", "disabled", "file", "." ]
python
train
dwavesystems/dwave_networkx
dwave_networkx/algorithms/independent_set.py
https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/algorithms/independent_set.py#L24-L95
def maximum_weighted_independent_set(G, weight=None, sampler=None, lagrange=2.0, **sampler_args): """Returns an approximate maximum weighted independent set. Defines a QUBO with ground states corresponding to a maximum weighted independent set and uses the sampler to sample from it. An independent set is a set of nodes such that the subgraph of G induced by these nodes contains no edges. A maximum independent set is an independent set of maximum total node weight. Parameters ---------- G : NetworkX graph The graph on which to find a maximum cut weighted independent set. weight : string, optional (default None) If None, every node has equal weight. If a string, use this node attribute as the node weight. A node without this attribute is assumed to have max weight. sampler A binary quadratic model sampler. A sampler is a process that samples from low energy states in models defined by an Ising equation or a Quadratic Unconstrained Binary Optimization Problem (QUBO). A sampler is expected to have a 'sample_qubo' and 'sample_ising' method. A sampler is expected to return an iterable of samples, in order of increasing energy. If no sampler is provided, one must be provided using the `set_default_sampler` function. lagrange : optional (default 2) Lagrange parameter to weight constraints (no edges within set) versus objective (largest set possible). sampler_args Additional keyword parameters are passed to the sampler. Returns ------- indep_nodes : list List of nodes that form a maximum weighted independent set, as determined by the given sampler. Notes ----- Samplers by their nature may not return the optimal solution. This function does not attempt to confirm the quality of the returned sample. References ---------- `Independent Set on Wikipedia <https://en.wikipedia.org/wiki/Independent_set_(graph_theory)>`_ `QUBO on Wikipedia <https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization>`_ .. [AL] Lucas, A. (2014). Ising formulations of many NP problems. Frontiers in Physics, Volume 2, Article 5. """ # Get a QUBO representation of the problem Q = maximum_weighted_independent_set_qubo(G, weight, lagrange) # use the sampler to find low energy states response = sampler.sample_qubo(Q, **sampler_args) # we want the lowest energy sample sample = next(iter(response)) # nodes that are spin up or true are exactly the ones in S. return [node for node in sample if sample[node] > 0]
[ "def", "maximum_weighted_independent_set", "(", "G", ",", "weight", "=", "None", ",", "sampler", "=", "None", ",", "lagrange", "=", "2.0", ",", "*", "*", "sampler_args", ")", ":", "# Get a QUBO representation of the problem", "Q", "=", "maximum_weighted_independent_set_qubo", "(", "G", ",", "weight", ",", "lagrange", ")", "# use the sampler to find low energy states", "response", "=", "sampler", ".", "sample_qubo", "(", "Q", ",", "*", "*", "sampler_args", ")", "# we want the lowest energy sample", "sample", "=", "next", "(", "iter", "(", "response", ")", ")", "# nodes that are spin up or true are exactly the ones in S.", "return", "[", "node", "for", "node", "in", "sample", "if", "sample", "[", "node", "]", ">", "0", "]" ]
Returns an approximate maximum weighted independent set. Defines a QUBO with ground states corresponding to a maximum weighted independent set and uses the sampler to sample from it. An independent set is a set of nodes such that the subgraph of G induced by these nodes contains no edges. A maximum independent set is an independent set of maximum total node weight. Parameters ---------- G : NetworkX graph The graph on which to find a maximum cut weighted independent set. weight : string, optional (default None) If None, every node has equal weight. If a string, use this node attribute as the node weight. A node without this attribute is assumed to have max weight. sampler A binary quadratic model sampler. A sampler is a process that samples from low energy states in models defined by an Ising equation or a Quadratic Unconstrained Binary Optimization Problem (QUBO). A sampler is expected to have a 'sample_qubo' and 'sample_ising' method. A sampler is expected to return an iterable of samples, in order of increasing energy. If no sampler is provided, one must be provided using the `set_default_sampler` function. lagrange : optional (default 2) Lagrange parameter to weight constraints (no edges within set) versus objective (largest set possible). sampler_args Additional keyword parameters are passed to the sampler. Returns ------- indep_nodes : list List of nodes that form a maximum weighted independent set, as determined by the given sampler. Notes ----- Samplers by their nature may not return the optimal solution. This function does not attempt to confirm the quality of the returned sample. References ---------- `Independent Set on Wikipedia <https://en.wikipedia.org/wiki/Independent_set_(graph_theory)>`_ `QUBO on Wikipedia <https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization>`_ .. [AL] Lucas, A. (2014). Ising formulations of many NP problems. Frontiers in Physics, Volume 2, Article 5.
[ "Returns", "an", "approximate", "maximum", "weighted", "independent", "set", "." ]
python
train
sastrarobotics/pyHerkulex
herkulex.py
https://github.com/sastrarobotics/pyHerkulex/blob/3a42046cbfea8c7e343a04f42facba5e7bca570e/herkulex.py#L848-L868
def set_servo_angle(self, goalangle, goaltime, led): """ Sets the servo angle (in degrees) Enable torque using torque_on function before calling this Args: goalangle (int): The desired angle in degrees, range -150 to 150 goaltime (int): the time taken to move from present position to goalposition led (int): the LED color 0x00 LED off 0x04 GREEN 0x08 BLUE 0x10 RED """ if (self.servomodel==0x06) or (self.servomodel == 0x04): goalposition = scale(goalangle, -159.9, 159.6, 10627, 22129) else: goalposition = scale(goalangle, -150, 150, 21, 1002) self.set_servo_position(goalposition, goaltime, led)
[ "def", "set_servo_angle", "(", "self", ",", "goalangle", ",", "goaltime", ",", "led", ")", ":", "if", "(", "self", ".", "servomodel", "==", "0x06", ")", "or", "(", "self", ".", "servomodel", "==", "0x04", ")", ":", "goalposition", "=", "scale", "(", "goalangle", ",", "-", "159.9", ",", "159.6", ",", "10627", ",", "22129", ")", "else", ":", "goalposition", "=", "scale", "(", "goalangle", ",", "-", "150", ",", "150", ",", "21", ",", "1002", ")", "self", ".", "set_servo_position", "(", "goalposition", ",", "goaltime", ",", "led", ")" ]
Sets the servo angle (in degrees) Enable torque using torque_on function before calling this Args: goalangle (int): The desired angle in degrees, range -150 to 150 goaltime (int): the time taken to move from present position to goalposition led (int): the LED color 0x00 LED off 0x04 GREEN 0x08 BLUE 0x10 RED
[ "Sets", "the", "servo", "angle", "(", "in", "degrees", ")" ]
python
train
ArduPilot/MAVProxy
MAVProxy/tools/MAVExplorer.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/tools/MAVExplorer.py#L364-L406
def save_graph(graphdef): '''save a graph as XML''' if graphdef.filename is None: if 'HOME' in os.environ: dname = os.path.join(os.environ['HOME'], '.mavproxy') if os.path.exists(dname): mp_util.mkdir_p(dname) graphdef.filename = os.path.join(dname, 'mavgraphs.xml') elif 'LOCALAPPDATA' in os.environ: dname = os.path.join(os.environ['LOCALAPPDATA'], 'MAVProxy') if os.path.exists(dname): mp_util.mkdir_p(dname) graphdef.filename = os.path.join(dname, 'mavgraphs.xml') else: graphdef.filename = 'mavgraphs.xml' if graphdef.filename is None: print("No file to save graph to") return try: graphs = load_graph_xml(open(graphdef.filename).read(), graphdef.filename, load_all=True) except Exception: graphs = [] found_name = False for i in range(len(graphs)): if graphs[i].name == graphdef.name: graphs[i] = graphdef found_name = True break if not found_name: graphs.append(graphdef) pipe_console_input.send("Saving %u graphs to %s" % (len(graphs), graphdef.filename)) f = open(graphdef.filename, "w") f.write("<graphs>\n\n") for g in graphs: f.write(" <graph name='%s'>\n" % g.name.strip()) if g.description is None: g.description = '' f.write(" <description>%s</description>\n" % g.description.strip()) for e in g.expressions: f.write(" <expression>%s</expression>\n" % e.strip()) f.write(" </graph>\n\n") f.write("</graphs>\n") f.close()
[ "def", "save_graph", "(", "graphdef", ")", ":", "if", "graphdef", ".", "filename", "is", "None", ":", "if", "'HOME'", "in", "os", ".", "environ", ":", "dname", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'HOME'", "]", ",", "'.mavproxy'", ")", "if", "os", ".", "path", ".", "exists", "(", "dname", ")", ":", "mp_util", ".", "mkdir_p", "(", "dname", ")", "graphdef", ".", "filename", "=", "os", ".", "path", ".", "join", "(", "dname", ",", "'mavgraphs.xml'", ")", "elif", "'LOCALAPPDATA'", "in", "os", ".", "environ", ":", "dname", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'LOCALAPPDATA'", "]", ",", "'MAVProxy'", ")", "if", "os", ".", "path", ".", "exists", "(", "dname", ")", ":", "mp_util", ".", "mkdir_p", "(", "dname", ")", "graphdef", ".", "filename", "=", "os", ".", "path", ".", "join", "(", "dname", ",", "'mavgraphs.xml'", ")", "else", ":", "graphdef", ".", "filename", "=", "'mavgraphs.xml'", "if", "graphdef", ".", "filename", "is", "None", ":", "print", "(", "\"No file to save graph to\"", ")", "return", "try", ":", "graphs", "=", "load_graph_xml", "(", "open", "(", "graphdef", ".", "filename", ")", ".", "read", "(", ")", ",", "graphdef", ".", "filename", ",", "load_all", "=", "True", ")", "except", "Exception", ":", "graphs", "=", "[", "]", "found_name", "=", "False", "for", "i", "in", "range", "(", "len", "(", "graphs", ")", ")", ":", "if", "graphs", "[", "i", "]", ".", "name", "==", "graphdef", ".", "name", ":", "graphs", "[", "i", "]", "=", "graphdef", "found_name", "=", "True", "break", "if", "not", "found_name", ":", "graphs", ".", "append", "(", "graphdef", ")", "pipe_console_input", ".", "send", "(", "\"Saving %u graphs to %s\"", "%", "(", "len", "(", "graphs", ")", ",", "graphdef", ".", "filename", ")", ")", "f", "=", "open", "(", "graphdef", ".", "filename", ",", "\"w\"", ")", "f", ".", "write", "(", "\"<graphs>\\n\\n\"", ")", "for", "g", "in", "graphs", ":", "f", ".", "write", "(", "\" <graph name='%s'>\\n\"", "%", "g", ".", "name", ".", "strip", "(", ")", ")", "if", "g", ".", "description", "is", "None", ":", "g", ".", "description", "=", "''", "f", ".", "write", "(", "\" <description>%s</description>\\n\"", "%", "g", ".", "description", ".", "strip", "(", ")", ")", "for", "e", "in", "g", ".", "expressions", ":", "f", ".", "write", "(", "\" <expression>%s</expression>\\n\"", "%", "e", ".", "strip", "(", ")", ")", "f", ".", "write", "(", "\" </graph>\\n\\n\"", ")", "f", ".", "write", "(", "\"</graphs>\\n\"", ")", "f", ".", "close", "(", ")" ]
save a graph as XML
[ "save", "a", "graph", "as", "XML" ]
python
train
twisted/txaws
txaws/ec2/client.py
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L389-L401
def describe_snapshots(self, *snapshot_ids): """Describe available snapshots. TODO: ownerSet, restorableBySet """ snapshot_set = {} for pos, snapshot_id in enumerate(snapshot_ids): snapshot_set["SnapshotId.%d" % (pos + 1)] = snapshot_id query = self.query_factory( action="DescribeSnapshots", creds=self.creds, endpoint=self.endpoint, other_params=snapshot_set) d = query.submit() return d.addCallback(self.parser.snapshots)
[ "def", "describe_snapshots", "(", "self", ",", "*", "snapshot_ids", ")", ":", "snapshot_set", "=", "{", "}", "for", "pos", ",", "snapshot_id", "in", "enumerate", "(", "snapshot_ids", ")", ":", "snapshot_set", "[", "\"SnapshotId.%d\"", "%", "(", "pos", "+", "1", ")", "]", "=", "snapshot_id", "query", "=", "self", ".", "query_factory", "(", "action", "=", "\"DescribeSnapshots\"", ",", "creds", "=", "self", ".", "creds", ",", "endpoint", "=", "self", ".", "endpoint", ",", "other_params", "=", "snapshot_set", ")", "d", "=", "query", ".", "submit", "(", ")", "return", "d", ".", "addCallback", "(", "self", ".", "parser", ".", "snapshots", ")" ]
Describe available snapshots. TODO: ownerSet, restorableBySet
[ "Describe", "available", "snapshots", "." ]
python
train
jsommers/switchyard
switchyard/lib/socket/socketemu.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/socket/socketemu.py#L393-L399
def recv(self, buffersize, flags=0): ''' Receive data on the socket. The buffersize and flags arguments are currently ignored. Only returns the data. ''' _,_,data = self._recv(buffersize) return data
[ "def", "recv", "(", "self", ",", "buffersize", ",", "flags", "=", "0", ")", ":", "_", ",", "_", ",", "data", "=", "self", ".", "_recv", "(", "buffersize", ")", "return", "data" ]
Receive data on the socket. The buffersize and flags arguments are currently ignored. Only returns the data.
[ "Receive", "data", "on", "the", "socket", ".", "The", "buffersize", "and", "flags", "arguments", "are", "currently", "ignored", ".", "Only", "returns", "the", "data", "." ]
python
train
phoebe-project/phoebe2
phoebe/frontend/bundle.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L1935-L1947
def remove_component(self, component, **kwargs): """ [NOT IMPLEMENTED] Remove a 'component' from the bundle :raises NotImplementedError: because this isn't implemented yet """ # NOTE: run_checks will check if an entry is in the hierarchy but has no parameters kwargs['component'] = component # NOTE: we do not remove from 'model' by default kwargs['context'] = ['component', 'constraint', 'dataset', 'compute'] self.remove_parameters_all(**kwargs)
[ "def", "remove_component", "(", "self", ",", "component", ",", "*", "*", "kwargs", ")", ":", "# NOTE: run_checks will check if an entry is in the hierarchy but has no parameters", "kwargs", "[", "'component'", "]", "=", "component", "# NOTE: we do not remove from 'model' by default", "kwargs", "[", "'context'", "]", "=", "[", "'component'", ",", "'constraint'", ",", "'dataset'", ",", "'compute'", "]", "self", ".", "remove_parameters_all", "(", "*", "*", "kwargs", ")" ]
[NOT IMPLEMENTED] Remove a 'component' from the bundle :raises NotImplementedError: because this isn't implemented yet
[ "[", "NOT", "IMPLEMENTED", "]" ]
python
train
corpusops/pdbclone
lib/pdb_clone/attach.py
https://github.com/corpusops/pdbclone/blob/f781537c243a4874b246d43dbdef8c4279f0094d/lib/pdb_clone/attach.py#L503-L519
def spawn_gdb(pid, address=DFLT_ADDRESS, gdb='gdb', verbose=False, ctx=None, proc_iut=None): """Spawn gdb and attach to a process.""" parent, child = socket.socketpair() proc = Popen([gdb, '--interpreter=mi', '-nx'], bufsize=0, stdin=child, stdout=child, stderr=STDOUT) child.close() connections = {} gdb = GdbSocket(ctx, address, proc, proc_iut, parent, verbose, connections) gdb.mi_command('-target-attach %d' % pid) gdb.cli_command('python import pdb_clone.bootstrappdb_gdb') asyncore.loop(map=connections) proc.wait() return gdb.error
[ "def", "spawn_gdb", "(", "pid", ",", "address", "=", "DFLT_ADDRESS", ",", "gdb", "=", "'gdb'", ",", "verbose", "=", "False", ",", "ctx", "=", "None", ",", "proc_iut", "=", "None", ")", ":", "parent", ",", "child", "=", "socket", ".", "socketpair", "(", ")", "proc", "=", "Popen", "(", "[", "gdb", ",", "'--interpreter=mi'", ",", "'-nx'", "]", ",", "bufsize", "=", "0", ",", "stdin", "=", "child", ",", "stdout", "=", "child", ",", "stderr", "=", "STDOUT", ")", "child", ".", "close", "(", ")", "connections", "=", "{", "}", "gdb", "=", "GdbSocket", "(", "ctx", ",", "address", ",", "proc", ",", "proc_iut", ",", "parent", ",", "verbose", ",", "connections", ")", "gdb", ".", "mi_command", "(", "'-target-attach %d'", "%", "pid", ")", "gdb", ".", "cli_command", "(", "'python import pdb_clone.bootstrappdb_gdb'", ")", "asyncore", ".", "loop", "(", "map", "=", "connections", ")", "proc", ".", "wait", "(", ")", "return", "gdb", ".", "error" ]
Spawn gdb and attach to a process.
[ "Spawn", "gdb", "and", "attach", "to", "a", "process", "." ]
python
train
awslabs/sockeye
sockeye/training.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/training.py#L322-L328
def prepare_batch(self, batch: mx.io.DataBatch): """ Pre-fetches the next mini-batch. :param batch: The mini-batch to prepare. """ self.module.prepare(batch)
[ "def", "prepare_batch", "(", "self", ",", "batch", ":", "mx", ".", "io", ".", "DataBatch", ")", ":", "self", ".", "module", ".", "prepare", "(", "batch", ")" ]
Pre-fetches the next mini-batch. :param batch: The mini-batch to prepare.
[ "Pre", "-", "fetches", "the", "next", "mini", "-", "batch", "." ]
python
train
saltstack/salt
salt/daemons/masterapi.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L91-L130
def clean_fsbackend(opts): ''' Clean out the old fileserver backends ''' # Clear remote fileserver backend caches so they get recreated for backend in ('git', 'hg', 'svn'): if backend in opts['fileserver_backend']: env_cache = os.path.join( opts['cachedir'], '{0}fs'.format(backend), 'envs.p' ) if os.path.isfile(env_cache): log.debug('Clearing %sfs env cache', backend) try: os.remove(env_cache) except OSError as exc: log.critical( 'Unable to clear env cache file %s: %s', env_cache, exc ) file_lists_dir = os.path.join( opts['cachedir'], 'file_lists', '{0}fs'.format(backend) ) try: file_lists_caches = os.listdir(file_lists_dir) except OSError: continue for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'): cache_file = os.path.join(file_lists_dir, file_lists_cache) try: os.remove(cache_file) except OSError as exc: log.critical( 'Unable to file_lists cache file %s: %s', cache_file, exc )
[ "def", "clean_fsbackend", "(", "opts", ")", ":", "# Clear remote fileserver backend caches so they get recreated", "for", "backend", "in", "(", "'git'", ",", "'hg'", ",", "'svn'", ")", ":", "if", "backend", "in", "opts", "[", "'fileserver_backend'", "]", ":", "env_cache", "=", "os", ".", "path", ".", "join", "(", "opts", "[", "'cachedir'", "]", ",", "'{0}fs'", ".", "format", "(", "backend", ")", ",", "'envs.p'", ")", "if", "os", ".", "path", ".", "isfile", "(", "env_cache", ")", ":", "log", ".", "debug", "(", "'Clearing %sfs env cache'", ",", "backend", ")", "try", ":", "os", ".", "remove", "(", "env_cache", ")", "except", "OSError", "as", "exc", ":", "log", ".", "critical", "(", "'Unable to clear env cache file %s: %s'", ",", "env_cache", ",", "exc", ")", "file_lists_dir", "=", "os", ".", "path", ".", "join", "(", "opts", "[", "'cachedir'", "]", ",", "'file_lists'", ",", "'{0}fs'", ".", "format", "(", "backend", ")", ")", "try", ":", "file_lists_caches", "=", "os", ".", "listdir", "(", "file_lists_dir", ")", "except", "OSError", ":", "continue", "for", "file_lists_cache", "in", "fnmatch", ".", "filter", "(", "file_lists_caches", ",", "'*.p'", ")", ":", "cache_file", "=", "os", ".", "path", ".", "join", "(", "file_lists_dir", ",", "file_lists_cache", ")", "try", ":", "os", ".", "remove", "(", "cache_file", ")", "except", "OSError", "as", "exc", ":", "log", ".", "critical", "(", "'Unable to file_lists cache file %s: %s'", ",", "cache_file", ",", "exc", ")" ]
Clean out the old fileserver backends
[ "Clean", "out", "the", "old", "fileserver", "backends" ]
python
train
singularityhub/sregistry-cli
sregistry/main/__template__/query.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/__template__/query.py#L46-L73
def search_all(self): '''a "show all" search that doesn't require a query''' # This should be your apis url for a search url = '...' # paginte get is what it sounds like, and what you want for multiple # pages of results results = self._paginate_get(url) if len(results) == 0: bot.info("No container collections found.") sys.exit(1) bot.info("Collections") # Here is how to create a simple table. You of course must parse your # custom result and form the fields in the table to be what you think # are important! rows = [] for result in results: if "containers" in result: for c in result['containers']: rows.append([ c['uri'], c['detail'] ]) bot.table(rows) return rows
[ "def", "search_all", "(", "self", ")", ":", "# This should be your apis url for a search", "url", "=", "'...'", "# paginte get is what it sounds like, and what you want for multiple", "# pages of results", "results", "=", "self", ".", "_paginate_get", "(", "url", ")", "if", "len", "(", "results", ")", "==", "0", ":", "bot", ".", "info", "(", "\"No container collections found.\"", ")", "sys", ".", "exit", "(", "1", ")", "bot", ".", "info", "(", "\"Collections\"", ")", "# Here is how to create a simple table. You of course must parse your", "# custom result and form the fields in the table to be what you think", "# are important!", "rows", "=", "[", "]", "for", "result", "in", "results", ":", "if", "\"containers\"", "in", "result", ":", "for", "c", "in", "result", "[", "'containers'", "]", ":", "rows", ".", "append", "(", "[", "c", "[", "'uri'", "]", ",", "c", "[", "'detail'", "]", "]", ")", "bot", ".", "table", "(", "rows", ")", "return", "rows" ]
a "show all" search that doesn't require a query
[ "a", "show", "all", "search", "that", "doesn", "t", "require", "a", "query" ]
python
test
prompt-toolkit/pyvim
pyvim/commands/commands.py
https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/commands/commands.py#L422-L428
def color_scheme(editor, variables): """ Go to one of the open buffers. """ colorscheme = variables.get('colorscheme') if colorscheme: editor.use_colorscheme(colorscheme)
[ "def", "color_scheme", "(", "editor", ",", "variables", ")", ":", "colorscheme", "=", "variables", ".", "get", "(", "'colorscheme'", ")", "if", "colorscheme", ":", "editor", ".", "use_colorscheme", "(", "colorscheme", ")" ]
Go to one of the open buffers.
[ "Go", "to", "one", "of", "the", "open", "buffers", "." ]
python
train
joestump/python-oauth2
oauth2/__init__.py
https://github.com/joestump/python-oauth2/blob/b94f69b1ad195513547924e380d9265133e995fa/oauth2/__init__.py#L393-L405
def to_header(self, realm=''): """Serialize as a header for an HTTPAuth request.""" oauth_params = ((k, v) for k, v in self.items() if k.startswith('oauth_')) stringy_params = ((k, escape(v)) for k, v in oauth_params) header_params = ('%s="%s"' % (k, v) for k, v in stringy_params) params_header = ', '.join(header_params) auth_header = 'OAuth realm="%s"' % realm if params_header: auth_header = "%s, %s" % (auth_header, params_header) return {'Authorization': auth_header}
[ "def", "to_header", "(", "self", ",", "realm", "=", "''", ")", ":", "oauth_params", "=", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", "if", "k", ".", "startswith", "(", "'oauth_'", ")", ")", "stringy_params", "=", "(", "(", "k", ",", "escape", "(", "v", ")", ")", "for", "k", ",", "v", "in", "oauth_params", ")", "header_params", "=", "(", "'%s=\"%s\"'", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "stringy_params", ")", "params_header", "=", "', '", ".", "join", "(", "header_params", ")", "auth_header", "=", "'OAuth realm=\"%s\"'", "%", "realm", "if", "params_header", ":", "auth_header", "=", "\"%s, %s\"", "%", "(", "auth_header", ",", "params_header", ")", "return", "{", "'Authorization'", ":", "auth_header", "}" ]
Serialize as a header for an HTTPAuth request.
[ "Serialize", "as", "a", "header", "for", "an", "HTTPAuth", "request", "." ]
python
train
PetrochukM/PyTorch-NLP
examples/snli/util.py
https://github.com/PetrochukM/PyTorch-NLP/blob/5f7320da5c8d781df072fab3f7e421c6347e5bfa/examples/snli/util.py#L10-L24
def makedirs(name): """helper function for python 2 and 3 to call os.makedirs() avoiding an error if the directory to be created already exists""" import os, errno try: os.makedirs(name) except OSError as ex: if ex.errno == errno.EEXIST and os.path.isdir(name): # ignore existing directory pass else: # a different error happened raise
[ "def", "makedirs", "(", "name", ")", ":", "import", "os", ",", "errno", "try", ":", "os", ".", "makedirs", "(", "name", ")", "except", "OSError", "as", "ex", ":", "if", "ex", ".", "errno", "==", "errno", ".", "EEXIST", "and", "os", ".", "path", ".", "isdir", "(", "name", ")", ":", "# ignore existing directory", "pass", "else", ":", "# a different error happened", "raise" ]
helper function for python 2 and 3 to call os.makedirs() avoiding an error if the directory to be created already exists
[ "helper", "function", "for", "python", "2", "and", "3", "to", "call", "os", ".", "makedirs", "()", "avoiding", "an", "error", "if", "the", "directory", "to", "be", "created", "already", "exists" ]
python
train
pymc-devs/pymc
pymc/distributions.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L957-L965
def rcategorical(p, size=None): """ Categorical random variates. """ out = flib.rcat(p, np.random.random(size=size)) if sum(out.shape) == 1: return out.squeeze() else: return out
[ "def", "rcategorical", "(", "p", ",", "size", "=", "None", ")", ":", "out", "=", "flib", ".", "rcat", "(", "p", ",", "np", ".", "random", ".", "random", "(", "size", "=", "size", ")", ")", "if", "sum", "(", "out", ".", "shape", ")", "==", "1", ":", "return", "out", ".", "squeeze", "(", ")", "else", ":", "return", "out" ]
Categorical random variates.
[ "Categorical", "random", "variates", "." ]
python
train
spacetelescope/synphot_refactor
synphot/models.py
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/models.py#L193-L214
def sampleset(self, step=0.01, minimal=False): """Return ``x`` array that samples the feature. Parameters ---------- step : float Distance of first and last points w.r.t. bounding box. minimal : bool Only return the minimal points needed to define the box; i.e., box edges and a point outside on each side. """ w1, w2 = self.bounding_box if self._n_models == 1: w = self._calc_sampleset(w1, w2, step, minimal) else: w = list(map(partial( self._calc_sampleset, step=step, minimal=minimal), w1, w2)) return np.asarray(w)
[ "def", "sampleset", "(", "self", ",", "step", "=", "0.01", ",", "minimal", "=", "False", ")", ":", "w1", ",", "w2", "=", "self", ".", "bounding_box", "if", "self", ".", "_n_models", "==", "1", ":", "w", "=", "self", ".", "_calc_sampleset", "(", "w1", ",", "w2", ",", "step", ",", "minimal", ")", "else", ":", "w", "=", "list", "(", "map", "(", "partial", "(", "self", ".", "_calc_sampleset", ",", "step", "=", "step", ",", "minimal", "=", "minimal", ")", ",", "w1", ",", "w2", ")", ")", "return", "np", ".", "asarray", "(", "w", ")" ]
Return ``x`` array that samples the feature. Parameters ---------- step : float Distance of first and last points w.r.t. bounding box. minimal : bool Only return the minimal points needed to define the box; i.e., box edges and a point outside on each side.
[ "Return", "x", "array", "that", "samples", "the", "feature", "." ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L729-L747
def SaveState( self, config_parser ): """Retrieve window state to be restored on the next run...""" if not config_parser.has_section( 'window' ): config_parser.add_section( 'window' ) if self.IsMaximized(): config_parser.set( 'window', 'maximized', str(True)) else: config_parser.set( 'window', 'maximized', str(False)) size = self.GetSizeTuple() position = self.GetPositionTuple() config_parser.set( 'window', 'width', str(size[0]) ) config_parser.set( 'window', 'height', str(size[1]) ) config_parser.set( 'window', 'x', str(position[0]) ) config_parser.set( 'window', 'y', str(position[1]) ) for control in self.ProfileListControls: control.SaveState( config_parser ) return config_parser
[ "def", "SaveState", "(", "self", ",", "config_parser", ")", ":", "if", "not", "config_parser", ".", "has_section", "(", "'window'", ")", ":", "config_parser", ".", "add_section", "(", "'window'", ")", "if", "self", ".", "IsMaximized", "(", ")", ":", "config_parser", ".", "set", "(", "'window'", ",", "'maximized'", ",", "str", "(", "True", ")", ")", "else", ":", "config_parser", ".", "set", "(", "'window'", ",", "'maximized'", ",", "str", "(", "False", ")", ")", "size", "=", "self", ".", "GetSizeTuple", "(", ")", "position", "=", "self", ".", "GetPositionTuple", "(", ")", "config_parser", ".", "set", "(", "'window'", ",", "'width'", ",", "str", "(", "size", "[", "0", "]", ")", ")", "config_parser", ".", "set", "(", "'window'", ",", "'height'", ",", "str", "(", "size", "[", "1", "]", ")", ")", "config_parser", ".", "set", "(", "'window'", ",", "'x'", ",", "str", "(", "position", "[", "0", "]", ")", ")", "config_parser", ".", "set", "(", "'window'", ",", "'y'", ",", "str", "(", "position", "[", "1", "]", ")", ")", "for", "control", "in", "self", ".", "ProfileListControls", ":", "control", ".", "SaveState", "(", "config_parser", ")", "return", "config_parser" ]
Retrieve window state to be restored on the next run...
[ "Retrieve", "window", "state", "to", "be", "restored", "on", "the", "next", "run", "..." ]
python
train
RIPE-NCC/ripe-atlas-cousteau
ripe/atlas/cousteau/stream.py
https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/stream.py#L167-L175
def timeout(self, seconds=None):
    """
    Times out all streams after n seconds or wait forever if
    seconds is None
    """
    if seconds is None:
        self.socketIO.wait()
    else:
        self.socketIO.wait(seconds=seconds)
[ "def", "timeout", "(", "self", ",", "seconds", "=", "None", ")", ":", "if", "seconds", "is", "None", ":", "self", ".", "socketIO", ".", "wait", "(", ")", "else", ":", "self", ".", "socketIO", ".", "wait", "(", "seconds", "=", "seconds", ")" ]
Times out all streams after n seconds or wait forever if seconds is None
[ "Times", "out", "all", "streams", "after", "n", "seconds", "or", "wait", "forever", "if", "seconds", "is", "None" ]
python
train
vberlier/nbtlib
nbtlib/tag.py
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/tag.py#L124-L128
def write_string(value, buff, byteorder='big'):
    """Write a string to a file-like object."""
    data = value.encode('utf-8')
    write_numeric(USHORT, len(data), buff, byteorder)
    buff.write(data)
[ "def", "write_string", "(", "value", ",", "buff", ",", "byteorder", "=", "'big'", ")", ":", "data", "=", "value", ".", "encode", "(", "'utf-8'", ")", "write_numeric", "(", "USHORT", ",", "len", "(", "data", ")", ",", "buff", ",", "byteorder", ")", "buff", ".", "write", "(", "data", ")" ]
Write a string to a file-like object.
[ "Write", "a", "string", "to", "a", "file", "-", "like", "object", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_wp.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_wp.py#L787-L810
def savecsv(self, filename):
    '''save waypoints to a file in human-readable CSV file'''
    f = open(filename, mode='w')
    headers = ["Seq", "Frame", "Cmd", "P1", "P2", "P3", "P4", "X", "Y", "Z"]
    print(self.csv_line(headers))
    f.write(self.csv_line(headers) + "\n")
    for w in self.wploader.wpoints:
        if getattr(w, 'comment', None):
            # f.write("# %s\n" % w.comment)
            pass
        out_list = [
            w.seq,
            self.pretty_enum_value('MAV_FRAME', w.frame),
            self.pretty_enum_value('MAV_CMD', w.command),
            self.pretty_parameter_value(w.param1),
            self.pretty_parameter_value(w.param2),
            self.pretty_parameter_value(w.param3),
            self.pretty_parameter_value(w.param4),
            self.pretty_parameter_value(w.x),
            self.pretty_parameter_value(w.y),
            self.pretty_parameter_value(w.z),
        ]
        print(self.csv_line(out_list))
        f.write(self.csv_line(out_list) + "\n")
    f.close()
[ "def", "savecsv", "(", "self", ",", "filename", ")", ":", "f", "=", "open", "(", "filename", ",", "mode", "=", "'w'", ")", "headers", "=", "[", "\"Seq\"", ",", "\"Frame\"", ",", "\"Cmd\"", ",", "\"P1\"", ",", "\"P2\"", ",", "\"P3\"", ",", "\"P4\"", ",", "\"X\"", ",", "\"Y\"", ",", "\"Z\"", "]", "print", "(", "self", ".", "csv_line", "(", "headers", ")", ")", "f", ".", "write", "(", "self", ".", "csv_line", "(", "headers", ")", "+", "\"\\n\"", ")", "for", "w", "in", "self", ".", "wploader", ".", "wpoints", ":", "if", "getattr", "(", "w", ",", "'comment'", ",", "None", ")", ":", "# f.write(\"# %s\\n\" % w.comment)", "pass", "out_list", "=", "[", "w", ".", "seq", ",", "self", ".", "pretty_enum_value", "(", "'MAV_FRAME'", ",", "w", ".", "frame", ")", ",", "self", ".", "pretty_enum_value", "(", "'MAV_CMD'", ",", "w", ".", "command", ")", ",", "self", ".", "pretty_parameter_value", "(", "w", ".", "param1", ")", ",", "self", ".", "pretty_parameter_value", "(", "w", ".", "param2", ")", ",", "self", ".", "pretty_parameter_value", "(", "w", ".", "param3", ")", ",", "self", ".", "pretty_parameter_value", "(", "w", ".", "param4", ")", ",", "self", ".", "pretty_parameter_value", "(", "w", ".", "x", ")", ",", "self", ".", "pretty_parameter_value", "(", "w", ".", "y", ")", ",", "self", ".", "pretty_parameter_value", "(", "w", ".", "z", ")", ",", "]", "print", "(", "self", ".", "csv_line", "(", "out_list", ")", ")", "f", ".", "write", "(", "self", ".", "csv_line", "(", "out_list", ")", "+", "\"\\n\"", ")", "f", ".", "close", "(", ")" ]
save waypoints to a file in human-readable CSV file
[ "save", "waypoints", "to", "a", "file", "in", "human", "-", "readable", "CSV", "file" ]
python
train
abseil/abseil-py
absl/flags/_validators.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_validators.py#L424-L450
def mark_bool_flags_as_mutual_exclusive(flag_names, required=False,
                                        flag_values=_flagvalues.FLAGS):
  """Ensures that only one flag among flag_names is True.

  Args:
    flag_names: [str], names of the flags.
    required: bool. If true, exactly one flag must be True. Otherwise, at most
        one flag can be True, and it is valid for all flags to be False.
    flag_values: flags.FlagValues, optional FlagValues instance where the flags
        are defined.
  """
  for flag_name in flag_names:
    if not flag_values[flag_name].boolean:
      raise _exceptions.ValidationError(
          'Flag --{} is not Boolean, which is required for flags used in '
          'mark_bool_flags_as_mutual_exclusive.'.format(flag_name))

  def validate_boolean_mutual_exclusion(flags_dict):
    flag_count = sum(bool(val) for val in flags_dict.values())
    if flag_count == 1 or (not required and flag_count == 0):
      return True
    raise _exceptions.ValidationError(
        '{} one of ({}) must be True.'.format(
            'Exactly' if required else 'At most', ', '.join(flag_names)))

  register_multi_flags_validator(
      flag_names, validate_boolean_mutual_exclusion, flag_values=flag_values)
[ "def", "mark_bool_flags_as_mutual_exclusive", "(", "flag_names", ",", "required", "=", "False", ",", "flag_values", "=", "_flagvalues", ".", "FLAGS", ")", ":", "for", "flag_name", "in", "flag_names", ":", "if", "not", "flag_values", "[", "flag_name", "]", ".", "boolean", ":", "raise", "_exceptions", ".", "ValidationError", "(", "'Flag --{} is not Boolean, which is required for flags used in '", "'mark_bool_flags_as_mutual_exclusive.'", ".", "format", "(", "flag_name", ")", ")", "def", "validate_boolean_mutual_exclusion", "(", "flags_dict", ")", ":", "flag_count", "=", "sum", "(", "bool", "(", "val", ")", "for", "val", "in", "flags_dict", ".", "values", "(", ")", ")", "if", "flag_count", "==", "1", "or", "(", "not", "required", "and", "flag_count", "==", "0", ")", ":", "return", "True", "raise", "_exceptions", ".", "ValidationError", "(", "'{} one of ({}) must be True.'", ".", "format", "(", "'Exactly'", "if", "required", "else", "'At most'", ",", "', '", ".", "join", "(", "flag_names", ")", ")", ")", "register_multi_flags_validator", "(", "flag_names", ",", "validate_boolean_mutual_exclusion", ",", "flag_values", "=", "flag_values", ")" ]
Ensures that only one flag among flag_names is True.

  Args:
    flag_names: [str], names of the flags.
    required: bool. If true, exactly one flag must be True. Otherwise, at most
        one flag can be True, and it is valid for all flags to be False.
    flag_values: flags.FlagValues, optional FlagValues instance where the flags
        are defined.
[ "Ensures", "that", "only", "one", "flag", "among", "flag_names", "is", "True", "." ]
python
train
shoebot/shoebot
lib/web/wikipedia.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/wikipedia.py#L634-L644
def convert_pre(self, markup):

    """ Substitutes <pre> to Wikipedia markup by adding a space
    at the start of a line.
    """

    for m in re.findall(self.re["preformatted"], markup):
        markup = markup.replace(m, m.replace("\n", "\n "))
        markup = re.sub("<pre.*?>\n{0,}", "", markup)
        markup = re.sub("\W{0,}</pre>", "", markup)

    return markup
[ "def", "convert_pre", "(", "self", ",", "markup", ")", ":", "for", "m", "in", "re", ".", "findall", "(", "self", ".", "re", "[", "\"preformatted\"", "]", ",", "markup", ")", ":", "markup", "=", "markup", ".", "replace", "(", "m", ",", "m", ".", "replace", "(", "\"\\n\"", ",", "\"\\n \"", ")", ")", "markup", "=", "re", ".", "sub", "(", "\"<pre.*?>\\n{0,}\"", ",", "\"\"", ",", "markup", ")", "markup", "=", "re", ".", "sub", "(", "\"\\W{0,}</pre>\"", ",", "\"\"", ",", "markup", ")", "return", "markup" ]
Substitutes <pre> to Wikipedia markup by adding a space at the start of a line.
[ "Substitutes", "<pre", ">", "to", "Wikipedia", "markup", "by", "adding", "a", "space", "at", "the", "start", "of", "a", "line", "." ]
python
valid
saltstack/salt
salt/modules/influxdbmod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L114-L134
def create_db(name, **client_args):
    '''
    Create a database.

    name
        Name of the database to create.

    CLI Example:

    .. code-block:: bash

        salt '*' influxdb.create_db <name>
    '''
    if db_exists(name, **client_args):
        log.info('DB \'%s\' already exists', name)
        return False

    client = _client(**client_args)
    client.create_database(name)

    return True
[ "def", "create_db", "(", "name", ",", "*", "*", "client_args", ")", ":", "if", "db_exists", "(", "name", ",", "*", "*", "client_args", ")", ":", "log", ".", "info", "(", "'DB \\'%s\\' already exists'", ",", "name", ")", "return", "False", "client", "=", "_client", "(", "*", "*", "client_args", ")", "client", ".", "create_database", "(", "name", ")", "return", "True" ]
Create a database.

    name
        Name of the database to create.

    CLI Example:

    .. code-block:: bash

        salt '*' influxdb.create_db <name>
[ "Create", "a", "database", "." ]
python
train
phaethon/kamene
kamene/plist.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/plist.py#L377-L388
def pdfdump(self, filename = None, **kargs):
    """Creates a PDF file with a psdump of every packet
    filename: name of the file to write to. If empty, a temporary file is used and
              conf.prog.pdfreader is called"""
    d = self._dump_document(**kargs)
    if filename is None:
        filename = get_temp_file(autoext=".pdf")
        d.writePDFfile(filename)
        subprocess.Popen([conf.prog.pdfreader, filename+".pdf"])
    else:
        d.writePDFfile(filename)
    print
[ "def", "pdfdump", "(", "self", ",", "filename", "=", "None", ",", "*", "*", "kargs", ")", ":", "d", "=", "self", ".", "_dump_document", "(", "*", "*", "kargs", ")", "if", "filename", "is", "None", ":", "filename", "=", "get_temp_file", "(", "autoext", "=", "\".pdf\"", ")", "d", ".", "writePDFfile", "(", "filename", ")", "subprocess", ".", "Popen", "(", "[", "conf", ".", "prog", ".", "pdfreader", ",", "filename", "+", "\".pdf\"", "]", ")", "else", ":", "d", ".", "writePDFfile", "(", "filename", ")", "print" ]
Creates a PDF file with a psdump of every packet filename: name of the file to write to. If empty, a temporary file is used and conf.prog.pdfreader is called
[ "Creates", "a", "PDF", "file", "with", "a", "psdump", "of", "every", "packet", "filename", ":", "name", "of", "the", "file", "to", "write", "to", ".", "If", "empty", "a", "temporary", "file", "is", "used", "and", "conf", ".", "prog", ".", "pdfreader", "is", "called" ]
python
train
minhhoit/yacms
yacms/pages/context_processors.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/pages/context_processors.py#L4-L20
def page(request):
    """
    Adds the current page to the template context and runs its
    ``set_helper`` method. This was previously part of ``PageMiddleware``,
    but moved to a context processor so that we could assign these
    template context variables without the middleware depending on
    Django's ``TemplateResponse``.
    """
    context = {}
    page = getattr(request, "page", None)
    if isinstance(page, Page):
        # set_helpers has always expected the current template context,
        # but here we're just passing in our context dict with enough
        # variables to satisfy it.
        context = {"request": request, "page": page, "_current_page": page}
        page.set_helpers(context)
    return context
[ "def", "page", "(", "request", ")", ":", "context", "=", "{", "}", "page", "=", "getattr", "(", "request", ",", "\"page\"", ",", "None", ")", "if", "isinstance", "(", "page", ",", "Page", ")", ":", "# set_helpers has always expected the current template context,", "# but here we're just passing in our context dict with enough", "# variables to satisfy it.", "context", "=", "{", "\"request\"", ":", "request", ",", "\"page\"", ":", "page", ",", "\"_current_page\"", ":", "page", "}", "page", ".", "set_helpers", "(", "context", ")", "return", "context" ]
Adds the current page to the template context and runs its ``set_helper`` method. This was previously part of ``PageMiddleware``, but moved to a context processor so that we could assign these template context variables without the middleware depending on Django's ``TemplateResponse``.
[ "Adds", "the", "current", "page", "to", "the", "template", "context", "and", "runs", "its", "set_helper", "method", ".", "This", "was", "previously", "part", "of", "PageMiddleware", "but", "moved", "to", "a", "context", "processor", "so", "that", "we", "could", "assign", "these", "template", "context", "variables", "without", "the", "middleware", "depending", "on", "Django", "s", "TemplateResponse", "." ]
python
train
oceanprotocol/squid-py
squid_py/ocean/ocean_templates.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ocean/ocean_templates.py#L18-L40
def propose(self, template_address, account):
    """
    Propose a new template.

    :param template_address: Address of the template contract, str
    :param account: account proposing the template, Account
    :return: bool
    """
    try:
        proposed = self._keeper.template_manager.propose_template(template_address, account)
        return proposed
    except ValueError as err:
        template_values = self._keeper.template_manager.get_template(template_address)
        if not template_values:
            logger.warning(f'Propose template failed: {err}')
            return False

        if template_values.state != 1:
            logger.warning(
                f'Propose template failed, current state is set to {template_values.state}')
            return False

        return True
[ "def", "propose", "(", "self", ",", "template_address", ",", "account", ")", ":", "try", ":", "proposed", "=", "self", ".", "_keeper", ".", "template_manager", ".", "propose_template", "(", "template_address", ",", "account", ")", "return", "proposed", "except", "ValueError", "as", "err", ":", "template_values", "=", "self", ".", "_keeper", ".", "template_manager", ".", "get_template", "(", "template_address", ")", "if", "not", "template_values", ":", "logger", ".", "warning", "(", "f'Propose template failed: {err}'", ")", "return", "False", "if", "template_values", ".", "state", "!=", "1", ":", "logger", ".", "warning", "(", "f'Propose template failed, current state is set to {template_values.state}'", ")", "return", "False", "return", "True" ]
Propose a new template. :param template_address: Address of the template contract, str :param account: account proposing the template, Account :return: bool
[ "Propose", "a", "new", "template", "." ]
python
train
mushkevych/scheduler
workers/abstract_mongo_worker.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/workers/abstract_mongo_worker.py#L27-L47
def _flush_aggregated_objects(self):
    """ method inserts aggregated objects into MongoDB
        :return number_of_aggregated_objects """
    if len(self.aggregated_objects) == 0:
        # nothing to do
        return 0

    number_of_aggregated_objects = len(self.aggregated_objects)
    self.logger.info('Aggregated {0} documents. Performing flush.'.format(number_of_aggregated_objects))

    for key in self.aggregated_objects:
        document = self.aggregated_objects[key]
        mongo_pk = self._mongo_sink_key(*key)
        self.ds.update(self.sink, mongo_pk, document)

    self.logger.info('Flush successful.')

    del self.aggregated_objects
    self.aggregated_objects = dict()
    gc.collect()
    return number_of_aggregated_objects
[ "def", "_flush_aggregated_objects", "(", "self", ")", ":", "if", "len", "(", "self", ".", "aggregated_objects", ")", "==", "0", ":", "# nothing to do", "return", "0", "number_of_aggregated_objects", "=", "len", "(", "self", ".", "aggregated_objects", ")", "self", ".", "logger", ".", "info", "(", "'Aggregated {0} documents. Performing flush.'", ".", "format", "(", "number_of_aggregated_objects", ")", ")", "for", "key", "in", "self", ".", "aggregated_objects", ":", "document", "=", "self", ".", "aggregated_objects", "[", "key", "]", "mongo_pk", "=", "self", ".", "_mongo_sink_key", "(", "*", "key", ")", "self", ".", "ds", ".", "update", "(", "self", ".", "sink", ",", "mongo_pk", ",", "document", ")", "self", ".", "logger", ".", "info", "(", "'Flush successful.'", ")", "del", "self", ".", "aggregated_objects", "self", ".", "aggregated_objects", "=", "dict", "(", ")", "gc", ".", "collect", "(", ")", "return", "number_of_aggregated_objects" ]
method inserts aggregated objects into MongoDB :return number_of_aggregated_objects
[ "method", "inserts", "aggregated", "objects", "into", "MongoDB", ":", "return", "number_of_aggregated_objects" ]
python
train
angr/angr
angr/knowledge_plugins/variables/variable_manager.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/knowledge_plugins/variables/variable_manager.py#L125-L168
def make_phi_node(self, block_addr, *variables): """ Create a phi variable for variables at block `block_addr`. :param int block_addr: The address of the current block. :param variables: Variables that the phi variable represents. :return: The created phi variable. """ existing_phis = set() non_phis = set() for var in variables: if self.is_phi_variable(var): existing_phis.add(var) else: non_phis.add(var) if len(existing_phis) == 1: existing_phi = next(iter(existing_phis)) if non_phis.issubset(self.get_phi_subvariables(existing_phi)): return existing_phi else: # Update phi variables self._phi_variables[existing_phi] |= non_phis return existing_phi repre = next(iter(variables)) repre_type = type(repre) if repre_type is SimRegisterVariable: ident_sort = 'register' a = SimRegisterVariable(repre.reg, repre.size, ident=self.next_variable_ident(ident_sort)) elif repre_type is SimMemoryVariable: ident_sort = 'memory' a = SimMemoryVariable(repre.addr, repre.size, ident=self.next_variable_ident(ident_sort)) elif repre_type is SimStackVariable: ident_sort = 'stack' a = SimStackVariable(repre.offset, repre.size, ident=self.next_variable_ident(ident_sort)) else: raise TypeError('make_phi_node(): Unsupported variable type "%s".' % type(repre)) # Keep a record of all phi variables self._phi_variables[a] = set(variables) self._phi_variables_by_block[block_addr].add(a) return a
[ "def", "make_phi_node", "(", "self", ",", "block_addr", ",", "*", "variables", ")", ":", "existing_phis", "=", "set", "(", ")", "non_phis", "=", "set", "(", ")", "for", "var", "in", "variables", ":", "if", "self", ".", "is_phi_variable", "(", "var", ")", ":", "existing_phis", ".", "add", "(", "var", ")", "else", ":", "non_phis", ".", "add", "(", "var", ")", "if", "len", "(", "existing_phis", ")", "==", "1", ":", "existing_phi", "=", "next", "(", "iter", "(", "existing_phis", ")", ")", "if", "non_phis", ".", "issubset", "(", "self", ".", "get_phi_subvariables", "(", "existing_phi", ")", ")", ":", "return", "existing_phi", "else", ":", "# Update phi variables", "self", ".", "_phi_variables", "[", "existing_phi", "]", "|=", "non_phis", "return", "existing_phi", "repre", "=", "next", "(", "iter", "(", "variables", ")", ")", "repre_type", "=", "type", "(", "repre", ")", "if", "repre_type", "is", "SimRegisterVariable", ":", "ident_sort", "=", "'register'", "a", "=", "SimRegisterVariable", "(", "repre", ".", "reg", ",", "repre", ".", "size", ",", "ident", "=", "self", ".", "next_variable_ident", "(", "ident_sort", ")", ")", "elif", "repre_type", "is", "SimMemoryVariable", ":", "ident_sort", "=", "'memory'", "a", "=", "SimMemoryVariable", "(", "repre", ".", "addr", ",", "repre", ".", "size", ",", "ident", "=", "self", ".", "next_variable_ident", "(", "ident_sort", ")", ")", "elif", "repre_type", "is", "SimStackVariable", ":", "ident_sort", "=", "'stack'", "a", "=", "SimStackVariable", "(", "repre", ".", "offset", ",", "repre", ".", "size", ",", "ident", "=", "self", ".", "next_variable_ident", "(", "ident_sort", ")", ")", "else", ":", "raise", "TypeError", "(", "'make_phi_node(): Unsupported variable type \"%s\".'", "%", "type", "(", "repre", ")", ")", "# Keep a record of all phi variables", "self", ".", "_phi_variables", "[", "a", "]", "=", "set", "(", "variables", ")", "self", ".", "_phi_variables_by_block", "[", "block_addr", "]", ".", "add", "(", "a", ")", "return", "a" ]
Create a phi variable for variables at block `block_addr`. :param int block_addr: The address of the current block. :param variables: Variables that the phi variable represents. :return: The created phi variable.
[ "Create", "a", "phi", "variable", "for", "variables", "at", "block", "block_addr", "." ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/summary.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/summary.py#L240-L266
def _repr(o, verbosity=1):
    """Get meaning object representation.

    This function should be used when the simple str(o) output would result
    in too general data. E.g. "<type 'instance'" is less meaningful than
    "instance: Foo".

    Keyword arguments:
    verbosity -- if True the first row is treated as a table header

    """
    res = ""

    t = type(o)
    if (verbosity == 0) or (t not in representations):
        res = str(t)
    else:
        verbosity -= 1
        if len(representations[t]) < verbosity:
            verbosity = len(representations[t]) - 1
        res = representations[t][verbosity](o)

    res = address.sub('', res)
    res = type_prefix.sub('', res)
    res = type_suffix.sub('', res)

    return res
[ "def", "_repr", "(", "o", ",", "verbosity", "=", "1", ")", ":", "res", "=", "\"\"", "t", "=", "type", "(", "o", ")", "if", "(", "verbosity", "==", "0", ")", "or", "(", "t", "not", "in", "representations", ")", ":", "res", "=", "str", "(", "t", ")", "else", ":", "verbosity", "-=", "1", "if", "len", "(", "representations", "[", "t", "]", ")", "<", "verbosity", ":", "verbosity", "=", "len", "(", "representations", "[", "t", "]", ")", "-", "1", "res", "=", "representations", "[", "t", "]", "[", "verbosity", "]", "(", "o", ")", "res", "=", "address", ".", "sub", "(", "''", ",", "res", ")", "res", "=", "type_prefix", ".", "sub", "(", "''", ",", "res", ")", "res", "=", "type_suffix", ".", "sub", "(", "''", ",", "res", ")", "return", "res" ]
Get meaning object representation. This function should be used when the simple str(o) output would result in too general data. E.g. "<type 'instance'" is less meaningful than "instance: Foo". Keyword arguments: verbosity -- if True the first row is treated as a table header
[ "Get", "meaning", "object", "representation", "." ]
python
train
apache/airflow
airflow/hooks/hive_hooks.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L641-L680
def _get_max_partition_from_part_specs(part_specs, partition_key, filter_map): """ Helper method to get max partition of partitions with partition_key from part specs. key:value pair in filter_map will be used to filter out partitions. :param part_specs: list of partition specs. :type part_specs: list :param partition_key: partition key name. :type partition_key: str :param filter_map: partition_key:partition_value map used for partition filtering, e.g. {'key1': 'value1', 'key2': 'value2'}. Only partitions matching all partition_key:partition_value pairs will be considered as candidates of max partition. :type filter_map: map :return: Max partition or None if part_specs is empty. """ if not part_specs: return None # Assuming all specs have the same keys. if partition_key not in part_specs[0].keys(): raise AirflowException("Provided partition_key {} " "is not in part_specs.".format(partition_key)) if filter_map: is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys())) if filter_map and not is_subset: raise AirflowException("Keys in provided filter_map {} " "are not subset of part_spec keys: {}" .format(', '.join(filter_map.keys()), ', '.join(part_specs[0].keys()))) candidates = [p_dict[partition_key] for p_dict in part_specs if filter_map is None or all(item in p_dict.items() for item in filter_map.items())] if not candidates: return None else: return max(candidates).encode('utf-8')
[ "def", "_get_max_partition_from_part_specs", "(", "part_specs", ",", "partition_key", ",", "filter_map", ")", ":", "if", "not", "part_specs", ":", "return", "None", "# Assuming all specs have the same keys.", "if", "partition_key", "not", "in", "part_specs", "[", "0", "]", ".", "keys", "(", ")", ":", "raise", "AirflowException", "(", "\"Provided partition_key {} \"", "\"is not in part_specs.\"", ".", "format", "(", "partition_key", ")", ")", "if", "filter_map", ":", "is_subset", "=", "set", "(", "filter_map", ".", "keys", "(", ")", ")", ".", "issubset", "(", "set", "(", "part_specs", "[", "0", "]", ".", "keys", "(", ")", ")", ")", "if", "filter_map", "and", "not", "is_subset", ":", "raise", "AirflowException", "(", "\"Keys in provided filter_map {} \"", "\"are not subset of part_spec keys: {}\"", ".", "format", "(", "', '", ".", "join", "(", "filter_map", ".", "keys", "(", ")", ")", ",", "', '", ".", "join", "(", "part_specs", "[", "0", "]", ".", "keys", "(", ")", ")", ")", ")", "candidates", "=", "[", "p_dict", "[", "partition_key", "]", "for", "p_dict", "in", "part_specs", "if", "filter_map", "is", "None", "or", "all", "(", "item", "in", "p_dict", ".", "items", "(", ")", "for", "item", "in", "filter_map", ".", "items", "(", ")", ")", "]", "if", "not", "candidates", ":", "return", "None", "else", ":", "return", "max", "(", "candidates", ")", ".", "encode", "(", "'utf-8'", ")" ]
Helper method to get max partition of partitions with partition_key
    from part specs. key:value pair in filter_map will be used to
    filter out partitions.

    :param part_specs: list of partition specs.
    :type part_specs: list
    :param partition_key: partition key name.
    :type partition_key: str
    :param filter_map: partition_key:partition_value map used for partition filtering,
                       e.g. {'key1': 'value1', 'key2': 'value2'}.
                       Only partitions matching all partition_key:partition_value
                       pairs will be considered as candidates of max partition.
    :type filter_map: map
    :return: Max partition or None if part_specs is empty.
[ "Helper", "method", "to", "get", "max", "partition", "of", "partitions", "with", "partition_key", "from", "part", "specs", ".", "key", ":", "value", "pair", "in", "filter_map", "will", "be", "used", "to", "filter", "out", "partitions", "." ]
python
test
tensorflow/tensor2tensor
tensor2tensor/data_generators/text_problems.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_problems.py#L621-L627
def text2text_distill_iterator(source_txt_path, target_txt_path,
                               distill_txt_path):
  """Yield dicts for Text2TextProblem.generate_samples from lines of files."""
  for inputs, targets, dist_targets in zip(
      txt_line_iterator(source_txt_path), txt_line_iterator(target_txt_path),
      txt_line_iterator(distill_txt_path)):
    yield {"inputs": inputs, "targets": targets, "dist_targets": dist_targets}
[ "def", "text2text_distill_iterator", "(", "source_txt_path", ",", "target_txt_path", ",", "distill_txt_path", ")", ":", "for", "inputs", ",", "targets", ",", "dist_targets", "in", "zip", "(", "txt_line_iterator", "(", "source_txt_path", ")", ",", "txt_line_iterator", "(", "target_txt_path", ")", ",", "txt_line_iterator", "(", "distill_txt_path", ")", ")", ":", "yield", "{", "\"inputs\"", ":", "inputs", ",", "\"targets\"", ":", "targets", ",", "\"dist_targets\"", ":", "dist_targets", "}" ]
Yield dicts for Text2TextProblem.generate_samples from lines of files.
[ "Yield", "dicts", "for", "Text2TextProblem", ".", "generate_samples", "from", "lines", "of", "files", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/internal/distribution_util.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1274-L1320
def pick_vector(cond, true_vector, false_vector, name="pick_vector"): """Picks possibly different length row `Tensor`s based on condition. Value `Tensor`s should have exactly one dimension. If `cond` is a python Boolean or `tf.constant` then either `true_vector` or `false_vector` is immediately returned. I.e., no graph nodes are created and no validation happens. Args: cond: `Tensor`. Must have `dtype=tf.bool` and be scalar. true_vector: `Tensor` of one dimension. Returned when cond is `True`. false_vector: `Tensor` of one dimension. Returned when cond is `False`. name: Python `str`. The name to give this op. Example: ```python pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18)) # [10, 11] pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18)) # [15, 16, 17] ``` Returns: true_or_false_vector: `Tensor`. Raises: TypeError: if `cond.dtype != tf.bool` TypeError: if `cond` is not a constant and `true_vector.dtype != false_vector.dtype` """ with tf.name_scope(name): cond = tf.convert_to_tensor( value=cond, dtype_hint=tf.bool, name="cond") if cond.dtype != tf.bool: raise TypeError( "{}.dtype={} which is not {}".format(cond, cond.dtype, tf.bool)) true_vector = tf.convert_to_tensor(value=true_vector, name="true_vector") false_vector = tf.convert_to_tensor(value=false_vector, name="false_vector") if true_vector.dtype != false_vector.dtype: raise TypeError( "{}.dtype={} does not match {}.dtype={}".format( true_vector, true_vector.dtype, false_vector, false_vector.dtype)) cond_value_static = tf.get_static_value(cond) if cond_value_static is not None: return true_vector if cond_value_static else false_vector n = tf.shape(input=true_vector)[0] return tf.slice( tf.concat([true_vector, false_vector], 0), [tf.where(cond, 0, n)], [tf.where(cond, n, -1)])
[ "def", "pick_vector", "(", "cond", ",", "true_vector", ",", "false_vector", ",", "name", "=", "\"pick_vector\"", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "cond", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "cond", ",", "dtype_hint", "=", "tf", ".", "bool", ",", "name", "=", "\"cond\"", ")", "if", "cond", ".", "dtype", "!=", "tf", ".", "bool", ":", "raise", "TypeError", "(", "\"{}.dtype={} which is not {}\"", ".", "format", "(", "cond", ",", "cond", ".", "dtype", ",", "tf", ".", "bool", ")", ")", "true_vector", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "true_vector", ",", "name", "=", "\"true_vector\"", ")", "false_vector", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "false_vector", ",", "name", "=", "\"false_vector\"", ")", "if", "true_vector", ".", "dtype", "!=", "false_vector", ".", "dtype", ":", "raise", "TypeError", "(", "\"{}.dtype={} does not match {}.dtype={}\"", ".", "format", "(", "true_vector", ",", "true_vector", ".", "dtype", ",", "false_vector", ",", "false_vector", ".", "dtype", ")", ")", "cond_value_static", "=", "tf", ".", "get_static_value", "(", "cond", ")", "if", "cond_value_static", "is", "not", "None", ":", "return", "true_vector", "if", "cond_value_static", "else", "false_vector", "n", "=", "tf", ".", "shape", "(", "input", "=", "true_vector", ")", "[", "0", "]", "return", "tf", ".", "slice", "(", "tf", ".", "concat", "(", "[", "true_vector", ",", "false_vector", "]", ",", "0", ")", ",", "[", "tf", ".", "where", "(", "cond", ",", "0", ",", "n", ")", "]", ",", "[", "tf", ".", "where", "(", "cond", ",", "n", ",", "-", "1", ")", "]", ")" ]
Picks possibly different length row `Tensor`s based on condition.

  Value `Tensor`s should have exactly one dimension.

  If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
  `false_vector` is immediately returned. I.e., no graph nodes are created and
  no validation happens.

  Args:
    cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
    true_vector: `Tensor` of one dimension. Returned when cond is `True`.
    false_vector: `Tensor` of one dimension. Returned when cond is `False`.
    name: Python `str`. The name to give this op.

  Example:

  ```python
  pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))  # [10, 11]
  pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))  # [15, 16, 17]
  ```

  Returns:
    true_or_false_vector: `Tensor`.

  Raises:
    TypeError: if `cond.dtype != tf.bool`
    TypeError: if `cond` is not a constant and
      `true_vector.dtype != false_vector.dtype`
[ "Picks", "possibly", "different", "length", "row", "Tensor", "s", "based", "on", "condition", "." ]
python
test
NORDUnet/python-norduniclient
norduniclient/core.py
https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L508-L537
def get_indexed_node(manager, prop, value, node_type='Node', lookup_func='CONTAINS', legacy=True):
    """
    :param manager: Neo4jDBSessionManager
    :param prop: Indexed property
    :param value: Indexed value
    :param node_type: Label used for index
    :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH
    :param legacy: Backwards compatibility

    :type manager: Neo4jDBSessionManager
    :type prop: str
    :type value: str
    :type node_type: str
    :type lookup_func: str
    :type legacy: bool

    :return: Dict or Node object
    :rtype: dict|Node
    """
    q = """
        MATCH (n:{label})
        WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})
        RETURN n
        """.format(label=node_type, prop=prop, lookup_func=lookup_func)

    with manager.session as s:
        for result in s.run(q, {'value': value}):
            if legacy:
                yield result['n'].properties
            else:
                yield result['n']
[ "def", "get_indexed_node", "(", "manager", ",", "prop", ",", "value", ",", "node_type", "=", "'Node'", ",", "lookup_func", "=", "'CONTAINS'", ",", "legacy", "=", "True", ")", ":", "q", "=", "\"\"\"\n MATCH (n:{label})\n WHERE LOWER(n.{prop}) {lookup_func} LOWER({{value}})\n RETURN n\n \"\"\"", ".", "format", "(", "label", "=", "node_type", ",", "prop", "=", "prop", ",", "lookup_func", "=", "lookup_func", ")", "with", "manager", ".", "session", "as", "s", ":", "for", "result", "in", "s", ".", "run", "(", "q", ",", "{", "'value'", ":", "value", "}", ")", ":", "if", "legacy", ":", "yield", "result", "[", "'n'", "]", ".", "properties", "else", ":", "yield", "result", "[", "'n'", "]" ]
:param manager: Neo4jDBSessionManager :param prop: Indexed property :param value: Indexed value :param node_type: Label used for index :param lookup_func: STARTS WITH | CONTAINS | ENDS WITH :param legacy: Backwards compatibility :type manager: Neo4jDBSessionManager :type prop: str :type value: str :type node_type: str :type lookup_func: str :type legacy: bool :return: Dict or Node object :rtype: dict|Node
[ ":", "param", "manager", ":", "Neo4jDBSessionManager", ":", "param", "prop", ":", "Indexed", "property", ":", "param", "value", ":", "Indexed", "value", ":", "param", "node_type", ":", "Label", "used", "for", "index", ":", "param", "lookup_func", ":", "STARTS", "WITH", "|", "CONTAINS", "|", "ENDS", "WITH", ":", "param", "legacy", ":", "Backwards", "compatibility" ]
python
train
googleapis/google-cloud-python
storage/google/cloud/storage/notification.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/notification.py#L111-L133
def from_api_repr(cls, resource, bucket):
    """Construct an instance from the JSON repr returned by the server.

    See: https://cloud.google.com/storage/docs/json_api/v1/notifications

    :type resource: dict
    :param resource: JSON repr of the notification

    :type bucket: :class:`google.cloud.storage.bucket.Bucket`
    :param bucket: Bucket to which the notification is bound.

    :rtype: :class:`BucketNotification`
    :returns: the new notification instance
    """
    topic_path = resource.get("topic")
    if topic_path is None:
        raise ValueError("Resource has no topic")

    name, project = _parse_topic_path(topic_path)
    instance = cls(bucket, name, topic_project=project)
    instance._properties = resource
    return instance
[ "def", "from_api_repr", "(", "cls", ",", "resource", ",", "bucket", ")", ":", "topic_path", "=", "resource", ".", "get", "(", "\"topic\"", ")", "if", "topic_path", "is", "None", ":", "raise", "ValueError", "(", "\"Resource has no topic\"", ")", "name", ",", "project", "=", "_parse_topic_path", "(", "topic_path", ")", "instance", "=", "cls", "(", "bucket", ",", "name", ",", "topic_project", "=", "project", ")", "instance", ".", "_properties", "=", "resource", "return", "instance" ]
Construct an instance from the JSON repr returned by the server. See: https://cloud.google.com/storage/docs/json_api/v1/notifications :type resource: dict :param resource: JSON repr of the notification :type bucket: :class:`google.cloud.storage.bucket.Bucket` :param bucket: Bucket to which the notification is bound. :rtype: :class:`BucketNotification` :returns: the new notification instance
[ "Construct", "an", "instance", "from", "the", "JSON", "repr", "returned", "by", "the", "server", "." ]
python
train
pandas-dev/pandas
pandas/core/indexes/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1487-L1541
def droplevel(self, level=0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. .. versionadded:: 0.23.1 (support for non-MultiIndex) Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] if len(level) == 0: return self if len(level) >= self.nlevels: raise ValueError("Cannot remove {} levels from an index with {} " "levels: at least one level must be " "left.".format(len(level), self.nlevels)) # The two checks above guarantee that here self is a MultiIndex new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result.name = new_names[0] return result else: from .multi import MultiIndex return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False)
[ "def", "droplevel", "(", "self", ",", "level", "=", "0", ")", ":", "if", "not", "isinstance", "(", "level", ",", "(", "tuple", ",", "list", ")", ")", ":", "level", "=", "[", "level", "]", "levnums", "=", "sorted", "(", "self", ".", "_get_level_number", "(", "lev", ")", "for", "lev", "in", "level", ")", "[", ":", ":", "-", "1", "]", "if", "len", "(", "level", ")", "==", "0", ":", "return", "self", "if", "len", "(", "level", ")", ">=", "self", ".", "nlevels", ":", "raise", "ValueError", "(", "\"Cannot remove {} levels from an index with {} \"", "\"levels: at least one level must be \"", "\"left.\"", ".", "format", "(", "len", "(", "level", ")", ",", "self", ".", "nlevels", ")", ")", "# The two checks above guarantee that here self is a MultiIndex", "new_levels", "=", "list", "(", "self", ".", "levels", ")", "new_codes", "=", "list", "(", "self", ".", "codes", ")", "new_names", "=", "list", "(", "self", ".", "names", ")", "for", "i", "in", "levnums", ":", "new_levels", ".", "pop", "(", "i", ")", "new_codes", ".", "pop", "(", "i", ")", "new_names", ".", "pop", "(", "i", ")", "if", "len", "(", "new_levels", ")", "==", "1", ":", "# set nan if needed", "mask", "=", "new_codes", "[", "0", "]", "==", "-", "1", "result", "=", "new_levels", "[", "0", "]", ".", "take", "(", "new_codes", "[", "0", "]", ")", "if", "mask", ".", "any", "(", ")", ":", "result", "=", "result", ".", "putmask", "(", "mask", ",", "np", ".", "nan", ")", "result", ".", "name", "=", "new_names", "[", "0", "]", "return", "result", "else", ":", "from", ".", "multi", "import", "MultiIndex", "return", "MultiIndex", "(", "levels", "=", "new_levels", ",", "codes", "=", "new_codes", ",", "names", "=", "new_names", ",", "verify_integrity", "=", "False", ")" ]
Return index with requested level(s) removed.

        If resulting index has only 1 level left, the result will be
        of Index type, not MultiIndex.

        .. versionadded:: 0.23.1 (support for non-MultiIndex)

        Parameters
        ----------
        level : int, str, or list-like, default 0
            If a string is given, must be the name of a level
            If list-like, elements must be names or indexes of levels.

        Returns
        -------
        Index or MultiIndex
[ "Return", "index", "with", "requested", "level", "(", "s", ")", "removed", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/configfiles.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/configfiles.py#L171-L199
def get_config_parameter(config: ConfigParser,
                         section: str,
                         param: str,
                         fn: Callable[[Any], Any],
                         default: Any) -> Any:
    """
    Fetch parameter from ``configparser`` ``.INI`` file.

    Args:
        config: :class:`ConfigParser` object
        section: section name within config file
        param: name of parameter within section
        fn: function to apply to string parameter (e.g. ``int``)
        default: default value

    Returns:
        parameter value, or ``None`` if ``default is None``, or ``fn(default)``
    """
    try:
        value = fn(config.get(section, param))
    except (TypeError, ValueError, NoOptionError):
        log.warning(
            "Configuration variable {} not found or improper in section [{}]; "
            "using default of {!r}", param, section, default)
        if default is None:
            value = default
        else:
            value = fn(default)
    return value
[ "def", "get_config_parameter", "(", "config", ":", "ConfigParser", ",", "section", ":", "str", ",", "param", ":", "str", ",", "fn", ":", "Callable", "[", "[", "Any", "]", ",", "Any", "]", ",", "default", ":", "Any", ")", "->", "Any", ":", "try", ":", "value", "=", "fn", "(", "config", ".", "get", "(", "section", ",", "param", ")", ")", "except", "(", "TypeError", ",", "ValueError", ",", "NoOptionError", ")", ":", "log", ".", "warning", "(", "\"Configuration variable {} not found or improper in section [{}]; \"", "\"using default of {!r}\"", ",", "param", ",", "section", ",", "default", ")", "if", "default", "is", "None", ":", "value", "=", "default", "else", ":", "value", "=", "fn", "(", "default", ")", "return", "value" ]
Fetch parameter from ``configparser`` ``.INI`` file. Args: config: :class:`ConfigParser` object section: section name within config file param: name of parameter within section fn: function to apply to string parameter (e.g. ``int``) default: default value Returns: parameter value, or ``None`` if ``default is None``, or ``fn(default)``
[ "Fetch", "parameter", "from", "configparser", ".", "INI", "file", "." ]
python
train
edx/XBlock
xblock/scorable.py
https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/scorable.py#L34-L55
def rescore(self, only_if_higher):
    """
    Calculate a new raw score and save it to the block.  If only_if_higher
    is True and the score didn't improve, keep the existing score.

    Raises a TypeError if the block cannot be scored.
    Raises a ValueError if the user has not yet completed the problem.

    May also raise other errors in self.calculate_score().  Currently
    unconstrained.
    """
    _ = self.runtime.service(self, 'i18n').ugettext

    if not self.allows_rescore():
        raise TypeError(_('Problem does not support rescoring: {}').format(self.location))

    if not self.has_submitted_answer():
        raise ValueError(_('Cannot rescore unanswered problem: {}').format(self.location))

    new_score = self.calculate_score()
    self._publish_grade(new_score, only_if_higher)
[ "def", "rescore", "(", "self", ",", "only_if_higher", ")", ":", "_", "=", "self", ".", "runtime", ".", "service", "(", "self", ",", "'i18n'", ")", ".", "ugettext", "if", "not", "self", ".", "allows_rescore", "(", ")", ":", "raise", "TypeError", "(", "_", "(", "'Problem does not support rescoring: {}'", ")", ".", "format", "(", "self", ".", "location", ")", ")", "if", "not", "self", ".", "has_submitted_answer", "(", ")", ":", "raise", "ValueError", "(", "_", "(", "'Cannot rescore unanswered problem: {}'", ")", ".", "format", "(", "self", ".", "location", ")", ")", "new_score", "=", "self", ".", "calculate_score", "(", ")", "self", ".", "_publish_grade", "(", "new_score", ",", "only_if_higher", ")" ]
Calculate a new raw score and save it to the block. If only_if_higher is True and the score didn't improve, keep the existing score. Raises a TypeError if the block cannot be scored. Raises a ValueError if the user has not yet completed the problem. May also raise other errors in self.calculate_score(). Currently unconstrained.
[ "Calculate", "a", "new", "raw", "score", "and", "save", "it", "to", "the", "block", ".", "If", "only_if_higher", "is", "True", "and", "the", "score", "didn", "t", "improve", "keep", "the", "existing", "score", "." ]
python
train
gwpy/gwpy
gwpy/segments/flag.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L1186-L1253
def query_dqsegdb(cls, flags, *args, **kwargs): """Query the advanced LIGO DQSegDB for a list of flags. Parameters ---------- flags : `iterable` A list of flag names for which to query. *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments. on_error : `str` how to handle an error querying for one flag, one of - `'raise'` (default): raise the Exception - `'warn'`: print a warning - `'ignore'`: move onto the next flag as if nothing happened url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flagdict : `DataQualityDict` An ordered `DataQualityDict` of (name, `DataQualityFlag`) pairs. """ # check on_error flag on_error = kwargs.pop('on_error', 'raise').lower() if on_error not in ['raise', 'warn', 'ignore']: raise ValueError("on_error must be one of 'raise', 'warn', " "or 'ignore'") # parse segments qsegs = _parse_query_segments(args, cls.query_dqsegdb) # set up threading inq = Queue() outq = Queue() for i in range(len(flags)): t = _QueryDQSegDBThread(inq, outq, qsegs, **kwargs) t.setDaemon(True) t.start() for i, flag in enumerate(flags): inq.put((i, flag)) # capture output inq.join() outq.join() new = cls() results = list(zip(*sorted([outq.get() for i in range(len(flags))], key=lambda x: x[0])))[1] for result, flag in zip(results, flags): if isinstance(result, Exception): result.args = ('%s [%s]' % (str(result), str(flag)),) if on_error == 'ignore': pass elif on_error == 'warn': warnings.warn(str(result)) else: raise result else: new[flag] = result return new
[ "def", "query_dqsegdb", "(", "cls", ",", "flags", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# check on_error flag", "on_error", "=", "kwargs", ".", "pop", "(", "'on_error'", ",", "'raise'", ")", ".", "lower", "(", ")", "if", "on_error", "not", "in", "[", "'raise'", ",", "'warn'", ",", "'ignore'", "]", ":", "raise", "ValueError", "(", "\"on_error must be one of 'raise', 'warn', \"", "\"or 'ignore'\"", ")", "# parse segments", "qsegs", "=", "_parse_query_segments", "(", "args", ",", "cls", ".", "query_dqsegdb", ")", "# set up threading", "inq", "=", "Queue", "(", ")", "outq", "=", "Queue", "(", ")", "for", "i", "in", "range", "(", "len", "(", "flags", ")", ")", ":", "t", "=", "_QueryDQSegDBThread", "(", "inq", ",", "outq", ",", "qsegs", ",", "*", "*", "kwargs", ")", "t", ".", "setDaemon", "(", "True", ")", "t", ".", "start", "(", ")", "for", "i", ",", "flag", "in", "enumerate", "(", "flags", ")", ":", "inq", ".", "put", "(", "(", "i", ",", "flag", ")", ")", "# capture output", "inq", ".", "join", "(", ")", "outq", ".", "join", "(", ")", "new", "=", "cls", "(", ")", "results", "=", "list", "(", "zip", "(", "*", "sorted", "(", "[", "outq", ".", "get", "(", ")", "for", "i", "in", "range", "(", "len", "(", "flags", ")", ")", "]", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", ")", ")", "[", "1", "]", "for", "result", ",", "flag", "in", "zip", "(", "results", ",", "flags", ")", ":", "if", "isinstance", "(", "result", ",", "Exception", ")", ":", "result", ".", "args", "=", "(", "'%s [%s]'", "%", "(", "str", "(", "result", ")", ",", "str", "(", "flag", ")", ")", ",", ")", "if", "on_error", "==", "'ignore'", ":", "pass", "elif", "on_error", "==", "'warn'", ":", "warnings", ".", "warn", "(", "str", "(", "result", ")", ")", "else", ":", "raise", "result", "else", ":", "new", "[", "flag", "]", "=", "result", "return", "new" ]
Query the advanced LIGO DQSegDB for a list of flags.

        Parameters
        ----------
        flags : `iterable`
            A list of flag names for which to query.

        *args
            Either, two `float`-like numbers indicating the
            GPS [start, stop) interval, or a `SegmentList`
            defining a number of summary segments.

        on_error : `str`
            how to handle an error querying for one flag, one of

            - `'raise'` (default): raise the Exception
            - `'warn'`: print a warning
            - `'ignore'`: move onto the next flag as if nothing happened

        url : `str`, optional
            URL of the segment database, defaults to
            ``$DEFAULT_SEGMENT_SERVER`` environment variable, or
            ``'https://segments.ligo.org'``

        Returns
        -------
        flagdict : `DataQualityDict`
            An ordered `DataQualityDict` of (name, `DataQualityFlag`) pairs.
[ "Query", "the", "advanced", "LIGO", "DQSegDB", "for", "a", "list", "of", "flags", "." ]
python
train
Galarzaa90/tibia.py
tibiapy/world.py
https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/world.py#L371-L392
def _parse_tables(cls, parsed_content):
    """
    Parses the information tables found in a world's information page.

    Parameters
    ----------
    parsed_content: :class:`bs4.BeautifulSoup`
        A :class:`BeautifulSoup` object containing all the content.

    Returns
    -------
    :class:`OrderedDict`[:class:`str`, :class:`list`[:class:`bs4.Tag`]]
        A dictionary containing all the table rows, with the table headers as keys.
    """
    tables = parsed_content.find_all('div', attrs={'class': 'TableContainer'})
    output = OrderedDict()
    for table in tables:
        title = table.find("div", attrs={'class': 'Text'}).text
        title = title.split("[")[0].strip()
        inner_table = table.find("div", attrs={'class': 'InnerTableContainer'})
        output[title] = inner_table.find_all("tr")
    return output
[ "def", "_parse_tables", "(", "cls", ",", "parsed_content", ")", ":", "tables", "=", "parsed_content", ".", "find_all", "(", "'div'", ",", "attrs", "=", "{", "'class'", ":", "'TableContainer'", "}", ")", "output", "=", "OrderedDict", "(", ")", "for", "table", "in", "tables", ":", "title", "=", "table", ".", "find", "(", "\"div\"", ",", "attrs", "=", "{", "'class'", ":", "'Text'", "}", ")", ".", "text", "title", "=", "title", ".", "split", "(", "\"[\"", ")", "[", "0", "]", ".", "strip", "(", ")", "inner_table", "=", "table", ".", "find", "(", "\"div\"", ",", "attrs", "=", "{", "'class'", ":", "'InnerTableContainer'", "}", ")", "output", "[", "title", "]", "=", "inner_table", ".", "find_all", "(", "\"tr\"", ")", "return", "output" ]
Parses the information tables found in a world's information page. Parameters ---------- parsed_content: :class:`bs4.BeautifulSoup` A :class:`BeautifulSoup` object containing all the content. Returns ------- :class:`OrderedDict`[:class:`str`, :class:`list`[:class:`bs4.Tag`]] A dictionary containing all the table rows, with the table headers as keys.
[ "Parses", "the", "information", "tables", "found", "in", "a", "world", "s", "information", "page", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/xmppstringprep.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/xmppstringprep.py#L182-L189
def check_unassigned(self, data):
    """Checks for unassigned character codes."""
    for char in data:
        for lookup in self.unassigned:
            if lookup(char):
                raise StringprepError("Unassigned character: {0!r}"
                                      .format(char))
    return data
[ "def", "check_unassigned", "(", "self", ",", "data", ")", ":", "for", "char", "in", "data", ":", "for", "lookup", "in", "self", ".", "unassigned", ":", "if", "lookup", "(", "char", ")", ":", "raise", "StringprepError", "(", "\"Unassigned character: {0!r}\"", ".", "format", "(", "char", ")", ")", "return", "data" ]
Checks for unassigned character codes.
[ "Checks", "for", "unassigned", "character", "codes", "." ]
python
valid
thunder-project/thunder
thunder/readers.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L437-L472
def getkeys(self, path, filename=None, directories=False, recursive=False): """ Get matching keys for a path """ from .utils import connection_with_anon, connection_with_gs parse = BotoClient.parse_query(path) scheme = parse[0] bucket_name = parse[1] key = parse[2] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(bucket_name) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) if filename: if not key.endswith("/"): if self.check_prefix(bucket, key + "/"): key += "/" else: index = key.rfind("/") if index >= 0: key = key[:(index+1)] else: key = "" key += filename keylist = BotoClient.retrieve_keys(bucket, key, prefix=parse[3], postfix=parse[4], directories=directories, recursive=recursive) return scheme, keylist
[ "def", "getkeys", "(", "self", ",", "path", ",", "filename", "=", "None", ",", "directories", "=", "False", ",", "recursive", "=", "False", ")", ":", "from", ".", "utils", "import", "connection_with_anon", ",", "connection_with_gs", "parse", "=", "BotoClient", ".", "parse_query", "(", "path", ")", "scheme", "=", "parse", "[", "0", "]", "bucket_name", "=", "parse", "[", "1", "]", "key", "=", "parse", "[", "2", "]", "if", "scheme", "==", "'s3'", "or", "scheme", "==", "'s3n'", ":", "conn", "=", "connection_with_anon", "(", "self", ".", "credentials", ")", "bucket", "=", "conn", ".", "get_bucket", "(", "bucket_name", ")", "elif", "scheme", "==", "'gs'", ":", "conn", "=", "connection_with_gs", "(", "bucket_name", ")", "bucket", "=", "conn", ".", "get_bucket", "(", ")", "else", ":", "raise", "NotImplementedError", "(", "\"No file reader implementation for URL scheme \"", "+", "scheme", ")", "if", "filename", ":", "if", "not", "key", ".", "endswith", "(", "\"/\"", ")", ":", "if", "self", ".", "check_prefix", "(", "bucket", ",", "key", "+", "\"/\"", ")", ":", "key", "+=", "\"/\"", "else", ":", "index", "=", "key", ".", "rfind", "(", "\"/\"", ")", "if", "index", ">=", "0", ":", "key", "=", "key", "[", ":", "(", "index", "+", "1", ")", "]", "else", ":", "key", "=", "\"\"", "key", "+=", "filename", "keylist", "=", "BotoClient", ".", "retrieve_keys", "(", "bucket", ",", "key", ",", "prefix", "=", "parse", "[", "3", "]", ",", "postfix", "=", "parse", "[", "4", "]", ",", "directories", "=", "directories", ",", "recursive", "=", "recursive", ")", "return", "scheme", ",", "keylist" ]
Get matching keys for a path
[ "Get", "matching", "keys", "for", "a", "path" ]
python
train
cytoscape/py2cytoscape
py2cytoscape/cyrest/cybrowser.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/cybrowser.py#L42-L59
def show(self, wid=None, text=None, title=None, url=None, verbose=False):
    """
    Launch an HTML browser in the Results Panel.

    :param wid: Window ID
    :param text: HTML text
    :param title: Window Title
    :param url: URL
    :param verbose: print more
    """
    PARAMS={}
    for p,v in zip(["id","text","title","url"],[wid,text,title,url]):
        if v:
            PARAMS[p]=v
    response=api(url=self.__url+"/show?",PARAMS=PARAMS, method="GET", verbose=verbose)
    return response
[ "def", "show", "(", "self", ",", "wid", "=", "None", ",", "text", "=", "None", ",", "title", "=", "None", ",", "url", "=", "None", ",", "verbose", "=", "False", ")", ":", "PARAMS", "=", "{", "}", "for", "p", ",", "v", "in", "zip", "(", "[", "\"id\"", ",", "\"text\"", ",", "\"title\"", ",", "\"url\"", "]", ",", "[", "wid", ",", "text", ",", "title", ",", "url", "]", ")", ":", "if", "v", ":", "PARAMS", "[", "p", "]", "=", "v", "response", "=", "api", "(", "url", "=", "self", ".", "__url", "+", "\"/show?\"", ",", "PARAMS", "=", "PARAMS", ",", "method", "=", "\"GET\"", ",", "verbose", "=", "verbose", ")", "return", "response" ]
Launch an HTML browser in the Results Panel. :param wid: Window ID :param text: HTML text :param title: Window Title :param url: URL :param verbose: print more
[ "Launch", "an", "HTML", "browser", "in", "the", "Results", "Panel", "." ]
python
train
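
A hedged usage sketch for show() above; it assumes a running Cytoscape instance with cyREST enabled and that py2cytoscape exposes this service as cyclient().cybrowser, so the attribute names may need adjusting to the installed version.

    from py2cytoscape import cyrest

    client = cyrest.cyclient()           # REST client for the local Cytoscape
    client.cybrowser.show(
        wid='docs',                      # window ID
        title='Cytoscape documentation', # tab title in the Results Panel
        url='http://manual.cytoscape.org/',
        verbose=True,                    # print the HTTP request being made
    )
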
tkf/rash
rash/cli.py
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/cli.py#L31-L64
def get_parser(commands): """ Generate argument parser given a list of subcommand specifications. :type commands: list of (str, function, function) :arg commands: Each element must be a tuple ``(name, adder, runner)``. :param name: subcommand :param adder: a function takes one object which is an instance of :class:`argparse.ArgumentParser` and add arguments to it :param runner: a function takes keyword arguments which must be specified by the arguments parsed by the parser defined by `adder`. Docstring of this function will be the description of the subcommand. """ parser = argparse.ArgumentParser( formatter_class=Formatter, description=__doc__, epilog=EPILOG, ) subparsers = parser.add_subparsers() for (name, adder, runner) in commands: subp = subparsers.add_parser( name, formatter_class=Formatter, description=runner.__doc__ and textwrap.dedent(runner.__doc__)) adder(subp) subp.set_defaults(func=runner) return parser
[ "def", "get_parser", "(", "commands", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "formatter_class", "=", "Formatter", ",", "description", "=", "__doc__", ",", "epilog", "=", "EPILOG", ",", ")", "subparsers", "=", "parser", ".", "add_subparsers", "(", ")", "for", "(", "name", ",", "adder", ",", "runner", ")", "in", "commands", ":", "subp", "=", "subparsers", ".", "add_parser", "(", "name", ",", "formatter_class", "=", "Formatter", ",", "description", "=", "runner", ".", "__doc__", "and", "textwrap", ".", "dedent", "(", "runner", ".", "__doc__", ")", ")", "adder", "(", "subp", ")", "subp", ".", "set_defaults", "(", "func", "=", "runner", ")", "return", "parser" ]
Generate argument parser given a list of subcommand specifications. :type commands: list of (str, function, function) :arg commands: Each element must be a tuple ``(name, adder, runner)``. :param name: subcommand :param adder: a function takes one object which is an instance of :class:`argparse.ArgumentParser` and add arguments to it :param runner: a function takes keyword arguments which must be specified by the arguments parsed by the parser defined by `adder`. Docstring of this function will be the description of the subcommand.
[ "Generate", "argument", "parser", "given", "a", "list", "of", "subcommand", "specifications", "." ]
python
train
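
A sketch of the (name, adder, runner) contract that get_parser consumes, built with the standard library only; the real module also wires in a custom help Formatter and EPILOG, which are left out here, and the 'record' subcommand is invented for the example.

    import argparse

    def record_add_arguments(parser):
        parser.add_argument('--shell', default='bash')

    def record_run(shell):
        """Record a shell session."""
        print('recording', shell)

    commands = [('record', record_add_arguments, record_run)]

    # equivalent of what get_parser(commands) assembles:
    parser = argparse.ArgumentParser(description='rash-like CLI sketch')
    subparsers = parser.add_subparsers()
    for name, adder, runner in commands:
        subp = subparsers.add_parser(name, description=runner.__doc__)
        adder(subp)
        subp.set_defaults(func=runner)

    ns = vars(parser.parse_args(['record', '--shell', 'zsh']))
    ns.pop('func')(**ns)        # prints: recording zsh
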
google/openhtf
openhtf/core/measurements.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L260-L276
def validate(self): """Validate this measurement and update its 'outcome' field.""" # PASS if all our validators return True, otherwise FAIL. try: if all(v(self.measured_value.value) for v in self.validators): self.outcome = Outcome.PASS else: self.outcome = Outcome.FAIL return self except Exception as e: # pylint: disable=bare-except _LOG.error('Validation for measurement %s raised an exception %s.', self.name, e) self.outcome = Outcome.FAIL raise finally: if self._cached: self._cached['outcome'] = self.outcome.name
[ "def", "validate", "(", "self", ")", ":", "# PASS if all our validators return True, otherwise FAIL.", "try", ":", "if", "all", "(", "v", "(", "self", ".", "measured_value", ".", "value", ")", "for", "v", "in", "self", ".", "validators", ")", ":", "self", ".", "outcome", "=", "Outcome", ".", "PASS", "else", ":", "self", ".", "outcome", "=", "Outcome", ".", "FAIL", "return", "self", "except", "Exception", "as", "e", ":", "# pylint: disable=bare-except", "_LOG", ".", "error", "(", "'Validation for measurement %s raised an exception %s.'", ",", "self", ".", "name", ",", "e", ")", "self", ".", "outcome", "=", "Outcome", ".", "FAIL", "raise", "finally", ":", "if", "self", ".", "_cached", ":", "self", ".", "_cached", "[", "'outcome'", "]", "=", "self", ".", "outcome", ".", "name" ]
Validate this measurement and update its 'outcome' field.
[ "Validate", "this", "measurement", "and", "update", "its", "outcome", "field", "." ]
python
train
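
A minimal illustration of the pass/fail rule applied by validate() above: a measurement passes only when every validator callable returns True for the measured value (the two validators here are made up for the example).

    validators = [lambda v: v >= 0.0, lambda v: v <= 5.0]

    def outcome(value):
        return 'PASS' if all(v(value) for v in validators) else 'FAIL'

    print(outcome(3.3))   # PASS
    print(outcome(7.1))   # FAIL
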
Alignak-monitoring/alignak
alignak/stats.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/stats.py#L467-L536
def counter(self, key, value, timestamp=None): """Set a counter value If the inner key does not exist is is created :param key: counter to update :type key: str :param value: counter value :type value: float :return: An alignak_stat brok if broks are enabled else None """ _min, _max, count, _sum = self.stats.get(key, (None, None, 0, 0)) count += 1 _sum += value if _min is None or value < _min: _min = value if _max is None or value > _max: _max = value self.stats[key] = (_min, _max, count, _sum) # Manage local statsd part if self.statsd_enabled and self.statsd_sock: # beware, we are sending ms here, timer is in seconds packet = '%s.%s.%s:%d|c' % (self.statsd_prefix, self.name, key, value) packet = packet.encode('utf-8') try: self.statsd_sock.sendto(packet, self.statsd_addr) except (socket.error, socket.gaierror): pass # cannot send? ok not a huge problem here and we cannot # log because it will be far too verbose :p # Manage Graphite part if self.statsd_enabled and self.carbon: self.send_to_graphite(key, value, timestamp=timestamp) # Manage file part if self.statsd_enabled and self.file_d: if timestamp is None: timestamp = int(time.time()) packet = self.line_fmt if not self.date_fmt: date = "%s" % timestamp else: date = datetime.datetime.fromtimestamp(timestamp).strftime(self.date_fmt) packet = packet.replace("#date#", date) packet = packet.replace("#counter#", '%s.%s.%s' % (self.statsd_prefix, self.name, key)) packet = packet.replace("#value#", '%d' % value) packet = packet.replace("#uom#", 'c') try: self.file_d.write(packet) except IOError: logger.warning("Could not write to the file: %s", packet) if self.broks_enabled: logger.debug("alignak stat brok: %s = %s", key, value) if timestamp is None: timestamp = int(time.time()) return Brok({'type': 'alignak_stat', 'data': { 'ts': timestamp, 'type': 'counter', 'metric': '%s.%s.%s' % (self.statsd_prefix, self.name, key), 'value': value, 'uom': 'c' }}) return None
[ "def", "counter", "(", "self", ",", "key", ",", "value", ",", "timestamp", "=", "None", ")", ":", "_min", ",", "_max", ",", "count", ",", "_sum", "=", "self", ".", "stats", ".", "get", "(", "key", ",", "(", "None", ",", "None", ",", "0", ",", "0", ")", ")", "count", "+=", "1", "_sum", "+=", "value", "if", "_min", "is", "None", "or", "value", "<", "_min", ":", "_min", "=", "value", "if", "_max", "is", "None", "or", "value", ">", "_max", ":", "_max", "=", "value", "self", ".", "stats", "[", "key", "]", "=", "(", "_min", ",", "_max", ",", "count", ",", "_sum", ")", "# Manage local statsd part", "if", "self", ".", "statsd_enabled", "and", "self", ".", "statsd_sock", ":", "# beware, we are sending ms here, timer is in seconds", "packet", "=", "'%s.%s.%s:%d|c'", "%", "(", "self", ".", "statsd_prefix", ",", "self", ".", "name", ",", "key", ",", "value", ")", "packet", "=", "packet", ".", "encode", "(", "'utf-8'", ")", "try", ":", "self", ".", "statsd_sock", ".", "sendto", "(", "packet", ",", "self", ".", "statsd_addr", ")", "except", "(", "socket", ".", "error", ",", "socket", ".", "gaierror", ")", ":", "pass", "# cannot send? ok not a huge problem here and we cannot", "# log because it will be far too verbose :p", "# Manage Graphite part", "if", "self", ".", "statsd_enabled", "and", "self", ".", "carbon", ":", "self", ".", "send_to_graphite", "(", "key", ",", "value", ",", "timestamp", "=", "timestamp", ")", "# Manage file part", "if", "self", ".", "statsd_enabled", "and", "self", ".", "file_d", ":", "if", "timestamp", "is", "None", ":", "timestamp", "=", "int", "(", "time", ".", "time", "(", ")", ")", "packet", "=", "self", ".", "line_fmt", "if", "not", "self", ".", "date_fmt", ":", "date", "=", "\"%s\"", "%", "timestamp", "else", ":", "date", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "timestamp", ")", ".", "strftime", "(", "self", ".", "date_fmt", ")", "packet", "=", "packet", ".", "replace", "(", "\"#date#\"", ",", "date", ")", "packet", "=", "packet", ".", "replace", "(", "\"#counter#\"", ",", "'%s.%s.%s'", "%", "(", "self", ".", "statsd_prefix", ",", "self", ".", "name", ",", "key", ")", ")", "packet", "=", "packet", ".", "replace", "(", "\"#value#\"", ",", "'%d'", "%", "value", ")", "packet", "=", "packet", ".", "replace", "(", "\"#uom#\"", ",", "'c'", ")", "try", ":", "self", ".", "file_d", ".", "write", "(", "packet", ")", "except", "IOError", ":", "logger", ".", "warning", "(", "\"Could not write to the file: %s\"", ",", "packet", ")", "if", "self", ".", "broks_enabled", ":", "logger", ".", "debug", "(", "\"alignak stat brok: %s = %s\"", ",", "key", ",", "value", ")", "if", "timestamp", "is", "None", ":", "timestamp", "=", "int", "(", "time", ".", "time", "(", ")", ")", "return", "Brok", "(", "{", "'type'", ":", "'alignak_stat'", ",", "'data'", ":", "{", "'ts'", ":", "timestamp", ",", "'type'", ":", "'counter'", ",", "'metric'", ":", "'%s.%s.%s'", "%", "(", "self", ".", "statsd_prefix", ",", "self", ".", "name", ",", "key", ")", ",", "'value'", ":", "value", ",", "'uom'", ":", "'c'", "}", "}", ")", "return", "None" ]
Set a counter value If the inner key does not exist it is created :param key: counter to update :type key: str :param value: counter value :type value: float :return: An alignak_stat brok if broks are enabled else None
[ "Set", "a", "counter", "value" ]
python
train
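
A standalone sketch of the in-memory aggregation that counter() keeps per key, a (min, max, count, sum) tuple, without any of the statsd/Graphite/file/brok plumbing; the key name and values are invented.

    stats = {}

    def update_counter(key, value):
        _min, _max, count, _sum = stats.get(key, (None, None, 0, 0))
        count += 1
        _sum += value
        if _min is None or value < _min:
            _min = value
        if _max is None or value > _max:
            _max = value
        stats[key] = (_min, _max, count, _sum)

    for v in (3, 7, 5):
        update_counter('backend.checks', v)
    print(stats['backend.checks'])   # (3, 7, 3, 15)
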
tulsawebdevs/django-multi-gtfs
multigtfs/models/base.py
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/models/base.py#L303-L410
def export_txt(cls, feed): '''Export records as a GTFS comma-separated file''' objects = cls.objects.in_feed(feed) # If no records, return None if not objects.exists(): return # Get the columns used in the dataset column_map = objects.populated_column_map() columns, fields = zip(*column_map) extra_columns = feed.meta.get( 'extra_columns', {}).get(cls.__name__, []) # Get sort order if hasattr(cls, '_sort_order'): sort_fields = cls._sort_order else: sort_fields = [] for field in fields: base_field = field.split('__', 1)[0] point_match = re_point.match(base_field) if point_match: continue field_type = cls._meta.get_field(base_field) assert not isinstance(field_type, ManyToManyField) sort_fields.append(field) # Create CSV writer out = StringIO() csv_writer = writer(out, lineterminator='\n') # Write header row header_row = [text_type(c) for c in columns] header_row.extend(extra_columns) write_text_rows(csv_writer, [header_row]) # Report the work to be done total = objects.count() logger.info( '%d %s to export...', total, cls._meta.verbose_name_plural) # Populate related items cache model_to_field_name = {} cache = {} for field_name in fields: if '__' in field_name: local_field_name, subfield_name = field_name.split('__', 1) field = cls._meta.get_field(local_field_name) field_type = field.related_model model_name = field_type.__name__ if model_name in model_to_field_name: # Already loaded this model under a different field name cache[field_name] = cache[model_to_field_name[model_name]] else: # Load all feed data for this model pairs = field_type.objects.in_feed( feed).values_list('id', subfield_name) cache[field_name] = dict( (i, text_type(x)) for i, x in pairs) cache[field_name][None] = u'' model_to_field_name[model_name] = field_name # Assemble the rows, writing when we hit batch size count = 0 rows = [] for item in objects.order_by(*sort_fields).iterator(): row = [] for csv_name, field_name in column_map: obj = item point_match = re_point.match(field_name) if '__' in field_name: # Return relations from cache local_field_name = field_name.split('__', 1)[0] field_id = getattr(obj, local_field_name + '_id') row.append(cache[field_name][field_id]) elif point_match: # Get the lat or long from the point name, index = point_match.groups() field = getattr(obj, name) row.append(field.coords[int(index)]) else: # Handle other field types field = getattr(obj, field_name) if obj else '' if isinstance(field, date): formatted = field.strftime(u'%Y%m%d') row.append(text_type(formatted)) elif isinstance(field, bool): row.append(1 if field else 0) elif field is None: row.append(u'') else: row.append(text_type(field)) for col in extra_columns: row.append(obj.extra_data.get(col, u'')) rows.append(row) if len(rows) % batch_size == 0: # pragma: no cover write_text_rows(csv_writer, rows) count += len(rows) logger.info( "Exported %d %s", count, cls._meta.verbose_name_plural) rows = [] # Write rows smaller than batch size write_text_rows(csv_writer, rows) return out.getvalue()
[ "def", "export_txt", "(", "cls", ",", "feed", ")", ":", "objects", "=", "cls", ".", "objects", ".", "in_feed", "(", "feed", ")", "# If no records, return None", "if", "not", "objects", ".", "exists", "(", ")", ":", "return", "# Get the columns used in the dataset", "column_map", "=", "objects", ".", "populated_column_map", "(", ")", "columns", ",", "fields", "=", "zip", "(", "*", "column_map", ")", "extra_columns", "=", "feed", ".", "meta", ".", "get", "(", "'extra_columns'", ",", "{", "}", ")", ".", "get", "(", "cls", ".", "__name__", ",", "[", "]", ")", "# Get sort order", "if", "hasattr", "(", "cls", ",", "'_sort_order'", ")", ":", "sort_fields", "=", "cls", ".", "_sort_order", "else", ":", "sort_fields", "=", "[", "]", "for", "field", "in", "fields", ":", "base_field", "=", "field", ".", "split", "(", "'__'", ",", "1", ")", "[", "0", "]", "point_match", "=", "re_point", ".", "match", "(", "base_field", ")", "if", "point_match", ":", "continue", "field_type", "=", "cls", ".", "_meta", ".", "get_field", "(", "base_field", ")", "assert", "not", "isinstance", "(", "field_type", ",", "ManyToManyField", ")", "sort_fields", ".", "append", "(", "field", ")", "# Create CSV writer", "out", "=", "StringIO", "(", ")", "csv_writer", "=", "writer", "(", "out", ",", "lineterminator", "=", "'\\n'", ")", "# Write header row", "header_row", "=", "[", "text_type", "(", "c", ")", "for", "c", "in", "columns", "]", "header_row", ".", "extend", "(", "extra_columns", ")", "write_text_rows", "(", "csv_writer", ",", "[", "header_row", "]", ")", "# Report the work to be done", "total", "=", "objects", ".", "count", "(", ")", "logger", ".", "info", "(", "'%d %s to export...'", ",", "total", ",", "cls", ".", "_meta", ".", "verbose_name_plural", ")", "# Populate related items cache", "model_to_field_name", "=", "{", "}", "cache", "=", "{", "}", "for", "field_name", "in", "fields", ":", "if", "'__'", "in", "field_name", ":", "local_field_name", ",", "subfield_name", "=", "field_name", ".", "split", "(", "'__'", ",", "1", ")", "field", "=", "cls", ".", "_meta", ".", "get_field", "(", "local_field_name", ")", "field_type", "=", "field", ".", "related_model", "model_name", "=", "field_type", ".", "__name__", "if", "model_name", "in", "model_to_field_name", ":", "# Already loaded this model under a different field name", "cache", "[", "field_name", "]", "=", "cache", "[", "model_to_field_name", "[", "model_name", "]", "]", "else", ":", "# Load all feed data for this model", "pairs", "=", "field_type", ".", "objects", ".", "in_feed", "(", "feed", ")", ".", "values_list", "(", "'id'", ",", "subfield_name", ")", "cache", "[", "field_name", "]", "=", "dict", "(", "(", "i", ",", "text_type", "(", "x", ")", ")", "for", "i", ",", "x", "in", "pairs", ")", "cache", "[", "field_name", "]", "[", "None", "]", "=", "u''", "model_to_field_name", "[", "model_name", "]", "=", "field_name", "# Assemble the rows, writing when we hit batch size", "count", "=", "0", "rows", "=", "[", "]", "for", "item", "in", "objects", ".", "order_by", "(", "*", "sort_fields", ")", ".", "iterator", "(", ")", ":", "row", "=", "[", "]", "for", "csv_name", ",", "field_name", "in", "column_map", ":", "obj", "=", "item", "point_match", "=", "re_point", ".", "match", "(", "field_name", ")", "if", "'__'", "in", "field_name", ":", "# Return relations from cache", "local_field_name", "=", "field_name", ".", "split", "(", "'__'", ",", "1", ")", "[", "0", "]", "field_id", "=", "getattr", "(", "obj", ",", "local_field_name", "+", "'_id'", ")", "row", ".", "append", 
"(", "cache", "[", "field_name", "]", "[", "field_id", "]", ")", "elif", "point_match", ":", "# Get the lat or long from the point", "name", ",", "index", "=", "point_match", ".", "groups", "(", ")", "field", "=", "getattr", "(", "obj", ",", "name", ")", "row", ".", "append", "(", "field", ".", "coords", "[", "int", "(", "index", ")", "]", ")", "else", ":", "# Handle other field types", "field", "=", "getattr", "(", "obj", ",", "field_name", ")", "if", "obj", "else", "''", "if", "isinstance", "(", "field", ",", "date", ")", ":", "formatted", "=", "field", ".", "strftime", "(", "u'%Y%m%d'", ")", "row", ".", "append", "(", "text_type", "(", "formatted", ")", ")", "elif", "isinstance", "(", "field", ",", "bool", ")", ":", "row", ".", "append", "(", "1", "if", "field", "else", "0", ")", "elif", "field", "is", "None", ":", "row", ".", "append", "(", "u''", ")", "else", ":", "row", ".", "append", "(", "text_type", "(", "field", ")", ")", "for", "col", "in", "extra_columns", ":", "row", ".", "append", "(", "obj", ".", "extra_data", ".", "get", "(", "col", ",", "u''", ")", ")", "rows", ".", "append", "(", "row", ")", "if", "len", "(", "rows", ")", "%", "batch_size", "==", "0", ":", "# pragma: no cover", "write_text_rows", "(", "csv_writer", ",", "rows", ")", "count", "+=", "len", "(", "rows", ")", "logger", ".", "info", "(", "\"Exported %d %s\"", ",", "count", ",", "cls", ".", "_meta", ".", "verbose_name_plural", ")", "rows", "=", "[", "]", "# Write rows smaller than batch size", "write_text_rows", "(", "csv_writer", ",", "rows", ")", "return", "out", ".", "getvalue", "(", ")" ]
Export records as a GTFS comma-separated file
[ "Export", "records", "as", "a", "GTFS", "comma", "-", "separated", "file" ]
python
train
tjomasc/snekbol
snekbol/document.py
https://github.com/tjomasc/snekbol/blob/0b491aa96e0b1bd09e6c80cfb43807dd8a876c83/snekbol/document.py#L263-L268
def _get_triplet_value(self, graph, identity, rdf_type): """ Get a value from an RDF triple """ value = graph.value(subject=identity, predicate=rdf_type) return value.toPython() if value is not None else value
[ "def", "_get_triplet_value", "(", "self", ",", "graph", ",", "identity", ",", "rdf_type", ")", ":", "value", "=", "graph", ".", "value", "(", "subject", "=", "identity", ",", "predicate", "=", "rdf_type", ")", "return", "value", ".", "toPython", "(", ")", "if", "value", "is", "not", "None", "else", "value" ]
Get a value from an RDF triple
[ "Get", "a", "value", "from", "an", "RDF", "triple" ]
python
train
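
An illustration of the rdflib lookup that _get_triplet_value wraps, using an in-memory graph; the example subject and predicate are arbitrary stand-ins rather than the SBOL terms snekbol actually uses.

    from rdflib import Graph, Literal, URIRef

    g = Graph()
    subject = URIRef('http://example.org/component/1')
    predicate = URIRef('http://purl.org/dc/terms/title')
    g.add((subject, predicate, Literal('Promoter')))

    value = g.value(subject=subject, predicate=predicate)
    print(value.toPython() if value is not None else value)   # Promoter
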
gmr/tinman
tinman/application.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/application.py#L55-L77
def log_request(self, handler): """Writes a completed HTTP request to the logs. By default writes to the tinman.application LOGGER. To change this behavior either subclass Application and override this method, or pass a function in the application settings dictionary as 'log_function'. :param tornado.web.RequestHandler handler: The request handler """ if config.LOG_FUNCTION in self.settings: self.settings[config.LOG_FUNCTION](handler) return if handler.get_status() < 400: log_method = LOGGER.info elif handler.get_status() < 500: log_method = LOGGER.warning else: log_method = LOGGER.exception request_time = 1000.0 * handler.request.request_time() log_method("%d %s %.2fms", handler.get_status(), handler._request_summary(), request_time)
[ "def", "log_request", "(", "self", ",", "handler", ")", ":", "if", "config", ".", "LOG_FUNCTION", "in", "self", ".", "settings", ":", "self", ".", "settings", "[", "config", ".", "LOG_FUNCTION", "]", "(", "handler", ")", "return", "if", "handler", ".", "get_status", "(", ")", "<", "400", ":", "log_method", "=", "LOGGER", ".", "info", "elif", "handler", ".", "get_status", "(", ")", "<", "500", ":", "log_method", "=", "LOGGER", ".", "warning", "else", ":", "log_method", "=", "LOGGER", ".", "exception", "request_time", "=", "1000.0", "*", "handler", ".", "request", ".", "request_time", "(", ")", "log_method", "(", "\"%d %s %.2fms\"", ",", "handler", ".", "get_status", "(", ")", ",", "handler", ".", "_request_summary", "(", ")", ",", "request_time", ")" ]
Writes a completed HTTP request to the logs. By default writes to the tinman.application LOGGER. To change this behavior either subclass Application and override this method, or pass a function in the application settings dictionary as 'log_function'. :param tornado.web.RequestHandler handler: The request handler
[ "Writes", "a", "completed", "HTTP", "request", "to", "the", "logs", "." ]
python
train
yyuu/botornado
boto/vpc/__init__.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/vpc/__init__.py#L489-L524
def get_all_subnets(self, subnet_ids=None, filters=None): """ Retrieve information about your Subnets. You can filter results to return information only about those Subnets that match your search parameters. Otherwise, all Subnets associated with your account are returned. :type subnet_ids: list :param subnet_ids: A list of strings with the desired Subnet ID's :type filters: list of tuples :param filters: A list of tuples containing filters. Each tuple consists of a filter key and a filter value. Possible filter keys are: - *state*, the state of the Subnet (pending,available) - *vpdId*, the ID of teh VPC the subnet is in. - *cidrBlock*, CIDR block of the subnet - *availabilityZone*, the Availability Zone the subnet is in. :rtype: list :return: A list of :class:`boto.vpc.subnet.Subnet` """ params = {} if subnet_ids: self.build_list_params(params, subnet_ids, 'SubnetId') if filters: i = 1 for filter in filters: params[('Filter.%d.Name' % i)] = filter[0] params[('Filter.%d.Value.1' % i)] = filter[1] i += 1 return self.get_list('DescribeSubnets', params, [('item', Subnet)])
[ "def", "get_all_subnets", "(", "self", ",", "subnet_ids", "=", "None", ",", "filters", "=", "None", ")", ":", "params", "=", "{", "}", "if", "subnet_ids", ":", "self", ".", "build_list_params", "(", "params", ",", "subnet_ids", ",", "'SubnetId'", ")", "if", "filters", ":", "i", "=", "1", "for", "filter", "in", "filters", ":", "params", "[", "(", "'Filter.%d.Name'", "%", "i", ")", "]", "=", "filter", "[", "0", "]", "params", "[", "(", "'Filter.%d.Value.1'", "%", "i", ")", "]", "=", "filter", "[", "1", "]", "i", "+=", "1", "return", "self", ".", "get_list", "(", "'DescribeSubnets'", ",", "params", ",", "[", "(", "'item'", ",", "Subnet", ")", "]", ")" ]
Retrieve information about your Subnets. You can filter results to return information only about those Subnets that match your search parameters. Otherwise, all Subnets associated with your account are returned. :type subnet_ids: list :param subnet_ids: A list of strings with the desired Subnet ID's :type filters: list of tuples :param filters: A list of tuples containing filters. Each tuple consists of a filter key and a filter value. Possible filter keys are: - *state*, the state of the Subnet (pending,available) - *vpdId*, the ID of the VPC the subnet is in. - *cidrBlock*, CIDR block of the subnet - *availabilityZone*, the Availability Zone the subnet is in. :rtype: list :return: A list of :class:`boto.vpc.subnet.Subnet`
[ "Retrieve", "information", "about", "your", "Subnets", ".", "You", "can", "filter", "results", "to", "return", "information", "only", "about", "those", "Subnets", "that", "match", "your", "search", "parameters", ".", "Otherwise", "all", "Subnets", "associated", "with", "your", "account", "are", "returned", "." ]
python
train
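
A standalone sketch of how the filters argument is expanded into EC2 query parameters inside get_all_subnets above; no AWS call is made and the filter values are invented.

    def build_filter_params(filters):
        params = {}
        for i, (name, value) in enumerate(filters, start=1):
            params['Filter.%d.Name' % i] = name
            params['Filter.%d.Value.1' % i] = value
        return params

    print(build_filter_params([('state', 'available'), ('cidrBlock', '10.0.0.0/24')]))
    # {'Filter.1.Name': 'state', 'Filter.1.Value.1': 'available',
    #  'Filter.2.Name': 'cidrBlock', 'Filter.2.Value.1': '10.0.0.0/24'}
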
apache/incubator-mxnet
python/mxnet/module/bucketing_module.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/module/bucketing_module.py#L289-L352
def bind(self, data_shapes, label_shapes=None, for_training=True, inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'): """Binding for a `BucketingModule` means setting up the buckets and binding the executor for the default bucket key. Executors corresponding to other keys are bound afterwards with `switch_bucket`. Parameters ---------- data_shapes : list of (str, tuple) This should correspond to the symbol for the default bucket. label_shapes : list of (str, tuple) This should correspond to the symbol for the default bucket. for_training : bool Default is ``True``. inputs_need_grad : bool Default is ``False``. force_rebind : bool Default is ``False``. shared_module : BucketingModule Default is ``None``. This value is currently not used. grad_req : str, list of str, dict of str to str Requirement for gradient accumulation. Can be 'write', 'add', or 'null' (default to 'write'). Can be specified globally (str) or for each argument (list, dict). bucket_key : str (or any python object) bucket key for binding. by default use the default_bucket_key """ # in case we already initialized params, keep it if self.params_initialized: arg_params, aux_params = self.get_params() # force rebinding is typically used when one want to switch from # training to prediction phase. if force_rebind: self._reset_bind() if self.binded: self.logger.warning('Already bound, ignoring bind()') return assert shared_module is None, 'shared_module for BucketingModule is not supported' self.for_training = for_training self.inputs_need_grad = inputs_need_grad self.binded = True self._grad_req = grad_req symbol, data_names, label_names = self._call_sym_gen(self._default_bucket_key) module = Module(symbol, data_names, label_names, logger=self.logger, context=self._context, work_load_list=self._work_load_list, fixed_param_names=self._fixed_param_names, state_names=self._state_names, group2ctxs=self._group2ctxs, compression_params=self._compression_params) module.bind(data_shapes, label_shapes, for_training, inputs_need_grad, force_rebind=False, shared_module=None, grad_req=self._grad_req) self._curr_module = module self._curr_bucket_key = self._default_bucket_key self._buckets[self._default_bucket_key] = module # copy back saved params, if already initialized if self.params_initialized: self.set_params(arg_params, aux_params)
[ "def", "bind", "(", "self", ",", "data_shapes", ",", "label_shapes", "=", "None", ",", "for_training", "=", "True", ",", "inputs_need_grad", "=", "False", ",", "force_rebind", "=", "False", ",", "shared_module", "=", "None", ",", "grad_req", "=", "'write'", ")", ":", "# in case we already initialized params, keep it", "if", "self", ".", "params_initialized", ":", "arg_params", ",", "aux_params", "=", "self", ".", "get_params", "(", ")", "# force rebinding is typically used when one want to switch from", "# training to prediction phase.", "if", "force_rebind", ":", "self", ".", "_reset_bind", "(", ")", "if", "self", ".", "binded", ":", "self", ".", "logger", ".", "warning", "(", "'Already bound, ignoring bind()'", ")", "return", "assert", "shared_module", "is", "None", ",", "'shared_module for BucketingModule is not supported'", "self", ".", "for_training", "=", "for_training", "self", ".", "inputs_need_grad", "=", "inputs_need_grad", "self", ".", "binded", "=", "True", "self", ".", "_grad_req", "=", "grad_req", "symbol", ",", "data_names", ",", "label_names", "=", "self", ".", "_call_sym_gen", "(", "self", ".", "_default_bucket_key", ")", "module", "=", "Module", "(", "symbol", ",", "data_names", ",", "label_names", ",", "logger", "=", "self", ".", "logger", ",", "context", "=", "self", ".", "_context", ",", "work_load_list", "=", "self", ".", "_work_load_list", ",", "fixed_param_names", "=", "self", ".", "_fixed_param_names", ",", "state_names", "=", "self", ".", "_state_names", ",", "group2ctxs", "=", "self", ".", "_group2ctxs", ",", "compression_params", "=", "self", ".", "_compression_params", ")", "module", ".", "bind", "(", "data_shapes", ",", "label_shapes", ",", "for_training", ",", "inputs_need_grad", ",", "force_rebind", "=", "False", ",", "shared_module", "=", "None", ",", "grad_req", "=", "self", ".", "_grad_req", ")", "self", ".", "_curr_module", "=", "module", "self", ".", "_curr_bucket_key", "=", "self", ".", "_default_bucket_key", "self", ".", "_buckets", "[", "self", ".", "_default_bucket_key", "]", "=", "module", "# copy back saved params, if already initialized", "if", "self", ".", "params_initialized", ":", "self", ".", "set_params", "(", "arg_params", ",", "aux_params", ")" ]
Binding for a `BucketingModule` means setting up the buckets and binding the executor for the default bucket key. Executors corresponding to other keys are bound afterwards with `switch_bucket`. Parameters ---------- data_shapes : list of (str, tuple) This should correspond to the symbol for the default bucket. label_shapes : list of (str, tuple) This should correspond to the symbol for the default bucket. for_training : bool Default is ``True``. inputs_need_grad : bool Default is ``False``. force_rebind : bool Default is ``False``. shared_module : BucketingModule Default is ``None``. This value is currently not used. grad_req : str, list of str, dict of str to str Requirement for gradient accumulation. Can be 'write', 'add', or 'null' (default to 'write'). Can be specified globally (str) or for each argument (list, dict). bucket_key : str (or any python object) bucket key for binding. by default use the default_bucket_key
[ "Binding", "for", "a", "BucketingModule", "means", "setting", "up", "the", "buckets", "and", "binding", "the", "executor", "for", "the", "default", "bucket", "key", ".", "Executors", "corresponding", "to", "other", "keys", "are", "bound", "afterwards", "with", "switch_bucket", "." ]
python
train
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/file_utils.py
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/file_utils.py#L102-L124
def LockFile(path, blocking=False): """Interface to flock-based file locking to prevent concurrent executions. Args: path: string, the name of the file to lock. blocking: bool, whether the function should return immediately. Yields: None, yields when a lock on the file is obtained. Raises: IOError, raised from flock locking operations on a file. OSError, raised from file operations. """ fd = os.open(path, os.O_CREAT) try: Lock(fd, path, blocking) yield finally: try: Unlock(fd, path) finally: os.close(fd)
[ "def", "LockFile", "(", "path", ",", "blocking", "=", "False", ")", ":", "fd", "=", "os", ".", "open", "(", "path", ",", "os", ".", "O_CREAT", ")", "try", ":", "Lock", "(", "fd", ",", "path", ",", "blocking", ")", "yield", "finally", ":", "try", ":", "Unlock", "(", "fd", ",", "path", ")", "finally", ":", "os", ".", "close", "(", "fd", ")" ]
Interface to flock-based file locking to prevent concurrent executions. Args: path: string, the name of the file to lock. blocking: bool, whether the function should return immediately. Yields: None, yields when a lock on the file is obtained. Raises: IOError, raised from flock locking operations on a file. OSError, raised from file operations.
[ "Interface", "to", "flock", "-", "based", "file", "locking", "to", "prevent", "concurrent", "executions", "." ]
python
train
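
A hedged usage sketch for the LockFile context manager above; it assumes the google_compute_engine package is importable and that the chosen lock path is writable by the current user.

    from google_compute_engine import file_utils

    try:
        with file_utils.LockFile('/tmp/google_example.lock', blocking=False):
            pass    # only one process at a time reaches this block
    except IOError:
        print('another process already holds the lock')
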
creare-com/pydem
pydem/processing_manager.py
https://github.com/creare-com/pydem/blob/c2fc8d84cfb411df84f71a6dec9edc4b544f710a/pydem/processing_manager.py#L665-L709
def process(self, index=None): """ This will completely process a directory of elevation tiles (as supplied in the constructor). Both phases of the calculation, the single tile and edge resolution phases are run. Parameters ----------- index : int/slice (optional) Default None - processes all tiles in a directory. See :py:func:`process_twi` for additional options. """ # Round 0 of twi processing, process the magnitude and directions of # slopes print "Starting slope calculation round" self.process_twi(index, do_edges=False, skip_uca_twi=True) # Round 1 of twi processing print "Starting self-area calculation round" self.process_twi(index, do_edges=False) # Round 2 of twi processing: edge resolution i = self.tile_edge.find_best_candidate(self.elev_source_files) print "Starting edge resolution round: ", count = 0 i_old = -1 same_count = 0 while i is not None and same_count < 3: count += 1 print '*' * 10 print count, '(%d -- > %d) .' % (i_old, i) # %% self.process_twi(i, do_edges=True) i_old = i i = self.tile_edge.find_best_candidate(self.elev_source_files) if i_old == i: same_count += 1 else: same_count = 0 print '*'*79 print '******* PROCESSING COMPLETED *******' print '*'*79 return self
[ "def", "process", "(", "self", ",", "index", "=", "None", ")", ":", "# Round 0 of twi processing, process the magnitude and directions of", "# slopes", "print", "\"Starting slope calculation round\"", "self", ".", "process_twi", "(", "index", ",", "do_edges", "=", "False", ",", "skip_uca_twi", "=", "True", ")", "# Round 1 of twi processing", "print", "\"Starting self-area calculation round\"", "self", ".", "process_twi", "(", "index", ",", "do_edges", "=", "False", ")", "# Round 2 of twi processing: edge resolution", "i", "=", "self", ".", "tile_edge", ".", "find_best_candidate", "(", "self", ".", "elev_source_files", ")", "print", "\"Starting edge resolution round: \"", ",", "count", "=", "0", "i_old", "=", "-", "1", "same_count", "=", "0", "while", "i", "is", "not", "None", "and", "same_count", "<", "3", ":", "count", "+=", "1", "print", "'*'", "*", "10", "print", "count", ",", "'(%d -- > %d) .'", "%", "(", "i_old", ",", "i", ")", "# %%", "self", ".", "process_twi", "(", "i", ",", "do_edges", "=", "True", ")", "i_old", "=", "i", "i", "=", "self", ".", "tile_edge", ".", "find_best_candidate", "(", "self", ".", "elev_source_files", ")", "if", "i_old", "==", "i", ":", "same_count", "+=", "1", "else", ":", "same_count", "=", "0", "print", "'*'", "*", "79", "print", "'******* PROCESSING COMPLETED *******'", "print", "'*'", "*", "79", "return", "self" ]
This will completely process a directory of elevation tiles (as supplied in the constructor). Both phases of the calculation, the single tile and edge resolution phases are run. Parameters ----------- index : int/slice (optional) Default None - processes all tiles in a directory. See :py:func:`process_twi` for additional options.
[ "This", "will", "completely", "process", "a", "directory", "of", "elevation", "tiles", "(", "as", "supplied", "in", "the", "constructor", ")", ".", "Both", "phases", "of", "the", "calculation", "the", "single", "tile", "and", "edge", "resolution", "phases", "are", "run", "." ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/ext/_bundled/png.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/ext/_bundled/png.py#L845-L929
def filter_scanline(type, line, fo, prev=None): """Apply a scanline filter to a scanline. `type` specifies the filter type (0 to 4); `line` specifies the current (unfiltered) scanline as a sequence of bytes; `prev` specifies the previous (unfiltered) scanline as a sequence of bytes. `fo` specifies the filter offset; normally this is size of a pixel in bytes (the number of bytes per sample times the number of channels), but when this is < 1 (for bit depths < 8) then the filter offset is 1. """ assert 0 <= type < 5 # The output array. Which, pathetically, we extend one-byte at a # time (fortunately this is linear). out = array('B', [type]) def sub(): ai = -fo for x in line: if ai >= 0: x = (x - line[ai]) & 0xff out.append(x) ai += 1 def up(): for i,x in enumerate(line): x = (x - prev[i]) & 0xff out.append(x) def average(): ai = -fo for i,x in enumerate(line): if ai >= 0: x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff else: x = (x - (prev[i] >> 1)) & 0xff out.append(x) ai += 1 def paeth(): # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth ai = -fo # also used for ci for i,x in enumerate(line): a = 0 b = prev[i] c = 0 if ai >= 0: a = line[ai] c = prev[ai] p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: Pr = a elif pb <= pc: Pr = b else: Pr = c x = (x - Pr) & 0xff out.append(x) ai += 1 if not prev: # We're on the first line. Some of the filters can be reduced # to simpler cases which makes handling the line "off the top" # of the image simpler. "up" becomes "none"; "paeth" becomes # "left" (non-trivial, but true). "average" needs to be handled # specially. if type == 2: # "up" type = 0 elif type == 3: prev = [0]*len(line) elif type == 4: # "paeth" type = 1 if type == 0: out.extend(line) elif type == 1: sub() elif type == 2: up() elif type == 3: average() else: # type == 4 paeth() return out
[ "def", "filter_scanline", "(", "type", ",", "line", ",", "fo", ",", "prev", "=", "None", ")", ":", "assert", "0", "<=", "type", "<", "5", "# The output array. Which, pathetically, we extend one-byte at a", "# time (fortunately this is linear).", "out", "=", "array", "(", "'B'", ",", "[", "type", "]", ")", "def", "sub", "(", ")", ":", "ai", "=", "-", "fo", "for", "x", "in", "line", ":", "if", "ai", ">=", "0", ":", "x", "=", "(", "x", "-", "line", "[", "ai", "]", ")", "&", "0xff", "out", ".", "append", "(", "x", ")", "ai", "+=", "1", "def", "up", "(", ")", ":", "for", "i", ",", "x", "in", "enumerate", "(", "line", ")", ":", "x", "=", "(", "x", "-", "prev", "[", "i", "]", ")", "&", "0xff", "out", ".", "append", "(", "x", ")", "def", "average", "(", ")", ":", "ai", "=", "-", "fo", "for", "i", ",", "x", "in", "enumerate", "(", "line", ")", ":", "if", "ai", ">=", "0", ":", "x", "=", "(", "x", "-", "(", "(", "line", "[", "ai", "]", "+", "prev", "[", "i", "]", ")", ">>", "1", ")", ")", "&", "0xff", "else", ":", "x", "=", "(", "x", "-", "(", "prev", "[", "i", "]", ">>", "1", ")", ")", "&", "0xff", "out", ".", "append", "(", "x", ")", "ai", "+=", "1", "def", "paeth", "(", ")", ":", "# http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth", "ai", "=", "-", "fo", "# also used for ci", "for", "i", ",", "x", "in", "enumerate", "(", "line", ")", ":", "a", "=", "0", "b", "=", "prev", "[", "i", "]", "c", "=", "0", "if", "ai", ">=", "0", ":", "a", "=", "line", "[", "ai", "]", "c", "=", "prev", "[", "ai", "]", "p", "=", "a", "+", "b", "-", "c", "pa", "=", "abs", "(", "p", "-", "a", ")", "pb", "=", "abs", "(", "p", "-", "b", ")", "pc", "=", "abs", "(", "p", "-", "c", ")", "if", "pa", "<=", "pb", "and", "pa", "<=", "pc", ":", "Pr", "=", "a", "elif", "pb", "<=", "pc", ":", "Pr", "=", "b", "else", ":", "Pr", "=", "c", "x", "=", "(", "x", "-", "Pr", ")", "&", "0xff", "out", ".", "append", "(", "x", ")", "ai", "+=", "1", "if", "not", "prev", ":", "# We're on the first line. Some of the filters can be reduced", "# to simpler cases which makes handling the line \"off the top\"", "# of the image simpler. \"up\" becomes \"none\"; \"paeth\" becomes", "# \"left\" (non-trivial, but true). \"average\" needs to be handled", "# specially.", "if", "type", "==", "2", ":", "# \"up\"", "type", "=", "0", "elif", "type", "==", "3", ":", "prev", "=", "[", "0", "]", "*", "len", "(", "line", ")", "elif", "type", "==", "4", ":", "# \"paeth\"", "type", "=", "1", "if", "type", "==", "0", ":", "out", ".", "extend", "(", "line", ")", "elif", "type", "==", "1", ":", "sub", "(", ")", "elif", "type", "==", "2", ":", "up", "(", ")", "elif", "type", "==", "3", ":", "average", "(", ")", "else", ":", "# type == 4", "paeth", "(", ")", "return", "out" ]
Apply a scanline filter to a scanline. `type` specifies the filter type (0 to 4); `line` specifies the current (unfiltered) scanline as a sequence of bytes; `prev` specifies the previous (unfiltered) scanline as a sequence of bytes. `fo` specifies the filter offset; normally this is size of a pixel in bytes (the number of bytes per sample times the number of channels), but when this is < 1 (for bit depths < 8) then the filter offset is 1.
[ "Apply", "a", "scanline", "filter", "to", "a", "scanline", ".", "type", "specifies", "the", "filter", "type", "(", "0", "to", "4", ")", ";", "line", "specifies", "the", "current", "(", "unfiltered", ")", "scanline", "as", "a", "sequence", "of", "bytes", ";", "prev", "specifies", "the", "previous", "(", "unfiltered", ")", "scanline", "as", "a", "sequence", "of", "bytes", ".", "fo", "specifies", "the", "filter", "offset", ";", "normally", "this", "is", "size", "of", "a", "pixel", "in", "bytes", "(", "the", "number", "of", "bytes", "per", "sample", "times", "the", "number", "of", "channels", ")", "but", "when", "this", "is", "<", "1", "(", "for", "bit", "depths", "<", "8", ")", "then", "the", "filter", "offset", "is", "1", "." ]
python
train
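
A worked, standalone example of two of the filters applied by filter_scanline above: the byte-wise "sub" filter (type 1) and the Paeth predictor behind filter type 4; fo=3 corresponds to 8-bit RGB pixels and the sample bytes are invented.

    def sub_filter(line, fo):
        out = [1]                               # leading byte: filter type 1
        for i, x in enumerate(line):
            left = line[i - fo] if i - fo >= 0 else 0
            out.append((x - left) & 0xff)
        return out

    def paeth_predictor(a, b, c):               # a=left, b=up, c=upper-left
        p = a + b - c
        pa, pb, pc = abs(p - a), abs(p - b), abs(p - c)
        if pa <= pb and pa <= pc:
            return a
        if pb <= pc:
            return b
        return c

    line = [100, 110, 120, 103, 112, 121]       # two RGB pixels
    print(sub_filter(line, fo=3))               # [1, 100, 110, 120, 3, 2, 1]
    print(paeth_predictor(103, 107, 100))       # 107
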
saltstack/salt
salt/modules/rabbitmq.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rabbitmq.py#L132-L143
def _safe_output(line): ''' Looks for rabbitmqctl warning, or general formatting, strings that aren't intended to be parsed as output. Returns a boolean whether the line can be parsed as rabbitmqctl output. ''' return not any([ line.startswith('Listing') and line.endswith('...'), line.startswith('Listing') and '\t' not in line, '...done' in line, line.startswith('WARNING:') ])
[ "def", "_safe_output", "(", "line", ")", ":", "return", "not", "any", "(", "[", "line", ".", "startswith", "(", "'Listing'", ")", "and", "line", ".", "endswith", "(", "'...'", ")", ",", "line", ".", "startswith", "(", "'Listing'", ")", "and", "'\\t'", "not", "in", "line", ",", "'...done'", "in", "line", ",", "line", ".", "startswith", "(", "'WARNING:'", ")", "]", ")" ]
Looks for rabbitmqctl warning, or general formatting, strings that aren't intended to be parsed as output. Returns a boolean whether the line can be parsed as rabbitmqctl output.
[ "Looks", "for", "rabbitmqctl", "warning", "or", "general", "formatting", "strings", "that", "aren", "t", "intended", "to", "be", "parsed", "as", "output", ".", "Returns", "a", "boolean", "whether", "the", "line", "can", "be", "parsed", "as", "rabbitmqctl", "output", "." ]
python
train
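
A small demonstration of the filtering rule implemented by _safe_output above, applied to invented rabbitmqctl output lines: "Listing" headers, "...done" footers and warnings are dropped, tab-separated data rows are kept.

    def is_data_row(line):
        return not any([
            line.startswith('Listing') and line.endswith('...'),
            line.startswith('Listing') and '\t' not in line,
            '...done' in line,
            line.startswith('WARNING:'),
        ])

    output = [
        'Listing users ...',
        'guest\t[administrator]',
        '...done.',
    ]
    print([l for l in output if is_data_row(l)])   # ['guest\t[administrator]']
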
glut23/webvtt-py
webvtt/segmenter.py
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/segmenter.py#L73-L95
def segment(self, webvtt, output='', seconds=SECONDS, mpegts=MPEGTS): """Segments the captions based on a number of seconds.""" if isinstance(webvtt, str): # if a string is supplied we parse the file captions = WebVTT().read(webvtt).captions elif not self._validate_webvtt(webvtt): raise InvalidCaptionsError('The captions provided are invalid') else: # we expect to have a webvtt object captions = webvtt.captions self._total_segments = 0 if not captions else int(ceil(captions[-1].end_in_seconds / seconds)) self._output_folder = output self._seconds = seconds self._mpegts = mpegts output_folder = os.path.join(os.getcwd(), output) if not os.path.exists(output_folder): os.makedirs(output_folder) self._slice_segments(captions) self._write_segments() self._write_manifest()
[ "def", "segment", "(", "self", ",", "webvtt", ",", "output", "=", "''", ",", "seconds", "=", "SECONDS", ",", "mpegts", "=", "MPEGTS", ")", ":", "if", "isinstance", "(", "webvtt", ",", "str", ")", ":", "# if a string is supplied we parse the file", "captions", "=", "WebVTT", "(", ")", ".", "read", "(", "webvtt", ")", ".", "captions", "elif", "not", "self", ".", "_validate_webvtt", "(", "webvtt", ")", ":", "raise", "InvalidCaptionsError", "(", "'The captions provided are invalid'", ")", "else", ":", "# we expect to have a webvtt object", "captions", "=", "webvtt", ".", "captions", "self", ".", "_total_segments", "=", "0", "if", "not", "captions", "else", "int", "(", "ceil", "(", "captions", "[", "-", "1", "]", ".", "end_in_seconds", "/", "seconds", ")", ")", "self", ".", "_output_folder", "=", "output", "self", ".", "_seconds", "=", "seconds", "self", ".", "_mpegts", "=", "mpegts", "output_folder", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "output", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "output_folder", ")", ":", "os", ".", "makedirs", "(", "output_folder", ")", "self", ".", "_slice_segments", "(", "captions", ")", "self", ".", "_write_segments", "(", ")", "self", ".", "_write_manifest", "(", ")" ]
Segments the captions based on a number of seconds.
[ "Segments", "the", "captions", "based", "on", "a", "number", "of", "seconds", "." ]
python
train
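
A hedged usage sketch for segment() above; it assumes webvtt-py exposes the segmenter class as WebVTTSegmenter and that captions.vtt exists in the working directory (the output folder is created if missing, as the method shows).

    from webvtt.segmenter import WebVTTSegmenter

    WebVTTSegmenter().segment(
        'captions.vtt',          # source captions (a WebVTT object also works)
        output='hls_segments',   # folder that receives the segment files and manifest
        seconds=10,              # target duration of each segment
    )
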
ArchiveTeam/wpull
wpull/processor/rule.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/processor/rule.py#L378-L385
def get_wait_time(self, item_session: ItemSession, error=None): '''Return the wait time in seconds between requests.''' seconds = self._waiter.get() try: return self.hook_dispatcher.call(PluginFunctions.wait_time, seconds, item_session, error) except HookDisconnected: return seconds
[ "def", "get_wait_time", "(", "self", ",", "item_session", ":", "ItemSession", ",", "error", "=", "None", ")", ":", "seconds", "=", "self", ".", "_waiter", ".", "get", "(", ")", "try", ":", "return", "self", ".", "hook_dispatcher", ".", "call", "(", "PluginFunctions", ".", "wait_time", ",", "seconds", ",", "item_session", ",", "error", ")", "except", "HookDisconnected", ":", "return", "seconds" ]
Return the wait time in seconds between requests.
[ "Return", "the", "wait", "time", "in", "seconds", "between", "requests", "." ]
python
train
elliterate/capybara.py
capybara/window.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/window.py#L106-L118
def resize_to(self, width, height): """ Resizes the window to the given dimensions. If this method was called for a window that is not current, then after calling this method the current window should remain the same as it was before calling this method. Args: width (int): The new window width in pixels. height (int): The new window height in pixels. """ self.driver.resize_window_to(self.handle, width, height)
[ "def", "resize_to", "(", "self", ",", "width", ",", "height", ")", ":", "self", ".", "driver", ".", "resize_window_to", "(", "self", ".", "handle", ",", "width", ",", "height", ")" ]
Resizes the window to the given dimensions. If this method was called for a window that is not current, then after calling this method the current window should remain the same as it was before calling this method. Args: width (int): The new window width in pixels. height (int): The new window height in pixels.
[ "Resizes", "the", "window", "to", "the", "given", "dimensions", "." ]
python
test
log2timeline/dftimewolf
dftimewolf/lib/collectors/grr_hosts.py
https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/grr_hosts.py#L257-L287
def setup(self, hosts, artifacts, extra_artifacts, use_tsk, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True): """Initializes a GRR artifact collector. Args: hosts: Comma-separated list of hostnames to launch the flow on. artifacts: list of GRR-defined artifacts. extra_artifacts: list of GRR-defined artifacts to append. use_tsk: toggle for use_tsk flag on GRR flow. reason: justification for GRR access. grr_server_url: GRR server URL. grr_username: GRR username. grr_password: GRR password. approvers: list of GRR approval recipients. verify: boolean, whether to verify the GRR server's x509 certificate. """ super(GRRArtifactCollector, self).setup( reason, grr_server_url, grr_username, grr_password, approvers=approvers, verify=verify) if artifacts is not None: self.artifacts = [item.strip() for item in artifacts.strip().split(',')] if extra_artifacts is not None: self.extra_artifacts = [item.strip() for item in extra_artifacts.strip().split(',')] self.hostnames = [item.strip() for item in hosts.strip().split(',')] self.use_tsk = use_tsk
[ "def", "setup", "(", "self", ",", "hosts", ",", "artifacts", ",", "extra_artifacts", ",", "use_tsk", ",", "reason", ",", "grr_server_url", ",", "grr_username", ",", "grr_password", ",", "approvers", "=", "None", ",", "verify", "=", "True", ")", ":", "super", "(", "GRRArtifactCollector", ",", "self", ")", ".", "setup", "(", "reason", ",", "grr_server_url", ",", "grr_username", ",", "grr_password", ",", "approvers", "=", "approvers", ",", "verify", "=", "verify", ")", "if", "artifacts", "is", "not", "None", ":", "self", ".", "artifacts", "=", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "artifacts", ".", "strip", "(", ")", ".", "split", "(", "','", ")", "]", "if", "extra_artifacts", "is", "not", "None", ":", "self", ".", "extra_artifacts", "=", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "extra_artifacts", ".", "strip", "(", ")", ".", "split", "(", "','", ")", "]", "self", ".", "hostnames", "=", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "hosts", ".", "strip", "(", ")", ".", "split", "(", "','", ")", "]", "self", ".", "use_tsk", "=", "use_tsk" ]
Initializes a GRR artifact collector. Args: hosts: Comma-separated list of hostnames to launch the flow on. artifacts: list of GRR-defined artifacts. extra_artifacts: list of GRR-defined artifacts to append. use_tsk: toggle for use_tsk flag on GRR flow. reason: justification for GRR access. grr_server_url: GRR server URL. grr_username: GRR username. grr_password: GRR password. approvers: list of GRR approval recipients. verify: boolean, whether to verify the GRR server's x509 certificate.
[ "Initializes", "a", "GRR", "artifact", "collector", "." ]
python
train
juju/python-libjuju
juju/client/_client7.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client7.py#L1411-L1424
async def SetInstanceInfo(self, machines): ''' machines : typing.Sequence[~InstanceInfo] Returns -> typing.Sequence[~ErrorResult] ''' # map input types to rpc msg _params = dict() msg = dict(type='Provisioner', request='SetInstanceInfo', version=7, params=_params) _params['machines'] = machines reply = await self.rpc(msg) return reply
[ "async", "def", "SetInstanceInfo", "(", "self", ",", "machines", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'Provisioner'", ",", "request", "=", "'SetInstanceInfo'", ",", "version", "=", "7", ",", "params", "=", "_params", ")", "_params", "[", "'machines'", "]", "=", "machines", "reply", "=", "await", "self", ".", "rpc", "(", "msg", ")", "return", "reply" ]
machines : typing.Sequence[~InstanceInfo] Returns -> typing.Sequence[~ErrorResult]
[ "machines", ":", "typing", ".", "Sequence", "[", "~InstanceInfo", "]", "Returns", "-", ">", "typing", ".", "Sequence", "[", "~ErrorResult", "]" ]
python
train
cprogrammer1994/GLWindow
GLWindow/__init__.py
https://github.com/cprogrammer1994/GLWindow/blob/521e18fcbc15e88d3c1f3547aa313c3a07386ee5/GLWindow/__init__.py#L243-L248
def viewport(self) -> Tuple[int, int, int, int]: ''' tuple: The viewport of the window. ''' return self.wnd.viewport
[ "def", "viewport", "(", "self", ")", "->", "Tuple", "[", "int", ",", "int", ",", "int", ",", "int", "]", ":", "return", "self", ".", "wnd", ".", "viewport" ]
tuple: The viewport of the window.
[ "tuple", ":", "The", "viewport", "of", "the", "window", "." ]
python
train
cltrudeau/django-awl
awl/waelsteng.py
https://github.com/cltrudeau/django-awl/blob/70d469ef9a161c1170b53aa017cf02d7c15eb90c/awl/waelsteng.py#L232-L245
def field_value(self, admin_model, instance, field_name): """Returns the value displayed in the column on the web interface for a given instance. :param admin_model: Instance of a :class:`admin.ModelAdmin` object that is responsible for displaying the change list :param instance: Object instance that is the row in the admin change list :field_name: Name of the field/column to fetch """ _, _, value = lookup_field(field_name, instance, admin_model) return value
[ "def", "field_value", "(", "self", ",", "admin_model", ",", "instance", ",", "field_name", ")", ":", "_", ",", "_", ",", "value", "=", "lookup_field", "(", "field_name", ",", "instance", ",", "admin_model", ")", "return", "value" ]
Returns the value displayed in the column on the web interface for a given instance. :param admin_model: Instance of a :class:`admin.ModelAdmin` object that is responsible for displaying the change list :param instance: Object instance that is the row in the admin change list :param field_name: Name of the field/column to fetch
[ "Returns", "the", "value", "displayed", "in", "the", "column", "on", "the", "web", "interface", "for", "a", "given", "instance", "." ]
python
valid
njsmith/colorspacious
colorspacious/ciecam02.py
https://github.com/njsmith/colorspacious/blob/59e0226003fb1b894597c5081e8ca5a3aa4fcefd/colorspacious/ciecam02.py#L143-L252
def XYZ100_to_CIECAM02(self, XYZ100, on_negative_A="raise"): """Computes CIECAM02 appearance correlates for the given tristimulus value(s) XYZ (normalized to be on the 0-100 scale). Example: ``vc.XYZ100_to_CIECAM02([30.0, 45.5, 21.0])`` :param XYZ100: An array-like of tristimulus values. These should be given on the 0-100 scale, not the 0-1 scale. The array-like should have shape ``(..., 3)``; e.g., you can use a simple 3-item list (shape = ``(3,)``), or to efficiently perform multiple computations at once, you could pass a higher-dimensional array, e.g. an image. :arg on_negative_A: A known infelicity of the CIECAM02 model is that for some inputs, the achromatic signal :math:`A` can be negative, which makes it impossible to compute :math:`J`, :math:`C`, :math:`Q`, :math:`M`, or :math:`s` -- only :math:`h`: and :math:`H` are spared. (See, e.g., section 2.6.4.1 of :cite:`Luo-CIECAM02` for discussion.) This argument allows you to specify a strategy for handling such points. Options are: * ``"raise"``: throws a :class:`NegativeAError` (a subclass of :class:`ValueError`) * ``"nan"``: return not-a-number values for the affected elements. (This may be particularly useful if converting a large number of points at once.) :returns: A named tuple of type :class:`JChQMsH`, with attributes ``J``, ``C``, ``h``, ``Q``, ``M``, ``s``, and ``H`` containing the CIECAM02 appearance correlates. """ #### Argument checking XYZ100 = np.asarray(XYZ100, dtype=float) if XYZ100.shape[-1] != 3: raise ValueError("XYZ100 shape must be (..., 3)") #### Step 1 RGB = broadcasting_matvec(M_CAT02, XYZ100) #### Step 2 RGB_C = self.D_RGB * RGB #### Step 3 RGBprime = broadcasting_matvec(M_HPE_M_CAT02_inv, RGB_C) #### Step 4 RGBprime_signs = np.sign(RGBprime) tmp = (self.F_L * RGBprime_signs * RGBprime / 100) ** 0.42 RGBprime_a = RGBprime_signs * 400 * (tmp / (tmp + 27.13)) + 0.1 #### Step 5 a = broadcasting_matvec([1, -12. / 11, 1. / 11], RGBprime_a) b = broadcasting_matvec([1. / 9, 1. / 9, -2. / 9], RGBprime_a) h_rad = np.arctan2(b, a) h = np.rad2deg(h_rad) % 360 # #### Step 6 # hprime = h, unless h < 20.14, in which case hprime = h + 360. hprime = np.select([h < h_i[0], True], [h + 360, h]) # we use 0-based indexing, so our i is one less than the reference # formulas' i. i = np.searchsorted(h_i, hprime, side="right") - 1 tmp = (hprime - h_i[i]) / e_i[i] H = H_i[i] + ((100 * tmp) / (tmp + (h_i[i + 1] - hprime) / e_i[i + 1])) #### Step 7 A = ((broadcasting_matvec([2, 1, 1. / 20], RGBprime_a) - 0.305) * self.N_bb) if on_negative_A == "raise": if np.any(A < 0): raise NegativeAError("attempted to convert a tristimulus " "value whose achromatic signal was " "negative, and on_negative_A=\"raise\"") elif on_negative_A == "nan": A = np.select([A < 0, True], [np.nan, A]) else: raise ValueError("Invalid on_negative_A argument: got %r, " "expected \"raise\" or \"nan\"" % (on_negative_A,)) #### Step 8 J = 100 * (A / self.A_w) ** (self.c * self.z) #### Step 9 Q = self._J_to_Q(J) #### Step 10 e = (12500. / 13) * self.N_c * self.N_cb * (np.cos(h_rad + 2) + 3.8) t = (e * np.sqrt(a ** 2 + b ** 2) / broadcasting_matvec([1, 1, 21. / 20], RGBprime_a)) C = t**0.9 * (J / 100)**0.5 * (1.64 - 0.29**self.n)**0.73 M = C * self.F_L**0.25 s = 100 * (M / Q)**0.5 return JChQMsH(J, C, h, Q, M, s, H)
[ "def", "XYZ100_to_CIECAM02", "(", "self", ",", "XYZ100", ",", "on_negative_A", "=", "\"raise\"", ")", ":", "#### Argument checking", "XYZ100", "=", "np", ".", "asarray", "(", "XYZ100", ",", "dtype", "=", "float", ")", "if", "XYZ100", ".", "shape", "[", "-", "1", "]", "!=", "3", ":", "raise", "ValueError", "(", "\"XYZ100 shape must be (..., 3)\"", ")", "#### Step 1", "RGB", "=", "broadcasting_matvec", "(", "M_CAT02", ",", "XYZ100", ")", "#### Step 2", "RGB_C", "=", "self", ".", "D_RGB", "*", "RGB", "#### Step 3", "RGBprime", "=", "broadcasting_matvec", "(", "M_HPE_M_CAT02_inv", ",", "RGB_C", ")", "#### Step 4", "RGBprime_signs", "=", "np", ".", "sign", "(", "RGBprime", ")", "tmp", "=", "(", "self", ".", "F_L", "*", "RGBprime_signs", "*", "RGBprime", "/", "100", ")", "**", "0.42", "RGBprime_a", "=", "RGBprime_signs", "*", "400", "*", "(", "tmp", "/", "(", "tmp", "+", "27.13", ")", ")", "+", "0.1", "#### Step 5", "a", "=", "broadcasting_matvec", "(", "[", "1", ",", "-", "12.", "/", "11", ",", "1.", "/", "11", "]", ",", "RGBprime_a", ")", "b", "=", "broadcasting_matvec", "(", "[", "1.", "/", "9", ",", "1.", "/", "9", ",", "-", "2.", "/", "9", "]", ",", "RGBprime_a", ")", "h_rad", "=", "np", ".", "arctan2", "(", "b", ",", "a", ")", "h", "=", "np", ".", "rad2deg", "(", "h_rad", ")", "%", "360", "# #### Step 6", "# hprime = h, unless h < 20.14, in which case hprime = h + 360.", "hprime", "=", "np", ".", "select", "(", "[", "h", "<", "h_i", "[", "0", "]", ",", "True", "]", ",", "[", "h", "+", "360", ",", "h", "]", ")", "# we use 0-based indexing, so our i is one less than the reference", "# formulas' i.", "i", "=", "np", ".", "searchsorted", "(", "h_i", ",", "hprime", ",", "side", "=", "\"right\"", ")", "-", "1", "tmp", "=", "(", "hprime", "-", "h_i", "[", "i", "]", ")", "/", "e_i", "[", "i", "]", "H", "=", "H_i", "[", "i", "]", "+", "(", "(", "100", "*", "tmp", ")", "/", "(", "tmp", "+", "(", "h_i", "[", "i", "+", "1", "]", "-", "hprime", ")", "/", "e_i", "[", "i", "+", "1", "]", ")", ")", "#### Step 7", "A", "=", "(", "(", "broadcasting_matvec", "(", "[", "2", ",", "1", ",", "1.", "/", "20", "]", ",", "RGBprime_a", ")", "-", "0.305", ")", "*", "self", ".", "N_bb", ")", "if", "on_negative_A", "==", "\"raise\"", ":", "if", "np", ".", "any", "(", "A", "<", "0", ")", ":", "raise", "NegativeAError", "(", "\"attempted to convert a tristimulus \"", "\"value whose achromatic signal was \"", "\"negative, and on_negative_A=\\\"raise\\\"\"", ")", "elif", "on_negative_A", "==", "\"nan\"", ":", "A", "=", "np", ".", "select", "(", "[", "A", "<", "0", ",", "True", "]", ",", "[", "np", ".", "nan", ",", "A", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid on_negative_A argument: got %r, \"", "\"expected \\\"raise\\\" or \\\"nan\\\"\"", "%", "(", "on_negative_A", ",", ")", ")", "#### Step 8", "J", "=", "100", "*", "(", "A", "/", "self", ".", "A_w", ")", "**", "(", "self", ".", "c", "*", "self", ".", "z", ")", "#### Step 9", "Q", "=", "self", ".", "_J_to_Q", "(", "J", ")", "#### Step 10", "e", "=", "(", "12500.", "/", "13", ")", "*", "self", ".", "N_c", "*", "self", ".", "N_cb", "*", "(", "np", ".", "cos", "(", "h_rad", "+", "2", ")", "+", "3.8", ")", "t", "=", "(", "e", "*", "np", ".", "sqrt", "(", "a", "**", "2", "+", "b", "**", "2", ")", "/", "broadcasting_matvec", "(", "[", "1", ",", "1", ",", "21.", "/", "20", "]", ",", "RGBprime_a", ")", ")", "C", "=", "t", "**", "0.9", "*", "(", "J", "/", "100", ")", "**", "0.5", "*", "(", "1.64", "-", "0.29", "**", "self", ".", "n", ")", "**", "0.73", "M", 
"=", "C", "*", "self", ".", "F_L", "**", "0.25", "s", "=", "100", "*", "(", "M", "/", "Q", ")", "**", "0.5", "return", "JChQMsH", "(", "J", ",", "C", ",", "h", ",", "Q", ",", "M", ",", "s", ",", "H", ")" ]
Computes CIECAM02 appearance correlates for the given tristimulus value(s) XYZ (normalized to be on the 0-100 scale). Example: ``vc.XYZ100_to_CIECAM02([30.0, 45.5, 21.0])`` :param XYZ100: An array-like of tristimulus values. These should be given on the 0-100 scale, not the 0-1 scale. The array-like should have shape ``(..., 3)``; e.g., you can use a simple 3-item list (shape = ``(3,)``), or to efficiently perform multiple computations at once, you could pass a higher-dimensional array, e.g. an image. :arg on_negative_A: A known infelicity of the CIECAM02 model is that for some inputs, the achromatic signal :math:`A` can be negative, which makes it impossible to compute :math:`J`, :math:`C`, :math:`Q`, :math:`M`, or :math:`s` -- only :math:`h`: and :math:`H` are spared. (See, e.g., section 2.6.4.1 of :cite:`Luo-CIECAM02` for discussion.) This argument allows you to specify a strategy for handling such points. Options are: * ``"raise"``: throws a :class:`NegativeAError` (a subclass of :class:`ValueError`) * ``"nan"``: return not-a-number values for the affected elements. (This may be particularly useful if converting a large number of points at once.) :returns: A named tuple of type :class:`JChQMsH`, with attributes ``J``, ``C``, ``h``, ``Q``, ``M``, ``s``, and ``H`` containing the CIECAM02 appearance correlates.
[ "Computes", "CIECAM02", "appearance", "correlates", "for", "the", "given", "tristimulus", "value", "(", "s", ")", "XYZ", "(", "normalized", "to", "be", "on", "the", "0", "-", "100", "scale", ")", "." ]
python
train
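A brief usage sketch for the record above. It assumes the surrounding module exposes the viewing-conditions class as colorspacious.ciecam02.CIECAM02Space and ships a standard sRGB instance as CIECAM02Space.sRGB; those names are assumptions about the library, not taken from this record, so substitute any object providing XYZ100_to_CIECAM02 as defined above.

import numpy as np
from colorspacious.ciecam02 import CIECAM02Space

vc = CIECAM02Space.sRGB                            # assumed standard sRGB viewing conditions
single = vc.XYZ100_to_CIECAM02([30.0, 45.5, 21.0])
print(single.J, single.C, single.h)                # lightness, chroma, hue correlates

# batch conversion over an image-shaped array; negative achromatic signals become NaN
image = np.random.rand(16, 16, 3) * 100.0
batch = vc.XYZ100_to_CIECAM02(image, on_negative_A="nan")
print(batch.J.shape)                               # (16, 16)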
zibertscrem/hexdi
hexdi/__init__.py
https://github.com/zibertscrem/hexdi/blob/4875598299c53f984f2bb1b37060fd42bb7aba84/hexdi/__init__.py#L89-L96
def bind_transient(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype): """ shortcut for bind_type with PerResolveLifeTimeManager on root container :param type_to_bind: type that will be resolved by accessor :param accessor: accessor for resolving object """ hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime.PerResolveLifeTimeManager)
[ "def", "bind_transient", "(", "type_to_bind", ":", "hexdi", ".", "core", ".", "restype", ",", "accessor", ":", "hexdi", ".", "core", ".", "clstype", ")", ":", "hexdi", ".", "core", ".", "get_root_container", "(", ")", ".", "bind_type", "(", "type_to_bind", ",", "accessor", ",", "lifetime", ".", "PerResolveLifeTimeManager", ")" ]
shortcut for bind_type with PerResolveLifeTimeManager on root container :param type_to_bind: type that will be resolved by accessor :param accessor: accessor for resolving object
[ "shortcut", "for", "bind_type", "with", "PerResolveLifeTimeManager", "on", "root", "container" ]
python
train
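A hedged sketch of the shortcut above. Repository and SqlRepository are invented placeholder classes, and the resolution step goes through the same root container that bind_transient writes to; the resolve() method name is an assumption about that container's API.

import hexdi

class Repository:
    pass

class SqlRepository(Repository):
    pass

hexdi.bind_transient(Repository, SqlRepository)    # per-resolve lifetime: a fresh object each time
container = hexdi.core.get_root_container()        # the container the shortcut binds into
a = container.resolve(Repository)                  # resolve() is an assumed method name
b = container.resolve(Repository)
print(a is b)                                      # expected False for a transient binding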
pypa/pipenv
pipenv/vendor/click/termui.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/termui.py#L261-L351
def progressbar(iterable=None, length=None, label=None, show_eta=True, show_percent=None, show_pos=False, item_show_func=None, fill_char='#', empty_char='-', bar_template='%(label)s [%(bar)s] %(info)s', info_sep=' ', width=36, file=None, color=None): """This function creates an iterable context manager that can be used to iterate over something while showing a progress bar. It will either iterate over the `iterable` or `length` items (that are counted up). While iteration happens, this function will print a rendered progress bar to the given `file` (defaults to stdout) and will attempt to calculate remaining time and more. By default, this progress bar will not be rendered if the file is not a terminal. The context manager creates the progress bar. When the context manager is entered the progress bar is already displayed. With every iteration over the progress bar, the iterable passed to the bar is advanced and the bar is updated. When the context manager exits, a newline is printed and the progress bar is finalized on screen. No printing must happen or the progress bar will be unintentionally destroyed. Example usage:: with progressbar(items) as bar: for item in bar: do_something_with(item) Alternatively, if no iterable is specified, one can manually update the progress bar through the `update()` method instead of directly iterating over the progress bar. The update method accepts the number of steps to increment the bar with:: with progressbar(length=chunks.total_bytes) as bar: for chunk in chunks: process_chunk(chunk) bar.update(chunks.bytes) .. versionadded:: 2.0 .. versionadded:: 4.0 Added the `color` parameter. Added a `update` method to the progressbar object. :param iterable: an iterable to iterate over. If not provided the length is required. :param length: the number of items to iterate over. By default the progressbar will attempt to ask the iterator about its length, which might or might not work. If an iterable is also provided this parameter can be used to override the length. If an iterable is not provided the progress bar will iterate over a range of that length. :param label: the label to show next to the progress bar. :param show_eta: enables or disables the estimated time display. This is automatically disabled if the length cannot be determined. :param show_percent: enables or disables the percentage display. The default is `True` if the iterable has a length or `False` if not. :param show_pos: enables or disables the absolute position display. The default is `False`. :param item_show_func: a function called with the current item which can return a string to show the current item next to the progress bar. Note that the current item can be `None`! :param fill_char: the character to use to show the filled part of the progress bar. :param empty_char: the character to use to show the non-filled part of the progress bar. :param bar_template: the format string to use as template for the bar. The parameters in it are ``label`` for the label, ``bar`` for the progress bar and ``info`` for the info section. :param info_sep: the separator between multiple info items (eta etc.) :param width: the width of the progress bar in characters, 0 means full terminal width :param file: the file to write to. If this is not a terminal then only the label is printed. :param color: controls if the terminal supports ANSI colors or not. The default is autodetection. This is only needed if ANSI codes are included anywhere in the progress bar output which is not the case by default. 
""" from ._termui_impl import ProgressBar color = resolve_color_default(color) return ProgressBar(iterable=iterable, length=length, show_eta=show_eta, show_percent=show_percent, show_pos=show_pos, item_show_func=item_show_func, fill_char=fill_char, empty_char=empty_char, bar_template=bar_template, info_sep=info_sep, file=file, label=label, width=width, color=color)
[ "def", "progressbar", "(", "iterable", "=", "None", ",", "length", "=", "None", ",", "label", "=", "None", ",", "show_eta", "=", "True", ",", "show_percent", "=", "None", ",", "show_pos", "=", "False", ",", "item_show_func", "=", "None", ",", "fill_char", "=", "'#'", ",", "empty_char", "=", "'-'", ",", "bar_template", "=", "'%(label)s [%(bar)s] %(info)s'", ",", "info_sep", "=", "' '", ",", "width", "=", "36", ",", "file", "=", "None", ",", "color", "=", "None", ")", ":", "from", ".", "_termui_impl", "import", "ProgressBar", "color", "=", "resolve_color_default", "(", "color", ")", "return", "ProgressBar", "(", "iterable", "=", "iterable", ",", "length", "=", "length", ",", "show_eta", "=", "show_eta", ",", "show_percent", "=", "show_percent", ",", "show_pos", "=", "show_pos", ",", "item_show_func", "=", "item_show_func", ",", "fill_char", "=", "fill_char", ",", "empty_char", "=", "empty_char", ",", "bar_template", "=", "bar_template", ",", "info_sep", "=", "info_sep", ",", "file", "=", "file", ",", "label", "=", "label", ",", "width", "=", "width", ",", "color", "=", "color", ")" ]
This function creates an iterable context manager that can be used to iterate over something while showing a progress bar. It will either iterate over the `iterable` or `length` items (that are counted up). While iteration happens, this function will print a rendered progress bar to the given `file` (defaults to stdout) and will attempt to calculate remaining time and more. By default, this progress bar will not be rendered if the file is not a terminal. The context manager creates the progress bar. When the context manager is entered the progress bar is already displayed. With every iteration over the progress bar, the iterable passed to the bar is advanced and the bar is updated. When the context manager exits, a newline is printed and the progress bar is finalized on screen. No printing must happen or the progress bar will be unintentionally destroyed. Example usage:: with progressbar(items) as bar: for item in bar: do_something_with(item) Alternatively, if no iterable is specified, one can manually update the progress bar through the `update()` method instead of directly iterating over the progress bar. The update method accepts the number of steps to increment the bar with:: with progressbar(length=chunks.total_bytes) as bar: for chunk in chunks: process_chunk(chunk) bar.update(chunks.bytes) .. versionadded:: 2.0 .. versionadded:: 4.0 Added the `color` parameter. Added a `update` method to the progressbar object. :param iterable: an iterable to iterate over. If not provided the length is required. :param length: the number of items to iterate over. By default the progressbar will attempt to ask the iterator about its length, which might or might not work. If an iterable is also provided this parameter can be used to override the length. If an iterable is not provided the progress bar will iterate over a range of that length. :param label: the label to show next to the progress bar. :param show_eta: enables or disables the estimated time display. This is automatically disabled if the length cannot be determined. :param show_percent: enables or disables the percentage display. The default is `True` if the iterable has a length or `False` if not. :param show_pos: enables or disables the absolute position display. The default is `False`. :param item_show_func: a function called with the current item which can return a string to show the current item next to the progress bar. Note that the current item can be `None`! :param fill_char: the character to use to show the filled part of the progress bar. :param empty_char: the character to use to show the non-filled part of the progress bar. :param bar_template: the format string to use as template for the bar. The parameters in it are ``label`` for the label, ``bar`` for the progress bar and ``info`` for the info section. :param info_sep: the separator between multiple info items (eta etc.) :param width: the width of the progress bar in characters, 0 means full terminal width :param file: the file to write to. If this is not a terminal then only the label is printed. :param color: controls if the terminal supports ANSI colors or not. The default is autodetection. This is only needed if ANSI codes are included anywhere in the progress bar output which is not the case by default.
[ "This", "function", "creates", "an", "iterable", "context", "manager", "that", "can", "be", "used", "to", "iterate", "over", "something", "while", "showing", "a", "progress", "bar", ".", "It", "will", "either", "iterate", "over", "the", "iterable", "or", "length", "items", "(", "that", "are", "counted", "up", ")", ".", "While", "iteration", "happens", "this", "function", "will", "print", "a", "rendered", "progress", "bar", "to", "the", "given", "file", "(", "defaults", "to", "stdout", ")", "and", "will", "attempt", "to", "calculate", "remaining", "time", "and", "more", ".", "By", "default", "this", "progress", "bar", "will", "not", "be", "rendered", "if", "the", "file", "is", "not", "a", "terminal", "." ]
python
train
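A self-contained usage sketch following the two patterns described in the docstring above, shown against the public click package rather than pipenv's vendored copy.

import time
import click

# iterate over an iterable while the bar renders to stdout
with click.progressbar(range(100), label='Processing') as bar:
    for item in bar:
        time.sleep(0.01)

# no iterable: advance the bar manually through update()
with click.progressbar(length=1000, show_pos=True, fill_char='=') as bar:
    for chunk_size in (250, 250, 500):
        bar.update(chunk_size)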
aewallin/allantools
allantools/allantools.py
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L568-L660
def totdev(data, rate=1.0, data_type="phase", taus=None): """ Total deviation. Better confidence at long averages for Allan. .. math:: \\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) } \\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2 Where :math:`x^*_i` is a new time-series of length :math:`3N-4` derived from the original phase time-series :math:`x_n` of length :math:`N` by reflection at both ends. FIXME: better description of reflection operation. the original data x is in the center of x*: x*(1-j) = 2x(1) - x(1+j) for j=1..N-2 x*(i) = x(i) for i=1..N x*(N+j) = 2x(N) - x(N-j) for j=1..N-2 x* has length 3N-4 tau = m*tau0 FIXME: bias correction http://www.wriley.com/CI2.pdf page 5 Parameters ---------- phase: np.array Phase data in seconds. Provide either phase or frequency. frequency: np.array Fractional frequency data (nondimensional). Provide either frequency or phase. rate: float The sampling rate for phase or frequency, in Hz taus: np.array Array of tau values for which to compute measurement References ---------- David A. Howe, *The total deviation approach to long-term characterization of frequency stability*, IEEE tr. UFFC vol 47 no 5 (2000) NIST SP 1065 eqn (25) page 23 """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) N = len(phase) # totdev requires a new dataset # Begin by adding reflected data before dataset x1 = 2.0 * phase[0] * np.ones((N - 2,)) x1 = x1 - phase[1:-1] x1 = x1[::-1] # Reflected data at end of dataset x2 = 2.0 * phase[-1] * np.ones((N - 2,)) x2 = x2 - phase[1:-1][::-1] # check length of new dataset assert len(x1)+len(phase)+len(x2) == 3*N - 4 # Combine into a single array x = np.zeros((3*N - 4)) x[0:N-2] = x1 x[N-2:2*(N-2)+2] = phase # original data in the middle x[2*(N-2)+2:] = x2 devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) mid = len(x1) for idx, mj in enumerate(m): mj = int(mj) d0 = x[mid + 1:] d1 = x[mid + mj + 1:] d1n = x[mid - mj + 1:] e = min(len(d0), len(d1), len(d1n)) v_arr = d1n[:e] - 2.0 * d0[:e] + d1[:e] dev = np.sum(v_arr[:mid] * v_arr[:mid]) dev /= float(2 * pow(mj / rate, 2) * (N - 2)) dev = np.sqrt(dev) devs[idx] = dev deverrs[idx] = dev / np.sqrt(mid) ns[idx] = mid return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "totdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "N", "=", "len", "(", "phase", ")", "# totdev requires a new dataset", "# Begin by adding reflected data before dataset", "x1", "=", "2.0", "*", "phase", "[", "0", "]", "*", "np", ".", "ones", "(", "(", "N", "-", "2", ",", ")", ")", "x1", "=", "x1", "-", "phase", "[", "1", ":", "-", "1", "]", "x1", "=", "x1", "[", ":", ":", "-", "1", "]", "# Reflected data at end of dataset", "x2", "=", "2.0", "*", "phase", "[", "-", "1", "]", "*", "np", ".", "ones", "(", "(", "N", "-", "2", ",", ")", ")", "x2", "=", "x2", "-", "phase", "[", "1", ":", "-", "1", "]", "[", ":", ":", "-", "1", "]", "# check length of new dataset", "assert", "len", "(", "x1", ")", "+", "len", "(", "phase", ")", "+", "len", "(", "x2", ")", "==", "3", "*", "N", "-", "4", "# Combine into a single array", "x", "=", "np", ".", "zeros", "(", "(", "3", "*", "N", "-", "4", ")", ")", "x", "[", "0", ":", "N", "-", "2", "]", "=", "x1", "x", "[", "N", "-", "2", ":", "2", "*", "(", "N", "-", "2", ")", "+", "2", "]", "=", "phase", "# original data in the middle", "x", "[", "2", "*", "(", "N", "-", "2", ")", "+", "2", ":", "]", "=", "x2", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "mid", "=", "len", "(", "x1", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "mj", "=", "int", "(", "mj", ")", "d0", "=", "x", "[", "mid", "+", "1", ":", "]", "d1", "=", "x", "[", "mid", "+", "mj", "+", "1", ":", "]", "d1n", "=", "x", "[", "mid", "-", "mj", "+", "1", ":", "]", "e", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d1n", ")", ")", "v_arr", "=", "d1n", "[", ":", "e", "]", "-", "2.0", "*", "d0", "[", ":", "e", "]", "+", "d1", "[", ":", "e", "]", "dev", "=", "np", ".", "sum", "(", "v_arr", "[", ":", "mid", "]", "*", "v_arr", "[", ":", "mid", "]", ")", "dev", "/=", "float", "(", "2", "*", "pow", "(", "mj", "/", "rate", ",", "2", ")", "*", "(", "N", "-", "2", ")", ")", "dev", "=", "np", ".", "sqrt", "(", "dev", ")", "devs", "[", "idx", "]", "=", "dev", "deverrs", "[", "idx", "]", "=", "dev", "/", "np", ".", "sqrt", "(", "mid", ")", "ns", "[", "idx", "]", "=", "mid", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
Total deviation. Better confidence at long averages for Allan. .. math:: \\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) } \\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2 Where :math:`x^*_i` is a new time-series of length :math:`3N-4` derived from the original phase time-series :math:`x_n` of length :math:`N` by reflection at both ends. FIXME: better description of reflection operation. the original data x is in the center of x*: x*(1-j) = 2x(1) - x(1+j) for j=1..N-2 x*(i) = x(i) for i=1..N x*(N+j) = 2x(N) - x(N-j) for j=1..N-2 x* has length 3N-4 tau = m*tau0 FIXME: bias correction http://www.wriley.com/CI2.pdf page 5 Parameters ---------- phase: np.array Phase data in seconds. Provide either phase or frequency. frequency: np.array Fractional frequency data (nondimensional). Provide either frequency or phase. rate: float The sampling rate for phase or frequency, in Hz taus: np.array Array of tau values for which to compute measurement References ---------- David A. Howe, *The total deviation approach to long-term characterization of frequency stability*, IEEE tr. UFFC vol 47 no 5 (2000) NIST SP 1065 eqn (25) page 23
[ "Total", "deviation", ".", "Better", "confidence", "at", "long", "averages", "for", "Allan", "." ]
python
train
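A minimal usage sketch, assuming totdev is re-exported at the allantools package level like the library's other deviation estimators; the input here is synthetic phase data rather than a real measurement.

import numpy as np
import allantools

rate = 1.0                                  # 1 Hz sampling
phase = np.cumsum(np.random.randn(10000))   # synthetic phase record in seconds
taus, devs, errs, ns = allantools.totdev(phase, rate=rate,
                                         data_type="phase",
                                         taus=[1.0, 10.0, 100.0])
for t, d in zip(taus, devs):
    print(t, d)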
O365/python-o365
O365/utils/utils.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/utils.py#L1084-L1106
def any(self, *, collection, attribute, word, func=None, operation=None): """ Performs a filter with the OData 'any' keyword on the collection For example: q.any(collection='email_addresses', attribute='address', operation='eq', word='[email protected]') will transform to a filter such as: emailAddresses/any(a:a/address eq '[email protected]') :param str collection: the collection to apply the any keyword on :param str attribute: the attribute of the collection to check :param str word: the word to check :param str func: the logical function to apply to the attribute inside the collection :param str operation: the logical operation to apply to the attribute inside the collection :rtype: Query """ return self.iterable('any', collection=collection, attribute=attribute, word=word, func=func, operation=operation)
[ "def", "any", "(", "self", ",", "*", ",", "collection", ",", "attribute", ",", "word", ",", "func", "=", "None", ",", "operation", "=", "None", ")", ":", "return", "self", ".", "iterable", "(", "'any'", ",", "collection", "=", "collection", ",", "attribute", "=", "attribute", ",", "word", "=", "word", ",", "func", "=", "func", ",", "operation", "=", "operation", ")" ]
Performs a filter with the OData 'any' keyword on the collection For example: q.any(collection='email_addresses', attribute='address', operation='eq', word='[email protected]') will transform to a filter such as: emailAddresses/any(a:a/address eq '[email protected]') :param str collection: the collection to apply the any keyword on :param str attribute: the attribute of the collection to check :param str word: the word to check :param str func: the logical function to apply to the attribute inside the collection :param str operation: the logical operation to apply to the attribute inside the collection :rtype: Query
[ "Performs", "a", "filter", "with", "the", "OData", "any", "keyword", "on", "the", "collection" ]
python
train
ray-project/ray
python/ray/experimental/streaming/streaming.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/streaming/streaming.py#L585-L600
def sum(self, attribute_selector, state_keeper=None): """Applies a rolling sum operator to the stream. Attributes: sum_attribute_index (int): The index of the attribute to sum (assuming tuple records). """ op = Operator( _generate_uuid(), OpType.Sum, "Sum", _sum, other=attribute_selector, state_actor=state_keeper, num_instances=self.env.config.parallelism) return self.__register(op)
[ "def", "sum", "(", "self", ",", "attribute_selector", ",", "state_keeper", "=", "None", ")", ":", "op", "=", "Operator", "(", "_generate_uuid", "(", ")", ",", "OpType", ".", "Sum", ",", "\"Sum\"", ",", "_sum", ",", "other", "=", "attribute_selector", ",", "state_actor", "=", "state_keeper", ",", "num_instances", "=", "self", ".", "env", ".", "config", ".", "parallelism", ")", "return", "self", ".", "__register", "(", "op", ")" ]
Applies a rolling sum operator to the stream. Attributes: sum_attribute_index (int): The index of the attribute to sum (assuming tuple records).
[ "Applies", "a", "rolling", "sum", "operator", "to", "the", "stream", "." ]
python
train
ibelie/typy
typy/google/protobuf/text_format.py
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L739-L759
def _SkipFieldValue(tokenizer): """Skips over a field value. Args: tokenizer: A tokenizer to parse the field name and values. Raises: ParseError: In case an invalid field value is found. """ # String/bytes tokens can come in multiple adjacent string literals. # If we can consume one, consume as many as we can. if tokenizer.TryConsumeByteString(): while tokenizer.TryConsumeByteString(): pass return if (not tokenizer.TryConsumeIdentifier() and not tokenizer.TryConsumeInt64() and not tokenizer.TryConsumeUint64() and not tokenizer.TryConsumeFloat()): raise ParseError('Invalid field value: ' + tokenizer.token)
[ "def", "_SkipFieldValue", "(", "tokenizer", ")", ":", "# String/bytes tokens can come in multiple adjacent string literals.", "# If we can consume one, consume as many as we can.", "if", "tokenizer", ".", "TryConsumeByteString", "(", ")", ":", "while", "tokenizer", ".", "TryConsumeByteString", "(", ")", ":", "pass", "return", "if", "(", "not", "tokenizer", ".", "TryConsumeIdentifier", "(", ")", "and", "not", "tokenizer", ".", "TryConsumeInt64", "(", ")", "and", "not", "tokenizer", ".", "TryConsumeUint64", "(", ")", "and", "not", "tokenizer", ".", "TryConsumeFloat", "(", ")", ")", ":", "raise", "ParseError", "(", "'Invalid field value: '", "+", "tokenizer", ".", "token", ")" ]
Skips over a field value. Args: tokenizer: A tokenizer to parse the field name and values. Raises: ParseError: In case an invalid field value is found.
[ "Skips", "over", "a", "field", "value", "." ]
python
valid
xeroc/python-graphenelib
graphenebase/memo.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenebase/memo.py#L72-L92
def encode_memo(priv, pub, nonce, message): """ Encode a message with a shared secret between Alice and Bob :param PrivateKey priv: Private Key (of Alice) :param PublicKey pub: Public Key (of Bob) :param int nonce: Random nonce :param str message: Memo message :return: Encrypted message :rtype: hex """ shared_secret = get_shared_secret(priv, pub) aes = init_aes(shared_secret, nonce) " Checksum " raw = bytes(message, "utf8") checksum = hashlib.sha256(raw).digest() raw = checksum[0:4] + raw " Padding " raw = _pad(raw, 16) " Encryption " return hexlify(aes.encrypt(raw)).decode("ascii")
[ "def", "encode_memo", "(", "priv", ",", "pub", ",", "nonce", ",", "message", ")", ":", "shared_secret", "=", "get_shared_secret", "(", "priv", ",", "pub", ")", "aes", "=", "init_aes", "(", "shared_secret", ",", "nonce", ")", "\" Checksum \"", "raw", "=", "bytes", "(", "message", ",", "\"utf8\"", ")", "checksum", "=", "hashlib", ".", "sha256", "(", "raw", ")", ".", "digest", "(", ")", "raw", "=", "checksum", "[", "0", ":", "4", "]", "+", "raw", "\" Padding \"", "raw", "=", "_pad", "(", "raw", ",", "16", ")", "\" Encryption \"", "return", "hexlify", "(", "aes", ".", "encrypt", "(", "raw", ")", ")", ".", "decode", "(", "\"ascii\"", ")" ]
Encode a message with a shared secret between Alice and Bob :param PrivateKey priv: Private Key (of Alice) :param PublicKey pub: Public Key (of Bob) :param int nonce: Random nonce :param str message: Memo message :return: Encrypted message :rtype: hex
[ "Encode", "a", "message", "with", "a", "shared", "secret", "between", "Alice", "and", "Bob" ]
python
valid
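A hedged sketch of calling encode_memo. It assumes graphenebase.account.PrivateKey generates a random key when constructed without arguments and exposes a .pubkey attribute; both are assumptions about the surrounding library, not facts from this record.

import random
from graphenebase.account import PrivateKey
from graphenebase.memo import encode_memo

alice_priv = PrivateKey()          # sender's private key (random key assumed)
bob_priv = PrivateKey()            # receiver's key pair
nonce = random.getrandbits(64)
cipher_hex = encode_memo(alice_priv, bob_priv.pubkey, nonce, "hello bob")
print(cipher_hex)                  # hex AES ciphertext carrying a 4-byte SHA-256 checksum prefix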
pyviz/holoviews
holoviews/core/dimension.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/dimension.py#L379-L383
def pprint_label(self): "The pretty-printed label string for the Dimension" unit = ('' if self.unit is None else type(self.unit)(self.unit_format).format(unit=self.unit)) return bytes_to_unicode(self.label) + bytes_to_unicode(unit)
[ "def", "pprint_label", "(", "self", ")", ":", "unit", "=", "(", "''", "if", "self", ".", "unit", "is", "None", "else", "type", "(", "self", ".", "unit", ")", "(", "self", ".", "unit_format", ")", ".", "format", "(", "unit", "=", "self", ".", "unit", ")", ")", "return", "bytes_to_unicode", "(", "self", ".", "label", ")", "+", "bytes_to_unicode", "(", "unit", ")" ]
The pretty-printed label string for the Dimension
[ "The", "pretty", "-", "printed", "label", "string", "for", "the", "Dimension" ]
python
train
onicagroup/runway
runway/tfenv.py
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/tfenv.py#L76-L87
def get_available_tf_versions(include_prerelease=False): """Return available Terraform versions.""" tf_releases = json.loads( requests.get('https://releases.hashicorp.com/index.json').text )['terraform'] tf_versions = sorted([k # descending for k, _v in tf_releases['versions'].items()], key=LooseVersion, reverse=True) if include_prerelease: return tf_versions return [i for i in tf_versions if '-' not in i]
[ "def", "get_available_tf_versions", "(", "include_prerelease", "=", "False", ")", ":", "tf_releases", "=", "json", ".", "loads", "(", "requests", ".", "get", "(", "'https://releases.hashicorp.com/index.json'", ")", ".", "text", ")", "[", "'terraform'", "]", "tf_versions", "=", "sorted", "(", "[", "k", "# descending", "for", "k", ",", "_v", "in", "tf_releases", "[", "'versions'", "]", ".", "items", "(", ")", "]", ",", "key", "=", "LooseVersion", ",", "reverse", "=", "True", ")", "if", "include_prerelease", ":", "return", "tf_versions", "return", "[", "i", "for", "i", "in", "tf_versions", "if", "'-'", "not", "in", "i", "]" ]
Return available Terraform versions.
[ "Return", "available", "Terraform", "versions", "." ]
python
train
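A short usage sketch; the import path comes straight from the record, and the call performs a live HTTP request to releases.hashicorp.com, so it needs network access.

from runway.tfenv import get_available_tf_versions

stable = get_available_tf_versions()                        # newest first, prereleases excluded
everything = get_available_tf_versions(include_prerelease=True)
print(stable[0], len(everything))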
LuminosoInsight/langcodes
langcodes/__init__.py
https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L730-L817
def find_name(tagtype: str, name: str, language: {str, 'Language', None}=None): """ Find the subtag of a particular `tagtype` that has the given `name`. The default language, "und", will allow matching names in any language, so you can get the code 'fr' by looking up "French", "Français", or "francés". Occasionally, names are ambiguous in a way that can be resolved by specifying what name the language is supposed to be in. For example, there is a language named 'Malayo' in English, but it's different from the language named 'Malayo' in Spanish (which is Malay). Specifying the language will look up the name in a trie that is only in that language. In a previous version, we thought we were going to deprecate the `language` parameter, as there weren't significant cases of conflicts in names of things between languages. Well, we got more data, and conflicts in names are everywhere. Specifying the language that the name should be in is still not required, but it will help to make sure that names can be round-tripped. >>> Language.find_name('language', 'francés') Language.make(language='fr') >>> Language.find_name('region', 'United Kingdom') Language.make(region='GB') >>> Language.find_name('script', 'Arabic') Language.make(script='Arab') >>> Language.find_name('language', 'norsk bokmål') Language.make(language='nb') >>> Language.find_name('language', 'norsk') Language.make(language='no') >>> Language.find_name('language', 'norsk', 'en') Traceback (most recent call last): ... LookupError: Can't find any language named 'norsk' >>> Language.find_name('language', 'norsk', 'no') Language.make(language='no') >>> Language.find_name('language', 'malayo', 'en') Language.make(language='mbp') >>> Language.find_name('language', 'malayo', 'es') Language.make(language='ms') Some langauge names resolve to more than a language. For example, the name 'Brazilian Portuguese' resolves to a language and a region, and 'Simplified Chinese' resolves to a language and a script. In these cases, a Language object with multiple subtags will be returned. >>> Language.find_name('language', 'Brazilian Portuguese', 'en') Language.make(language='pt', region='BR') >>> Language.find_name('language', 'Simplified Chinese', 'en') Language.make(language='zh', script='Hans') A small amount of fuzzy matching is supported: if the name can be shortened to match a single language name, you get that language. This allows, for example, "Hakka dialect" to match "Hakka". >>> Language.find_name('language', 'Hakka dialect') Language.make(language='hak') """ # No matter what form of language we got, normalize it to a single # language subtag if isinstance(language, Language): language = language.language elif isinstance(language, str): language = get(language).language if language is None: language = 'und' code = name_to_code(tagtype, name, language) if code is None: raise LookupError("Can't find any %s named %r" % (tagtype, name)) if '-' in code: return Language.get(code) else: data = {tagtype: code} return Language.make(**data)
[ "def", "find_name", "(", "tagtype", ":", "str", ",", "name", ":", "str", ",", "language", ":", "{", "str", ",", "'Language'", ",", "None", "}", "=", "None", ")", ":", "# No matter what form of language we got, normalize it to a single", "# language subtag", "if", "isinstance", "(", "language", ",", "Language", ")", ":", "language", "=", "language", ".", "language", "elif", "isinstance", "(", "language", ",", "str", ")", ":", "language", "=", "get", "(", "language", ")", ".", "language", "if", "language", "is", "None", ":", "language", "=", "'und'", "code", "=", "name_to_code", "(", "tagtype", ",", "name", ",", "language", ")", "if", "code", "is", "None", ":", "raise", "LookupError", "(", "\"Can't find any %s named %r\"", "%", "(", "tagtype", ",", "name", ")", ")", "if", "'-'", "in", "code", ":", "return", "Language", ".", "get", "(", "code", ")", "else", ":", "data", "=", "{", "tagtype", ":", "code", "}", "return", "Language", ".", "make", "(", "*", "*", "data", ")" ]
Find the subtag of a particular `tagtype` that has the given `name`. The default language, "und", will allow matching names in any language, so you can get the code 'fr' by looking up "French", "Français", or "francés". Occasionally, names are ambiguous in a way that can be resolved by specifying what name the language is supposed to be in. For example, there is a language named 'Malayo' in English, but it's different from the language named 'Malayo' in Spanish (which is Malay). Specifying the language will look up the name in a trie that is only in that language. In a previous version, we thought we were going to deprecate the `language` parameter, as there weren't significant cases of conflicts in names of things between languages. Well, we got more data, and conflicts in names are everywhere. Specifying the language that the name should be in is still not required, but it will help to make sure that names can be round-tripped. >>> Language.find_name('language', 'francés') Language.make(language='fr') >>> Language.find_name('region', 'United Kingdom') Language.make(region='GB') >>> Language.find_name('script', 'Arabic') Language.make(script='Arab') >>> Language.find_name('language', 'norsk bokmål') Language.make(language='nb') >>> Language.find_name('language', 'norsk') Language.make(language='no') >>> Language.find_name('language', 'norsk', 'en') Traceback (most recent call last): ... LookupError: Can't find any language named 'norsk' >>> Language.find_name('language', 'norsk', 'no') Language.make(language='no') >>> Language.find_name('language', 'malayo', 'en') Language.make(language='mbp') >>> Language.find_name('language', 'malayo', 'es') Language.make(language='ms') Some langauge names resolve to more than a language. For example, the name 'Brazilian Portuguese' resolves to a language and a region, and 'Simplified Chinese' resolves to a language and a script. In these cases, a Language object with multiple subtags will be returned. >>> Language.find_name('language', 'Brazilian Portuguese', 'en') Language.make(language='pt', region='BR') >>> Language.find_name('language', 'Simplified Chinese', 'en') Language.make(language='zh', script='Hans') A small amount of fuzzy matching is supported: if the name can be shortened to match a single language name, you get that language. This allows, for example, "Hakka dialect" to match "Hakka". >>> Language.find_name('language', 'Hakka dialect') Language.make(language='hak')
[ "Find", "the", "subtag", "of", "a", "particular", "tagtype", "that", "has", "the", "given", "name", "." ]
python
train
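A small sketch mirroring the doctests above, assuming Language is importable from the langcodes package root (its usual public entry point).

from langcodes import Language

lang = Language.find_name('language', 'Brazilian Portuguese', 'en')
print(lang.language, lang.region)              # 'pt', 'BR'

try:
    Language.find_name('language', 'norsk', 'en')
except LookupError as exc:
    print(exc)                                 # the name only resolves within Norwegian, not English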
jobovy/galpy
galpy/potential/KuzminKutuzovStaeckelPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/KuzminKutuzovStaeckelPotential.py#L195-L224
def _Rzderiv(self,R,z,phi=0.,t=0.): """ NAME: _Rzderiv PURPOSE: evaluate the mixed R,z derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t- time OUTPUT: d2phi/dR/dz HISTORY: 2015-02-13 - Written - Trick (MPIA) """ l,n = bovy_coords.Rz_to_lambdanu (R,z,ac=self._ac,Delta=self._Delta) jac = bovy_coords.Rz_to_lambdanu_jac(R,z, Delta=self._Delta) hess = bovy_coords.Rz_to_lambdanu_hess(R,z, Delta=self._Delta) dldR = jac[0,0] dndR = jac[1,0] dldz = jac[0,1] dndz = jac[1,1] d2ldRdz = hess[0,0,1] d2ndRdz = hess[1,0,1] return d2ldRdz * self._lderiv(l,n) + \ d2ndRdz * self._nderiv(l,n) + \ dldR*dldz * self._l2deriv(l,n) + \ dndR*dndz * self._n2deriv(l,n) + \ (dldR*dndz+dldz*dndR)* self._lnderiv(l,n)
[ "def", "_Rzderiv", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "l", ",", "n", "=", "bovy_coords", ".", "Rz_to_lambdanu", "(", "R", ",", "z", ",", "ac", "=", "self", ".", "_ac", ",", "Delta", "=", "self", ".", "_Delta", ")", "jac", "=", "bovy_coords", ".", "Rz_to_lambdanu_jac", "(", "R", ",", "z", ",", "Delta", "=", "self", ".", "_Delta", ")", "hess", "=", "bovy_coords", ".", "Rz_to_lambdanu_hess", "(", "R", ",", "z", ",", "Delta", "=", "self", ".", "_Delta", ")", "dldR", "=", "jac", "[", "0", ",", "0", "]", "dndR", "=", "jac", "[", "1", ",", "0", "]", "dldz", "=", "jac", "[", "0", ",", "1", "]", "dndz", "=", "jac", "[", "1", ",", "1", "]", "d2ldRdz", "=", "hess", "[", "0", ",", "0", ",", "1", "]", "d2ndRdz", "=", "hess", "[", "1", ",", "0", ",", "1", "]", "return", "d2ldRdz", "*", "self", ".", "_lderiv", "(", "l", ",", "n", ")", "+", "d2ndRdz", "*", "self", ".", "_nderiv", "(", "l", ",", "n", ")", "+", "dldR", "*", "dldz", "*", "self", ".", "_l2deriv", "(", "l", ",", "n", ")", "+", "dndR", "*", "dndz", "*", "self", ".", "_n2deriv", "(", "l", ",", "n", ")", "+", "(", "dldR", "*", "dndz", "+", "dldz", "*", "dndR", ")", "*", "self", ".", "_lnderiv", "(", "l", ",", "n", ")" ]
NAME: _Rzderiv PURPOSE: evaluate the mixed R,z derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t- time OUTPUT: d2phi/dR/dz HISTORY: 2015-02-13 - Written - Trick (MPIA)
[ "NAME", ":", "_Rzderiv", "PURPOSE", ":", "evaluate", "the", "mixed", "R", "z", "derivative", "for", "this", "potential", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "d2phi", "/", "dR", "/", "dz", "HISTORY", ":", "2015", "-", "02", "-", "13", "-", "Written", "-", "Trick", "(", "MPIA", ")" ]
python
train
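The body of _Rzderiv is a direct transcription of the two-variable chain rule in the confocal (lambda, nu) coordinates; written out as a math sketch (subscripts denote partial derivatives), it is

\frac{\partial^2 \Phi}{\partial R\,\partial z}
    = \Phi_{\lambda}\,\lambda_{Rz} + \Phi_{\nu}\,\nu_{Rz}
    + \Phi_{\lambda\lambda}\,\lambda_{R}\,\lambda_{z}
    + \Phi_{\nu\nu}\,\nu_{R}\,\nu_{z}
    + \Phi_{\lambda\nu}\,(\lambda_{R}\,\nu_{z} + \lambda_{z}\,\nu_{R})

with the Jacobian entries supplying \lambda_R, \lambda_z, \nu_R, \nu_z and the Hessian entries supplying \lambda_{Rz}, \nu_{Rz}.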
bloomreach/s4cmd
s4cmd.py
https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L1278-L1283
def get_file_privilege(self, source): '''Get privileges of a local file''' try: return str(oct(os.stat(source).st_mode)[-3:]) except Exception as e: raise Failure('Could not get stat for %s, error_message = %s', source, e)
[ "def", "get_file_privilege", "(", "self", ",", "source", ")", ":", "try", ":", "return", "str", "(", "oct", "(", "os", ".", "stat", "(", "source", ")", ".", "st_mode", ")", "[", "-", "3", ":", "]", ")", "except", "Exception", "as", "e", ":", "raise", "Failure", "(", "'Could not get stat for %s, error_message = %s'", ",", "source", ",", "e", ")" ]
Get privileges of a local file
[ "Get", "privileges", "of", "a", "local", "file" ]
python
test
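A standalone version of the same idiom, since the last three octal digits of st_mode are the familiar Unix permission bits; the path used here is just an example.

import os

def file_mode(path):
    # e.g. '644' for a typical world-readable file
    return oct(os.stat(path).st_mode)[-3:]

print(file_mode('/etc/hosts'))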
tobami/littlechef
littlechef/chef.py
https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L431-L474
def _configure_node(): """Exectutes chef-solo to apply roles and recipes to a node""" print("") msg = "Cooking..." if env.parallel: msg = "[{0}]: {1}".format(env.host_string, msg) print(msg) # Backup last report with settings(hide('stdout', 'warnings', 'running'), warn_only=True): sudo("mv {0} {0}.1".format(LOGFILE)) # Build chef-solo command cmd = "RUBYOPT=-Ku chef-solo" if whyrun: cmd += " --why-run" cmd += ' -l {0} -j /etc/chef/node.json'.format(env.loglevel) if ENABLE_LOGS: cmd += ' | tee {0}'.format(LOGFILE) if env.loglevel == "debug": print("Executing Chef Solo with the following command:\n" "{0}".format(cmd)) with settings(hide('warnings', 'running'), warn_only=True): output = sudo(cmd) if (output.failed or "FATAL: Stacktrace dumped" in output or ("Chef Run complete" not in output and "Report handlers complete" not in output)): if 'chef-solo: command not found' in output: print( colors.red( "\nFAILED: Chef Solo is not installed on this node")) print( "Type 'fix node:{0} deploy_chef' to install it".format( env.host)) abort("") else: print(colors.red( "\nFAILED: chef-solo could not finish configuring the node\n")) import sys sys.exit(1) else: msg = "\n" if env.parallel: msg += "[{0}]: ".format(env.host_string) msg += "SUCCESS: Node correctly configured" print(colors.green(msg))
[ "def", "_configure_node", "(", ")", ":", "print", "(", "\"\"", ")", "msg", "=", "\"Cooking...\"", "if", "env", ".", "parallel", ":", "msg", "=", "\"[{0}]: {1}\"", ".", "format", "(", "env", ".", "host_string", ",", "msg", ")", "print", "(", "msg", ")", "# Backup last report", "with", "settings", "(", "hide", "(", "'stdout'", ",", "'warnings'", ",", "'running'", ")", ",", "warn_only", "=", "True", ")", ":", "sudo", "(", "\"mv {0} {0}.1\"", ".", "format", "(", "LOGFILE", ")", ")", "# Build chef-solo command", "cmd", "=", "\"RUBYOPT=-Ku chef-solo\"", "if", "whyrun", ":", "cmd", "+=", "\" --why-run\"", "cmd", "+=", "' -l {0} -j /etc/chef/node.json'", ".", "format", "(", "env", ".", "loglevel", ")", "if", "ENABLE_LOGS", ":", "cmd", "+=", "' | tee {0}'", ".", "format", "(", "LOGFILE", ")", "if", "env", ".", "loglevel", "==", "\"debug\"", ":", "print", "(", "\"Executing Chef Solo with the following command:\\n\"", "\"{0}\"", ".", "format", "(", "cmd", ")", ")", "with", "settings", "(", "hide", "(", "'warnings'", ",", "'running'", ")", ",", "warn_only", "=", "True", ")", ":", "output", "=", "sudo", "(", "cmd", ")", "if", "(", "output", ".", "failed", "or", "\"FATAL: Stacktrace dumped\"", "in", "output", "or", "(", "\"Chef Run complete\"", "not", "in", "output", "and", "\"Report handlers complete\"", "not", "in", "output", ")", ")", ":", "if", "'chef-solo: command not found'", "in", "output", ":", "print", "(", "colors", ".", "red", "(", "\"\\nFAILED: Chef Solo is not installed on this node\"", ")", ")", "print", "(", "\"Type 'fix node:{0} deploy_chef' to install it\"", ".", "format", "(", "env", ".", "host", ")", ")", "abort", "(", "\"\"", ")", "else", ":", "print", "(", "colors", ".", "red", "(", "\"\\nFAILED: chef-solo could not finish configuring the node\\n\"", ")", ")", "import", "sys", "sys", ".", "exit", "(", "1", ")", "else", ":", "msg", "=", "\"\\n\"", "if", "env", ".", "parallel", ":", "msg", "+=", "\"[{0}]: \"", ".", "format", "(", "env", ".", "host_string", ")", "msg", "+=", "\"SUCCESS: Node correctly configured\"", "print", "(", "colors", ".", "green", "(", "msg", ")", ")" ]
Exectutes chef-solo to apply roles and recipes to a node
[ "Exectutes", "chef", "-", "solo", "to", "apply", "roles", "and", "recipes", "to", "a", "node" ]
python
train
quantopian/pyfolio
pyfolio/plotting.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/plotting.py#L712-L836
def plot_rolling_returns(returns, factor_returns=None, live_start_date=None, logy=False, cone_std=None, legend_loc='best', volatility_match=False, cone_function=timeseries.forecast_cone_bootstrap, ax=None, **kwargs): """ Plots cumulative rolling returns versus some benchmarks'. Backtest returns are in green, and out-of-sample (live trading) returns are in red. Additionally, a non-parametric cone plot may be added to the out-of-sample returns region. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in tears.create_full_tear_sheet. factor_returns : pd.Series, optional Daily noncumulative returns of the benchmark factor to which betas are computed. Usually a benchmark such as market returns. - This is in the same style as returns. live_start_date : datetime, optional The date when the strategy began live trading, after its backtest period. This date should be normalized. logy : bool, optional Whether to log-scale the y-axis. cone_std : float, or tuple, optional If float, The standard deviation to use for the cone plots. If tuple, Tuple of standard deviation values to use for the cone plots - See timeseries.forecast_cone_bounds for more details. legend_loc : matplotlib.loc, optional The location of the legend on the plot. volatility_match : bool, optional Whether to normalize the volatility of the returns to those of the benchmark returns. This helps compare strategies with different volatilities. Requires passing of benchmark_rets. cone_function : function, optional Function to use when generating forecast probability cone. The function signiture must follow the form: def cone(in_sample_returns (pd.Series), days_to_project_forward (int), cone_std= (float, or tuple), starting_value= (int, or float)) See timeseries.forecast_cone_bootstrap for an example. ax : matplotlib.Axes, optional Axes upon which to plot. **kwargs, optional Passed to plotting function. Returns ------- ax : matplotlib.Axes The axes that were plotted on. 
""" if ax is None: ax = plt.gca() ax.set_xlabel('') ax.set_ylabel('Cumulative returns') ax.set_yscale('log' if logy else 'linear') if volatility_match and factor_returns is None: raise ValueError('volatility_match requires passing of ' 'factor_returns.') elif volatility_match and factor_returns is not None: bmark_vol = factor_returns.loc[returns.index].std() returns = (returns / returns.std()) * bmark_vol cum_rets = ep.cum_returns(returns, 1.0) y_axis_formatter = FuncFormatter(utils.two_dec_places) ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter)) if factor_returns is not None: cum_factor_returns = ep.cum_returns( factor_returns[cum_rets.index], 1.0) cum_factor_returns.plot(lw=2, color='gray', label=factor_returns.name, alpha=0.60, ax=ax, **kwargs) if live_start_date is not None: live_start_date = ep.utils.get_utc_timestamp(live_start_date) is_cum_returns = cum_rets.loc[cum_rets.index < live_start_date] oos_cum_returns = cum_rets.loc[cum_rets.index >= live_start_date] else: is_cum_returns = cum_rets oos_cum_returns = pd.Series([]) is_cum_returns.plot(lw=3, color='forestgreen', alpha=0.6, label='Backtest', ax=ax, **kwargs) if len(oos_cum_returns) > 0: oos_cum_returns.plot(lw=4, color='red', alpha=0.6, label='Live', ax=ax, **kwargs) if cone_std is not None: if isinstance(cone_std, (float, int)): cone_std = [cone_std] is_returns = returns.loc[returns.index < live_start_date] cone_bounds = cone_function( is_returns, len(oos_cum_returns), cone_std=cone_std, starting_value=is_cum_returns[-1]) cone_bounds = cone_bounds.set_index(oos_cum_returns.index) for std in cone_std: ax.fill_between(cone_bounds.index, cone_bounds[float(std)], cone_bounds[float(-std)], color='steelblue', alpha=0.5) if legend_loc is not None: ax.legend(loc=legend_loc, frameon=True, framealpha=0.5) ax.axhline(1.0, linestyle='--', color='black', lw=2) return ax
[ "def", "plot_rolling_returns", "(", "returns", ",", "factor_returns", "=", "None", ",", "live_start_date", "=", "None", ",", "logy", "=", "False", ",", "cone_std", "=", "None", ",", "legend_loc", "=", "'best'", ",", "volatility_match", "=", "False", ",", "cone_function", "=", "timeseries", ".", "forecast_cone_bootstrap", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "ax", ".", "set_xlabel", "(", "''", ")", "ax", ".", "set_ylabel", "(", "'Cumulative returns'", ")", "ax", ".", "set_yscale", "(", "'log'", "if", "logy", "else", "'linear'", ")", "if", "volatility_match", "and", "factor_returns", "is", "None", ":", "raise", "ValueError", "(", "'volatility_match requires passing of '", "'factor_returns.'", ")", "elif", "volatility_match", "and", "factor_returns", "is", "not", "None", ":", "bmark_vol", "=", "factor_returns", ".", "loc", "[", "returns", ".", "index", "]", ".", "std", "(", ")", "returns", "=", "(", "returns", "/", "returns", ".", "std", "(", ")", ")", "*", "bmark_vol", "cum_rets", "=", "ep", ".", "cum_returns", "(", "returns", ",", "1.0", ")", "y_axis_formatter", "=", "FuncFormatter", "(", "utils", ".", "two_dec_places", ")", "ax", ".", "yaxis", ".", "set_major_formatter", "(", "FuncFormatter", "(", "y_axis_formatter", ")", ")", "if", "factor_returns", "is", "not", "None", ":", "cum_factor_returns", "=", "ep", ".", "cum_returns", "(", "factor_returns", "[", "cum_rets", ".", "index", "]", ",", "1.0", ")", "cum_factor_returns", ".", "plot", "(", "lw", "=", "2", ",", "color", "=", "'gray'", ",", "label", "=", "factor_returns", ".", "name", ",", "alpha", "=", "0.60", ",", "ax", "=", "ax", ",", "*", "*", "kwargs", ")", "if", "live_start_date", "is", "not", "None", ":", "live_start_date", "=", "ep", ".", "utils", ".", "get_utc_timestamp", "(", "live_start_date", ")", "is_cum_returns", "=", "cum_rets", ".", "loc", "[", "cum_rets", ".", "index", "<", "live_start_date", "]", "oos_cum_returns", "=", "cum_rets", ".", "loc", "[", "cum_rets", ".", "index", ">=", "live_start_date", "]", "else", ":", "is_cum_returns", "=", "cum_rets", "oos_cum_returns", "=", "pd", ".", "Series", "(", "[", "]", ")", "is_cum_returns", ".", "plot", "(", "lw", "=", "3", ",", "color", "=", "'forestgreen'", ",", "alpha", "=", "0.6", ",", "label", "=", "'Backtest'", ",", "ax", "=", "ax", ",", "*", "*", "kwargs", ")", "if", "len", "(", "oos_cum_returns", ")", ">", "0", ":", "oos_cum_returns", ".", "plot", "(", "lw", "=", "4", ",", "color", "=", "'red'", ",", "alpha", "=", "0.6", ",", "label", "=", "'Live'", ",", "ax", "=", "ax", ",", "*", "*", "kwargs", ")", "if", "cone_std", "is", "not", "None", ":", "if", "isinstance", "(", "cone_std", ",", "(", "float", ",", "int", ")", ")", ":", "cone_std", "=", "[", "cone_std", "]", "is_returns", "=", "returns", ".", "loc", "[", "returns", ".", "index", "<", "live_start_date", "]", "cone_bounds", "=", "cone_function", "(", "is_returns", ",", "len", "(", "oos_cum_returns", ")", ",", "cone_std", "=", "cone_std", ",", "starting_value", "=", "is_cum_returns", "[", "-", "1", "]", ")", "cone_bounds", "=", "cone_bounds", ".", "set_index", "(", "oos_cum_returns", ".", "index", ")", "for", "std", "in", "cone_std", ":", "ax", ".", "fill_between", "(", "cone_bounds", ".", "index", ",", "cone_bounds", "[", "float", "(", "std", ")", "]", ",", "cone_bounds", "[", "float", "(", "-", "std", ")", "]", ",", "color", "=", "'steelblue'", ",", "alpha", "=", "0.5", ")", "if", "legend_loc", "is", "not", "None", ":", "ax", 
".", "legend", "(", "loc", "=", "legend_loc", ",", "frameon", "=", "True", ",", "framealpha", "=", "0.5", ")", "ax", ".", "axhline", "(", "1.0", ",", "linestyle", "=", "'--'", ",", "color", "=", "'black'", ",", "lw", "=", "2", ")", "return", "ax" ]
Plots cumulative rolling returns versus some benchmarks'. Backtest returns are in green, and out-of-sample (live trading) returns are in red. Additionally, a non-parametric cone plot may be added to the out-of-sample returns region. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in tears.create_full_tear_sheet. factor_returns : pd.Series, optional Daily noncumulative returns of the benchmark factor to which betas are computed. Usually a benchmark such as market returns. - This is in the same style as returns. live_start_date : datetime, optional The date when the strategy began live trading, after its backtest period. This date should be normalized. logy : bool, optional Whether to log-scale the y-axis. cone_std : float, or tuple, optional If float, The standard deviation to use for the cone plots. If tuple, Tuple of standard deviation values to use for the cone plots - See timeseries.forecast_cone_bounds for more details. legend_loc : matplotlib.loc, optional The location of the legend on the plot. volatility_match : bool, optional Whether to normalize the volatility of the returns to those of the benchmark returns. This helps compare strategies with different volatilities. Requires passing of benchmark_rets. cone_function : function, optional Function to use when generating forecast probability cone. The function signiture must follow the form: def cone(in_sample_returns (pd.Series), days_to_project_forward (int), cone_std= (float, or tuple), starting_value= (int, or float)) See timeseries.forecast_cone_bootstrap for an example. ax : matplotlib.Axes, optional Axes upon which to plot. **kwargs, optional Passed to plotting function. Returns ------- ax : matplotlib.Axes The axes that were plotted on.
[ "Plots", "cumulative", "rolling", "returns", "versus", "some", "benchmarks", "." ]
python
valid
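A hedged usage sketch with synthetic data; real use would pass actual strategy and benchmark return series (daily, non-cumulative, UTC-indexed, as the docstring requires), and the import path mirrors the record's module.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pyfolio.plotting import plot_rolling_returns

idx = pd.date_range('2015-01-01', periods=500, freq='B', tz='UTC')
returns = pd.Series(np.random.normal(0.0005, 0.010, len(idx)), index=idx)
benchmark = pd.Series(np.random.normal(0.0003, 0.008, len(idx)), index=idx, name='benchmark')

ax = plot_rolling_returns(returns,
                          factor_returns=benchmark,
                          live_start_date=idx[400],   # everything after this date plots as "Live"
                          cone_std=(1.0, 2.0))
plt.show()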
apple/turicreate
src/unity/python/turicreate/toolkits/_internal_utils.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L64-L83
def _add_docstring(format_dict): """ Format a doc-string on the fly. @arg format_dict: A dictionary to format the doc-strings Example: @add_docstring({'context': __doc_string_context}) def predict(x): ''' {context} >> model.predict(data) ''' return x """ def add_docstring_context(func): def wrapper(*args, **kwargs): return func(*args, **kwargs) wrapper.__doc__ = func.__doc__.format(**format_dict) return wrapper return add_docstring_context
[ "def", "_add_docstring", "(", "format_dict", ")", ":", "def", "add_docstring_context", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "wrapper", ".", "__doc__", "=", "func", ".", "__doc__", ".", "format", "(", "*", "*", "format_dict", ")", "return", "wrapper", "return", "add_docstring_context" ]
Format a doc-string on the fly. @arg format_dict: A dictionary to format the doc-strings Example: @add_docstring({'context': __doc_string_context}) def predict(x): ''' {context} >> model.predict(data) ''' return x
[ "Format", "a", "doc", "-", "string", "on", "the", "fly", ".", "@arg", "format_dict", ":", "A", "dictionary", "to", "format", "the", "doc", "-", "strings", "Example", ":" ]
python
train
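A self-contained re-creation of the decorator pattern shown above, independent of turicreate's internals, to make the docstring-templating behaviour concrete.

doc_context = "Data must contain the columns used at training time."

def add_docstring(format_dict):
    def add_docstring_context(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        wrapper.__doc__ = func.__doc__.format(**format_dict)
        return wrapper
    return add_docstring_context

@add_docstring({'context': doc_context})
def predict(x):
    """
    {context}

    Returns the input unchanged (placeholder body).
    """
    return x

print(predict.__doc__)   # '{context}' has been replaced by doc_context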
Microsoft/botbuilder-python
libraries/botframework-connector/azure_bdist_wheel.py
https://github.com/Microsoft/botbuilder-python/blob/274663dd91c811bae6ac4488915ba5880771b0a7/libraries/botframework-connector/azure_bdist_wheel.py#L368-L448
def egg2dist(self, egginfo_path, distinfo_path): """Convert an .egg-info directory into a .dist-info directory""" def adios(p): """Appropriately delete directory, file or link.""" if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p): shutil.rmtree(p) elif os.path.exists(p): os.unlink(p) adios(distinfo_path) if not os.path.exists(egginfo_path): # There is no egg-info. This is probably because the egg-info # file/directory is not named matching the distribution name used # to name the archive file. Check for this case and report # accordingly. import glob pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info') possible = glob.glob(pat) err = "Egg metadata expected at %s but not found" % (egginfo_path,) if possible: alt = os.path.basename(possible[0]) err += " (%s found - possible misnamed archive file?)" % (alt,) raise ValueError(err) if os.path.isfile(egginfo_path): # .egg-info is a single file pkginfo_path = egginfo_path pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path) os.mkdir(distinfo_path) else: # .egg-info is a directory pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO') pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path) # ignore common egg metadata that is useless to wheel shutil.copytree(egginfo_path, distinfo_path, ignore=lambda x, y: set(('PKG-INFO', 'requires.txt', 'SOURCES.txt', 'not-zip-safe',))) # delete dependency_links if it is only whitespace dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt') with open(dependency_links_path, 'r') as dependency_links_file: dependency_links = dependency_links_file.read().strip() if not dependency_links: adios(dependency_links_path) write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info) # XXX deprecated. Still useful for current distribute/setuptools. metadata_path = os.path.join(distinfo_path, 'METADATA') self.add_requirements(metadata_path) # XXX intentionally a different path than the PEP. metadata_json_path = os.path.join(distinfo_path, 'metadata.json') pymeta = pkginfo_to_dict(metadata_path, distribution=self.distribution) if 'description' in pymeta: description_filename = 'DESCRIPTION.rst' description_text = pymeta.pop('description') description_path = os.path.join(distinfo_path, description_filename) with open(description_path, "wb") as description_file: description_file.write(description_text.encode('utf-8')) pymeta['extensions']['python.details']['document_names']['description'] = description_filename # XXX heuristically copy any LICENSE/LICENSE.txt? license = self.license_file() if license: license_filename = 'LICENSE.txt' shutil.copy(license, os.path.join(self.distinfo_dir, license_filename)) pymeta['extensions']['python.details']['document_names']['license'] = license_filename with open(metadata_json_path, "w") as metadata_json: json.dump(pymeta, metadata_json, sort_keys=True) adios(egginfo_path)
[ "def", "egg2dist", "(", "self", ",", "egginfo_path", ",", "distinfo_path", ")", ":", "def", "adios", "(", "p", ")", ":", "\"\"\"Appropriately delete directory, file or link.\"\"\"", "if", "os", ".", "path", ".", "exists", "(", "p", ")", "and", "not", "os", ".", "path", ".", "islink", "(", "p", ")", "and", "os", ".", "path", ".", "isdir", "(", "p", ")", ":", "shutil", ".", "rmtree", "(", "p", ")", "elif", "os", ".", "path", ".", "exists", "(", "p", ")", ":", "os", ".", "unlink", "(", "p", ")", "adios", "(", "distinfo_path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "egginfo_path", ")", ":", "# There is no egg-info. This is probably because the egg-info", "# file/directory is not named matching the distribution name used", "# to name the archive file. Check for this case and report", "# accordingly.", "import", "glob", "pat", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "egginfo_path", ")", ",", "'*.egg-info'", ")", "possible", "=", "glob", ".", "glob", "(", "pat", ")", "err", "=", "\"Egg metadata expected at %s but not found\"", "%", "(", "egginfo_path", ",", ")", "if", "possible", ":", "alt", "=", "os", ".", "path", ".", "basename", "(", "possible", "[", "0", "]", ")", "err", "+=", "\" (%s found - possible misnamed archive file?)\"", "%", "(", "alt", ",", ")", "raise", "ValueError", "(", "err", ")", "if", "os", ".", "path", ".", "isfile", "(", "egginfo_path", ")", ":", "# .egg-info is a single file", "pkginfo_path", "=", "egginfo_path", "pkg_info", "=", "self", ".", "_pkginfo_to_metadata", "(", "egginfo_path", ",", "egginfo_path", ")", "os", ".", "mkdir", "(", "distinfo_path", ")", "else", ":", "# .egg-info is a directory", "pkginfo_path", "=", "os", ".", "path", ".", "join", "(", "egginfo_path", ",", "'PKG-INFO'", ")", "pkg_info", "=", "self", ".", "_pkginfo_to_metadata", "(", "egginfo_path", ",", "pkginfo_path", ")", "# ignore common egg metadata that is useless to wheel", "shutil", ".", "copytree", "(", "egginfo_path", ",", "distinfo_path", ",", "ignore", "=", "lambda", "x", ",", "y", ":", "set", "(", "(", "'PKG-INFO'", ",", "'requires.txt'", ",", "'SOURCES.txt'", ",", "'not-zip-safe'", ",", ")", ")", ")", "# delete dependency_links if it is only whitespace", "dependency_links_path", "=", "os", ".", "path", ".", "join", "(", "distinfo_path", ",", "'dependency_links.txt'", ")", "with", "open", "(", "dependency_links_path", ",", "'r'", ")", "as", "dependency_links_file", ":", "dependency_links", "=", "dependency_links_file", ".", "read", "(", ")", ".", "strip", "(", ")", "if", "not", "dependency_links", ":", "adios", "(", "dependency_links_path", ")", "write_pkg_info", "(", "os", ".", "path", ".", "join", "(", "distinfo_path", ",", "'METADATA'", ")", ",", "pkg_info", ")", "# XXX deprecated. 
Still useful for current distribute/setuptools.", "metadata_path", "=", "os", ".", "path", ".", "join", "(", "distinfo_path", ",", "'METADATA'", ")", "self", ".", "add_requirements", "(", "metadata_path", ")", "# XXX intentionally a different path than the PEP.", "metadata_json_path", "=", "os", ".", "path", ".", "join", "(", "distinfo_path", ",", "'metadata.json'", ")", "pymeta", "=", "pkginfo_to_dict", "(", "metadata_path", ",", "distribution", "=", "self", ".", "distribution", ")", "if", "'description'", "in", "pymeta", ":", "description_filename", "=", "'DESCRIPTION.rst'", "description_text", "=", "pymeta", ".", "pop", "(", "'description'", ")", "description_path", "=", "os", ".", "path", ".", "join", "(", "distinfo_path", ",", "description_filename", ")", "with", "open", "(", "description_path", ",", "\"wb\"", ")", "as", "description_file", ":", "description_file", ".", "write", "(", "description_text", ".", "encode", "(", "'utf-8'", ")", ")", "pymeta", "[", "'extensions'", "]", "[", "'python.details'", "]", "[", "'document_names'", "]", "[", "'description'", "]", "=", "description_filename", "# XXX heuristically copy any LICENSE/LICENSE.txt?", "license", "=", "self", ".", "license_file", "(", ")", "if", "license", ":", "license_filename", "=", "'LICENSE.txt'", "shutil", ".", "copy", "(", "license", ",", "os", ".", "path", ".", "join", "(", "self", ".", "distinfo_dir", ",", "license_filename", ")", ")", "pymeta", "[", "'extensions'", "]", "[", "'python.details'", "]", "[", "'document_names'", "]", "[", "'license'", "]", "=", "license_filename", "with", "open", "(", "metadata_json_path", ",", "\"w\"", ")", "as", "metadata_json", ":", "json", ".", "dump", "(", "pymeta", ",", "metadata_json", ",", "sort_keys", "=", "True", ")", "adios", "(", "egginfo_path", ")" ]
Convert an .egg-info directory into a .dist-info directory
[ "Convert", "an", ".", "egg", "-", "info", "directory", "into", "a", ".", "dist", "-", "info", "directory" ]
python
test
ahwillia/tensortools
tensortools/visualization.py
https://github.com/ahwillia/tensortools/blob/f375633ec621caa96665a56205dcf932590d4a6e/tensortools/visualization.py#L116-L245
def plot_factors(U, plots='line', fig=None, axes=None, scatter_kw=dict(), line_kw=dict(), bar_kw=dict(), **kwargs):
    """Plots a KTensor.

    Note: Each keyword option is broadcast to all modes of the KTensor. For
    example, if `U` is a 3rd-order tensor (i.e. `U.ndim == 3`) then
    `plot_factors(U, plots=['line','bar','scatter'])` plots all factors for
    the first mode as a line plot, the second as a bar plot, and the third
    mode as a scatterplot. But, thanks to broadcasting semantics,
    `plot_factors(U, color='line')` produces line plots for each mode.

    Parameters
    ----------
    U : KTensor
        Kruskal tensor to be plotted.

    plots : str or list
        One of {'bar','line','scatter'} to specify the type of plot for each
        factor. The default is 'line'.
    fig : matplotlib Figure object
        If provided, add plots to the specified figure. The figure must have
        a sufficient number of axes objects.
    axes : 2d numpy array of matplotlib Axes objects
        If provided, add plots to the specified figure.
    scatter_kw : dict or sequence of dicts
        Keyword arguments provided to scatterplots. If a single dict is
        provided, these options are broadcasted to all modes.
    line_kw : dict or sequence of dicts
        Keyword arguments provided to line plots. If a single dict is
        provided, these options are broadcasted to all modes.
    bar_kw : dict or sequence of dicts
        Keyword arguments provided to bar plots. If a single dict is
        provided, these options are broadcasted to all modes.
    **kwargs : dict
        Additional keyword parameters are passed to the `subplots(...)`
        function to specify options such as `figsize` and `gridspec_kw`.
        See `matplotlib.pyplot.subplots(...)` documentation for more info.
    """

    # ~~~~~~~~~~~~~
    # PARSE OPTIONS
    # ~~~~~~~~~~~~~
    kwargs.setdefault('figsize', (8, U.rank))

    # parse optional inputs
    plots = _broadcast_arg(U, plots, str, 'plots')
    bar_kw = _broadcast_arg(U, bar_kw, dict, 'bar_kw')
    line_kw = _broadcast_arg(U, line_kw, dict, 'line_kw')
    scatter_kw = _broadcast_arg(U, scatter_kw, dict, 'scatter_kw')

    # default scatterplot options
    for sckw in scatter_kw:
        sckw.setdefault('edgecolor', 'none')
        sckw.setdefault('s', 10)

    # ~~~~~~~~~~~~~~
    # SETUP SUBPLOTS
    # ~~~~~~~~~~~~~~
    if fig is None and axes is None:
        fig, axes = plt.subplots(U.rank, U.ndim, **kwargs)
        # make sure axes is a 2d-array
        if U.rank == 1:
            axes = axes[None, :]

    # if axes are passed in, identify figure
    elif fig is None:
        fig = axes[0, 0].get_figure()

    # if figure is passed, identify axes
    else:
        axes = np.array(fig.get_axes(), dtype=object).reshape(U.rank, U.ndim)

    # main loop, plot each factor
    plot_obj = np.empty((U.rank, U.ndim), dtype=object)
    for r in range(U.rank):
        for i, f in enumerate(U):

            # start plots at 1 instead of zero
            x = np.arange(1, f.shape[0]+1)

            # determine type of plot
            if plots[i] == 'bar':
                plot_obj[r, i] = axes[r, i].bar(x, f[:, r], **bar_kw[i])
                axes[r, i].set_xlim(0, f.shape[0]+1)
            elif plots[i] == 'scatter':
                plot_obj[r, i] = axes[r, i].scatter(x, f[:, r], **scatter_kw[i])
                axes[r, i].set_xlim(0, f.shape[0])
            elif plots[i] == 'line':
                plot_obj[r, i] = axes[r, i].plot(f[:, r], '-', **line_kw[i])
                axes[r, i].set_xlim(0, f.shape[0])
            else:
                raise ValueError('invalid plot type')

            # format axes
            axes[r, i].locator_params(nbins=4)
            axes[r, i].spines['top'].set_visible(False)
            axes[r, i].spines['right'].set_visible(False)
            axes[r, i].xaxis.set_tick_params(direction='out')
            axes[r, i].yaxis.set_tick_params(direction='out')
            axes[r, i].yaxis.set_ticks_position('left')
            axes[r, i].xaxis.set_ticks_position('bottom')

            # remove xticks on all but bottom row
            if r != U.rank-1:
                plt.setp(axes[r, i].get_xticklabels(), visible=False)

    # link y-axes within columns
    for i in range(U.ndim):
        yl = [a.get_ylim() for a in axes[:, i]]
        y0, y1 = min([y[0] for y in yl]), max([y[1] for y in yl])
        [a.set_ylim((y0, y1)) for a in axes[:, i]]

    # format y-ticks
    for r in range(U.rank):
        for i in range(U.ndim):
            # only two labels
            ymin, ymax = np.round(axes[r, i].get_ylim(), 2)
            axes[r, i].set_ylim((ymin, ymax))

            # remove decimals from labels
            if ymin.is_integer():
                ymin = int(ymin)
            if ymax.is_integer():
                ymax = int(ymax)

            # update plot
            axes[r, i].set_yticks([ymin, ymax])

    plt.tight_layout()

    return fig, axes, plot_obj
[ "def", "plot_factors", "(", "U", ",", "plots", "=", "'line'", ",", "fig", "=", "None", ",", "axes", "=", "None", ",", "scatter_kw", "=", "dict", "(", ")", ",", "line_kw", "=", "dict", "(", ")", ",", "bar_kw", "=", "dict", "(", ")", ",", "*", "*", "kwargs", ")", ":", "# ~~~~~~~~~~~~~", "# PARSE OPTIONS", "# ~~~~~~~~~~~~~", "kwargs", ".", "setdefault", "(", "'figsize'", ",", "(", "8", ",", "U", ".", "rank", ")", ")", "# parse optional inputs", "plots", "=", "_broadcast_arg", "(", "U", ",", "plots", ",", "str", ",", "'plots'", ")", "bar_kw", "=", "_broadcast_arg", "(", "U", ",", "bar_kw", ",", "dict", ",", "'bar_kw'", ")", "line_kw", "=", "_broadcast_arg", "(", "U", ",", "line_kw", ",", "dict", ",", "'line_kw'", ")", "scatter_kw", "=", "_broadcast_arg", "(", "U", ",", "scatter_kw", ",", "dict", ",", "'scatter_kw'", ")", "# default scatterplot options", "for", "sckw", "in", "scatter_kw", ":", "sckw", ".", "setdefault", "(", "'edgecolor'", ",", "'none'", ")", "sckw", ".", "setdefault", "(", "'s'", ",", "10", ")", "# ~~~~~~~~~~~~~~", "# SETUP SUBPLOTS", "# ~~~~~~~~~~~~~~", "if", "fig", "is", "None", "and", "axes", "is", "None", ":", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "U", ".", "rank", ",", "U", ".", "ndim", ",", "*", "*", "kwargs", ")", "# make sure axes is a 2d-array", "if", "U", ".", "rank", "==", "1", ":", "axes", "=", "axes", "[", "None", ",", ":", "]", "# if axes are passed in, identify figure", "elif", "fig", "is", "None", ":", "fig", "=", "axes", "[", "0", ",", "0", "]", ".", "get_figure", "(", ")", "# if figure is passed, identify axes", "else", ":", "axes", "=", "np", ".", "array", "(", "fig", ".", "get_axes", "(", ")", ",", "dtype", "=", "object", ")", ".", "reshape", "(", "U", ".", "rank", ",", "U", ".", "ndim", ")", "# main loop, plot each factor", "plot_obj", "=", "np", ".", "empty", "(", "(", "U", ".", "rank", ",", "U", ".", "ndim", ")", ",", "dtype", "=", "object", ")", "for", "r", "in", "range", "(", "U", ".", "rank", ")", ":", "for", "i", ",", "f", "in", "enumerate", "(", "U", ")", ":", "# start plots at 1 instead of zero", "x", "=", "np", ".", "arange", "(", "1", ",", "f", ".", "shape", "[", "0", "]", "+", "1", ")", "# determine type of plot", "if", "plots", "[", "i", "]", "==", "'bar'", ":", "plot_obj", "[", "r", ",", "i", "]", "=", "axes", "[", "r", ",", "i", "]", ".", "bar", "(", "x", ",", "f", "[", ":", ",", "r", "]", ",", "*", "*", "bar_kw", "[", "i", "]", ")", "axes", "[", "r", ",", "i", "]", ".", "set_xlim", "(", "0", ",", "f", ".", "shape", "[", "0", "]", "+", "1", ")", "elif", "plots", "[", "i", "]", "==", "'scatter'", ":", "plot_obj", "[", "r", ",", "i", "]", "=", "axes", "[", "r", ",", "i", "]", ".", "scatter", "(", "x", ",", "f", "[", ":", ",", "r", "]", ",", "*", "*", "scatter_kw", "[", "i", "]", ")", "axes", "[", "r", ",", "i", "]", ".", "set_xlim", "(", "0", ",", "f", ".", "shape", "[", "0", "]", ")", "elif", "plots", "[", "i", "]", "==", "'line'", ":", "plot_obj", "[", "r", ",", "i", "]", "=", "axes", "[", "r", ",", "i", "]", ".", "plot", "(", "f", "[", ":", ",", "r", "]", ",", "'-'", ",", "*", "*", "line_kw", "[", "i", "]", ")", "axes", "[", "r", ",", "i", "]", ".", "set_xlim", "(", "0", ",", "f", ".", "shape", "[", "0", "]", ")", "else", ":", "raise", "ValueError", "(", "'invalid plot type'", ")", "# format axes", "axes", "[", "r", ",", "i", "]", ".", "locator_params", "(", "nbins", "=", "4", ")", "axes", "[", "r", ",", "i", "]", ".", "spines", "[", "'top'", "]", ".", "set_visible", "(", "False", ")", "axes", "[", "r", ",", "i", "]", ".", 
"spines", "[", "'right'", "]", ".", "set_visible", "(", "False", ")", "axes", "[", "r", ",", "i", "]", ".", "xaxis", ".", "set_tick_params", "(", "direction", "=", "'out'", ")", "axes", "[", "r", ",", "i", "]", ".", "yaxis", ".", "set_tick_params", "(", "direction", "=", "'out'", ")", "axes", "[", "r", ",", "i", "]", ".", "yaxis", ".", "set_ticks_position", "(", "'left'", ")", "axes", "[", "r", ",", "i", "]", ".", "xaxis", ".", "set_ticks_position", "(", "'bottom'", ")", "# remove xticks on all but bottom row", "if", "r", "!=", "U", ".", "rank", "-", "1", ":", "plt", ".", "setp", "(", "axes", "[", "r", ",", "i", "]", ".", "get_xticklabels", "(", ")", ",", "visible", "=", "False", ")", "# link y-axes within columns", "for", "i", "in", "range", "(", "U", ".", "ndim", ")", ":", "yl", "=", "[", "a", ".", "get_ylim", "(", ")", "for", "a", "in", "axes", "[", ":", ",", "i", "]", "]", "y0", ",", "y1", "=", "min", "(", "[", "y", "[", "0", "]", "for", "y", "in", "yl", "]", ")", ",", "max", "(", "[", "y", "[", "1", "]", "for", "y", "in", "yl", "]", ")", "[", "a", ".", "set_ylim", "(", "(", "y0", ",", "y1", ")", ")", "for", "a", "in", "axes", "[", ":", ",", "i", "]", "]", "# format y-ticks", "for", "r", "in", "range", "(", "U", ".", "rank", ")", ":", "for", "i", "in", "range", "(", "U", ".", "ndim", ")", ":", "# only two labels", "ymin", ",", "ymax", "=", "np", ".", "round", "(", "axes", "[", "r", ",", "i", "]", ".", "get_ylim", "(", ")", ",", "2", ")", "axes", "[", "r", ",", "i", "]", ".", "set_ylim", "(", "(", "ymin", ",", "ymax", ")", ")", "# remove decimals from labels", "if", "ymin", ".", "is_integer", "(", ")", ":", "ymin", "=", "int", "(", "ymin", ")", "if", "ymax", ".", "is_integer", "(", ")", ":", "ymax", "=", "int", "(", "ymax", ")", "# update plot", "axes", "[", "r", ",", "i", "]", ".", "set_yticks", "(", "[", "ymin", ",", "ymax", "]", ")", "plt", ".", "tight_layout", "(", ")", "return", "fig", ",", "axes", ",", "plot_obj" ]
Plots a KTensor.

Note: Each keyword option is broadcast to all modes of the KTensor. For
example, if `U` is a 3rd-order tensor (i.e. `U.ndim == 3`) then
`plot_factors(U, plots=['line','bar','scatter'])` plots all factors for
the first mode as a line plot, the second as a bar plot, and the third
mode as a scatterplot. But, thanks to broadcasting semantics,
`plot_factors(U, color='line')` produces line plots for each mode.

Parameters
----------
U : KTensor
    Kruskal tensor to be plotted.

plots : str or list
    One of {'bar','line','scatter'} to specify the type of plot for each
    factor. The default is 'line'.
fig : matplotlib Figure object
    If provided, add plots to the specified figure. The figure must have
    a sufficient number of axes objects.
axes : 2d numpy array of matplotlib Axes objects
    If provided, add plots to the specified figure.
scatter_kw : dict or sequence of dicts
    Keyword arguments provided to scatterplots. If a single dict is
    provided, these options are broadcasted to all modes.
line_kw : dict or sequence of dicts
    Keyword arguments provided to line plots. If a single dict is
    provided, these options are broadcasted to all modes.
bar_kw : dict or sequence of dicts
    Keyword arguments provided to bar plots. If a single dict is
    provided, these options are broadcasted to all modes.
**kwargs : dict
    Additional keyword parameters are passed to the `subplots(...)`
    function to specify options such as `figsize` and `gridspec_kw`.
    See `matplotlib.pyplot.subplots(...)` documentation for more info.
[ "Plots", "a", "KTensor", "." ]
python
train
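A hedged usage sketch for the plotting helper above; `U` is assumed to be a 3rd-order KTensor obtained from a decomposition fitted elsewhere, and the styling keywords are illustrative:

# U: a tensortools KTensor with U.ndim == 3 (assumed to exist already)
fig, axes, plot_obj = plot_factors(
    U,
    plots=['line', 'bar', 'scatter'],   # one plot type per tensor mode
    line_kw=dict(color='k'),
    scatter_kw=dict(s=20),
    figsize=(10, U.rank),               # forwarded to plt.subplots via **kwargs
)
fig.savefig('factors.png', dpi=150)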
ucsb-cs/submit
submit/models.py
https://github.com/ucsb-cs/submit/blob/92810c81255a4fc6bbebac1ac8aae856fd576ffe/submit/models.py#L488-L497
def recent_submissions(self):
    """Generate a list of the most recent submissions for each user.

    Only yields a submission for a user if they've made one.

    """
    for group in self.groups:
        submission = Submission.most_recent_submission(self, group)
        if submission:
            yield submission
[ "def", "recent_submissions", "(", "self", ")", ":", "for", "group", "in", "self", ".", "groups", ":", "submission", "=", "Submission", ".", "most_recent_submission", "(", "self", ",", "group", ")", "if", "submission", ":", "yield", "submission" ]
Generate a list of the most recent submissions for each user. Only yields a submission for a user if they've made one.
[ "Generate", "a", "list", "of", "the", "most", "recent", "submissions", "for", "each", "user", "." ]
python
train
andycasey/sick
sick/models/base.py
https://github.com/andycasey/sick/blob/6c37686182794c4cafea45abf7062b30b789b1a2/sick/models/base.py#L82-L115
def save(self, filename, clobber=False, **kwargs):
    """
    Save the model configuration to a YAML-formatted file.

    :param filename: The filename to save the model configuration to.
    :type filename: str
    :param clobber: [optional] Clobber the filename if it already exists.
    :type clobber: bool
    :returns: True
    :raises: IOError
    """

    if os.path.exists(filename) and not clobber:
        raise IOError("filename {} already exists".format(filename))

    kwds = {
        "allow_unicode": True,
        "default_flow_style": False
    }
    kwds.update(kwargs)

    with open(filename, "w+") as fp:
        yaml.safe_dump(self._configuration, stream=fp, **kwds)

    return True
[ "def", "save", "(", "self", ",", "filename", ",", "clobber", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", "and", "not", "clobber", ":", "raise", "IOError", "(", "\"filename {} already exists\"", ".", "format", "(", "filename", ")", ")", "kwds", "=", "{", "\"allow_unicode\"", ":", "True", ",", "\"default_flow_style\"", ":", "False", "}", "kwds", ".", "update", "(", "kwargs", ")", "with", "open", "(", "filename", ",", "\"w+\"", ")", "as", "fp", ":", "yaml", ".", "safe_dump", "(", "self", ".", "_configuration", ",", "stream", "=", "fp", ",", "*", "*", "kwds", ")", "return", "True" ]
Save the model configuration to a YAML-formatted file.

:param filename: The filename to save the model configuration to.
:type filename: str
:param clobber: [optional] Clobber the filename if it already exists.
:type clobber: bool
:returns: True
:raises: IOError
[ "Save", "the", "model", "configuration", "to", "a", "YAML", "-", "formatted", "file", "." ]
python
train
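A short usage sketch for the save method above; the `model` instance and file name are assumptions, not part of the original record:

try:
    model.save("model.yaml")               # refuses to overwrite an existing file
except IOError:
    model.save("model.yaml", clobber=True)  # explicitly allow overwriting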
trailofbits/manticore
manticore/core/workspace.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/core/workspace.py#L119-L129
def save_stream(self, key, binary=False):
    """
    Return a managed file-like object into which the calling code can write
    arbitrary data.

    :param key:
    :return: A managed stream-like object
    """
    s = io.BytesIO() if binary else io.StringIO()
    yield s
    self.save_value(key, s.getvalue())
[ "def", "save_stream", "(", "self", ",", "key", ",", "binary", "=", "False", ")", ":", "s", "=", "io", ".", "BytesIO", "(", ")", "if", "binary", "else", "io", ".", "StringIO", "(", ")", "yield", "s", "self", ".", "save_value", "(", "key", ",", "s", ".", "getvalue", "(", ")", ")" ]
Return a managed file-like object into which the calling code can write
arbitrary data.

:param key:
:return: A managed stream-like object
[ "Return", "a", "managed", "file", "-", "like", "object", "into", "which", "the", "calling", "code", "can", "write", "arbitrary", "data", "." ]
python
valid
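The yield-then-save structure above suggests the method is used as a context manager; a hedged usage sketch, with the store object and key names made up:

# Assumes save_stream is decorated with contextlib.contextmanager elsewhere.
with store.save_stream('some_key') as stream:
    stream.write('arbitrary text\n')
# on exit, the accumulated contents are passed to save_value('some_key', ...)

with store.save_stream('raw_bytes', binary=True) as stream:
    stream.write(b'\x00\x01')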
saltstack/salt
salt/utils/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2927-L2944
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    if not disk_group.ssd.canonicalName == cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd]
    if sorted(non_ssd_disks) != sorted(capacity_disk_ids):
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(sorted(non_ssd_disks), sorted(capacity_disk_ids)))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
[ "def", "_check_disks_in_diskgroup", "(", "disk_group", ",", "cache_disk_id", ",", "capacity_disk_ids", ")", ":", "if", "not", "disk_group", ".", "ssd", ".", "canonicalName", "==", "cache_disk_id", ":", "raise", "salt", ".", "exceptions", ".", "ArgumentValueError", "(", "'Incorrect diskgroup cache disk; got id: \\'{0}\\'; expected id: '", "'\\'{1}\\''", ".", "format", "(", "disk_group", ".", "ssd", ".", "canonicalName", ",", "cache_disk_id", ")", ")", "non_ssd_disks", "=", "[", "d", ".", "canonicalName", "for", "d", "in", "disk_group", ".", "nonSsd", "]", "if", "sorted", "(", "non_ssd_disks", ")", "!=", "sorted", "(", "capacity_disk_ids", ")", ":", "raise", "salt", ".", "exceptions", ".", "ArgumentValueError", "(", "'Incorrect capacity disks; got ids: \\'{0}\\'; expected ids: \\'{1}\\''", "''", ".", "format", "(", "sorted", "(", "non_ssd_disks", ")", ",", "sorted", "(", "capacity_disk_ids", ")", ")", ")", "log", ".", "trace", "(", "'Checked disks in diskgroup with cache disk id \\'%s\\''", ",", "cache_disk_id", ")", "return", "True" ]
Checks that the disks in a disk group are as expected and raises CheckError exceptions if the check fails
[ "Checks", "that", "the", "disks", "in", "a", "disk", "group", "are", "as", "expected", "and", "raises", "CheckError", "exceptions", "if", "the", "check", "fails" ]
python
train
authomatic/authomatic
authomatic/core.py
https://github.com/authomatic/authomatic/blob/90a9ce60cc405ae8a2bf5c3713acd5d78579a04e/authomatic/core.py#L434-L440
def _signature(self, *parts):
    """
    Creates signature for the session.
    """

    signature = hmac.new(six.b(self.secret), digestmod=hashlib.sha1)
    signature.update(six.b('|'.join(parts)))
    return signature.hexdigest()
[ "def", "_signature", "(", "self", ",", "*", "parts", ")", ":", "signature", "=", "hmac", ".", "new", "(", "six", ".", "b", "(", "self", ".", "secret", ")", ",", "digestmod", "=", "hashlib", ".", "sha1", ")", "signature", ".", "update", "(", "six", ".", "b", "(", "'|'", ".", "join", "(", "parts", ")", ")", ")", "return", "signature", ".", "hexdigest", "(", ")" ]
Creates signature for the session.
[ "Creates", "signature", "for", "the", "session", "." ]
python
test
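A self-contained sketch of the HMAC-SHA1 scheme the method above uses, built only from the standard library; the secret and parts are made-up values:

import hashlib
import hmac

secret = b"example-secret"                    # hypothetical session secret
parts = ("csrf", "1463539434", "payload")     # hypothetical signed parts

sig = hmac.new(secret, digestmod=hashlib.sha1)
sig.update("|".join(parts).encode("utf-8"))
print(sig.hexdigest())                        # 40-character hex signature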
Tinche/cattrs
src/cattr/multistrategy_dispatch.py
https://github.com/Tinche/cattrs/blob/481bc9bdb69b2190d699b54f331c8c5c075506d5/src/cattr/multistrategy_dispatch.py#L46-L52
def register_func_list(self, func_and_handler):
    """ register a function to determine if the handle
    should be used for the type
    """
    for func, handler in func_and_handler:
        self._function_dispatch.register(func, handler)
    self.dispatch.cache_clear()
[ "def", "register_func_list", "(", "self", ",", "func_and_handler", ")", ":", "for", "func", ",", "handler", "in", "func_and_handler", ":", "self", ".", "_function_dispatch", ".", "register", "(", "func", ",", "handler", ")", "self", ".", "dispatch", ".", "cache_clear", "(", ")" ]
register a function to determine if the handle should be used for the type
[ "register", "a", "function", "to", "determine", "if", "the", "handle", "should", "be", "used", "for", "the", "type" ]
python
train
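A hedged sketch of registering predicate/handler pairs with the dispatcher above; the dispatcher instance, predicate, and handler names are illustrative:

def is_attrs_class(cls):
    # predicate: True when the handler below should be used for cls
    return hasattr(cls, "__attrs_attrs__")

def handle_attrs_class(obj):
    return {"kind": "attrs", "value": obj}

# `dispatch` is assumed to be a MultiStrategyDispatch instance.
dispatch.register_func_list([
    (is_attrs_class, handle_attrs_class),
])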
python-cmd2/cmd2
cmd2/utils.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/utils.py#L322-L324
def getvalue(self) -> str:
    """Get the internal contents as a str"""
    return self.buffer.byte_buf.decode(encoding=self.encoding, errors=self.errors)
[ "def", "getvalue", "(", "self", ")", "->", "str", ":", "return", "self", ".", "buffer", ".", "byte_buf", ".", "decode", "(", "encoding", "=", "self", ".", "encoding", ",", "errors", "=", "self", ".", "errors", ")" ]
Get the internal contents as a str
[ "Get", "the", "internal", "contents", "as", "a", "str" ]
python
train
Clinical-Genomics/scout
scout/parse/hpo.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/parse/hpo.py#L42-L64
def parse_hpo_disease(hpo_line):
    """Parse hpo disease line

    Args:
        hpo_line(str)
    """
    hpo_line = hpo_line.rstrip().split('\t')
    hpo_info = {}
    disease = hpo_line[0].split(':')

    hpo_info['source'] = disease[0]
    hpo_info['disease_nr'] = int(disease[1])
    hpo_info['hgnc_symbol'] = None
    hpo_info['hpo_term'] = None

    if len(hpo_line) >= 3:
        hpo_info['hgnc_symbol'] = hpo_line[2]

    if len(hpo_line) >= 4:
        hpo_info['hpo_term'] = hpo_line[3]

    return hpo_info
[ "def", "parse_hpo_disease", "(", "hpo_line", ")", ":", "hpo_line", "=", "hpo_line", ".", "rstrip", "(", ")", ".", "split", "(", "'\\t'", ")", "hpo_info", "=", "{", "}", "disease", "=", "hpo_line", "[", "0", "]", ".", "split", "(", "':'", ")", "hpo_info", "[", "'source'", "]", "=", "disease", "[", "0", "]", "hpo_info", "[", "'disease_nr'", "]", "=", "int", "(", "disease", "[", "1", "]", ")", "hpo_info", "[", "'hgnc_symbol'", "]", "=", "None", "hpo_info", "[", "'hpo_term'", "]", "=", "None", "if", "len", "(", "hpo_line", ")", ">=", "3", ":", "hpo_info", "[", "'hgnc_symbol'", "]", "=", "hpo_line", "[", "2", "]", "if", "len", "(", "hpo_line", ")", ">=", "4", ":", "hpo_info", "[", "'hpo_term'", "]", "=", "hpo_line", "[", "3", "]", "return", "hpo_info" ]
Parse hpo disease line

Args:
    hpo_line(str)
[ "Parse", "hpo", "disease", "line", "Args", ":", "hpo_line", "(", "str", ")" ]
python
test
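A quick illustration of the tab-separated input the parser above expects; the line below is a made-up example, not taken from a real HPO file:

line = "OMIM:600920\t3953\tLEPR"   # hypothetical: disease id, gene id, gene symbol
info = parse_hpo_disease(line)
# info == {'source': 'OMIM', 'disease_nr': 600920,
#          'hgnc_symbol': 'LEPR', 'hpo_term': None}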
carljm/django-adminfiles
adminfiles/flickr.py
https://github.com/carljm/django-adminfiles/blob/b01dc7be266305d575c11d5ff9a37ccac04a78c2/adminfiles/flickr.py#L436-L441
def _general_getattr(self, var):
    """Generic get attribute function."""
    if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
            and not self.__loaded:
        self._load_properties()
    return getattr(self, "_%s__%s" % (self.__class__.__name__, var))
[ "def", "_general_getattr", "(", "self", ",", "var", ")", ":", "if", "getattr", "(", "self", ",", "\"_%s__%s\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "var", ")", ")", "is", "None", "and", "not", "self", ".", "__loaded", ":", "self", ".", "_load_properties", "(", ")", "return", "getattr", "(", "self", ",", "\"_%s__%s\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "var", ")", ")" ]
Generic get attribute function.
[ "Generic", "get", "attribute", "function", "." ]
python
train