Dataset schema (each record below is one row with these fields):

    field             type     range / classes
    repo              string   length 7 to 54
    path              string   length 4 to 192
    url               string   length 87 to 284
    code              string   length 78 to 104k
    code_tokens       list
    docstring         string   length 1 to 46.9k
    docstring_tokens  list
    string   1 distinct value
    partition         string   3 distinct values
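As a minimal sketch of how one row maps onto this schema, here is the first record below expressed as a Python dict (the long code, docstring, and token fields are omitted for brevity; the field names are taken from the schema above):

    record = {
        "repo": "ask/carrot",
        "path": "carrot/backends/pyamqplib.py",
        "url": "https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/pyamqplib.py#L221-L234",
        "language": "python",
        "partition": "train",
        # "code", "code_tokens", "docstring", "docstring_tokens" omitted here
    }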
ask/carrot
carrot/backends/pyamqplib.py
https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/pyamqplib.py#L221-L234
def queue_exists(self, queue): """Check if a queue has been declared. :rtype: bool """ try: self.channel.queue_declare(queue=queue, passive=True) except AMQPChannelException, e: if e.amqp_reply_code == 404: return False raise e else: return True
[ "def", "queue_exists", "(", "self", ",", "queue", ")", ":", "try", ":", "self", ".", "channel", ".", "queue_declare", "(", "queue", "=", "queue", ",", "passive", "=", "True", ")", "except", "AMQPChannelException", ",", "e", ":", "if", "e", ".", "amqp_reply_code", "==", "404", ":", "return", "False", "raise", "e", "else", ":", "return", "True" ]
Check if a queue has been declared. :rtype: bool
[ "Check", "if", "a", "queue", "has", "been", "declared", "." ]
python
train
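A hedged usage sketch for the record above. carrot is a Python 2-era library; the broker settings are placeholders, and the backend wiring follows carrot's BaseBackend(connection) convention:

    from carrot.connection import BrokerConnection
    from carrot.backends.pyamqplib import Backend

    # placeholder broker credentials; a reachable AMQP broker is assumed
    connection = BrokerConnection(hostname="localhost", userid="guest",
                                  password="guest", virtual_host="/")
    backend = Backend(connection)
    if not backend.queue_exists("tasks"):
        print("queue 'tasks' has not been declared yet")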
ploneintranet/ploneintranet.workspace
src/ploneintranet/workspace/adapters.py
https://github.com/ploneintranet/ploneintranet.workspace/blob/a4fc7a5c61f9c6d4d4ad25478ff5250f342ffbba/src/ploneintranet/workspace/adapters.py#L46-L57
def group_for_policy(self, policy=None): """ Lookup the collective.workspace usergroup corresponding to the given policy :param policy: The value of the policy to lookup, defaults to the current policy :type policy: str """ if policy is None: policy = self.context.participant_policy return "%s:%s" % (policy.title(), self.context.UID())
[ "def", "group_for_policy", "(", "self", ",", "policy", "=", "None", ")", ":", "if", "policy", "is", "None", ":", "policy", "=", "self", ".", "context", ".", "participant_policy", "return", "\"%s:%s\"", "%", "(", "policy", ".", "title", "(", ")", ",", "self", ".", "context", ".", "UID", "(", ")", ")" ]
Lookup the collective.workspace usergroup corresponding to the given policy :param policy: The value of the policy to lookup, defaults to the current policy :type policy: str
[ "Lookup", "the", "collective", ".", "workspace", "usergroup", "corresponding", "to", "the", "given", "policy" ]
python
train
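The returned group name format can be illustrated standalone, without a Plone workspace context (the policy and UID values here are made up):

    policy = "consumers"
    uid = "0123456789abcdef"
    group_name = "%s:%s" % (policy.title(), uid)
    assert group_name == "Consumers:0123456789abcdef"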
scot-dev/scot
doc/make.py
https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/doc/make.py#L41-L105
def copytree(src, dst, symlinks=False, ignore=None): """Recursively copy a directory tree using copy2(). The destination directory must not already exist. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. The optional ignore argument is a callable. If given, it is called with the `src` parameter, which is the directory being visited by copytree(), and `names` which is the list of `src` contents, as returned by os.listdir(): callable(src, names) -> ignored_names Since copytree() is called recursively, the callable will be called once for each directory that is copied. It returns a list of names relative to the `src` directory that should not be copied. XXX Consider this example code rather than the ultimate tool. """ from shutil import copy2, Error, copystat names = os.listdir(src) if ignore is not None: ignored_names = ignore(src, names) else: ignored_names = set() os.makedirs(dst) errors = [] for name in names: if name in ignored_names: continue srcname = os.path.join(src, name) dstname = os.path.join(dst, name) try: if symlinks and os.path.islink(srcname): linkto = os.readlink(srcname) os.symlink(linkto, dstname) elif os.path.isdir(srcname): copytree(srcname, dstname, symlinks, ignore) else: # Will raise a SpecialFileError for unsupported file types copy2(srcname, dstname) # catch the Error from the recursive copytree so that we can # continue with other files except Error as err: errors.extend(err.args[0]) except EnvironmentError as why: errors.append((srcname, dstname, str(why))) try: copystat(src, dst) except OSError as why: if WindowsError is not None and isinstance(why, WindowsError): # Copying file access times may fail on Windows pass else: errors.extend((src, dst, str(why))) if errors: raise Error(errors)
[ "def", "copytree", "(", "src", ",", "dst", ",", "symlinks", "=", "False", ",", "ignore", "=", "None", ")", ":", "from", "shutil", "import", "copy2", ",", "Error", ",", "copystat", "names", "=", "os", ".", "listdir", "(", "src", ")", "if", "ignore", "is", "not", "None", ":", "ignored_names", "=", "ignore", "(", "src", ",", "names", ")", "else", ":", "ignored_names", "=", "set", "(", ")", "os", ".", "makedirs", "(", "dst", ")", "errors", "=", "[", "]", "for", "name", "in", "names", ":", "if", "name", "in", "ignored_names", ":", "continue", "srcname", "=", "os", ".", "path", ".", "join", "(", "src", ",", "name", ")", "dstname", "=", "os", ".", "path", ".", "join", "(", "dst", ",", "name", ")", "try", ":", "if", "symlinks", "and", "os", ".", "path", ".", "islink", "(", "srcname", ")", ":", "linkto", "=", "os", ".", "readlink", "(", "srcname", ")", "os", ".", "symlink", "(", "linkto", ",", "dstname", ")", "elif", "os", ".", "path", ".", "isdir", "(", "srcname", ")", ":", "copytree", "(", "srcname", ",", "dstname", ",", "symlinks", ",", "ignore", ")", "else", ":", "# Will raise a SpecialFileError for unsupported file types", "copy2", "(", "srcname", ",", "dstname", ")", "# catch the Error from the recursive copytree so that we can", "# continue with other files", "except", "Error", "as", "err", ":", "errors", ".", "extend", "(", "err", ".", "args", "[", "0", "]", ")", "except", "EnvironmentError", "as", "why", ":", "errors", ".", "append", "(", "(", "srcname", ",", "dstname", ",", "str", "(", "why", ")", ")", ")", "try", ":", "copystat", "(", "src", ",", "dst", ")", "except", "OSError", "as", "why", ":", "if", "WindowsError", "is", "not", "None", "and", "isinstance", "(", "why", ",", "WindowsError", ")", ":", "# Copying file access times may fail on Windows", "pass", "else", ":", "errors", ".", "extend", "(", "(", "src", ",", "dst", ",", "str", "(", "why", ")", ")", ")", "if", "errors", ":", "raise", "Error", "(", "errors", ")" ]
Recursively copy a directory tree using copy2(). The destination directory must not already exist. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. The optional ignore argument is a callable. If given, it is called with the `src` parameter, which is the directory being visited by copytree(), and `names` which is the list of `src` contents, as returned by os.listdir(): callable(src, names) -> ignored_names Since copytree() is called recursively, the callable will be called once for each directory that is copied. It returns a list of names relative to the `src` directory that should not be copied. XXX Consider this example code rather than the ultimate tool.
[ "Recursively", "copy", "a", "directory", "tree", "using", "copy2", "()", "." ]
python
train
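A usage sketch for the copytree above (the source and destination paths are placeholders). Note that, as the docstring says, the destination directory must not already exist; shutil.ignore_patterns from the standard library produces exactly the callable(src, names) -> ignored_names that the ignore argument expects:

    import shutil

    # 'doc/source' must exist, 'doc/_build_copy' must not exist yet
    copytree('doc/source', 'doc/_build_copy', symlinks=False,
             ignore=shutil.ignore_patterns('*.pyc', '.git'))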
tanghaibao/jcvi
jcvi/annotation/evm.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/evm.py#L168-L193
def pasa(args): """ %prog pasa pasa_db fastafile Run EVM in TIGR-only mode. """ p = OptionParser(pasa.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) pasa_db, fastafile = args termexons = "pasa.terminal_exons.gff3" if need_update(fastafile, termexons): cmd = "$ANNOT_DEVEL/PASA2/scripts/pasa_asmbls_to_training_set.dbi" cmd += ' -M "{0}:mysql.tigr.org" -p "access:access"'.format(pasa_db) cmd += ' -g {0}'.format(fastafile) sh(cmd) cmd = "$EVM/PasaUtils/retrieve_terminal_CDS_exons.pl" cmd += " trainingSetCandidates.fasta trainingSetCandidates.gff" sh(cmd, outfile=termexons) return termexons
[ "def", "pasa", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "pasa", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "pasa_db", ",", "fastafile", "=", "args", "termexons", "=", "\"pasa.terminal_exons.gff3\"", "if", "need_update", "(", "fastafile", ",", "termexons", ")", ":", "cmd", "=", "\"$ANNOT_DEVEL/PASA2/scripts/pasa_asmbls_to_training_set.dbi\"", "cmd", "+=", "' -M \"{0}:mysql.tigr.org\" -p \"access:access\"'", ".", "format", "(", "pasa_db", ")", "cmd", "+=", "' -g {0}'", ".", "format", "(", "fastafile", ")", "sh", "(", "cmd", ")", "cmd", "=", "\"$EVM/PasaUtils/retrieve_terminal_CDS_exons.pl\"", "cmd", "+=", "\" trainingSetCandidates.fasta trainingSetCandidates.gff\"", "sh", "(", "cmd", ",", "outfile", "=", "termexons", ")", "return", "termexons" ]
%prog pasa pasa_db fastafile Run EVM in TIGR-only mode.
[ "%prog", "pasa", "pasa_db", "fastafile" ]
python
train
flashashen/flange
flange/data.py
https://github.com/flashashen/flange/blob/67ebaf70e39887f65ce1163168d182a8e4c2774a/flange/data.py#L37-L52
def search(self, path_expression, mode=UXP, values=None, ifunc=lambda x: x): """ find matches for the given path expression in the data :param path_expression: path tuple or string :return: """ # keys = path_expression if isinstance(path_expression, six.string_types) else path_expression[-1] path_and_value_list = iterutils.search( self.data, path_expression=path_expression, required_values=values, exact=(mode[1] == "x")) return self.__return_value(path_and_value_list, mode, ifunc)
[ "def", "search", "(", "self", ",", "path_expression", ",", "mode", "=", "UXP", ",", "values", "=", "None", ",", "ifunc", "=", "lambda", "x", ":", "x", ")", ":", "# keys = path_expression if isinstance(path_expression, six.string_types) else path_expression[-1]", "path_and_value_list", "=", "iterutils", ".", "search", "(", "self", ".", "data", ",", "path_expression", "=", "path_expression", ",", "required_values", "=", "values", ",", "exact", "=", "(", "mode", "[", "1", "]", "==", "\"x\"", ")", ")", "return", "self", ".", "__return_value", "(", "path_and_value_list", ",", "mode", ",", "ifunc", ")" ]
find matches for the given path expression in the data :param path_expression: path tuple or string :return:
[ "find", "matches", "for", "the", "given", "path", "expression", "in", "the", "data" ]
python
train
LordGaav/python-chaos
chaos/amqp/rpc.py
https://github.com/LordGaav/python-chaos/blob/52cd29a6fd15693ee1e53786b93bcb23fbf84ddd/chaos/amqp/rpc.py#L323-L345
def rpc_reply(channel, original_headers, message, properties=None): """ Reply to a RPC request. This function will use the default exchange, to directly contact the reply_to queue. Parameters ---------- channel: object Properly initialized AMQP channel to use. original_headers: dict The headers of the originating message that caused this reply. message: string Message to reply with properties: dict Properties to set on message. This parameter is optional, but if set, at least the following options must be set: content_type: string - what content_type to specify, default is 'text/plain'. delivery_mode: int - what delivery_mode to use. By default message are not persistent, but this can be set by specifying PERSISTENT_MESSAGE . """ if not properties: properties = {} properties['correlation_id'] = original_headers.correlation_id publish_message(channel, '', original_headers.reply_to, message, properties)
[ "def", "rpc_reply", "(", "channel", ",", "original_headers", ",", "message", ",", "properties", "=", "None", ")", ":", "if", "not", "properties", ":", "properties", "=", "{", "}", "properties", "[", "'correlation_id'", "]", "=", "original_headers", ".", "correlation_id", "publish_message", "(", "channel", ",", "''", ",", "original_headers", ".", "reply_to", ",", "message", ",", "properties", ")" ]
Reply to a RPC request. This function will use the default exchange, to directly contact the reply_to queue. Parameters ---------- channel: object Properly initialized AMQP channel to use. original_headers: dict The headers of the originating message that caused this reply. message: string Message to reply with properties: dict Properties to set on message. This parameter is optional, but if set, at least the following options must be set: content_type: string - what content_type to specify, default is 'text/plain'. delivery_mode: int - what delivery_mode to use. By default message are not persistent, but this can be set by specifying PERSISTENT_MESSAGE .
[ "Reply", "to", "a", "RPC", "request", ".", "This", "function", "will", "use", "the", "default", "exchange", "to", "directly", "contact", "the", "reply_to", "queue", "." ]
python
train
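A minimal sketch of how rpc_reply might be used inside a consumer callback. The callback signature follows pika's basic_consume convention, handle() is a hypothetical worker function, and publish_message comes from the same chaos.amqp package as shown in the code above:

    def on_request(channel, method, properties, body):
        # 'properties' carries the reply_to and correlation_id set by the caller
        reply_body = handle(body)  # hypothetical request handler
        rpc_reply(channel, properties, reply_body,
                  properties={'content_type': 'text/plain'})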
CZ-NIC/python-rt
rt.py
https://github.com/CZ-NIC/python-rt/blob/e7a9f555e136708aec3317f857045145a2271e16/rt.py#L570-L640
def get_ticket(self, ticket_id): """ Fetch ticket by its ID. :param ticket_id: ID of demanded ticket :returns: Dictionary with key, value pairs for ticket with *ticket_id* or None if ticket does not exist. List of keys: * id * numerical_id * Queue * Owner * Creator * Subject * Status * Priority * InitialPriority * FinalPriority * Requestors * Cc * AdminCc * Created * Starts * Started * Due * Resolved * Told * TimeEstimated * TimeWorked * TimeLeft :raises UnexpectedMessageFormat: Unexpected format of returned message. """ msg = self.__request('ticket/{}/show'.format(str(ticket_id), )) status_code = self.__get_status_code(msg) if status_code == 200: pairs = {} msg = msg.split('\n') if (len(msg) > 2) and self.RE_PATTERNS['does_not_exist_pattern'].match(msg[2]): return None req_matching = [i for i, m in enumerate(msg) if self.RE_PATTERNS['requestors_pattern'].match(m)] req_id = req_matching[0] if req_matching else None if not req_id: raise UnexpectedMessageFormat('Missing line starting with `Requestors:`.') for i in range(req_id): if ': ' in msg[i]: header, content = self.split_header(msg[i]) pairs[header.strip()] = content.strip() requestors = [msg[req_id][12:]] req_id += 1 while (req_id < len(msg)) and (msg[req_id][:12] == ' ' * 12): requestors.append(msg[req_id][12:]) req_id += 1 pairs['Requestors'] = self.__normalize_list(requestors) for i in range(req_id, len(msg)): if ': ' in msg[i]: header, content = self.split_header(msg[i]) pairs[header.strip()] = content.strip() if 'Cc' in pairs: pairs['Cc'] = self.__normalize_list(pairs['Cc']) if 'AdminCc' in pairs: pairs['AdminCc'] = self.__normalize_list(pairs['AdminCc']) if 'id' not in pairs or not pairs['id'].startswith('ticket/'): raise UnexpectedMessageFormat('Response from RT didn\'t contain a valid ticket_id') else: pairs['numerical_id'] = pairs['id'].split('ticket/')[1] return pairs else: raise UnexpectedMessageFormat('Received status code is {:d} instead of 200.'.format(status_code))
[ "def", "get_ticket", "(", "self", ",", "ticket_id", ")", ":", "msg", "=", "self", ".", "__request", "(", "'ticket/{}/show'", ".", "format", "(", "str", "(", "ticket_id", ")", ",", ")", ")", "status_code", "=", "self", ".", "__get_status_code", "(", "msg", ")", "if", "status_code", "==", "200", ":", "pairs", "=", "{", "}", "msg", "=", "msg", ".", "split", "(", "'\\n'", ")", "if", "(", "len", "(", "msg", ")", ">", "2", ")", "and", "self", ".", "RE_PATTERNS", "[", "'does_not_exist_pattern'", "]", ".", "match", "(", "msg", "[", "2", "]", ")", ":", "return", "None", "req_matching", "=", "[", "i", "for", "i", ",", "m", "in", "enumerate", "(", "msg", ")", "if", "self", ".", "RE_PATTERNS", "[", "'requestors_pattern'", "]", ".", "match", "(", "m", ")", "]", "req_id", "=", "req_matching", "[", "0", "]", "if", "req_matching", "else", "None", "if", "not", "req_id", ":", "raise", "UnexpectedMessageFormat", "(", "'Missing line starting with `Requestors:`.'", ")", "for", "i", "in", "range", "(", "req_id", ")", ":", "if", "': '", "in", "msg", "[", "i", "]", ":", "header", ",", "content", "=", "self", ".", "split_header", "(", "msg", "[", "i", "]", ")", "pairs", "[", "header", ".", "strip", "(", ")", "]", "=", "content", ".", "strip", "(", ")", "requestors", "=", "[", "msg", "[", "req_id", "]", "[", "12", ":", "]", "]", "req_id", "+=", "1", "while", "(", "req_id", "<", "len", "(", "msg", ")", ")", "and", "(", "msg", "[", "req_id", "]", "[", ":", "12", "]", "==", "' '", "*", "12", ")", ":", "requestors", ".", "append", "(", "msg", "[", "req_id", "]", "[", "12", ":", "]", ")", "req_id", "+=", "1", "pairs", "[", "'Requestors'", "]", "=", "self", ".", "__normalize_list", "(", "requestors", ")", "for", "i", "in", "range", "(", "req_id", ",", "len", "(", "msg", ")", ")", ":", "if", "': '", "in", "msg", "[", "i", "]", ":", "header", ",", "content", "=", "self", ".", "split_header", "(", "msg", "[", "i", "]", ")", "pairs", "[", "header", ".", "strip", "(", ")", "]", "=", "content", ".", "strip", "(", ")", "if", "'Cc'", "in", "pairs", ":", "pairs", "[", "'Cc'", "]", "=", "self", ".", "__normalize_list", "(", "pairs", "[", "'Cc'", "]", ")", "if", "'AdminCc'", "in", "pairs", ":", "pairs", "[", "'AdminCc'", "]", "=", "self", ".", "__normalize_list", "(", "pairs", "[", "'AdminCc'", "]", ")", "if", "'id'", "not", "in", "pairs", "and", "not", "pairs", "[", "'id'", "]", ".", "startswitch", "(", "'ticket/'", ")", ":", "raise", "UnexpectedMessageFormat", "(", "'Response from RT didn\\'t contain a valid ticket_id'", ")", "else", ":", "pairs", "[", "'numerical_id'", "]", "=", "pairs", "[", "'id'", "]", ".", "split", "(", "'ticket/'", ")", "[", "1", "]", "return", "pairs", "else", ":", "raise", "UnexpectedMessageFormat", "(", "'Received status code is {:d} instead of 200.'", ".", "format", "(", "status_code", ")", ")" ]
Fetch ticket by its ID. :param ticket_id: ID of demanded ticket :returns: Dictionary with key, value pairs for ticket with *ticket_id* or None if ticket does not exist. List of keys: * id * numerical_id * Queue * Owner * Creator * Subject * Status * Priority * InitialPriority * FinalPriority * Requestors * Cc * AdminCc * Created * Starts * Started * Due * Resolved * Told * TimeEstimated * TimeWorked * TimeLeft :raises UnexpectedMessageFormat: Unexpected format of returned message.
[ "Fetch", "ticket", "by", "its", "ID", "." ]
python
train
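A usage sketch for get_ticket via python-rt's Rt client (the tracker URL and credentials are placeholders):

    import rt

    tracker = rt.Rt('https://rt.example.com/REST/1.0/', 'user', 'password')
    if tracker.login():
        ticket = tracker.get_ticket(1)
        # None means the ticket does not exist
        if ticket is not None:
            print(ticket['Subject'], ticket['numerical_id'])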
mailgun/talon
talon/signature/learning/dataset.py
https://github.com/mailgun/talon/blob/cdd84563dd329c4f887591807870d10015e0c7a7/talon/signature/learning/dataset.py#L48-L85
def parse_msg_sender(filename, sender_known=True): """Given a filename returns the sender and the message. Here the message is assumed to be a whole MIME message or just message body. >>> sender, msg = parse_msg_sender('msg.eml') >>> sender, msg = parse_msg_sender('msg_body') If you don't want to consider the sender's name in your classification algorithm: >>> parse_msg_sender(filename, False) """ import sys kwargs = {} if sys.version_info > (3, 0): kwargs["encoding"] = "utf8" sender, msg = None, None if os.path.isfile(filename) and not is_sender_filename(filename): with open(filename, **kwargs) as f: msg = f.read() sender = u'' if sender_known: sender_filename = build_sender_filename(filename) if os.path.exists(sender_filename): with open(sender_filename) as sender_file: sender = sender_file.read().strip() else: # if sender isn't found then the next line fails # and it is ok lines = msg.splitlines() for line in lines: match = re.match('From:(.*)', line) if match: sender = match.group(1) break return (sender, msg)
[ "def", "parse_msg_sender", "(", "filename", ",", "sender_known", "=", "True", ")", ":", "import", "sys", "kwargs", "=", "{", "}", "if", "sys", ".", "version_info", ">", "(", "3", ",", "0", ")", ":", "kwargs", "[", "\"encoding\"", "]", "=", "\"utf8\"", "sender", ",", "msg", "=", "None", ",", "None", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", "and", "not", "is_sender_filename", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "*", "*", "kwargs", ")", "as", "f", ":", "msg", "=", "f", ".", "read", "(", ")", "sender", "=", "u''", "if", "sender_known", ":", "sender_filename", "=", "build_sender_filename", "(", "filename", ")", "if", "os", ".", "path", ".", "exists", "(", "sender_filename", ")", ":", "with", "open", "(", "sender_filename", ")", "as", "sender_file", ":", "sender", "=", "sender_file", ".", "read", "(", ")", ".", "strip", "(", ")", "else", ":", "# if sender isn't found then the next line fails", "# and it is ok", "lines", "=", "msg", ".", "splitlines", "(", ")", "for", "line", "in", "lines", ":", "match", "=", "re", ".", "match", "(", "'From:(.*)'", ",", "line", ")", "if", "match", ":", "sender", "=", "match", ".", "group", "(", "1", ")", "break", "return", "(", "sender", ",", "msg", ")" ]
Given a filename returns the sender and the message. Here the message is assumed to be a whole MIME message or just message body. >>> sender, msg = parse_msg_sender('msg.eml') >>> sender, msg = parse_msg_sender('msg_body') If you don't want to consider the sender's name in your classification algorithm: >>> parse_msg_sender(filename, False)
[ "Given", "a", "filename", "returns", "the", "sender", "and", "the", "message", "." ]
python
train
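Usage follows the doctest in the docstring above; the filenames are placeholders and must exist on disk for the calls to return anything:

    from talon.signature.learning.dataset import parse_msg_sender

    # sender comes from a companion sender file or a 'From:' header in the body
    sender, msg = parse_msg_sender('msg.eml')
    # skip the sender lookup entirely; sender is returned as u''
    sender, msg = parse_msg_sender('msg_body', sender_known=False)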
ksbg/sparklanes
sparklanes/_framework/validation.py
https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/validation.py#L55-L105
def validate_params(cls, mtd_name, *args, **kwargs): """Validates if the given args/kwargs match the method signature. Checks if: - at least all required args/kwargs are given - no redundant args/kwargs are given Parameters ---------- cls : Class mtd_name : str Name of the method whose parameters shall be validated args: list Positional arguments kwargs : dict Dict of keyword arguments """ mtd = getattr(cls, mtd_name) py3_mtd_condition = (not (inspect.isfunction(mtd) or inspect.ismethod(mtd)) and hasattr(cls, mtd_name)) py2_mtd_condition = (not inspect.ismethod(mtd) and not isinstance(cls.__dict__[mtd_name], staticmethod)) if (PY3 and py3_mtd_condition) or (PY2 and py2_mtd_condition): raise TypeError('Attribute `%s` of class `%s` must be a method. Got type `%s` instead.' % (mtd_name, cls.__name__, type(mtd))) req_params, opt_params = arg_spec(cls, mtd_name) n_params = len(req_params) + len(opt_params) n_args_kwargs = len(args) + len(kwargs) for k in kwargs: if k not in req_params and k not in opt_params: raise TaskInitializationError('kwarg `%s` is not a parameter of callable `%s`.' % (k, mtd.__name__)) if n_args_kwargs < len(req_params): raise TaskInitializationError('Not enough args/kwargs supplied for callable `%s`. ' 'Required args: %s' % (mtd.__name__, str(req_params))) if len(args) > n_params or n_args_kwargs > n_params or len(kwargs) > n_params: raise TaskInitializationError('Too many args/kwargs supplied for callable `%s`. ' 'Required args: %s' % (mtd.__name__, str(req_params))) redundant_p = [p for p in kwargs if p not in req_params[len(args):] + opt_params] if redundant_p: raise TaskInitializationError('Supplied one or more kwargs that are not in the signature of ' 'callable `%s`. Redundant kwargs: %s' % (mtd.__name__, str(redundant_p))) needed_kwargs = req_params[len(args):] if not all([True if p in kwargs else False for p in needed_kwargs]): raise TaskInitializationError('Not enough args/kwargs supplied for callable `%s`. ' 'Required args: %s' % (mtd.__name__, str(req_params)))
[ "def", "validate_params", "(", "cls", ",", "mtd_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "mtd", "=", "getattr", "(", "cls", ",", "mtd_name", ")", "py3_mtd_condition", "=", "(", "not", "(", "inspect", ".", "isfunction", "(", "mtd", ")", "or", "inspect", ".", "ismethod", "(", "mtd", ")", ")", "and", "hasattr", "(", "cls", ",", "mtd_name", ")", ")", "py2_mtd_condition", "=", "(", "not", "inspect", ".", "ismethod", "(", "mtd", ")", "and", "not", "isinstance", "(", "cls", ".", "__dict__", "[", "mtd_name", "]", ",", "staticmethod", ")", ")", "if", "(", "PY3", "and", "py3_mtd_condition", ")", "or", "(", "PY2", "and", "py2_mtd_condition", ")", ":", "raise", "TypeError", "(", "'Attribute `%s` of class `%s` must be a method. Got type `%s` instead.'", "%", "(", "mtd_name", ",", "cls", ".", "__name__", ",", "type", "(", "mtd", ")", ")", ")", "req_params", ",", "opt_params", "=", "arg_spec", "(", "cls", ",", "mtd_name", ")", "n_params", "=", "len", "(", "req_params", ")", "+", "len", "(", "opt_params", ")", "n_args_kwargs", "=", "len", "(", "args", ")", "+", "len", "(", "kwargs", ")", "for", "k", "in", "kwargs", ":", "if", "k", "not", "in", "req_params", "and", "k", "not", "in", "opt_params", ":", "raise", "TaskInitializationError", "(", "'kwarg `%s` is not a parameter of callable `%s`.'", "%", "(", "k", ",", "mtd", ".", "__name__", ")", ")", "if", "n_args_kwargs", "<", "len", "(", "req_params", ")", ":", "raise", "TaskInitializationError", "(", "'Not enough args/kwargs supplied for callable `%s`. '", "'Required args: %s'", "%", "(", "mtd", ".", "__name__", ",", "str", "(", "req_params", ")", ")", ")", "if", "len", "(", "args", ")", ">", "n_params", "or", "n_args_kwargs", ">", "n_params", "or", "len", "(", "kwargs", ")", ">", "n_params", ":", "raise", "TaskInitializationError", "(", "'Too many args/kwargs supplied for callable `%s`. '", "'Required args: %s'", "%", "(", "mtd", ".", "__name__", ",", "str", "(", "req_params", ")", ")", ")", "redundant_p", "=", "[", "p", "for", "p", "in", "kwargs", "if", "p", "not", "in", "req_params", "[", "len", "(", "args", ")", ":", "]", "+", "opt_params", "]", "if", "redundant_p", ":", "raise", "TaskInitializationError", "(", "'Supplied one or more kwargs that in the signature of '", "'callable `%s`. Redundant kwargs: %s'", "%", "(", "mtd", ".", "__name__", ",", "str", "(", "redundant_p", ")", ")", ")", "needed_kwargs", "=", "req_params", "[", "len", "(", "args", ")", ":", "]", "if", "not", "all", "(", "[", "True", "if", "p", "in", "kwargs", "else", "False", "for", "p", "in", "needed_kwargs", "]", ")", ":", "raise", "TaskInitializationError", "(", "'Not enough args/kwargs supplied for callable `%s`. '", "'Required args: %s'", "%", "(", "mtd", ".", "__name__", ",", "str", "(", "req_params", ")", ")", ")" ]
Validates if the given args/kwargs match the method signature. Checks if: - at least all required args/kwargs are given - no redundant args/kwargs are given Parameters ---------- cls : Class mtd_name : str Name of the method whose parameters shall be validated args: list Positional arguments kwargs : dict Dict of keyword arguments
[ "Validates", "if", "the", "given", "args", "/", "kwargs", "match", "the", "method", "signature", ".", "Checks", "if", ":", "-", "at", "least", "all", "required", "args", "/", "kwargs", "are", "given", "-", "no", "redundant", "args", "/", "kwargs", "are", "given" ]
python
train
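A hedged sketch of how validate_params might be exercised. MyTask is hypothetical, and this assumes arg_spec treats self as implicit (so 'data' is the only required parameter of run):

    from sparklanes._framework.validation import validate_params

    class MyTask(object):  # hypothetical task class
        def run(self, data, threshold=0.5):
            pass

    # ok: 'data' supplied positionally, 'threshold' left at its default
    validate_params(MyTask, 'run', 'some_data')
    # would raise TaskInitializationError: 'bogus' is not a parameter of 'run'
    # validate_params(MyTask, 'run', 'some_data', bogus=1)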
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers2.py#L621-L638
def convert_merge(builder, layer, input_names, output_names, keras_layer): """ Convert concat layer from keras to coreml. Parameters ---------- keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object. """ # Get input and output names output_name = output_names[0] mode = _get_elementwise_name_from_keras_layer(keras_layer) builder.add_elementwise(name = layer, input_names = input_names, output_name = output_name, mode = mode)
[ "def", "convert_merge", "(", "builder", ",", "layer", ",", "input_names", ",", "output_names", ",", "keras_layer", ")", ":", "# Get input and output names", "output_name", "=", "output_names", "[", "0", "]", "mode", "=", "_get_elementwise_name_from_keras_layer", "(", "keras_layer", ")", "builder", ".", "add_elementwise", "(", "name", "=", "layer", ",", "input_names", "=", "input_names", ",", "output_name", "=", "output_name", ",", "mode", "=", "mode", ")" ]
Convert concat layer from keras to coreml. Parameters ---------- keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object.
[ "Convert", "concat", "layer", "from", "keras", "to", "coreml", "." ]
python
train
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/navigation/__init__.py
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/navigation/__init__.py#L63-L88
def process(self, article): """ Ingests an Article to create navigation structures and parse global metadata. """ if self.article is not None and not self.collection: log.warning('Could not process additional article. Navigation only \ handles one article unless collection mode is set.') return False if article.publisher is None: log.error('''Navigation cannot be generated for an Article \ without a publisher!''') return self.article = article self.article_doi = self.article.doi.split('/')[1] self.all_dois.append(self.article.doi) if self.collection: pass else: self.title = self.article.publisher.nav_title() for author in self.article.publisher.nav_contributors(): self.contributors.add(author) #Analyze the structure of the article to create internal mapping self.map_navigation()
[ "def", "process", "(", "self", ",", "article", ")", ":", "if", "self", ".", "article", "is", "not", "None", "and", "not", "self", ".", "collection", ":", "log", ".", "warning", "(", "'Could not process additional article. Navigation only \\\nhandles one article unless collection mode is set.'", ")", "return", "False", "if", "article", ".", "publisher", "is", "None", ":", "log", ".", "error", "(", "'''Navigation cannot be generated for an Article \\\nwithout a publisher!'''", ")", "return", "self", ".", "article", "=", "article", "self", ".", "article_doi", "=", "self", ".", "article", ".", "doi", ".", "split", "(", "'/'", ")", "[", "1", "]", "self", ".", "all_dois", ".", "append", "(", "self", ".", "article", ".", "doi", ")", "if", "self", ".", "collection", ":", "pass", "else", ":", "self", ".", "title", "=", "self", ".", "article", ".", "publisher", ".", "nav_title", "(", ")", "for", "author", "in", "self", ".", "article", ".", "publisher", ".", "nav_contributors", "(", ")", ":", "self", ".", "contributors", ".", "add", "(", "author", ")", "#Analyze the structure of the article to create internal mapping", "self", ".", "map_navigation", "(", ")" ]
Ingests an Article to create navigation structures and parse global metadata.
[ "Ingests", "an", "Article", "to", "create", "navigation", "structures", "and", "parse", "global", "metadata", "." ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/query_formatting/graphql_formatting.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/query_formatting/graphql_formatting.py#L10-L20
def pretty_print_graphql(query, use_four_spaces=True): """Take a GraphQL query, pretty print it, and return it.""" # Use our custom visitor, which fixes directive argument order # to get the canonical representation output = visit(parse(query), CustomPrintingVisitor()) # Using four spaces for indentation makes it easier to edit in # Python source files. if use_four_spaces: return fix_indentation_depth(output) return output
[ "def", "pretty_print_graphql", "(", "query", ",", "use_four_spaces", "=", "True", ")", ":", "# Use our custom visitor, which fixes directive argument order", "# to get the canonical representation", "output", "=", "visit", "(", "parse", "(", "query", ")", ",", "CustomPrintingVisitor", "(", ")", ")", "# Using four spaces for indentation makes it easier to edit in", "# Python source files.", "if", "use_four_spaces", ":", "return", "fix_indentation_depth", "(", "output", ")", "return", "output" ]
Take a GraphQL query, pretty print it, and return it.
[ "Take", "a", "GraphQL", "query", "pretty", "print", "it", "and", "return", "it", "." ]
python
train
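A short usage sketch for pretty_print_graphql; the query string here is an arbitrary example:

    from graphql_compiler.query_formatting.graphql_formatting import pretty_print_graphql

    query = '{ human(id: "1000") { name height } }'
    print(pretty_print_graphql(query))                         # four-space indentation
    print(pretty_print_graphql(query, use_four_spaces=False))  # printer's default indentation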
aloetesting/aloe_webdriver
aloe_webdriver/__init__.py
https://github.com/aloetesting/aloe_webdriver/blob/65d847da4bdc63f9c015cb19d4efdee87df8ffad/aloe_webdriver/__init__.py#L208-L216
def element_not_contains(self, element_id, value): """ Assert provided content is not contained within an element found by ``id``. """ elem = world.browser.find_elements_by_xpath(str( 'id("{id}")[contains(., "{value}")]'.format( id=element_id, value=value))) assert not elem, \ "Expected element not to contain the given text."
[ "def", "element_not_contains", "(", "self", ",", "element_id", ",", "value", ")", ":", "elem", "=", "world", ".", "browser", ".", "find_elements_by_xpath", "(", "str", "(", "'id(\"{id}\")[contains(., \"{value}\")]'", ".", "format", "(", "id", "=", "element_id", ",", "value", "=", "value", ")", ")", ")", "assert", "not", "elem", ",", "\"Expected element not to contain the given text.\"" ]
Assert provided content is not contained within an element found by ``id``.
[ "Assert", "provided", "content", "is", "not", "contained", "within", "an", "element", "found", "by", "id", "." ]
python
train
quantopian/zipline
zipline/pipeline/loaders/blaze/utils.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/utils.py#L5-L48
def load_raw_data(assets, data_query_cutoff_times, expr, odo_kwargs, checkpoints=None): """ Given an expression representing data to load, perform normalization and forward-filling and return the data, materialized. Only accepts data with a `sid` field. Parameters ---------- assets : pd.int64index the assets to load data for. data_query_cutoff_times : pd.DatetimeIndex The datetime when data should no longer be considered available for a session. expr : expr the expression representing the data to load. odo_kwargs : dict extra keyword arguments to pass to odo when executing the expression. checkpoints : expr, optional the expression representing the checkpointed data for `expr`. Returns ------- raw : pd.dataframe The result of computing expr and materializing the result as a dataframe. """ lower_dt, upper_dt = data_query_cutoff_times[[0, -1]] raw = ffill_query_in_range( expr, lower_dt, upper_dt, checkpoints=checkpoints, odo_kwargs=odo_kwargs, ) sids = raw[SID_FIELD_NAME] raw.drop( sids[~sids.isin(assets)].index, inplace=True ) return raw
[ "def", "load_raw_data", "(", "assets", ",", "data_query_cutoff_times", ",", "expr", ",", "odo_kwargs", ",", "checkpoints", "=", "None", ")", ":", "lower_dt", ",", "upper_dt", "=", "data_query_cutoff_times", "[", "[", "0", ",", "-", "1", "]", "]", "raw", "=", "ffill_query_in_range", "(", "expr", ",", "lower_dt", ",", "upper_dt", ",", "checkpoints", "=", "checkpoints", ",", "odo_kwargs", "=", "odo_kwargs", ",", ")", "sids", "=", "raw", "[", "SID_FIELD_NAME", "]", "raw", ".", "drop", "(", "sids", "[", "~", "sids", ".", "isin", "(", "assets", ")", "]", ".", "index", ",", "inplace", "=", "True", ")", "return", "raw" ]
Given an expression representing data to load, perform normalization and forward-filling and return the data, materialized. Only accepts data with a `sid` field. Parameters ---------- assets : pd.int64index the assets to load data for. data_query_cutoff_times : pd.DatetimeIndex The datetime when data should no longer be considered available for a session. expr : expr the expression representing the data to load. odo_kwargs : dict extra keyword arguments to pass to odo when executing the expression. checkpoints : expr, optional the expression representing the checkpointed data for `expr`. Returns ------- raw : pd.dataframe The result of computing expr and materializing the result as a dataframe.
[ "Given", "an", "expression", "representing", "data", "to", "load", "perform", "normalization", "and", "forward", "-", "filling", "and", "return", "the", "data", "materialized", ".", "Only", "accepts", "data", "with", "a", "sid", "field", "." ]
python
train
INM-6/hybridLFPy
hybridLFPy/helpers.py
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L378-L398
def normalize(data): """ Function to normalize data to have mean 0 and unity standard deviation (also called z-transform) Parameters ---------- data : numpy.ndarray Returns ------- numpy.ndarray z-transform of input array """ data = data.astype(float) data -= data.mean() return data / data.std()
[ "def", "normalize", "(", "data", ")", ":", "data", "=", "data", ".", "astype", "(", "float", ")", "data", "-=", "data", ".", "mean", "(", ")", "return", "data", "/", "data", ".", "std", "(", ")" ]
Function to normalize data to have mean 0 and unity standard deviation (also called z-transform) Parameters ---------- data : numpy.ndarray Returns ------- numpy.ndarray z-transform of input array
[ "Function", "to", "normalize", "data", "to", "have", "mean", "0", "and", "unity", "standard", "deviation", "(", "also", "called", "z", "-", "transform", ")", "Parameters", "----------", "data", ":", "numpy", ".", "ndarray", "Returns", "-------", "numpy", ".", "ndarray", "z", "-", "transform", "of", "input", "array" ]
python
train
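A quick usage sketch for normalize; note that astype(float) copies, so the input array is not mutated:

    import numpy as np
    from hybridLFPy.helpers import normalize

    z = normalize(np.array([1.0, 2.0, 3.0, 4.0]))
    print(z.mean(), z.std())  # approximately 0.0 and 1.0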
senaite/senaite.core
bika/lims/browser/analysisrequest/add2.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analysisrequest/add2.py#L1205-L1218
def ajax_get_service(self): """Returns the services information """ uid = self.request.form.get("uid", None) if uid is None: return self.error("Invalid UID", status=400) service = self.get_object_by_uid(uid) if not service: return self.error("Service not found", status=404) info = self.get_service_info(service) return info
[ "def", "ajax_get_service", "(", "self", ")", ":", "uid", "=", "self", ".", "request", ".", "form", ".", "get", "(", "\"uid\"", ",", "None", ")", "if", "uid", "is", "None", ":", "return", "self", ".", "error", "(", "\"Invalid UID\"", ",", "status", "=", "400", ")", "service", "=", "self", ".", "get_object_by_uid", "(", "uid", ")", "if", "not", "service", ":", "return", "self", ".", "error", "(", "\"Service not found\"", ",", "status", "=", "404", ")", "info", "=", "self", ".", "get_service_info", "(", "service", ")", "return", "info" ]
Returns the services information
[ "Returns", "the", "services", "information" ]
python
train
kadrlica/pymodeler
pymodeler/parameter.py
https://github.com/kadrlica/pymodeler/blob/f426c01416fd4b8fc3afeeb6d3b5d1cb0cb8f8e3/pymodeler/parameter.py#L147-L166
def _load(self, **kwargs): """Load kwargs key,value pairs into __dict__ """ defaults = dict([(d[0], d[1]) for d in self.defaults]) # Require kwargs are in defaults for k in kwargs: if k not in defaults: msg = "Unrecognized attribute of %s: %s" % ( self.__class__.__name__, k) raise AttributeError(msg) defaults.update(kwargs) # This doesn't overwrite the properties self.__dict__.update(defaults) # This should now be set self.check_type(self.__dict__['default']) # This sets the underlying property values (i.e., __value__) self.set(**defaults)
[ "def", "_load", "(", "self", ",", "*", "*", "kwargs", ")", ":", "defaults", "=", "dict", "(", "[", "(", "d", "[", "0", "]", ",", "d", "[", "1", "]", ")", "for", "d", "in", "self", ".", "defaults", "]", ")", "# Require kwargs are in defaults", "for", "k", "in", "kwargs", ":", "if", "k", "not", "in", "defaults", ":", "msg", "=", "\"Unrecognized attribute of %s: %s\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "k", ")", "raise", "AttributeError", "(", "msg", ")", "defaults", ".", "update", "(", "kwargs", ")", "# This doesn't overwrite the properties", "self", ".", "__dict__", ".", "update", "(", "defaults", ")", "# This should now be set", "self", ".", "check_type", "(", "self", ".", "__dict__", "[", "'default'", "]", ")", "# This sets the underlying property values (i.e., __value__)", "self", ".", "set", "(", "*", "*", "defaults", ")" ]
Load kwargs key,value pairs into __dict__
[ "Load", "kwargs", "key", "value", "pairs", "into", "__dict__" ]
python
test
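A hedged sketch of how _load is reached: it runs from Parameter's constructor, so constructing a Parameter exercises it. This assumes 'value' is among the declared defaults of Parameter:

    from pymodeler.parameter import Parameter

    p = Parameter(value=1.0)   # kwargs flow through _load into __dict__
    # Parameter(bogus=1) would raise AttributeError, since 'bogus' is not in defaults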
SeanOC/sharpy
sharpy/product.py
https://github.com/SeanOC/sharpy/blob/935943ca86034255f0a93c1a84734814be176ed4/sharpy/product.py#L214-L241
def get_customers(self, filter_data=None): ''' Returns all customers. Sometimes they are too many and cause internal server errors on CG. The API call permits post parameters for filtering, which tends to fix this https://cheddargetter.com/developers#all-customers filter_data Will be processed by urlencode and can be used for filtering Example value: [ ("subscriptionStatus", "activeOnly"), ("planCode[]", "100GB"), ("planCode[]", "200GB") ] ''' customers = [] try: response = self.client.make_request(path='customers/get', data=filter_data) except NotFound: response = None if response: customer_parser = CustomersParser() customers_data = customer_parser.parse_xml(response.content) for customer_data in customers_data: customers.append(Customer(product=self, **customer_data)) return customers
[ "def", "get_customers", "(", "self", ",", "filter_data", "=", "None", ")", ":", "customers", "=", "[", "]", "try", ":", "response", "=", "self", ".", "client", ".", "make_request", "(", "path", "=", "'customers/get'", ",", "data", "=", "filter_data", ")", "except", "NotFound", ":", "response", "=", "None", "if", "response", ":", "customer_parser", "=", "CustomersParser", "(", ")", "customers_data", "=", "customer_parser", ".", "parse_xml", "(", "response", ".", "content", ")", "for", "customer_data", "in", "customers_data", ":", "customers", ".", "append", "(", "Customer", "(", "product", "=", "self", ",", "*", "*", "customer_data", ")", ")", "return", "customers" ]
Returns all customers. Sometimes they are too many and cause internal server errors on CG. The API call permits post parameters for filtering, which tends to fix this https://cheddargetter.com/developers#all-customers filter_data Will be processed by urlencode and can be used for filtering Example value: [ ("subscriptionStatus", "activeOnly"), ("planCode[]", "100GB"), ("planCode[]", "200GB") ]
[ "Returns", "all", "customers", ".", "Sometimes", "they", "are", "too", "much", "and", "cause", "internal", "server", "errors", "on", "CG", ".", "API", "call", "permits", "post", "parameters", "for", "filtering", "which", "tends", "to", "fix", "this", "https", ":", "//", "cheddargetter", ".", "com", "/", "developers#all", "-", "customers" ]
python
train
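A usage sketch for get_customers, with the filter written as the list of (key, value) pairs that urlencode expects. The credentials and product code are placeholders, and CheddarProduct as the entry point is an assumption based on sharpy's documented usage:

    from sharpy.product import CheddarProduct

    product = CheddarProduct(username='user', password='pass',
                             product_code='MY_PRODUCT')
    filter_data = [
        ('subscriptionStatus', 'activeOnly'),
        ('planCode[]', '100GB'),
        ('planCode[]', '200GB'),
    ]
    customers = product.get_customers(filter_data=filter_data)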
gmr/tinman
tinman/handlers/base.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/handlers/base.py#L222-L229
def prepare(self): """Prepare the session, setting up the session object and loading in the values, assigning the IP address to the session if it's a new one. """ super(SessionRequestHandler, self).prepare() result = yield gen.Task(self.start_session) LOGGER.debug('Exiting SessionRequestHandler.prepare: %r', result)
[ "def", "prepare", "(", "self", ")", ":", "super", "(", "SessionRequestHandler", ",", "self", ")", ".", "prepare", "(", ")", "result", "=", "yield", "gen", ".", "Task", "(", "self", ".", "start_session", ")", "LOGGER", ".", "debug", "(", "'Exiting SessionRequestHandler.prepare: %r'", ",", "result", ")" ]
Prepare the session, setting up the session object and loading in the values, assigning the IP address to the session if it's a new one.
[ "Prepare", "the", "session", "setting", "up", "the", "session", "object", "and", "loading", "in", "the", "values", "assigning", "the", "IP", "address", "to", "the", "session", "if", "it", "s", "an", "new", "one", "." ]
python
train
hydraplatform/hydra-base
hydra_base/lib/attributes.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/attributes.py#L425-L453
def get_resource_attributes(ref_key, ref_id, type_id=None, **kwargs): """ Get all the resource attributes for a given resource. If type_id is specified, only return the resource attributes within the type. """ user_id = kwargs.get('user_id') resource_attr_qry = db.DBSession.query(ResourceAttr).filter( ResourceAttr.ref_key == ref_key, or_( ResourceAttr.network_id==ref_id, ResourceAttr.node_id==ref_id, ResourceAttr.link_id==ref_id, ResourceAttr.group_id==ref_id )) if type_id is not None: attr_ids = [] rs = db.DBSession.query(TypeAttr).filter(TypeAttr.type_id==type_id).all() for r in rs: attr_ids.append(r.attr_id) resource_attr_qry = resource_attr_qry.filter(ResourceAttr.attr_id.in_(attr_ids)) resource_attrs = resource_attr_qry.all() return resource_attrs
[ "def", "get_resource_attributes", "(", "ref_key", ",", "ref_id", ",", "type_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "user_id", "=", "kwargs", ".", "get", "(", "'user_id'", ")", "resource_attr_qry", "=", "db", ".", "DBSession", ".", "query", "(", "ResourceAttr", ")", ".", "filter", "(", "ResourceAttr", ".", "ref_key", "==", "ref_key", ",", "or_", "(", "ResourceAttr", ".", "network_id", "==", "ref_id", ",", "ResourceAttr", ".", "node_id", "==", "ref_id", ",", "ResourceAttr", ".", "link_id", "==", "ref_id", ",", "ResourceAttr", ".", "group_id", "==", "ref_id", ")", ")", "if", "type_id", "is", "not", "None", ":", "attr_ids", "=", "[", "]", "rs", "=", "db", ".", "DBSession", ".", "query", "(", "TypeAttr", ")", ".", "filter", "(", "TypeAttr", ".", "type_id", "==", "type_id", ")", ".", "all", "(", ")", "for", "r", "in", "rs", ":", "attr_ids", ".", "append", "(", "r", ".", "attr_id", ")", "resource_attr_qry", "=", "resource_attr_qry", ".", "filter", "(", "ResourceAttr", ".", "attr_id", ".", "in_", "(", "attr_ids", ")", ")", "resource_attrs", "=", "resource_attr_qry", ".", "all", "(", ")", "return", "resource_attrs" ]
Get all the resource attributes for a given resource. If type_id is specified, only return the resource attributes within the type.
[ "Get", "all", "the", "resource", "attributes", "for", "a", "given", "resource", ".", "If", "type_id", "is", "specified", "only", "return", "the", "resource", "attributes", "within", "the", "type", "." ]
python
train
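A hedged usage sketch for get_resource_attributes; the ids are placeholders, and 'NODE' as a ref_key value is inferred from the node_id filter in the code above:

    from hydra_base.lib.attributes import get_resource_attributes

    node_attrs = get_resource_attributes('NODE', 123)
    # restrict to the attributes belonging to one template type
    typed_only = get_resource_attributes('NODE', 123, type_id=7, user_id=1)
    for ra in typed_only:
        print(ra.attr_id)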
waqasbhatti/astrobase
astrobase/checkplot/pkl_png.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/checkplot/pkl_png.py#L75-L1129
def checkplot_pickle_to_png(
        checkplotin,
        outfile,
        extrarows=None
):
    '''This reads the checkplot pickle or dict provided, and writes out a PNG.

    The output PNG contains most of the information in the input checkplot
    pickle/dict, and can be used to quickly glance through the highlights
    instead of having to review the checkplot with the `checkplotserver`
    webapp. This is useful for exporting read-only views of finalized
    checkplots from the `checkplotserver` as well, to share them with other
    people.

    The PNG has 4 x N tiles::

        [ finder ] [ objectinfo ] [ varinfo/comments ] [ unphased LC ]
        [ periodogram1 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
        [ periodogram2 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
        .
        .
        [ periodogramN ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]

    for N independent period-finding methods producing:

    - periodogram1,2,3...N: the periodograms from each method

    - phased LC P1,P2,P3: the phased lightcurves using the best 3 peaks in
      each periodogram

    Parameters
    ----------

    checkplotin : dict or str
        This is either a checkplotdict produced by
        :py:func:`astrobase.checkplot.pkl.checkplot_dict` or a checkplot
        pickle file produced by
        :py:func:`astrobase.checkplot.pkl.checkplot_pickle`.

    outfile : str
        The filename of the output PNG file to create.

    extrarows : list of tuples
        This is a list of 4-element tuples containing paths to PNG files that
        will be added to the end of the rows generated from the checkplotin
        pickle/dict. Each tuple represents a row in the final output PNG file.
        If there are less than 4 elements per tuple, the missing elements will
        be filled in with white-space. If there are more than 4 elements per
        tuple, only the first four will be used.

        The purpose of this kwarg is to incorporate periodograms and phased LC
        plots (in the form of PNGs) generated from an external period-finding
        function or program (like VARTOOLS) to allow for comparison with
        astrobase results.

        NOTE: the PNG files specified in `extrarows` here will be added to
        those already present in the input checkplotdict['externalplots'] if
        that is None because you passed in a similar list of external plots to
        the :py:func:`astrobase.checkplot.pkl.checkplot_pickle` function
        earlier. In this case, `extrarows` can be used to add even more
        external plots if desired.

        Each external plot PNG will be resized to 750 x 480 pixels to fit into
        an output image cell.

        By convention, each 4-element tuple should contain:

        - a periodogram PNG

        - phased LC PNG with 1st best peak period from periodogram

        - phased LC PNG with 2nd best peak period from periodogram

        - phased LC PNG with 3rd best peak period from periodogram

        Example of extrarows::

            [('/path/to/external/bls-periodogram.png',
              '/path/to/external/bls-phasedlc-plot-bestpeak.png',
              '/path/to/external/bls-phasedlc-plot-peak2.png',
              '/path/to/external/bls-phasedlc-plot-peak3.png'),
             ('/path/to/external/pdm-periodogram.png',
              '/path/to/external/pdm-phasedlc-plot-bestpeak.png',
              '/path/to/external/pdm-phasedlc-plot-peak2.png',
              '/path/to/external/pdm-phasedlc-plot-peak3.png'),
             ...]

    Returns
    -------

    str
        The absolute path to the generated checkplot PNG.

    '''

    # figure out if the checkplotpickle is a filename
    # python 3
    if sys.version_info[:2] > (3,2):

        if (isinstance(checkplotin, str) and os.path.exists(checkplotin)):
            cpd = _read_checkplot_picklefile(checkplotin)
        elif isinstance(checkplotin, dict):
            cpd = checkplotin
        else:
            LOGERROR('checkplotin: %s of type %s is not a '
                     'valid checkplot filename (or does not exist), or a dict' %
                     (os.path.abspath(checkplotin), type(checkplotin)))
            return None

    # check for unicode in python 2.7
    else:

        # get the current checkplotdict
        if ((isinstance(checkplotin, str) or isinstance(checkplotin, unicode))
            and os.path.exists(checkplotin)):
            cpd = _read_checkplot_picklefile(checkplotin)
        elif isinstance(checkplotin, dict):
            cpd = checkplotin
        else:
            LOGERROR('checkplotin: %s of type %s is not a '
                     'valid checkplot filename (or does not exist), or a dict' %
                     (os.path.abspath(checkplotin), type(checkplotin)))
            return None

    # figure out the dimensions of the output png
    # each cell is 750 x 480 pixels
    # a row is made of four cells
    # - the first row is for object info
    # - the rest are for periodograms and phased LCs, one row per method
    # if there are more than three phased LC plots per method, we'll only plot 3
    if 'pfmethods' in cpd:
        cplspmethods = cpd['pfmethods']
    else:
        cplspmethods = []
        for pfm in METHODSHORTLABELS:
            if pfm in cpd:
                cplspmethods.append(pfm)

    cprows = len(cplspmethods)

    # add in any extra rows from neighbors
    if 'neighbors' in cpd and cpd['neighbors'] and len(cpd['neighbors']) > 0:
        nbrrows = len(cpd['neighbors'])
    else:
        nbrrows = 0

    # add in any extra rows from keyword arguments
    if extrarows and len(extrarows) > 0:
        erows = len(extrarows)
    else:
        erows = 0

    # add in any extra rows from the checkplot dict
    if ('externalplots' in cpd and
            cpd['externalplots'] and
            len(cpd['externalplots']) > 0):
        cpderows = len(cpd['externalplots'])
    else:
        cpderows = 0

    totalwidth = 3000
    totalheight = 480 + (cprows + erows + nbrrows + cpderows)*480

    # this is the output PNG
    outimg = Image.new('RGBA',(totalwidth, totalheight),(255,255,255,255))

    # now fill in the rows of the output png. we'll use Pillow to build up the
    # output image from the already stored plots and stuff in the checkplot
    # dict.

    ###############################
    # row 1, cell 1: finder chart #
    ###############################

    if cpd['finderchart']:
        finder = Image.open(
            _base64_to_file(cpd['finderchart'], None, writetostrio=True)
        )
        bigfinder = finder.resize((450,450), Image.ANTIALIAS)
        outimg.paste(bigfinder,(150,20))

    #####################################
    # row 1, cell 2: object information #
    #####################################

    # find the font we need from the package data
    fontpath = os.path.abspath(
        os.path.join(os.path.dirname(__file__),
                     '..',
                     'cpserver',
                     'cps-assets',
                     'DejaVuSans.ttf')
    )
    # load the font
    if os.path.exists(fontpath):
        cpfontnormal = ImageFont.truetype(fontpath, 20)
        cpfontlarge = ImageFont.truetype(fontpath, 28)
    else:
        LOGWARNING('could not find bundled '
                   'DejaVu Sans font in the astrobase package '
                   'data, using ugly defaults...')
        cpfontnormal = ImageFont.load_default()
        cpfontlarge = ImageFont.load_default()

    # the image draw object
    objinfodraw = ImageDraw.Draw(outimg)

    # write out the object information

    # objectid
    objinfodraw.text(
        (625, 25),
        cpd['objectid'] if cpd['objectid'] else 'no objectid',
        font=cpfontlarge,
        fill=(0,0,255,255)
    )
    # twomass id
    if 'twomassid' in cpd['objectinfo']:
        objinfodraw.text(
            (625, 60),
            ('2MASS J%s' % cpd['objectinfo']['twomassid']
             if cpd['objectinfo']['twomassid']
             else ''),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
    # ndet
    if 'ndet' in cpd['objectinfo']:
        objinfodraw.text(
            (625, 85),
            ('LC points: %s' % cpd['objectinfo']['ndet']
             if cpd['objectinfo']['ndet'] is not None
             else ''),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
    else:
        objinfodraw.text(
            (625, 85),
            ('LC points: %s' % cpd['magseries']['times'].size),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
    # coords and PM
    objinfodraw.text(
        (625, 125),
        ('Coords and PM'),
        font=cpfontnormal,
        fill=(0,0,0,255)
    )
    if 'ra' in cpd['objectinfo'] and 'decl' in cpd['objectinfo']:
        objinfodraw.text(
            (900, 125),
            (('RA, Dec: %.3f, %.3f' %
              (cpd['objectinfo']['ra'], cpd['objectinfo']['decl']))
             if (cpd['objectinfo']['ra'] is not None and
                 cpd['objectinfo']['decl'] is not None)
             else ''),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
    else:
        objinfodraw.text(
            (900, 125),
            'RA, Dec: nan, nan',
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
    if 'propermotion' in cpd['objectinfo']:
        objinfodraw.text(
            (900, 150),
            (('Total PM: %.5f mas/yr' % cpd['objectinfo']['propermotion'])
             if (cpd['objectinfo']['propermotion'] is not None)
             else ''),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
    else:
        objinfodraw.text(
            (900, 150),
            'Total PM: nan',
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
    if 'rpmj' in cpd['objectinfo']:
        objinfodraw.text(
            (900, 175),
            (('Reduced PM [Jmag]: %.3f' % cpd['objectinfo']['rpmj'])
             if (cpd['objectinfo']['rpmj'] is not None)
             else ''),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
    else:
        objinfodraw.text(
            (900, 175),
            'Reduced PM [Jmag]: nan',
            font=cpfontnormal,
            fill=(0,0,0,255)
        )

    # here, we have to deal with two generations of objectinfo dicts

    # first, deal with the new generation of objectinfo dicts
    if 'available_dereddened_bands' in cpd['objectinfo']:

        #
        # first, we deal with the bands and mags
        #

        # magnitudes
        objinfodraw.text(
            (625, 200),
            'Magnitudes',
            font=cpfontnormal,
            fill=(0,0,0,255)
        )

        # process the various bands
        # if dereddened mags aren't available, use the observed mags
        if len(cpd['objectinfo']['available_bands']) > 0:

            # we'll get all the available mags
            for bandind, band, label in zip(
                    range(len(cpd['objectinfo']['available_bands'])),
                    cpd['objectinfo']['available_bands'],
                    cpd['objectinfo']['available_band_labels']
            ):

                thisbandmag = cpd['objectinfo'][band]

                # we'll draw stuff in three rows depending on the number of
                # bands we have to use
                if bandind in (0,1,2,3,4):
                    thispos = (900+125*bandind, 200)
                    objinfodraw.text(
                        thispos,
                        '%s: %.3f' % (label, thisbandmag),
                        font=cpfontnormal,
                        fill=(0,0,0,255)
                    )
                elif bandind in (5,6,7,8,9):
                    rowbandind = bandind - 5
                    thispos = (900+125*rowbandind, 225)
                    objinfodraw.text(
                        thispos,
                        '%s: %.3f' % (label, thisbandmag),
                        font=cpfontnormal,
                        fill=(0,0,0,255)
                    )
                else:
                    rowbandind = bandind - 10
                    thispos = (900+125*rowbandind, 250)
                    objinfodraw.text(
                        thispos,
                        '%s: %.3f' % (label, thisbandmag),
                        font=cpfontnormal,
                        fill=(0,0,0,255)
                    )

        #
        # next, deal with the colors
        #

        # colors
        if ('dereddened' in cpd['objectinfo'] and
                cpd['objectinfo']['dereddened'] is True):
            deredlabel = "(dereddened)"
        else:
            deredlabel = ""

        objinfodraw.text(
            (625, 275),
            'Colors %s' % deredlabel,
            font=cpfontnormal,
            fill=(0,0,0,255)
        )

        if len(cpd['objectinfo']['available_colors']) > 0:

            # we'll get all the available mags (dereddened versions preferred)
            for colorind, color, colorlabel in zip(
                    range(len(cpd['objectinfo']['available_colors'])),
                    cpd['objectinfo']['available_colors'],
                    cpd['objectinfo']['available_color_labels']
            ):

                thiscolor = cpd['objectinfo'][color]

                # we'll draw stuff in three rows depending on the number of
                # bands we have to use
                if colorind in (0,1,2,3,4):
                    thispos = (900+150*colorind, 275)
                    objinfodraw.text(
                        thispos,
                        '%s: %.3f' % (colorlabel, thiscolor),
                        font=cpfontnormal,
                        fill=(0,0,0,255)
                    )
                elif colorind in (5,6,7,8,9):
                    thisrowind = colorind - 5
                    thispos = (900+150*thisrowind, 300)
                    objinfodraw.text(
                        thispos,
                        '%s: %.3f' % (colorlabel, thiscolor),
                        font=cpfontnormal,
                        fill=(0,0,0,255)
                    )
                elif colorind in (10,11,12,13,14):
                    thisrowind = colorind - 10
                    thispos = (900+150*thisrowind, 325)
                    objinfodraw.text(
                        thispos,
                        '%s: %.3f' % (colorlabel, thiscolor),
                        font=cpfontnormal,
                        fill=(0,0,0,255)
                    )
                else:
                    thisrowind = colorind - 15
                    thispos = (900+150*thisrowind, 350)
                    objinfodraw.text(
                        thispos,
                        '%s: %.3f' % (colorlabel, thiscolor),
                        font=cpfontnormal,
                        fill=(0,0,0,255)
                    )

    # otherwise, deal with older generation of checkplots
    else:

        objinfodraw.text(
            (625, 200),
            ('Magnitudes'),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
        objinfodraw.text(
            (900, 200),
            ('gri: %.3f, %.3f, %.3f' %
             ((cpd['objectinfo']['sdssg'] if
               ('sdssg' in cpd['objectinfo'] and
                cpd['objectinfo']['sdssg'] is not None)
               else npnan),
              (cpd['objectinfo']['sdssr'] if
               ('sdssr' in cpd['objectinfo'] and
                cpd['objectinfo']['sdssr'] is not None)
               else npnan),
              (cpd['objectinfo']['sdssi'] if
               ('sdssi' in cpd['objectinfo'] and
                cpd['objectinfo']['sdssi'] is not None)
               else npnan))),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
        objinfodraw.text(
            (900, 225),
            ('JHK: %.3f, %.3f, %.3f' %
             ((cpd['objectinfo']['jmag'] if
               ('jmag' in cpd['objectinfo'] and
                cpd['objectinfo']['jmag'] is not None)
               else npnan),
              (cpd['objectinfo']['hmag'] if
               ('hmag' in cpd['objectinfo'] and
                cpd['objectinfo']['hmag'] is not None)
               else npnan),
              (cpd['objectinfo']['kmag'] if
               ('kmag' in cpd['objectinfo'] and
                cpd['objectinfo']['kmag'] is not None)
               else npnan))),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
        objinfodraw.text(
            (900, 250),
            ('BV: %.3f, %.3f' %
             ((cpd['objectinfo']['bmag'] if
               ('bmag' in cpd['objectinfo'] and
                cpd['objectinfo']['bmag'] is not None)
               else npnan),
              (cpd['objectinfo']['vmag'] if
               ('vmag' in cpd['objectinfo'] and
                cpd['objectinfo']['vmag'] is not None)
               else npnan))),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )

        # colors
        if ('dereddened' in cpd['objectinfo'] and
                cpd['objectinfo']['dereddened'] is True):
            deredlabel = "(dereddened)"
        else:
            deredlabel = ""

        objinfodraw.text(
            (625, 275),
            'Colors %s' % deredlabel,
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
        objinfodraw.text(
            (900, 275),
            ('B - V: %.3f, V - K: %.3f' %
             ((cpd['objectinfo']['bvcolor'] if
               ('bvcolor' in cpd['objectinfo'] and
                cpd['objectinfo']['bvcolor'] is not None)
               else npnan),
              (cpd['objectinfo']['vkcolor'] if
               ('vkcolor' in cpd['objectinfo'] and
                cpd['objectinfo']['vkcolor'] is not None)
               else npnan))),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
        objinfodraw.text(
            (900, 300),
            ('i - J: %.3f, g - K: %.3f' %
             ((cpd['objectinfo']['ijcolor'] if
               ('ijcolor' in cpd['objectinfo'] and
                cpd['objectinfo']['ijcolor'] is not None)
               else npnan),
              (cpd['objectinfo']['gkcolor'] if
               ('gkcolor' in cpd['objectinfo'] and
                cpd['objectinfo']['gkcolor'] is not None)
               else npnan))),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )
        objinfodraw.text(
            (900, 325),
            ('J - K: %.3f' %
             ((cpd['objectinfo']['jkcolor'] if
               ('jkcolor' in cpd['objectinfo'] and
                cpd['objectinfo']['jkcolor'] is not None)
               else npnan),)),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )

    #
    # rest of the object information
    #

    # color classification
    if ('color_classes' in cpd['objectinfo'] and
            cpd['objectinfo']['color_classes']):

        objinfodraw.text(
            (625, 375),
            ('star classification by color: %s' %
             (', '.join(cpd['objectinfo']['color_classes']))),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )

    # GAIA neighbors
    if ( ('gaia_neighbors' in cpd['objectinfo']) and
         (cpd['objectinfo']['gaia_neighbors'] is not None) and
         (np.isfinite(cpd['objectinfo']['gaia_neighbors'])) and
         ('searchradarcsec' in cpd['objectinfo']) and
         (cpd['objectinfo']['searchradarcsec']) ):

        objinfodraw.text(
            (625, 400),
            ('%s GAIA close neighbors within %.1f arcsec' %
             (cpd['objectinfo']['gaia_neighbors'],
              cpd['objectinfo']['searchradarcsec'])),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )

    # closest GAIA neighbor
    if ( ('gaia_closest_distarcsec' in cpd['objectinfo']) and
         (cpd['objectinfo']['gaia_closest_distarcsec'] is not None) and
         (np.isfinite(cpd['objectinfo']['gaia_closest_distarcsec'])) and
         ('gaia_closest_gmagdiff' in cpd['objectinfo']) and
         (cpd['objectinfo']['gaia_closest_gmagdiff'] is not None) and
         (np.isfinite(cpd['objectinfo']['gaia_closest_gmagdiff'])) ):

        objinfodraw.text(
            (625, 425),
            ('closest GAIA neighbor is %.1f arcsec away, '
             'GAIA mag (obj-nbr): %.3f' %
             (cpd['objectinfo']['gaia_closest_distarcsec'],
              cpd['objectinfo']['gaia_closest_gmagdiff'])),
            font=cpfontnormal,
            fill=(0,0,0,255)
        )

    # object tags
    if 'objecttags' in cpd['objectinfo'] and cpd['objectinfo']['objecttags']:

        objtagsplit = cpd['objectinfo']['objecttags'].split(',')

        # write three tags per line
        nobjtaglines = int(np.ceil(len(objtagsplit)/3.0))

        for objtagline in range(nobjtaglines):
            objtagslice = ','.join(objtagsplit[objtagline*3:objtagline*3+3])
            objinfodraw.text(
                (625, 450+objtagline*25),
                objtagslice,
                font=cpfontnormal,
                fill=(135, 54, 0, 255)
            )

    ################################################
    # row 1, cell 3: variability info and comments #
    ################################################

    # objectisvar
    objisvar = cpd['varinfo']['objectisvar']

    if objisvar == '0':
        objvarflag = 'Variable star flag not set'
    elif objisvar == '1':
        objvarflag = 'Object is probably a variable star'
    elif objisvar == '2':
        objvarflag = 'Object is probably not a variable star'
    elif objisvar == '3':
        objvarflag = 'Not sure if this object is a variable star'
    elif objisvar is None:
        objvarflag = 'Variable star flag not set'
    elif objisvar is True:
        objvarflag = 'Object is probably a variable star'
    elif objisvar is False:
        objvarflag = 'Object is probably not a variable star'
    else:
        objvarflag = 'Variable star flag: %s' % objisvar

    objinfodraw.text(
        (1650, 125),
        objvarflag,
        font=cpfontnormal,
        fill=(0,0,0,255)
    )

    # period
    objinfodraw.text(
        (1650, 150),
        ('Period [days]: %.6f' %
         (cpd['varinfo']['varperiod']
          if cpd['varinfo']['varperiod'] is not None
          else np.nan)),
        font=cpfontnormal,
        fill=(0,0,0,255)
    )
    # epoch
    objinfodraw.text(
        (1650, 175),
        ('Epoch [JD]: %.6f' %
         (cpd['varinfo']['varepoch']
          if cpd['varinfo']['varepoch'] is not None
          else np.nan)),
        font=cpfontnormal,
        fill=(0,0,0,255)
    )

    # variability tags
    if cpd['varinfo']['vartags']:

        vartagsplit = cpd['varinfo']['vartags'].split(',')

        # write three tags per line
        nvartaglines = int(np.ceil(len(vartagsplit)/3.0))

        for vartagline in range(nvartaglines):
            vartagslice = ','.join(vartagsplit[vartagline*3:vartagline*3+3])
            objinfodraw.text(
                (1650, 225+vartagline*25),
                vartagslice,
                font=cpfontnormal,
                fill=(135, 54, 0, 255)
            )

    # object comments
    if 'comments' in cpd and cpd['comments']:

        commentsplit = cpd['comments'].split(' ')

        # write 10 words per line
        ncommentlines = int(np.ceil(len(commentsplit)/10.0))

        for commentline in range(ncommentlines):
            commentslice = ' '.join(
                commentsplit[commentline*10:commentline*10+10]
            )
            objinfodraw.text(
                (1650, 325+commentline*25),
                commentslice,
                font=cpfontnormal,
                fill=(0,0,0,255)
            )

    # this handles JSON-ified checkplots returned by LCC server
    elif 'objectcomments' in cpd and cpd['objectcomments']:

        commentsplit = cpd['objectcomments'].split(' ')

        # write 10 words per line
        ncommentlines = int(np.ceil(len(commentsplit)/10.0))

        for commentline in range(ncommentlines):
            commentslice = ' '.join(
                commentsplit[commentline*10:commentline*10+10]
            )
            objinfodraw.text(
                (1650, 325+commentline*25),
                commentslice,
                font=cpfontnormal,
                fill=(0,0,0,255)
            )

    #######################################
    # row 1, cell 4: unphased light curve #
    #######################################

    if (cpd['magseries'] and
            'plot' in cpd['magseries'] and
            cpd['magseries']['plot']):
        magseries = Image.open(
            _base64_to_file(cpd['magseries']['plot'], None, writetostrio=True)
        )
        outimg.paste(magseries,(750*3,0))

    # this handles JSON-ified checkplots from LCC server
    elif ('magseries' in cpd and isinstance(cpd['magseries'],str)):
        magseries = Image.open(
            _base64_to_file(cpd['magseries'], None, writetostrio=True)
        )
        outimg.paste(magseries,(750*3,0))

    ###############################
    # the rest of the rows in cpd #
    ###############################

    for lspmethodind, lspmethod in enumerate(cplspmethods):

        ###############################
        # the periodogram comes first #
        ###############################

        if (cpd[lspmethod] and cpd[lspmethod]['periodogram']):
            pgram = Image.open(
                _base64_to_file(cpd[lspmethod]['periodogram'], None,
                                writetostrio=True)
            )
            outimg.paste(pgram,(0,480 + 480*lspmethodind))

        #############################
        # best phased LC comes next #
        #############################

        if (cpd[lspmethod] and 0 in cpd[lspmethod] and cpd[lspmethod][0]):
            plc1 = Image.open(
                _base64_to_file(cpd[lspmethod][0]['plot'], None,
                                writetostrio=True)
            )
            outimg.paste(plc1,(750,480 + 480*lspmethodind))

        # this handles JSON-ified checkplots from LCC server
        elif (cpd[lspmethod] and
              'phasedlc0' in cpd[lspmethod] and
              isinstance(cpd[lspmethod]['phasedlc0']['plot'], str)):
            plc1 = Image.open(
                _base64_to_file(cpd[lspmethod]['phasedlc0']['plot'], None,
                                writetostrio=True)
            )
            outimg.paste(plc1,(750,480 + 480*lspmethodind))

        #################################
        # 2nd best phased LC comes next #
        #################################

        if (cpd[lspmethod] and 1 in cpd[lspmethod] and cpd[lspmethod][1]):
            plc2 = Image.open(
                _base64_to_file(cpd[lspmethod][1]['plot'], None,
                                writetostrio=True)
            )
            outimg.paste(plc2,(750*2,480 +
480*lspmethodind)) # this handles JSON-ified checkplots from LCC server elif (cpd[lspmethod] and 'phasedlc1' in cpd[lspmethod] and isinstance(cpd[lspmethod]['phasedlc1']['plot'], str)): plc2 = Image.open( _base64_to_file(cpd[lspmethod]['phasedlc1']['plot'], None, writetostrio=True) ) outimg.paste(plc2,(750*2,480 + 480*lspmethodind)) ################################# # 3rd best phased LC comes next # ################################# if (cpd[lspmethod] and 2 in cpd[lspmethod] and cpd[lspmethod][2]): plc3 = Image.open( _base64_to_file(cpd[lspmethod][2]['plot'], None, writetostrio=True) ) outimg.paste(plc3,(750*3,480 + 480*lspmethodind)) # this handles JSON-ified checkplots from LCC server elif (cpd[lspmethod] and 'phasedlc2' in cpd[lspmethod] and isinstance(cpd[lspmethod]['phasedlc2']['plot'], str)): plc3 = Image.open( _base64_to_file(cpd[lspmethod]['phasedlc2']['plot'], None, writetostrio=True) ) outimg.paste(plc3,(750*3,480 + 480*lspmethodind)) ################################ ## ALL DONE WITH BUILDING PNG ## ################################ ######################### # add in any extra rows # ######################### # from the keyword arguments if erows > 0: for erowind, erow in enumerate(extrarows): # make sure we never go above 4 plots in a row for ecolind, ecol in enumerate(erow[:4]): eplot = Image.open(ecol) eplotresized = eplot.resize((750,480), Image.ANTIALIAS) outimg.paste(eplotresized, (750*ecolind, (cprows+1)*480 + 480*erowind)) # from the checkplotdict if cpderows > 0: for cpderowind, cpderow in enumerate(cpd['externalplots']): # make sure we never go above 4 plots in a row for cpdecolind, cpdecol in enumerate(cpderow[:4]): cpdeplot = Image.open(cpdecol) cpdeplotresized = cpdeplot.resize((750,480), Image.ANTIALIAS) outimg.paste(cpdeplotresized, (750*cpdecolind, (cprows+1)*480 + (erows*480) + 480*cpderowind)) # from neighbors: if nbrrows > 0: # we have four tiles # tile 1: neighbor objectid, ra, decl, distance, unphased LC # tile 2: phased LC for gls # tile 3: phased LC for pdm # tile 4: phased LC for any other period finding method # the priority is like so: ['bls','mav','aov','win'] for nbrind, nbr in enumerate(cpd['neighbors']): # figure out which period finding methods are available for this # neighbor. 
make sure to match the ones from the actual object in # order of priority: 'gls','pdm','bls','aov','mav','acf','win' nbrlspmethods = [] for lspmethod in cpd['pfmethods']: if lspmethod in nbr: nbrlspmethods.append(lspmethod) # restrict to top three in priority nbrlspmethods = nbrlspmethods[:3] try: # first panel: neighbor objectid, ra, decl, distance, unphased # LC nbrlc = Image.open( _base64_to_file( nbr['magseries']['plot'], None, writetostrio=True ) ) outimg.paste(nbrlc, (750*0, (cprows+1)*480 + (erows*480) + (cpderows*480) + 480*nbrind)) # overlay the objectinfo objinfodraw.text( (98, (cprows+1)*480 + (erows*480) + (cpderows*480) + 480*nbrind + 15), ('N%s: %s' % (nbrind + 1, nbr['objectid'])), font=cpfontlarge, fill=(0,0,255,255) ) # overlay the objectinfo objinfodraw.text( (98, (cprows+1)*480 + (erows*480) + (cpderows*480) + 480*nbrind + 50), ('(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec' % (nbr['ra'], nbr['decl'], nbr['dist'])), font=cpfontnormal, fill=(0,0,255,255) ) # second panel: phased LC for gls lsp1lc = Image.open( _base64_to_file( nbr[nbrlspmethods[0]][0]['plot'], None, writetostrio=True ) ) outimg.paste(lsp1lc, (750*1, (cprows+1)*480 + (erows*480) + (cpderows*480) + 480*nbrind)) # second panel: phased LC for gls lsp2lc = Image.open( _base64_to_file( nbr[nbrlspmethods[1]][0]['plot'], None, writetostrio=True ) ) outimg.paste(lsp2lc, (750*2, (cprows+1)*480 + (erows*480) + (cpderows*480) + 480*nbrind)) # second panel: phased LC for gls lsp3lc = Image.open( _base64_to_file( nbr[nbrlspmethods[2]][0]['plot'], None, writetostrio=True ) ) outimg.paste(lsp3lc, (750*3, (cprows+1)*480 + (erows*480) + (cpderows*480) + 480*nbrind)) except Exception as e: LOGERROR('neighbor %s does not have a magseries plot, ' 'measurements are probably all nan' % nbr['objectid']) # overlay the objectinfo objinfodraw.text( (98, (cprows+1)*480 + (erows*480) + (cpderows*480) + 480*nbrind + 15), ('N%s: %s' % (nbrind + 1, nbr['objectid'])), font=cpfontlarge, fill=(0,0,255,255) ) if 'ra' in nbr and 'decl' in nbr and 'dist' in nbr: # overlay the objectinfo objinfodraw.text( (98, (cprows+1)*480 + (erows*480) + (cpderows*480) + 480*nbrind + 50), ('(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec' % (nbr['ra'], nbr['decl'], nbr['dist'])), font=cpfontnormal, fill=(0,0,255,255) ) elif 'objectinfo' in nbr: # overlay the objectinfo objinfodraw.text( (98, (cprows+1)*480 + (erows*480) + (cpderows*480) + 480*nbrind + 50), ('(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec' % (nbr['objectinfo']['ra'], nbr['objectinfo']['decl'], nbr['objectinfo']['distarcsec'])), font=cpfontnormal, fill=(0,0,255,255) ) ##################### ## WRITE FINAL PNG ## ##################### # check if the output filename is actually an instance of StringIO if sys.version_info[:2] < (3,0): is_strio = isinstance(outfile, cStringIO.InputType) else: is_strio = isinstance(outfile, StrIO) if not is_strio: # check if we've stupidly copied over the same filename as the input # pickle to expected output file if outfile.endswith('pkl'): LOGWARNING('expected output PNG filename ends with .pkl, ' 'changed to .png') outfile = outfile.replace('.pkl','.png') outimg.save(outfile, format='PNG', optimize=True) if not is_strio: if os.path.exists(outfile): LOGINFO('checkplot pickle -> checkplot PNG: %s OK' % outfile) return outfile else: LOGERROR('failed to write checkplot PNG') return None else: LOGINFO('checkplot pickle -> StringIO instance OK') return outfile
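A quick sanity check on the tiling arithmetic used in the function above (the row counts here are hypothetical, chosen only for illustration): each cell is 750 x 480 px and a row holds four cells, so the canvas size and the neighbor-row offsets work out as:

    # hypothetical row counts, for illustration only
    cprows, erows, nbrrows, cpderows = 3, 1, 2, 0
    totalwidth = 3000                                                # 4 cells x 750 px
    totalheight = 480 + (cprows + erows + nbrrows + cpderows) * 480  # -> 3360 px
    # the first neighbor row then starts at:
    y_nbr0 = (cprows + 1) * 480 + erows * 480 + cpderows * 480       # -> 2400 px

which matches the `outimg.paste(...)` offsets used for the neighbor tiles.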
[ "def", "checkplot_pickle_to_png", "(", "checkplotin", ",", "outfile", ",", "extrarows", "=", "None", ")", ":", "# figure out if the checkplotpickle is a filename", "# python 3", "if", "sys", ".", "version_info", "[", ":", "2", "]", ">", "(", "3", ",", "2", ")", ":", "if", "(", "isinstance", "(", "checkplotin", ",", "str", ")", "and", "os", ".", "path", ".", "exists", "(", "checkplotin", ")", ")", ":", "cpd", "=", "_read_checkplot_picklefile", "(", "checkplotin", ")", "elif", "isinstance", "(", "checkplotin", ",", "dict", ")", ":", "cpd", "=", "checkplotin", "else", ":", "LOGERROR", "(", "'checkplotin: %s of type %s is not a '", "'valid checkplot filename (or does not exist), or a dict'", "%", "(", "os", ".", "path", ".", "abspath", "(", "checkplotin", ")", ",", "type", "(", "checkplotin", ")", ")", ")", "return", "None", "# check for unicode in python 2.7", "else", ":", "# get the current checkplotdict", "if", "(", "(", "isinstance", "(", "checkplotin", ",", "str", ")", "or", "isinstance", "(", "checkplotin", ",", "unicode", ")", ")", "and", "os", ".", "path", ".", "exists", "(", "checkplotin", ")", ")", ":", "cpd", "=", "_read_checkplot_picklefile", "(", "checkplotin", ")", "elif", "isinstance", "(", "checkplotin", ",", "dict", ")", ":", "cpd", "=", "checkplotin", "else", ":", "LOGERROR", "(", "'checkplotin: %s of type %s is not a '", "'valid checkplot filename (or does not exist), or a dict'", "%", "(", "os", ".", "path", ".", "abspath", "(", "checkplotin", ")", ",", "type", "(", "checkplotin", ")", ")", ")", "return", "None", "# figure out the dimensions of the output png", "# each cell is 750 x 480 pixels", "# a row is made of four cells", "# - the first row is for object info", "# - the rest are for periodograms and phased LCs, one row per method", "# if there are more than three phased LC plots per method, we'll only plot 3", "if", "'pfmethods'", "in", "cpd", ":", "cplspmethods", "=", "cpd", "[", "'pfmethods'", "]", "else", ":", "cplspmethods", "=", "[", "]", "for", "pfm", "in", "METHODSHORTLABELS", ":", "if", "pfm", "in", "cpd", ":", "cplspmethods", ".", "append", "(", "pfm", ")", "cprows", "=", "len", "(", "cplspmethods", ")", "# add in any extra rows from neighbors", "if", "'neighbors'", "in", "cpd", "and", "cpd", "[", "'neighbors'", "]", "and", "len", "(", "cpd", "[", "'neighbors'", "]", ")", ">", "0", ":", "nbrrows", "=", "len", "(", "cpd", "[", "'neighbors'", "]", ")", "else", ":", "nbrrows", "=", "0", "# add in any extra rows from keyword arguments", "if", "extrarows", "and", "len", "(", "extrarows", ")", ">", "0", ":", "erows", "=", "len", "(", "extrarows", ")", "else", ":", "erows", "=", "0", "# add in any extra rows from the checkplot dict", "if", "(", "'externalplots'", "in", "cpd", "and", "cpd", "[", "'externalplots'", "]", "and", "len", "(", "cpd", "[", "'externalplots'", "]", ")", ">", "0", ")", ":", "cpderows", "=", "len", "(", "cpd", "[", "'externalplots'", "]", ")", "else", ":", "cpderows", "=", "0", "totalwidth", "=", "3000", "totalheight", "=", "480", "+", "(", "cprows", "+", "erows", "+", "nbrrows", "+", "cpderows", ")", "*", "480", "# this is the output PNG", "outimg", "=", "Image", ".", "new", "(", "'RGBA'", ",", "(", "totalwidth", ",", "totalheight", ")", ",", "(", "255", ",", "255", ",", "255", ",", "255", ")", ")", "# now fill in the rows of the output png. 
we'll use Pillow to build up the", "# output image from the already stored plots and stuff in the checkplot", "# dict.", "###############################", "# row 1, cell 1: finder chart #", "###############################", "if", "cpd", "[", "'finderchart'", "]", ":", "finder", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "cpd", "[", "'finderchart'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "bigfinder", "=", "finder", ".", "resize", "(", "(", "450", ",", "450", ")", ",", "Image", ".", "ANTIALIAS", ")", "outimg", ".", "paste", "(", "bigfinder", ",", "(", "150", ",", "20", ")", ")", "#####################################", "# row 1, cell 2: object information #", "#####################################", "# find the font we need from the package data", "fontpath", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'..'", ",", "'cpserver'", ",", "'cps-assets'", ",", "'DejaVuSans.ttf'", ")", ")", "# load the font", "if", "os", ".", "path", ".", "exists", "(", "fontpath", ")", ":", "cpfontnormal", "=", "ImageFont", ".", "truetype", "(", "fontpath", ",", "20", ")", "cpfontlarge", "=", "ImageFont", ".", "truetype", "(", "fontpath", ",", "28", ")", "else", ":", "LOGWARNING", "(", "'could not find bundled '", "'DejaVu Sans font in the astrobase package '", "'data, using ugly defaults...'", ")", "cpfontnormal", "=", "ImageFont", ".", "load_default", "(", ")", "cpfontlarge", "=", "ImageFont", ".", "load_default", "(", ")", "# the image draw object", "objinfodraw", "=", "ImageDraw", ".", "Draw", "(", "outimg", ")", "# write out the object information", "# objectid", "objinfodraw", ".", "text", "(", "(", "625", ",", "25", ")", ",", "cpd", "[", "'objectid'", "]", "if", "cpd", "[", "'objectid'", "]", "else", "'no objectid'", ",", "font", "=", "cpfontlarge", ",", "fill", "=", "(", "0", ",", "0", ",", "255", ",", "255", ")", ")", "# twomass id", "if", "'twomassid'", "in", "cpd", "[", "'objectinfo'", "]", ":", "objinfodraw", ".", "text", "(", "(", "625", ",", "60", ")", ",", "(", "'2MASS J%s'", "%", "cpd", "[", "'objectinfo'", "]", "[", "'twomassid'", "]", "if", "cpd", "[", "'objectinfo'", "]", "[", "'twomassid'", "]", "else", "''", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# ndet", "if", "'ndet'", "in", "cpd", "[", "'objectinfo'", "]", ":", "objinfodraw", ".", "text", "(", "(", "625", ",", "85", ")", ",", "(", "'LC points: %s'", "%", "cpd", "[", "'objectinfo'", "]", "[", "'ndet'", "]", "if", "cpd", "[", "'objectinfo'", "]", "[", "'ndet'", "]", "is", "not", "None", "else", "''", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "else", ":", "objinfodraw", ".", "text", "(", "(", "625", ",", "85", ")", ",", "(", "'LC points: %s'", "%", "cpd", "[", "'magseries'", "]", "[", "'times'", "]", ".", "size", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# coords and PM", "objinfodraw", ".", "text", "(", "(", "625", ",", "125", ")", ",", "(", "'Coords and PM'", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "if", "'ra'", "in", "cpd", "[", "'objectinfo'", "]", "and", "'decl'", "in", "cpd", "[", "'objectinfo'", "]", ":", "objinfodraw", ".", "text", "(", "(", "900", ",", "125", ")", ",", "(", "(", "'RA, Dec: %.3f, %.3f'", "%", "(", "cpd", "[", 
"'objectinfo'", "]", "[", "'ra'", "]", ",", "cpd", "[", "'objectinfo'", "]", "[", "'decl'", "]", ")", ")", "if", "(", "cpd", "[", "'objectinfo'", "]", "[", "'ra'", "]", "is", "not", "None", "and", "cpd", "[", "'objectinfo'", "]", "[", "'decl'", "]", "is", "not", "None", ")", "else", "''", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "else", ":", "objinfodraw", ".", "text", "(", "(", "900", ",", "125", ")", ",", "'RA, Dec: nan, nan'", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "if", "'propermotion'", "in", "cpd", "[", "'objectinfo'", "]", ":", "objinfodraw", ".", "text", "(", "(", "900", ",", "150", ")", ",", "(", "(", "'Total PM: %.5f mas/yr'", "%", "cpd", "[", "'objectinfo'", "]", "[", "'propermotion'", "]", ")", "if", "(", "cpd", "[", "'objectinfo'", "]", "[", "'propermotion'", "]", "is", "not", "None", ")", "else", "''", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "else", ":", "objinfodraw", ".", "text", "(", "(", "900", ",", "150", ")", ",", "'Total PM: nan'", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "if", "'rpmj'", "in", "cpd", "[", "'objectinfo'", "]", ":", "objinfodraw", ".", "text", "(", "(", "900", ",", "175", ")", ",", "(", "(", "'Reduced PM [Jmag]: %.3f'", "%", "cpd", "[", "'objectinfo'", "]", "[", "'rpmj'", "]", ")", "if", "(", "cpd", "[", "'objectinfo'", "]", "[", "'rpmj'", "]", "is", "not", "None", ")", "else", "''", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "else", ":", "objinfodraw", ".", "text", "(", "(", "900", ",", "175", ")", ",", "'Reduced PM [Jmag]: nan'", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# here, we have to deal with two generations of objectinfo dicts", "# first, deal with the new generation of objectinfo dicts", "if", "'available_dereddened_bands'", "in", "cpd", "[", "'objectinfo'", "]", ":", "#", "# first, we deal with the bands and mags", "#", "# magnitudes", "objinfodraw", ".", "text", "(", "(", "625", ",", "200", ")", ",", "'Magnitudes'", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# process the various bands", "# if dereddened mags aren't available, use the observed mags", "if", "len", "(", "cpd", "[", "'objectinfo'", "]", "[", "'available_bands'", "]", ")", ">", "0", ":", "# we'll get all the available mags", "for", "bandind", ",", "band", ",", "label", "in", "zip", "(", "range", "(", "len", "(", "cpd", "[", "'objectinfo'", "]", "[", "'available_bands'", "]", ")", ")", ",", "cpd", "[", "'objectinfo'", "]", "[", "'available_bands'", "]", ",", "cpd", "[", "'objectinfo'", "]", "[", "'available_band_labels'", "]", ")", ":", "thisbandmag", "=", "cpd", "[", "'objectinfo'", "]", "[", "band", "]", "# we'll draw stuff in three rows depending on the number of", "# bands we have to use", "if", "bandind", "in", "(", "0", ",", "1", ",", "2", ",", "3", ",", "4", ")", ":", "thispos", "=", "(", "900", "+", "125", "*", "bandind", ",", "200", ")", "objinfodraw", ".", "text", "(", "thispos", ",", "'%s: %.3f'", "%", "(", "label", ",", "thisbandmag", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "elif", "bandind", "in", "(", "5", ",", "6", ",", "7", ",", "8", ",", "9", ")", 
":", "rowbandind", "=", "bandind", "-", "5", "thispos", "=", "(", "900", "+", "125", "*", "rowbandind", ",", "225", ")", "objinfodraw", ".", "text", "(", "thispos", ",", "'%s: %.3f'", "%", "(", "label", ",", "thisbandmag", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "else", ":", "rowbandind", "=", "bandind", "-", "10", "thispos", "=", "(", "900", "+", "125", "*", "rowbandind", ",", "250", ")", "objinfodraw", ".", "text", "(", "thispos", ",", "'%s: %.3f'", "%", "(", "label", ",", "thisbandmag", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "#", "# next, deal with the colors", "#", "# colors", "if", "(", "'dereddened'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'dereddened'", "]", "is", "True", ")", ":", "deredlabel", "=", "\"(dereddened)\"", "else", ":", "deredlabel", "=", "\"\"", "objinfodraw", ".", "text", "(", "(", "625", ",", "275", ")", ",", "'Colors %s'", "%", "deredlabel", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "if", "len", "(", "cpd", "[", "'objectinfo'", "]", "[", "'available_colors'", "]", ")", ">", "0", ":", "# we'll get all the available mags (dereddened versions preferred)", "for", "colorind", ",", "color", ",", "colorlabel", "in", "zip", "(", "range", "(", "len", "(", "cpd", "[", "'objectinfo'", "]", "[", "'available_colors'", "]", ")", ")", ",", "cpd", "[", "'objectinfo'", "]", "[", "'available_colors'", "]", ",", "cpd", "[", "'objectinfo'", "]", "[", "'available_color_labels'", "]", ")", ":", "thiscolor", "=", "cpd", "[", "'objectinfo'", "]", "[", "color", "]", "# we'll draw stuff in three rows depending on the number of", "# bands we have to use", "if", "colorind", "in", "(", "0", ",", "1", ",", "2", ",", "3", ",", "4", ")", ":", "thispos", "=", "(", "900", "+", "150", "*", "colorind", ",", "275", ")", "objinfodraw", ".", "text", "(", "thispos", ",", "'%s: %.3f'", "%", "(", "colorlabel", ",", "thiscolor", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "elif", "colorind", "in", "(", "5", ",", "6", ",", "7", ",", "8", ",", "9", ")", ":", "thisrowind", "=", "colorind", "-", "5", "thispos", "=", "(", "900", "+", "150", "*", "thisrowind", ",", "300", ")", "objinfodraw", ".", "text", "(", "thispos", ",", "'%s: %.3f'", "%", "(", "colorlabel", ",", "thiscolor", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "elif", "colorind", "in", "(", "10", ",", "11", ",", "12", ",", "13", ",", "14", ")", ":", "thisrowind", "=", "colorind", "-", "10", "thispos", "=", "(", "900", "+", "150", "*", "thisrowind", ",", "325", ")", "objinfodraw", ".", "text", "(", "thispos", ",", "'%s: %.3f'", "%", "(", "colorlabel", ",", "thiscolor", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "else", ":", "thisrowind", "=", "colorind", "-", "15", "thispos", "=", "(", "900", "+", "150", "*", "thisrowind", ",", "350", ")", "objinfodraw", ".", "text", "(", "thispos", ",", "'%s: %.3f'", "%", "(", "colorlabel", ",", "thiscolor", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# otherwise, deal with older generation of checkplots", "else", ":", "objinfodraw", ".", "text", "(", "(", "625", ",", "200", ")", ",", "(", "'Magnitudes'", ")", ",", 
"font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "objinfodraw", ".", "text", "(", "(", "900", ",", "200", ")", ",", "(", "'gri: %.3f, %.3f, %.3f'", "%", "(", "(", "cpd", "[", "'objectinfo'", "]", "[", "'sdssg'", "]", "if", "(", "'sdssg'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'sdssg'", "]", "is", "not", "None", ")", "else", "npnan", ")", ",", "(", "cpd", "[", "'objectinfo'", "]", "[", "'sdssr'", "]", "if", "(", "'sdssr'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'sdssr'", "]", "is", "not", "None", ")", "else", "npnan", ")", ",", "(", "cpd", "[", "'objectinfo'", "]", "[", "'sdssi'", "]", "if", "(", "'sdssi'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'sdssi'", "]", "is", "not", "None", ")", "else", "npnan", ")", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "objinfodraw", ".", "text", "(", "(", "900", ",", "225", ")", ",", "(", "'JHK: %.3f, %.3f, %.3f'", "%", "(", "(", "cpd", "[", "'objectinfo'", "]", "[", "'jmag'", "]", "if", "(", "'jmag'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'jmag'", "]", "is", "not", "None", ")", "else", "npnan", ")", ",", "(", "cpd", "[", "'objectinfo'", "]", "[", "'hmag'", "]", "if", "(", "'hmag'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'hmag'", "]", "is", "not", "None", ")", "else", "npnan", ")", ",", "(", "cpd", "[", "'objectinfo'", "]", "[", "'kmag'", "]", "if", "(", "'kmag'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'kmag'", "]", "is", "not", "None", ")", "else", "npnan", ")", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "objinfodraw", ".", "text", "(", "(", "900", ",", "250", ")", ",", "(", "'BV: %.3f, %.3f'", "%", "(", "(", "cpd", "[", "'objectinfo'", "]", "[", "'bmag'", "]", "if", "(", "'bmag'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'bmag'", "]", "is", "not", "None", ")", "else", "npnan", ")", ",", "(", "cpd", "[", "'objectinfo'", "]", "[", "'vmag'", "]", "if", "(", "'vmag'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'vmag'", "]", "is", "not", "None", ")", "else", "npnan", ")", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# colors", "if", "(", "'dereddened'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'dereddened'", "]", "is", "True", ")", ":", "deredlabel", "=", "\"(dereddened)\"", "else", ":", "deredlabel", "=", "\"\"", "objinfodraw", ".", "text", "(", "(", "625", ",", "275", ")", ",", "'Colors %s'", "%", "deredlabel", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "objinfodraw", ".", "text", "(", "(", "900", ",", "275", ")", ",", "(", "'B - V: %.3f, V - K: %.3f'", "%", "(", "(", "cpd", "[", "'objectinfo'", "]", "[", "'bvcolor'", "]", "if", "(", "'bvcolor'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'bvcolor'", "]", "is", "not", "None", ")", "else", "npnan", ")", ",", "(", "cpd", "[", "'objectinfo'", "]", "[", "'vkcolor'", "]", "if", "(", "'vkcolor'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", 
"'objectinfo'", "]", "[", "'vkcolor'", "]", "is", "not", "None", ")", "else", "npnan", ")", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "objinfodraw", ".", "text", "(", "(", "900", ",", "300", ")", ",", "(", "'i - J: %.3f, g - K: %.3f'", "%", "(", "(", "cpd", "[", "'objectinfo'", "]", "[", "'ijcolor'", "]", "if", "(", "'ijcolor'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'ijcolor'", "]", "is", "not", "None", ")", "else", "npnan", ")", ",", "(", "cpd", "[", "'objectinfo'", "]", "[", "'gkcolor'", "]", "if", "(", "'gkcolor'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'gkcolor'", "]", "is", "not", "None", ")", "else", "npnan", ")", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "objinfodraw", ".", "text", "(", "(", "900", ",", "325", ")", ",", "(", "'J - K: %.3f'", "%", "(", "(", "cpd", "[", "'objectinfo'", "]", "[", "'jkcolor'", "]", "if", "(", "'jkcolor'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'jkcolor'", "]", "is", "not", "None", ")", "else", "npnan", ")", ",", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "#", "# rest of the object information", "#", "# color classification", "if", "(", "'color_classes'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'color_classes'", "]", ")", ":", "objinfodraw", ".", "text", "(", "(", "625", ",", "375", ")", ",", "(", "'star classification by color: %s'", "%", "(", "', '", ".", "join", "(", "cpd", "[", "'objectinfo'", "]", "[", "'color_classes'", "]", ")", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# GAIA neighbors", "if", "(", "(", "'gaia_neighbors'", "in", "cpd", "[", "'objectinfo'", "]", ")", "and", "(", "cpd", "[", "'objectinfo'", "]", "[", "'gaia_neighbors'", "]", "is", "not", "None", ")", "and", "(", "np", ".", "isfinite", "(", "cpd", "[", "'objectinfo'", "]", "[", "'gaia_neighbors'", "]", ")", ")", "and", "(", "'searchradarcsec'", "in", "cpd", "[", "'objectinfo'", "]", ")", "and", "(", "cpd", "[", "'objectinfo'", "]", "[", "'searchradarcsec'", "]", ")", ")", ":", "objinfodraw", ".", "text", "(", "(", "625", ",", "400", ")", ",", "(", "'%s GAIA close neighbors within %.1f arcsec'", "%", "(", "cpd", "[", "'objectinfo'", "]", "[", "'gaia_neighbors'", "]", ",", "cpd", "[", "'objectinfo'", "]", "[", "'searchradarcsec'", "]", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# closest GAIA neighbor", "if", "(", "(", "'gaia_closest_distarcsec'", "in", "cpd", "[", "'objectinfo'", "]", ")", "and", "(", "cpd", "[", "'objectinfo'", "]", "[", "'gaia_closest_distarcsec'", "]", "is", "not", "None", ")", "and", "(", "np", ".", "isfinite", "(", "cpd", "[", "'objectinfo'", "]", "[", "'gaia_closest_distarcsec'", "]", ")", ")", "and", "(", "'gaia_closest_gmagdiff'", "in", "cpd", "[", "'objectinfo'", "]", ")", "and", "(", "cpd", "[", "'objectinfo'", "]", "[", "'gaia_closest_gmagdiff'", "]", "is", "not", "None", ")", "and", "(", "np", ".", "isfinite", "(", "cpd", "[", "'objectinfo'", "]", "[", "'gaia_closest_gmagdiff'", "]", ")", ")", ")", ":", "objinfodraw", ".", "text", "(", "(", "625", ",", "425", ")", ",", "(", "'closest GAIA neighbor is %.1f arcsec away, '", 
"'GAIA mag (obj-nbr): %.3f'", "%", "(", "cpd", "[", "'objectinfo'", "]", "[", "'gaia_closest_distarcsec'", "]", ",", "cpd", "[", "'objectinfo'", "]", "[", "'gaia_closest_gmagdiff'", "]", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# object tags", "if", "'objecttags'", "in", "cpd", "[", "'objectinfo'", "]", "and", "cpd", "[", "'objectinfo'", "]", "[", "'objecttags'", "]", ":", "objtagsplit", "=", "cpd", "[", "'objectinfo'", "]", "[", "'objecttags'", "]", ".", "split", "(", "','", ")", "# write three tags per line", "nobjtaglines", "=", "int", "(", "np", ".", "ceil", "(", "len", "(", "objtagsplit", ")", "/", "3.0", ")", ")", "for", "objtagline", "in", "range", "(", "nobjtaglines", ")", ":", "objtagslice", "=", "','", ".", "join", "(", "objtagsplit", "[", "objtagline", "*", "3", ":", "objtagline", "*", "3", "+", "3", "]", ")", "objinfodraw", ".", "text", "(", "(", "625", ",", "450", "+", "objtagline", "*", "25", ")", ",", "objtagslice", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "135", ",", "54", ",", "0", ",", "255", ")", ")", "################################################", "# row 1, cell 3: variability info and comments #", "################################################", "# objectisvar", "objisvar", "=", "cpd", "[", "'varinfo'", "]", "[", "'objectisvar'", "]", "if", "objisvar", "==", "'0'", ":", "objvarflag", "=", "'Variable star flag not set'", "elif", "objisvar", "==", "'1'", ":", "objvarflag", "=", "'Object is probably a variable star'", "elif", "objisvar", "==", "'2'", ":", "objvarflag", "=", "'Object is probably not a variable star'", "elif", "objisvar", "==", "'3'", ":", "objvarflag", "=", "'Not sure if this object is a variable star'", "elif", "objisvar", "is", "None", ":", "objvarflag", "=", "'Variable star flag not set'", "elif", "objisvar", "is", "True", ":", "objvarflag", "=", "'Object is probably a variable star'", "elif", "objisvar", "is", "False", ":", "objvarflag", "=", "'Object is probably not a variable star'", "else", ":", "objvarflag", "=", "'Variable star flag: %s'", "%", "objisvar", "objinfodraw", ".", "text", "(", "(", "1650", ",", "125", ")", ",", "objvarflag", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# period", "objinfodraw", ".", "text", "(", "(", "1650", ",", "150", ")", ",", "(", "'Period [days]: %.6f'", "%", "(", "cpd", "[", "'varinfo'", "]", "[", "'varperiod'", "]", "if", "cpd", "[", "'varinfo'", "]", "[", "'varperiod'", "]", "is", "not", "None", "else", "np", ".", "nan", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# epoch", "objinfodraw", ".", "text", "(", "(", "1650", ",", "175", ")", ",", "(", "'Epoch [JD]: %.6f'", "%", "(", "cpd", "[", "'varinfo'", "]", "[", "'varepoch'", "]", "if", "cpd", "[", "'varinfo'", "]", "[", "'varepoch'", "]", "is", "not", "None", "else", "np", ".", "nan", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# variability tags", "if", "cpd", "[", "'varinfo'", "]", "[", "'vartags'", "]", ":", "vartagsplit", "=", "cpd", "[", "'varinfo'", "]", "[", "'vartags'", "]", ".", "split", "(", "','", ")", "# write three tags per line", "nvartaglines", "=", "int", "(", "np", ".", "ceil", "(", "len", "(", "vartagsplit", ")", "/", "3.0", ")", ")", "for", "vartagline", "in", "range", "(", "nvartaglines", ")", ":", "vartagslice", "=", "','", ".", "join", "(", 
"vartagsplit", "[", "vartagline", "*", "3", ":", "vartagline", "*", "3", "+", "3", "]", ")", "objinfodraw", ".", "text", "(", "(", "1650", ",", "225", "+", "vartagline", "*", "25", ")", ",", "vartagslice", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "135", ",", "54", ",", "0", ",", "255", ")", ")", "# object comments", "if", "'comments'", "in", "cpd", "and", "cpd", "[", "'comments'", "]", ":", "commentsplit", "=", "cpd", "[", "'comments'", "]", ".", "split", "(", "' '", ")", "# write 10 words per line", "ncommentlines", "=", "int", "(", "np", ".", "ceil", "(", "len", "(", "commentsplit", ")", "/", "10.0", ")", ")", "for", "commentline", "in", "range", "(", "ncommentlines", ")", ":", "commentslice", "=", "' '", ".", "join", "(", "commentsplit", "[", "commentline", "*", "10", ":", "commentline", "*", "10", "+", "10", "]", ")", "objinfodraw", ".", "text", "(", "(", "1650", ",", "325", "+", "commentline", "*", "25", ")", ",", "commentslice", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "# this handles JSON-ified checkplots returned by LCC server", "elif", "'objectcomments'", "in", "cpd", "and", "cpd", "[", "'objectcomments'", "]", ":", "commentsplit", "=", "cpd", "[", "'objectcomments'", "]", ".", "split", "(", "' '", ")", "# write 10 words per line", "ncommentlines", "=", "int", "(", "np", ".", "ceil", "(", "len", "(", "commentsplit", ")", "/", "10.0", ")", ")", "for", "commentline", "in", "range", "(", "ncommentlines", ")", ":", "commentslice", "=", "' '", ".", "join", "(", "commentsplit", "[", "commentline", "*", "10", ":", "commentline", "*", "10", "+", "10", "]", ")", "objinfodraw", ".", "text", "(", "(", "1650", ",", "325", "+", "commentline", "*", "25", ")", ",", "commentslice", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "0", ",", "255", ")", ")", "#######################################", "# row 1, cell 4: unphased light curve #", "#######################################", "if", "(", "cpd", "[", "'magseries'", "]", "and", "'plot'", "in", "cpd", "[", "'magseries'", "]", "and", "cpd", "[", "'magseries'", "]", "[", "'plot'", "]", ")", ":", "magseries", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "cpd", "[", "'magseries'", "]", "[", "'plot'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "magseries", ",", "(", "750", "*", "3", ",", "0", ")", ")", "# this handles JSON-ified checkplots from LCC server", "elif", "(", "'magseries'", "in", "cpd", "and", "isinstance", "(", "cpd", "[", "'magseries'", "]", ",", "str", ")", ")", ":", "magseries", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "cpd", "[", "'magseries'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "magseries", ",", "(", "750", "*", "3", ",", "0", ")", ")", "###############################", "# the rest of the rows in cpd #", "###############################", "for", "lspmethodind", ",", "lspmethod", "in", "enumerate", "(", "cplspmethods", ")", ":", "###############################", "# the periodogram comes first #", "###############################", "if", "(", "cpd", "[", "lspmethod", "]", "and", "cpd", "[", "lspmethod", "]", "[", "'periodogram'", "]", ")", ":", "pgram", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "cpd", "[", "lspmethod", "]", "[", "'periodogram'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "pgram", ",", "(", "0", ",", "480", "+", "480", 
"*", "lspmethodind", ")", ")", "#############################", "# best phased LC comes next #", "#############################", "if", "(", "cpd", "[", "lspmethod", "]", "and", "0", "in", "cpd", "[", "lspmethod", "]", "and", "cpd", "[", "lspmethod", "]", "[", "0", "]", ")", ":", "plc1", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "cpd", "[", "lspmethod", "]", "[", "0", "]", "[", "'plot'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "plc1", ",", "(", "750", ",", "480", "+", "480", "*", "lspmethodind", ")", ")", "# this handles JSON-ified checkplots from LCC server", "elif", "(", "cpd", "[", "lspmethod", "]", "and", "'phasedlc0'", "in", "cpd", "[", "lspmethod", "]", "and", "isinstance", "(", "cpd", "[", "lspmethod", "]", "[", "'phasedlc0'", "]", "[", "'plot'", "]", ",", "str", ")", ")", ":", "plc1", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "cpd", "[", "lspmethod", "]", "[", "'phasedlc0'", "]", "[", "'plot'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "plc1", ",", "(", "750", ",", "480", "+", "480", "*", "lspmethodind", ")", ")", "#################################", "# 2nd best phased LC comes next #", "#################################", "if", "(", "cpd", "[", "lspmethod", "]", "and", "1", "in", "cpd", "[", "lspmethod", "]", "and", "cpd", "[", "lspmethod", "]", "[", "1", "]", ")", ":", "plc2", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "cpd", "[", "lspmethod", "]", "[", "1", "]", "[", "'plot'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "plc2", ",", "(", "750", "*", "2", ",", "480", "+", "480", "*", "lspmethodind", ")", ")", "# this handles JSON-ified checkplots from LCC server", "elif", "(", "cpd", "[", "lspmethod", "]", "and", "'phasedlc1'", "in", "cpd", "[", "lspmethod", "]", "and", "isinstance", "(", "cpd", "[", "lspmethod", "]", "[", "'phasedlc1'", "]", "[", "'plot'", "]", ",", "str", ")", ")", ":", "plc2", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "cpd", "[", "lspmethod", "]", "[", "'phasedlc1'", "]", "[", "'plot'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "plc2", ",", "(", "750", "*", "2", ",", "480", "+", "480", "*", "lspmethodind", ")", ")", "#################################", "# 3rd best phased LC comes next #", "#################################", "if", "(", "cpd", "[", "lspmethod", "]", "and", "2", "in", "cpd", "[", "lspmethod", "]", "and", "cpd", "[", "lspmethod", "]", "[", "2", "]", ")", ":", "plc3", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "cpd", "[", "lspmethod", "]", "[", "2", "]", "[", "'plot'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "plc3", ",", "(", "750", "*", "3", ",", "480", "+", "480", "*", "lspmethodind", ")", ")", "# this handles JSON-ified checkplots from LCC server", "elif", "(", "cpd", "[", "lspmethod", "]", "and", "'phasedlc2'", "in", "cpd", "[", "lspmethod", "]", "and", "isinstance", "(", "cpd", "[", "lspmethod", "]", "[", "'phasedlc2'", "]", "[", "'plot'", "]", ",", "str", ")", ")", ":", "plc3", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "cpd", "[", "lspmethod", "]", "[", "'phasedlc2'", "]", "[", "'plot'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "plc3", ",", "(", "750", "*", "3", ",", "480", "+", "480", "*", "lspmethodind", ")", ")", "################################", "## ALL DONE WITH 
BUILDING PNG ##", "################################", "#########################", "# add in any extra rows #", "#########################", "# from the keyword arguments", "if", "erows", ">", "0", ":", "for", "erowind", ",", "erow", "in", "enumerate", "(", "extrarows", ")", ":", "# make sure we never go above 4 plots in a row", "for", "ecolind", ",", "ecol", "in", "enumerate", "(", "erow", "[", ":", "4", "]", ")", ":", "eplot", "=", "Image", ".", "open", "(", "ecol", ")", "eplotresized", "=", "eplot", ".", "resize", "(", "(", "750", ",", "480", ")", ",", "Image", ".", "ANTIALIAS", ")", "outimg", ".", "paste", "(", "eplotresized", ",", "(", "750", "*", "ecolind", ",", "(", "cprows", "+", "1", ")", "*", "480", "+", "480", "*", "erowind", ")", ")", "# from the checkplotdict", "if", "cpderows", ">", "0", ":", "for", "cpderowind", ",", "cpderow", "in", "enumerate", "(", "cpd", "[", "'externalplots'", "]", ")", ":", "# make sure we never go above 4 plots in a row", "for", "cpdecolind", ",", "cpdecol", "in", "enumerate", "(", "cpderow", "[", ":", "4", "]", ")", ":", "cpdeplot", "=", "Image", ".", "open", "(", "cpdecol", ")", "cpdeplotresized", "=", "cpdeplot", ".", "resize", "(", "(", "750", ",", "480", ")", ",", "Image", ".", "ANTIALIAS", ")", "outimg", ".", "paste", "(", "cpdeplotresized", ",", "(", "750", "*", "cpdecolind", ",", "(", "cprows", "+", "1", ")", "*", "480", "+", "(", "erows", "*", "480", ")", "+", "480", "*", "cpderowind", ")", ")", "# from neighbors:", "if", "nbrrows", ">", "0", ":", "# we have four tiles", "# tile 1: neighbor objectid, ra, decl, distance, unphased LC", "# tile 2: phased LC for gls", "# tile 3: phased LC for pdm", "# tile 4: phased LC for any other period finding method", "# the priority is like so: ['bls','mav','aov','win']", "for", "nbrind", ",", "nbr", "in", "enumerate", "(", "cpd", "[", "'neighbors'", "]", ")", ":", "# figure out which period finding methods are available for this", "# neighbor. 
make sure to match the ones from the actual object in", "# order of priority: 'gls','pdm','bls','aov','mav','acf','win'", "nbrlspmethods", "=", "[", "]", "for", "lspmethod", "in", "cpd", "[", "'pfmethods'", "]", ":", "if", "lspmethod", "in", "nbr", ":", "nbrlspmethods", ".", "append", "(", "lspmethod", ")", "# restrict to top three in priority", "nbrlspmethods", "=", "nbrlspmethods", "[", ":", "3", "]", "try", ":", "# first panel: neighbor objectid, ra, decl, distance, unphased", "# LC", "nbrlc", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "nbr", "[", "'magseries'", "]", "[", "'plot'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "nbrlc", ",", "(", "750", "*", "0", ",", "(", "cprows", "+", "1", ")", "*", "480", "+", "(", "erows", "*", "480", ")", "+", "(", "cpderows", "*", "480", ")", "+", "480", "*", "nbrind", ")", ")", "# overlay the objectinfo", "objinfodraw", ".", "text", "(", "(", "98", ",", "(", "cprows", "+", "1", ")", "*", "480", "+", "(", "erows", "*", "480", ")", "+", "(", "cpderows", "*", "480", ")", "+", "480", "*", "nbrind", "+", "15", ")", ",", "(", "'N%s: %s'", "%", "(", "nbrind", "+", "1", ",", "nbr", "[", "'objectid'", "]", ")", ")", ",", "font", "=", "cpfontlarge", ",", "fill", "=", "(", "0", ",", "0", ",", "255", ",", "255", ")", ")", "# overlay the objectinfo", "objinfodraw", ".", "text", "(", "(", "98", ",", "(", "cprows", "+", "1", ")", "*", "480", "+", "(", "erows", "*", "480", ")", "+", "(", "cpderows", "*", "480", ")", "+", "480", "*", "nbrind", "+", "50", ")", ",", "(", "'(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec'", "%", "(", "nbr", "[", "'ra'", "]", ",", "nbr", "[", "'decl'", "]", ",", "nbr", "[", "'dist'", "]", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "255", ",", "255", ")", ")", "# second panel: phased LC for gls", "lsp1lc", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "nbr", "[", "nbrlspmethods", "[", "0", "]", "]", "[", "0", "]", "[", "'plot'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "lsp1lc", ",", "(", "750", "*", "1", ",", "(", "cprows", "+", "1", ")", "*", "480", "+", "(", "erows", "*", "480", ")", "+", "(", "cpderows", "*", "480", ")", "+", "480", "*", "nbrind", ")", ")", "# second panel: phased LC for gls", "lsp2lc", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "nbr", "[", "nbrlspmethods", "[", "1", "]", "]", "[", "0", "]", "[", "'plot'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "lsp2lc", ",", "(", "750", "*", "2", ",", "(", "cprows", "+", "1", ")", "*", "480", "+", "(", "erows", "*", "480", ")", "+", "(", "cpderows", "*", "480", ")", "+", "480", "*", "nbrind", ")", ")", "# second panel: phased LC for gls", "lsp3lc", "=", "Image", ".", "open", "(", "_base64_to_file", "(", "nbr", "[", "nbrlspmethods", "[", "2", "]", "]", "[", "0", "]", "[", "'plot'", "]", ",", "None", ",", "writetostrio", "=", "True", ")", ")", "outimg", ".", "paste", "(", "lsp3lc", ",", "(", "750", "*", "3", ",", "(", "cprows", "+", "1", ")", "*", "480", "+", "(", "erows", "*", "480", ")", "+", "(", "cpderows", "*", "480", ")", "+", "480", "*", "nbrind", ")", ")", "except", "Exception", "as", "e", ":", "LOGERROR", "(", "'neighbor %s does not have a magseries plot, '", "'measurements are probably all nan'", "%", "nbr", "[", "'objectid'", "]", ")", "# overlay the objectinfo", "objinfodraw", ".", "text", "(", "(", "98", ",", "(", "cprows", "+", "1", ")", "*", 
"480", "+", "(", "erows", "*", "480", ")", "+", "(", "cpderows", "*", "480", ")", "+", "480", "*", "nbrind", "+", "15", ")", ",", "(", "'N%s: %s'", "%", "(", "nbrind", "+", "1", ",", "nbr", "[", "'objectid'", "]", ")", ")", ",", "font", "=", "cpfontlarge", ",", "fill", "=", "(", "0", ",", "0", ",", "255", ",", "255", ")", ")", "if", "'ra'", "in", "nbr", "and", "'decl'", "in", "nbr", "and", "'dist'", "in", "nbr", ":", "# overlay the objectinfo", "objinfodraw", ".", "text", "(", "(", "98", ",", "(", "cprows", "+", "1", ")", "*", "480", "+", "(", "erows", "*", "480", ")", "+", "(", "cpderows", "*", "480", ")", "+", "480", "*", "nbrind", "+", "50", ")", ",", "(", "'(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec'", "%", "(", "nbr", "[", "'ra'", "]", ",", "nbr", "[", "'decl'", "]", ",", "nbr", "[", "'dist'", "]", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "255", ",", "255", ")", ")", "elif", "'objectinfo'", "in", "nbr", ":", "# overlay the objectinfo", "objinfodraw", ".", "text", "(", "(", "98", ",", "(", "cprows", "+", "1", ")", "*", "480", "+", "(", "erows", "*", "480", ")", "+", "(", "cpderows", "*", "480", ")", "+", "480", "*", "nbrind", "+", "50", ")", ",", "(", "'(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec'", "%", "(", "nbr", "[", "'objectinfo'", "]", "[", "'ra'", "]", ",", "nbr", "[", "'objectinfo'", "]", "[", "'decl'", "]", ",", "nbr", "[", "'objectinfo'", "]", "[", "'distarcsec'", "]", ")", ")", ",", "font", "=", "cpfontnormal", ",", "fill", "=", "(", "0", ",", "0", ",", "255", ",", "255", ")", ")", "#####################", "## WRITE FINAL PNG ##", "#####################", "# check if the output filename is actually an instance of StringIO", "if", "sys", ".", "version_info", "[", ":", "2", "]", "<", "(", "3", ",", "0", ")", ":", "is_strio", "=", "isinstance", "(", "outfile", ",", "cStringIO", ".", "InputType", ")", "else", ":", "is_strio", "=", "isinstance", "(", "outfile", ",", "StrIO", ")", "if", "not", "is_strio", ":", "# check if we've stupidly copied over the same filename as the input", "# pickle to expected output file", "if", "outfile", ".", "endswith", "(", "'pkl'", ")", ":", "LOGWARNING", "(", "'expected output PNG filename ends with .pkl, '", "'changed to .png'", ")", "outfile", "=", "outfile", ".", "replace", "(", "'.pkl'", ",", "'.png'", ")", "outimg", ".", "save", "(", "outfile", ",", "format", "=", "'PNG'", ",", "optimize", "=", "True", ")", "if", "not", "is_strio", ":", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "LOGINFO", "(", "'checkplot pickle -> checkplot PNG: %s OK'", "%", "outfile", ")", "return", "outfile", "else", ":", "LOGERROR", "(", "'failed to write checkplot PNG'", ")", "return", "None", "else", ":", "LOGINFO", "(", "'checkplot pickle -> StringIO instance OK'", ")", "return", "outfile" ]
This reads the checkplot pickle or dict provided, and writes out a PNG.

The output PNG contains most of the information in the input checkplot
pickle/dict, and can be used to quickly glance through the highlights
instead of having to review the checkplot with the `checkplotserver`
webapp. This is useful for exporting read-only views of finalized
checkplots from the `checkplotserver` as well, to share them with other
people.

The PNG has 4 x N tiles::

    [ finder ] [ objectinfo ] [ varinfo/comments ] [ unphased LC ]
    [ periodogram1 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
    [ periodogram2 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
    .
    .
    [ periodogramN ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]

for N independent period-finding methods producing:

- periodogram1,2,3...N: the periodograms from each method

- phased LC P1,P2,P3: the phased lightcurves using the best 3 peaks in
  each periodogram

Parameters
----------

checkplotin : dict or str
    This is either a checkplotdict produced by
    :py:func:`astrobase.checkplot.pkl.checkplot_dict` or a checkplot
    pickle file produced by
    :py:func:`astrobase.checkplot.pkl.checkplot_pickle`.

outfile : str
    The filename of the output PNG file to create.

extrarows : list of tuples
    This is a list of 4-element tuples containing paths to PNG files that
    will be added to the end of the rows generated from the checkplotin
    pickle/dict. Each tuple represents a row in the final output PNG file.
    If there are fewer than 4 elements per tuple, the missing elements
    will be filled in with white-space. If there are more than 4 elements
    per tuple, only the first four will be used.

    The purpose of this kwarg is to incorporate periodograms and phased LC
    plots (in the form of PNGs) generated from an external period-finding
    function or program (like VARTOOLS) to allow for comparison with
    astrobase results.

    NOTE: the PNG files specified in `extrarows` here will be added to
    those already present in the input checkplotdict['externalplots'] if
    that is not None because you passed in a similar list of external
    plots to the :py:func:`astrobase.checkplot.pkl.checkplot_pickle`
    function earlier. In this case, `extrarows` can be used to add even
    more external plots if desired.

    Each external plot PNG will be resized to 750 x 480 pixels to fit into
    an output image cell.

    By convention, each 4-element tuple should contain:

    - a periodogram PNG
    - phased LC PNG with 1st best peak period from periodogram
    - phased LC PNG with 2nd best peak period from periodogram
    - phased LC PNG with 3rd best peak period from periodogram

    Example of extrarows::

        [('/path/to/external/bls-periodogram.png',
          '/path/to/external/bls-phasedlc-plot-bestpeak.png',
          '/path/to/external/bls-phasedlc-plot-peak2.png',
          '/path/to/external/bls-phasedlc-plot-peak3.png'),
         ('/path/to/external/pdm-periodogram.png',
          '/path/to/external/pdm-phasedlc-plot-bestpeak.png',
          '/path/to/external/pdm-phasedlc-plot-peak2.png',
          '/path/to/external/pdm-phasedlc-plot-peak3.png'),
         ...]

Returns
-------

str
    The absolute path to the generated checkplot PNG.
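A minimal usage sketch for the function documented above. The import path follows the docstring's own references but may differ between astrobase versions, and the pickle/PNG filenames and external-plot paths are hypothetical:

    from astrobase.checkplot import checkplot_pickle_to_png  # import path may vary by version

    # hypothetical input checkplot pickle -> output PNG, with one extra row
    # of externally generated plots appended at the bottom
    pngpath = checkplot_pickle_to_png(
        'checkplot-HAT-123-0001234.pkl',
        'checkplot-HAT-123-0001234.png',
        extrarows=[('/path/to/external/bls-periodogram.png',
                    '/path/to/external/bls-phasedlc-plot-bestpeak.png',
                    '/path/to/external/bls-phasedlc-plot-peak2.png',
                    '/path/to/external/bls-phasedlc-plot-peak3.png')]
    )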
[ "This", "reads", "the", "checkplot", "pickle", "or", "dict", "provided", "and", "writes", "out", "a", "PNG", "." ]
python
valid
cons3rt/pycons3rt
pycons3rt/awsapi/metadata.py
https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/awsapi/metadata.py#L44-L79
def is_aws():
    """Determines if this system is on AWS

    :return: bool True if this system is running on AWS
    """
    log = logging.getLogger(mod_logger + '.is_aws')
    log.info('Querying AWS meta data URL: {u}'.format(u=metadata_url))

    # Re-try logic for checking the AWS meta data URL
    retry_time_sec = 10
    max_num_tries = 10
    attempt_num = 1

    while True:
        if attempt_num > max_num_tries:
            log.info('Unable to query the AWS meta data URL, this system is NOT running on AWS')
            return False

        # Query the AWS meta data URL
        try:
            response = urllib.urlopen(metadata_url)
        except (IOError, OSError) as ex:
            log.warn('Failed to query the AWS meta data URL\n{e}'.format(e=str(ex)))
            attempt_num += 1
            time.sleep(retry_time_sec)
            continue

        # Check the HTTP response code
        if response.getcode() == 200:
            log.info('AWS metadata service returned code 200, this system is running on AWS')
            return True
        else:
            log.warn('AWS metadata service returned code: {c}'.format(c=response.getcode()))
            attempt_num += 1
            time.sleep(retry_time_sec)
            continue
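The function above targets Python 2 (`urllib.urlopen`, `log.warn`), and `mod_logger`/`metadata_url` are module-level names defined elsewhere in metadata.py. A minimal Python 3 sketch of the same retry pattern, for illustration only; the metadata endpoint constant and the 5-second per-attempt timeout are assumptions, not taken from the original module:

    import logging
    import time
    from urllib.request import urlopen
    from urllib.error import URLError

    # assumed: the standard EC2 instance metadata endpoint; the original
    # module defines its own metadata_url elsewhere
    METADATA_URL = 'http://169.254.169.254/latest/meta-data/'

    def is_aws_py3(max_num_tries=10, retry_time_sec=10):
        log = logging.getLogger(__name__)
        for attempt_num in range(1, max_num_tries + 1):
            try:
                # short timeout so non-AWS hosts fail fast on each attempt
                response = urlopen(METADATA_URL, timeout=5.0)
            except (URLError, OSError) as ex:
                log.warning('attempt %s: metadata URL query failed: %s',
                            attempt_num, ex)
                time.sleep(retry_time_sec)
                continue
            if response.getcode() == 200:
                return True
            log.warning('attempt %s: metadata service returned %s',
                        attempt_num, response.getcode())
            time.sleep(retry_time_sec)
        return False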
[ "def", "is_aws", "(", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "mod_logger", "+", "'.is_aws'", ")", "log", ".", "info", "(", "'Querying AWS meta data URL: {u}'", ".", "format", "(", "u", "=", "metadata_url", ")", ")", "# Re-try logic for checking the AWS meta data URL", "retry_time_sec", "=", "10", "max_num_tries", "=", "10", "attempt_num", "=", "1", "while", "True", ":", "if", "attempt_num", ">", "max_num_tries", ":", "log", ".", "info", "(", "'Unable to query the AWS meta data URL, this system is NOT running on AWS\\n{e}'", ")", "return", "False", "# Query the AWS meta data URL", "try", ":", "response", "=", "urllib", ".", "urlopen", "(", "metadata_url", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "ex", ":", "log", ".", "warn", "(", "'Failed to query the AWS meta data URL\\n{e}'", ".", "format", "(", "e", "=", "str", "(", "ex", ")", ")", ")", "attempt_num", "+=", "1", "time", ".", "sleep", "(", "retry_time_sec", ")", "continue", "# Check the code", "if", "response", ".", "getcode", "(", ")", "==", "200", ":", "log", ".", "info", "(", "'AWS metadata service returned code 200, this system is running on AWS'", ")", "return", "True", "else", ":", "log", ".", "warn", "(", "'AWS metadata service returned code: {c}'", ".", "format", "(", "c", "=", "response", ".", "getcode", "(", ")", ")", ")", "attempt_num", "+=", "1", "time", ".", "sleep", "(", "retry_time_sec", ")", "continue" ]
Determines if this system is on AWS

:return: bool True if this system is running on AWS
[ "Determines", "if", "this", "system", "is", "on", "AWS" ]
python
train
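The record's code is Python 2 (`urllib.urlopen`, `log.warn`). A rough Python 3 sketch of the same bounded-retry pattern, assuming a `metadata_url` constant like the module-level one referenced above:

# Python 3 sketch of the same retry loop; metadata_url is assumed to be
# defined, e.g. 'http://169.254.169.254/latest/meta-data/'.
import time
import urllib.error
import urllib.request

def is_aws_py3(metadata_url, max_num_tries=10, retry_time_sec=10):
    for _ in range(max_num_tries):
        try:
            with urllib.request.urlopen(metadata_url, timeout=5) as response:
                if response.getcode() == 200:
                    return True
        except (urllib.error.URLError, OSError):
            pass  # metadata service unreachable, retry after a pause
        time.sleep(retry_time_sec)
    return False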
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1877-L2047
def xml(self, attribs = None,elements = None, skipchildren = False):
    """Serialises the FoLiA element and all its contents to XML.

    Arguments are mostly for internal use.

    Returns:
        an lxml.etree.Element

    See also:
        :meth:`AbstractElement.xmlstring` - for direct string output
    """
    E = ElementMaker(namespace=NSFOLIA,nsmap={None: NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})

    if not attribs: attribs = {}
    if not elements: elements = []

    if self.id:
        attribs['{http://www.w3.org/XML/1998/namespace}id'] = self.id

    #Some attributes only need to be added if they are not the same as what's already set in the declaration
    if not isinstance(self, AbstractAnnotationLayer):
        if '{' + NSFOLIA + '}set' not in attribs: #do not override if overloaded function already set it
            try:
                if self.set:
                    if not self.ANNOTATIONTYPE in self.doc.annotationdefaults or len(self.doc.annotationdefaults[self.ANNOTATIONTYPE]) != 1 or list(self.doc.annotationdefaults[self.ANNOTATIONTYPE].keys())[0] != self.set:
                        if self.set != None:
                            if self.ANNOTATIONTYPE in self.doc.set_alias and self.set in self.doc.set_alias[self.ANNOTATIONTYPE]:
                                attribs['{' + NSFOLIA + '}set'] = self.doc.set_alias[self.ANNOTATIONTYPE][self.set] #use alias instead
                            else:
                                attribs['{' + NSFOLIA + '}set'] = self.set
            except AttributeError:
                pass

        if '{' + NSFOLIA + '}class' not in attribs: #do not override if caller already set it
            try:
                if self.cls:
                    attribs['{' + NSFOLIA + '}class'] = self.cls
            except AttributeError:
                pass

        if '{' + NSFOLIA + '}annotator' not in attribs: #do not override if caller already set it
            try:
                if self.annotator and ((not (self.ANNOTATIONTYPE in self.doc.annotationdefaults)) or (not ( 'annotator' in self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set])) or (self.annotator != self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set]['annotator'])):
                    attribs['{' + NSFOLIA + '}annotator'] = self.annotator
                if self.annotatortype and ((not (self.ANNOTATIONTYPE in self.doc.annotationdefaults)) or (not ('annotatortype' in self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set])) or (self.annotatortype != self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set]['annotatortype'])):
                    if self.annotatortype == AnnotatorType.AUTO:
                        attribs['{' + NSFOLIA + '}annotatortype'] = 'auto'
                    elif self.annotatortype == AnnotatorType.MANUAL:
                        attribs['{' + NSFOLIA + '}annotatortype'] = 'manual'
            except AttributeError:
                pass

        if '{' + NSFOLIA + '}confidence' not in attribs: #do not override if caller already set it
            if self.confidence:
                attribs['{' + NSFOLIA + '}confidence'] = str(self.confidence)

        if '{' + NSFOLIA + '}n' not in attribs: #do not override if caller already set it
            if self.n:
                attribs['{' + NSFOLIA + '}n'] = str(self.n)

        if '{' + NSFOLIA + '}auth' not in attribs: #do not override if caller already set it
            try:
                if not self.AUTH or not self.auth: #(former is static, latter isn't)
                    attribs['{' + NSFOLIA + '}auth'] = 'no'
            except AttributeError:
                pass

        if '{' + NSFOLIA + '}datetime' not in attribs: #do not override if caller already set it
            if self.datetime and ((not (self.ANNOTATIONTYPE in self.doc.annotationdefaults)) or (not ( 'datetime' in self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set])) or (self.datetime != self.doc.annotationdefaults[self.ANNOTATIONTYPE][self.set]['datetime'])):
                attribs['{' + NSFOLIA + '}datetime'] = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")

        if '{' + NSFOLIA + '}src' not in attribs: #do not override if caller already set it
            if self.src:
                attribs['{' + NSFOLIA + '}src'] = self.src

        if '{' + NSFOLIA + '}speaker' not in attribs: #do not override if caller already set it
            if self.speaker:
                attribs['{' + NSFOLIA + '}speaker'] = self.speaker

        if '{' + NSFOLIA + '}begintime' not in attribs: #do not override if caller already set it
            if self.begintime:
                attribs['{' + NSFOLIA + '}begintime'] = "%02d:%02d:%02d.%03d" % self.begintime

        if '{' + NSFOLIA + '}endtime' not in attribs: #do not override if caller already set it
            if self.endtime:
                attribs['{' + NSFOLIA + '}endtime'] = "%02d:%02d:%02d.%03d" % self.endtime

        if '{' + NSFOLIA + '}textclass' not in attribs: #do not override if caller already set it
            if self.textclass and self.textclass != "current":
                attribs['{' + NSFOLIA + '}textclass'] = self.textclass

        if '{' + NSFOLIA + '}metadata' not in attribs: #do not override if caller already set it
            if self.metadata:
                attribs['{' + NSFOLIA + '}metadata'] = self.metadata

    if self.XLINK:
        if self.href:
            attribs['{http://www.w3.org/1999/xlink}href'] = self.href
            if not self.xlinktype:
                attribs['{http://www.w3.org/1999/xlink}type'] = "simple"
        if self.xlinktype:
            attribs['{http://www.w3.org/1999/xlink}type'] = self.xlinktype
        if self.xlinklabel:
            attribs['{http://www.w3.org/1999/xlink}label'] = self.xlinklabel
        if self.xlinkrole:
            attribs['{http://www.w3.org/1999/xlink}role'] = self.xlinkrole
        if self.xlinkshow:
            attribs['{http://www.w3.org/1999/xlink}show'] = self.xlinkshow
        if self.xlinktitle:
            attribs['{http://www.w3.org/1999/xlink}title'] = self.xlinktitle

    omitchildren = []

    #Are there predetermined Features in ACCEPTED_DATA?
    for c in self.ACCEPTED_DATA:
        if issubclass(c, Feature) and c.SUBSET:
            #Do we have any of those?
            for c2 in self.data:
                if c2.__class__ is c and c.SUBSET == c2.SUBSET and c2.cls:
                    #Yes, serialize them as attributes
                    attribs[c2.SUBSET] = c2.cls
                    omitchildren.append(c2) #and skip them as elements
                    break #only one

    e = makeelement(E, '{' + NSFOLIA + '}' + self.XMLTAG, **attribs)

    if not skipchildren and self.data:
        #append children,
        # we want make sure that text elements are in the right order, 'current' class first
        # so we first put them in a list
        textelements = []
        otherelements = []
        for child in self:
            if isinstance(child, TextContent):
                if child.cls == 'current':
                    textelements.insert(0, child)
                else:
                    textelements.append(child)
            elif not child in omitchildren:
                otherelements.append(child)
        for child in textelements+otherelements:
            if (self.TEXTCONTAINER or self.PHONCONTAINER) and isstring(child):
                if len(e) == 0:
                    if e.text:
                        e.text += child
                    else:
                        e.text = child
                else:
                    #add to tail of last child
                    if e[-1].tail:
                        e[-1].tail += child
                    else:
                        e[-1].tail = child
            else:
                xml = child.xml() #may return None in rare occassions, meaning we wan to skip
                if not xml is None:
                    e.append(xml)

    if elements: #extra elements
        for e2 in elements:
            if isinstance(e2, str) or (sys.version < '3' and isinstance(e2, unicode)):
                if e.text is None:
                    e.text = e2
                else:
                    e.text += e2
            else:
                e.append(e2)

    return e
[ "def", "xml", "(", "self", ",", "attribs", "=", "None", ",", "elements", "=", "None", ",", "skipchildren", "=", "False", ")", ":", "E", "=", "ElementMaker", "(", "namespace", "=", "NSFOLIA", ",", "nsmap", "=", "{", "None", ":", "NSFOLIA", ",", "'xml'", ":", "\"http://www.w3.org/XML/1998/namespace\"", "}", ")", "if", "not", "attribs", ":", "attribs", "=", "{", "}", "if", "not", "elements", ":", "elements", "=", "[", "]", "if", "self", ".", "id", ":", "attribs", "[", "'{http://www.w3.org/XML/1998/namespace}id'", "]", "=", "self", ".", "id", "#Some attributes only need to be added if they are not the same as what's already set in the declaration", "if", "not", "isinstance", "(", "self", ",", "AbstractAnnotationLayer", ")", ":", "if", "'{'", "+", "NSFOLIA", "+", "'}set'", "not", "in", "attribs", ":", "#do not override if overloaded function already set it", "try", ":", "if", "self", ".", "set", ":", "if", "not", "self", ".", "ANNOTATIONTYPE", "in", "self", ".", "doc", ".", "annotationdefaults", "or", "len", "(", "self", ".", "doc", ".", "annotationdefaults", "[", "self", ".", "ANNOTATIONTYPE", "]", ")", "!=", "1", "or", "list", "(", "self", ".", "doc", ".", "annotationdefaults", "[", "self", ".", "ANNOTATIONTYPE", "]", ".", "keys", "(", ")", ")", "[", "0", "]", "!=", "self", ".", "set", ":", "if", "self", ".", "set", "!=", "None", ":", "if", "self", ".", "ANNOTATIONTYPE", "in", "self", ".", "doc", ".", "set_alias", "and", "self", ".", "set", "in", "self", ".", "doc", ".", "set_alias", "[", "self", ".", "ANNOTATIONTYPE", "]", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}set'", "]", "=", "self", ".", "doc", ".", "set_alias", "[", "self", ".", "ANNOTATIONTYPE", "]", "[", "self", ".", "set", "]", "#use alias instead", "else", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}set'", "]", "=", "self", ".", "set", "except", "AttributeError", ":", "pass", "if", "'{'", "+", "NSFOLIA", "+", "'}class'", "not", "in", "attribs", ":", "#do not override if caller already set it", "try", ":", "if", "self", ".", "cls", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}class'", "]", "=", "self", ".", "cls", "except", "AttributeError", ":", "pass", "if", "'{'", "+", "NSFOLIA", "+", "'}annotator'", "not", "in", "attribs", ":", "#do not override if caller already set it", "try", ":", "if", "self", ".", "annotator", "and", "(", "(", "not", "(", "self", ".", "ANNOTATIONTYPE", "in", "self", ".", "doc", ".", "annotationdefaults", ")", ")", "or", "(", "not", "(", "'annotator'", "in", "self", ".", "doc", ".", "annotationdefaults", "[", "self", ".", "ANNOTATIONTYPE", "]", "[", "self", ".", "set", "]", ")", ")", "or", "(", "self", ".", "annotator", "!=", "self", ".", "doc", ".", "annotationdefaults", "[", "self", ".", "ANNOTATIONTYPE", "]", "[", "self", ".", "set", "]", "[", "'annotator'", "]", ")", ")", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}annotator'", "]", "=", "self", ".", "annotator", "if", "self", ".", "annotatortype", "and", "(", "(", "not", "(", "self", ".", "ANNOTATIONTYPE", "in", "self", ".", "doc", ".", "annotationdefaults", ")", ")", "or", "(", "not", "(", "'annotatortype'", "in", "self", ".", "doc", ".", "annotationdefaults", "[", "self", ".", "ANNOTATIONTYPE", "]", "[", "self", ".", "set", "]", ")", ")", "or", "(", "self", ".", "annotatortype", "!=", "self", ".", "doc", ".", "annotationdefaults", "[", "self", ".", "ANNOTATIONTYPE", "]", "[", "self", ".", "set", "]", "[", "'annotatortype'", "]", ")", ")", ":", "if", "self", ".", "annotatortype", "==", "AnnotatorType", ".", "AUTO", 
":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}annotatortype'", "]", "=", "'auto'", "elif", "self", ".", "annotatortype", "==", "AnnotatorType", ".", "MANUAL", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}annotatortype'", "]", "=", "'manual'", "except", "AttributeError", ":", "pass", "if", "'{'", "+", "NSFOLIA", "+", "'}confidence'", "not", "in", "attribs", ":", "#do not override if caller already set it", "if", "self", ".", "confidence", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}confidence'", "]", "=", "str", "(", "self", ".", "confidence", ")", "if", "'{'", "+", "NSFOLIA", "+", "'}n'", "not", "in", "attribs", ":", "#do not override if caller already set it", "if", "self", ".", "n", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}n'", "]", "=", "str", "(", "self", ".", "n", ")", "if", "'{'", "+", "NSFOLIA", "+", "'}auth'", "not", "in", "attribs", ":", "#do not override if caller already set it", "try", ":", "if", "not", "self", ".", "AUTH", "or", "not", "self", ".", "auth", ":", "#(former is static, latter isn't)", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}auth'", "]", "=", "'no'", "except", "AttributeError", ":", "pass", "if", "'{'", "+", "NSFOLIA", "+", "'}datetime'", "not", "in", "attribs", ":", "#do not override if caller already set it", "if", "self", ".", "datetime", "and", "(", "(", "not", "(", "self", ".", "ANNOTATIONTYPE", "in", "self", ".", "doc", ".", "annotationdefaults", ")", ")", "or", "(", "not", "(", "'datetime'", "in", "self", ".", "doc", ".", "annotationdefaults", "[", "self", ".", "ANNOTATIONTYPE", "]", "[", "self", ".", "set", "]", ")", ")", "or", "(", "self", ".", "datetime", "!=", "self", ".", "doc", ".", "annotationdefaults", "[", "self", ".", "ANNOTATIONTYPE", "]", "[", "self", ".", "set", "]", "[", "'datetime'", "]", ")", ")", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}datetime'", "]", "=", "self", ".", "datetime", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%S\"", ")", "if", "'{'", "+", "NSFOLIA", "+", "'}src'", "not", "in", "attribs", ":", "#do not override if caller already set it", "if", "self", ".", "src", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}src'", "]", "=", "self", ".", "src", "if", "'{'", "+", "NSFOLIA", "+", "'}speaker'", "not", "in", "attribs", ":", "#do not override if caller already set it", "if", "self", ".", "speaker", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}speaker'", "]", "=", "self", ".", "speaker", "if", "'{'", "+", "NSFOLIA", "+", "'}begintime'", "not", "in", "attribs", ":", "#do not override if caller already set it", "if", "self", ".", "begintime", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}begintime'", "]", "=", "\"%02d:%02d:%02d.%03d\"", "%", "self", ".", "begintime", "if", "'{'", "+", "NSFOLIA", "+", "'}endtime'", "not", "in", "attribs", ":", "#do not override if caller already set it", "if", "self", ".", "endtime", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}endtime'", "]", "=", "\"%02d:%02d:%02d.%03d\"", "%", "self", ".", "endtime", "if", "'{'", "+", "NSFOLIA", "+", "'}textclass'", "not", "in", "attribs", ":", "#do not override if caller already set it", "if", "self", ".", "textclass", "and", "self", ".", "textclass", "!=", "\"current\"", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}textclass'", "]", "=", "self", ".", "textclass", "if", "'{'", "+", "NSFOLIA", "+", "'}metadata'", "not", "in", "attribs", ":", "#do not override if caller already set it", "if", "self", ".", "metadata", ":", "attribs", "[", "'{'", "+", "NSFOLIA", "+", "'}metadata'", "]", 
"=", "self", ".", "metadata", "if", "self", ".", "XLINK", ":", "if", "self", ".", "href", ":", "attribs", "[", "'{http://www.w3.org/1999/xlink}href'", "]", "=", "self", ".", "href", "if", "not", "self", ".", "xlinktype", ":", "attribs", "[", "'{http://www.w3.org/1999/xlink}type'", "]", "=", "\"simple\"", "if", "self", ".", "xlinktype", ":", "attribs", "[", "'{http://www.w3.org/1999/xlink}type'", "]", "=", "self", ".", "xlinktype", "if", "self", ".", "xlinklabel", ":", "attribs", "[", "'{http://www.w3.org/1999/xlink}label'", "]", "=", "self", ".", "xlinklabel", "if", "self", ".", "xlinkrole", ":", "attribs", "[", "'{http://www.w3.org/1999/xlink}role'", "]", "=", "self", ".", "xlinkrole", "if", "self", ".", "xlinkshow", ":", "attribs", "[", "'{http://www.w3.org/1999/xlink}show'", "]", "=", "self", ".", "xlinkshow", "if", "self", ".", "xlinktitle", ":", "attribs", "[", "'{http://www.w3.org/1999/xlink}title'", "]", "=", "self", ".", "xlinktitle", "omitchildren", "=", "[", "]", "#Are there predetermined Features in ACCEPTED_DATA?", "for", "c", "in", "self", ".", "ACCEPTED_DATA", ":", "if", "issubclass", "(", "c", ",", "Feature", ")", "and", "c", ".", "SUBSET", ":", "#Do we have any of those?", "for", "c2", "in", "self", ".", "data", ":", "if", "c2", ".", "__class__", "is", "c", "and", "c", ".", "SUBSET", "==", "c2", ".", "SUBSET", "and", "c2", ".", "cls", ":", "#Yes, serialize them as attributes", "attribs", "[", "c2", ".", "SUBSET", "]", "=", "c2", ".", "cls", "omitchildren", ".", "append", "(", "c2", ")", "#and skip them as elements", "break", "#only one", "e", "=", "makeelement", "(", "E", ",", "'{'", "+", "NSFOLIA", "+", "'}'", "+", "self", ".", "XMLTAG", ",", "*", "*", "attribs", ")", "if", "not", "skipchildren", "and", "self", ".", "data", ":", "#append children,", "# we want make sure that text elements are in the right order, 'current' class first", "# so we first put them in a list", "textelements", "=", "[", "]", "otherelements", "=", "[", "]", "for", "child", "in", "self", ":", "if", "isinstance", "(", "child", ",", "TextContent", ")", ":", "if", "child", ".", "cls", "==", "'current'", ":", "textelements", ".", "insert", "(", "0", ",", "child", ")", "else", ":", "textelements", ".", "append", "(", "child", ")", "elif", "not", "child", "in", "omitchildren", ":", "otherelements", ".", "append", "(", "child", ")", "for", "child", "in", "textelements", "+", "otherelements", ":", "if", "(", "self", ".", "TEXTCONTAINER", "or", "self", ".", "PHONCONTAINER", ")", "and", "isstring", "(", "child", ")", ":", "if", "len", "(", "e", ")", "==", "0", ":", "if", "e", ".", "text", ":", "e", ".", "text", "+=", "child", "else", ":", "e", ".", "text", "=", "child", "else", ":", "#add to tail of last child", "if", "e", "[", "-", "1", "]", ".", "tail", ":", "e", "[", "-", "1", "]", ".", "tail", "+=", "child", "else", ":", "e", "[", "-", "1", "]", ".", "tail", "=", "child", "else", ":", "xml", "=", "child", ".", "xml", "(", ")", "#may return None in rare occassions, meaning we wan to skip", "if", "not", "xml", "is", "None", ":", "e", ".", "append", "(", "xml", ")", "if", "elements", ":", "#extra elements", "for", "e2", "in", "elements", ":", "if", "isinstance", "(", "e2", ",", "str", ")", "or", "(", "sys", ".", "version", "<", "'3'", "and", "isinstance", "(", "e2", ",", "unicode", ")", ")", ":", "if", "e", ".", "text", "is", "None", ":", "e", ".", "text", "=", "e2", "else", ":", "e", ".", "text", "+=", "e2", "else", ":", "e", ".", "append", "(", "e2", ")", "return", "e" ]
Serialises the FoLiA element and all its contents to XML. Arguments are mostly for internal use. Returns: an lxml.etree.Element See also: :meth:`AbstractElement.xmlstring` - for direct string output
[ "Serialises", "the", "FoLiA", "element", "and", "all", "its", "contents", "to", "XML", "." ]
python
train
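Since `xml()` returns an `lxml.etree.Element`, a call site typically serializes the result with lxml. A sketch; the `Document` construction and the `words(0)` accessor are assumptions about the pynlpl/folia API, not stated in this record:

# Sketch: serialize a FoLiA element to a string via its xml() method.
# Document(file=...) and words(0) are assumed/hypothetical API calls.
from lxml import etree
from pynlpl.formats import folia

doc = folia.Document(file='example.folia.xml')   # assumed entry point
element = doc.words(0)                           # hypothetical: first Word element
node = element.xml()                             # an lxml.etree.Element
print(etree.tostring(node, pretty_print=True).decode('utf-8'))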
gamechanger/dusty
dusty/compiler/compose/__init__.py
https://github.com/gamechanger/dusty/blob/dc12de90bb6945023d6f43a8071e984313a1d984/dusty/compiler/compose/__init__.py#L70-L75
def _get_build_path(app_spec):
    """ Given a spec for an app, returns the value of the `build` field
    for docker-compose. If the path is relative, it is expanded and added
    to the path of the app's repo. """
    if os.path.isabs(app_spec['build']):
        return app_spec['build']
    return os.path.join(Repo(app_spec['repo']).local_path, app_spec['build'])
[ "def", "_get_build_path", "(", "app_spec", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "app_spec", "[", "'build'", "]", ")", ":", "return", "app_spec", "[", "'build'", "]", "return", "os", ".", "path", ".", "join", "(", "Repo", "(", "app_spec", "[", "'repo'", "]", ")", ".", "local_path", ",", "app_spec", "[", "'build'", "]", ")" ]
Given a spec for an app, returns the value of the `build` field for docker-compose. If the path is relative, it is expanded and added to the path of the app's repo.
[ "Given", "a", "spec", "for", "an", "app", "returns", "the", "value", "of", "the", "build", "field", "for", "docker", "-", "compose", ".", "If", "the", "path", "is", "relative", "it", "is", "expanded", "and", "added", "to", "the", "path", "of", "the", "app", "s", "repo", "." ]
python
valid
dask/dask-ml
dask_ml/utils.py
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/utils.py#L224-L261
def check_chunks(n_samples, n_features, chunks=None):
    """Validate and normalize the chunks argument for a dask.array

    Parameters
    ----------
    n_samples, n_features : int
        Give the shape of the array
    chunks : int, sequence, optional, default None
        * For 'chunks=None', this picks a "good" default number of chunks
          based on the number of CPU cores. The default results in a block
          structure with one block per core along the first dimension (of
          roughly equal lengths) and a single block along the second
          dimension. This may or may not be appropriate for your use-case.
          The chunk size will be at least 100 along the first dimension.

        * When chunks is an int, we split the ``n_samples`` into ``chunks``
          blocks along the first dimension, and a single block along the
          second. Again, the chunksize will be at least 100 along the first
          dimension.

        * When chunks is a sequence, we validate that it's length two and
          turn it into a tuple.

    Returns
    -------
    chunks : tuple
    """
    if chunks is None:
        chunks = (max(100, n_samples // cpu_count()), n_features)
    elif isinstance(chunks, Integral):
        chunks = (max(100, n_samples // chunks), n_features)
    elif isinstance(chunks, Sequence):
        chunks = tuple(chunks)
        if len(chunks) != 2:
            raise AssertionError("Chunks should be a 2-tuple.")
    else:
        raise ValueError("Unknown type of chunks: '{}'".format(type(chunks)))
    return chunks
[ "def", "check_chunks", "(", "n_samples", ",", "n_features", ",", "chunks", "=", "None", ")", ":", "if", "chunks", "is", "None", ":", "chunks", "=", "(", "max", "(", "100", ",", "n_samples", "//", "cpu_count", "(", ")", ")", ",", "n_features", ")", "elif", "isinstance", "(", "chunks", ",", "Integral", ")", ":", "chunks", "=", "(", "max", "(", "100", ",", "n_samples", "//", "chunks", ")", ",", "n_features", ")", "elif", "isinstance", "(", "chunks", ",", "Sequence", ")", ":", "chunks", "=", "tuple", "(", "chunks", ")", "if", "len", "(", "chunks", ")", "!=", "2", ":", "raise", "AssertionError", "(", "\"Chunks should be a 2-tuple.\"", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown type of chunks: '{}'\"", ".", "format", "(", "type", "(", "chunks", ")", ")", ")", "return", "chunks" ]
Validate and normalize the chunks argument for a dask.array

Parameters
----------
n_samples, n_features : int
    Give the shape of the array
chunks : int, sequence, optional, default None
    * For 'chunks=None', this picks a "good" default number of chunks
      based on the number of CPU cores. The default results in a block
      structure with one block per core along the first dimension (of
      roughly equal lengths) and a single block along the second
      dimension. This may or may not be appropriate for your use-case.
      The chunk size will be at least 100 along the first dimension.

    * When chunks is an int, we split the ``n_samples`` into ``chunks``
      blocks along the first dimension, and a single block along the
      second. Again, the chunksize will be at least 100 along the first
      dimension.

    * When chunks is a sequence, we validate that it's length two and
      turn it into a tuple.

Returns
-------
chunks : tuple
[ "Validate", "and", "normalize", "the", "chunks", "argument", "for", "a", "dask", ".", "array" ]
python
train
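The three accepted forms of `chunks` from the docstring, sketched (assuming the function is importable from `dask_ml.utils`, matching this record's path):

# Sketch of the three chunk modes described above.
from dask_ml.utils import check_chunks

check_chunks(10000, 4)                     # None -> one block per CPU core, min 100 rows
check_chunks(10000, 4, chunks=8)           # int  -> (max(100, 10000 // 8), 4) == (1250, 4)
check_chunks(10000, 4, chunks=(2500, 4))   # sequence -> validated and returned as a 2-tuple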
amicks/Speculator
speculator/utils/date.py
https://github.com/amicks/Speculator/blob/f7d6590aded20b1e1b5df16a4b27228ee821c4ab/speculator/utils/date.py#L71-L103
def get_end_start_epochs(year, month, day, direction, unit, count):
    """ Gets epoch from a start date and epoch from a shifted date

    Args:
        year: Int between 1 and 9999.
        month: Int between 1 and 12.
        day: Int between 1 and 31.
        direction: String to shift time forwards or backwards.
            Valid values: 'last', 'next'.
        unit: String of time period unit for count argument.
            How far back to check historical market data.
            Valid values: 'hour', 'day', 'week', 'month', 'year'.
        count: Int of units.
            How far back to check historical market data?

    Returns:
        Dict of int epochs in UTC with keys 'initial' and 'shifted'
    """
    if year or month or day: # Date is specified
        if not year:
            year = 2017
        if not month:
            month = 1
        if not day:
            day = 1
        initial_delorean = date_to_delorean(year, month, day)
    else: # Date is not specified, get current date
        count += 1 # Get another date because market is still open
        initial_delorean = now_delorean()

    initial_epoch = int(initial_delorean.epoch)
    shifted_epoch = shift_epoch(initial_delorean, direction, unit, count)
    return { 'initial': initial_epoch, 'shifted': shifted_epoch }
[ "def", "get_end_start_epochs", "(", "year", ",", "month", ",", "day", ",", "direction", ",", "unit", ",", "count", ")", ":", "if", "year", "or", "month", "or", "day", ":", "# Date is specified", "if", "not", "year", ":", "year", "=", "2017", "if", "not", "month", ":", "month", "=", "1", "if", "not", "day", ":", "day", "=", "1", "initial_delorean", "=", "date_to_delorean", "(", "year", ",", "month", ",", "day", ")", "else", ":", "# Date is not specified, get current date", "count", "+=", "1", "# Get another date because market is still open", "initial_delorean", "=", "now_delorean", "(", ")", "initial_epoch", "=", "int", "(", "initial_delorean", ".", "epoch", ")", "shifted_epoch", "=", "shift_epoch", "(", "initial_delorean", ",", "direction", ",", "unit", ",", "count", ")", "return", "{", "'initial'", ":", "initial_epoch", ",", "'shifted'", ":", "shifted_epoch", "}" ]
Gets epoch from a start date and epoch from a shifted date

Args:
    year: Int between 1 and 9999.
    month: Int between 1 and 12.
    day: Int between 1 and 31.
    direction: String to shift time forwards or backwards.
        Valid values: 'last', 'next'.
    unit: String of time period unit for count argument.
        How far back to check historical market data.
        Valid values: 'hour', 'day', 'week', 'month', 'year'.
    count: Int of units.
        How far back to check historical market data?

Returns:
    Dict of int epochs in UTC with keys 'initial' and 'shifted'
[ "Gets", "epoch", "from", "a", "start", "date", "and", "epoch", "from", "a", "shifted", "date" ]
python
train
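A usage sketch; the import path follows this record's `path` field (speculator/utils/date.py), and the helpers `date_to_delorean`, `now_delorean`, and `shift_epoch` are assumed to live in the same module:

# Sketch: epochs bounding the six months leading up to 2017-01-01.
from speculator.utils import date

epochs = date.get_end_start_epochs(2017, 1, 1, 'last', 'month', 6)
print(epochs['initial'])  # epoch for 2017-01-01 UTC
print(epochs['shifted'])  # epoch shifted six months back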
gaqzi/py-gocd
gocd/api/pipeline.py
https://github.com/gaqzi/py-gocd/blob/6fe5b62dea51e665c11a343aba5fc98e130c5c63/gocd/api/pipeline.py#L192-L204
def artifact(self, counter, stage, job, stage_counter=1):
    """Helper to instantiate an :class:`gocd.api.artifact.Artifact` object

    Args:
        counter (int): The pipeline counter to get the artifact for
        stage: Stage name
        job: Job name
        stage_counter: Defaults to 1

    Returns:
        Artifact: :class:`gocd.api.artifact.Artifact` object
    """
    return Artifact(self.server, self.name, counter, stage, job, stage_counter)
[ "def", "artifact", "(", "self", ",", "counter", ",", "stage", ",", "job", ",", "stage_counter", "=", "1", ")", ":", "return", "Artifact", "(", "self", ".", "server", ",", "self", ".", "name", ",", "counter", ",", "stage", ",", "job", ",", "stage_counter", ")" ]
Helper to instantiate an :class:`gocd.api.artifact.Artifact` object

Args:
    counter (int): The pipeline counter to get the artifact for
    stage: Stage name
    job: Job name
    stage_counter: Defaults to 1

Returns:
    Artifact: :class:`gocd.api.artifact.Artifact` object
[ "Helper", "to", "instantiate", "an", ":", "class", ":", "gocd", ".", "api", ".", "artifact", ".", "Artifact", "object" ]
python
valid
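A call-site sketch; the `Server`/`pipeline` construction follows the py-gocd README and should be treated as an assumption, with placeholder host, credentials, and names:

# Sketch: obtain an Artifact handle through the helper above.
# Server URL, credentials, and pipeline/stage/job names are placeholders.
from gocd import Server

server = Server('http://go.example.com:8153', user='admin', password='secret')
pipeline = server.pipeline('my-pipeline')
artifact = pipeline.artifact(42, 'build-stage', 'compile-job')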
pallets/werkzeug
src/werkzeug/datastructures.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/datastructures.py#L2091-L2100
def find(self, header):
    """Return the index of the header in the set or return -1 if not
    found.

    :param header: the header to be looked up.
    """
    header = header.lower()
    for idx, item in enumerate(self._headers):
        if item.lower() == header:
            return idx
    return -1
[ "def", "find", "(", "self", ",", "header", ")", ":", "header", "=", "header", ".", "lower", "(", ")", "for", "idx", ",", "item", "in", "enumerate", "(", "self", ".", "_headers", ")", ":", "if", "item", ".", "lower", "(", ")", "==", "header", ":", "return", "idx", "return", "-", "1" ]
Return the index of the header in the set or return -1 if not found.

:param header: the header to be looked up.
[ "Return", "the", "index", "of", "the", "header", "in", "the", "set", "or", "return", "-", "1", "if", "not", "found", "." ]
python
train
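The lookup is case-insensitive, since both the argument and the stored items are lowercased before comparison; a quick demonstration:

# HeaderSet.find compares case-insensitively and returns -1 on a miss.
from werkzeug.datastructures import HeaderSet

hs = HeaderSet(['gzip', 'br', 'deflate'])
print(hs.find('BR'))        # 1
print(hs.find('identity'))  # -1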
b3j0f/utils
b3j0f/utils/proxy.py
https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/proxy.py#L174-L259
def proxify_routine(routine, impl=None):
    """Proxify a routine with input impl.

    :param routine: routine to proxify.
    :param impl: new impl to use. If None, use routine.
    """
    # init impl
    impl = routine if impl is None else impl

    is_method = ismethod(routine)
    if is_method:
        function = get_method_function(routine)
    else:
        function = routine

    # flag which indicates that the function is not a pure python function
    # and has to be wrapped
    wrap_function = not hasattr(function, '__code__')

    try:
        # get params from routine
        args, varargs, kwargs, _ = getargspec(function)
    except TypeError:
        # in case of error, wrap the function
        wrap_function = True

    if wrap_function:
        # if function is not pure python, create a generic one
        # with assignments
        assigned = []
        for wrapper_assignment in WRAPPER_ASSIGNMENTS:
            if hasattr(function, wrapper_assignment):
                assigned.append(wrapper_assignment)
        # and updates
        updated = []
        for wrapper_update in WRAPPER_UPDATES:
            if hasattr(function, wrapper_update):
                updated.append(wrapper_update)

        @wraps(function, assigned=assigned, updated=updated)
        def wrappedfunction(*args, **kwargs):
            """Default wrap function."""

        function = wrappedfunction
        # get params from function
        args, varargs, kwargs, _ = getargspec(function)

    name = function.__name__

    result = _compilecode(
        function=function, name=name, impl=impl,
        args=args, varargs=varargs, kwargs=kwargs
    )

    # set wrapping assignments
    for wrapper_assignment in WRAPPER_ASSIGNMENTS:
        try:
            value = getattr(function, wrapper_assignment)
        except AttributeError:
            pass
        else:
            setattr(result, wrapper_assignment, value)

    # set proxy module
    result.__module__ = proxify_routine.__module__

    # update wrapping updating
    for wrapper_update in WRAPPER_UPDATES:
        try:
            value = getattr(function, wrapper_update)
        except AttributeError:
            pass
        else:
            getattr(result, wrapper_update).update(value)

    # set proxyfied element on proxy
    setattr(result, __PROXIFIED__, routine)

    if is_method:
        # create a new method
        args = [result, get_method_self(routine)]
        if PY2:
            args.append(routine.im_class)
        result = MethodType(*args)

    return result
[ "def", "proxify_routine", "(", "routine", ",", "impl", "=", "None", ")", ":", "# init impl", "impl", "=", "routine", "if", "impl", "is", "None", "else", "impl", "is_method", "=", "ismethod", "(", "routine", ")", "if", "is_method", ":", "function", "=", "get_method_function", "(", "routine", ")", "else", ":", "function", "=", "routine", "# flag which indicates that the function is not a pure python function", "# and has to be wrapped", "wrap_function", "=", "not", "hasattr", "(", "function", ",", "'__code__'", ")", "try", ":", "# get params from routine", "args", ",", "varargs", ",", "kwargs", ",", "_", "=", "getargspec", "(", "function", ")", "except", "TypeError", ":", "# in case of error, wrap the function", "wrap_function", "=", "True", "if", "wrap_function", ":", "# if function is not pure python, create a generic one", "# with assignments", "assigned", "=", "[", "]", "for", "wrapper_assignment", "in", "WRAPPER_ASSIGNMENTS", ":", "if", "hasattr", "(", "function", ",", "wrapper_assignment", ")", ":", "assigned", ".", "append", "(", "wrapper_assignment", ")", "# and updates", "updated", "=", "[", "]", "for", "wrapper_update", "in", "WRAPPER_UPDATES", ":", "if", "hasattr", "(", "function", ",", "wrapper_update", ")", ":", "updated", ".", "append", "(", "wrapper_update", ")", "@", "wraps", "(", "function", ",", "assigned", "=", "assigned", ",", "updated", "=", "updated", ")", "def", "wrappedfunction", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Default wrap function.\"\"\"", "function", "=", "wrappedfunction", "# get params from function", "args", ",", "varargs", ",", "kwargs", ",", "_", "=", "getargspec", "(", "function", ")", "name", "=", "function", ".", "__name__", "result", "=", "_compilecode", "(", "function", "=", "function", ",", "name", "=", "name", ",", "impl", "=", "impl", ",", "args", "=", "args", ",", "varargs", "=", "varargs", ",", "kwargs", "=", "kwargs", ")", "# set wrapping assignments", "for", "wrapper_assignment", "in", "WRAPPER_ASSIGNMENTS", ":", "try", ":", "value", "=", "getattr", "(", "function", ",", "wrapper_assignment", ")", "except", "AttributeError", ":", "pass", "else", ":", "setattr", "(", "result", ",", "wrapper_assignment", ",", "value", ")", "# set proxy module", "result", ".", "__module__", "=", "proxify_routine", ".", "__module__", "# update wrapping updating", "for", "wrapper_update", "in", "WRAPPER_UPDATES", ":", "try", ":", "value", "=", "getattr", "(", "function", ",", "wrapper_update", ")", "except", "AttributeError", ":", "pass", "else", ":", "getattr", "(", "result", ",", "wrapper_update", ")", ".", "update", "(", "value", ")", "# set proxyfied element on proxy", "setattr", "(", "result", ",", "__PROXIFIED__", ",", "routine", ")", "if", "is_method", ":", "# create a new method", "args", "=", "[", "result", ",", "get_method_self", "(", "routine", ")", "]", "if", "PY2", ":", "args", ".", "append", "(", "routine", ".", "im_class", ")", "result", "=", "MethodType", "(", "*", "args", ")", "return", "result" ]
Proxify a routine with input impl. :param routine: routine to proxify. :param impl: new impl to use. If None, use routine.
[ "Proxify", "a", "routine", "with", "input", "impl", "." ]
python
train
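A behavioral sketch of what the code above implies: the compiled proxy delegates calls to `impl` while copying the original routine's wrapper attributes. The import path is taken from this record's `path` field:

# Sketch: the proxy keeps the wrapped routine's metadata but delegates to impl.
from b3j0f.utils.proxy import proxify_routine

def original(x):
    """original docstring"""
    return x

def replacement(x):
    return x * 2

proxy = proxify_routine(original, impl=replacement)
print(proxy(3))        # 6, the call is delegated to replacement
print(proxy.__name__)  # 'original', metadata copied from the routine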
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_nameserver.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_nameserver.py#L199-L213
def get_nameserver_detail_output_show_nameserver_nameserver_porttype(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_nameserver_detail = ET.Element("get_nameserver_detail")
    config = get_nameserver_detail
    output = ET.SubElement(get_nameserver_detail, "output")
    show_nameserver = ET.SubElement(output, "show-nameserver")
    nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
    nameserver_portid_key.text = kwargs.pop('nameserver_portid')
    nameserver_porttype = ET.SubElement(show_nameserver, "nameserver-porttype")
    nameserver_porttype.text = kwargs.pop('nameserver_porttype')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_nameserver_detail_output_show_nameserver_nameserver_porttype", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_nameserver_detail", "=", "ET", ".", "Element", "(", "\"get_nameserver_detail\"", ")", "config", "=", "get_nameserver_detail", "output", "=", "ET", ".", "SubElement", "(", "get_nameserver_detail", ",", "\"output\"", ")", "show_nameserver", "=", "ET", ".", "SubElement", "(", "output", ",", "\"show-nameserver\"", ")", "nameserver_portid_key", "=", "ET", ".", "SubElement", "(", "show_nameserver", ",", "\"nameserver-portid\"", ")", "nameserver_portid_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'nameserver_portid'", ")", "nameserver_porttype", "=", "ET", ".", "SubElement", "(", "show_nameserver", ",", "\"nameserver-porttype\"", ")", "nameserver_porttype", ".", "text", "=", "kwargs", ".", "pop", "(", "'nameserver_porttype'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
open-homeautomation/miflora
miflora/miflora_scanner.py
https://github.com/open-homeautomation/miflora/blob/916606e7edc70bdc017dfbe681bc81771e0df7f3/miflora/miflora_scanner.py#L10-L20
def scan(backend, timeout=10):
    """Scan for miflora devices.

    Note: this must be run as root!
    """
    result = []
    for (mac, name) in backend.scan_for_devices(timeout):
        if (name is not None and name.lower() in VALID_DEVICE_NAMES) or \
                mac is not None and mac.upper().startswith(DEVICE_PREFIX):
            result.append(mac.upper())
    return result
[ "def", "scan", "(", "backend", ",", "timeout", "=", "10", ")", ":", "result", "=", "[", "]", "for", "(", "mac", ",", "name", ")", "in", "backend", ".", "scan_for_devices", "(", "timeout", ")", ":", "if", "(", "name", "is", "not", "None", "and", "name", ".", "lower", "(", ")", "in", "VALID_DEVICE_NAMES", ")", "or", "mac", "is", "not", "None", "and", "mac", ".", "upper", "(", ")", ".", "startswith", "(", "DEVICE_PREFIX", ")", ":", "result", ".", "append", "(", "mac", ".", "upper", "(", ")", ")", "return", "result" ]
Scan for miflora devices.

Note: this must be run as root!
[ "Scan", "for", "miflora", "devices", "." ]
python
train
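A usage sketch; passing the `GatttoolBackend` class from btlewrap is an assumption based on common miflora usage, and as the docstring says, this must run as root:

# Sketch: scan for sensors over a gatttool backend.
# The btlewrap import is an assumption, not stated in this record.
from btlewrap import GatttoolBackend
from miflora import miflora_scanner

macs = miflora_scanner.scan(GatttoolBackend, timeout=10)
print(macs)  # e.g. ['C4:7C:8D:xx:xx:xx']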
tensorflow/tensor2tensor
tensor2tensor/models/research/transformer_revnet.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_revnet.py#L136-L213
def transformer_revnet_decoder(decoder_input,
                               encoder_output,
                               decoder_self_attention_bias,
                               encoder_decoder_attention_bias,
                               hparams,
                               name="decoder"):
    """A stack of transformer layers.

    Args:
      decoder_input: a Tensor
      encoder_output: a Tensor
      decoder_self_attention_bias: bias Tensor for self-attention (see
        common_attention.attention_bias())
      encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention
        (see common_attention.attention_bias())
      hparams: hyperparameters for model
      name: a string

    Returns:
      y: a Tensors
    """

    def f(x, side_input):
        """f(x) for reversible layer, self-attention and enc-dec attention."""
        decoder_self_attention_bias = side_input[0]
        encoder_decoder_attention_bias = side_input[1]
        encoder_output = side_input[2]

        old_hid_size = hparams.hidden_size
        hparams.hidden_size = old_hid_size // 2

        with tf.variable_scope("self_attention"):
            y = common_attention.multihead_attention(
                common_layers.layer_preprocess(x, hparams), None,
                decoder_self_attention_bias,
                hparams.attention_key_channels or hparams.hidden_size,
                hparams.attention_value_channels or hparams.hidden_size,
                hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
            y = common_layers.layer_postprocess(x, y, hparams)
        if encoder_output is not None:
            with tf.variable_scope("encdec_attention"):
                y = common_attention.multihead_attention(
                    common_layers.layer_preprocess(x, hparams), encoder_output,
                    encoder_decoder_attention_bias,
                    hparams.attention_key_channels or hparams.hidden_size,
                    hparams.attention_value_channels or hparams.hidden_size,
                    hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
                y = common_layers.layer_postprocess(x, y, hparams)
        hparams.hidden_size = old_hid_size
        return y

    def g(x):
        """g(x) for reversible layer, feed-forward layer."""
        old_hid_size = hparams.hidden_size
        hparams.hidden_size = old_hid_size // 2
        with tf.variable_scope("ffn"):
            y = transformer.transformer_ffn_layer(
                common_layers.layer_preprocess(x, hparams), hparams)
            y = common_layers.layer_postprocess(x, y, hparams)
        hparams.hidden_size = old_hid_size
        return y

    x1, x2 = tf.split(decoder_input, 2, axis=-1)

    with tf.variable_scope(name):
        y1, y2 = tf.contrib.layers.rev_block(
            x1,
            x2,
            f,
            g,
            num_layers=hparams.num_hidden_layers,
            f_side_input=[
                decoder_self_attention_bias, encoder_decoder_attention_bias,
                encoder_output
            ],
            is_training=hparams.mode == tf.estimator.ModeKeys.TRAIN)
        y = tf.concat([y1, y2], axis=-1)

    return common_layers.layer_preprocess(y, hparams)
[ "def", "transformer_revnet_decoder", "(", "decoder_input", ",", "encoder_output", ",", "decoder_self_attention_bias", ",", "encoder_decoder_attention_bias", ",", "hparams", ",", "name", "=", "\"decoder\"", ")", ":", "def", "f", "(", "x", ",", "side_input", ")", ":", "\"\"\"f(x) for reversible layer, self-attention and enc-dec attention.\"\"\"", "decoder_self_attention_bias", "=", "side_input", "[", "0", "]", "encoder_decoder_attention_bias", "=", "side_input", "[", "1", "]", "encoder_output", "=", "side_input", "[", "2", "]", "old_hid_size", "=", "hparams", ".", "hidden_size", "hparams", ".", "hidden_size", "=", "old_hid_size", "//", "2", "with", "tf", ".", "variable_scope", "(", "\"self_attention\"", ")", ":", "y", "=", "common_attention", ".", "multihead_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "None", ",", "decoder_self_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ")", "y", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "if", "encoder_output", "is", "not", "None", ":", "with", "tf", ".", "variable_scope", "(", "\"encdec_attention\"", ")", ":", "y", "=", "common_attention", ".", "multihead_attention", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "encoder_output", ",", "encoder_decoder_attention_bias", ",", "hparams", ".", "attention_key_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "attention_value_channels", "or", "hparams", ".", "hidden_size", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "num_heads", ",", "hparams", ".", "attention_dropout", ")", "y", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "hparams", ".", "hidden_size", "=", "old_hid_size", "return", "y", "def", "g", "(", "x", ")", ":", "\"\"\"g(x) for reversible layer, feed-forward layer.\"\"\"", "old_hid_size", "=", "hparams", ".", "hidden_size", "hparams", ".", "hidden_size", "=", "old_hid_size", "//", "2", "with", "tf", ".", "variable_scope", "(", "\"ffn\"", ")", ":", "y", "=", "transformer", ".", "transformer_ffn_layer", "(", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "hparams", ")", "y", "=", "common_layers", ".", "layer_postprocess", "(", "x", ",", "y", ",", "hparams", ")", "hparams", ".", "hidden_size", "=", "old_hid_size", "return", "y", "x1", ",", "x2", "=", "tf", ".", "split", "(", "decoder_input", ",", "2", ",", "axis", "=", "-", "1", ")", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "y1", ",", "y2", "=", "tf", ".", "contrib", ".", "layers", ".", "rev_block", "(", "x1", ",", "x2", ",", "f", ",", "g", ",", "num_layers", "=", "hparams", ".", "num_hidden_layers", ",", "f_side_input", "=", "[", "decoder_self_attention_bias", ",", "encoder_decoder_attention_bias", ",", "encoder_output", "]", ",", "is_training", "=", "hparams", ".", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ")", "y", "=", "tf", ".", "concat", "(", "[", "y1", ",", "y2", "]", ",", "axis", "=", "-", "1", ")", "return", "common_layers", ".", "layer_preprocess", "(", "y", ",", "hparams", ")" ]
A stack of transformer layers.

Args:
  decoder_input: a Tensor
  encoder_output: a Tensor
  decoder_self_attention_bias: bias Tensor for self-attention (see
    common_attention.attention_bias())
  encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention
    (see common_attention.attention_bias())
  hparams: hyperparameters for model
  name: a string

Returns:
  y: a Tensors
[ "A", "stack", "of", "transformer", "layers", "." ]
python
train
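The core of `tf.contrib.layers.rev_block` is the reversible residual coupling y1 = x1 + f(x2), y2 = x2 + g(y1), which can be inverted exactly so intermediate activations need not be stored during backprop. A framework-free numpy sketch of one forward/inverse step, with toy stand-ins for the attention and feed-forward sublayers:

# Minimal numpy sketch of one reversible block: forward and exact inverse.
import numpy as np

def f(v):  # stand-in for the self-attention / enc-dec attention sublayer
    return np.tanh(v)

def g(v):  # stand-in for the feed-forward sublayer
    return 0.5 * v

def rev_forward(x1, x2):
    y1 = x1 + f(x2)
    y2 = x2 + g(y1)
    return y1, y2

def rev_inverse(y1, y2):
    x2 = y2 - g(y1)   # recover x2 first
    x1 = y1 - f(x2)   # then x1; no stored activations are needed
    return x1, x2

x1, x2 = np.random.randn(4), np.random.randn(4)
y1, y2 = rev_forward(x1, x2)
r1, r2 = rev_inverse(y1, y2)
assert np.allclose(x1, r1) and np.allclose(x2, r2)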
saltstack/salt
salt/returners/redis_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/redis_return.py#L219-L224
def save_load(jid, load, minions=None):
    '''
    Save the load to the specified jid
    '''
    serv = _get_serv(ret=None)
    serv.setex('load:{0}'.format(jid), _get_ttl(), salt.utils.json.dumps(load))
[ "def", "save_load", "(", "jid", ",", "load", ",", "minions", "=", "None", ")", ":", "serv", "=", "_get_serv", "(", "ret", "=", "None", ")", "serv", ".", "setex", "(", "'load:{0}'", ".", "format", "(", "jid", ")", ",", "_get_ttl", "(", ")", ",", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "load", ")", ")" ]
Save the load to the specified jid
[ "Save", "the", "load", "to", "the", "specified", "jid" ]
python
train
ismms-himc/clustergrammer2
setupbase.py
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/setupbase.py#L321-L377
def install_npm(path=None, build_dir=None, source_dir=None, build_cmd='build', force=False, npm=None):
    """Return a Command for managing an npm installation.

    Note: The command is skipped if the `--skip-npm` flag is used.

    Parameters
    ----------
    path: str, optional
        The base path of the node package. Defaults to the repo root.
    build_dir: str, optional
        The target build directory. If this and source_dir are given,
        the JavaScript will only be build if necessary.
    source_dir: str, optional
        The source code directory.
    build_cmd: str, optional
        The npm command to build assets to the build_dir.
    npm: str or list, optional.
        The npm executable name, or a tuple of ['node', executable].
    """

    class NPM(BaseCommand):
        description = 'install package.json dependencies using npm'

        def run(self):
            if skip_npm:
                log.info('Skipping npm-installation')
                return
            node_package = path or HERE
            node_modules = pjoin(node_package, 'node_modules')
            is_yarn = os.path.exists(pjoin(node_package, 'yarn.lock'))

            npm_cmd = npm
            if npm is None:
                if is_yarn:
                    npm_cmd = ['yarn']
                else:
                    npm_cmd = ['npm']

            if not which(npm_cmd[0]):
                log.error("`{0}` unavailable. If you're running this command "
                          "using sudo, make sure `{0}` is availble to sudo"
                          .format(npm_cmd[0]))
                return

            if force or is_stale(node_modules, pjoin(node_package, 'package.json')):
                log.info('Installing build dependencies with npm. This may '
                         'take a while...')
                run(npm_cmd + ['install'], cwd=node_package)
            if build_dir and source_dir and not force:
                should_build = is_stale(build_dir, source_dir)
            else:
                should_build = True
            if should_build:
                run(npm_cmd + ['run', build_cmd], cwd=node_package)

    return NPM
[ "def", "install_npm", "(", "path", "=", "None", ",", "build_dir", "=", "None", ",", "source_dir", "=", "None", ",", "build_cmd", "=", "'build'", ",", "force", "=", "False", ",", "npm", "=", "None", ")", ":", "class", "NPM", "(", "BaseCommand", ")", ":", "description", "=", "'install package.json dependencies using npm'", "def", "run", "(", "self", ")", ":", "if", "skip_npm", ":", "log", ".", "info", "(", "'Skipping npm-installation'", ")", "return", "node_package", "=", "path", "or", "HERE", "node_modules", "=", "pjoin", "(", "node_package", ",", "'node_modules'", ")", "is_yarn", "=", "os", ".", "path", ".", "exists", "(", "pjoin", "(", "node_package", ",", "'yarn.lock'", ")", ")", "npm_cmd", "=", "npm", "if", "npm", "is", "None", ":", "if", "is_yarn", ":", "npm_cmd", "=", "[", "'yarn'", "]", "else", ":", "npm_cmd", "=", "[", "'npm'", "]", "if", "not", "which", "(", "npm_cmd", "[", "0", "]", ")", ":", "log", ".", "error", "(", "\"`{0}` unavailable. If you're running this command \"", "\"using sudo, make sure `{0}` is availble to sudo\"", ".", "format", "(", "npm_cmd", "[", "0", "]", ")", ")", "return", "if", "force", "or", "is_stale", "(", "node_modules", ",", "pjoin", "(", "node_package", ",", "'package.json'", ")", ")", ":", "log", ".", "info", "(", "'Installing build dependencies with npm. This may '", "'take a while...'", ")", "run", "(", "npm_cmd", "+", "[", "'install'", "]", ",", "cwd", "=", "node_package", ")", "if", "build_dir", "and", "source_dir", "and", "not", "force", ":", "should_build", "=", "is_stale", "(", "build_dir", ",", "source_dir", ")", "else", ":", "should_build", "=", "True", "if", "should_build", ":", "run", "(", "npm_cmd", "+", "[", "'run'", ",", "build_cmd", "]", ",", "cwd", "=", "node_package", ")", "return", "NPM" ]
Return a Command for managing an npm installation.

Note: The command is skipped if the `--skip-npm` flag is used.

Parameters
----------
path: str, optional
    The base path of the node package. Defaults to the repo root.
build_dir: str, optional
    The target build directory. If this and source_dir are given,
    the JavaScript will only be build if necessary.
source_dir: str, optional
    The source code directory.
build_cmd: str, optional
    The npm command to build assets to the build_dir.
npm: str or list, optional.
    The npm executable name, or a tuple of ['node', executable].
[ "Return", "a", "Command", "for", "managing", "an", "npm", "installation", "." ]
python
train
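Since the factory returns a setuptools command class, a typical wiring in a `setup.py` looks like the sketch below; the command name 'jsdeps' and the directory layout are illustrative assumptions:

# Sketch: plug the generated command class into setuptools.
from setuptools import setup

setup(
    name='my-widget-package',
    cmdclass={
        'jsdeps': install_npm(
            path='js',             # node package directory (assumed layout)
            build_dir='js/dist',   # rebuilt only when stale vs source_dir
            source_dir='js/src',
            build_cmd='build:all',
        ),
    },
)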
JarryShaw/PyPCAPKit
src/toolkit/scapy.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/toolkit/scapy.py#L48-L63
def packet2dict(packet, *, count=NotImplemented):
    """Convert Scapy packet into dict."""
    if scapy_all is None:
        raise ModuleNotFound("No module named 'scapy'", name='scapy')

    def wrapper(packet):
        dict_ = packet.fields
        payload = packet.payload
        if not isinstance(payload, scapy_all.packet.NoPayload):
            dict_[payload.name] = wrapper(payload)
        return dict_

    return {
        'packet': bytes(packet),
        packet.name: wrapper(packet),
    }
[ "def", "packet2dict", "(", "packet", ",", "*", ",", "count", "=", "NotImplemented", ")", ":", "if", "scapy_all", "is", "None", ":", "raise", "ModuleNotFound", "(", "\"No module named 'scapy'\"", ",", "name", "=", "'scapy'", ")", "def", "wrapper", "(", "packet", ")", ":", "dict_", "=", "packet", ".", "fields", "payload", "=", "packet", ".", "payload", "if", "not", "isinstance", "(", "payload", ",", "scapy_all", ".", "packet", ".", "NoPayload", ")", ":", "dict_", "[", "payload", ".", "name", "]", "=", "wrapper", "(", "payload", ")", "return", "dict_", "return", "{", "'packet'", ":", "bytes", "(", "packet", ")", ",", "packet", ".", "name", ":", "wrapper", "(", "packet", ")", ",", "}" ]
Convert Scapy packet into dict.
[ "Convert", "Scapy", "packet", "into", "dict", "." ]
python
train
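A usage sketch: the function nests each payload layer's fields under the layer's name. The `packet2dict` import path is inferred from this record's `path` field (src/toolkit/scapy.py) and is an assumption:

# Sketch: nested dict view of a scapy packet, keyed by layer name.
from pcapkit.toolkit.scapy import packet2dict  # assumed import path
from scapy.all import Ether, IP, TCP

pkt = Ether() / IP(dst='192.0.2.1') / TCP(dport=80)
d = packet2dict(pkt)
print(d['Ethernet']['IP']['TCP']['dport'])  # 80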
gmr/helper
helper/unix.py
https://github.com/gmr/helper/blob/fe8e45fc8eabf619429b2940c682c252ee33c082/helper/unix.py#L131-L142
def uid(self):
    """Return the user id that the process will run as

    :rtype: int

    """
    if not self._uid:
        if self.config.daemon.user:
            self._uid = pwd.getpwnam(self.config.daemon.user).pw_uid
        else:
            self._uid = os.getuid()
    return self._uid
[ "def", "uid", "(", "self", ")", ":", "if", "not", "self", ".", "_uid", ":", "if", "self", ".", "config", ".", "daemon", ".", "user", ":", "self", ".", "_uid", "=", "pwd", ".", "getpwnam", "(", "self", ".", "config", ".", "daemon", ".", "user", ")", ".", "pw_uid", "else", ":", "self", ".", "_uid", "=", "os", ".", "getuid", "(", ")", "return", "self", ".", "_uid" ]
Return the user id that the process will run as

:rtype: int
[ "Return", "the", "user", "id", "that", "the", "process", "will", "run", "as" ]
python
train
pantsbuild/pants
pants-plugins/src/python/internal_backend/sitegen/tasks/sitegen.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/pants-plugins/src/python/internal_backend/sitegen/tasks/sitegen.py#L249-L256
def transform_soups(config, soups, precomputed):
    """Mutate our soups to be better when we write them out later."""
    fixup_internal_links(config, soups)
    ensure_headings_linkable(soups)
    # Do this after ensure_headings_linkable so that there will be links.
    generate_page_tocs(soups, precomputed)
    link_pantsrefs(soups, precomputed)
[ "def", "transform_soups", "(", "config", ",", "soups", ",", "precomputed", ")", ":", "fixup_internal_links", "(", "config", ",", "soups", ")", "ensure_headings_linkable", "(", "soups", ")", "# Do this after ensure_headings_linkable so that there will be links.", "generate_page_tocs", "(", "soups", ",", "precomputed", ")", "link_pantsrefs", "(", "soups", ",", "precomputed", ")" ]
Mutate our soups to be better when we write them out later.
[ "Mutate", "our", "soups", "to", "be", "better", "when", "we", "write", "them", "out", "later", "." ]
python
train
Qiskit/qiskit-terra
qiskit/qasm/node/id.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/qasm/node/id.py#L56-L63
def sym(self, nested_scope=None):
    """Return the correspond symbolic number."""
    if not nested_scope or self.name not in nested_scope[-1]:
        raise NodeException("Expected local parameter name: ",
                            "name=%s, line=%s, file=%s" % (
                                self.name, self.line, self.file))
    else:
        return nested_scope[-1][self.name].sym(nested_scope[0:-1])
[ "def", "sym", "(", "self", ",", "nested_scope", "=", "None", ")", ":", "if", "not", "nested_scope", "or", "self", ".", "name", "not", "in", "nested_scope", "[", "-", "1", "]", ":", "raise", "NodeException", "(", "\"Expected local parameter name: \"", ",", "\"name=%s, line=%s, file=%s\"", "%", "(", "self", ".", "name", ",", "self", ".", "line", ",", "self", ".", "file", ")", ")", "else", ":", "return", "nested_scope", "[", "-", "1", "]", "[", "self", ".", "name", "]", ".", "sym", "(", "nested_scope", "[", "0", ":", "-", "1", "]", ")" ]
Return the correspond symbolic number.
[ "Return", "the", "correspond", "symbolic", "number", "." ]
python
test
EventRegistry/event-registry-python
eventregistry/QueryEvents.py
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryEvents.py#L172-L179
def initWithEventUriWgtList(uriWgtList):
    """
    Set a custom list of event uris. The results will be then computed on
    this list - no query will be done (all conditions will be ignored).
    """
    q = QueryEvents()
    assert isinstance(uriWgtList, list), "uriWgtList has to be a list of strings that represent event uris with their weights"
    q.queryParams = { "action": "getEvents", "eventUriWgtList": ",".join(uriWgtList) }
    return q
[ "def", "initWithEventUriWgtList", "(", "uriWgtList", ")", ":", "q", "=", "QueryEvents", "(", ")", "assert", "isinstance", "(", "uriWgtList", ",", "list", ")", ",", "\"uriWgtList has to be a list of strings that represent event uris with their weights\"", "q", ".", "queryParams", "=", "{", "\"action\"", ":", "\"getEvents\"", ",", "\"eventUriWgtList\"", ":", "\",\"", ".", "join", "(", "uriWgtList", ")", "}", "return", "q" ]
Set a custom list of event uris. The results will be then computed on this list - no query will be done (all conditions will be ignored).
[ "Set", "a", "custom", "list", "of", "event", "uris", ".", "The", "results", "will", "be", "then", "computed", "on", "this", "list", "-", "no", "query", "will", "be", "done", "(", "all", "conditions", "will", "be", "ignored", ")", "." ]
python
train
ome/omego
omego/fileutils.py
https://github.com/ome/omego/blob/2dadbf3c6342b6c995f9e0dceaf3c0b7fab030fb/omego/fileutils.py#L194-L228
def check_extracted_paths(namelist, subdir=None):
    """
    Check whether zip file paths are all relative, and optionally in a
    specified subdirectory, raises an exception if not

    namelist: A list of paths from the zip file
    subdir: If specified then check whether all paths in the zip file are
        under this subdirectory

    Python docs are unclear about the security of extract/extractall:
    https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extractall
    https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extract
    """
    def relpath(p):
        # relpath strips a trailing sep
        # Windows paths may also use unix sep
        q = os.path.relpath(p)
        if p.endswith(os.path.sep) or p.endswith('/'):
            q += os.path.sep
        return q

    parent = os.path.abspath('.')
    if subdir:
        if os.path.isabs(subdir):
            raise FileException('subdir must be a relative path', subdir)
        subdir = relpath(subdir + os.path.sep)

    for name in namelist:
        if os.path.commonprefix([parent, os.path.abspath(name)]) != parent:
            raise FileException('Insecure path in zipfile', name)

        if subdir and os.path.commonprefix(
                [subdir, relpath(name)]) != subdir:
            raise FileException(
                'Path in zipfile is not in required subdir', name)
[ "def", "check_extracted_paths", "(", "namelist", ",", "subdir", "=", "None", ")", ":", "def", "relpath", "(", "p", ")", ":", "# relpath strips a trailing sep", "# Windows paths may also use unix sep", "q", "=", "os", ".", "path", ".", "relpath", "(", "p", ")", "if", "p", ".", "endswith", "(", "os", ".", "path", ".", "sep", ")", "or", "p", ".", "endswith", "(", "'/'", ")", ":", "q", "+=", "os", ".", "path", ".", "sep", "return", "q", "parent", "=", "os", ".", "path", ".", "abspath", "(", "'.'", ")", "if", "subdir", ":", "if", "os", ".", "path", ".", "isabs", "(", "subdir", ")", ":", "raise", "FileException", "(", "'subdir must be a relative path'", ",", "subdir", ")", "subdir", "=", "relpath", "(", "subdir", "+", "os", ".", "path", ".", "sep", ")", "for", "name", "in", "namelist", ":", "if", "os", ".", "path", ".", "commonprefix", "(", "[", "parent", ",", "os", ".", "path", ".", "abspath", "(", "name", ")", "]", ")", "!=", "parent", ":", "raise", "FileException", "(", "'Insecure path in zipfile'", ",", "name", ")", "if", "subdir", "and", "os", ".", "path", ".", "commonprefix", "(", "[", "subdir", ",", "relpath", "(", "name", ")", "]", ")", "!=", "subdir", ":", "raise", "FileException", "(", "'Path in zipfile is not in required subdir'", ",", "name", ")" ]
Check whether zip file paths are all relative, and optionally in a specified subdirectory, raises an exception if not namelist: A list of paths from the zip file subdir: If specified then check whether all paths in the zip file are under this subdirectory Python docs are unclear about the security of extract/extractall: https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extractall https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extract
[ "Check", "whether", "zip", "file", "paths", "are", "all", "relative", "and", "optionally", "in", "a", "specified", "subdirectory", "raises", "an", "exception", "if", "not" ]
python
train
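A short sketch of the intended call pattern: validate every member name before extracting (archive name and subdir are illustrative; FileException is assumed to be defined alongside the function):

import zipfile

with zipfile.ZipFile("server.zip") as zf:
    # Raises FileException on absolute paths or paths escaping the subdir.
    check_extracted_paths(zf.namelist(), subdir="OMERO.server")
    zf.extractall()  # only reached if every path passed the check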
ThomasChiroux/attowiki
src/attowiki/views.py
https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L314-L378
def view_page(name=None): """Serve a page name. .. note:: this is a bottle view * if the view is called with the POST method, write the new page content to the file, commit the modification and then display the html rendering of the restructured text file * if the view is called with the GET method, directly display the html rendering of the restructured text file Keyword Arguments: :name: (str) -- name of the rest file (without the .rst extension) OPTIONAL if no filename is given, first try to find a "index.rst" file in the directory and serve it. If not found, serve the meta page __index__ Returns: bottle response object """ if request.method == 'POST': if name is None: # new file if len(request.forms.filename) > 0: name = request.forms.filename if name is not None: filename = "{0}.rst".format(name) file_handle = open(filename, 'w') file_handle.write(request.forms.content.encode('utf-8')) file_handle.close() add_file_to_repo(filename) commit(filename) response.set_header('Cache-control', 'no-cache') response.set_header('Pragma', 'no-cache') if name is None: # we try to find an index file index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst") if len(index_files) == 0: # not found # redirect to __index__ return view_meta_index() else: name = index_files[0][2:-4] files = glob.glob("{0}.rst".format(name)) if len(files) > 0: file_handle = open(files[0], 'r') html_body = publish_parts(file_handle.read(), writer=AttowikiWriter(), settings=None, settings_overrides=None)['html_body'] history = commit_history("{0}.rst".format(name)) return template('page', type="view", name=name, extended_name=None, is_repo=check_repo(), history=history, gitref=None, content=html_body) else: return static_file(name, '')
[ "def", "view_page", "(", "name", "=", "None", ")", ":", "if", "request", ".", "method", "==", "'POST'", ":", "if", "name", "is", "None", ":", "# new file", "if", "len", "(", "request", ".", "forms", ".", "filename", ")", ">", "0", ":", "name", "=", "request", ".", "forms", ".", "filename", "if", "name", "is", "not", "None", ":", "filename", "=", "\"{0}.rst\"", ".", "format", "(", "name", ")", "file_handle", "=", "open", "(", "filename", ",", "'w'", ")", "file_handle", ".", "write", "(", "request", ".", "forms", ".", "content", ".", "encode", "(", "'utf-8'", ")", ")", "file_handle", ".", "close", "(", ")", "add_file_to_repo", "(", "filename", ")", "commit", "(", "filename", ")", "response", ".", "set_header", "(", "'Cache-control'", ",", "'no-cache'", ")", "response", ".", "set_header", "(", "'Pragma'", ",", "'no-cache'", ")", "if", "name", "is", "None", ":", "# we try to find an index file", "index_files", "=", "glob", ".", "glob", "(", "\"./[Ii][Nn][Dd][Ee][Xx].rst\"", ")", "if", "len", "(", "index_files", ")", "==", "0", ":", "# not found", "# redirect to __index__", "return", "view_meta_index", "(", ")", "else", ":", "name", "=", "index_files", "[", "0", "]", "[", "2", ":", "-", "4", "]", "files", "=", "glob", ".", "glob", "(", "\"{0}.rst\"", ".", "format", "(", "name", ")", ")", "if", "len", "(", "files", ")", ">", "0", ":", "file_handle", "=", "open", "(", "files", "[", "0", "]", ",", "'r'", ")", "html_body", "=", "publish_parts", "(", "file_handle", ".", "read", "(", ")", ",", "writer", "=", "AttowikiWriter", "(", ")", ",", "settings", "=", "None", ",", "settings_overrides", "=", "None", ")", "[", "'html_body'", "]", "history", "=", "commit_history", "(", "\"{0}.rst\"", ".", "format", "(", "name", ")", ")", "return", "template", "(", "'page'", ",", "type", "=", "\"view\"", ",", "name", "=", "name", ",", "extended_name", "=", "None", ",", "is_repo", "=", "check_repo", "(", ")", ",", "history", "=", "history", ",", "gitref", "=", "None", ",", "content", "=", "html_body", ")", "else", ":", "return", "static_file", "(", "name", ",", "''", ")" ]
Serve a page name. .. note:: this is a bottle view * if the view is called with the POST method, write the new page content to the file, commit the modification and then display the html rendering of the restructured text file * if the view is called with the GET method, directly display the html rendering of the restructured text file Keyword Arguments: :name: (str) -- name of the rest file (without the .rst extension) OPTIONAL if no filename is given, first try to find a "index.rst" file in the directory and serve it. If not found, serve the meta page __index__ Returns: bottle response object
[ "Serve", "a", "page", "name", "." ]
python
train
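A hedged sketch of how such a view is typically bound to URLs in bottle; the actual routes used by attowiki may differ:

from bottle import route

# Serve both the wiki root and named pages through the same view.
route('/', method=['GET', 'POST'])(view_page)
route('/<name>', method=['GET', 'POST'])(view_page)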
hamelsmu/ktext
ktext/preprocess.py
https://github.com/hamelsmu/ktext/blob/221f09f5b1762705075fd1bd914881c0724d5e02/ktext/preprocess.py#L307-L311
def token_count_pandas(self): """ See token counts as pandas dataframe""" freq_df = pd.DataFrame.from_dict(self.indexer.word_counts, orient='index') freq_df.columns = ['count'] return freq_df.sort_values('count', ascending=False)
[ "def", "token_count_pandas", "(", "self", ")", ":", "freq_df", "=", "pd", ".", "DataFrame", ".", "from_dict", "(", "self", ".", "indexer", ".", "word_counts", ",", "orient", "=", "'index'", ")", "freq_df", ".", "columns", "=", "[", "'count'", "]", "return", "freq_df", ".", "sort_values", "(", "'count'", ",", "ascending", "=", "False", ")" ]
See token counts as pandas dataframe
[ "See", "token", "counts", "as", "pandas", "dataframe" ]
python
test
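Assuming `self.indexer` is a fitted keras-style tokenizer exposing a `word_counts` dict, usage looks roughly like this (the `processor` class name and `fit` call are assumptions about the surrounding ktext API):

proc = processor()               # hypothetical ktext processor instance
proc.fit(train_docs)             # fills proc.indexer.word_counts
freq_df = proc.token_count_pandas()
print(freq_df.head(10))          # ten most frequent tokens first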
ARMmbed/icetea
icetea_lib/ResourceProvider/ResourceConfig.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/ResourceProvider/ResourceConfig.py#L79-L107
def resolve_configuration(self, configuration): """ Resolve requirements from given JSON encoded data. The JSON should follow the testcase meta-data requirements field format. This function will resolve requirements for each individual DUT and create a DUT requirements list that contains the configuration for each DUT, eg: { "duts": [ { "*": {"count": 2, "type": "process" } } ] } would result in the following configuration: [ { "1": {"type": "process", "allowed_platforms": [], "nick": None } { "2": {"type": "process", "allowed_platforms": [], "nick": None } ] :param configuration: optional argument if requirements come from external source, should be similar to the following format: { "duts": [ { "*": {"count": 2, "type": "process" } } ] } """ configuration = configuration if configuration else self.json_config self._resolve_requirements(configuration["requirements"]) self._resolve_dut_count()
[ "def", "resolve_configuration", "(", "self", ",", "configuration", ")", ":", "configuration", "=", "configuration", "if", "configuration", "else", "self", ".", "json_config", "self", ".", "_resolve_requirements", "(", "configuration", "[", "\"requirements\"", "]", ")", "self", ".", "_resolve_dut_count", "(", ")" ]
Resolve requirements from given JSON encoded data. The JSON should follow the testcase meta-data requirements field format. This function will resolve requirements for each individual DUT and create a DUT requirements list that contains the configuration for each DUT, eg: { "duts": [ { "*": {"count": 2, "type": "process" } } ] } would result in the following configuration: [ { "1": {"type": "process", "allowed_platforms": [], "nick": None } { "2": {"type": "process", "allowed_platforms": [], "nick": None } ] :param configuration: optional argument if requirements come from external source, should be similar to the following format: { "duts": [ { "*": {"count": 2, "type": "process" } } ] }
[ "Resolve", "requirements", "from", "given", "JSON", "encoded", "data", ".", "The", "JSON", "should", "follow", "the", "testcase", "meta", "-", "data", "requirements", "field", "format", ".", "This", "function", "will", "resolve", "requirements", "for", "each", "individual", "DUT", "and", "create", "a", "DUT", "requirements", "list", "that", "contains", "the", "configuration", "for", "each", "DUT", "eg", ":", "{", "duts", ":", "[", "{", "*", ":", "{", "count", ":", "2", "type", ":", "process", "}", "}", "]", "}", "would", "result", "in", "the", "following", "configuration", ":", "[", "{", "1", ":", "{", "type", ":", "process", "allowed_platforms", ":", "[]", "nick", ":", "None", "}", "{", "2", ":", "{", "type", ":", "process", "allowed_platforms", ":", "[]", "nick", ":", "None", "}", "]" ]
python
train
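A minimal sketch of calling it with an externally built configuration; the payload follows the format shown in the docstring, and the accessor on the last line is an assumption:

config = {
    "requirements": {
        "duts": [
            {"*": {"count": 2, "type": "process"}}
        ]
    }
}
resource_config.resolve_configuration(config)  # resource_config: ResourceConfig
print(resource_config.count_duts())            # hypothetical count accessor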
h2oai/h2o-3
h2o-py/h2o/model/model_base.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/model_base.py#L823-L838
def save_mojo(self, path="", force=False): """ Save an H2O Model as MOJO (Model Object, Optimized) to disk. :param path: a path to save the model at (hdfs, s3, local) :param force: if True overwrite destination directory in case it exists, or throw exception if set to False. :returns str: the path of the saved model """ assert_is_type(path, str) assert_is_type(force, bool) if not self.have_mojo: raise H2OValueError("Export to MOJO not supported") path = os.path.join(os.getcwd() if path == "" else path, self.model_id + ".zip") return h2o.api("GET /99/Models.mojo/%s" % self.model_id, data={"dir": path, "force": force})["dir"]
[ "def", "save_mojo", "(", "self", ",", "path", "=", "\"\"", ",", "force", "=", "False", ")", ":", "assert_is_type", "(", "path", ",", "str", ")", "assert_is_type", "(", "force", ",", "bool", ")", "if", "not", "self", ".", "have_mojo", ":", "raise", "H2OValueError", "(", "\"Export to MOJO not supported\"", ")", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", "if", "path", "==", "\"\"", "else", "path", ",", "self", ".", "model_id", "+", "\".zip\"", ")", "return", "h2o", ".", "api", "(", "\"GET /99/Models.mojo/%s\"", "%", "self", ".", "model_id", ",", "data", "=", "{", "\"dir\"", ":", "path", ",", "\"force\"", ":", "force", "}", ")", "[", "\"dir\"", "]" ]
Save an H2O Model as MOJO (Model Object, Optimized) to disk. :param path: a path to save the model at (hdfs, s3, local) :param force: if True overwrite destination directory in case it exists, or throw exception if set to False. :returns str: the path of the saved model
[ "Save", "an", "H2O", "Model", "as", "MOJO", "(", "Model", "Object", "Optimized", ")", "to", "disk", "." ]
python
test
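Typical end-to-end usage with a MOJO-capable estimator (file and directory paths are illustrative):

import h2o
from h2o.estimators import H2OGradientBoostingEstimator

h2o.init()
frame = h2o.import_file("train.csv")  # illustrative dataset
model = H2OGradientBoostingEstimator()
model.train(x=frame.columns[:-1], y=frame.columns[-1], training_frame=frame)
mojo_path = model.save_mojo(path="/tmp/models", force=True)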
rstoneback/pysat
pysat/instruments/dmsp_ivm.py
https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/instruments/dmsp_ivm.py#L135-L175
def download(date_array, tag, sat_id, data_path=None, user=None, password=None): """Downloads data from Madrigal. The user's name should be provided in field user. John Malkovich should be entered as John+Malkovich. The password field should be the user's email address. These parameters are passed to Madrigal when downloading. The affiliation field is set to pysat to enable tracking of pysat downloads. Parameters ---------- date_array : array-like Sequence of dates to download data for. tag : string Denotes type of file to load. sat_id : string Satellite ID; used to look up the Madrigal kindat code. data_path : string Path to directory where downloaded files will be stored. user : string User name, e.g. John+Malkovich. password : string User email address. """ import subprocess # currently passes things along if no user and password supplied # need to do this for testing # TODO, implement user and password values in test code # specific to DMSP if user is None: print ('No user information supplied for download.') user = 'pysat_testing' if password is None: print ('Please provide email address in password field.') password = 'pysat_testing@not_real_email.org' a = subprocess.check_output(["globalDownload.py", "--verbose", "--url=http://cedar.openmadrigal.org", '--outputDir='+data_path, '--user_fullname='+user, '--user_email='+password, '--user_affiliation=pysat', '--format=hdf5', '--startDate='+date_array[0].strftime('%m/%d/%Y'), '--endDate='+date_array[-1].strftime('%m/%d/%Y'), '--inst=8100', '--kindat='+str(madrigal_tag[sat_id])]) print ('Feedback from openMadrigal ', a)
[ "def", "download", "(", "date_array", ",", "tag", ",", "sat_id", ",", "data_path", "=", "None", ",", "user", "=", "None", ",", "password", "=", "None", ")", ":", "import", "subprocess", "# currently passes things along if no user and password supplied", "# need to do this for testing", "# TODO, implement user and password values in test code", "# specific to DMSP", "if", "user", "is", "None", ":", "print", "(", "'No user information supplied for download.'", ")", "user", "=", "'pysat_testing'", "if", "password", "is", "None", ":", "print", "(", "'Please provide email address in password field.'", ")", "password", "=", "'pysat_testing@not_real_email.org'", "a", "=", "subprocess", ".", "check_output", "(", "[", "\"globalDownload.py\"", ",", "\"--verbose\"", ",", "\"--url=http://cedar.openmadrigal.org\"", ",", "'--outputDir='", "+", "data_path", ",", "'--user_fullname='", "+", "user", ",", "'--user_email='", "+", "password", ",", "'--user_affiliation=pysat'", ",", "'--format=hdf5'", ",", "'--startDate='", "+", "date_array", "[", "0", "]", ".", "strftime", "(", "'%m/%d/%Y'", ")", ",", "'--endDate='", "+", "date_array", "[", "-", "1", "]", ".", "strftime", "(", "'%m/%d/%Y'", ")", ",", "'--inst=8100'", ",", "'--kindat='", "+", "str", "(", "madrigal_tag", "[", "sat_id", "]", ")", "]", ")", "print", "(", "'Feedback from openMadrigal '", ",", "a", ")" ]
Downloads data from Madrigal. The user's name should be provided in field user. John Malkovich should be entered as John+Malkovich. The password field should be the user's email address. These parameters are passed to Madrigal when downloading. The affiliation field is set to pysat to enable tracking of pysat downloads. Parameters ---------- date_array : array-like Sequence of dates to download data for. tag : string Denotes type of file to load. sat_id : string Satellite ID; used to look up the Madrigal kindat code. data_path : string Path to directory where downloaded files will be stored. user : string User name, e.g. John+Malkovich. password : string User email address.
[ "Downloads", "data", "from", "Madrigal", ".", "The", "user", "s", "names", "should", "be", "provided", "in", "field", "user", ".", "John", "Malkovich", "should", "be", "entered", "as", "John", "+", "Malkovich", "The", "password", "field", "should", "be", "the", "user", "s", "email", "address", ".", "These", "parameters", "are", "passed", "to", "Madrigal", "when", "downloading", ".", "The", "affiliation", "field", "is", "set", "to", "pysat", "to", "enable", "tracking", "of", "pysat", "downloads", ".", "Parameters", "----------" ]
python
train
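The function is normally driven by pysat's Instrument machinery, but a direct call would look like this (dates, path and credentials are illustrative; globalDownload.py from the Madrigal web tools must be on the PATH):

import pandas as pds

dates = pds.date_range('2001-01-01', '2001-01-02')
download(dates, tag='utd', sat_id='f12', data_path='/tmp/dmsp',
         user='Jane+Doe', password='jane.doe@example.org')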
nuagenetworks/bambou
bambou/nurest_object.py
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_object.py#L432-L464
def expose_attribute(self, local_name, attribute_type, remote_name=None, display_name=None, is_required=False, is_readonly=False, max_length=None, min_length=None, is_identifier=False, choices=None, is_unique=False, is_email=False, is_login=False, is_editable=True, is_password=False, can_order=False, can_search=False, subtype=None, min_value=None, max_value=None): """ Expose local_name as remote_name An exposed attribute `local_name` will be sent within the HTTP request as a `remote_name` """ if remote_name is None: remote_name = local_name if display_name is None: display_name = local_name attribute = NURemoteAttribute(local_name=local_name, remote_name=remote_name, attribute_type=attribute_type) attribute.display_name = display_name attribute.is_required = is_required attribute.is_readonly = is_readonly attribute.min_length = min_length attribute.max_length = max_length attribute.is_editable = is_editable attribute.is_identifier = is_identifier attribute.choices = choices attribute.is_unique = is_unique attribute.is_email = is_email attribute.is_login = is_login attribute.is_password = is_password attribute.can_order = can_order attribute.can_search = can_search attribute.subtype = subtype attribute.min_value = min_value attribute.max_value = max_value self._attributes[local_name] = attribute
[ "def", "expose_attribute", "(", "self", ",", "local_name", ",", "attribute_type", ",", "remote_name", "=", "None", ",", "display_name", "=", "None", ",", "is_required", "=", "False", ",", "is_readonly", "=", "False", ",", "max_length", "=", "None", ",", "min_length", "=", "None", ",", "is_identifier", "=", "False", ",", "choices", "=", "None", ",", "is_unique", "=", "False", ",", "is_email", "=", "False", ",", "is_login", "=", "False", ",", "is_editable", "=", "True", ",", "is_password", "=", "False", ",", "can_order", "=", "False", ",", "can_search", "=", "False", ",", "subtype", "=", "None", ",", "min_value", "=", "None", ",", "max_value", "=", "None", ")", ":", "if", "remote_name", "is", "None", ":", "remote_name", "=", "local_name", "if", "display_name", "is", "None", ":", "display_name", "=", "local_name", "attribute", "=", "NURemoteAttribute", "(", "local_name", "=", "local_name", ",", "remote_name", "=", "remote_name", ",", "attribute_type", "=", "attribute_type", ")", "attribute", ".", "display_name", "=", "display_name", "attribute", ".", "is_required", "=", "is_required", "attribute", ".", "is_readonly", "=", "is_readonly", "attribute", ".", "min_length", "=", "min_length", "attribute", ".", "max_length", "=", "max_length", "attribute", ".", "is_editable", "=", "is_editable", "attribute", ".", "is_identifier", "=", "is_identifier", "attribute", ".", "choices", "=", "choices", "attribute", ".", "is_unique", "=", "is_unique", "attribute", ".", "is_email", "=", "is_email", "attribute", ".", "is_login", "=", "is_login", "attribute", ".", "is_password", "=", "is_password", "attribute", ".", "can_order", "=", "can_order", "attribute", ".", "can_search", "=", "can_search", "attribute", ".", "subtype", "=", "subtype", "attribute", ".", "min_value", "=", "min_value", "attribute", ".", "max_value", "=", "max_value", "self", ".", "_attributes", "[", "local_name", "]", "=", "attribute" ]
Expose local_name as remote_name An exposed attribute `local_name` will be sent within the HTTP request as a `remote_name`
[ "Expose", "local_name", "as", "remote_name" ]
python
train
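A hedged sketch of the usual bambou pattern: a NURESTObject subclass exposes its attributes in __init__ (class and attribute names are illustrative):

class NUEnterprise(NURESTObject):  # illustrative subclass
    def __init__(self):
        super(NUEnterprise, self).__init__()
        self._name = None
        self.expose_attribute(local_name='name', attribute_type=str,
                              is_required=True, max_length=255)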
esterhui/pypu
pypu/service_facebook.py
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/service_facebook.py#L103-L111
def Remove(self,directory,filename): """Deletes files from fb""" if self._isMediaFile(filename): return self._remove_media(directory,filename) elif self._isConfigFile(filename): return True print "Not handled!" return False
[ "def", "Remove", "(", "self", ",", "directory", ",", "filename", ")", ":", "if", "self", ".", "_isMediaFile", "(", "filename", ")", ":", "return", "self", ".", "_remove_media", "(", "directory", ",", "filename", ")", "elif", "self", ".", "_isConfigFile", "(", "filename", ")", ":", "return", "True", "print", "\"Not handled!\"", "return", "False" ]
Deletes files from fb
[ "Deletes", "files", "from", "fb" ]
python
train
pywbem/pywbem
attic/cim_provider.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/cim_provider.py#L604-L632
def MI_createInstance(self, env, instance): # pylint: disable=invalid-name """Create a CIM instance, and return its instance name Implements the WBEM operation CreateInstance in terms of the set_instance method. A derived class will not normally override this method. """ logger = env.get_logger() logger.log_debug('CIMProvider MI_createInstance called...') rval = None ch = env.get_cimom_handle() cimClass = ch.GetClass(instance.classname, instance.path.namespace, LocalOnly=False, IncludeQualifiers=True) # CIMOM has already filled in default property values for # props with default values, if values not supplied by client. rval = self.set_instance(env=env, instance=instance, previous_instance=None, cim_class=cimClass) rval = build_instance_name(rval, cimClass) logger.log_debug('CIMProvider MI_createInstance returning') return rval
[ "def", "MI_createInstance", "(", "self", ",", "env", ",", "instance", ")", ":", "# pylint: disable=invalid-name", "logger", "=", "env", ".", "get_logger", "(", ")", "logger", ".", "log_debug", "(", "'CIMProvider MI_createInstance called...'", ")", "rval", "=", "None", "ch", "=", "env", ".", "get_cimom_handle", "(", ")", "cimClass", "=", "ch", ".", "GetClass", "(", "instance", ".", "classname", ",", "instance", ".", "path", ".", "namespace", ",", "LocalOnly", "=", "False", ",", "IncludeQualifiers", "=", "True", ")", "# CIMOM has already filled in default property values for", "# props with default values, if values not supplied by client.", "rval", "=", "self", ".", "set_instance", "(", "env", "=", "env", ",", "instance", "=", "instance", ",", "previous_instance", "=", "None", ",", "cim_class", "=", "cimClass", ")", "rval", "=", "build_instance_name", "(", "rval", ",", "cimClass", ")", "logger", ".", "log_debug", "(", "'CIMProvider MI_createInstance returning'", ")", "return", "rval" ]
Create a CIM instance, and return its instance name Implements the WBEM operation CreateInstance in terms of the set_instance method. A derived class will not normally override this method.
[ "Create", "a", "CIM", "instance", "and", "return", "its", "instance", "name" ]
python
train
saltstack/salt
salt/modules/redismod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L346-L359
def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key)
[ "def", "hlen", "(", "key", ",", "host", "=", "None", ",", "port", "=", "None", ",", "db", "=", "None", ",", "password", "=", "None", ")", ":", "server", "=", "_connect", "(", "host", ",", "port", ",", "db", ",", "password", ")", "return", "server", ".", "hlen", "(", "key", ")" ]
Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash
[ "Returns", "number", "of", "fields", "of", "a", "hash", "." ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgetdelegate.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgetdelegate.py#L534-L547
def get_total_indentation(self, index): """Get the indentation for the given index :param index: the index to query :type index: :class:`QtCore.ModelIndex` :returns: the number of parents :rtype: int :raises: None """ n = 0 while index.isValid(): n += 1 index = index.parent() return n * self.indentation()
[ "def", "get_total_indentation", "(", "self", ",", "index", ")", ":", "n", "=", "0", "while", "index", ".", "isValid", "(", ")", ":", "n", "+=", "1", "index", "=", "index", ".", "parent", "(", ")", "return", "n", "*", "self", ".", "indentation", "(", ")" ]
Get the indentation for the given index :param index: the index to query :type index: :class:`QtCore.ModelIndex` :returns: the number of parents :rtype: int :raises: None
[ "Get", "the", "indentation", "for", "the", "given", "index" ]
python
train
vmonaco/pohmm
pohmm/classification.py
https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/classification.py#L32-L45
def fit_df(self, labels, dfs, pstate_col=PSTATE_COL): """ Fit the classifier with the given labels and DataFrames dfs """ assert len(labels) == len(dfs) for label in set(labels): label_dfs = [s for l,s in zip(labels, dfs) if l == label] pohmm = self.pohmm_factory() pohmm.fit_df(label_dfs, pstate_col=pstate_col) self.pohmms[label] = pohmm return self
[ "def", "fit_df", "(", "self", ",", "labels", ",", "dfs", ",", "pstate_col", "=", "PSTATE_COL", ")", ":", "assert", "len", "(", "labels", ")", "==", "len", "(", "dfs", ")", "for", "label", "in", "set", "(", "labels", ")", ":", "label_dfs", "=", "[", "s", "for", "l", ",", "s", "in", "zip", "(", "labels", ",", "dfs", ")", "if", "l", "==", "label", "]", "pohmm", "=", "self", ".", "pohmm_factory", "(", ")", "pohmm", ".", "fit_df", "(", "label_dfs", ",", "pstate_col", "=", "pstate_col", ")", "self", ".", "pohmms", "[", "label", "]", "=", "pohmm", "return", "self" ]
Fit the classifier with the given labels and DataFrames dfs
[ "Fit", "the", "classifier", "with", "labels", "y", "and", "DataFrames", "dfs" ]
python
train
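A minimal fitting sketch, assuming the classifier was constructed with a pohmm factory and each sample is a pandas DataFrame carrying the pstate column:

labels = ['alice', 'alice', 'bob']
dfs = [df_alice_1, df_alice_2, df_bob_1]  # pandas DataFrames (illustrative)
clf = clf.fit_df(labels, dfs)             # fits one Pohmm per unique label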
wbond/vat_moss-python
vat_moss/billing_address.py
https://github.com/wbond/vat_moss-python/blob/5089dcf036eb2e9abc58e78186fd46b522a50620/vat_moss/billing_address.py#L17-L104
def calculate_rate(country_code, postal_code, city): """ Calculates the VAT rate that should be collected based on address information provided :param country_code: The two-character country code :param postal_code: The postal code for the user :param city: The city name for the user :raises: ValueError - If country code is not two characters, or postal_code or city are not strings. postal_code may be None or blank string for countries without postal codes. :return: A tuple of (Decimal percentage rate, country code, exception name [or None]) """ if not country_code or not isinstance(country_code, str_cls): raise ValueError('Invalidly formatted country code') country_code = country_code.strip() if len(country_code) != 2: raise ValueError('Invalidly formatted country code') country_code = country_code.upper() if country_code not in COUNTRIES_WITHOUT_POSTAL_CODES: if not postal_code or not isinstance(postal_code, str_cls): raise ValueError('Postal code is not a string') if not city or not isinstance(city, str_cls): raise ValueError('City is not a string') if isinstance(postal_code, str_cls): postal_code = re.sub('\\s+', '', postal_code) postal_code = postal_code.upper() # Remove the common european practice of adding the country code # to the beginning of a postal code, followed by a dash if len(postal_code) > 3 and postal_code[0:3] == country_code + '-': postal_code = postal_code[3:] postal_code = postal_code.replace('-', '') city = city.lower().strip() if country_code not in rates.BY_COUNTRY and country_code not in POSTAL_CODE_EXCEPTIONS: return (Decimal('0.0'), country_code, None) country_default = rates.BY_COUNTRY.get(country_code, {'rate': Decimal('0.0')})['rate'] if country_code not in POSTAL_CODE_EXCEPTIONS: return (country_default, country_code, None) exceptions = POSTAL_CODE_EXCEPTIONS[country_code] for matcher in exceptions: # Postal code-only match if isinstance(matcher, str_cls): postal_regex = matcher city_regex = None else: postal_regex, city_regex = matcher if not re.match(postal_regex, postal_code): continue if city_regex and not re.search(city_regex, city): continue mapped_country = exceptions[matcher]['country_code'] # There is at least one entry where we map to a different country, # but are not mapping to an exception if 'name' not in exceptions[matcher]: country_code = mapped_country country_default = rates.BY_COUNTRY[country_code]['rate'] break mapped_name = exceptions[matcher]['name'] rate = rates.BY_COUNTRY[mapped_country]['exceptions'][mapped_name] return (rate, mapped_country, mapped_name) return (country_default, country_code, None)
[ "def", "calculate_rate", "(", "country_code", ",", "postal_code", ",", "city", ")", ":", "if", "not", "country_code", "or", "not", "isinstance", "(", "country_code", ",", "str_cls", ")", ":", "raise", "ValueError", "(", "'Invalidly formatted country code'", ")", "country_code", "=", "country_code", ".", "strip", "(", ")", "if", "len", "(", "country_code", ")", "!=", "2", ":", "raise", "ValueError", "(", "'Invalidly formatted country code'", ")", "country_code", "=", "country_code", ".", "upper", "(", ")", "if", "country_code", "not", "in", "COUNTRIES_WITHOUT_POSTAL_CODES", ":", "if", "not", "postal_code", "or", "not", "isinstance", "(", "postal_code", ",", "str_cls", ")", ":", "raise", "ValueError", "(", "'Postal code is not a string'", ")", "if", "not", "city", "or", "not", "isinstance", "(", "city", ",", "str_cls", ")", ":", "raise", "ValueError", "(", "'City is not a string'", ")", "if", "isinstance", "(", "postal_code", ",", "str_cls", ")", ":", "postal_code", "=", "re", ".", "sub", "(", "'\\\\s+'", ",", "''", ",", "postal_code", ")", "postal_code", "=", "postal_code", ".", "upper", "(", ")", "# Remove the common european practice of adding the country code", "# to the beginning of a postal code, followed by a dash", "if", "len", "(", "postal_code", ")", ">", "3", "and", "postal_code", "[", "0", ":", "3", "]", "==", "country_code", "+", "'-'", ":", "postal_code", "=", "postal_code", "[", "3", ":", "]", "postal_code", "=", "postal_code", ".", "replace", "(", "'-'", ",", "''", ")", "city", "=", "city", ".", "lower", "(", ")", ".", "strip", "(", ")", "if", "country_code", "not", "in", "rates", ".", "BY_COUNTRY", "and", "country_code", "not", "in", "POSTAL_CODE_EXCEPTIONS", ":", "return", "(", "Decimal", "(", "'0.0'", ")", ",", "country_code", ",", "None", ")", "country_default", "=", "rates", ".", "BY_COUNTRY", ".", "get", "(", "country_code", ",", "{", "'rate'", ":", "Decimal", "(", "'0.0'", ")", "}", ")", "[", "'rate'", "]", "if", "country_code", "not", "in", "POSTAL_CODE_EXCEPTIONS", ":", "return", "(", "country_default", ",", "country_code", ",", "None", ")", "exceptions", "=", "POSTAL_CODE_EXCEPTIONS", "[", "country_code", "]", "for", "matcher", "in", "exceptions", ":", "# Postal code-only match", "if", "isinstance", "(", "matcher", ",", "str_cls", ")", ":", "postal_regex", "=", "matcher", "city_regex", "=", "None", "else", ":", "postal_regex", ",", "city_regex", "=", "matcher", "if", "not", "re", ".", "match", "(", "postal_regex", ",", "postal_code", ")", ":", "continue", "if", "city_regex", "and", "not", "re", ".", "search", "(", "city_regex", ",", "city", ")", ":", "continue", "mapped_country", "=", "exceptions", "[", "matcher", "]", "[", "'country_code'", "]", "# There is at least one entry where we map to a different country,", "# but are not mapping to an exception", "if", "'name'", "not", "in", "exceptions", "[", "matcher", "]", ":", "country_code", "=", "mapped_country", "country_default", "=", "rates", ".", "BY_COUNTRY", "[", "country_code", "]", "[", "'rate'", "]", "break", "mapped_name", "=", "exceptions", "[", "matcher", "]", "[", "'name'", "]", "rate", "=", "rates", ".", "BY_COUNTRY", "[", "mapped_country", "]", "[", "'exceptions'", "]", "[", "mapped_name", "]", "return", "(", "rate", ",", "mapped_country", ",", "mapped_name", ")", "return", "(", "country_default", ",", "country_code", ",", "None", ")" ]
Calculates the VAT rate that should be collected based on address information provided :param country_code: The two-character country code :param postal_code: The postal code for the user :param city: The city name for the user :raises: ValueError - If country code is not two characters, or postal_code or city are not strings. postal_code may be None or blank string for countries without postal codes. :return: A tuple of (Decimal percentage rate, country code, exception name [or None])
[ "Calculates", "the", "VAT", "rate", "that", "should", "be", "collected", "based", "on", "address", "information", "provided" ]
python
train
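Example call; the tuple unpacks into the rate, the (possibly remapped) country code, and the exception name when a regional exception matched:

rate, country, exception_name = calculate_rate('DE', '10115', 'Berlin')
# e.g. (Decimal('0.19'), 'DE', None) -- the exact rate depends on the
# bundled rates table, so treat the numbers as illustrative.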
gem/oq-engine
openquake/baselib/datastore.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/datastore.py#L256-L268
def get_attrs(self, key): """ :param key: dataset path :returns: dictionary of attributes for that path """ try: dset = h5py.File.__getitem__(self.hdf5, key) except KeyError: if self.parent != (): dset = h5py.File.__getitem__(self.parent.hdf5, key) else: raise return dict(dset.attrs)
[ "def", "get_attrs", "(", "self", ",", "key", ")", ":", "try", ":", "dset", "=", "h5py", ".", "File", ".", "__getitem__", "(", "self", ".", "hdf5", ",", "key", ")", "except", "KeyError", ":", "if", "self", ".", "parent", "!=", "(", ")", ":", "dset", "=", "h5py", ".", "File", ".", "__getitem__", "(", "self", ".", "parent", ".", "hdf5", ",", "key", ")", "else", ":", "raise", "return", "dict", "(", "dset", ".", "attrs", ")" ]
:param key: dataset path :returns: dictionary of attributes for that path
[ ":", "param", "key", ":", "dataset", "path", ":", "returns", ":", "dictionary", "of", "attributes", "for", "that", "path" ]
python
train
cltk/cltk
cltk/stem/latin/declension.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/latin/declension.py#L113-L189
def decline(self, lemma, flatten=False, collatinus_dict=False): """ Decline a lemma .. warning:: POS are incomplete as we do not detect the type outside of verbs, participles and adjectives. :raise UnknownLemma: When the lemma is unknown to our data :param lemma: Lemma (Canonical form) to decline :type lemma: str :param flatten: If set to True, returns a list of forms without natural language information about them :type flatten: bool :param collatinus_dict: If set to True, returns a dictionary of grammatically valid forms, including variants, with keys corresponding to morphological information. :type collatinus_dict: bool :return: List of tuples where the first value is the form and the second the POS, e.g. [("sum", "v1ppip---")] :rtype: list or dict """ if lemma not in self.__lemmas__: raise UnknownLemma("%s is unknown" % lemma) # Get data information lemma_entry = self.__lemmas__[lemma] model = self.__models__[lemma_entry["model"]] # Get the roots roots = self.__getRoots(lemma, model=model) # Get the known forms in order keys = sorted([int(key) for key in model["des"].keys()]) forms_data = [(key, model["des"][str(key)]) for key in keys] # Generate the return dict forms = {key: [] for key in keys} for key, form_list in forms_data: for form in form_list: root_id, endings = tuple(form) for root in roots[root_id]: for ending in endings: forms[key].append(root + ending) # sufd means we have the original forms of the parent but we add a suffix if len(model["sufd"]): # For each constant form1 for key, iter_forms in forms.items(): new_forms = [] # We add the constant suffix for sufd in model["sufd"]: new_forms += [form+sufd for form in iter_forms] forms[key] = new_forms # If we need a secure version of the forms. For example, if we have variants if len(model["suf"]): cached_forms = {k: v+[] for k, v in forms.items()} # Making cache without using copy # For each suffix # The format is [suffix characters, [modified forms]] for suffixes in model["suf"]: suffix, modified_forms = suffixes[0], suffixes[1] for modified_form in modified_forms: forms[modified_form] += [f+suffix for f in cached_forms[modified_form]] # We update with the new roots # If some form do not exist, we delete them prehentively if len(model["abs"]): for abs_form in model["abs"]: if abs_form in forms: del forms[abs_form] if flatten: return list([form for case_forms in forms.values() for form in case_forms]) elif collatinus_dict: return forms else: return list( [(form, self.__getPOS(key)) for key, case_forms in forms.items() for form in case_forms] )
[ "def", "decline", "(", "self", ",", "lemma", ",", "flatten", "=", "False", ",", "collatinus_dict", "=", "False", ")", ":", "if", "lemma", "not", "in", "self", ".", "__lemmas__", ":", "raise", "UnknownLemma", "(", "\"%s is unknown\"", "%", "lemma", ")", "# Get data information", "lemma_entry", "=", "self", ".", "__lemmas__", "[", "lemma", "]", "model", "=", "self", ".", "__models__", "[", "lemma_entry", "[", "\"model\"", "]", "]", "# Get the roots", "roots", "=", "self", ".", "__getRoots", "(", "lemma", ",", "model", "=", "model", ")", "# Get the known forms in order", "keys", "=", "sorted", "(", "[", "int", "(", "key", ")", "for", "key", "in", "model", "[", "\"des\"", "]", ".", "keys", "(", ")", "]", ")", "forms_data", "=", "[", "(", "key", ",", "model", "[", "\"des\"", "]", "[", "str", "(", "key", ")", "]", ")", "for", "key", "in", "keys", "]", "# Generate the return dict", "forms", "=", "{", "key", ":", "[", "]", "for", "key", "in", "keys", "}", "for", "key", ",", "form_list", "in", "forms_data", ":", "for", "form", "in", "form_list", ":", "root_id", ",", "endings", "=", "tuple", "(", "form", ")", "for", "root", "in", "roots", "[", "root_id", "]", ":", "for", "ending", "in", "endings", ":", "forms", "[", "key", "]", ".", "append", "(", "root", "+", "ending", ")", "# sufd means we have the original forms of the parent but we add a suffix", "if", "len", "(", "model", "[", "\"sufd\"", "]", ")", ":", "# For each constant form1", "for", "key", ",", "iter_forms", "in", "forms", ".", "items", "(", ")", ":", "new_forms", "=", "[", "]", "# We add the constant suffix", "for", "sufd", "in", "model", "[", "\"sufd\"", "]", ":", "new_forms", "+=", "[", "form", "+", "sufd", "for", "form", "in", "iter_forms", "]", "forms", "[", "key", "]", "=", "new_forms", "# If we need a secure version of the forms. For example, if we have variants", "if", "len", "(", "model", "[", "\"suf\"", "]", ")", ":", "cached_forms", "=", "{", "k", ":", "v", "+", "[", "]", "for", "k", ",", "v", "in", "forms", ".", "items", "(", ")", "}", "# Making cache without using copy", "# For each suffix", "# The format is [suffix characters, [modified forms]]", "for", "suffixes", "in", "model", "[", "\"suf\"", "]", ":", "suffix", ",", "modified_forms", "=", "suffixes", "[", "0", "]", ",", "suffixes", "[", "1", "]", "for", "modified_form", "in", "modified_forms", ":", "forms", "[", "modified_form", "]", "+=", "[", "f", "+", "suffix", "for", "f", "in", "cached_forms", "[", "modified_form", "]", "]", "# We update with the new roots", "# If some form do not exist, we delete them prehentively", "if", "len", "(", "model", "[", "\"abs\"", "]", ")", ":", "for", "abs_form", "in", "model", "[", "\"abs\"", "]", ":", "if", "abs_form", "in", "forms", ":", "del", "forms", "[", "abs_form", "]", "if", "flatten", ":", "return", "list", "(", "[", "form", "for", "case_forms", "in", "forms", ".", "values", "(", ")", "for", "form", "in", "case_forms", "]", ")", "elif", "collatinus_dict", ":", "return", "forms", "else", ":", "return", "list", "(", "[", "(", "form", ",", "self", ".", "__getPOS", "(", "key", ")", ")", "for", "key", ",", "case_forms", "in", "forms", ".", "items", "(", ")", "for", "form", "in", "case_forms", "]", ")" ]
Decline a lemma .. warning:: POS are incomplete as we do not detect the type outside of verbs, participles and adjectives. :raise UnknownLemma: When the lemma is unknown to our data :param lemma: Lemma (Canonical form) to decline :type lemma: str :param flatten: If set to True, returns a list of forms without natural language information about them :type flatten: bool :param collatinus_dict: If set to True, returns a dictionary of grammatically valid forms, including variants, with keys corresponding to morphological information. :type collatinus_dict: bool :return: List of tuples where the first value is the form and the second the POS, e.g. [("sum", "v1ppip---")] :rtype: list or dict
[ "Decline", "a", "lemma" ]
python
train
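Usage through CLTK's Collatinus-based decliner (the class name is taken from the module path above; the lemma must exist in the bundled data):

from cltk.stem.latin.declension import CollatinusDecliner

decliner = CollatinusDecliner()
forms = decliner.decline('via', flatten=True)
# a plain list of inflected forms, e.g. ['via', 'viam', 'viae', ...]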
Dallinger/Dallinger
dallinger/deployment.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/deployment.py#L548-L568
def check_status(self): """Check the output of the summary route until the experiment is complete, then we can stop monitoring Heroku subprocess output. """ self.out.log("Recruitment is complete. Waiting for experiment completion...") base_url = get_base_url() status_url = base_url + "/summary" while not self.complete: time.sleep(10) try: resp = requests.get(status_url) exp_data = resp.json() except (ValueError, requests.exceptions.RequestException): self.out.error("Error fetching experiment status.") else: self.out.log("Experiment summary: {}".format(exp_data)) if exp_data.get("completed", False): self.out.log("Experiment completed, all nodes filled.") self.complete = True self.heroku.stop()
[ "def", "check_status", "(", "self", ")", ":", "self", ".", "out", ".", "log", "(", "\"Recruitment is complete. Waiting for experiment completion...\"", ")", "base_url", "=", "get_base_url", "(", ")", "status_url", "=", "base_url", "+", "\"/summary\"", "while", "not", "self", ".", "complete", ":", "time", ".", "sleep", "(", "10", ")", "try", ":", "resp", "=", "requests", ".", "get", "(", "status_url", ")", "exp_data", "=", "resp", ".", "json", "(", ")", "except", "(", "ValueError", ",", "requests", ".", "exceptions", ".", "RequestException", ")", ":", "self", ".", "out", ".", "error", "(", "\"Error fetching experiment status.\"", ")", "else", ":", "self", ".", "out", ".", "log", "(", "\"Experiment summary: {}\"", ".", "format", "(", "exp_data", ")", ")", "if", "exp_data", ".", "get", "(", "\"completed\"", ",", "False", ")", ":", "self", ".", "out", ".", "log", "(", "\"Experiment completed, all nodes filled.\"", ")", "self", ".", "complete", "=", "True", "self", ".", "heroku", ".", "stop", "(", ")" ]
Check the output of the summary route until the experiment is complete, then we can stop monitoring Heroku subprocess output.
[ "Check", "the", "output", "of", "the", "summary", "route", "until", "the", "experiment", "is", "complete", "then", "we", "can", "stop", "monitoring", "Heroku", "subprocess", "output", "." ]
python
train
sammchardy/python-binance
binance/client.py
https://github.com/sammchardy/python-binance/blob/31c0d0a32f9edd528c6c2c1dd3044d9a34ce43cc/binance/client.py#L1286-L1310
def order_market_buy(self, **params): """Send in a new market buy order :param symbol: required :type symbol: str :param quantity: required :type quantity: decimal :param newClientOrderId: A unique id for the order. Automatically generated if not sent. :type newClientOrderId: str :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT. :type newOrderRespType: str :param recvWindow: the number of milliseconds the request is valid for :type recvWindow: int :returns: API response See order endpoint for full response options :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException """ params.update({ 'side': self.SIDE_BUY }) return self.order_market(**params)
[ "def", "order_market_buy", "(", "self", ",", "*", "*", "params", ")", ":", "params", ".", "update", "(", "{", "'side'", ":", "self", ".", "SIDE_BUY", "}", ")", "return", "self", ".", "order_market", "(", "*", "*", "params", ")" ]
Send in a new market buy order :param symbol: required :type symbol: str :param quantity: required :type quantity: decimal :param newClientOrderId: A unique id for the order. Automatically generated if not sent. :type newClientOrderId: str :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT. :type newOrderRespType: str :param recvWindow: the number of milliseconds the request is valid for :type recvWindow: int :returns: API response See order endpoint for full response options :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
[ "Send", "in", "a", "new", "market", "buy", "order" ]
python
train
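Standard client usage (credentials, symbol and quantity are illustrative):

from binance.client import Client

client = Client('api_key', 'api_secret')
order = client.order_market_buy(symbol='BNBBTC', quantity=100)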
eqcorrscan/EQcorrscan
eqcorrscan/core/match_filter.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L3808-L3825
def read_party(fname=None, read_detection_catalog=True): """ Read detections and metadata from a tar archive. :type fname: str :param fname: Filename to read from; if this contains a single Family, a Party of length 1 will be returned. :type read_detection_catalog: bool :param read_detection_catalog: Whether to read the detection catalog or not; if False, the catalog will be regenerated - for large catalogs this can be faster. :return: :class:`eqcorrscan.core.match_filter.Party` """ party = Party() party.read(filename=fname, read_detection_catalog=read_detection_catalog) return party
[ "def", "read_party", "(", "fname", "=", "None", ",", "read_detection_catalog", "=", "True", ")", ":", "party", "=", "Party", "(", ")", "party", ".", "read", "(", "filename", "=", "fname", ",", "read_detection_catalog", "=", "read_detection_catalog", ")", "return", "party" ]
Read detections and metadata from a tar archive. :type fname: str :param fname: Filename to read from; if this contains a single Family, a Party of length 1 will be returned. :type read_detection_catalog: bool :param read_detection_catalog: Whether to read the detection catalog or not; if False, the catalog will be regenerated - for large catalogs this can be faster. :return: :class:`eqcorrscan.core.match_filter.Party`
[ "Read", "detections", "and", "metadata", "from", "a", "tar", "archive", "." ]
python
train
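Round-trip sketch; the archive would be one previously written by Party.write (filename illustrative):

from eqcorrscan.core.match_filter import read_party

party = read_party('detections.tgz', read_detection_catalog=False)
print(len(party))  # number of Families in the Party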
aleju/imgaug
imgaug/augmentables/lines.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/lines.py#L1286-L1324
def to_segmentation_map(self, image_shape, size_lines=1, size_points=0, raise_if_out_of_image=False): """ Generate a segmentation map object from the line string. This is similar to :func:`imgaug.augmentables.lines.LineString.draw_mask`. The result is wrapped in a ``SegmentationMapOnImage`` object instead of just an array. Parameters ---------- image_shape : tuple of int The shape of the image onto which to draw the line mask. size_lines : int, optional Thickness of the line. size_points : int, optional Size of the points in pixels. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to False, no error will be raised and only the parts inside the image will be drawn. Returns ------- imgaug.augmentables.segmaps.SegmentationMapOnImage Segmentation map object containing drawn line string. """ from .segmaps import SegmentationMapOnImage return SegmentationMapOnImage( self.draw_mask( image_shape, size_lines=size_lines, size_points=size_points, raise_if_out_of_image=raise_if_out_of_image), shape=image_shape )
[ "def", "to_segmentation_map", "(", "self", ",", "image_shape", ",", "size_lines", "=", "1", ",", "size_points", "=", "0", ",", "raise_if_out_of_image", "=", "False", ")", ":", "from", ".", "segmaps", "import", "SegmentationMapOnImage", "return", "SegmentationMapOnImage", "(", "self", ".", "draw_mask", "(", "image_shape", ",", "size_lines", "=", "size_lines", ",", "size_points", "=", "size_points", ",", "raise_if_out_of_image", "=", "raise_if_out_of_image", ")", ",", "shape", "=", "image_shape", ")" ]
Generate a segmentation map object from the line string. This is similar to :func:`imgaug.augmentables.lines.LineString.draw_mask`. The result is wrapped in a ``SegmentationMapOnImage`` object instead of just an array. Parameters ---------- image_shape : tuple of int The shape of the image onto which to draw the line mask. size_lines : int, optional Thickness of the line. size_points : int, optional Size of the points in pixels. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to False, no error will be raised and only the parts inside the image will be drawn. Returns ------- imgaug.augmentables.segmaps.SegmentationMapOnImage Segmentation map object containing drawn line string.
[ "Generate", "a", "segmentation", "map", "object", "from", "the", "line", "string", "." ]
python
valid
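Example of rasterizing a line string into a segmentation map (coordinates and shape are illustrative):

from imgaug.augmentables.lines import LineString

ls = LineString([(10, 10), (60, 80)])
segmap = ls.to_segmentation_map((100, 120, 3), size_lines=3, size_points=0)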
dead-beef/markovchain
markovchain/cli/image.py
https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/cli/image.py#L286-L325
def cmd_generate(args): """Generate images. Parameters ---------- args : `argparse.Namespace` Command arguments. """ check_output_format(args.output, args.count) markov = load(MarkovImage, args.state, args) if args.size is None: if markov.scanner.resize is None: print('Unknown output image size', file=stderr) exit(1) width, height = markov.scanner.resize else: width, height = args.size if args.level is None: scale = markov.scanner.min_size else: scale = reduce( lambda x, y: x * y, islice(markov.scanner.level_scale, 0, args.level - 1), 1 ) width, height = width // scale, height // scale markov.scanner.traversal[0].show_progress = args.progress for fname in outfiles(markov, args.output, args.count, args.progress): img = markov( width, height, state_size=args.state_size, levels=args.level ) save_image(img, fname)
[ "def", "cmd_generate", "(", "args", ")", ":", "check_output_format", "(", "args", ".", "output", ",", "args", ".", "count", ")", "markov", "=", "load", "(", "MarkovImage", ",", "args", ".", "state", ",", "args", ")", "if", "args", ".", "size", "is", "None", ":", "if", "markov", ".", "scanner", ".", "resize", "is", "None", ":", "print", "(", "'Unknown output image size'", ",", "file", "=", "stderr", ")", "exit", "(", "1", ")", "width", ",", "height", "=", "markov", ".", "scanner", ".", "resize", "else", ":", "width", ",", "height", "=", "args", ".", "size", "if", "args", ".", "level", "is", "None", ":", "scale", "=", "markov", ".", "scanner", ".", "min_size", "else", ":", "scale", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "*", "y", ",", "islice", "(", "markov", ".", "scanner", ".", "level_scale", ",", "0", ",", "args", ".", "level", "-", "1", ")", ",", "1", ")", "width", ",", "height", "=", "width", "//", "scale", ",", "height", "//", "scale", "markov", ".", "scanner", ".", "traversal", "[", "0", "]", ".", "show_progress", "=", "args", ".", "progress", "for", "fname", "in", "outfiles", "(", "markov", ",", "args", ".", "output", ",", "args", ".", "count", ",", "args", ".", "progress", ")", ":", "img", "=", "markov", "(", "width", ",", "height", ",", "state_size", "=", "args", ".", "state_size", ",", "levels", "=", "args", ".", "level", ")", "save_image", "(", "img", ",", "fname", ")" ]
Generate images. Parameters ---------- args : `argparse.Namespace` Command arguments.
[ "Generate", "images", "." ]
python
train
mardix/Mocha
mocha/contrib/auth/__init__.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/auth/__init__.py#L636-L645
def add_federation(self, provider, federated_id): """ Add federated login to the current user :param provider: :param federated_id: :return: """ models.AuthUserFederation.new(user=self, provider=provider, federated_id=federated_id)
[ "def", "add_federation", "(", "self", ",", "provider", ",", "federated_id", ")", ":", "models", ".", "AuthUserFederation", ".", "new", "(", "user", "=", "self", ",", "provider", "=", "provider", ",", "federated_id", "=", "federated_id", ")" ]
Add federated login to the current user :param provider: :param federated_id: :return:
[ "Add", "federated", "login", "to", "the", "current", "user", ":", "param", "provider", ":", ":", "param", "federated_id", ":", ":", "return", ":" ]
python
train
Zsailer/pandas_flavor
pandas_flavor/register.py
https://github.com/Zsailer/pandas_flavor/blob/1953aeee09424300d69a11dd2ffd3460a806fb65/pandas_flavor/register.py#L6-L35
def register_dataframe_method(method): """Register a function as a method attached to the Pandas DataFrame. Example ------- .. code-block:: python @register_dataframe_method def print_column(df, col): '''Print the dataframe column given''' print(df[col]) """ def inner(*args, **kwargs): class AccessorMethod(object): def __init__(self, pandas_obj): self._obj = pandas_obj @wraps(method) def __call__(self, *args, **kwargs): return method(self._obj, *args, **kwargs) register_dataframe_accessor(method.__name__)(AccessorMethod) return method return inner()
[ "def", "register_dataframe_method", "(", "method", ")", ":", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "class", "AccessorMethod", "(", "object", ")", ":", "def", "__init__", "(", "self", ",", "pandas_obj", ")", ":", "self", ".", "_obj", "=", "pandas_obj", "@", "wraps", "(", "method", ")", "def", "__call__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "method", "(", "self", ".", "_obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", "register_dataframe_accessor", "(", "method", ".", "__name__", ")", "(", "AccessorMethod", ")", "return", "method", "return", "inner", "(", ")" ]
Register a function as a method attached to the Pandas DataFrame. Example ------- .. code-block:: python @register_dataframe_method def print_column(df, col): '''Print the dataframe column given''' print(df[col])
[ "Register", "a", "function", "as", "a", "method", "attached", "to", "the", "Pandas", "DataFrame", "." ]
python
train
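Extending the docstring's example: once decorated, the function is callable as a method on any DataFrame:

import pandas as pd

@register_dataframe_method
def print_column(df, col):
    """Print the dataframe column given"""
    print(df[col])

pd.DataFrame({'a': [1, 2]}).print_column('a')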
onecodex/onecodex
onecodex/utils.py
https://github.com/onecodex/onecodex/blob/326a0a1af140e3a57ccf31c3c9c5e17a5775c13d/onecodex/utils.py#L100-L109
def valid_api_key(ctx, param, value): """ Ensures an API key has a valid length (this is a click callback) """ if value is not None and len(value) != 32: raise click.BadParameter( "API Key must be 32 characters long, not {}".format(str(len(value))) ) else: return value
[ "def", "valid_api_key", "(", "ctx", ",", "param", ",", "value", ")", ":", "if", "value", "is", "not", "None", "and", "len", "(", "value", ")", "!=", "32", ":", "raise", "click", ".", "BadParameter", "(", "\"API Key must be 32 characters long, not {}\"", ".", "format", "(", "str", "(", "len", "(", "value", ")", ")", ")", ")", "else", ":", "return", "value" ]
Ensures an API key has a valid length (this is a click callback)
[ "Ensures", "an", "API", "has", "valid", "length", "(", "this", "is", "a", "click", "callback", ")" ]
python
train
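As a click callback it attaches to an option like so (command and option names are illustrative):

import click

@click.command()
@click.option('--api-key', callback=valid_api_key)
def login(api_key):
    click.echo('key accepted')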
TrafficSenseMSD/SumoTools
sumolib/miscutils.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/sumolib/miscutils.py#L124-L130
def avg(self): """return the mean value""" # XXX rename this method if len(self.values) > 0: return sum(self.values) / float(len(self.values)) else: return None
[ "def", "avg", "(", "self", ")", ":", "# XXX rename this method", "if", "len", "(", "self", ".", "values", ")", ">", "0", ":", "return", "sum", "(", "self", ".", "values", ")", "/", "float", "(", "len", "(", "self", ".", "values", ")", ")", "else", ":", "return", "None" ]
return the mean value
[ "return", "the", "mean", "value" ]
python
train
Robpol86/libnl
libnl/nl80211/iw_scan.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/nl80211/iw_scan.py#L306-L328
def get_11u_advert(_, data): """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n676. Positional arguments: data -- bytearray data to read. Returns: Dict. """ answers = dict() idx = 0 while idx < len(data) - 1: qri = data[idx] proto_id = data[idx + 1] answers['Query Response Info'] = qri answers['Query Response Length Limit'] = qri & 0x7f if qri & (1 << 7): answers['PAME-BI'] = True answers['proto_id'] = {0: 'ANQP', 1: 'MIH Information Service', 3: 'Emergency Alert System (EAS)', 2: 'MIH Command and Event Services Capability Discovery', 221: 'Vendor Specific'}.get(proto_id, 'Reserved: {0}'.format(proto_id)) idx += 2 return answers
[ "def", "get_11u_advert", "(", "_", ",", "data", ")", ":", "answers", "=", "dict", "(", ")", "idx", "=", "0", "while", "idx", "<", "len", "(", "data", ")", "-", "1", ":", "qri", "=", "data", "[", "idx", "]", "proto_id", "=", "data", "[", "idx", "+", "1", "]", "answers", "[", "'Query Response Info'", "]", "=", "qri", "answers", "[", "'Query Response Length Limit'", "]", "=", "qri", "&", "0x7f", "if", "qri", "&", "(", "1", "<<", "7", ")", ":", "answers", "[", "'PAME-BI'", "]", "=", "True", "answers", "[", "'proto_id'", "]", "=", "{", "0", ":", "'ANQP'", ",", "1", ":", "'MIH Information Service'", ",", "3", ":", "'Emergency Alert System (EAS)'", ",", "2", ":", "'MIH Command and Event Services Capability Discovery'", ",", "221", ":", "'Vendor Specific'", "}", ".", "get", "(", "proto_id", ",", "'Reserved: {0}'", ".", "format", "(", "proto_id", ")", ")", "idx", "+=", "2", "return", "answers" ]
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n676. Positional arguments: data -- bytearray data to read. Returns: Dict.
[ "http", ":", "//", "git", ".", "kernel", ".", "org", "/", "cgit", "/", "linux", "/", "kernel", "/", "git", "/", "jberg", "/", "iw", ".", "git", "/", "tree", "/", "scan", ".", "c?id", "=", "v3", ".", "17#n676", "." ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/router/ospf/timers/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/router/ospf/timers/__init__.py#L126-L147
def _set_throttle(self, v, load=False): """ Setter method for throttle, mapped from YANG variable /rbridge_id/router/ospf/timers/throttle (container) If this variable is read-only (config: false) in the source YANG file, then _set_throttle is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_throttle() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=throttle.throttle, is_container='container', presence=False, yang_name="throttle", rest_name="throttle", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The OSPF SPF timers.', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """throttle must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=throttle.throttle, is_container='container', presence=False, yang_name="throttle", rest_name="throttle", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The OSPF SPF timers.', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""", }) self.__throttle = t if hasattr(self, '_set'): self._set()
[ "def", "_set_throttle", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "throttle", ".", "throttle", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"throttle\"", ",", "rest_name", "=", "\"throttle\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'The OSPF SPF timers.'", ",", "u'cli-incomplete-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-ospf'", ",", "defining_module", "=", "'brocade-ospf'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"throttle must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=throttle.throttle, is_container='container', presence=False, yang_name=\"throttle\", rest_name=\"throttle\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The OSPF SPF timers.', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__throttle", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for throttle, mapped from YANG variable /rbridge_id/router/ospf/timers/throttle (container) If this variable is read-only (config: false) in the source YANG file, then _set_throttle is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_throttle() directly.
[ "Setter", "method", "for", "throttle", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "router", "/", "ospf", "/", "timers", "/", "throttle", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_throttle", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_throttle", "()", "directly", "." ]
python
train
toumorokoshi/sprinter
sprinter/formula/base.py
https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/formula/base.py#L192-L212
def _prompt_value(self, key, prompt_string, default=None, only_if_empty=True): """prompts the user for a value, and saves it to either the target or source manifest (whichever is appropriate for the phase) the default this method uses will be the original value passed by the user, in case one exists. e.g. if a user already answered 'yes' to a question, it will use 'yes' as the default instead of the one passed into this method. """ main_manifest = self.target or self.source if only_if_empty and main_manifest.has(key): return main_manifest.get(key) prompt_default = default if self.source and self.source.has(key): prompt_default = self.source.get(key) main_manifest.set(key, lib.prompt(prompt_string, default=prompt_default))
[ "def", "_prompt_value", "(", "self", ",", "key", ",", "prompt_string", ",", "default", "=", "None", ",", "only_if_empty", "=", "True", ")", ":", "main_manifest", "=", "self", ".", "target", "or", "self", ".", "source", "if", "only_if_empty", "and", "main_manifest", ".", "has", "(", "key", ")", ":", "return", "main_manifest", ".", "get", "(", "key", ")", "prompt_default", "=", "default", "if", "self", ".", "source", "and", "self", ".", "source", ".", "has", "(", "key", ")", ":", "prompt_default", "=", "self", ".", "source", ".", "get", "(", "key", ")", "main_manifest", ".", "set", "(", "key", ",", "lib", ".", "prompt", "(", "prompt_string", ",", "default", "=", "prompt_default", ")", ")" ]
prompts the user for a value, and saves it to either the target or source manifest (whichever is appropriate for the phase) the default this method uses will be the original value passed by the user, in case one exists. e.g. if a user already answered 'yes' to a question, it will use 'yes' as the default instead of the one passed into this method.
[ "prompts", "the", "user", "for", "a", "value", "and", "saves", "it", "to", "either", "the", "target", "or", "source", "manifest", "(", "whichever", "is", "appropriate", "for", "the", "phase", ")" ]
python
train
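To make the precedence rules concrete, here is a standalone sketch of the same logic using plain dicts as hypothetical stand-ins for sprinter's manifest objects:

def prompt_value(target, source, key, prompt_string, default=None):
    # mirrors _prompt_value: write to target if present, else source
    main = target if target is not None else source
    if key in main:                       # only_if_empty behaviour
        return main[key]
    if source is not None and key in source:
        default = source[key]             # prefer the user's earlier answer
    main[key] = input('%s [%s]: ' % (prompt_string, default)) or default
    return main[key]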
samghelms/mathviz
mathviz_hopper/src/bottle.py
https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/bottle.py#L581-L597
def get_undecorated_callback(self): """ Return the callback. If the callback is a decorated function, try to recover the original function. """ func = self.callback func = getattr(func, '__func__' if py3k else 'im_func', func) closure_attr = '__closure__' if py3k else 'func_closure' while hasattr(func, closure_attr) and getattr(func, closure_attr): attributes = getattr(func, closure_attr) func = attributes[0].cell_contents # in case of decorators with multiple arguments if not isinstance(func, FunctionType): # pick first FunctionType instance from multiple arguments func = filter(lambda x: isinstance(x, FunctionType), map(lambda x: x.cell_contents, attributes)) func = list(func)[0] # py3 support return func
[ "def", "get_undecorated_callback", "(", "self", ")", ":", "func", "=", "self", ".", "callback", "func", "=", "getattr", "(", "func", ",", "'__func__'", "if", "py3k", "else", "'im_func'", ",", "func", ")", "closure_attr", "=", "'__closure__'", "if", "py3k", "else", "'func_closure'", "while", "hasattr", "(", "func", ",", "closure_attr", ")", "and", "getattr", "(", "func", ",", "closure_attr", ")", ":", "attributes", "=", "getattr", "(", "func", ",", "closure_attr", ")", "func", "=", "attributes", "[", "0", "]", ".", "cell_contents", "# in case of decorators with multiple arguments", "if", "not", "isinstance", "(", "func", ",", "FunctionType", ")", ":", "# pick first FunctionType instance from multiple arguments", "func", "=", "filter", "(", "lambda", "x", ":", "isinstance", "(", "x", ",", "FunctionType", ")", ",", "map", "(", "lambda", "x", ":", "x", ".", "cell_contents", ",", "attributes", ")", ")", "func", "=", "list", "(", "func", ")", "[", "0", "]", "# py3 support", "return", "func" ]
Return the callback. If the callback is a decorated function, try to recover the original function.
[ "Return", "the", "callback", ".", "If", "the", "callback", "is", "a", "decorated", "function", "try", "to", "recover", "the", "original", "function", "." ]
python
train
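A small self-contained sketch of the closure-walking idea the method relies on: a decorator's wrapper keeps the original function in its closure cells, where it can be recovered:

import functools

def logged(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        return fn(*args, **kwargs)
    return wrapper

@logged
def handler():
    return 'ok'

# The undecorated function lives in the wrapper's closure.
original = handler.__closure__[0].cell_contents
assert original is not handler
assert original() == 'ok'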
orb-framework/orb
orb/core/column_types/string.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/column_types/string.py#L250-L263
def clean(self, py_value): """ Cleans the value before storing it. :param: py_value : <str> :return: <str> """ try: import bleach return bleach.clean(py_value, **self.__bleachOptions) except ImportError: warnings.warn('Unable to clean string column without bleach installed.') return py_value
[ "def", "clean", "(", "self", ",", "py_value", ")", ":", "try", ":", "import", "bleach", "return", "bleach", ".", "clean", "(", "py_value", ",", "*", "*", "self", ".", "__bleachOptions", ")", "except", "ImportError", ":", "warnings", ".", "warn", "(", "'Unable to clean string column without webhelpers installed.'", ")", "return", "py_value" ]
Cleans the value before storing it. :param: py_value : <str> :return: <str>
[ "Cleans", "the", "value", "before", "storing", "it", "." ]
python
train
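For context, a rough illustration of what bleach.clean does with its default options (the exact output may vary with the installed bleach version):

import bleach

dirty = '<script>alert(1)</script><b>bold</b>'
# Disallowed tags are escaped, safe tags like <b> are kept.
print(bleach.clean(dirty))
# -> '&lt;script&gt;alert(1)&lt;/script&gt;<b>bold</b>'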
cloudtools/troposphere
troposphere/template_generator.py
https://github.com/cloudtools/troposphere/blob/f7ea5591a7c287a843adc9c184d2f56064cfc632/troposphere/template_generator.py#L301-L332
def _normalize_properties(self, definition): """ Inspects the definition and returns a copy of it that is updated with any special property such as Condition, UpdatePolicy and the like. """ args = definition.get('Properties', {}).copy() if 'Condition' in definition: args.update({'Condition': definition['Condition']}) if 'UpdatePolicy' in definition: # there's only 1 kind of UpdatePolicy; use it args.update({'UpdatePolicy': self._create_instance( UpdatePolicy, definition['UpdatePolicy'])}) if 'CreationPolicy' in definition: # there's only 1 kind of CreationPolicy; use it args.update({'CreationPolicy': self._create_instance( CreationPolicy, definition['CreationPolicy'])}) if 'DeletionPolicy' in definition: # DeletionPolicy is very basic args.update( {'DeletionPolicy': self._convert_definition( definition['DeletionPolicy'])}) if 'Metadata' in definition: # there are various kinds of metadata; pass it as-is args.update( {'Metadata': self._convert_definition( definition['Metadata'])}) if 'DependsOn' in definition: args.update( {'DependsOn': self._convert_definition( definition['DependsOn'])}) return args
[ "def", "_normalize_properties", "(", "self", ",", "definition", ")", ":", "args", "=", "definition", ".", "get", "(", "'Properties'", ",", "{", "}", ")", ".", "copy", "(", ")", "if", "'Condition'", "in", "definition", ":", "args", ".", "update", "(", "{", "'Condition'", ":", "definition", "[", "'Condition'", "]", "}", ")", "if", "'UpdatePolicy'", "in", "definition", ":", "# there's only 1 kind of UpdatePolicy; use it", "args", ".", "update", "(", "{", "'UpdatePolicy'", ":", "self", ".", "_create_instance", "(", "UpdatePolicy", ",", "definition", "[", "'UpdatePolicy'", "]", ")", "}", ")", "if", "'CreationPolicy'", "in", "definition", ":", "# there's only 1 kind of CreationPolicy; use it", "args", ".", "update", "(", "{", "'CreationPolicy'", ":", "self", ".", "_create_instance", "(", "CreationPolicy", ",", "definition", "[", "'CreationPolicy'", "]", ")", "}", ")", "if", "'DeletionPolicy'", "in", "definition", ":", "# DeletionPolicity is very basic", "args", ".", "update", "(", "{", "'DeletionPolicy'", ":", "self", ".", "_convert_definition", "(", "definition", "[", "'DeletionPolicy'", "]", ")", "}", ")", "if", "'Metadata'", "in", "definition", ":", "# there are various kind of metadata; pass it as-is", "args", ".", "update", "(", "{", "'Metadata'", ":", "self", ".", "_convert_definition", "(", "definition", "[", "'Metadata'", "]", ")", "}", ")", "if", "'DependsOn'", "in", "definition", ":", "args", ".", "update", "(", "{", "'DependsOn'", ":", "self", ".", "_convert_definition", "(", "definition", "[", "'DependsOn'", "]", ")", "}", ")", "return", "args" ]
Inspects the definition and returns a copy of it that is updated with any special property such as Condition, UpdatePolicy and the like.
[ "Inspects", "the", "definition", "and", "returns", "a", "copy", "of", "it", "that", "is", "updated", "with", "any", "special", "property", "such", "as", "Condition", "UpdatePolicy", "and", "the", "like", "." ]
python
train
tensorflow/tensorboard
tensorboard/plugins/interactive_inference/interactive_inference_plugin.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/interactive_inference_plugin.py#L84-L100
def get_plugin_apps(self): """Obtains a mapping between routes and handlers. Stores the logdir. Returns: A mapping between routes and handlers (functions that respond to requests). """ return { '/infer': self._infer, '/update_example': self._update_example, '/examples_from_path': self._examples_from_path_handler, '/sprite': self._serve_sprite, '/duplicate_example': self._duplicate_example, '/delete_example': self._delete_example, '/infer_mutants': self._infer_mutants_handler, '/eligible_features': self._eligible_features_from_example_handler, }
[ "def", "get_plugin_apps", "(", "self", ")", ":", "return", "{", "'/infer'", ":", "self", ".", "_infer", ",", "'/update_example'", ":", "self", ".", "_update_example", ",", "'/examples_from_path'", ":", "self", ".", "_examples_from_path_handler", ",", "'/sprite'", ":", "self", ".", "_serve_sprite", ",", "'/duplicate_example'", ":", "self", ".", "_duplicate_example", ",", "'/delete_example'", ":", "self", ".", "_delete_example", ",", "'/infer_mutants'", ":", "self", ".", "_infer_mutants_handler", ",", "'/eligible_features'", ":", "self", ".", "_eligible_features_from_example_handler", ",", "}" ]
Obtains a mapping between routes and handlers. Stores the logdir. Returns: A mapping between routes and handlers (functions that respond to requests).
[ "Obtains", "a", "mapping", "between", "routes", "and", "handlers", ".", "Stores", "the", "logdir", "." ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/key_binding/bindings/named_commands.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/bindings/named_commands.py#L186-L190
def delete_char(event): " Delete character before the cursor. " deleted = event.current_buffer.delete(count=event.arg) if not deleted: event.cli.output.bell()
[ "def", "delete_char", "(", "event", ")", ":", "deleted", "=", "event", ".", "current_buffer", ".", "delete", "(", "count", "=", "event", ".", "arg", ")", "if", "not", "deleted", ":", "event", ".", "cli", ".", "output", ".", "bell", "(", ")" ]
Delete character before the cursor.
[ "Delete", "character", "before", "the", "cursor", "." ]
python
train
skorch-dev/skorch
skorch/classifier.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/classifier.py#L158-L193
def predict(self, X): """Where applicable, return class labels for samples in X. If the module's forward method returns multiple outputs as a tuple, it is assumed that the first output contains the relevant information and the other values are ignored. If all values are relevant, consider using :func:`~skorch.NeuralNet.forward` instead. Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. Returns ------- y_pred : numpy ndarray """ y_preds = [] for yp in self.forward_iter(X, training=False): yp = yp[0] if isinstance(yp, tuple) else yp y_preds.append(to_numpy(yp.max(-1)[-1])) y_pred = np.concatenate(y_preds, 0) return y_pred
[ "def", "predict", "(", "self", ",", "X", ")", ":", "y_preds", "=", "[", "]", "for", "yp", "in", "self", ".", "forward_iter", "(", "X", ",", "training", "=", "False", ")", ":", "yp", "=", "yp", "[", "0", "]", "if", "isinstance", "(", "yp", ",", "tuple", ")", "else", "yp", "y_preds", ".", "append", "(", "to_numpy", "(", "yp", ".", "max", "(", "-", "1", ")", "[", "-", "1", "]", ")", ")", "y_pred", "=", "np", ".", "concatenate", "(", "y_preds", ",", "0", ")", "return", "y_pred" ]
Where applicable, return class labels for samples in X. If the module's forward method returns multiple outputs as a tuple, it is assumed that the first output contains the relevant information and the other values are ignored. If all values are relevant, consider using :func:`~skorch.NeuralNet.forward` instead. Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. Returns ------- y_pred : numpy ndarray
[ "Where", "applicable", "return", "class", "labels", "for", "samples", "in", "X", "." ]
python
train
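In torch, tensor.max(-1) returns a (values, indices) pair, so the [-1] index picks the indices, i.e. an argmax over the class axis; a rough numpy equivalent of the per-batch step in predict():

import numpy as np

logits = np.array([[0.1, 2.0, -1.0],
                   [3.0, 0.2, 0.5]])
# yp.max(-1)[-1] in torch == argmax over the last axis in numpy
print(np.argmax(logits, axis=-1))  # -> [1 0]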
locationlabs/mockredis
mockredis/client.py
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L394-L400
def psetex(self, key, time, value): """ Set the value of ``key`` to ``value`` that expires in ``time`` milliseconds. ``time`` can be represented by an integer or a Python timedelta object. """ return self.set(key, value, px=time)
[ "def", "psetex", "(", "self", ",", "key", ",", "time", ",", "value", ")", ":", "return", "self", ".", "set", "(", "key", ",", "value", ",", "px", "=", "time", ")" ]
Set the value of ``key`` to ``value`` that expires in ``time`` milliseconds. ``time`` can be represented by an integer or a Python timedelta object.
[ "Set", "the", "value", "of", "key", "to", "value", "that", "expires", "in", "time", "milliseconds", ".", "time", "can", "be", "represented", "by", "an", "integer", "or", "a", "Python", "timedelta", "object", "." ]
python
train
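A short usage sketch, assuming the mock client factory exposed by this package; both integer milliseconds and a timedelta are accepted:

from datetime import timedelta
from mockredis import mock_redis_client

r = mock_redis_client()
r.psetex('session', 1500, 'abc')                       # 1500 ms
r.psetex('token', timedelta(milliseconds=750), 'xyz')  # timedelta works too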
DataBiosphere/toil
src/toil/wdl/wdl_synthesis.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_synthesis.py#L424-L443
def write_scatterfunction(self, job, scattername): ''' Writes out a python function for each WDL "scatter" object. ''' scatter_outputs = self.fetch_scatter_outputs(job) # write the function header fn_section = self.write_scatterfunction_header(scattername) # write the scatter definitions fn_section += self.write_scatterfunction_lists(scatter_outputs) # write the scatter loop fn_section += self.write_scatterfunction_loop(job, scatter_outputs) # write the outputs for the task to return fn_section += self.write_scatterfunction_outputreturn(scatter_outputs) return fn_section
[ "def", "write_scatterfunction", "(", "self", ",", "job", ",", "scattername", ")", ":", "scatter_outputs", "=", "self", ".", "fetch_scatter_outputs", "(", "job", ")", "# write the function header", "fn_section", "=", "self", ".", "write_scatterfunction_header", "(", "scattername", ")", "# write the scatter definitions", "fn_section", "+=", "self", ".", "write_scatterfunction_lists", "(", "scatter_outputs", ")", "# write", "fn_section", "+=", "self", ".", "write_scatterfunction_loop", "(", "job", ",", "scatter_outputs", ")", "# write the outputs for the task to return", "fn_section", "+=", "self", ".", "write_scatterfunction_outputreturn", "(", "scatter_outputs", ")", "return", "fn_section" ]
Writes out a python function for each WDL "scatter" object.
[ "Writes", "out", "a", "python", "function", "for", "each", "WDL", "scatter", "object", "." ]
python
train
apple/turicreate
src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/nearest_neighbors/_nearest_neighbors.py#L777-L935
def query(self, dataset, label=None, k=5, radius=None, verbose=True): """ For each row of the input 'dataset', retrieve the nearest neighbors from the model's stored data. In general, the query dataset does not need to be the same as the reference data stored in the model, but if it is, the 'include_self_edges' parameter can be set to False to exclude results that match query points to themselves. Parameters ---------- dataset : SFrame Query data. Must contain columns with the same names and types as the features used to train the model. Additional columns are allowed, but ignored. Please see the nearest neighbors :func:`~turicreate.nearest_neighbors.create` documentation for more detail on allowable data types. label : str, optional Name of the query SFrame column with row labels. If 'label' is not specified, row numbers are used to identify query dataset rows in the output SFrame. k : int, optional Number of nearest neighbors to return from the reference set for each query observation. The default is 5 neighbors, but setting it to ``None`` will return all neighbors within ``radius`` of the query point. radius : float, optional Only neighbors whose distance to a query point is smaller than this value are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. verbose: bool, optional If True, print progress updates and model details. Returns ------- out : SFrame An SFrame with the k-nearest neighbors of each query observation. The result contains four columns: the first is the label of the query observation, the second is the label of the nearby reference observation, the third is the distance between the query and reference observations, and the fourth is the rank of the reference observation among the query's k-nearest neighbors. See Also -------- similarity_graph Notes ----- - The `dataset` input to this method *can* have missing values (in contrast to the reference dataset used to create the nearest neighbors model). Missing numeric values are imputed to be the mean of the corresponding feature in the reference dataset, and missing strings are imputed to be empty strings. - If both ``k`` and ``radius`` are set to ``None``, each query point returns all of the reference set. If the reference dataset has :math:`n` rows and the query dataset has :math:`m` rows, the output is an SFrame with :math:`nm` rows. - For models created with the 'lsh' method, the query results may have fewer query labels than input query points. Because LSH is an approximate method, a query point may have fewer than 'k' neighbors. If LSH returns no neighbors at all for a query, the query point is omitted from the results. Examples -------- First construct a toy SFrame and create a nearest neighbors model: >>> sf = turicreate.SFrame({'label': range(3), ... 'feature1': [0.98, 0.62, 0.11], ... 'feature2': [0.69, 0.58, 0.36]}) >>> model = turicreate.nearest_neighbors.create(sf, 'label') A new SFrame contains query observations with same schema as the reference SFrame. This SFrame is passed to the ``query`` method. >>> queries = turicreate.SFrame({'label': range(3), ... 'feature1': [0.05, 0.61, 0.99], ... 
'feature2': [0.06, 0.97, 0.86]}) >>> model.query(queries, 'label', k=2) +-------------+-----------------+----------------+------+ | query_label | reference_label | distance | rank | +-------------+-----------------+----------------+------+ | 0 | 2 | 0.305941170816 | 1 | | 0 | 1 | 0.771556867638 | 2 | | 1 | 1 | 0.390128184063 | 1 | | 1 | 0 | 0.464004310325 | 2 | | 2 | 0 | 0.170293863659 | 1 | | 2 | 1 | 0.464004310325 | 2 | +-------------+-----------------+----------------+------+ """ ## Validate the 'dataset' input _tkutl._raise_error_if_not_sframe(dataset, "dataset") _tkutl._raise_error_if_sframe_empty(dataset, "dataset") ## Get model features ref_features = self.features sf_features = _tkutl._toolkits_select_columns(dataset, ref_features) ## Validate and preprocess the 'label' input if label is None: query_labels = _turicreate.SArray.from_sequence(len(dataset)) else: if not label in dataset.column_names(): raise ValueError( "Input 'label' must be a string matching the name of a " +\ "column in the reference SFrame 'dataset'.") if not dataset[label].dtype == str and not dataset[label].dtype == int: raise TypeError("The label column must contain integers or strings.") if label in ref_features: raise ValueError("The label column cannot be one of the features.") query_labels = dataset[label] ## Validate neighborhood parameters 'k' and 'radius' if k is not None: if not isinstance(k, int): raise ValueError("Input 'k' must be an integer.") if k <= 0: raise ValueError("Input 'k' must be larger than 0.") if radius is not None: if not isinstance(radius, (int, float)): raise ValueError("Input 'radius' must be an integer or float.") if radius < 0: raise ValueError("Input 'radius' must be non-negative.") ## Set k and radius to special values to indicate 'None' if k is None: k = -1 if radius is None: radius = -1.0 opts = {'model': self.__proxy__, 'model_name': self.__name__, 'features': sf_features, 'query_labels': query_labels, 'k': k, 'radius': radius} with QuietProgress(verbose): result = _turicreate.extensions._nearest_neighbors.query(opts) return result['neighbors']
[ "def", "query", "(", "self", ",", "dataset", ",", "label", "=", "None", ",", "k", "=", "5", ",", "radius", "=", "None", ",", "verbose", "=", "True", ")", ":", "## Validate the 'dataset' input", "_tkutl", ".", "_raise_error_if_not_sframe", "(", "dataset", ",", "\"dataset\"", ")", "_tkutl", ".", "_raise_error_if_sframe_empty", "(", "dataset", ",", "\"dataset\"", ")", "## Get model features", "ref_features", "=", "self", ".", "features", "sf_features", "=", "_tkutl", ".", "_toolkits_select_columns", "(", "dataset", ",", "ref_features", ")", "## Validate and preprocess the 'label' input", "if", "label", "is", "None", ":", "query_labels", "=", "_turicreate", ".", "SArray", ".", "from_sequence", "(", "len", "(", "dataset", ")", ")", "else", ":", "if", "not", "label", "in", "dataset", ".", "column_names", "(", ")", ":", "raise", "ValueError", "(", "\"Input 'label' must be a string matching the name of a \"", "+", "\"column in the reference SFrame 'dataset'.\"", ")", "if", "not", "dataset", "[", "label", "]", ".", "dtype", "==", "str", "and", "not", "dataset", "[", "label", "]", ".", "dtype", "==", "int", ":", "raise", "TypeError", "(", "\"The label column must contain integers or strings.\"", ")", "if", "label", "in", "ref_features", ":", "raise", "ValueError", "(", "\"The label column cannot be one of the features.\"", ")", "query_labels", "=", "dataset", "[", "label", "]", "## Validate neighborhood parameters 'k' and 'radius'", "if", "k", "is", "not", "None", ":", "if", "not", "isinstance", "(", "k", ",", "int", ")", ":", "raise", "ValueError", "(", "\"Input 'k' must be an integer.\"", ")", "if", "k", "<=", "0", ":", "raise", "ValueError", "(", "\"Input 'k' must be larger than 0.\"", ")", "if", "radius", "is", "not", "None", ":", "if", "not", "isinstance", "(", "radius", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "ValueError", "(", "\"Input 'radius' must be an integer or float.\"", ")", "if", "radius", "<", "0", ":", "raise", "ValueError", "(", "\"Input 'radius' must be non-negative.\"", ")", "## Set k and radius to special values to indicate 'None'", "if", "k", "is", "None", ":", "k", "=", "-", "1", "if", "radius", "is", "None", ":", "radius", "=", "-", "1.0", "opts", "=", "{", "'model'", ":", "self", ".", "__proxy__", ",", "'model_name'", ":", "self", ".", "__name__", ",", "'features'", ":", "sf_features", ",", "'query_labels'", ":", "query_labels", ",", "'k'", ":", "k", ",", "'radius'", ":", "radius", "}", "with", "QuietProgress", "(", "verbose", ")", ":", "result", "=", "_turicreate", ".", "extensions", ".", "_nearest_neighbors", ".", "query", "(", "opts", ")", "return", "result", "[", "'neighbors'", "]" ]
For each row of the input 'dataset', retrieve the nearest neighbors from the model's stored data. In general, the query dataset does not need to be the same as the reference data stored in the model, but if it is, the 'include_self_edges' parameter can be set to False to exclude results that match query points to themselves. Parameters ---------- dataset : SFrame Query data. Must contain columns with the same names and types as the features used to train the model. Additional columns are allowed, but ignored. Please see the nearest neighbors :func:`~turicreate.nearest_neighbors.create` documentation for more detail on allowable data types. label : str, optional Name of the query SFrame column with row labels. If 'label' is not specified, row numbers are used to identify query dataset rows in the output SFrame. k : int, optional Number of nearest neighbors to return from the reference set for each query observation. The default is 5 neighbors, but setting it to ``None`` will return all neighbors within ``radius`` of the query point. radius : float, optional Only neighbors whose distance to a query point is smaller than this value are returned. The default is ``None``, in which case the ``k`` nearest neighbors are returned for each query point, regardless of distance. verbose: bool, optional If True, print progress updates and model details. Returns ------- out : SFrame An SFrame with the k-nearest neighbors of each query observation. The result contains four columns: the first is the label of the query observation, the second is the label of the nearby reference observation, the third is the distance between the query and reference observations, and the fourth is the rank of the reference observation among the query's k-nearest neighbors. See Also -------- similarity_graph Notes ----- - The `dataset` input to this method *can* have missing values (in contrast to the reference dataset used to create the nearest neighbors model). Missing numeric values are imputed to be the mean of the corresponding feature in the reference dataset, and missing strings are imputed to be empty strings. - If both ``k`` and ``radius`` are set to ``None``, each query point returns all of the reference set. If the reference dataset has :math:`n` rows and the query dataset has :math:`m` rows, the output is an SFrame with :math:`nm` rows. - For models created with the 'lsh' method, the query results may have fewer query labels than input query points. Because LSH is an approximate method, a query point may have fewer than 'k' neighbors. If LSH returns no neighbors at all for a query, the query point is omitted from the results. Examples -------- First construct a toy SFrame and create a nearest neighbors model: >>> sf = turicreate.SFrame({'label': range(3), ... 'feature1': [0.98, 0.62, 0.11], ... 'feature2': [0.69, 0.58, 0.36]}) >>> model = turicreate.nearest_neighbors.create(sf, 'label') A new SFrame contains query observations with same schema as the reference SFrame. This SFrame is passed to the ``query`` method. >>> queries = turicreate.SFrame({'label': range(3), ... 'feature1': [0.05, 0.61, 0.99], ... 
'feature2': [0.06, 0.97, 0.86]}) >>> model.query(queries, 'label', k=2) +-------------+-----------------+----------------+------+ | query_label | reference_label | distance | rank | +-------------+-----------------+----------------+------+ | 0 | 2 | 0.305941170816 | 1 | | 0 | 1 | 0.771556867638 | 2 | | 1 | 1 | 0.390128184063 | 1 | | 1 | 0 | 0.464004310325 | 2 | | 2 | 0 | 0.170293863659 | 1 | | 2 | 1 | 0.464004310325 | 2 | +-------------+-----------------+----------------+------+
[ "For", "each", "row", "of", "the", "input", "dataset", "retrieve", "the", "nearest", "neighbors", "from", "the", "model", "s", "stored", "data", ".", "In", "general", "the", "query", "dataset", "does", "not", "need", "to", "be", "the", "same", "as", "the", "reference", "data", "stored", "in", "the", "model", "but", "if", "it", "is", "the", "include_self_edges", "parameter", "can", "be", "set", "to", "False", "to", "exclude", "results", "that", "match", "query", "points", "to", "themselves", "." ]
python
train
palantir/python-jsonrpc-server
pyls_jsonrpc/endpoint.py
https://github.com/palantir/python-jsonrpc-server/blob/7021d849901705ab53c141e483a71d0779aff3d2/pyls_jsonrpc/endpoint.py#L153-L161
def _notification_callback(method, params): """Construct a callback that logs the outcome of handling the given notification.""" def callback(future): try: future.result() log.debug("Successfully handled async notification %s %s", method, params) except Exception: # pylint: disable=broad-except log.exception("Failed to handle async notification %s %s", method, params) return callback
[ "def", "_notification_callback", "(", "method", ",", "params", ")", ":", "def", "callback", "(", "future", ")", ":", "try", ":", "future", ".", "result", "(", ")", "log", ".", "debug", "(", "\"Successfully handled async notification %s %s\"", ",", "method", ",", "params", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "log", ".", "exception", "(", "\"Failed to handle async notification %s %s\"", ",", "method", ",", "params", ")", "return", "callback" ]
Construct a callback that logs the outcome of handling the given notification.
[ "Construct", "a", "notification", "callback", "for", "the", "given", "request", "ID", "." ]
python
train
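The same done-callback pattern works with any future; a self-contained sketch using concurrent.futures (the method name and params below are hypothetical):

import logging
from concurrent.futures import ThreadPoolExecutor

log = logging.getLogger(__name__)

def notification_callback(method, params):
    def callback(future):
        try:
            future.result()
            log.debug('handled %s %s', method, params)
        except Exception:
            log.exception('failed %s %s', method, params)
    return callback

with ThreadPoolExecutor() as pool:
    fut = pool.submit(lambda: 1 / 0)  # a handler that raises
    fut.add_done_callback(notification_callback('workspace/didChange', {}))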
pandas-dev/pandas
pandas/io/sas/sas_xport.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sas/sas_xport.py#L170-L224
def _parse_float_vec(vec): """ Parse a vector of float values representing IBM 8 byte floats into native 8 byte floats. """ dtype = np.dtype('>u4,>u4') vec1 = vec.view(dtype=dtype) xport1 = vec1['f0'] xport2 = vec1['f1'] # Start by setting first half of ieee number to first half of IBM # number sans exponent ieee1 = xport1 & 0x00ffffff # The fraction bit to the left of the binary point in the ieee # format was set and the number was shifted 0, 1, 2, or 3 # places. This will tell us how to adjust the ibm exponent to be a # power of 2 ieee exponent and how to shift the fraction bits to # restore the correct magnitude. shift = np.zeros(len(vec), dtype=np.uint8) shift[np.where(xport1 & 0x00200000)] = 1 shift[np.where(xport1 & 0x00400000)] = 2 shift[np.where(xport1 & 0x00800000)] = 3 # shift the ieee number down the correct number of places then # set the second half of the ieee number to be the second half # of the ibm number shifted appropriately, ored with the bits # from the first half that would have been shifted in if we # could shift a double. All we are worried about are the low # order 3 bits of the first half since we're only shifting by # 1, 2, or 3. ieee1 >>= shift ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift))) # clear the 1 bit to the left of the binary point ieee1 &= 0xffefffff # set the exponent of the ieee number to be the actual exponent # plus the shift count + 1023. Or this into the first half of the # ieee number. The ibm exponent is excess 64 but is adjusted by 65 # since during conversion to ibm format the exponent is # incremented by 1 and the fraction bits left 4 positions to the # right of the radix point. (had to add >> 24 because C treats & # 0x7f as 0x7f000000 and Python doesn't) ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) + shift + 1023) << 20) | (xport1 & 0x80000000) ieee = np.empty((len(ieee1),), dtype='>u4,>u4') ieee['f0'] = ieee1 ieee['f1'] = ieee2 ieee = ieee.view(dtype='>f8') ieee = ieee.astype('f8') return ieee
[ "def", "_parse_float_vec", "(", "vec", ")", ":", "dtype", "=", "np", ".", "dtype", "(", "'>u4,>u4'", ")", "vec1", "=", "vec", ".", "view", "(", "dtype", "=", "dtype", ")", "xport1", "=", "vec1", "[", "'f0'", "]", "xport2", "=", "vec1", "[", "'f1'", "]", "# Start by setting first half of ieee number to first half of IBM", "# number sans exponent", "ieee1", "=", "xport1", "&", "0x00ffffff", "# The fraction bit to the left of the binary point in the ieee", "# format was set and the number was shifted 0, 1, 2, or 3", "# places. This will tell us how to adjust the ibm exponent to be a", "# power of 2 ieee exponent and how to shift the fraction bits to", "# restore the correct magnitude.", "shift", "=", "np", ".", "zeros", "(", "len", "(", "vec", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "shift", "[", "np", ".", "where", "(", "xport1", "&", "0x00200000", ")", "]", "=", "1", "shift", "[", "np", ".", "where", "(", "xport1", "&", "0x00400000", ")", "]", "=", "2", "shift", "[", "np", ".", "where", "(", "xport1", "&", "0x00800000", ")", "]", "=", "3", "# shift the ieee number down the correct number of places then", "# set the second half of the ieee number to be the second half", "# of the ibm number shifted appropriately, ored with the bits", "# from the first half that would have been shifted in if we", "# could shift a double. All we are worried about are the low", "# order 3 bits of the first half since we're only shifting by", "# 1, 2, or 3.", "ieee1", ">>=", "shift", "ieee2", "=", "(", "xport2", ">>", "shift", ")", "|", "(", "(", "xport1", "&", "0x00000007", ")", "<<", "(", "29", "+", "(", "3", "-", "shift", ")", ")", ")", "# clear the 1 bit to the left of the binary point", "ieee1", "&=", "0xffefffff", "# set the exponent of the ieee number to be the actual exponent", "# plus the shift count + 1023. Or this into the first half of the", "# ieee number. The ibm exponent is excess 64 but is adjusted by 65", "# since during conversion to ibm format the exponent is", "# incremented by 1 and the fraction bits left 4 positions to the", "# right of the radix point. (had to add >> 24 because C treats &", "# 0x7f as 0x7f000000 and Python doesn't)", "ieee1", "|=", "(", "(", "(", "(", "(", "(", "xport1", ">>", "24", ")", "&", "0x7f", ")", "-", "65", ")", "<<", "2", ")", "+", "shift", "+", "1023", ")", "<<", "20", ")", "|", "(", "xport1", "&", "0x80000000", ")", "ieee", "=", "np", ".", "empty", "(", "(", "len", "(", "ieee1", ")", ",", ")", ",", "dtype", "=", "'>u4,>u4'", ")", "ieee", "[", "'f0'", "]", "=", "ieee1", "ieee", "[", "'f1'", "]", "=", "ieee2", "ieee", "=", "ieee", ".", "view", "(", "dtype", "=", "'>f8'", ")", "ieee", "=", "ieee", ".", "astype", "(", "'f8'", ")", "return", "ieee" ]
Parse a vector of float values representing IBM 8 byte floats into native 8 byte floats.
[ "Parse", "a", "vector", "of", "float", "values", "representing", "IBM", "8", "byte", "floats", "into", "native", "8", "byte", "floats", "." ]
python
train
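A minimal sanity check, assuming the function above is in scope: the IBM hex-float bytes 41 10 00 00 00 00 00 00 encode 1.0 (fraction 0.0625 times 16**1):

import numpy as np

buf = b'\x41\x10\x00\x00\x00\x00\x00\x00'
vec = np.frombuffer(buf, dtype='S8')   # one 8-byte IBM double
print(_parse_float_vec(vec))           # -> [1.]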
titusjan/argos
argos/inspector/dialog.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/inspector/dialog.py#L85-L89
def setCurrentInspectorRegItem(self, regItem): """ Sets the current inspector given an InspectorRegItem """ check_class(regItem, InspectorRegItem, allow_none=True) self.inspectorTab.setCurrentRegItem(regItem)
[ "def", "setCurrentInspectorRegItem", "(", "self", ",", "regItem", ")", ":", "check_class", "(", "regItem", ",", "InspectorRegItem", ",", "allow_none", "=", "True", ")", "self", ".", "inspectorTab", ".", "setCurrentRegItem", "(", "regItem", ")" ]
Sets the current inspector given an InspectorRegItem
[ "Sets", "the", "current", "inspector", "given", "an", "InspectorRegItem" ]
python
train
kennethreitz/legit
legit/cli.py
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L135-L173
def sync(ctx, scm, to_branch, verbose, fake): """Stashes unstaged changes, Fetches remote data, Performs smart pull+merge, Pushes local commits up, and Unstashes changes. Defaults to current branch. """ scm.fake = fake scm.verbose = fake or verbose scm.repo_check(require_remote=True) if to_branch: # Optional branch specifier. branch = scm.fuzzy_match_branch(to_branch) if branch: is_external = True original_branch = scm.get_current_branch_name() else: raise click.BadArgumentUsage( "Branch {0} does not exist. Use an existing branch." .format(crayons.yellow(to_branch))) else: # Sync current branch. branch = scm.get_current_branch_name() is_external = False if branch in scm.get_branch_names(local=False): if is_external: ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake) scm.stash_log(sync=True) status_log(scm.smart_pull, 'Pulling commits from the server.') status_log(scm.push, 'Pushing commits to the server.', branch) scm.unstash_log(sync=True) if is_external: ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake) else: raise click.BadArgumentUsage( "Branch {0} is not published. Publish before syncing." .format(crayons.yellow(branch)))
[ "def", "sync", "(", "ctx", ",", "scm", ",", "to_branch", ",", "verbose", ",", "fake", ")", ":", "scm", ".", "fake", "=", "fake", "scm", ".", "verbose", "=", "fake", "or", "verbose", "scm", ".", "repo_check", "(", "require_remote", "=", "True", ")", "if", "to_branch", ":", "# Optional branch specifier.", "branch", "=", "scm", ".", "fuzzy_match_branch", "(", "to_branch", ")", "if", "branch", ":", "is_external", "=", "True", "original_branch", "=", "scm", ".", "get_current_branch_name", "(", ")", "else", ":", "raise", "click", ".", "BadArgumentUsage", "(", "\"Branch {0} does not exist. Use an existing branch.\"", ".", "format", "(", "crayons", ".", "yellow", "(", "branch", ")", ")", ")", "else", ":", "# Sync current branch.", "branch", "=", "scm", ".", "get_current_branch_name", "(", ")", "is_external", "=", "False", "if", "branch", "in", "scm", ".", "get_branch_names", "(", "local", "=", "False", ")", ":", "if", "is_external", ":", "ctx", ".", "invoke", "(", "switch", ",", "to_branch", "=", "branch", ",", "verbose", "=", "verbose", ",", "fake", "=", "fake", ")", "scm", ".", "stash_log", "(", "sync", "=", "True", ")", "status_log", "(", "scm", ".", "smart_pull", ",", "'Pulling commits from the server.'", ")", "status_log", "(", "scm", ".", "push", ",", "'Pushing commits to the server.'", ",", "branch", ")", "scm", ".", "unstash_log", "(", "sync", "=", "True", ")", "if", "is_external", ":", "ctx", ".", "invoke", "(", "switch", ",", "to_branch", "=", "original_branch", ",", "verbose", "=", "verbose", ",", "fake", "=", "fake", ")", "else", ":", "raise", "click", ".", "BadArgumentUsage", "(", "\"Branch {0} is not published. Publish before syncing.\"", ".", "format", "(", "crayons", ".", "yellow", "(", "branch", ")", ")", ")" ]
Stashes unstaged changes, Fetches remote data, Performs smart pull+merge, Pushes local commits up, and Unstashes changes. Defaults to current branch.
[ "Stashes", "unstaged", "changes", "Fetches", "remote", "data", "Performs", "smart", "pull", "+", "merge", "Pushes", "local", "commits", "up", "and", "Unstashes", "changes", "." ]
python
train
Opentrons/opentrons
api/src/opentrons/protocol_api/labware.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/labware.py#L114-L122
def center(self) -> Location: """ :return: a Point corresponding to the absolute position of the center of the well relative to the deck (with the front-left corner of slot 1 as (0,0,0)) """ top = self.top() center_z = top.point.z - (self._depth / 2.0) return Location(Point(x=top.point.x, y=top.point.y, z=center_z), self)
[ "def", "center", "(", "self", ")", "->", "Location", ":", "top", "=", "self", ".", "top", "(", ")", "center_z", "=", "top", ".", "point", ".", "z", "-", "(", "self", ".", "_depth", "/", "2.0", ")", "return", "Location", "(", "Point", "(", "x", "=", "top", ".", "point", ".", "x", ",", "y", "=", "top", ".", "point", ".", "y", ",", "z", "=", "center_z", ")", ",", "self", ")" ]
:return: a Point corresponding to the absolute position of the center of the well relative to the deck (with the front-left corner of slot 1 as (0,0,0))
[ ":", "return", ":", "a", "Point", "corresponding", "to", "the", "absolute", "position", "of", "the", "center", "of", "the", "well", "relative", "to", "the", "deck", "(", "with", "the", "front", "-", "left", "corner", "of", "slot", "1", "as", "(", "0", "0", "0", "))" ]
python
train
petl-developers/petl
petl/transform/setops.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/setops.py#L227-L287
def diff(a, b, presorted=False, buffersize=None, tempdir=None, cache=True, strict=False): """ Find the difference between rows in two tables. Returns a pair of tables. E.g.:: >>> import petl as etl >>> a = [['foo', 'bar', 'baz'], ... ['A', 1, True], ... ['C', 7, False], ... ['B', 2, False], ... ['C', 9, True]] >>> b = [['x', 'y', 'z'], ... ['B', 2, False], ... ['A', 9, False], ... ['B', 3, True], ... ['C', 9, True]] >>> added, subtracted = etl.diff(a, b) >>> # rows in b not in a ... added +-----+---+-------+ | x | y | z | +=====+===+=======+ | 'A' | 9 | False | +-----+---+-------+ | 'B' | 3 | True | +-----+---+-------+ >>> # rows in a not in b ... subtracted +-----+-----+-------+ | foo | bar | baz | +=====+=====+=======+ | 'A' | 1 | True | +-----+-----+-------+ | 'C' | 7 | False | +-----+-----+-------+ Convenient shorthand for ``(complement(b, a), complement(a, b))``. See also :func:`petl.transform.setops.complement`. If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted, see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function. .. versionchanged:: 1.1.0 If `strict` is `True` then strict set-like behaviour is used. """ if not presorted: a = sort(a) b = sort(b) added = complement(b, a, presorted=True, buffersize=buffersize, tempdir=tempdir, cache=cache, strict=strict) subtracted = complement(a, b, presorted=True, buffersize=buffersize, tempdir=tempdir, cache=cache, strict=strict) return added, subtracted
[ "def", "diff", "(", "a", ",", "b", ",", "presorted", "=", "False", ",", "buffersize", "=", "None", ",", "tempdir", "=", "None", ",", "cache", "=", "True", ",", "strict", "=", "False", ")", ":", "if", "not", "presorted", ":", "a", "=", "sort", "(", "a", ")", "b", "=", "sort", "(", "b", ")", "added", "=", "complement", "(", "b", ",", "a", ",", "presorted", "=", "True", ",", "buffersize", "=", "buffersize", ",", "tempdir", "=", "tempdir", ",", "cache", "=", "cache", ",", "strict", "=", "strict", ")", "subtracted", "=", "complement", "(", "a", ",", "b", ",", "presorted", "=", "True", ",", "buffersize", "=", "buffersize", ",", "tempdir", "=", "tempdir", ",", "cache", "=", "cache", ",", "strict", "=", "strict", ")", "return", "added", ",", "subtracted" ]
Find the difference between rows in two tables. Returns a pair of tables. E.g.:: >>> import petl as etl >>> a = [['foo', 'bar', 'baz'], ... ['A', 1, True], ... ['C', 7, False], ... ['B', 2, False], ... ['C', 9, True]] >>> b = [['x', 'y', 'z'], ... ['B', 2, False], ... ['A', 9, False], ... ['B', 3, True], ... ['C', 9, True]] >>> added, subtracted = etl.diff(a, b) >>> # rows in b not in a ... added +-----+---+-------+ | x | y | z | +=====+===+=======+ | 'A' | 9 | False | +-----+---+-------+ | 'B' | 3 | True | +-----+---+-------+ >>> # rows in a not in b ... subtracted +-----+-----+-------+ | foo | bar | baz | +=====+=====+=======+ | 'A' | 1 | True | +-----+-----+-------+ | 'C' | 7 | False | +-----+-----+-------+ Convenient shorthand for ``(complement(b, a), complement(a, b))``. See also :func:`petl.transform.setops.complement`. If `presorted` is True, it is assumed that the data are already sorted by the given key, and the `buffersize`, `tempdir` and `cache` arguments are ignored. Otherwise, the data are sorted, see also the discussion of the `buffersize`, `tempdir` and `cache` arguments under the :func:`petl.transform.sorts.sort` function. .. versionchanged:: 1.1.0 If `strict` is `True` then strict set-like behaviour is used.
[ "Find", "the", "difference", "between", "rows", "in", "two", "tables", ".", "Returns", "a", "pair", "of", "tables", ".", "E", ".", "g", ".", "::" ]
python
train
qiniu/python-sdk
qiniu/auth.py
https://github.com/qiniu/python-sdk/blob/a69fbef4e3e6ea1ebe09f4610a5b18bb2c17de59/qiniu/auth.py#L166-L185
def verify_callback( self, origin_authorization, url, body, content_type='application/x-www-form-urlencoded'): """Callback verification Args: origin_authorization: the Authorization header field of the callback request url: the url of the callback request body: the body of the callback request content_type: the Content-Type of the callback request body Returns: returns True if verification succeeds, False if it fails """ token = self.token_of_request(url, body, content_type) authorization = 'QBox {0}'.format(token) return origin_authorization == authorization
[ "def", "verify_callback", "(", "self", ",", "origin_authorization", ",", "url", ",", "body", ",", "content_type", "=", "'application/x-www-form-urlencoded'", ")", ":", "token", "=", "self", ".", "token_of_request", "(", "url", ",", "body", ",", "content_type", ")", "authorization", "=", "'QBox {0}'", ".", "format", "(", "token", ")", "return", "origin_authorization", "==", "authorization" ]
Callback verification Args: origin_authorization: the Authorization header field of the callback request url: the url of the callback request body: the body of the callback request content_type: the Content-Type of the callback request body Returns: returns True if verification succeeds, False if it fails
[ "回调验证" ]
python
train
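A hedged usage sketch with the SDK's Auth class; the credentials and the request values below are placeholders, not real:

from qiniu import Auth

auth = Auth('<ACCESS_KEY>', '<SECRET_KEY>')   # hypothetical credentials
ok = auth.verify_callback(
    headers.get('Authorization'),   # Authorization header of the callback
    callback_url,                   # the URL Qiniu called back
    request_body,
    content_type='application/x-www-form-urlencoded')
if not ok:
    raise PermissionError('callback signature mismatch')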
adamrehn/ue4cli
ue4cli/ThirdPartyLibraryDetails.py
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/ThirdPartyLibraryDetails.py#L83-L92
def getLinkerFlags(self, engineRoot, fmt, includeLibs=True): """ Constructs the linker flags string for building against this library """ components = self.resolveRoot(self.ldFlags, engineRoot) if includeLibs == True: components.extend(self.prefixedStrings(self.linkerDirPrefix, self.linkDirs, engineRoot)) components.extend(self.resolveRoot(self.libs, engineRoot)) return Utility.join(fmt.delim, components, fmt.quotes)
[ "def", "getLinkerFlags", "(", "self", ",", "engineRoot", ",", "fmt", ",", "includeLibs", "=", "True", ")", ":", "components", "=", "self", ".", "resolveRoot", "(", "self", ".", "ldFlags", ",", "engineRoot", ")", "if", "includeLibs", "==", "True", ":", "components", ".", "extend", "(", "self", ".", "prefixedStrings", "(", "self", ".", "linkerDirPrefix", ",", "self", ".", "linkDirs", ",", "engineRoot", ")", ")", "components", ".", "extend", "(", "self", ".", "resolveRoot", "(", "self", ".", "libs", ",", "engineRoot", ")", ")", "return", "Utility", ".", "join", "(", "fmt", ".", "delim", ",", "components", ",", "fmt", ".", "quotes", ")" ]
Constructs the linker flags string for building against this library
[ "Constructs", "the", "linker", "flags", "string", "for", "building", "against", "this", "library" ]
python
train
PyCQA/astroid
astroid/node_classes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L4116-L4134
def block_range(self, lineno): """Get a range from the given line number to where this node ends. :param lineno: The line number to start the range at. :type lineno: int :returns: The range of line numbers that this node belongs to, starting at the given line number. :rtype: tuple(int, int) """ last = None for exhandler in self.handlers: if exhandler.type and lineno == exhandler.type.fromlineno: return lineno, lineno if exhandler.body[0].fromlineno <= lineno <= exhandler.body[-1].tolineno: return lineno, exhandler.body[-1].tolineno if last is None: last = exhandler.body[0].fromlineno - 1 return self._elsed_block_range(lineno, self.orelse, last)
[ "def", "block_range", "(", "self", ",", "lineno", ")", ":", "last", "=", "None", "for", "exhandler", "in", "self", ".", "handlers", ":", "if", "exhandler", ".", "type", "and", "lineno", "==", "exhandler", ".", "type", ".", "fromlineno", ":", "return", "lineno", ",", "lineno", "if", "exhandler", ".", "body", "[", "0", "]", ".", "fromlineno", "<=", "lineno", "<=", "exhandler", ".", "body", "[", "-", "1", "]", ".", "tolineno", ":", "return", "lineno", ",", "exhandler", ".", "body", "[", "-", "1", "]", ".", "tolineno", "if", "last", "is", "None", ":", "last", "=", "exhandler", ".", "body", "[", "0", "]", ".", "fromlineno", "-", "1", "return", "self", ".", "_elsed_block_range", "(", "lineno", ",", "self", ".", "orelse", ",", "last", ")" ]
Get a range from the given line number to where this node ends. :param lineno: The line number to start the range at. :type lineno: int :returns: The range of line numbers that this node belongs to, starting at the given line number. :rtype: tuple(int, int)
[ "Get", "a", "range", "from", "the", "given", "line", "number", "to", "where", "this", "node", "ends", "." ]
python
train
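A quick illustration with astroid's parser, assuming a recent astroid where parse() is available; block_range maps a line inside a handler to that handler's body range:

import astroid

src = '''
try:                    # line 2
    risky()             # line 3
except ValueError:      # line 4
    recover()           # line 5
else:
    celebrate()         # line 7
'''
node = astroid.parse(src).body[0]   # the TryExcept node
print(node.block_range(5))          # -> (5, 5): inside the handler body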
asweigart/pysimplevalidate
src/pysimplevalidate/__init__.py
https://github.com/asweigart/pysimplevalidate/blob/3ca27228abb7355d14bbf8abc225c63366379e44/src/pysimplevalidate/__init__.py#L1373-L1411
def validateDayOfMonth(value, year, month, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None): """Raises ValidationException if value is not a day of the month, from 1 to 28, 29, 30, or 31 depending on the month and year. Returns value. * value (str): The value being validated as existing as a numbered day in the given year and month. * year (int): The given year. * month (int): The given month. 1 is January, 2 is February, and so on. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateDayOfMonth('31', 2019, 10) 31 >>> pysv.validateDayOfMonth('32', 2019, 10) Traceback (most recent call last): ... pysimplevalidate.ValidationException: '32' is not a day in the month of October 2019 >>> pysv.validateDayOfMonth('29', 2004, 2) 29 >>> pysv.validateDayOfMonth('29', 2005, 2) Traceback (most recent call last): ... pysimplevalidate.ValidationException: '29' is not a day in the month of February 2005 """ try: daysInMonth = calendar.monthrange(year, month)[1] except: raise PySimpleValidateException('invalid arguments for year and/or month') try: return validateInt(value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes, min=1, max=daysInMonth) except: # Replace the exception message. _raiseValidationException(_('%r is not a day in the month of %s %s') % (_errstr(value), ENGLISH_MONTH_NAMES[month - 1], year), excMsg)
[ "def", "validateDayOfMonth", "(", "value", ",", "year", ",", "month", ",", "blank", "=", "False", ",", "strip", "=", "None", ",", "allowlistRegexes", "=", "None", ",", "blocklistRegexes", "=", "None", ",", "excMsg", "=", "None", ")", ":", "try", ":", "daysInMonth", "=", "calendar", ".", "monthrange", "(", "year", ",", "month", ")", "[", "1", "]", "except", ":", "raise", "PySimpleValidateException", "(", "'invalid arguments for year and/or month'", ")", "try", ":", "return", "validateInt", "(", "value", ",", "blank", "=", "blank", ",", "strip", "=", "strip", ",", "allowlistRegexes", "=", "allowlistRegexes", ",", "blocklistRegexes", "=", "blocklistRegexes", ",", "min", "=", "1", ",", "max", "=", "daysInMonth", ")", "except", ":", "# Replace the exception message.", "_raiseValidationException", "(", "_", "(", "'%r is not a day in the month of %s %s'", ")", "%", "(", "_errstr", "(", "value", ")", ",", "ENGLISH_MONTH_NAMES", "[", "month", "-", "1", "]", ",", "year", ")", ",", "excMsg", ")" ]
Raises ValidationException if value is not a day of the month, from 1 to 28, 29, 30, or 31 depending on the month and year. Returns value. * value (str): The value being validated as existing as a numbered day in the given year and month. * year (int): The given year. * month (int): The given month. 1 is January, 2 is February, and so on. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateDayOfMonth('31', 2019, 10) 31 >>> pysv.validateDayOfMonth('32', 2019, 10) Traceback (most recent call last): ... pysimplevalidate.ValidationException: '32' is not a day in the month of October 2019 >>> pysv.validateDayOfMonth('29', 2004, 2) 29 >>> pysv.validateDayOfMonth('29', 2005, 2) Traceback (most recent call last): ... pysimplevalidate.ValidationException: '29' is not a day in the month of February 2005
[ "Raises", "ValidationException", "if", "value", "is", "not", "a", "day", "of", "the", "month", "from", "1", "to", "28", "29", "30", "or", "31", "depending", "on", "the", "month", "and", "year", ".", "Returns", "value", "." ]
python
train
jobovy/galpy
galpy/potential/TwoPowerSphericalPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/TwoPowerSphericalPotential.py#L283-L308
def _evaluate(self,R,z,phi=0.,t=0.): """ NAME: _evaluate PURPOSE: evaluate the potential at R,z INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: Phi(R,z) HISTORY: 2010-07-09 - Started - Bovy (NYU) """ if not self.HernquistSelf == None: return self.HernquistSelf._evaluate(R,z,phi=phi,t=t) elif not self.JaffeSelf == None: return self.JaffeSelf._evaluate(R,z,phi=phi,t=t) elif not self.NFWSelf == None: return self.NFWSelf._evaluate(R,z,phi=phi,t=t) else: return TwoPowerSphericalPotential._evaluate(self,R,z, phi=phi,t=t, _forceFloatEval=True)
[ "def", "_evaluate", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "if", "not", "self", ".", "HernquistSelf", "==", "None", ":", "return", "self", ".", "HernquistSelf", ".", "_evaluate", "(", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ")", "elif", "not", "self", ".", "JaffeSelf", "==", "None", ":", "return", "self", ".", "JaffeSelf", ".", "_evaluate", "(", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ")", "elif", "not", "self", ".", "NFWSelf", "==", "None", ":", "return", "self", ".", "NFWSelf", ".", "_evaluate", "(", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ")", "else", ":", "return", "TwoPowerSphericalPotential", ".", "_evaluate", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ",", "_forceFloatEval", "=", "True", ")" ]
NAME: _evaluate PURPOSE: evaluate the potential at R,z INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: Phi(R,z) HISTORY: 2010-07-09 - Started - Bovy (NYU)
[ "NAME", ":", "_evaluate", "PURPOSE", ":", "evaluate", "the", "potential", "at", "R", "z", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "Phi", "(", "R", "z", ")", "HISTORY", ":", "2010", "-", "07", "-", "09", "-", "Started", "-", "Bovy", "(", "NYU", ")" ]
python
train
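_evaluate is internal; callers normally go through galpy's public evaluatePotentials helper, which dispatches to it. A hedged sketch, with illustrative parameter values:

from galpy.potential import NFWPotential, evaluatePotentials

pot = NFWPotential(a=1.0, normalize=1.0)  # an NFW special case of the two-power profile
# Evaluates Phi(R, z) at cylindrical radius R=1.0 and height z=0.2 (natural units).
print(evaluatePotentials(pot, 1.0, 0.2, phi=0.0, t=0.0))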
DiscordBotList/DBL-Python-Library
dbl/http.py
https://github.com/DiscordBotList/DBL-Python-Library/blob/c1461ae0acc644cdeedef8fd6b5e36f76d81c1aa/dbl/http.py#L190-L194
async def get_bots(self, limit, offset):
        '''Gets an object of bots on DBL'''
        if limit > 500:
            limit = 500
        return await self.request('GET', '{}/bots?limit={}&offset={}'.format(self.BASE, limit, offset))
[ "async", "def", "get_bots", "(", "self", ",", "limit", ",", "offset", ")", ":", "if", "limit", ">", "500", ":", "limit", "=", "500", "return", "await", "self", ".", "request", "(", "'GET'", ",", "'{}/bots?limit={}&offset={}'", ".", "format", "(", "self", ".", "BASE", ",", "limit", ",", "offset", ")", ")" ]
Gets an object of bots on DBL
[ "Gets", "an", "object", "of", "bots", "on", "DBL" ]
python
test
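Since get_bots is a coroutine on the internal HTTP client, a caller would await it; the client construction below is assumed (a real one needs a DBL token and session):

import asyncio

async def list_bots(http):  # http: an already-initialized dbl HTTP client (assumed)
    page = await http.get_bots(limit=50, offset=0)
    print(page)

# asyncio.run(list_bots(http))  # uncomment once a client instance exists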
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/scene/cameras/arcball.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/scene/cameras/arcball.py#L71-L77
def _dist_to_trans(self, dist): """Convert mouse x, y movement into x, y, z translations""" rot, x, y, z = self._quaternion.get_axis_angle() tr = MatrixTransform() tr.rotate(180 * rot / np.pi, (x, y, z)) dx, dz, dy = np.dot(tr.matrix[:3, :3], (dist[0], dist[1], 0.)) return dx, dy, dz
[ "def", "_dist_to_trans", "(", "self", ",", "dist", ")", ":", "rot", ",", "x", ",", "y", ",", "z", "=", "self", ".", "_quaternion", ".", "get_axis_angle", "(", ")", "tr", "=", "MatrixTransform", "(", ")", "tr", ".", "rotate", "(", "180", "*", "rot", "/", "np", ".", "pi", ",", "(", "x", ",", "y", ",", "z", ")", ")", "dx", ",", "dz", ",", "dy", "=", "np", ".", "dot", "(", "tr", ".", "matrix", "[", ":", "3", ",", ":", "3", "]", ",", "(", "dist", "[", "0", "]", ",", "dist", "[", "1", "]", ",", "0.", ")", ")", "return", "dx", ",", "dy", ",", "dz" ]
Convert mouse x, y movement into x, y, z translations
[ "Convert", "mouse", "x", "y", "movement", "into", "x", "y", "z", "translations" ]
python
train
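The method rotates a 2-D mouse delta into camera space and swaps the y/z components. A standalone numpy sketch of the same idea, building the rotation matrix directly instead of using vispy's MatrixTransform (the angle is illustrative):

import numpy as np

theta = np.pi / 4  # assumed camera rotation about the z axis
rot = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                [np.sin(theta),  np.cos(theta), 0.0],
                [0.0,            0.0,           1.0]])
dx, dz, dy = rot @ np.array([0.1, -0.05, 0.0])  # note the dz/dy swap, as in the method
print(dx, dy, dz)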
CodeReclaimers/neat-python
neat/population.py
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/neat/population.py#L59-L136
def run(self, fitness_function, n=None): """ Runs NEAT's genetic algorithm for at most n generations. If n is None, run until solution is found or extinction occurs. The user-provided fitness_function must take only two arguments: 1. The population as a list of (genome id, genome) tuples. 2. The current configuration object. The return value of the fitness function is ignored, but it must assign a Python float to the `fitness` member of each genome. The fitness function is free to maintain external state, perform evaluations in parallel, etc. It is assumed that fitness_function does not modify the list of genomes, the genomes themselves (apart from updating the fitness member), or the configuration object. """ if self.config.no_fitness_termination and (n is None): raise RuntimeError("Cannot have no generational limit with no fitness termination") k = 0 while n is None or k < n: k += 1 self.reporters.start_generation(self.generation) # Evaluate all genomes using the user-provided function. fitness_function(list(iteritems(self.population)), self.config) # Gather and report statistics. best = None for g in itervalues(self.population): if best is None or g.fitness > best.fitness: best = g self.reporters.post_evaluate(self.config, self.population, self.species, best) # Track the best genome ever seen. if self.best_genome is None or best.fitness > self.best_genome.fitness: self.best_genome = best if not self.config.no_fitness_termination: # End if the fitness threshold is reached. fv = self.fitness_criterion(g.fitness for g in itervalues(self.population)) if fv >= self.config.fitness_threshold: self.reporters.found_solution(self.config, self.generation, best) break # Create the next generation from the current generation. self.population = self.reproduction.reproduce(self.config, self.species, self.config.pop_size, self.generation) # Check for complete extinction. if not self.species.species: self.reporters.complete_extinction() # If requested by the user, create a completely new population, # otherwise raise an exception. if self.config.reset_on_extinction: self.population = self.reproduction.create_new(self.config.genome_type, self.config.genome_config, self.config.pop_size) else: raise CompleteExtinctionException() # Divide the new population into species. self.species.speciate(self.config, self.population, self.generation) self.reporters.end_generation(self.config, self.population, self.species) self.generation += 1 if self.config.no_fitness_termination: self.reporters.found_solution(self.config, self.generation, self.best_genome) return self.best_genome
[ "def", "run", "(", "self", ",", "fitness_function", ",", "n", "=", "None", ")", ":", "if", "self", ".", "config", ".", "no_fitness_termination", "and", "(", "n", "is", "None", ")", ":", "raise", "RuntimeError", "(", "\"Cannot have no generational limit with no fitness termination\"", ")", "k", "=", "0", "while", "n", "is", "None", "or", "k", "<", "n", ":", "k", "+=", "1", "self", ".", "reporters", ".", "start_generation", "(", "self", ".", "generation", ")", "# Evaluate all genomes using the user-provided function.", "fitness_function", "(", "list", "(", "iteritems", "(", "self", ".", "population", ")", ")", ",", "self", ".", "config", ")", "# Gather and report statistics.", "best", "=", "None", "for", "g", "in", "itervalues", "(", "self", ".", "population", ")", ":", "if", "best", "is", "None", "or", "g", ".", "fitness", ">", "best", ".", "fitness", ":", "best", "=", "g", "self", ".", "reporters", ".", "post_evaluate", "(", "self", ".", "config", ",", "self", ".", "population", ",", "self", ".", "species", ",", "best", ")", "# Track the best genome ever seen.", "if", "self", ".", "best_genome", "is", "None", "or", "best", ".", "fitness", ">", "self", ".", "best_genome", ".", "fitness", ":", "self", ".", "best_genome", "=", "best", "if", "not", "self", ".", "config", ".", "no_fitness_termination", ":", "# End if the fitness threshold is reached.", "fv", "=", "self", ".", "fitness_criterion", "(", "g", ".", "fitness", "for", "g", "in", "itervalues", "(", "self", ".", "population", ")", ")", "if", "fv", ">=", "self", ".", "config", ".", "fitness_threshold", ":", "self", ".", "reporters", ".", "found_solution", "(", "self", ".", "config", ",", "self", ".", "generation", ",", "best", ")", "break", "# Create the next generation from the current generation.", "self", ".", "population", "=", "self", ".", "reproduction", ".", "reproduce", "(", "self", ".", "config", ",", "self", ".", "species", ",", "self", ".", "config", ".", "pop_size", ",", "self", ".", "generation", ")", "# Check for complete extinction.", "if", "not", "self", ".", "species", ".", "species", ":", "self", ".", "reporters", ".", "complete_extinction", "(", ")", "# If requested by the user, create a completely new population,", "# otherwise raise an exception.", "if", "self", ".", "config", ".", "reset_on_extinction", ":", "self", ".", "population", "=", "self", ".", "reproduction", ".", "create_new", "(", "self", ".", "config", ".", "genome_type", ",", "self", ".", "config", ".", "genome_config", ",", "self", ".", "config", ".", "pop_size", ")", "else", ":", "raise", "CompleteExtinctionException", "(", ")", "# Divide the new population into species.", "self", ".", "species", ".", "speciate", "(", "self", ".", "config", ",", "self", ".", "population", ",", "self", ".", "generation", ")", "self", ".", "reporters", ".", "end_generation", "(", "self", ".", "config", ",", "self", ".", "population", ",", "self", ".", "species", ")", "self", ".", "generation", "+=", "1", "if", "self", ".", "config", ".", "no_fitness_termination", ":", "self", ".", "reporters", ".", "found_solution", "(", "self", ".", "config", ",", "self", ".", "generation", ",", "self", ".", "best_genome", ")", "return", "self", ".", "best_genome" ]
Runs NEAT's genetic algorithm for at most n generations. If n is None, run until solution is found or extinction occurs. The user-provided fitness_function must take only two arguments: 1. The population as a list of (genome id, genome) tuples. 2. The current configuration object. The return value of the fitness function is ignored, but it must assign a Python float to the `fitness` member of each genome. The fitness function is free to maintain external state, perform evaluations in parallel, etc. It is assumed that fitness_function does not modify the list of genomes, the genomes themselves (apart from updating the fitness member), or the configuration object.
[ "Runs", "NEAT", "s", "genetic", "algorithm", "for", "at", "most", "n", "generations", ".", "If", "n", "is", "None", "run", "until", "solution", "is", "found", "or", "extinction", "occurs", "." ]
python
train
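A condensed version of the usual neat-python driver loop around run(); the config file name is a placeholder and the fitness function is a stub:

import neat

def eval_genomes(genomes, config):
    for genome_id, genome in genomes:
        genome.fitness = 0.0  # run() requires a float fitness on every genome

config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'config-feedforward')  # placeholder path
population = neat.Population(config)
winner = population.run(eval_genomes, n=50)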
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/capture_collector.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/capture_collector.py#L593-L609
def _CaptureExpression(self, frame, expression):
    """Evaluates the expression and captures it into a Variable object.

    Args:
      frame: evaluation context.
      expression: watched expression to compile and evaluate.

    Returns:
      Variable object (which will have error status if the expression fails
      to evaluate).
    """
    rc, value = _EvaluateExpression(frame, expression)
    if not rc:
      return {'name': expression, 'status': value}

    return self.CaptureNamedVariable(expression, value, 0,
                                     self.expression_capture_limits)
[ "def", "_CaptureExpression", "(", "self", ",", "frame", ",", "expression", ")", ":", "rc", ",", "value", "=", "_EvaluateExpression", "(", "frame", ",", "expression", ")", "if", "not", "rc", ":", "return", "{", "'name'", ":", "expression", ",", "'status'", ":", "value", "}", "return", "self", ".", "CaptureNamedVariable", "(", "expression", ",", "value", ",", "0", ",", "self", ".", "expression_capture_limits", ")" ]
Evaluates the expression and captures it into a Variable object.

Args:
  frame: evaluation context.
  expression: watched expression to compile and evaluate.

Returns:
  Variable object (which will have error status if the expression fails
  to evaluate).
[ "Evaluates", "the", "expression", "and", "captures", "it", "into", "a", "Variable", "object", "." ]
python
train
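A hedged sketch of the (rc, value) contract _CaptureExpression relies on; this stub only illustrates the shape of _EvaluateExpression's return value, not its real implementation:

def _evaluate_expression_stub(frame, expression):
    try:
        return True, eval(expression, frame.f_globals, frame.f_locals)
    except Exception as err:  # the real debugger is far more careful than a bare eval
        return False, {'isError': True, 'description': str(err)}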
fracpete/python-weka-wrapper3
python/weka/flow/transformer.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/transformer.py#L1774-L1797
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(Predict, self).fix_config(options) opt = "model" if opt not in options: options[opt] = "." if opt not in self.help: self.help[opt] = "The serialized model to use for making predictions (string)." opt = "storage_name" if opt not in options: options[opt] = "unknown" if opt not in self.help: self.help[opt] = "The name of the model (or ModelContainer) in storage to use (string)." return options
[ "def", "fix_config", "(", "self", ",", "options", ")", ":", "options", "=", "super", "(", "Predict", ",", "self", ")", ".", "fix_config", "(", "options", ")", "opt", "=", "\"model\"", "if", "opt", "not", "in", "options", ":", "options", "[", "opt", "]", "=", "\".\"", "if", "opt", "not", "in", "self", ".", "help", ":", "self", ".", "help", "[", "opt", "]", "=", "\"The serialized model to use for making predictions (string).\"", "opt", "=", "\"storage_name\"", "if", "opt", "not", "in", "options", ":", "options", "[", "opt", "]", "=", "\"unknown\"", "if", "opt", "not", "in", "self", ".", "help", ":", "self", ".", "help", "[", "opt", "]", "=", "\"The name of the model (or ModelContainer) in storage to use (string).\"", "return", "options" ]
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict
[ "Fixes", "the", "options", "if", "necessary", ".", "I", ".", "e", ".", "it", "adds", "all", "required", "elements", "to", "the", "dictionary", "." ]
python
train
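A sketch of fix_config filling in defaults; it assumes a Predict actor can be built with its default constructor:

from weka.flow.transformer import Predict

actor = Predict()
options = actor.fix_config({})
print(options['model'], options['storage_name'])  # '.' and 'unknown' defaults added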
BlueBrain/nat
nat/gitManager.py
https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/gitManager.py#L141-L167
def push(self):
        """ Adding the no_thin argument to the GIT push because we had some issues pushing previously.
            According to http://stackoverflow.com/questions/16586642/git-unpack-error-on-push-to-gerrit#comment42953435_23610917,
            "a new optimization which causes git to send as little data as possible
            over the network caused this bug to manifest, so my guess is --no-thin
            just turns these optimizations off. From git push --help:

            "A thin transfer significantly reduces the amount of sent data when the
            sender and receiver share many of the same objects in common." (--thin is the default)."
        """
        if not self.canRunRemoteCmd():
            return None

        try:
            fetchInfo = self.repo.remotes.origin.push(no_thin=True)[0]
        except exc.GitCommandError as e:
            print(dir(e))
            print(e)
            raise

        if fetchInfo.flags & fetchInfo.ERROR:
            try:
                msg = ("An error occurred while trying to push the GIT repository from the server. Error flag: '" +
                       str(fetchInfo.flags) + "', message: '" + str(fetchInfo.note) + "'.")
            except Exception:
                msg = "An error occurred while trying to push the GIT repository from the server."
            raise IOError(msg)

        return fetchInfo
[ "def", "push", "(", "self", ")", ":", "if", "not", "self", ".", "canRunRemoteCmd", "(", ")", ":", "return", "None", "try", ":", "fetchInfo", "=", "self", ".", "repo", ".", "remotes", ".", "origin", ".", "push", "(", "no_thin", "=", "True", ")", "[", "0", "]", "except", "exc", ".", "GitCommandError", "as", "e", ":", "print", "(", "dir", "(", "e", ")", ")", "print", "(", "e", ")", "raise", "if", "fetchInfo", ".", "flags", "&", "fetchInfo", ".", "ERROR", ":", "try", ":", "msg", "=", "(", "\"An error occurred while trying to push the GIT repository from the server. Error flag: '\"", "+", "str", "(", "fetchInfo", ".", "flags", ")", "+", "\"', message: '\"", "+", "str", "(", "fetchInfo", ".", "note", ")", "+", "\"'.\"", ")", "except", "Exception", ":", "msg", "=", "\"An error occurred while trying to push the GIT repository from the server.\"", "raise", "IOError", "(", "msg", ")", "return", "fetchInfo" ]
Adding the no_thin argument to the GIT push because we had some issues pushing previously. According to http://stackoverflow.com/questions/16586642/git-unpack-error-on-push-to-gerrit#comment42953435_23610917, "a new optimization which causes git to send as little data as possible over the network caused this bug to manifest, so my guess is --no-thin just turns these optimizations off. From git push --help: "A thin transfer significantly reduces the amount of sent data when the sender and receiver share many of the same objects in common." (--thin is the default)."
[ "Adding", "the", "no_thin", "argument", "to", "the", "GIT", "push", "because", "we", "had", "some", "issues", "pushing", "previously", ".", "According", "to", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "16586642", "/", "git", "-", "unpack", "-", "error", "-", "on", "-", "push", "-", "to", "-", "gerrit#comment42953435_23610917", "a", "new", "optimization", "which", "causes", "git", "to", "send", "as", "little", "data", "as", "possible", "over", "the", "network", "caused", "this", "bug", "to", "manifest", "so", "my", "guess", "is", "--", "no", "-", "thin", "just", "turns", "these", "optimizations", "off", ".", "From", "git", "push", "--", "help", ":", "A", "thin", "transfer", "significantly", "reduces", "the", "amount", "of", "sent", "data", "when", "the", "sender", "and", "receiver", "share", "many", "of", "the", "same", "objects", "in", "common", ".", "(", "--", "thin", "is", "the", "default", ")", "." ]
python
train
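A hedged caller-side sketch; the GitManager class name and its construction are assumed, since only push() appears in this record:

manager = GitManager(...)  # hypothetical: construction details are not shown in this record
info = manager.push()
if info is None:
    print('remote commands unavailable; push skipped')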
Skyscanner/pycfmodel
pycfmodel/model/parameter.py
https://github.com/Skyscanner/pycfmodel/blob/e3da4db96f59c0a5dba06ae66ad25645775e5500/pycfmodel/model/parameter.py#L31-L41
def set_generic_keys(self, properties, exclude_list): """ Sets all the key value pairs that were not set manually in __init__. """ generic_keys = set(properties.keys()) - set(exclude_list) for generic_key in generic_keys: self.__setattr__( self._convert_to_snake_case(generic_key), properties[generic_key], )
[ "def", "set_generic_keys", "(", "self", ",", "properties", ",", "exclude_list", ")", ":", "generic_keys", "=", "set", "(", "properties", ".", "keys", "(", ")", ")", "-", "set", "(", "exclude_list", ")", "for", "generic_key", "in", "generic_keys", ":", "self", ".", "__setattr__", "(", "self", ".", "_convert_to_snake_case", "(", "generic_key", ")", ",", "properties", "[", "generic_key", "]", ",", ")" ]
Sets all the key value pairs that were not set manually in __init__.
[ "Sets", "all", "the", "key", "value", "pairs", "that", "were", "not", "set", "manually", "in", "__init__", "." ]
python
train
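A small sketch of the pattern (the param instance and its _convert_to_snake_case helper come from pycfmodel and are assumed here):

properties = {'Type': 'String', 'MaxLength': 10}
param.set_generic_keys(properties, exclude_list=['Type'])
# Every non-excluded key is copied onto the object, e.g. param.max_length == 10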
spry-group/python-vultr
vultr/v1_server.py
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L118-L130
def os_change(self, subid, osid, params=None): ''' /v1/server/os_change POST - account Changes the operating system of a virtual machine. All data will be permanently lost. Link: https://www.vultr.com/api/#server_os_change ''' params = update_params(params, { 'SUBID': subid, 'OSID': osid }) return self.request('/v1/server/os_change', params, 'POST')
[ "def", "os_change", "(", "self", ",", "subid", ",", "osid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SUBID'", ":", "subid", ",", "'OSID'", ":", "osid", "}", ")", "return", "self", ".", "request", "(", "'/v1/server/os_change'", ",", "params", ",", "'POST'", ")" ]
/v1/server/os_change POST - account Changes the operating system of a virtual machine. All data will be permanently lost. Link: https://www.vultr.com/api/#server_os_change
[ "/", "v1", "/", "server", "/", "os_change", "POST", "-", "account", "Changes", "the", "operating", "system", "of", "a", "virtual", "machine", ".", "All", "data", "will", "be", "permanently", "lost", "." ]
python
train
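A hedged sketch of calling the endpoint through the top-level client; the subclient attribute name and the IDs are assumptions:

from vultr import Vultr

client = Vultr('API_KEY')  # placeholder key
client.server.os_change('576965', '127')  # illustrative SUBID and OSID; destroys all data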
hyperledger/sawtooth-core
cli/sawtooth_cli/state.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/state.py#L82-L135
def do_state(args):
    """Runs the state list or state show command, printing output to the
    console

        Args:
            args: The parsed arguments sent to the command at runtime
    """
    rest_client = RestClient(args.url, args.user)

    if args.subcommand == 'list':
        response = rest_client.list_state(args.subtree, args.head)
        leaves = response['data']
        head = response['head']
        keys = ('address', 'size', 'data')
        headers = tuple(k.upper() for k in keys)

        def parse_leaf_row(leaf, decode=True):
            decoded = b64decode(leaf['data'])
            return (
                leaf['address'],
                len(decoded),
                str(decoded) if decode else leaf['data'])

        if args.format == 'default':
            fmt.print_terminal_table(headers, leaves, parse_leaf_row)
            print('HEAD BLOCK: "{}"'.format(head))

        elif args.format == 'csv':
            fmt.print_csv(headers, leaves, parse_leaf_row)
            print('(data for head block: "{}")'.format(head))

        elif args.format == 'json' or args.format == 'yaml':
            state_data = {
                'head': head,
                'data': [{k: d for k, d in
                          zip(keys, parse_leaf_row(l, False))}
                         for l in leaves]}

            if args.format == 'yaml':
                fmt.print_yaml(state_data)
            elif args.format == 'json':
                fmt.print_json(state_data)
            else:
                raise AssertionError('Missing handler: {}'.format(args.format))

        else:
            raise AssertionError('Missing handler: {}'.format(args.format))

    if args.subcommand == 'show':
        output = rest_client.get_leaf(args.address, args.head)

        if output is not None:
            print('DATA: "{}"'.format(b64decode(output['data'])))
            print('HEAD: "{}"'.format(output['head']))
        else:
            raise CliException('No data available at {}'.format(args.address))
[ "def", "do_state", "(", "args", ")", ":", "rest_client", "=", "RestClient", "(", "args", ".", "url", ",", "args", ".", "user", ")", "if", "args", ".", "subcommand", "==", "'list'", ":", "response", "=", "rest_client", ".", "list_state", "(", "args", ".", "subtree", ",", "args", ".", "head", ")", "leaves", "=", "response", "[", "'data'", "]", "head", "=", "response", "[", "'head'", "]", "keys", "=", "(", "'address'", ",", "'size'", ",", "'data'", ")", "headers", "=", "tuple", "(", "k", ".", "upper", "(", ")", "for", "k", "in", "keys", ")", "def", "parse_leaf_row", "(", "leaf", ",", "decode", "=", "True", ")", ":", "decoded", "=", "b64decode", "(", "leaf", "[", "'data'", "]", ")", "return", "(", "leaf", "[", "'address'", "]", ",", "len", "(", "decoded", ")", ",", "str", "(", "decoded", ")", "if", "decode", "else", "leaf", "[", "'data'", "]", ")", "if", "args", ".", "format", "==", "'default'", ":", "fmt", ".", "print_terminal_table", "(", "headers", ",", "leaves", ",", "parse_leaf_row", ")", "print", "(", "'HEAD BLOCK: \"{}\"'", ".", "format", "(", "head", ")", ")", "elif", "args", ".", "format", "==", "'csv'", ":", "fmt", ".", "print_csv", "(", "headers", ",", "leaves", ",", "parse_leaf_row", ")", "print", "(", "'(data for head block: \"{}\")'", ".", "format", "(", "head", ")", ")", "elif", "args", ".", "format", "==", "'json'", "or", "args", ".", "format", "==", "'yaml'", ":", "state_data", "=", "{", "'head'", ":", "head", ",", "'data'", ":", "[", "{", "k", ":", "d", "for", "k", ",", "d", "in", "zip", "(", "keys", ",", "parse_leaf_row", "(", "l", ",", "False", ")", ")", "}", "for", "l", "in", "leaves", "]", "}", "if", "args", ".", "format", "==", "'yaml'", ":", "fmt", ".", "print_yaml", "(", "state_data", ")", "elif", "args", ".", "format", "==", "'json'", ":", "fmt", ".", "print_json", "(", "state_data", ")", "else", ":", "raise", "AssertionError", "(", "'Missing handler: {}'", ".", "format", "(", "args", ".", "format", ")", ")", "else", ":", "raise", "AssertionError", "(", "'Missing handler: {}'", ".", "format", "(", "args", ".", "format", ")", ")", "if", "args", ".", "subcommand", "==", "'show'", ":", "output", "=", "rest_client", ".", "get_leaf", "(", "args", ".", "address", ",", "args", ".", "head", ")", "if", "output", "is", "not", "None", ":", "print", "(", "'DATA: \"{}\"'", ".", "format", "(", "b64decode", "(", "output", "[", "'data'", "]", ")", ")", ")", "print", "(", "'HEAD: \"{}\"'", ".", "format", "(", "output", "[", "'head'", "]", ")", ")", "else", ":", "raise", "CliException", "(", "'No data available at {}'", ".", "format", "(", "args", ".", "address", ")", ")" ]
Runs the state list or state show command, printing output to the
console

    Args:
        args: The parsed arguments sent to the command at runtime
[ "Runs", "the", "state", "list", "or", "state", "show", "command", "printing", "output", "to", "the", "console" ]
python
train
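do_state expects an argparse-style namespace; a minimal sketch that exercises the 'list' branch (the URL is a local default, not from the record, and a reachable sawtooth REST API is assumed):

from argparse import Namespace

args = Namespace(url='http://localhost:8008', user=None, subcommand='list',
                 subtree=None, head=None, format='json')
do_state(args)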
PyCQA/pylint
pylint/lint.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/lint.py#L787-L805
def error_mode(self): """error mode: enable only errors; no reports, no persistent""" self._error_mode = True self.disable_noerror_messages() self.disable("miscellaneous") if self._python3_porting_mode: self.disable("all") for msg_id in self._checker_messages("python3"): if msg_id.startswith("E"): self.enable(msg_id) config_parser = self.cfgfile_parser if config_parser.has_option("MESSAGES CONTROL", "disable"): value = config_parser.get("MESSAGES CONTROL", "disable") self.global_set_option("disable", value) else: self.disable("python3") self.set_option("reports", False) self.set_option("persistent", False) self.set_option("score", False)
[ "def", "error_mode", "(", "self", ")", ":", "self", ".", "_error_mode", "=", "True", "self", ".", "disable_noerror_messages", "(", ")", "self", ".", "disable", "(", "\"miscellaneous\"", ")", "if", "self", ".", "_python3_porting_mode", ":", "self", ".", "disable", "(", "\"all\"", ")", "for", "msg_id", "in", "self", ".", "_checker_messages", "(", "\"python3\"", ")", ":", "if", "msg_id", ".", "startswith", "(", "\"E\"", ")", ":", "self", ".", "enable", "(", "msg_id", ")", "config_parser", "=", "self", ".", "cfgfile_parser", "if", "config_parser", ".", "has_option", "(", "\"MESSAGES CONTROL\"", ",", "\"disable\"", ")", ":", "value", "=", "config_parser", ".", "get", "(", "\"MESSAGES CONTROL\"", ",", "\"disable\"", ")", "self", ".", "global_set_option", "(", "\"disable\"", ",", "value", ")", "else", ":", "self", ".", "disable", "(", "\"python3\"", ")", "self", ".", "set_option", "(", "\"reports\"", ",", "False", ")", "self", ".", "set_option", "(", "\"persistent\"", ",", "False", ")", "self", ".", "set_option", "(", "\"score\"", ",", "False", ")" ]
error mode: enable only errors; no reports, no persistent
[ "error", "mode", ":", "enable", "only", "errors", ";", "no", "reports", "no", "persistent" ]
python
test
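error_mode is normally reached via pylint's -E/--errors-only flag; a programmatic sketch (construction details assumed):

# Shell equivalent: pylint -E mymodule.py
from pylint.lint import PyLinter

linter = PyLinter()
linter.error_mode()  # disables non-error checks, reports, persistence, and score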
quantumlib/Cirq
cirq/google/line/placement/greedy.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/line/placement/greedy.py#L105-L130
def _sequence_search(self, start: GridQubit,
                         current: List[GridQubit]) -> List[GridQubit]:
        """Search for the continuous linear sequence from the given qubit.

        This method is called twice for the same starting qubit, so that
        sequences that begin and end on this qubit are searched for.

        Args:
            start: The first qubit, where search should be triggered from.
            current: Previously found linear sequence, which qubits are
                     forbidden to use during the search.

        Returns:
            Continuous linear sequence that begins with the starting qubit and
            does not contain any qubits from the current list.
        """
        used = set(current)
        seq = []
        n = start  # type: Optional[GridQubit]
        while n is not None:
            # Append qubit n to the sequence and mark it as visited.
            seq.append(n)
            used.add(n)

            # Advance search to the next qubit.
            n = self._choose_next_qubit(n, used)
        return seq
[ "def", "_sequence_search", "(", "self", ",", "start", ":", "GridQubit", ",", "current", ":", "List", "[", "GridQubit", "]", ")", "->", "List", "[", "GridQubit", "]", ":", "used", "=", "set", "(", "current", ")", "seq", "=", "[", "]", "n", "=", "start", "# type: Optional[GridQubit]", "while", "n", "is", "not", "None", ":", "# Append qubit n to the sequence and mark it as visited.", "seq", ".", "append", "(", "n", ")", "used", ".", "add", "(", "n", ")", "# Advance search to the next qubit.", "n", "=", "self", ".", "_choose_next_qubit", "(", "n", ",", "used", ")", "return", "seq" ]
Search for the continuous linear sequence from the given qubit.

This method is called twice for the same starting qubit, so that
sequences that begin and end on this qubit are searched for.

Args:
    start: The first qubit, where search should be triggered from.
    current: Previously found linear sequence, which qubits are
             forbidden to use during the search.

Returns:
    Continuous linear sequence that begins with the starting qubit and
    does not contain any qubits from the current list.
[ "Search", "for", "the", "continuous", "linear", "sequence", "from", "the", "given", "qubit", "." ]
python
train
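A standalone sketch of the same greedy walk with a toy neighbor chooser, showing the termination and bookkeeping without cirq objects:

def greedy_walk(start, choose_next, current=()):
    used = set(current)
    seq, n = [], start
    while n is not None:
        seq.append(n)
        used.add(n)
        n = choose_next(n, used)
    return seq

print(greedy_walk(0, lambda n, used: n + 1 if n + 1 < 5 else None))  # [0, 1, 2, 3, 4]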
loganasherjones/yapconf
yapconf/items.py
https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L243-L254
def update_default(self, new_default, respect_none=False): """Update our current default with the new_default. Args: new_default: New default to set. respect_none: Flag to determine if ``None`` is a valid value. """ if new_default is not None: self.default = new_default elif new_default is None and respect_none: self.default = None
[ "def", "update_default", "(", "self", ",", "new_default", ",", "respect_none", "=", "False", ")", ":", "if", "new_default", "is", "not", "None", ":", "self", ".", "default", "=", "new_default", "elif", "new_default", "is", "None", "and", "respect_none", ":", "self", ".", "default", "=", "None" ]
Update our current default with the new_default. Args: new_default: New default to set. respect_none: Flag to determine if ``None`` is a valid value.
[ "Update", "our", "current", "default", "with", "the", "new_default", "." ]
python
train
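The three cases in update_default, assuming item is a YapconfItem whose default starts out as 1:

item.update_default(42)                       # default becomes 42
item.update_default(None)                     # None is ignored; default stays 42
item.update_default(None, respect_none=True)  # default is explicitly cleared to None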
satellogic/telluric
telluric/collections.py
https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/collections.py#L209-L219
def dissolve(self, by=None, aggfunc=None): # type: (Optional[str], Optional[Callable]) -> FeatureCollection """Dissolve geometries and rasters within `groupby`. """ if by: agg = partial(dissolve, aggfunc=aggfunc) # type: Callable[[BaseCollection], GeoFeature] return self.groupby(by).agg(agg) else: return FeatureCollection([dissolve(self, aggfunc)])
[ "def", "dissolve", "(", "self", ",", "by", "=", "None", ",", "aggfunc", "=", "None", ")", ":", "# type: (Optional[str], Optional[Callable]) -> FeatureCollection", "if", "by", ":", "agg", "=", "partial", "(", "dissolve", ",", "aggfunc", "=", "aggfunc", ")", "# type: Callable[[BaseCollection], GeoFeature]", "return", "self", ".", "groupby", "(", "by", ")", ".", "agg", "(", "agg", ")", "else", ":", "return", "FeatureCollection", "(", "[", "dissolve", "(", "self", ",", "aggfunc", ")", "]", ")" ]
Dissolve geometries and rasters within `groupby`.
[ "Dissolve", "geometries", "and", "rasters", "within", "groupby", "." ]
python
train
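A hedged sketch (fc is an assumed telluric FeatureCollection whose features carry a 'land_use' attribute):

merged = fc.dissolve()                             # one dissolved feature from the whole collection
per_use = fc.dissolve(by='land_use', aggfunc=sum)  # one dissolved feature per group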
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/classtracker.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker.py#L217-L228
def total(self): """ Return the total (virtual) size of the process in bytes. If process information is not available, get the best number available, even if it is a poor approximation of reality. """ if self.system_total.available: return self.system_total.vsz elif self.asizeof_total: # pragma: no cover return self.asizeof_total else: # pragma: no cover return self.tracked_total
[ "def", "total", "(", "self", ")", ":", "if", "self", ".", "system_total", ".", "available", ":", "return", "self", ".", "system_total", ".", "vsz", "elif", "self", ".", "asizeof_total", ":", "# pragma: no cover", "return", "self", ".", "asizeof_total", "else", ":", "# pragma: no cover", "return", "self", ".", "tracked_total" ]
Return the total (virtual) size of the process in bytes. If process information is not available, get the best number available, even if it is a poor approximation of reality.
[ "Return", "the", "total", "(", "virtual", ")", "size", "of", "the", "process", "in", "bytes", ".", "If", "process", "information", "is", "not", "available", "get", "the", "best", "number", "available", "even", "if", "it", "is", "a", "poor", "approximation", "of", "reality", "." ]
python
train
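The record shows a vendored copy of Pympler's classtracker; against the upstream package, the property would be reached like this (sketch):

from pympler.classtracker import ClassTracker

tracker = ClassTracker()
tracker.create_snapshot()
snap = tracker.snapshots[-1]
print(snap.total)  # process vsz when available, else the asizeof/tracked fallbacks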