Column        Type     Range
text          string   lengths 89 to 104k
code_tokens   list
avg_line_len  float64  7.91 to 980
score         float64  0 to 630
def dump_hash_prefix_values(self):
    """Export all hash prefix values.

    Returns a list of known hash prefix values.
    """
    q = '''SELECT distinct value from hash_prefix'''
    output = []
    with self.get_cursor() as dbc:
        dbc.execute(q)
        output = [bytes(r[0]) for r in dbc.fetchall()]
    return output
[ "def", "dump_hash_prefix_values", "(", "self", ")", ":", "q", "=", "'''SELECT distinct value from hash_prefix'''", "output", "=", "[", "]", "with", "self", ".", "get_cursor", "(", ")", "as", "dbc", ":", "dbc", ".", "execute", "(", "q", ")", "output", "=", "[", "bytes", "(", "r", "[", "0", "]", ")", "for", "r", "in", "dbc", ".", "fetchall", "(", ")", "]", "return", "output" ]
32.181818
13.272727
def create(cls, name, ipv4_network=None, ipv6_network=None,
           comment=None):
    """
    Create the network element

    :param str name: Name of element
    :param str ipv4_network: network cidr (optional if ipv6)
    :param str ipv6_network: network cidr (optional if ipv4)
    :param str comment: comment (optional)
    :raises CreateElementFailed: element creation failed with reason
    :return: instance with meta
    :rtype: Network

    .. note:: Either an ipv4_network or ipv6_network must be specified
    """
    ipv4_network = ipv4_network if ipv4_network else None
    ipv6_network = ipv6_network if ipv6_network else None
    json = {'name': name,
            'ipv4_network': ipv4_network,
            'ipv6_network': ipv6_network,
            'comment': comment}

    return ElementCreator(cls, json)
[ "def", "create", "(", "cls", ",", "name", ",", "ipv4_network", "=", "None", ",", "ipv6_network", "=", "None", ",", "comment", "=", "None", ")", ":", "ipv4_network", "=", "ipv4_network", "if", "ipv4_network", "else", "None", "ipv6_network", "=", "ipv6_network", "if", "ipv6_network", "else", "None", "json", "=", "{", "'name'", ":", "name", ",", "'ipv4_network'", ":", "ipv4_network", ",", "'ipv6_network'", ":", "ipv6_network", ",", "'comment'", ":", "comment", "}", "return", "ElementCreator", "(", "cls", ",", "json", ")" ]
38.521739
15.565217
def main():
    '''Main routine.'''
    # process arguments
    if len(sys.argv) < 3:
        usage()

    rgname = sys.argv[1]
    vmss = sys.argv[2]

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        sys.exit("Error: Expecting azurermconfig.json in current folder")

    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    sub_id = config_data['subscriptionId']

    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    # get metric definitions
    provider = 'Microsoft.Compute'
    resource_type = 'virtualMachineScaleSets'
    metric_definitions = azurerm.list_metric_defs_for_resource(
        access_token, sub_id, rgname, provider, resource_type, vmss)
    print(json.dumps(metric_definitions, sort_keys=False, indent=2, separators=(',', ': ')))

    metrics = azurerm.get_metrics_for_resource(
        access_token, sub_id, rgname, provider, resource_type, vmss)
    print(json.dumps(metrics, sort_keys=False, indent=2, separators=(',', ': ')))
[ "def", "main", "(", ")", ":", "# process arguments", "if", "len", "(", "sys", ".", "argv", ")", "<", "3", ":", "usage", "(", ")", "rgname", "=", "sys", ".", "argv", "[", "1", "]", "vmss", "=", "sys", ".", "argv", "[", "2", "]", "# Load Azure app defaults", "try", ":", "with", "open", "(", "'azurermconfig.json'", ")", "as", "config_file", ":", "config_data", "=", "json", ".", "load", "(", "config_file", ")", "except", "FileNotFoundError", ":", "sys", ".", "exit", "(", "\"Error: Expecting azurermconfig.json in current folder\"", ")", "tenant_id", "=", "config_data", "[", "'tenantId'", "]", "app_id", "=", "config_data", "[", "'appId'", "]", "app_secret", "=", "config_data", "[", "'appSecret'", "]", "sub_id", "=", "config_data", "[", "'subscriptionId'", "]", "access_token", "=", "azurerm", ".", "get_access_token", "(", "tenant_id", ",", "app_id", ",", "app_secret", ")", "# get metric definitions", "provider", "=", "'Microsoft.Compute'", "resource_type", "=", "'virtualMachineScaleSets'", "metric_definitions", "=", "azurerm", ".", "list_metric_defs_for_resource", "(", "access_token", ",", "sub_id", ",", "rgname", ",", "provider", ",", "resource_type", ",", "vmss", ")", "print", "(", "json", ".", "dumps", "(", "metric_definitions", ",", "sort_keys", "=", "False", ",", "indent", "=", "2", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")", "metrics", "=", "azurerm", ".", "get_metrics_for_resource", "(", "access_token", ",", "sub_id", ",", "rgname", ",", "provider", ",", "resource_type", ",", "vmss", ")", "print", "(", "json", ".", "dumps", "(", "metrics", ",", "sort_keys", "=", "False", ",", "indent", "=", "2", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")" ]
33.297297
26.648649
def get_unique_counter(self, redis_conn=None, host='localhost', port=6379,
                       key='unique_counter', cycle_time=5, start_time=None,
                       window=SECONDS_1_HOUR, roll=True, keep_max=12):
    '''
    Generate a new UniqueCounter.
    Useful for exactly counting unique objects.

    @param redis_conn: A premade redis connection (overrides host and port)
    @param host: the redis host
    @param port: the redis port
    @param key: the key for your stats collection
    @param cycle_time: how often to check for expiring counts
    @param start_time: the time to start valid collection
    @param window: how long to collect data for in seconds (if rolling)
    @param roll: Roll the window after it expires, to continue collecting
        on a new date based key.
    @param keep_max: If rolling the static window, the max number of prior
        windows to keep
    '''
    counter = UniqueCounter(key=key, cycle_time=cycle_time,
                            start_time=start_time, window=window,
                            roll=roll, keep_max=keep_max)
    counter.setup(redis_conn=redis_conn, host=host, port=port)
    return counter
[ "def", "get_unique_counter", "(", "self", ",", "redis_conn", "=", "None", ",", "host", "=", "'localhost'", ",", "port", "=", "6379", ",", "key", "=", "'unique_counter'", ",", "cycle_time", "=", "5", ",", "start_time", "=", "None", ",", "window", "=", "SECONDS_1_HOUR", ",", "roll", "=", "True", ",", "keep_max", "=", "12", ")", ":", "counter", "=", "UniqueCounter", "(", "key", "=", "key", ",", "cycle_time", "=", "cycle_time", ",", "start_time", "=", "start_time", ",", "window", "=", "window", ",", "roll", "=", "roll", ",", "keep_max", "=", "keep_max", ")", "counter", ".", "setup", "(", "redis_conn", "=", "redis_conn", ",", "host", "=", "host", ",", "port", "=", "port", ")", "return", "counter" ]
51.333333
23.5
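
A hedged usage sketch for the get_unique_counter sample above: the StatsCollector host class and the counted URLs are hypothetical, and the increment call is an assumption about the UniqueCounter API rather than a confirmed signature.

collector = StatsCollector()                        # hypothetical host class
pages = collector.get_unique_counter(host='localhost', port=6379,
                                     key='crawler:unique_pages',
                                     window=3600, roll=True)
pages.increment('http://example.com/a')             # assumed counter API
pages.increment('http://example.com/a')             # duplicate; counted once
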
def _create_parser(cls):
    """
    The specific symbol "/" must be accepted in the attr_value part as well.
    In multipath configuration files collected from sosreports, more unusual
    symbols such as "-%" do appear, but they are enclosed in double quotes
    and are therefore already accepted. The source code of
    "device-mapper-multipath" also shows that any attr_value in
    "multipath.conf" containing whitespace must be enclosed in double
    quotation marks. So it is enough to add the one extra symbol "/" to the
    accepted characters.
    ----------------------------------------------------------
    udev_dir                /dev
    getuid_callout          "/sbin/scsi_id -g -u -s /block/%n"
    ----------------------------------------------------------
    """
    section_name = p.Word(p.alphas + "_")
    attr_name = attr_value = p.Word(p.alphanums + "_/")
    LBRACE, RBRACE = map(p.Suppress, "{}")
    attr = p.Group(attr_name + (attr_value | p.quotedString.setParseAction(p.removeQuotes)))
    attr_list = p.Dict(p.ZeroOrMore(attr))
    simple_section = p.Group(section_name + LBRACE + attr_list + RBRACE)
    complex_section = p.Group(section_name + LBRACE + p.OneOrMore(simple_section) + RBRACE)
    simple_or_complex = p.Dict(simple_section | complex_section)
    my_conf = p.Group(p.ZeroOrMore(simple_or_complex))
    my_conf.ignore("#" + p.restOfLine)
    return my_conf
[ "def", "_create_parser", "(", "cls", ")", ":", "section_name", "=", "p", ".", "Word", "(", "p", ".", "alphas", "+", "\"_\"", ")", "attr_name", "=", "attr_value", "=", "p", ".", "Word", "(", "p", ".", "alphanums", "+", "\"_/\"", ")", "LBRACE", ",", "RBRACE", "=", "map", "(", "p", ".", "Suppress", ",", "\"{}\"", ")", "attr", "=", "p", ".", "Group", "(", "attr_name", "+", "(", "attr_value", "|", "p", ".", "quotedString", ".", "setParseAction", "(", "p", ".", "removeQuotes", ")", ")", ")", "attr_list", "=", "p", ".", "Dict", "(", "p", ".", "ZeroOrMore", "(", "attr", ")", ")", "simple_section", "=", "p", ".", "Group", "(", "section_name", "+", "LBRACE", "+", "attr_list", "+", "RBRACE", ")", "complex_section", "=", "p", ".", "Group", "(", "section_name", "+", "LBRACE", "+", "p", ".", "OneOrMore", "(", "simple_section", ")", "+", "RBRACE", ")", "simple_or_complex", "=", "p", ".", "Dict", "(", "simple_section", "|", "complex_section", ")", "my_conf", "=", "p", ".", "Group", "(", "p", ".", "ZeroOrMore", "(", "simple_or_complex", ")", ")", "my_conf", ".", "ignore", "(", "\"#\"", "+", "p", ".", "restOfLine", ")", "return", "my_conf" ]
57.230769
24.384615
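
To make the grammar above concrete, here is a hedged demo; it assumes `p` is the pyparsing module (as the sample's `p.Word`/`p.Suppress` usage suggests), and the result-access pattern is a sketch of how p.Dict keys are typically reached, not a verified output of this exact grammar.

conf = '''
defaults {
    udev_dir /dev
    getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
}
'''
parser = _create_parser(None)        # cls is unused in the body
result = parser.parseString(conf)[0]
print(result['defaults']['udev_dir'])   # expected: /dev
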
def ping(dest_addr: str, timeout: int = 4, unit: str = "s", src_addr: str = None,
         ttl: int = 64, seq: int = 0, size: int = 56) -> float or None:
    """
    Send one ping to destination address with the given timeout.

    Args:
        dest_addr: The destination address, can be an IP address or a domain name.
            Ex. "192.168.1.1"/"example.com"
        timeout: Timeout in seconds. Default is 4s, same as Windows CMD. (default 4)
        unit: The unit of returned value. "s" for seconds, "ms" for milliseconds. (default "s")
        src_addr: The IP address to ping from. This is for multi-interface clients.
            Ex. "192.168.1.20". (default None)
        ttl: The Time-To-Live of the outgoing packet. Default is 64, same as in Linux
            and macOS. (default 64)
        seq: ICMP packet sequence, usually increases from 0 in the same process. (default 0)
        size: The ICMP packet payload size in bytes. Default is 56, same as in macOS. (default 56)

    Returns:
        The delay in seconds/milliseconds or None on timeout.

    Raises:
        PingError: Any PingError will raise again if `ping3.EXCEPTIONS` is True.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) as sock:
        sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
        if src_addr:
            sock.bind((src_addr, 0))
        icmp_id = threading.current_thread().ident % 0xFFFF
        try:
            send_one_ping(sock=sock, dest_addr=dest_addr, icmp_id=icmp_id, seq=seq, size=size)
            delay = receive_one_ping(sock=sock, icmp_id=icmp_id, seq=seq, timeout=timeout)  # in seconds
        except errors.PingError as e:
            _debug(e)
            if EXCEPTIONS:
                raise e
            return None
        if delay is None:
            return None
        if unit == "ms":
            delay *= 1000  # in milliseconds
        return delay
[ "def", "ping", "(", "dest_addr", ":", "str", ",", "timeout", ":", "int", "=", "4", ",", "unit", ":", "str", "=", "\"s\"", ",", "src_addr", ":", "str", "=", "None", ",", "ttl", ":", "int", "=", "64", ",", "seq", ":", "int", "=", "0", ",", "size", ":", "int", "=", "56", ")", "->", "float", "or", "None", ":", "with", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_RAW", ",", "socket", ".", "IPPROTO_ICMP", ")", "as", "sock", ":", "sock", ".", "setsockopt", "(", "socket", ".", "SOL_IP", ",", "socket", ".", "IP_TTL", ",", "ttl", ")", "if", "src_addr", ":", "sock", ".", "bind", "(", "(", "src_addr", ",", "0", ")", ")", "icmp_id", "=", "threading", ".", "current_thread", "(", ")", ".", "ident", "%", "0xFFFF", "try", ":", "send_one_ping", "(", "sock", "=", "sock", ",", "dest_addr", "=", "dest_addr", ",", "icmp_id", "=", "icmp_id", ",", "seq", "=", "seq", ",", "size", "=", "size", ")", "delay", "=", "receive_one_ping", "(", "sock", "=", "sock", ",", "icmp_id", "=", "icmp_id", ",", "seq", "=", "seq", ",", "timeout", "=", "timeout", ")", "# in seconds", "except", "errors", ".", "PingError", "as", "e", ":", "_debug", "(", "e", ")", "if", "EXCEPTIONS", ":", "raise", "e", "return", "None", "if", "delay", "is", "None", ":", "return", "None", "if", "unit", "==", "\"ms\"", ":", "delay", "*=", "1000", "# in milliseconds", "return", "delay" ]
49.594595
33.108108
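
For orientation, a hedged usage sketch of the ping sample above; note that opening a raw ICMP socket (SOCK_RAW) typically requires administrator privileges.

delay = ping('example.com', timeout=2, unit='ms')   # may need root for SOCK_RAW
if delay is None:
    print('timed out')
else:
    print('round trip: %.1f ms' % delay)
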
def _from_dict(cls, _dict):
    """Initialize a Tables object from a json dictionary."""
    args = {}
    if 'location' in _dict:
        args['location'] = Location._from_dict(_dict.get('location'))
    if 'text' in _dict:
        args['text'] = _dict.get('text')
    if 'section_title' in _dict:
        args['section_title'] = SectionTitle._from_dict(
            _dict.get('section_title'))
    if 'table_headers' in _dict:
        args['table_headers'] = [
            TableHeaders._from_dict(x) for x in (_dict.get('table_headers'))
        ]
    if 'row_headers' in _dict:
        args['row_headers'] = [
            RowHeaders._from_dict(x) for x in (_dict.get('row_headers'))
        ]
    if 'column_headers' in _dict:
        args['column_headers'] = [
            ColumnHeaders._from_dict(x)
            for x in (_dict.get('column_headers'))
        ]
    if 'key_value_pairs' in _dict:
        args['key_value_pairs'] = [
            KeyValuePair._from_dict(x)
            for x in (_dict.get('key_value_pairs'))
        ]
    if 'body_cells' in _dict:
        args['body_cells'] = [
            BodyCells._from_dict(x) for x in (_dict.get('body_cells'))
        ]
    return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'location'", "in", "_dict", ":", "args", "[", "'location'", "]", "=", "Location", ".", "_from_dict", "(", "_dict", ".", "get", "(", "'location'", ")", ")", "if", "'text'", "in", "_dict", ":", "args", "[", "'text'", "]", "=", "_dict", ".", "get", "(", "'text'", ")", "if", "'section_title'", "in", "_dict", ":", "args", "[", "'section_title'", "]", "=", "SectionTitle", ".", "_from_dict", "(", "_dict", ".", "get", "(", "'section_title'", ")", ")", "if", "'table_headers'", "in", "_dict", ":", "args", "[", "'table_headers'", "]", "=", "[", "TableHeaders", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'table_headers'", ")", ")", "]", "if", "'row_headers'", "in", "_dict", ":", "args", "[", "'row_headers'", "]", "=", "[", "RowHeaders", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'row_headers'", ")", ")", "]", "if", "'column_headers'", "in", "_dict", ":", "args", "[", "'column_headers'", "]", "=", "[", "ColumnHeaders", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'column_headers'", ")", ")", "]", "if", "'key_value_pairs'", "in", "_dict", ":", "args", "[", "'key_value_pairs'", "]", "=", "[", "KeyValuePair", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'key_value_pairs'", ")", ")", "]", "if", "'body_cells'", "in", "_dict", ":", "args", "[", "'body_cells'", "]", "=", "[", "BodyCells", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'body_cells'", ")", ")", "]", "return", "cls", "(", "*", "*", "args", ")" ]
39.333333
13.757576
def init(obj, resource_root, attrs=None):
    """
    Wrapper around the real constructor to avoid issues with the 'self'
    argument. Call like this, from a subclass's constructor:

     - BaseApiObject.init(self, locals())
    """
    # This works around http://bugs.python.org/issue2646
    # We use unicode strings as keys in kwargs.
    str_attrs = { }
    if attrs:
        for k, v in attrs.iteritems():
            if k not in ('self', 'resource_root'):
                str_attrs[k] = v
    BaseApiObject.__init__(obj, resource_root, **str_attrs)
[ "def", "init", "(", "obj", ",", "resource_root", ",", "attrs", "=", "None", ")", ":", "# This works around http://bugs.python.org/issue2646", "# We use unicode strings as keys in kwargs.", "str_attrs", "=", "{", "}", "if", "attrs", ":", "for", "k", ",", "v", "in", "attrs", ".", "iteritems", "(", ")", ":", "if", "k", "not", "in", "(", "'self'", ",", "'resource_root'", ")", ":", "str_attrs", "[", "k", "]", "=", "v", "BaseApiObject", ".", "__init__", "(", "obj", ",", "resource_root", ",", "*", "*", "str_attrs", ")" ]
35.2
13.733333
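
A hedged sketch of the calling convention the docstring describes, written against the init() signature above; ApiCluster and its fields are hypothetical.

class ApiCluster(BaseApiObject):
    def __init__(self, resource_root, name=None, version=None):
        # locals() holds self/resource_root/name/version; init() filters
        # out 'self' and 'resource_root' and forwards the rest as kwargs.
        BaseApiObject.init(self, resource_root, locals())
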
def get_token(opts, tok):
    '''
    Fetch the token data from the store.

    :param opts: Salt master config options
    :param tok: Token value to get
    :returns: Token data if successful. Empty dict if failed.
    '''
    redis_client = _redis_client(opts)
    if not redis_client:
        return {}
    serial = salt.payload.Serial(opts)
    try:
        tdata = serial.loads(redis_client.get(tok))
        return tdata
    except Exception as err:
        log.warning(
            'Authentication failure: cannot get token %s from redis: %s',
            tok, err
        )
        return {}
[ "def", "get_token", "(", "opts", ",", "tok", ")", ":", "redis_client", "=", "_redis_client", "(", "opts", ")", "if", "not", "redis_client", ":", "return", "{", "}", "serial", "=", "salt", ".", "payload", ".", "Serial", "(", "opts", ")", "try", ":", "tdata", "=", "serial", ".", "loads", "(", "redis_client", ".", "get", "(", "tok", ")", ")", "return", "tdata", "except", "Exception", "as", "err", ":", "log", ".", "warning", "(", "'Authentication failure: cannot get token %s from redis: %s'", ",", "tok", ",", "err", ")", "return", "{", "}" ]
27.619048
18.857143
def Generate(self, items, token=None):
  """Generates archive from a given collection.

  Iterates the collection and generates an archive by yielding contents
  of every referenced AFF4Stream.

  Args:
    items: Iterable with items that point to aff4 paths.
    token: User's ACLToken.

  Yields:
    Binary chunks comprising the generated archive.
  """
  clients = set()
  for fd_urn_batch in collection.Batch(
      self._ItemsToUrns(items), self.BATCH_SIZE):
    self.total_files += len(fd_urn_batch)

    fds_to_write = {}
    for fd in aff4.FACTORY.MultiOpen(fd_urn_batch, token=token):
      # Derive a ClientPath from AFF4 URN to make new and old
      # archive_generator predicate input consistent.
      # TODO(user): This code is clearly hacky and intended to be removed.
      urn_components = fd.urn.Split()
      if urn_components[1:3] != ["fs", "os"]:
        raise AssertionError("URN components are expected to start with "
                             "client, 'fs', 'os'. Got %r" % (urn_components,))

      client_path = db.ClientPath.OS(
          client_id=urn_components[0], components=urn_components[3:])

      if not self.predicate(client_path):
        self.ignored_files.add(utils.SmartUnicode(fd.urn))
        continue

      # Any file-like object with data in AFF4 should inherit AFF4Stream.
      if isinstance(fd, aff4.AFF4Stream):
        urn_components = fd.urn.Split()
        clients.add(rdf_client.ClientURN(urn_components[0]))

        content_path = os.path.join(self.prefix, *urn_components)

        # Make sure size of the original file is passed. It's required
        # when output_writer is StreamingTarWriter.
        st = os.stat_result((0o644, 0, 0, 0, 0, 0, fd.size, 0, 0, 0))
        fds_to_write[fd] = (content_path, st)

    if fds_to_write:
      prev_fd = None
      for fd, chunk, exception in aff4.AFF4Stream.MultiStream(fds_to_write):
        if exception:
          logging.exception(exception)
          try:
            self.archived_files.remove(utils.SmartUnicode(fd.urn))
          except KeyError:
            pass  # Failing is fine, since removal should be idempotent.
          self.failed_files.add(utils.SmartUnicode(fd.urn))
          continue

        if prev_fd != fd:
          if prev_fd:
            yield self.archive_generator.WriteFileFooter()
          prev_fd = fd
          content_path, st = fds_to_write[fd]
          yield self.archive_generator.WriteFileHeader(content_path, st=st)

        yield self.archive_generator.WriteFileChunk(chunk)
        self.archived_files.add(utils.SmartUnicode(fd.urn))

      if self.archive_generator.is_file_write_in_progress:
        yield self.archive_generator.WriteFileFooter()

  if clients:
    for client_urn_batch in collection.Batch(clients, self.BATCH_SIZE):
      for fd in aff4.FACTORY.MultiOpen(
          client_urn_batch, aff4_type=aff4_grr.VFSGRRClient, token=token):
        for chunk in self._GenerateClientInfo(fd):
          yield chunk

  for chunk in self._GenerateDescription():
    yield chunk

  yield self.archive_generator.Close()
[ "def", "Generate", "(", "self", ",", "items", ",", "token", "=", "None", ")", ":", "clients", "=", "set", "(", ")", "for", "fd_urn_batch", "in", "collection", ".", "Batch", "(", "self", ".", "_ItemsToUrns", "(", "items", ")", ",", "self", ".", "BATCH_SIZE", ")", ":", "self", ".", "total_files", "+=", "len", "(", "fd_urn_batch", ")", "fds_to_write", "=", "{", "}", "for", "fd", "in", "aff4", ".", "FACTORY", ".", "MultiOpen", "(", "fd_urn_batch", ",", "token", "=", "token", ")", ":", "# Derive a ClientPath from AFF4 URN to make new and old", "# archive_generator predicate input consistent.", "# TODO(user): This code is clearly hacky and intended to be removed.", "urn_components", "=", "fd", ".", "urn", ".", "Split", "(", ")", "if", "urn_components", "[", "1", ":", "3", "]", "!=", "[", "\"fs\"", ",", "\"os\"", "]", ":", "raise", "AssertionError", "(", "\"URN components are expected to start with \"", "\"client, 'fs', 'os'. Got %r\"", "%", "(", "urn_components", ",", ")", ")", "client_path", "=", "db", ".", "ClientPath", ".", "OS", "(", "client_id", "=", "urn_components", "[", "0", "]", ",", "components", "=", "urn_components", "[", "3", ":", "]", ")", "if", "not", "self", ".", "predicate", "(", "client_path", ")", ":", "self", ".", "ignored_files", ".", "add", "(", "utils", ".", "SmartUnicode", "(", "fd", ".", "urn", ")", ")", "continue", "# Any file-like object with data in AFF4 should inherit AFF4Stream.", "if", "isinstance", "(", "fd", ",", "aff4", ".", "AFF4Stream", ")", ":", "urn_components", "=", "fd", ".", "urn", ".", "Split", "(", ")", "clients", ".", "add", "(", "rdf_client", ".", "ClientURN", "(", "urn_components", "[", "0", "]", ")", ")", "content_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "prefix", ",", "*", "urn_components", ")", "# Make sure size of the original file is passed. It's required", "# when output_writer is StreamingTarWriter.", "st", "=", "os", ".", "stat_result", "(", "(", "0o644", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "fd", ".", "size", ",", "0", ",", "0", ",", "0", ")", ")", "fds_to_write", "[", "fd", "]", "=", "(", "content_path", ",", "st", ")", "if", "fds_to_write", ":", "prev_fd", "=", "None", "for", "fd", ",", "chunk", ",", "exception", "in", "aff4", ".", "AFF4Stream", ".", "MultiStream", "(", "fds_to_write", ")", ":", "if", "exception", ":", "logging", ".", "exception", "(", "exception", ")", "try", ":", "self", ".", "archived_files", ".", "remove", "(", "utils", ".", "SmartUnicode", "(", "fd", ".", "urn", ")", ")", "except", "KeyError", ":", "pass", "# Failing is fine, since removal should be idempotent.", "self", ".", "failed_files", ".", "add", "(", "utils", ".", "SmartUnicode", "(", "fd", ".", "urn", ")", ")", "continue", "if", "prev_fd", "!=", "fd", ":", "if", "prev_fd", ":", "yield", "self", ".", "archive_generator", ".", "WriteFileFooter", "(", ")", "prev_fd", "=", "fd", "content_path", ",", "st", "=", "fds_to_write", "[", "fd", "]", "yield", "self", ".", "archive_generator", ".", "WriteFileHeader", "(", "content_path", ",", "st", "=", "st", ")", "yield", "self", ".", "archive_generator", ".", "WriteFileChunk", "(", "chunk", ")", "self", ".", "archived_files", ".", "add", "(", "utils", ".", "SmartUnicode", "(", "fd", ".", "urn", ")", ")", "if", "self", ".", "archive_generator", ".", "is_file_write_in_progress", ":", "yield", "self", ".", "archive_generator", ".", "WriteFileFooter", "(", ")", "if", "clients", ":", "for", "client_urn_batch", "in", "collection", ".", "Batch", "(", "clients", ",", "self", ".", "BATCH_SIZE", ")", ":", "for", "fd", "in", "aff4", ".", "FACTORY", ".", "MultiOpen", "(", "client_urn_batch", ",", "aff4_type", "=", "aff4_grr", ".", "VFSGRRClient", ",", "token", "=", "token", ")", ":", "for", "chunk", "in", "self", ".", "_GenerateClientInfo", "(", "fd", ")", ":", "yield", "chunk", "for", "chunk", "in", "self", ".", "_GenerateDescription", "(", ")", ":", "yield", "chunk", "yield", "self", ".", "archive_generator", ".", "Close", "(", ")" ]
36.835294
22.529412
def __FieldDescriptorFromProperties(self, name, index, attrs):
    """Create a field descriptor for these attrs."""
    field = descriptor.FieldDescriptor()
    field.name = self.__names.CleanName(name)
    field.number = index
    field.label = self.__ComputeLabel(attrs)
    new_type_name_hint = self.__names.ClassName(
        '%sValue' % self.__names.ClassName(name))
    type_info = self.__GetTypeInfo(attrs, new_type_name_hint)
    field.type_name = type_info.type_name
    field.variant = type_info.variant
    if 'default' in attrs:
        # TODO(craigcitro): Correctly handle non-primitive default values.
        default = attrs['default']
        if not (field.type_name == 'string' or
                field.variant == messages.Variant.ENUM):
            default = str(json.loads(default))
        if field.variant == messages.Variant.ENUM:
            default = self.__names.NormalizeEnumName(default)
        field.default_value = default
    extended_field = extended_descriptor.ExtendedFieldDescriptor()
    extended_field.name = field.name
    extended_field.description = util.CleanDescription(
        attrs.get('description', 'A %s attribute.' % field.type_name))
    extended_field.field_descriptor = field
    return extended_field
[ "def", "__FieldDescriptorFromProperties", "(", "self", ",", "name", ",", "index", ",", "attrs", ")", ":", "field", "=", "descriptor", ".", "FieldDescriptor", "(", ")", "field", ".", "name", "=", "self", ".", "__names", ".", "CleanName", "(", "name", ")", "field", ".", "number", "=", "index", "field", ".", "label", "=", "self", ".", "__ComputeLabel", "(", "attrs", ")", "new_type_name_hint", "=", "self", ".", "__names", ".", "ClassName", "(", "'%sValue'", "%", "self", ".", "__names", ".", "ClassName", "(", "name", ")", ")", "type_info", "=", "self", ".", "__GetTypeInfo", "(", "attrs", ",", "new_type_name_hint", ")", "field", ".", "type_name", "=", "type_info", ".", "type_name", "field", ".", "variant", "=", "type_info", ".", "variant", "if", "'default'", "in", "attrs", ":", "# TODO(craigcitro): Correctly handle non-primitive default values.", "default", "=", "attrs", "[", "'default'", "]", "if", "not", "(", "field", ".", "type_name", "==", "'string'", "or", "field", ".", "variant", "==", "messages", ".", "Variant", ".", "ENUM", ")", ":", "default", "=", "str", "(", "json", ".", "loads", "(", "default", ")", ")", "if", "field", ".", "variant", "==", "messages", ".", "Variant", ".", "ENUM", ":", "default", "=", "self", ".", "__names", ".", "NormalizeEnumName", "(", "default", ")", "field", ".", "default_value", "=", "default", "extended_field", "=", "extended_descriptor", ".", "ExtendedFieldDescriptor", "(", ")", "extended_field", ".", "name", "=", "field", ".", "name", "extended_field", ".", "description", "=", "util", ".", "CleanDescription", "(", "attrs", ".", "get", "(", "'description'", ",", "'A %s attribute.'", "%", "field", ".", "type_name", ")", ")", "extended_field", ".", "field_descriptor", "=", "field", "return", "extended_field" ]
51.076923
13.153846
def dec(self, *args, **kwargs):
    """
    NAME:

       dec

    PURPOSE:

       return the declination

    INPUT:

       t - (optional) time at which to get dec

       obs=[X,Y,Z] - (optional) position of observer (in kpc)
                     (default=Object-wide default)
                     OR Orbit object that corresponds to the orbit of the observer
                     Y is ignored and always assumed to be zero

       ro= distance in kpc corresponding to R=1. (default=Object-wide default)

    OUTPUT:

       dec(t)

    HISTORY:

       2011-02-23 - Written - Bovy (NYU)

    """
    _check_roSet(self, kwargs, 'dec')
    radec = self._radec(*args, **kwargs)
    return radec[:, 1]
[ "def", "dec", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_check_roSet", "(", "self", ",", "kwargs", ",", "'dec'", ")", "radec", "=", "self", ".", "_radec", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "radec", "[", ":", ",", "1", "]" ]
34.909091
16.545455
def keys(self):
    """Return iterable of columns used by this object."""
    columns = set()
    for name in vars(self):
        if (not name) or name[0] == "_":
            continue
        columns.add(name)
    return columns
[ "def", "keys", "(", "self", ")", ":", "columns", "=", "set", "(", ")", "for", "name", "in", "vars", "(", "self", ")", ":", "if", "(", "not", "name", ")", "or", "name", "[", "0", "]", "==", "\"_\"", ":", "continue", "columns", ".", "add", "(", "name", ")", "return", "columns" ]
26.625
15.5
def push_accepts_more(self):
    """Return whether a block of interactive input can accept more input.

    This method is meant to be used by line-oriented frontends, who need to
    guess whether a block is complete or not based solely on prior and
    current input lines. The InputSplitter considers it has a complete
    interactive block and will not accept more input only when either a
    SyntaxError is raised, or *all* of the following are true:

    1. The input compiles to a complete statement.

    2. The indentation level is flush-left (because if we are indented,
       like inside a function definition or for loop, we need to keep
       reading new input).

    3. There is one extra line consisting only of whitespace.

    Because of condition #3, this method should be used only by
    *line-oriented* frontends, since it means that intermediate blank lines
    are not allowed in function definitions (or any other indented block).

    If the current input produces a syntax error, this method immediately
    returns False but does *not* raise the syntax error exception, as
    typically clients will want to send invalid syntax to an execution
    backend which might convert the invalid syntax into valid Python via
    one of the dynamic IPython mechanisms.
    """
    # With incomplete input, unconditionally accept more
    if not self._is_complete:
        return True

    # If we already have complete input and we're flush left, the answer
    # depends. In line mode, if there hasn't been any indentation,
    # that's it. If we've come back from some indentation, we need
    # the blank final line to finish.
    # In cell mode, we need to check how many blocks the input so far
    # compiles into, because if there's already more than one full
    # independent block of input, then the client has entered full
    # 'cell' mode and is feeding lines that each is complete. In this
    # case we should then keep accepting. The Qt terminal-like console
    # does precisely this, to provide the convenience of terminal-like
    # input of single expressions, but allowing the user (with a
    # separate keystroke) to switch to 'cell' mode and type multiple
    # expressions in one shot.
    if self.indent_spaces == 0:
        if self.input_mode == 'line':
            if not self._full_dedent:
                return False
        else:
            try:
                code_ast = ast.parse(u''.join(self._buffer))
            except Exception:
                return False
            else:
                if len(code_ast.body) == 1:
                    return False

    # When input is complete, then termination is marked by an extra blank
    # line at the end.
    last_line = self.source.splitlines()[-1]
    return bool(last_line and not last_line.isspace())
[ "def", "push_accepts_more", "(", "self", ")", ":", "# With incomplete input, unconditionally accept more", "if", "not", "self", ".", "_is_complete", ":", "return", "True", "# If we already have complete input and we're flush left, the answer", "# depends. In line mode, if there hasn't been any indentation,", "# that's it. If we've come back from some indentation, we need", "# the blank final line to finish.", "# In cell mode, we need to check how many blocks the input so far", "# compiles into, because if there's already more than one full", "# independent block of input, then the client has entered full", "# 'cell' mode and is feeding lines that each is complete. In this", "# case we should then keep accepting. The Qt terminal-like console", "# does precisely this, to provide the convenience of terminal-like", "# input of single expressions, but allowing the user (with a", "# separate keystroke) to switch to 'cell' mode and type multiple", "# expressions in one shot.", "if", "self", ".", "indent_spaces", "==", "0", ":", "if", "self", ".", "input_mode", "==", "'line'", ":", "if", "not", "self", ".", "_full_dedent", ":", "return", "False", "else", ":", "try", ":", "code_ast", "=", "ast", ".", "parse", "(", "u''", ".", "join", "(", "self", ".", "_buffer", ")", ")", "except", "Exception", ":", "return", "False", "else", ":", "if", "len", "(", "code_ast", ".", "body", ")", "==", "1", ":", "return", "False", "# When input is complete, then termination is marked by an extra blank", "# line at the end.", "last_line", "=", "self", ".", "source", ".", "splitlines", "(", ")", "[", "-", "1", "]", "return", "bool", "(", "last_line", "and", "not", "last_line", ".", "isspace", "(", ")", ")" ]
48.032258
24.193548
def find_node_modules_basedir(self):
    """
    Find all node_modules directories configured to be accessible
    through this driver instance.

    This is typically used for adding the direct instance, and does
    not traverse the parent directories like what Node.js does.

    Returns a list of directories that contain a 'node_modules'
    directory.
    """
    paths = []

    # First do the working dir.
    local_node_path = self.join_cwd(NODE_MODULES)
    if isdir(local_node_path):
        paths.append(local_node_path)

    # do the NODE_PATH environment variable last, as Node.js seems to
    # resolve these just before the global ones.
    if self.node_path:
        paths.extend(self.node_path.split(pathsep))

    return paths
[ "def", "find_node_modules_basedir", "(", "self", ")", ":", "paths", "=", "[", "]", "# First do the working dir.", "local_node_path", "=", "self", ".", "join_cwd", "(", "NODE_MODULES", ")", "if", "isdir", "(", "local_node_path", ")", ":", "paths", ".", "append", "(", "local_node_path", ")", "# do the NODE_PATH environment variable last, as Node.js seems to", "# resolve these just before the global ones.", "if", "self", ".", "node_path", ":", "paths", ".", "extend", "(", "self", ".", "node_path", ".", "split", "(", "pathsep", ")", ")", "return", "paths" ]
31.8
21
def no_redirect(pattern, locale_prefix=True, re_flags=None):
    """
    Return a url matcher that will stop the redirect middleware and force
    Django to continue with regular URL matching. For use when you have a
    URL pattern you want to serve, and a broad catch-all pattern you want
    to redirect.

    :param pattern: regex URL pattern that will definitely not redirect.
    :param locale_prefix: prepend the locale matching pattern.
    :param re_flags: a string of any of the characters: "iLmsux". Will modify the
        `pattern` regex based on the documented meaning of the flags (see python
        re module docs).
    :return: a url matcher (the result of ``url()``) for use in urlpatterns.
    """
    if locale_prefix:
        pattern = pattern.lstrip('^/')
        pattern = LOCALE_RE + pattern

    if re_flags:
        pattern = '(?{})'.format(re_flags) + pattern

    def _view(request, *args, **kwargs):
        return None

    return url(pattern, _view)
[ "def", "no_redirect", "(", "pattern", ",", "locale_prefix", "=", "True", ",", "re_flags", "=", "None", ")", ":", "if", "locale_prefix", ":", "pattern", "=", "pattern", ".", "lstrip", "(", "'^/'", ")", "pattern", "=", "LOCALE_RE", "+", "pattern", "if", "re_flags", ":", "pattern", "=", "'(?{})'", ".", "format", "(", "re_flags", ")", "+", "pattern", "def", "_view", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "None", "return", "url", "(", "pattern", ",", "_view", ")" ]
40.090909
23.727273
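
A hedged sketch of how the matcher above could sit in a urlpatterns list: the exact path falls through to normal URL resolution while a broad pattern redirects everything else. The `redirect` helper and both URLs are hypothetical.

urlpatterns = [
    no_redirect(r'^firefox/all/$'),                     # serve this one normally
    redirect(r'^firefox/.*$', 'https://example.com'),   # hypothetical companion helper
]
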
def _parse_curves(block, **kwargs):
    """Parse nonlinear curves block."""
    count = int(block.pop(0))
    curves = []
    for i in range(count):
        for param in ['mod_reduc', 'damping']:
            length, name = parse_fixed_width([(5, int), (65, to_str)], block)
            curves.append(
                site.NonlinearProperty(
                    name, parse_fixed_width(length * [(10, float)], block),
                    parse_fixed_width(length * [(10, float)], block),
                    param))

    length = int(block[0][:5])
    soil_types = parse_fixed_width((length + 1) * [(5, int)], block)[1:]
    # Group soil type number and curves together
    return {(soil_types[i // 2], c.param): c for i, c in enumerate(curves)}
[ "def", "_parse_curves", "(", "block", ",", "*", "*", "kwargs", ")", ":", "count", "=", "int", "(", "block", ".", "pop", "(", "0", ")", ")", "curves", "=", "[", "]", "for", "i", "in", "range", "(", "count", ")", ":", "for", "param", "in", "[", "'mod_reduc'", ",", "'damping'", "]", ":", "length", ",", "name", "=", "parse_fixed_width", "(", "[", "(", "5", ",", "int", ")", ",", "(", "65", ",", "to_str", ")", "]", ",", "block", ")", "curves", ".", "append", "(", "site", ".", "NonlinearProperty", "(", "name", ",", "parse_fixed_width", "(", "length", "*", "[", "(", "10", ",", "float", ")", "]", ",", "block", ")", ",", "parse_fixed_width", "(", "length", "*", "[", "(", "10", ",", "float", ")", "]", ",", "block", ")", ",", "param", ")", ")", "length", "=", "int", "(", "block", "[", "0", "]", "[", ":", "5", "]", ")", "soil_types", "=", "parse_fixed_width", "(", "(", "length", "+", "1", ")", "*", "[", "(", "5", ",", "int", ")", "]", ",", "block", ")", "[", "1", ":", "]", "# Group soil type number and curves together", "return", "{", "(", "soil_types", "[", "i", "//", "2", "]", ",", "c", ".", "param", ")", ":", "c", "for", "i", ",", "c", "in", "enumerate", "(", "curves", ")", "}" ]
38.315789
21
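
The sample leans on a parse_fixed_width helper; below is a minimal single-line sketch of what such a reader could look like. This is an assumption about its behavior rather than the project's actual implementation, which presumably also handles specs that span multiple input lines.

def parse_fixed_width(specs, block):
    # Consume one line from the front of the block and slice it into
    # fixed-width fields, converting each with the paired callable.
    line = block.pop(0)
    values, offset = [], 0
    for width, conv in specs:
        values.append(conv(line[offset:offset + width].strip()))
        offset += width
    return values
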
def CheckTaskToMerge(self, task):
  """Checks if the task should be merged.

  Args:
    task (Task): task.

  Returns:
    bool: True if the task should be merged.

  Raises:
    KeyError: if the task was not queued, processing or abandoned.
  """
  with self._lock:
    is_abandoned = task.identifier in self._tasks_abandoned
    is_processing = task.identifier in self._tasks_processing
    is_queued = task.identifier in self._tasks_queued

    if not is_queued and not is_processing and not is_abandoned:
      raise KeyError('Status of task {0:s} is unknown.'.format(
          task.identifier))

    return is_queued or is_processing or is_abandoned and not task.has_retry
[ "def", "CheckTaskToMerge", "(", "self", ",", "task", ")", ":", "with", "self", ".", "_lock", ":", "is_abandoned", "=", "task", ".", "identifier", "in", "self", ".", "_tasks_abandoned", "is_processing", "=", "task", ".", "identifier", "in", "self", ".", "_tasks_processing", "is_queued", "=", "task", ".", "identifier", "in", "self", ".", "_tasks_queued", "if", "not", "is_queued", "and", "not", "is_processing", "and", "not", "is_abandoned", ":", "raise", "KeyError", "(", "'Status of task {0:s} is unknown.'", ".", "format", "(", "task", ".", "identifier", ")", ")", "return", "is_queued", "or", "is_processing", "or", "is_abandoned", "and", "not", "task", ".", "has_retry" ]
31.363636
23.818182
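
One subtlety in the sample's final line: `and` binds tighter than `or`, so the expression is equivalent to the parenthesized form below, meaning the has_retry guard applies only to abandoned tasks.

return is_queued or is_processing or (is_abandoned and not task.has_retry)
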
def json(self):
    """
    Return a JSON-serializable representation of this result.

    The output of this function can be converted to a serialized string
    with :any:`json.dumps`.
    """
    return {
        "status": self.status,
        "criteria_name": self.criteria_name,
        "warnings": [w.json() for w in self.warnings],
        "settings": self.settings,
    }
[ "def", "json", "(", "self", ")", ":", "return", "{", "\"status\"", ":", "self", ".", "status", ",", "\"criteria_name\"", ":", "self", ".", "criteria_name", ",", "\"warnings\"", ":", "[", "w", ".", "json", "(", ")", "for", "w", "in", "self", ".", "warnings", "]", ",", "\"settings\"", ":", "self", ".", "settings", ",", "}" ]
33.666667
16.5
def _convert_markup_images(self, soup):
    """
    Convert images of instructions markup. Images are downloaded,
    base64-encoded and inserted into <img> tags.

    @param soup: BeautifulSoup instance.
    @type soup: BeautifulSoup
    """
    # 6. Replace <img> assets with actual image contents
    images = [image for image in soup.find_all('img')
              if image.attrs.get('assetid') is not None]
    if not images:
        return

    # Get assetid attribute from all images
    asset_ids = [image.attrs.get('assetid') for image in images]
    self._asset_retriever(asset_ids)

    for image in images:
        # Encode each image using base64
        asset = self._asset_retriever[image['assetid']]
        if asset.data is not None:
            encoded64 = base64.b64encode(asset.data).decode()
            image['src'] = 'data:%s;base64,%s' % (asset.content_type,
                                                  encoded64)
[ "def", "_convert_markup_images", "(", "self", ",", "soup", ")", ":", "# 6. Replace <img> assets with actual image contents", "images", "=", "[", "image", "for", "image", "in", "soup", ".", "find_all", "(", "'img'", ")", "if", "image", ".", "attrs", ".", "get", "(", "'assetid'", ")", "is", "not", "None", "]", "if", "not", "images", ":", "return", "# Get assetid attribute from all images", "asset_ids", "=", "[", "image", ".", "attrs", ".", "get", "(", "'assetid'", ")", "for", "image", "in", "images", "]", "self", ".", "_asset_retriever", "(", "asset_ids", ")", "for", "image", "in", "images", ":", "# Encode each image using base64", "asset", "=", "self", ".", "_asset_retriever", "[", "image", "[", "'assetid'", "]", "]", "if", "asset", ".", "data", "is", "not", "None", ":", "encoded64", "=", "base64", ".", "b64encode", "(", "asset", ".", "data", ")", ".", "decode", "(", ")", "image", "[", "'src'", "]", "=", "'data:%s;base64,%s'", "%", "(", "asset", ".", "content_type", ",", "encoded64", ")" ]
38.76
15.64
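
A hedged standalone sketch of the data-URI construction used in the loop above; the image file and content type here are illustrative only.

import base64

with open('logo.png', 'rb') as f:        # hypothetical image file
    data = f.read()
encoded64 = base64.b64encode(data).decode()
src = 'data:%s;base64,%s' % ('image/png', encoded64)
# An <img src="data:image/png;base64,..."> tag then embeds the image inline.
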
def say(self, event):
    """Chat event handler for incoming events

    :param event: say-event with incoming chat message
    """
    try:
        userid = event.user.uuid
        recipient = self._get_recipient(event)
        content = self._get_content(event)

        message = objectmodels['chatmessage']({
            'timestamp': time(),
            'recipient': recipient,
            'sender': userid,
            'content': content,
            'uuid': std_uuid()
        })
        message.save()

        chat_packet = {
            'component': 'hfos.chat.host',
            'action': 'say',
            'data': message.serializablefields()
        }

        if recipient in self.chat_channels:
            for useruuid in self.users:
                if useruuid in self.chat_channels[recipient].users:
                    self.log('User in channel', lvl=debug)
                    self.update_lastlog(useruuid, recipient)

                    self.log('Sending message', lvl=debug)
                    self.fireEvent(send(useruuid, chat_packet,
                                        sendtype='user'))
    except Exception as e:
        self.log("Error: '%s' %s" % (e, type(e)), exc=True, lvl=error)
[ "def", "say", "(", "self", ",", "event", ")", ":", "try", ":", "userid", "=", "event", ".", "user", ".", "uuid", "recipient", "=", "self", ".", "_get_recipient", "(", "event", ")", "content", "=", "self", ".", "_get_content", "(", "event", ")", "message", "=", "objectmodels", "[", "'chatmessage'", "]", "(", "{", "'timestamp'", ":", "time", "(", ")", ",", "'recipient'", ":", "recipient", ",", "'sender'", ":", "userid", ",", "'content'", ":", "content", ",", "'uuid'", ":", "std_uuid", "(", ")", "}", ")", "message", ".", "save", "(", ")", "chat_packet", "=", "{", "'component'", ":", "'hfos.chat.host'", ",", "'action'", ":", "'say'", ",", "'data'", ":", "message", ".", "serializablefields", "(", ")", "}", "if", "recipient", "in", "self", ".", "chat_channels", ":", "for", "useruuid", "in", "self", ".", "users", ":", "if", "useruuid", "in", "self", ".", "chat_channels", "[", "recipient", "]", ".", "users", ":", "self", ".", "log", "(", "'User in channel'", ",", "lvl", "=", "debug", ")", "self", ".", "update_lastlog", "(", "useruuid", ",", "recipient", ")", "self", ".", "log", "(", "'Sending message'", ",", "lvl", "=", "debug", ")", "self", ".", "fireEvent", "(", "send", "(", "useruuid", ",", "chat_packet", ",", "sendtype", "=", "'user'", ")", ")", "except", "Exception", "as", "e", ":", "self", ".", "log", "(", "\"Error: '%s' %s\"", "%", "(", "e", ",", "type", "(", "e", ")", ")", ",", "exc", "=", "True", ",", "lvl", "=", "error", ")" ]
34.236842
18.552632
def coffee_compile(source):
    """Compiles the given ``source`` from CoffeeScript to JavaScript"""
    with open(COFFEE_COMPILER, 'rb') as coffeescript_js:
        return evaljs(
            (coffeescript_js.read().decode('utf-8'),
             'CoffeeScript.compile(dukpy.coffeecode)'),
            coffeecode=source
        )
[ "def", "coffee_compile", "(", "source", ")", ":", "with", "open", "(", "COFFEE_COMPILER", ",", "'rb'", ")", "as", "coffeescript_js", ":", "return", "evaljs", "(", "(", "coffeescript_js", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ",", "'CoffeeScript.compile(dukpy.coffeecode)'", ")", ",", "coffeecode", "=", "source", ")" ]
40.125
14.5
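
A hedged usage sketch for the dukpy-based compiler above:

js = coffee_compile('add = (a, b) -> a + b')
print(js)   # prints the JavaScript emitted by CoffeeScript.compile
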
def content_type(self, content_type):
    """Sets the content_type of this Notificant.

    The value of the Content-Type header of the webhook POST request.  # noqa: E501

    :param content_type: The content_type of this Notificant.  # noqa: E501
    :type: str
    """
    allowed_values = ["application/json", "text/html", "text/plain",
                      "application/x-www-form-urlencoded", ""]  # noqa: E501
    if content_type not in allowed_values:
        raise ValueError(
            "Invalid value for `content_type` ({0}), must be one of {1}"  # noqa: E501
            .format(content_type, allowed_values)
        )

    self._content_type = content_type
[ "def", "content_type", "(", "self", ",", "content_type", ")", ":", "allowed_values", "=", "[", "\"application/json\"", ",", "\"text/html\"", ",", "\"text/plain\"", ",", "\"application/x-www-form-urlencoded\"", ",", "\"\"", "]", "# noqa: E501", "if", "content_type", "not", "in", "allowed_values", ":", "raise", "ValueError", "(", "\"Invalid value for `content_type` ({0}), must be one of {1}\"", "# noqa: E501", ".", "format", "(", "content_type", ",", "allowed_values", ")", ")", "self", ".", "_content_type", "=", "content_type" ]
42.6875
26.625
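
A hedged sketch of the setter's validation behavior; how a Notificant is constructed is assumed here.

n = Notificant()                       # hypothetical construction
n.content_type = 'application/json'    # accepted, stored on _content_type
n.content_type = 'text/csv'            # raises ValueError: not in allowed_values
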
def contact_addresses(self):
    """
    Provides a reference to contact addresses used by this server.

    Obtain a reference to manipulate or iterate existing contact
    addresses::

        >>> from smc.elements.servers import ManagementServer
        >>> mgt_server = ManagementServer.objects.first()
        >>> for contact_address in mgt_server.contact_addresses:
        ...   contact_address
        ...
        ContactAddress(location=Default,addresses=[u'1.1.1.1'])
        ContactAddress(location=foolocation,addresses=[u'12.12.12.12'])

    :rtype: MultiContactAddress
    """
    return MultiContactAddress(
        href=self.get_relation('contact_addresses'),
        type=self.typeof,
        name=self.name)
[ "def", "contact_addresses", "(", "self", ")", ":", "return", "MultiContactAddress", "(", "href", "=", "self", ".", "get_relation", "(", "'contact_addresses'", ")", ",", "type", "=", "self", ".", "typeof", ",", "name", "=", "self", ".", "name", ")" ]
38
19.238095
def decrease_crypto_config(self, crypto_adapters, crypto_domain_indexes):
    """
    Remove crypto adapters and/or crypto domains from the crypto
    configuration of this partition.

    For the general principle for maintaining crypto configurations of
    partitions, see :meth:`~zhmcclient.Partition.increase_crypto_config`.

    Example: Assume that the current crypto configuration of a partition
    includes crypto adapters A, B and C and crypto domains 0, 1, and 2 (on
    each of the adapters). When this method is called to remove adapter C
    and domain 2, the resulting crypto configuration of the partition will
    include domains 0 and 1 on each of the adapters A and B.

    Authorization requirements:

    * Object-access permission to this Partition.
    * Task permission to the "Partition Details" task.

    Parameters:

      crypto_adapters (:term:`iterable` of :class:`~zhmcclient.Adapter`):
        Crypto adapters that should be removed from the crypto
        configuration of this partition.

      crypto_domain_indexes (:term:`iterable` of :term:`integer`):
        Domain indexes of the crypto domains that should be removed from
        the crypto configuration of this partition. For values, see
        :meth:`~zhmcclient.Partition.increase_crypto_config`.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    crypto_adapter_uris = [a.uri for a in crypto_adapters]
    body = {'crypto-adapter-uris': crypto_adapter_uris,
            'crypto-domain-indexes': crypto_domain_indexes}
    self.manager.session.post(
        self.uri + '/operations/decrease-crypto-configuration', body)
[ "def", "decrease_crypto_config", "(", "self", ",", "crypto_adapters", ",", "crypto_domain_indexes", ")", ":", "crypto_adapter_uris", "=", "[", "a", ".", "uri", "for", "a", "in", "crypto_adapters", "]", "body", "=", "{", "'crypto-adapter-uris'", ":", "crypto_adapter_uris", ",", "'crypto-domain-indexes'", ":", "crypto_domain_indexes", "}", "self", ".", "manager", ".", "session", ".", "post", "(", "self", ".", "uri", "+", "'/operations/decrease-crypto-configuration'", ",", "body", ")" ]
43.162791
24.139535
def setQuery(self, query):
    """
    Sets the query instance for this widget to the inputted query.

    :param      query | <orb.Query> || <orb.QueryCompound>
    """
    if not query.isNull() and hash(query) == hash(self._query):
        return

    self._query = query

    if QueryCompound.typecheck(query):
        self.uiColumnDDL.hide()
        self.uiOperatorDDL.hide()

        # setup the compound editor
        editor = XLineEdit(self)
        editor.setReadOnly(True)
        editor.setText(query.name() + ' %s' % nativestring(query))
        editor.setHint(nativestring(query))
        self.setEditor(editor)
    else:
        self.uiColumnDDL.show()
        self.uiOperatorDDL.show()

        text = query.columnName()
        self.uiColumnDDL.setCurrentSchemaPath(nativestring(text))
        self.uiOperatorDDL.blockSignals(True)
        plug = self.currentPlugin()
        if plug:
            op = plug.operator(query.operatorType(), query.value())
            index = self.uiOperatorDDL.findText(op)
            if index != -1:
                self.uiOperatorDDL.setCurrentIndex(index)
        self.uiOperatorDDL.blockSignals(False)

    self.refreshButtons()
[ "def", "setQuery", "(", "self", ",", "query", ")", ":", "if", "not", "query", ".", "isNull", "(", ")", "and", "hash", "(", "query", ")", "==", "hash", "(", "self", ".", "_query", ")", ":", "return", "self", ".", "_query", "=", "query", "if", "QueryCompound", ".", "typecheck", "(", "query", ")", ":", "self", ".", "uiColumnDDL", ".", "hide", "(", ")", "self", ".", "uiOperatorDDL", ".", "hide", "(", ")", "# setup the compound editor\r", "editor", "=", "XLineEdit", "(", "self", ")", "editor", ".", "setReadOnly", "(", "True", ")", "editor", ".", "setText", "(", "query", ".", "name", "(", ")", "+", "' %s'", "%", "nativestring", "(", "query", ")", ")", "editor", ".", "setHint", "(", "nativestring", "(", "query", ")", ")", "self", ".", "setEditor", "(", "editor", ")", "else", ":", "self", ".", "uiColumnDDL", ".", "show", "(", ")", "self", ".", "uiOperatorDDL", ".", "show", "(", ")", "text", "=", "query", ".", "columnName", "(", ")", "self", ".", "uiColumnDDL", ".", "setCurrentSchemaPath", "(", "nativestring", "(", "text", ")", ")", "self", ".", "uiOperatorDDL", ".", "blockSignals", "(", "True", ")", "plug", "=", "self", ".", "currentPlugin", "(", ")", "if", "plug", ":", "op", "=", "plug", ".", "operator", "(", "query", ".", "operatorType", "(", ")", ",", "query", ".", "value", "(", ")", ")", "index", "=", "self", ".", "uiOperatorDDL", ".", "findText", "(", "op", ")", "if", "index", "!=", "-", "1", ":", "self", ".", "uiOperatorDDL", ".", "setCurrentIndex", "(", "index", ")", "self", ".", "uiOperatorDDL", ".", "blockSignals", "(", "False", ")", "self", ".", "refreshButtons", "(", ")" ]
33.809524
16.47619
def read(path):
    """
    Reads a file located at the given path.
    """
    data = None
    with open(path, 'r') as f:
        data = f.read()
        f.close()
    return data
[ "def", "read", "(", "path", ")", ":", "data", "=", "None", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "return", "data" ]
23.142857
18.428571
def find_find_repos(where, ignore_error=True):
    """
    Search for repositories with GNU find

    Args:
        where (str): path to search from
        ignore_error (bool): if False, raise Exception when the returncode
            is not zero.

    Yields:
        Repository subclass instance
    """
    log.debug(('REPO_REGEX', REPO_REGEX))
    FIND_REPO_REGEXCMD = ("-regex", '.*(%s)$' % REPO_REGEX)
    if os.uname()[0] == 'Darwin':
        cmd = ("find",
               '-E',
               '-L',  # dereference symlinks
               where,
               FIND_REPO_REGEXCMD[0],
               FIND_REPO_REGEXCMD[1])
    else:
        cmd = ("find",
               '-O3',
               '-L',  # dereference symlinks
               where,
               # " .",
               "-regextype", "posix-egrep",
               FIND_REPO_REGEXCMD[0],
               FIND_REPO_REGEXCMD[1])
    _cmd = ' '.join(cmd)
    log.debug("find_find_repos(%r) = %s" % (where, _cmd))
    kwargs = {
        #'shell': True,
        'cwd': where,
        'stderr': sys.stderr,
        'stdout': subprocess.PIPE,
    }
    p = subprocess.Popen(cmd, universal_newlines=True, **kwargs)
    if p.returncode and not ignore_error:
        p_stdout = p.communicate()[0]
        raise Exception("Subprocess return code: %d\n%r\n%r" % (
            p.returncode, cmd, p_stdout))

    for l in iter(p.stdout.readline, ''):
        path = l.rstrip()
        _path, _prefix = os.path.dirname(path), os.path.basename(path)
        repo = REPO_PREFIXES.get(_prefix)
        if repo is None:
            log.error("repo for path %r and prefix %r is None" % (path, _prefix))
        if repo:
            yield repo(_path)
[ "def", "find_find_repos", "(", "where", ",", "ignore_error", "=", "True", ")", ":", "log", ".", "debug", "(", "(", "'REPO_REGEX'", ",", "REPO_REGEX", ")", ")", "FIND_REPO_REGEXCMD", "=", "(", "\"-regex\"", ",", "'.*(%s)$'", "%", "REPO_REGEX", ")", "if", "os", ".", "uname", "(", ")", "[", "0", "]", "==", "'Darwin'", ":", "cmd", "=", "(", "\"find\"", ",", "'-E'", ",", "'-L'", ",", "# dereference symlinks", "where", ",", "FIND_REPO_REGEXCMD", "[", "0", "]", ",", "FIND_REPO_REGEXCMD", "[", "1", "]", ")", "else", ":", "cmd", "=", "(", "\"find\"", ",", "'-O3'", ",", "'-L'", ",", "# dereference symlinks", "where", ",", "# \" .\",", "\"-regextype\"", ",", "\"posix-egrep\"", ",", "FIND_REPO_REGEXCMD", "[", "0", "]", ",", "FIND_REPO_REGEXCMD", "[", "1", "]", ")", "_cmd", "=", "' '", ".", "join", "(", "cmd", ")", "log", ".", "debug", "(", "\"find_find_repos(%r) = %s\"", "%", "(", "where", ",", "_cmd", ")", ")", "kwargs", "=", "{", "#'shell': True,", "'cwd'", ":", "where", ",", "'stderr'", ":", "sys", ".", "stderr", ",", "'stdout'", ":", "subprocess", ".", "PIPE", ",", "}", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "universal_newlines", "=", "True", ",", "*", "*", "kwargs", ")", "if", "p", ".", "returncode", "and", "not", "ignore_error", ":", "p_stdout", "=", "p", ".", "communicate", "(", ")", "[", "0", "]", "raise", "Exception", "(", "\"Subprocess return code: %d\\n%r\\n%r\"", "%", "(", "p", ".", "returncode", ",", "cmd", ",", "p_stdout", ")", ")", "for", "l", "in", "iter", "(", "p", ".", "stdout", ".", "readline", ",", "''", ")", ":", "path", "=", "l", ".", "rstrip", "(", ")", "_path", ",", "_prefix", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", ",", "os", ".", "path", ".", "basename", "(", "path", ")", "repo", "=", "REPO_PREFIXES", ".", "get", "(", "_prefix", ")", "if", "repo", "is", "None", ":", "log", ".", "error", "(", "\"repo for path %r and prefix %r is None\"", "%", "(", "path", ",", "_prefix", ")", ")", "if", "repo", ":", "yield", "repo", "(", "_path", ")" ]
32.333333
14.098039
def _check_submodule_no_git(self):
    """
    Like ``_check_submodule_using_git``, but simply parses the .gitmodules
    file to determine if the supplied path is a git submodule, and does not
    exec any subprocesses.

    This can only determine if a path is a submodule--it does not perform
    updates, etc. This function may need to be updated if the format of
    the .gitmodules file is changed between git versions.
    """
    gitmodules_path = os.path.abspath('.gitmodules')

    if not os.path.isfile(gitmodules_path):
        return False

    # This is a minimal reader for gitconfig-style files. It handles a few of
    # the quirks that make gitconfig files incompatible with ConfigParser-style
    # files, but does not support the full gitconfig syntax (just enough
    # needed to read a .gitmodules file).
    gitmodules_fileobj = io.StringIO()

    # Must use io.open for cross-Python-compatible behavior wrt unicode
    with io.open(gitmodules_path) as f:
        for line in f:
            # gitconfig files are more flexible with leading whitespace; just
            # go ahead and remove it
            line = line.lstrip()

            # comments can start with either # or ;
            if line and line[0] in (':', ';'):
                continue

            gitmodules_fileobj.write(line)

    gitmodules_fileobj.seek(0)

    cfg = RawConfigParser()

    try:
        cfg.readfp(gitmodules_fileobj)
    except Exception as exc:
        log.warn('Malformatted .gitmodules file: {0}\n'
                 '{1} cannot be assumed to be a git submodule.'.format(
                     exc, self.path))
        return False

    for section in cfg.sections():
        if not cfg.has_option(section, 'path'):
            continue

        submodule_path = cfg.get(section, 'path').rstrip(os.sep)

        if submodule_path == self.path.rstrip(os.sep):
            return True

    return False
[ "def", "_check_submodule_no_git", "(", "self", ")", ":", "gitmodules_path", "=", "os", ".", "path", ".", "abspath", "(", "'.gitmodules'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "gitmodules_path", ")", ":", "return", "False", "# This is a minimal reader for gitconfig-style files. It handles a few of", "# the quirks that make gitconfig files incompatible with ConfigParser-style", "# files, but does not support the full gitconfig syntax (just enough", "# needed to read a .gitmodules file).", "gitmodules_fileobj", "=", "io", ".", "StringIO", "(", ")", "# Must use io.open for cross-Python-compatible behavior wrt unicode", "with", "io", ".", "open", "(", "gitmodules_path", ")", "as", "f", ":", "for", "line", "in", "f", ":", "# gitconfig files are more flexible with leading whitespace; just", "# go ahead and remove it", "line", "=", "line", ".", "lstrip", "(", ")", "# comments can start with either # or ;", "if", "line", "and", "line", "[", "0", "]", "in", "(", "':'", ",", "';'", ")", ":", "continue", "gitmodules_fileobj", ".", "write", "(", "line", ")", "gitmodules_fileobj", ".", "seek", "(", "0", ")", "cfg", "=", "RawConfigParser", "(", ")", "try", ":", "cfg", ".", "readfp", "(", "gitmodules_fileobj", ")", "except", "Exception", "as", "exc", ":", "log", ".", "warn", "(", "'Malformatted .gitmodules file: {0}\\n'", "'{1} cannot be assumed to be a git submodule.'", ".", "format", "(", "exc", ",", "self", ".", "path", ")", ")", "return", "False", "for", "section", "in", "cfg", ".", "sections", "(", ")", ":", "if", "not", "cfg", ".", "has_option", "(", "section", ",", "'path'", ")", ":", "continue", "submodule_path", "=", "cfg", ".", "get", "(", "section", ",", "'path'", ")", ".", "rstrip", "(", "os", ".", "sep", ")", "if", "submodule_path", "==", "self", ".", "path", ".", "rstrip", "(", "os", ".", "sep", ")", ":", "return", "True", "return", "False" ]
35.578947
22.877193
def _set_show_linkinfo(self, v, load=False):
    """
    Setter method for show_linkinfo, mapped from YANG variable
    /brocade_fabric_service_rpc/show_linkinfo (rpc)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_show_linkinfo is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_show_linkinfo() directly.

    YANG Description: Provides details of all the links connected in the
    fabric. This information is given in groups for all the RBridges in
    the fabric.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=show_linkinfo.show_linkinfo, is_leaf=True, yang_name="show-linkinfo", rest_name="show-linkinfo", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'show_linkinfo_all'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """show_linkinfo must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=show_linkinfo.show_linkinfo, is_leaf=True, yang_name="show-linkinfo", rest_name="show-linkinfo", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'show_linkinfo_all'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='rpc', is_config=True)""",
        })

    self.__show_linkinfo = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_show_linkinfo", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "show_linkinfo", ".", "show_linkinfo", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"show-linkinfo\"", ",", "rest_name", "=", "\"show-linkinfo\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "False", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'hidden'", ":", "u'rpccmd'", ",", "u'actionpoint'", ":", "u'show_linkinfo_all'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-fabric-service'", ",", "defining_module", "=", "'brocade-fabric-service'", ",", "yang_type", "=", "'rpc'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"show_linkinfo must be of a type compatible with rpc\"\"\"", ",", "'defined-type'", ":", "\"rpc\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=show_linkinfo.show_linkinfo, is_leaf=True, yang_name=\"show-linkinfo\", rest_name=\"show-linkinfo\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'show_linkinfo_all'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='rpc', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__show_linkinfo", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
69.076923
32.923077
def _writeBk(target="sentenceContainsTarget(+SID,+WID).", treeDepth="3", nodeSize="3", numOfClauses="8"): """ Writes a background file to disk. :param target: Target predicate with modes. :type target: str. :param treeDepth: Depth of the tree. :type treeDepth: str. :param nodeSize: Maximum size of each node in the tree. :type nodeSize: str. :param numOfClauses: Number of clauses in total. :type numOfClauses: str. """ with open('bk.txt', 'w') as bk: bk.write("useStdLogicVariables: true\n") bk.write("setParam: treeDepth=" + str(treeDepth) + '.\n') bk.write("setParam: nodeSize=" + str(nodeSize) + '.\n') bk.write("setParam: numOfClauses=" + str(numOfClauses) + '.\n') bk.write("mode: nextSentenceInBlock(+BID,+SID,-SID).\n") bk.write("mode: nextSentenceInBlock(+BID,-SID,+SID).\n") bk.write("mode: earlySentenceInBlock(+BID,-SID).\n") bk.write("mode: midWaySentenceInBlock(+BID,-SID).\n") bk.write("mode: lateSentenceInBlock(+BID,-SID).\n") bk.write("mode: sentenceInBlock(-SID,+BID).\n") bk.write("mode: wordString(+WID,#WSTR).\n") bk.write("mode: partOfSpeechTag(+WID,#WPOS).\n") bk.write("mode: nextWordInSentence(+SID,+WID,-WID).\n") bk.write("mode: earlyWordInSentence(+SID,-WID).\n") bk.write("mode: midWayWordInSentence(+SID,-WID).\n") bk.write("mode: lateWordInSentence(+SID,-WID).\n") bk.write("mode: wordInSentence(-WID,+SID).\n") bk.write("mode: " + target + "\n") return
[ "def", "_writeBk", "(", "target", "=", "\"sentenceContainsTarget(+SID,+WID).\"", ",", "treeDepth", "=", "\"3\"", ",", "nodeSize", "=", "\"3\"", ",", "numOfClauses", "=", "\"8\"", ")", ":", "with", "open", "(", "'bk.txt'", ",", "'w'", ")", "as", "bk", ":", "bk", ".", "write", "(", "\"useStdLogicVariables: true\\n\"", ")", "bk", ".", "write", "(", "\"setParam: treeDepth=\"", "+", "str", "(", "treeDepth", ")", "+", "'.\\n'", ")", "bk", ".", "write", "(", "\"setParam: nodeSize=\"", "+", "str", "(", "nodeSize", ")", "+", "'.\\n'", ")", "bk", ".", "write", "(", "\"setParam: numOfClauses=\"", "+", "str", "(", "numOfClauses", ")", "+", "'.\\n'", ")", "bk", ".", "write", "(", "\"mode: nextSentenceInBlock(+BID,+SID,-SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: nextSentenceInBlock(+BID,-SID,+SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: earlySentenceInBlock(+BID,-SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: midWaySentenceInBlock(+BID,-SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: lateSentenceInBlock(+BID,-SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: sentenceInBlock(-SID,+BID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: wordString(+WID,#WSTR).\\n\"", ")", "bk", ".", "write", "(", "\"mode: partOfSpeechTag(+WID,#WPOS).\\n\"", ")", "bk", ".", "write", "(", "\"mode: nextWordInSentence(+SID,+WID,-WID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: earlyWordInSentence(+SID,-WID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: midWayWordInSentence(+SID,-WID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: lateWordInSentence(+SID,-WID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: wordInSentence(-WID,+SID).\\n\"", ")", "bk", ".", "write", "(", "\"mode: \"", "+", "target", "+", "\"\\n\"", ")", "return" ]
39.075
19.675
def should_regenerate(self, response):
    """ Check whether this page was originally generated more than
    BETTERCACHE_LOCAL_POSTCHECK seconds ago, in which case it is due
    for regeneration. """
    if response.has_header('Last-Modified'):
        last_modified = parse_http_date(response['Last-Modified'])
        next_regen = last_modified + settings.BETTERCACHE_LOCAL_POSTCHECK
        return time.time() > next_regen
[ "def", "should_regenerate", "(", "self", ",", "response", ")", ":", "if", "response", ".", "has_header", "(", "'Last-Modified'", ")", ":", "last_modified", "=", "parse_http_date", "(", "response", "[", "'Last-Modified'", "]", ")", "next_regen", "=", "last_modified", "+", "settings", ".", "BETTERCACHE_LOCAL_POSTCHECK", "return", "time", ".", "time", "(", ")", ">", "next_regen" ]
53.285714
17.142857
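A self-contained sketch of the same postcheck logic, with the Django helpers (`parse_http_date`, `settings`) swapped for stdlib stand-ins; the 60-second window is a hypothetical setting.

import time
from email.utils import parsedate_to_datetime

LOCAL_POSTCHECK = 60  # hypothetical setting: pages older than 60s are stale

def should_regenerate(last_modified_header):
    # Same check as above: True once the page is older than the window.
    last_modified = parsedate_to_datetime(last_modified_header).timestamp()
    return time.time() > last_modified + LOCAL_POSTCHECK

print(should_regenerate('Mon, 01 Jan 2024 00:00:00 GMT'))  # True once that date is in the past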
def apply_conversation_reference(activity: Activity, reference: ConversationReference, is_incoming: bool=False) -> Activity: """ Updates an activity with the delivery information from a conversation reference. Calling this after get_conversation_reference on an incoming activity will properly address the reply to a received activity. :param activity: :param reference: :param is_incoming: :return: """ activity.channel_id = reference.channel_id activity.service_url = reference.service_url activity.conversation = reference.conversation if is_incoming: activity.from_property = reference.user activity.recipient = reference.bot if reference.activity_id: activity.id = reference.activity_id else: activity.from_property = reference.bot activity.recipient = reference.user if reference.activity_id: activity.reply_to_id = reference.activity_id return activity
[ "def", "apply_conversation_reference", "(", "activity", ":", "Activity", ",", "reference", ":", "ConversationReference", ",", "is_incoming", ":", "bool", "=", "False", ")", "->", "Activity", ":", "activity", ".", "channel_id", "=", "reference", ".", "channel_id", "activity", ".", "service_url", "=", "reference", ".", "service_url", "activity", ".", "conversation", "=", "reference", ".", "conversation", "if", "is_incoming", ":", "activity", ".", "from_property", "=", "reference", ".", "user", "activity", ".", "recipient", "=", "reference", ".", "bot", "if", "reference", ".", "activity_id", ":", "activity", ".", "id", "=", "reference", ".", "activity_id", "else", ":", "activity", ".", "from_property", "=", "reference", ".", "bot", "activity", ".", "recipient", "=", "reference", ".", "user", "if", "reference", ".", "activity_id", ":", "activity", ".", "reply_to_id", "=", "reference", ".", "activity_id", "return", "activity" ]
41.925926
17.037037
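A minimal sketch of the outgoing branch above, using plain stand-in objects rather than the Bot Framework Activity and ConversationReference types; every field value here is hypothetical.

from types import SimpleNamespace

ref = SimpleNamespace(channel_id='emulator', service_url='http://localhost:3979',
                      conversation='conv-1', user='user-1', bot='bot-1',
                      activity_id='42')
reply = SimpleNamespace()

# Outgoing direction (is_incoming=False): the bot is the sender, the user is
# the recipient, and the original activity id becomes the reply target.
reply.channel_id = ref.channel_id
reply.service_url = ref.service_url
reply.conversation = ref.conversation
reply.from_property = ref.bot
reply.recipient = ref.user
reply.reply_to_id = ref.activity_id
print(reply.reply_to_id)  # 42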
def discover_yaml(bank=None, **meta): """Discovers the YAML format and registers it if available. Install YAML support via PIP:: pip install PyYAML :param bank: The format bank to register the format in :param meta: Extra information associated with the format """ try: import yaml if bank is None: bank = default_bank bank.register('yaml', yaml.load, yaml.dump, **meta) except ImportError: pass
[ "def", "discover_yaml", "(", "bank", "=", "None", ",", "*", "*", "meta", ")", ":", "try", ":", "import", "yaml", "if", "bank", "is", "None", ":", "bank", "=", "default_bank", "bank", ".", "register", "(", "'yaml'", ",", "yaml", ".", "load", ",", "yaml", ".", "dump", ",", "*", "*", "meta", ")", "except", "ImportError", ":", "pass" ]
27.176471
19.058824
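Usage sketch for `discover_yaml`: the bank object only needs a `register(name, loads, dumps, **meta)` method, so a toy stand-in is enough to show the behavior. The try/except keeps discovery optional; if PyYAML is missing, the bank simply never learns the 'yaml' format.

class ToyBank:
    # Hypothetical stand-in for the format bank the function expects.
    def __init__(self):
        self.formats = {}

    def register(self, name, loads, dumps, **meta):
        self.formats[name] = (loads, dumps, meta)

bank = ToyBank()
discover_yaml(bank, extension='.yml')
print('yaml' in bank.formats)  # True when PyYAML is installed, False otherwise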
def _rd_dat_file(file_name, dir_name, pb_dir, fmt, start_byte, n_samp): """ Read data from a dat file, either local or remote, into a 1d numpy array. This is the lowest level dat reading function (along with `_stream_dat` which this function may call), and is called by `_rd_dat_signals`. Parameters ---------- start_byte : int The starting byte number to read from. n_samp : int The total number of samples to read. Does NOT need to create whole blocks for special format. Any number of samples should be readable. * other params See docstring for `_rd_dat_signals` Returns ------- sig_data : numpy array The data read from the dat file. The dtype varies depending on fmt. Byte aligned fmts are read in their final required format. Unaligned formats are read as uint8 to be further processed. Notes ----- See docstring notes for `_rd_dat_signals` """ # element_count is the number of elements to read using np.fromfile # for local files # byte_count is the number of bytes to read for streaming files if fmt == '212': byte_count = _required_byte_num('read', '212', n_samp) element_count = byte_count elif fmt in ['310', '311']: byte_count = _required_byte_num('read', fmt, n_samp) element_count = byte_count else: element_count = n_samp byte_count = n_samp * BYTES_PER_SAMPLE[fmt] # Local dat file if pb_dir is None: with open(os.path.join(dir_name, file_name), 'rb') as fp: fp.seek(start_byte) sig_data = np.fromfile(fp, dtype=np.dtype(DATA_LOAD_TYPES[fmt]), count=element_count) # Stream dat file from physiobank else: sig_data = download._stream_dat(file_name, pb_dir, byte_count, start_byte, np.dtype(DATA_LOAD_TYPES[fmt])) return sig_data
[ "def", "_rd_dat_file", "(", "file_name", ",", "dir_name", ",", "pb_dir", ",", "fmt", ",", "start_byte", ",", "n_samp", ")", ":", "# element_count is the number of elements to read using np.fromfile", "# for local files", "# byte_count is the number of bytes to read for streaming files", "if", "fmt", "==", "'212'", ":", "byte_count", "=", "_required_byte_num", "(", "'read'", ",", "'212'", ",", "n_samp", ")", "element_count", "=", "byte_count", "elif", "fmt", "in", "[", "'310'", ",", "'311'", "]", ":", "byte_count", "=", "_required_byte_num", "(", "'read'", ",", "fmt", ",", "n_samp", ")", "element_count", "=", "byte_count", "else", ":", "element_count", "=", "n_samp", "byte_count", "=", "n_samp", "*", "BYTES_PER_SAMPLE", "[", "fmt", "]", "# Local dat file", "if", "pb_dir", "is", "None", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dir_name", ",", "file_name", ")", ",", "'rb'", ")", "as", "fp", ":", "fp", ".", "seek", "(", "start_byte", ")", "sig_data", "=", "np", ".", "fromfile", "(", "fp", ",", "dtype", "=", "np", ".", "dtype", "(", "DATA_LOAD_TYPES", "[", "fmt", "]", ")", ",", "count", "=", "element_count", ")", "# Stream dat file from physiobank", "else", ":", "sig_data", "=", "download", ".", "_stream_dat", "(", "file_name", ",", "pb_dir", ",", "byte_count", ",", "start_byte", ",", "np", ".", "dtype", "(", "DATA_LOAD_TYPES", "[", "fmt", "]", ")", ")", "return", "sig_data" ]
33.508475
23.305085
def format_parameters(params): '''Reformat parameters into dict of format expected by the API.''' if not params: return {} # expect multiple invocations of --parameters but fall back # to ; delimited if only one --parameters is specified if len(params) == 1: if params[0].find(';') != -1: # found params = params[0].split(';') else: params = params[0].split(',') parameters = {} for p in params: try: (n, v) = p.split('=', 1) except ValueError: msg = '%s(%s). %s.' % ('Malformed parameter', p, 'Use the key=value format') raise exc.CommandError(msg) if n not in parameters: parameters[n] = v else: if not isinstance(parameters[n], list): parameters[n] = [parameters[n]] parameters[n].append(v) return parameters
[ "def", "format_parameters", "(", "params", ")", ":", "if", "not", "params", ":", "return", "{", "}", "# expect multiple invocations of --parameters but fall back", "# to ; delimited if only one --parameters is specified", "if", "len", "(", "params", ")", "==", "1", ":", "if", "params", "[", "0", "]", ".", "find", "(", "';'", ")", "!=", "-", "1", ":", "# found", "params", "=", "params", "[", "0", "]", ".", "split", "(", "';'", ")", "else", ":", "params", "=", "params", "[", "0", "]", ".", "split", "(", "','", ")", "parameters", "=", "{", "}", "for", "p", "in", "params", ":", "try", ":", "(", "n", ",", "v", ")", "=", "p", ".", "split", "(", "'='", ",", "1", ")", "except", "ValueError", ":", "msg", "=", "'%s(%s). %s.'", "%", "(", "'Malformed parameter'", ",", "p", ",", "'Use the key=value format'", ")", "raise", "exc", ".", "CommandError", "(", "msg", ")", "if", "n", "not", "in", "parameters", ":", "parameters", "[", "n", "]", "=", "v", "else", ":", "if", "not", "isinstance", "(", "parameters", "[", "n", "]", ",", "list", ")", ":", "parameters", "[", "n", "]", "=", "[", "parameters", "[", "n", "]", "]", "parameters", "[", "n", "]", ".", "append", "(", "v", ")", "return", "parameters" ]
29.741935
19.225806
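Worked examples for `format_parameters`, relying only on the function above: both delimiter styles yield the same mapping, and repeated keys collapse into lists.

print(format_parameters(['a=1;b=2']))      # {'a': '1', 'b': '2'}
print(format_parameters(['a=1,a=2,b=3']))  # {'a': ['1', '2'], 'b': '3'}
print(format_parameters(['key=v=1']))      # {'key': 'v=1'} -- split on the first '=' only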
def listdir(self, target_directory): """Return a list of file names in target_directory. Args: target_directory: Path to the target directory within the fake filesystem. Returns: A list of file names within the target directory in arbitrary order. Raises: OSError: if the target is not a directory. """ target_directory = self.resolve_path(target_directory, allow_fd=True) directory = self.confirmdir(target_directory) directory_contents = directory.contents return list(directory_contents.keys())
[ "def", "listdir", "(", "self", ",", "target_directory", ")", ":", "target_directory", "=", "self", ".", "resolve_path", "(", "target_directory", ",", "allow_fd", "=", "True", ")", "directory", "=", "self", ".", "confirmdir", "(", "target_directory", ")", "directory_contents", "=", "directory", ".", "contents", "return", "list", "(", "directory_contents", ".", "keys", "(", ")", ")" ]
34.388889
20.5
def formatDecimalMark(value, decimalmark='.'):
    """ Dummy method to replace the decimal mark in an input string.
        Assumes that 'value' uses '.' as decimal mark and ',' as thousand mark.
        ::value:: is a string
        ::returns:: is a string with the decimal mark replaced if needed
    """
    # We have to consider the possibility of working with decimals such as
    # X.000, where those trailing decimals are important because of precision
    # and significant digits.
    # Using 'float', the system drops the extra decimals with 0 as a value.
    # Example: float(2.00) -> 2.0
    # So we have to preserve the decimal length; this is one reason we are
    # using strings for results.
    rawval = str(value)
    try:
        return decimalmark.join(rawval.split('.'))
    except Exception:
        return rawval
[ "def", "formatDecimalMark", "(", "value", ",", "decimalmark", "=", "'.'", ")", ":", "# We have to consider the possibility of working with decimals such as", "# X.000 where those decimals are important because of the precission", "# and significant digits matters", "# Using 'float' the system delete the extre desimals with 0 as a value", "# Example: float(2.00) -> 2.0", "# So we have to save the decimal length, this is one reason we are usnig", "# strings for results", "rawval", "=", "str", "(", "value", ")", "try", ":", "return", "decimalmark", ".", "join", "(", "rawval", ".", "split", "(", "'.'", ")", ")", "except", ":", "return", "rawval" ]
40.35
18.85
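Worked examples for `formatDecimalMark`, using only the function above:

print(formatDecimalMark('2.000', ','))  # '2,000' -- trailing zeros preserved
print(formatDecimalMark('1500'))        # '1500'  -- nothing to replace
print(formatDecimalMark(2.5, ','))      # '2,5'   -- non-strings pass through str()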
def run_multiple_commands_redirect_stdout( multiple_args_dict, print_commands=True, process_limit=-1, polling_freq=0.5, **kwargs): """ Run multiple shell commands in parallel, write each of their stdout output to files associated with each command. Parameters ---------- multiple_args_dict : dict A dictionary whose keys are files and values are args list. Run each args list as a subprocess and write stdout to the corresponding file. print_commands : bool Print shell commands before running them. process_limit : int Limit the number of concurrent processes to this number. 0 if there is no limit, -1 to use max number of processors polling_freq : int Number of seconds between checking for done processes, if we have a process limit """ assert len(multiple_args_dict) > 0 assert all(len(args) > 0 for args in multiple_args_dict.values()) assert all(hasattr(f, 'name') for f in multiple_args_dict.keys()) if process_limit < 0: logger.debug("Using %d processes" % cpu_count()) process_limit = cpu_count() start_time = time.time() processes = Queue(maxsize=process_limit) def add_to_queue(process): process.start() if print_commands: handler = logging.FileHandler(process.redirect_stdout_file.name) handler.setLevel(logging.DEBUG) logger.addHandler(handler) logger.debug(" ".join(process.args)) logger.removeHandler(handler) processes.put(process) for f, args in multiple_args_dict.items(): p = AsyncProcess( args, redirect_stdout_file=f, **kwargs) if not processes.full(): add_to_queue(p) else: while processes.full(): # Are there any done processes? to_remove = [] for possibly_done in processes.queue: if possibly_done.poll() is not None: possibly_done.wait() to_remove.append(possibly_done) # Remove them from the queue and stop checking if to_remove: for process_to_remove in to_remove: processes.queue.remove(process_to_remove) break # Check again in a second if there weren't time.sleep(polling_freq) add_to_queue(p) # Wait for all the rest of the processes while not processes.empty(): processes.get().wait() elapsed_time = time.time() - start_time logger.info( "Ran %d commands in %0.4f seconds", len(multiple_args_dict), elapsed_time)
[ "def", "run_multiple_commands_redirect_stdout", "(", "multiple_args_dict", ",", "print_commands", "=", "True", ",", "process_limit", "=", "-", "1", ",", "polling_freq", "=", "0.5", ",", "*", "*", "kwargs", ")", ":", "assert", "len", "(", "multiple_args_dict", ")", ">", "0", "assert", "all", "(", "len", "(", "args", ")", ">", "0", "for", "args", "in", "multiple_args_dict", ".", "values", "(", ")", ")", "assert", "all", "(", "hasattr", "(", "f", ",", "'name'", ")", "for", "f", "in", "multiple_args_dict", ".", "keys", "(", ")", ")", "if", "process_limit", "<", "0", ":", "logger", ".", "debug", "(", "\"Using %d processes\"", "%", "cpu_count", "(", ")", ")", "process_limit", "=", "cpu_count", "(", ")", "start_time", "=", "time", ".", "time", "(", ")", "processes", "=", "Queue", "(", "maxsize", "=", "process_limit", ")", "def", "add_to_queue", "(", "process", ")", ":", "process", ".", "start", "(", ")", "if", "print_commands", ":", "handler", "=", "logging", ".", "FileHandler", "(", "process", ".", "redirect_stdout_file", ".", "name", ")", "handler", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "logger", ".", "addHandler", "(", "handler", ")", "logger", ".", "debug", "(", "\" \"", ".", "join", "(", "process", ".", "args", ")", ")", "logger", ".", "removeHandler", "(", "handler", ")", "processes", ".", "put", "(", "process", ")", "for", "f", ",", "args", "in", "multiple_args_dict", ".", "items", "(", ")", ":", "p", "=", "AsyncProcess", "(", "args", ",", "redirect_stdout_file", "=", "f", ",", "*", "*", "kwargs", ")", "if", "not", "processes", ".", "full", "(", ")", ":", "add_to_queue", "(", "p", ")", "else", ":", "while", "processes", ".", "full", "(", ")", ":", "# Are there any done processes?", "to_remove", "=", "[", "]", "for", "possibly_done", "in", "processes", ".", "queue", ":", "if", "possibly_done", ".", "poll", "(", ")", "is", "not", "None", ":", "possibly_done", ".", "wait", "(", ")", "to_remove", ".", "append", "(", "possibly_done", ")", "# Remove them from the queue and stop checking", "if", "to_remove", ":", "for", "process_to_remove", "in", "to_remove", ":", "processes", ".", "queue", ".", "remove", "(", "process_to_remove", ")", "break", "# Check again in a second if there weren't", "time", ".", "sleep", "(", "polling_freq", ")", "add_to_queue", "(", "p", ")", "# Wait for all the rest of the processes", "while", "not", "processes", ".", "empty", "(", ")", ":", "processes", ".", "get", "(", ")", ".", "wait", "(", ")", "elapsed_time", "=", "time", ".", "time", "(", ")", "-", "start_time", "logger", ".", "info", "(", "\"Ran %d commands in %0.4f seconds\"", ",", "len", "(", "multiple_args_dict", ")", ",", "elapsed_time", ")" ]
33.950617
16.493827
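A hypothetical invocation of the helper above: run two commands at once, one output file per command. The dict keys must be open file objects, since the function reads their `.name`; `AsyncProcess` is assumed to come from the same module.

import tempfile

out_ls = tempfile.NamedTemporaryFile('w', suffix='.log', delete=False)
out_date = tempfile.NamedTemporaryFile('w', suffix='.log', delete=False)
run_multiple_commands_redirect_stdout(
    {out_ls: ['ls', '-l'], out_date: ['date']},
    process_limit=2)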
def rmon_event_entry_log(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon") event_entry = ET.SubElement(rmon, "event-entry") event_index_key = ET.SubElement(event_entry, "event-index") event_index_key.text = kwargs.pop('event_index') log = ET.SubElement(event_entry, "log") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "rmon_event_entry_log", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "rmon", "=", "ET", ".", "SubElement", "(", "config", ",", "\"rmon\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-rmon\"", ")", "event_entry", "=", "ET", ".", "SubElement", "(", "rmon", ",", "\"event-entry\"", ")", "event_index_key", "=", "ET", ".", "SubElement", "(", "event_entry", ",", "\"event-index\"", ")", "event_index_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'event_index'", ")", "log", "=", "ET", ".", "SubElement", "(", "event_entry", ",", "\"log\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
43.333333
15.25
def _refresh_token(self): """ Retrieves the OAuth2 token generated by the user's API key and API secret. Sets the instance property 'token' to this new token. If the current token is still live, the server will simply return that. """ # use basic auth with API key and secret client_auth = requests.auth.HTTPBasicAuth(self.api_key, self.api_secret) # make request post_data = {"grant_type": "client_credentials"} response = requests.post(self.auth, auth=client_auth, data=post_data, proxies=self.proxies) self.last_response = response # raise exception if status code indicates an error if 400 <= response.status_code < 600: message = "{} {} Error (Trace-Id: {}): {}".format(response.status_code, "Client" if response.status_code < 500 else "Server", self._get_trace_id(response), "unable to get token") raise HTTPError(message, response=response) # set token property to the received token self.token = response.json()["access_token"]
[ "def", "_refresh_token", "(", "self", ")", ":", "# use basic auth with API key and secret", "client_auth", "=", "requests", ".", "auth", ".", "HTTPBasicAuth", "(", "self", ".", "api_key", ",", "self", ".", "api_secret", ")", "# make request", "post_data", "=", "{", "\"grant_type\"", ":", "\"client_credentials\"", "}", "response", "=", "requests", ".", "post", "(", "self", ".", "auth", ",", "auth", "=", "client_auth", ",", "data", "=", "post_data", ",", "proxies", "=", "self", ".", "proxies", ")", "self", ".", "last_response", "=", "response", "# raise exception if status code indicates an error", "if", "400", "<=", "response", ".", "status_code", "<", "600", ":", "message", "=", "\"{} {} Error (Trace-Id: {}): {}\"", ".", "format", "(", "response", ".", "status_code", ",", "\"Client\"", "if", "response", ".", "status_code", "<", "500", "else", "\"Server\"", ",", "self", ".", "_get_trace_id", "(", "response", ")", ",", "\"unable to get token\"", ")", "raise", "HTTPError", "(", "message", ",", "response", "=", "response", ")", "# set token property to the received token", "self", ".", "token", "=", "response", ".", "json", "(", ")", "[", "\"access_token\"", "]" ]
49.8
27.8
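A standalone sketch of the same client-credentials exchange with plain `requests`; the URL and credentials below are placeholders, not a real endpoint.

import requests

auth = requests.auth.HTTPBasicAuth('my-api-key', 'my-api-secret')
response = requests.post('https://auth.example.com/oauth2/token',
                         auth=auth,
                         data={'grant_type': 'client_credentials'})
response.raise_for_status()  # rough stand-in for the manual 4xx/5xx check above
token = response.json()['access_token']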
def decode_payload(self, specialize = False):
    """Decode payload from the element passed to the stanza constructor.

    Iterates over stanza children and creates StanzaPayload objects
    for them. Called automatically by `get_payload()` and other methods
    that access the payload.

    For the `Stanza` class, stanza namespace child elements will also be
    included as the payload. For subclasses these are not considered
    payload."""
    if self._payload is not None:
        # already decoded
        return
    if self._element is None:
        raise ValueError("This stanza has no element to decode")
    payload = []
    if specialize:
        factory = payload_factory
    else:
        factory = XMLPayload
    for child in self._element:
        if self.__class__ is not Stanza:
            if child.tag.startswith(self._ns_prefix):
                continue
        payload.append(factory(child))
    self._payload = payload
[ "def", "decode_payload", "(", "self", ",", "specialize", "=", "False", ")", ":", "if", "self", ".", "_payload", "is", "not", "None", ":", "# already decoded", "return", "if", "self", ".", "_element", "is", "None", ":", "raise", "ValueError", "(", "\"This stanza has no element to decode\"", "\"\"", ")", "payload", "=", "[", "]", "if", "specialize", ":", "factory", "=", "payload_factory", "else", ":", "factory", "=", "XMLPayload", "for", "child", "in", "self", ".", "_element", ":", "if", "self", ".", "__class__", "is", "not", "Stanza", ":", "if", "child", ".", "tag", ".", "startswith", "(", "self", ".", "_ns_prefix", ")", ":", "continue", "payload", ".", "append", "(", "factory", "(", "child", ")", ")", "self", ".", "_payload", "=", "payload" ]
38.923077
15.5
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ """ GET request """ course, _ = self.get_course_and_check_rights(courseid) return self.page(course)
[ "def", "GET_AUTH", "(", "self", ",", "courseid", ")", ":", "# pylint: disable=arguments-differ", "course", ",", "_", "=", "self", ".", "get_course_and_check_rights", "(", "courseid", ")", "return", "self", ".", "page", "(", "course", ")" ]
46.5
13.75
def dump(self, full=True, redact=False):
    """Dump the Configuration object to a YAML string.

    The order of the keys is determined from the default
    configuration file. All keys not in the default configuration
    will be appended to the end of the string.

    :param full: Dump settings that don't differ from the defaults
        as well
    :param redact: Remove sensitive information (views with the `redact`
        flag set) from the output
    """
    if full:
        out_dict = self.flatten(redact=redact)
    else:
        # Exclude defaults when flattening.
        sources = [s for s in self.sources if not s.default]
        temp_root = RootView(sources)
        temp_root.redactions = self.redactions
        out_dict = temp_root.flatten(redact=redact)

    yaml_out = yaml.dump(out_dict, Dumper=Dumper,
                         default_flow_style=None, indent=4,
                         width=1000)

    # Restore comments to the YAML text.
    default_source = None
    for source in self.sources:
        if source.default:
            default_source = source
            break
    if default_source and default_source.filename:
        with open(default_source.filename, 'rb') as fp:
            default_data = fp.read()
        yaml_out = restore_yaml_comments(yaml_out,
                                         default_data.decode('utf-8'))

    return yaml_out
[ "def", "dump", "(", "self", ",", "full", "=", "True", ",", "redact", "=", "False", ")", ":", "if", "full", ":", "out_dict", "=", "self", ".", "flatten", "(", "redact", "=", "redact", ")", "else", ":", "# Exclude defaults when flattening.", "sources", "=", "[", "s", "for", "s", "in", "self", ".", "sources", "if", "not", "s", ".", "default", "]", "temp_root", "=", "RootView", "(", "sources", ")", "temp_root", ".", "redactions", "=", "self", ".", "redactions", "out_dict", "=", "temp_root", ".", "flatten", "(", "redact", "=", "redact", ")", "yaml_out", "=", "yaml", ".", "dump", "(", "out_dict", ",", "Dumper", "=", "Dumper", ",", "default_flow_style", "=", "None", ",", "indent", "=", "4", ",", "width", "=", "1000", ")", "# Restore comments to the YAML text.", "default_source", "=", "None", "for", "source", "in", "self", ".", "sources", ":", "if", "source", ".", "default", ":", "default_source", "=", "source", "break", "if", "default_source", "and", "default_source", ".", "filename", ":", "with", "open", "(", "default_source", ".", "filename", ",", "'rb'", ")", "as", "fp", ":", "default_data", "=", "fp", ".", "read", "(", ")", "yaml_out", "=", "restore_yaml_comments", "(", "yaml_out", ",", "default_data", ".", "decode", "(", "'utf-8'", ")", ")", "return", "yaml_out" ]
41.268293
17.585366
def make_model(corpus, lemmatize=False, rm_stops=False, size=100, window=10, min_count=5, workers=4, sg=1, save_path=None): """Train W2V model.""" # Simple training, with one large list t0 = time.time() sentences_stream = gen_docs(corpus, lemmatize=lemmatize, rm_stops=rm_stops) # sentences_list = [] # for sent in sentences_stream: # sentences_list.append(sent) model = Word2Vec(sentences=list(sentences_stream), size=size, window=window, min_count=min_count, workers=workers, sg=sg) # "Trim" the model of unnecessary data. Model cannot be updated anymore. model.init_sims(replace=True) if save_path: save_path = os.path.expanduser(save_path) model.save(save_path) print('Total training time for {0}: {1} minutes'.format(save_path, (time.time() - t0) / 60))
[ "def", "make_model", "(", "corpus", ",", "lemmatize", "=", "False", ",", "rm_stops", "=", "False", ",", "size", "=", "100", ",", "window", "=", "10", ",", "min_count", "=", "5", ",", "workers", "=", "4", ",", "sg", "=", "1", ",", "save_path", "=", "None", ")", ":", "# Simple training, with one large list", "t0", "=", "time", ".", "time", "(", ")", "sentences_stream", "=", "gen_docs", "(", "corpus", ",", "lemmatize", "=", "lemmatize", ",", "rm_stops", "=", "rm_stops", ")", "# sentences_list = []", "# for sent in sentences_stream:", "# sentences_list.append(sent)", "model", "=", "Word2Vec", "(", "sentences", "=", "list", "(", "sentences_stream", ")", ",", "size", "=", "size", ",", "window", "=", "window", ",", "min_count", "=", "min_count", ",", "workers", "=", "workers", ",", "sg", "=", "sg", ")", "# \"Trim\" the model of unnecessary data. Model cannot be updated anymore.", "model", ".", "init_sims", "(", "replace", "=", "True", ")", "if", "save_path", ":", "save_path", "=", "os", ".", "path", ".", "expanduser", "(", "save_path", ")", "model", ".", "save", "(", "save_path", ")", "print", "(", "'Total training time for {0}: {1} minutes'", ".", "format", "(", "save_path", ",", "(", "time", ".", "time", "(", ")", "-", "t0", ")", "/", "60", ")", ")" ]
36.73913
27.521739
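A hypothetical call to `make_model`: the corpus name is illustrative (whatever `gen_docs` in the same module understands), and the keyword arguments follow the signature above.

make_model('phi5', lemmatize=True, rm_stops=True, size=200, window=5,
           min_count=10, workers=4, save_path='~/word2vec_phi5.model')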
def hpd_credible_interval(mu_in, post, alpha=0.9, tolerance=1e-3): ''' Returns the minimum and maximum rate values of the HPD (Highest Posterior Density) credible interval for a posterior post defined at the sample values mu_in. Samples need not be uniformly spaced and posterior need not be normalized. Will not return a correct credible interval if the posterior is multimodal and the correct interval is not contiguous; in this case will over-cover by including the whole range from minimum to maximum mu. ''' if alpha == 1: nonzero_samples = mu_in[post > 0] mu_low = numpy.min(nonzero_samples) mu_high = numpy.max(nonzero_samples) elif 0 < alpha < 1: # determine the highest PDF for which the region with # higher density has sufficient coverage pthresh = hpd_threshold(mu_in, post, alpha, tol=tolerance) samples_over_threshold = mu_in[post > pthresh] mu_low = numpy.min(samples_over_threshold) mu_high = numpy.max(samples_over_threshold) return mu_low, mu_high
[ "def", "hpd_credible_interval", "(", "mu_in", ",", "post", ",", "alpha", "=", "0.9", ",", "tolerance", "=", "1e-3", ")", ":", "if", "alpha", "==", "1", ":", "nonzero_samples", "=", "mu_in", "[", "post", ">", "0", "]", "mu_low", "=", "numpy", ".", "min", "(", "nonzero_samples", ")", "mu_high", "=", "numpy", ".", "max", "(", "nonzero_samples", ")", "elif", "0", "<", "alpha", "<", "1", ":", "# determine the highest PDF for which the region with", "# higher density has sufficient coverage", "pthresh", "=", "hpd_threshold", "(", "mu_in", ",", "post", ",", "alpha", ",", "tol", "=", "tolerance", ")", "samples_over_threshold", "=", "mu_in", "[", "post", ">", "pthresh", "]", "mu_low", "=", "numpy", ".", "min", "(", "samples_over_threshold", ")", "mu_high", "=", "numpy", ".", "max", "(", "samples_over_threshold", ")", "return", "mu_low", ",", "mu_high" ]
42.72
19.76
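A sanity check for `hpd_credible_interval`, assuming the companion `hpd_threshold` helper referenced above is importable: for a unit Gaussian sampled on a grid, the 90% HPD interval is symmetric and should land near +/-1.645.

import numpy
from scipy.stats import norm

mu_in = numpy.linspace(-5.0, 5.0, 10001)
post = norm.pdf(mu_in)  # need not be normalized
low, high = hpd_credible_interval(mu_in, post, alpha=0.9)
print(low, high)  # approximately -1.645 1.645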
def push_activations(activations, from_layer, to_layer): """Push activations from one model to another using prerecorded correlations""" inverse_covariance_matrix = layer_inverse_covariance(from_layer) activations_decorrelated = np.dot(inverse_covariance_matrix, activations.T).T covariance_matrix = layer_covariance(from_layer, to_layer) activation_recorrelated = np.dot(activations_decorrelated, covariance_matrix) return activation_recorrelated
[ "def", "push_activations", "(", "activations", ",", "from_layer", ",", "to_layer", ")", ":", "inverse_covariance_matrix", "=", "layer_inverse_covariance", "(", "from_layer", ")", "activations_decorrelated", "=", "np", ".", "dot", "(", "inverse_covariance_matrix", ",", "activations", ".", "T", ")", ".", "T", "covariance_matrix", "=", "layer_covariance", "(", "from_layer", ",", "to_layer", ")", "activation_recorrelated", "=", "np", ".", "dot", "(", "activations_decorrelated", ",", "covariance_matrix", ")", "return", "activation_recorrelated" ]
66.428571
22
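A self-contained sketch of the decorrelate-then-recorrelate step above, with toy 2x2 matrices standing in for the prerecorded layer statistics.

import numpy as np

inverse_covariance_matrix = np.array([[2.0, 0.0], [0.0, 0.5]])
covariance_matrix = np.array([[1.0, 0.1], [0.1, 1.0]])
activations = np.array([[0.5, 1.0]])  # one activation vector

decorrelated = np.dot(inverse_covariance_matrix, activations.T).T
recorrelated = np.dot(decorrelated, covariance_matrix)
print(recorrelated)  # [[1.05 0.6]]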
def legacy_events_view(request): """ View to see legacy events. """ events = TeacherEvent.objects.all() event_count = events.count() paginator = Paginator(events, 100) page = request.GET.get('page') try: events = paginator.page(page) except PageNotAnInteger: events = paginator.page(1) except EmptyPage: events = paginator.page(paginator.num_pages) return render_to_response( 'teacher_events.html', {'page_name': "Legacy Events", 'events': events, 'event_count': event_count,}, context_instance=RequestContext(request) )
[ "def", "legacy_events_view", "(", "request", ")", ":", "events", "=", "TeacherEvent", ".", "objects", ".", "all", "(", ")", "event_count", "=", "events", ".", "count", "(", ")", "paginator", "=", "Paginator", "(", "events", ",", "100", ")", "page", "=", "request", ".", "GET", ".", "get", "(", "'page'", ")", "try", ":", "events", "=", "paginator", ".", "page", "(", "page", ")", "except", "PageNotAnInteger", ":", "events", "=", "paginator", ".", "page", "(", "1", ")", "except", "EmptyPage", ":", "events", "=", "paginator", ".", "page", "(", "paginator", ".", "num_pages", ")", "return", "render_to_response", "(", "'teacher_events.html'", ",", "{", "'page_name'", ":", "\"Legacy Events\"", ",", "'events'", ":", "events", ",", "'event_count'", ":", "event_count", ",", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ")" ]
27.909091
10.909091
def _generate_walks(self): """ Generates the random walks which will be used as the skip-gram input. :return: List of walks. Each walk is a list of nodes. """ flatten = lambda l: [item for sublist in l for item in sublist] # Split num_walks for each worker num_walks_lists = np.array_split(range(self.num_walks), self.workers) walk_results = Parallel(n_jobs=self.workers, temp_folder=self.temp_folder, require=self.require)( delayed(parallel_generate_walks)(self.d_graph, self.walk_length, len(num_walks), idx, self.sampling_strategy, self.NUM_WALKS_KEY, self.WALK_LENGTH_KEY, self.NEIGHBORS_KEY, self.PROBABILITIES_KEY, self.FIRST_TRAVEL_KEY, self.quiet) for idx, num_walks in enumerate(num_walks_lists, 1)) walks = flatten(walk_results) return walks
[ "def", "_generate_walks", "(", "self", ")", ":", "flatten", "=", "lambda", "l", ":", "[", "item", "for", "sublist", "in", "l", "for", "item", "in", "sublist", "]", "# Split num_walks for each worker", "num_walks_lists", "=", "np", ".", "array_split", "(", "range", "(", "self", ".", "num_walks", ")", ",", "self", ".", "workers", ")", "walk_results", "=", "Parallel", "(", "n_jobs", "=", "self", ".", "workers", ",", "temp_folder", "=", "self", ".", "temp_folder", ",", "require", "=", "self", ".", "require", ")", "(", "delayed", "(", "parallel_generate_walks", ")", "(", "self", ".", "d_graph", ",", "self", ".", "walk_length", ",", "len", "(", "num_walks", ")", ",", "idx", ",", "self", ".", "sampling_strategy", ",", "self", ".", "NUM_WALKS_KEY", ",", "self", ".", "WALK_LENGTH_KEY", ",", "self", ".", "NEIGHBORS_KEY", ",", "self", ".", "PROBABILITIES_KEY", ",", "self", ".", "FIRST_TRAVEL_KEY", ",", "self", ".", "quiet", ")", "for", "idx", ",", "num_walks", "in", "enumerate", "(", "num_walks_lists", ",", "1", ")", ")", "walks", "=", "flatten", "(", "walk_results", ")", "return", "walks" ]
44.62069
23.931034
def update_user(self, id, **kwargs): # noqa: E501 """Update user with given user groups and permissions. # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_user(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param UserRequestDTO body: Example Body: <pre>{ \"identifier\": \"[email protected]\", \"groups\": [ \"user_management\" ], \"userGroups\": [ \"8b23136b-ecd2-4cb5-8c92-62477dcc4090\" ] }</pre> :return: UserModel If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.update_user_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.update_user_with_http_info(id, **kwargs) # noqa: E501 return data
[ "def", "update_user", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "update_user_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "update_user_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
48.5
25.090909
def execute_command(self):
    """
    The web command runs the Scrapple web interface through a simple \
    `Flask <http://flask.pocoo.org>`_ app.

    When the execute_command() method is called from the \
    :ref:`runCLI() <implementation-cli>` function, it starts two simultaneous \
    processes:

    - Calls the run_flask() method to start the Flask app on port 5000 of localhost
    - Opens the web interface on a web browser

    The '/' view of the Flask app opens up the Scrapple web interface. This \
    provides a basic form to fill in the required configuration file. On submitting \
    the form, it makes a POST request, passing in the form in the request header. \
    This form is passed to the form_to_json() \
    :ref:`utility function <implementation-utils>`, where the form is converted into \
    the resultant JSON configuration file.

    Currently, closing the web command execution requires making a keyboard interrupt \
    on the command line after the web interface has been closed.

    """
    print(Back.GREEN + Fore.BLACK + "Scrapple Web Interface")
    print(Back.RESET + Fore.RESET)
    p1 = Process(target = self.run_flask)
    p2 = Process(target = lambda : webbrowser.open('http://127.0.0.1:5000'))
    p1.start()
    p2.start()
[ "def", "execute_command", "(", "self", ")", ":", "print", "(", "Back", ".", "GREEN", "+", "Fore", ".", "BLACK", "+", "\"Scrapple Web Interface\"", ")", "print", "(", "Back", ".", "RESET", "+", "Fore", ".", "RESET", ")", "p1", "=", "Process", "(", "target", "=", "self", ".", "run_flask", ")", "p2", "=", "Process", "(", "target", "=", "lambda", ":", "webbrowser", ".", "open", "(", "'http://127.0.0.1:5000'", ")", ")", "p1", ".", "start", "(", ")", "p2", ".", "start", "(", ")" ]
46.310345
27.62069
def copy(src, dst, symlink=False, rellink=False): """Copy or symlink the file.""" func = os.symlink if symlink else shutil.copy2 if symlink and os.path.lexists(dst): os.remove(dst) if rellink: # relative symlink from dst func(os.path.relpath(src, os.path.dirname(dst)), dst) else: func(src, dst)
[ "def", "copy", "(", "src", ",", "dst", ",", "symlink", "=", "False", ",", "rellink", "=", "False", ")", ":", "func", "=", "os", ".", "symlink", "if", "symlink", "else", "shutil", ".", "copy2", "if", "symlink", "and", "os", ".", "path", ".", "lexists", "(", "dst", ")", ":", "os", ".", "remove", "(", "dst", ")", "if", "rellink", ":", "# relative symlink from dst", "func", "(", "os", ".", "path", ".", "relpath", "(", "src", ",", "os", ".", "path", ".", "dirname", "(", "dst", ")", ")", ",", "dst", ")", "else", ":", "func", "(", "src", ",", "dst", ")" ]
36.888889
12.333333
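Usage sketch for `copy`, assuming both directories already exist: a plain copy, or a relative symlink computed from the destination's directory.

copy('src/data.txt', 'build/data.txt')
copy('src/data.txt', 'build/data.txt', symlink=True, rellink=True)
# build/data.txt -> ../src/data.txt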
def get_private_room_history(self, room_id, oldest=None, **kwargs):
    """
    Get the message history of a specific private group.
    :param room_id: ID of the private group
    :param oldest: The date of the oldest message to retrieve (optional)
    :param kwargs:
    :return:
    """
    return GetPrivateRoomHistory(settings=self.settings, **kwargs).call(
        room_id=room_id,
        oldest=oldest,
        **kwargs
    )
[ "def", "get_private_room_history", "(", "self", ",", "room_id", ",", "oldest", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "GetPrivateRoomHistory", "(", "settings", "=", "self", ".", "settings", ",", "*", "*", "kwargs", ")", ".", "call", "(", "room_id", "=", "room_id", ",", "oldest", "=", "oldest", ",", "*", "*", "kwargs", ")" ]
29.461538
21
def whichchain(atom):
    """Returns the chain of the residue of a PyBel or OpenBabel atom."""
    atom = atom if not isinstance(atom, Atom) else atom.OBAtom  # Convert to OpenBabel Atom
    return atom.GetResidue().GetChain() if atom.GetResidue() is not None else None
[ "def", "whichchain", "(", "atom", ")", ":", "atom", "=", "atom", "if", "not", "isinstance", "(", "atom", ",", "Atom", ")", "else", "atom", ".", "OBAtom", "# Convert to OpenBabel Atom", "return", "atom", ".", "GetResidue", "(", ")", ".", "GetChain", "(", ")", "if", "atom", ".", "GetResidue", "(", ")", "is", "not", "None", "else", "None" ]
65.25
28
def all(self):
    r"""Returns all content in this node, including whitespace. This
    includes all LaTeX needed to reconstruct the original source.

    >>> from TexSoup import TexSoup
    >>> soup = TexSoup(r'''
    ... \newcommand{reverseconcat}[3]{#3#2#1}
    ... ''')
    >>> list(soup.all)
    ['\n', \newcommand{reverseconcat}[3]{#3#2#1}, '\n']
    """
    for child in self.expr.all:
        if isinstance(child, TexExpr):
            node = TexNode(child)
            node.parent = self
            yield node
        else:
            yield child
[ "def", "all", "(", "self", ")", ":", "for", "child", "in", "self", ".", "expr", ".", "all", ":", "if", "isinstance", "(", "child", ",", "TexExpr", ")", ":", "node", "=", "TexNode", "(", "child", ")", "node", ".", "parent", "=", "self", "yield", "node", "else", ":", "yield", "child" ]
34.166667
13.722222
def as_dict(self): """Return the configuration as a dict""" dictionary = {} for section in self.parser.sections(): dictionary[section] = {} for option in self.parser.options(section): dictionary[section][option] = self.parser.get(section, option) return dictionary
[ "def", "as_dict", "(", "self", ")", ":", "dictionary", "=", "{", "}", "for", "section", "in", "self", ".", "parser", ".", "sections", "(", ")", ":", "dictionary", "[", "section", "]", "=", "{", "}", "for", "option", "in", "self", ".", "parser", ".", "options", "(", "section", ")", ":", "dictionary", "[", "section", "]", "[", "option", "]", "=", "self", ".", "parser", ".", "get", "(", "section", ",", "option", ")", "return", "dictionary" ]
41.125
14.625
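The same section/option flattening as `as_dict`, shown self-contained with a stdlib ConfigParser:

from configparser import ConfigParser

parser = ConfigParser()
parser.read_string('[server]\nhost = localhost\nport = 8080\n')
print({s: dict(parser.items(s)) for s in parser.sections()})
# {'server': {'host': 'localhost', 'port': '8080'}}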
def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth, functions): """Randomly generate a symbolic integral dataset sample. Given an input expression, produce the indefinite integral. Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. min_depth: Expression trees will not have a smaller depth than this. 0 means there is just a variable. 1 means there is one operation. max_depth: Expression trees will not have a larger depth than this. To make all trees have the same depth, set this equal to `min_depth`. functions: Defines special functions. A dict mapping human readable string names, like "log", "exp", "sin", "cos", etc., to single chars. Each function gets a unique token, like "L" for "log". Returns: sample: String representation of the input. Will be of the form 'var:expression'. target: String representation of the solution. """ var_index = random.randrange(len(vlist)) var = vlist[var_index] consts = vlist[:var_index] + vlist[var_index + 1:] depth = random.randrange(min_depth, max_depth + 1) expr = random_expr_with_required_var(depth, var, consts, ops) expr_str = str(expr) sample = var + ":" + expr_str target = format_sympy_expr( sympy.integrate(expr_str, sympy.Symbol(var)), functions=functions) return sample, target
[ "def", "generate_calculus_integrate_sample", "(", "vlist", ",", "ops", ",", "min_depth", ",", "max_depth", ",", "functions", ")", ":", "var_index", "=", "random", ".", "randrange", "(", "len", "(", "vlist", ")", ")", "var", "=", "vlist", "[", "var_index", "]", "consts", "=", "vlist", "[", ":", "var_index", "]", "+", "vlist", "[", "var_index", "+", "1", ":", "]", "depth", "=", "random", ".", "randrange", "(", "min_depth", ",", "max_depth", "+", "1", ")", "expr", "=", "random_expr_with_required_var", "(", "depth", ",", "var", ",", "consts", ",", "ops", ")", "expr_str", "=", "str", "(", "expr", ")", "sample", "=", "var", "+", "\":\"", "+", "expr_str", "target", "=", "format_sympy_expr", "(", "sympy", ".", "integrate", "(", "expr_str", ",", "sympy", ".", "Symbol", "(", "var", ")", ")", ",", "functions", "=", "functions", ")", "return", "sample", ",", "target" ]
43.205882
24.235294
def work_request(self, worker_name, md5, subkeys=None): """ Make a work request for an existing stored sample. Args: worker_name: 'strings', 'pe_features', whatever md5: the md5 of the sample (or sample_set!) subkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all) Returns: The output of the worker. """ # Pull the worker output work_results = self._recursive_work_resolver(worker_name, md5) # Subkeys (Fixme this is super klutzy) if subkeys: if isinstance(subkeys, str): subkeys = [subkeys] try: sub_results = {} for subkey in subkeys: tmp = work_results[worker_name] # Traverse any subkeys for key in subkey.split('.')[:-1]: tmp = tmp[key] # Last subkey key = subkey.split('.')[-1] if key == '*': for key in tmp.keys(): sub_results[key] = tmp[key] else: sub_results[key] = tmp[key] # Set the output work_results = sub_results except (KeyError, TypeError): raise RuntimeError('Could not get one or more subkeys for: %s' % (work_results)) # Clean it and ship it return self.data_store.clean_for_serialization(work_results)
[ "def", "work_request", "(", "self", ",", "worker_name", ",", "md5", ",", "subkeys", "=", "None", ")", ":", "# Pull the worker output", "work_results", "=", "self", ".", "_recursive_work_resolver", "(", "worker_name", ",", "md5", ")", "# Subkeys (Fixme this is super klutzy)", "if", "subkeys", ":", "if", "isinstance", "(", "subkeys", ",", "str", ")", ":", "subkeys", "=", "[", "subkeys", "]", "try", ":", "sub_results", "=", "{", "}", "for", "subkey", "in", "subkeys", ":", "tmp", "=", "work_results", "[", "worker_name", "]", "# Traverse any subkeys", "for", "key", "in", "subkey", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ":", "tmp", "=", "tmp", "[", "key", "]", "# Last subkey", "key", "=", "subkey", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "if", "key", "==", "'*'", ":", "for", "key", "in", "tmp", ".", "keys", "(", ")", ":", "sub_results", "[", "key", "]", "=", "tmp", "[", "key", "]", "else", ":", "sub_results", "[", "key", "]", "=", "tmp", "[", "key", "]", "# Set the output", "work_results", "=", "sub_results", "except", "(", "KeyError", ",", "TypeError", ")", ":", "raise", "RuntimeError", "(", "'Could not get one or more subkeys for: %s'", "%", "(", "work_results", ")", ")", "# Clean it and ship it", "return", "self", ".", "data_store", ".", "clean_for_serialization", "(", "work_results", ")" ]
36.5
17.571429
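A hypothetical call to `work_request` on a workbench instance; the worker name, sample hash, and subkey names are all illustrative.

md5 = 'd41d8cd98f00b204e9800998ecf8427e'  # placeholder sample hash
results = workbench.work_request('meta', md5,
                                 subkeys=['file_type', 'entropy.overall'])
# 'entropy.overall' traverses meta['entropy']['overall']; a trailing '.*'
# would instead copy every key at that level into the output.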
def checkInstrumentsValidity(self):
    """Checks the validity of the instruments used in the Analyses.
    If an analysis with an invalid instrument (out-of-date or with
    calibration tests failed) is found, a warning message will be
    displayed.
    """
    invalid = []
    ans = self.context.getAnalyses()
    for an in ans:
        valid = an.isInstrumentValid()
        if not valid:
            instrument = an.getInstrument()
            inv = "%s (%s)" % (
                safe_unicode(an.Title()),
                safe_unicode(instrument.Title()))
            if inv not in invalid:
                invalid.append(inv)
    if len(invalid) > 0:
        message = _("Some analyses use out-of-date or uncalibrated "
                    "instruments. Results edition not allowed")
        message = "%s: %s" % (message, (", ".join(invalid)))
        self.context.plone_utils.addPortalMessage(message, "warning")
[ "def", "checkInstrumentsValidity", "(", "self", ")", ":", "invalid", "=", "[", "]", "ans", "=", "self", ".", "context", ".", "getAnalyses", "(", ")", "for", "an", "in", "ans", ":", "valid", "=", "an", ".", "isInstrumentValid", "(", ")", "if", "not", "valid", ":", "instrument", "=", "an", ".", "getInstrument", "(", ")", "inv", "=", "\"%s (%s)\"", "%", "(", "safe_unicode", "(", "an", ".", "Title", "(", ")", ")", ",", "safe_unicode", "(", "instrument", ".", "Title", "(", ")", ")", ")", "if", "inv", "not", "in", "invalid", ":", "invalid", ".", "append", "(", "inv", ")", "if", "len", "(", "invalid", ")", ">", "0", ":", "message", "=", "_", "(", "\"Some analyses use out-of-date or uncalibrated \"", "\"instruments. Results edition not allowed\"", ")", "message", "=", "\"%s: %s\"", "%", "(", "message", ",", "(", "\", \"", ".", "join", "(", "invalid", ")", ")", ")", "self", ".", "context", ".", "plone_utils", ".", "addPortalMessage", "(", "message", ",", "\"warning\"", ")" ]
47.7
15.15
def path_file_to_list(path_file):
    """
    :return: A list with the paths stored in a text file, one per line.
             Each path is validated using is_valid_path.
    """
    paths = []

    path_file_fd = open(path_file)

    for line_no, line in enumerate(path_file_fd.readlines(), start=1):
        line = line.strip()

        if not line:
            # Blank line support
            continue

        if line.startswith('#'):
            # Comment support
            continue

        try:
            is_valid_path(line)
            paths.append(line)
        except ValueError as ve:
            args = (ve, path_file, line_no)
            raise ValueError('%s error found in %s:%s.' % args)

    return paths
[ "def", "path_file_to_list", "(", "path_file", ")", ":", "paths", "=", "[", "]", "path_file_fd", "=", "file", "(", "path_file", ")", "for", "line_no", ",", "line", "in", "enumerate", "(", "path_file_fd", ".", "readlines", "(", ")", ",", "start", "=", "1", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "# Blank line support", "continue", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "# Comment support", "continue", "try", ":", "is_valid_path", "(", "line", ")", "paths", ".", "append", "(", "line", ")", "except", "ValueError", ",", "ve", ":", "args", "=", "(", "ve", ",", "path_file", ",", "line_no", ")", "raise", "ValueError", "(", "'%s error found in %s:%s.'", "%", "args", ")", "return", "paths" ]
26.444444
20
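A round trip for `path_file_to_list`, assuming `is_valid_path` accepts the paths shown: blanks and comment lines are skipped, everything else is validated and collected.

with open('paths.txt', 'w') as fh:
    fh.write('# admin areas\n/admin/\n\n/login\n')
print(path_file_to_list('paths.txt'))  # ['/admin/', '/login']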
def get_marker(self, increment=1): """ Returns the current marker, then increments the marker by what's specified """ i = self.markers_index self.markers_index += increment if self.markers_index >= len(self.markers): self.markers_index = self.markers_index-len(self.markers) if self.markers_index >= len(self.markers): self.markers_index=0 # to be safe return self.markers[i]
[ "def", "get_marker", "(", "self", ",", "increment", "=", "1", ")", ":", "i", "=", "self", ".", "markers_index", "self", ".", "markers_index", "+=", "increment", "if", "self", ".", "markers_index", ">=", "len", "(", "self", ".", "markers", ")", ":", "self", ".", "markers_index", "=", "self", ".", "markers_index", "-", "len", "(", "self", ".", "markers", ")", "if", "self", ".", "markers_index", ">=", "len", "(", "self", ".", "markers", ")", ":", "self", ".", "markers_index", "=", "0", "# to be safe", "return", "self", ".", "markers", "[", "i", "]" ]
34.307692
21.384615
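Cycling demo for `get_marker`, assuming the method above is available as a plain function that can be borrowed into a toy class: with a three-marker palette, the index wraps around.

class Markers:
    markers = ['o', 's', '^']
    markers_index = 0
    get_marker = get_marker  # borrow the method above for the sketch

m = Markers()
print([m.get_marker() for _ in range(5)])  # ['o', 's', '^', 'o', 's']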
def _handle_offset_response(self, response): """ Handle responses to both OffsetRequest and OffsetFetchRequest, since they are similar enough. :param response: A tuple of a single OffsetFetchResponse or OffsetResponse """ # Got a response, clear our outstanding request deferred self._request_d = None # Successful request, reset our retry delay, count, etc self.retry_delay = self.retry_init_delay self._fetch_attempt_count = 1 response = response[0] if hasattr(response, 'offsets'): # It's a response to an OffsetRequest self._fetch_offset = response.offsets[0] else: # It's a response to an OffsetFetchRequest # Make sure we got a valid offset back. Kafka uses -1 to indicate # no committed offset was retrieved if response.offset == OFFSET_NOT_COMMITTED: self._fetch_offset = OFFSET_EARLIEST else: self._fetch_offset = response.offset + 1 self._last_committed_offset = response.offset self._do_fetch()
[ "def", "_handle_offset_response", "(", "self", ",", "response", ")", ":", "# Got a response, clear our outstanding request deferred", "self", ".", "_request_d", "=", "None", "# Successful request, reset our retry delay, count, etc", "self", ".", "retry_delay", "=", "self", ".", "retry_init_delay", "self", ".", "_fetch_attempt_count", "=", "1", "response", "=", "response", "[", "0", "]", "if", "hasattr", "(", "response", ",", "'offsets'", ")", ":", "# It's a response to an OffsetRequest", "self", ".", "_fetch_offset", "=", "response", ".", "offsets", "[", "0", "]", "else", ":", "# It's a response to an OffsetFetchRequest", "# Make sure we got a valid offset back. Kafka uses -1 to indicate", "# no committed offset was retrieved", "if", "response", ".", "offset", "==", "OFFSET_NOT_COMMITTED", ":", "self", ".", "_fetch_offset", "=", "OFFSET_EARLIEST", "else", ":", "self", ".", "_fetch_offset", "=", "response", ".", "offset", "+", "1", "self", ".", "_last_committed_offset", "=", "response", ".", "offset", "self", ".", "_do_fetch", "(", ")" ]
39.172414
17.241379
def summary(self): """Summary statistics describing the fit. Set alpha property in the object before calling. Returns ------- df : DataFrame Contains columns coef, np.exp(coef), se(coef), z, p, lower, upper""" ci = 1 - self.alpha with np.errstate(invalid="ignore", divide="ignore"): df = pd.DataFrame(index=self.hazards_.index) df["coef"] = self.hazards_ df["exp(coef)"] = np.exp(self.hazards_) df["se(coef)"] = self.standard_errors_ df["z"] = self._compute_z_values() df["p"] = self._compute_p_values() df["-log2(p)"] = -np.log2(df["p"]) df["lower %g" % ci] = self.confidence_intervals_["lower-bound"] df["upper %g" % ci] = self.confidence_intervals_["upper-bound"] return df
[ "def", "summary", "(", "self", ")", ":", "ci", "=", "1", "-", "self", ".", "alpha", "with", "np", ".", "errstate", "(", "invalid", "=", "\"ignore\"", ",", "divide", "=", "\"ignore\"", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "index", "=", "self", ".", "hazards_", ".", "index", ")", "df", "[", "\"coef\"", "]", "=", "self", ".", "hazards_", "df", "[", "\"exp(coef)\"", "]", "=", "np", ".", "exp", "(", "self", ".", "hazards_", ")", "df", "[", "\"se(coef)\"", "]", "=", "self", ".", "standard_errors_", "df", "[", "\"z\"", "]", "=", "self", ".", "_compute_z_values", "(", ")", "df", "[", "\"p\"", "]", "=", "self", ".", "_compute_p_values", "(", ")", "df", "[", "\"-log2(p)\"", "]", "=", "-", "np", ".", "log2", "(", "df", "[", "\"p\"", "]", ")", "df", "[", "\"lower %g\"", "%", "ci", "]", "=", "self", ".", "confidence_intervals_", "[", "\"lower-bound\"", "]", "df", "[", "\"upper %g\"", "%", "ci", "]", "=", "self", ".", "confidence_intervals_", "[", "\"upper-bound\"", "]", "return", "df" ]
42.3
16.25
def update_user(self, user, name=None, password=None, host=None): """ Allows you to change one or more of the user's username, password, or host. """ return self._user_manager.update(user, name=name, password=password, host=host)
[ "def", "update_user", "(", "self", ",", "user", ",", "name", "=", "None", ",", "password", "=", "None", ",", "host", "=", "None", ")", ":", "return", "self", ".", "_user_manager", ".", "update", "(", "user", ",", "name", "=", "name", ",", "password", "=", "password", ",", "host", "=", "host", ")" ]
39.857143
19.857143
def get_methods(self): """ Return all method objects :rtype: a list of :class:`EncodedMethod` objects """ l = [] for i in self.classes.class_def: for j in i.get_methods(): l.append(j) return l
[ "def", "get_methods", "(", "self", ")", ":", "l", "=", "[", "]", "for", "i", "in", "self", ".", "classes", ".", "class_def", ":", "for", "j", "in", "i", ".", "get_methods", "(", ")", ":", "l", ".", "append", "(", "j", ")", "return", "l" ]
24.636364
13.363636
def getUrlMeta(self, url): """ Retrieve various metadata associated with a URL, as seen by Skype. Args: url (str): address to ping for info Returns: dict: metadata for the website queried """ return self.conn("GET", SkypeConnection.API_URL, params={"url": url}, auth=SkypeConnection.Auth.Authorize).json()
[ "def", "getUrlMeta", "(", "self", ",", "url", ")", ":", "return", "self", ".", "conn", "(", "\"GET\"", ",", "SkypeConnection", ".", "API_URL", ",", "params", "=", "{", "\"url\"", ":", "url", "}", ",", "auth", "=", "SkypeConnection", ".", "Auth", ".", "Authorize", ")", ".", "json", "(", ")" ]
32.75
21.75
def convexHull(self, geometries, sr=None): """ The convexHull operation is performed on a geometry service resource. It returns the convex hull of the input geometry. The input geometry can be a point, multipoint, polyline, or polygon. The convex hull is typically a polygon but can also be a polyline or point in degenerate cases. Inputs: geometries - array of geometries (structured as JSON geometry objects returned by the ArcGIS REST API). sr - spatial reference of the input geometries WKID. """ url = self._url + "/convexHull" params = { "f" : "json" } if isinstance(geometries, list) and len(geometries) > 0: g = geometries[0] if sr is not None: params['sr'] = sr else: params['sr'] = g._wkid if isinstance(g, Polygon): params['geometries'] = {"geometryType": "esriGeometryPolygon", "geometries" : self.__geomToStringArray(geometries, "list")} elif isinstance(g, Point): params['geometries'] = {"geometryType": "esriGeometryPoint", "geometries" : self.__geomToStringArray(geometries, "list")} elif isinstance(g, Polyline): params['geometries'] = {"geometryType": "esriGeometryPolyline", "geometries" : self.__geomToStringArray(geometries, "list")} else: return None return self._get(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url)
[ "def", "convexHull", "(", "self", ",", "geometries", ",", "sr", "=", "None", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/convexHull\"", "params", "=", "{", "\"f\"", ":", "\"json\"", "}", "if", "isinstance", "(", "geometries", ",", "list", ")", "and", "len", "(", "geometries", ")", ">", "0", ":", "g", "=", "geometries", "[", "0", "]", "if", "sr", "is", "not", "None", ":", "params", "[", "'sr'", "]", "=", "sr", "else", ":", "params", "[", "'sr'", "]", "=", "g", ".", "_wkid", "if", "isinstance", "(", "g", ",", "Polygon", ")", ":", "params", "[", "'geometries'", "]", "=", "{", "\"geometryType\"", ":", "\"esriGeometryPolygon\"", ",", "\"geometries\"", ":", "self", ".", "__geomToStringArray", "(", "geometries", ",", "\"list\"", ")", "}", "elif", "isinstance", "(", "g", ",", "Point", ")", ":", "params", "[", "'geometries'", "]", "=", "{", "\"geometryType\"", ":", "\"esriGeometryPoint\"", ",", "\"geometries\"", ":", "self", ".", "__geomToStringArray", "(", "geometries", ",", "\"list\"", ")", "}", "elif", "isinstance", "(", "g", ",", "Polyline", ")", ":", "params", "[", "'geometries'", "]", "=", "{", "\"geometryType\"", ":", "\"esriGeometryPolyline\"", ",", "\"geometries\"", ":", "self", ".", "__geomToStringArray", "(", "geometries", ",", "\"list\"", ")", "}", "else", ":", "return", "None", "return", "self", ".", "_get", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_port", "=", "self", ".", "_proxy_port", ",", "proxy_url", "=", "self", ".", "_proxy_url", ")" ]
46.365854
23.243902
def _bind_parameter(self, parameter, value): """Assigns a parameter value to matching instructions in-place.""" for (instr, param_index) in self._parameter_table[parameter]: instr.params[param_index] = value
[ "def", "_bind_parameter", "(", "self", ",", "parameter", ",", "value", ")", ":", "for", "(", "instr", ",", "param_index", ")", "in", "self", ".", "_parameter_table", "[", "parameter", "]", ":", "instr", ".", "params", "[", "param_index", "]", "=", "value" ]
58
9.5
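_bind_parameter is a private helper; binding is normally driven through the public circuit API. A sketch assuming Qiskit's Parameter/QuantumCircuit interface (bind_parameters returns a new, bound circuit):

from qiskit.circuit import Parameter, QuantumCircuit

theta = Parameter('theta')
qc = QuantumCircuit(1)
qc.rx(theta, 0)                           # instruction holds an unbound parameter

bound = qc.bind_parameters({theta: 0.5})  # walks the parameter table, as above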
def do_request(self, method, params=None): """Make request to Zabbix API. :type method: str :param method: ZabbixAPI method, like: `apiinfo.version`. :type params: str :param params: ZabbixAPI method arguments. >>> from pyzabbix import ZabbixAPI >>> z = ZabbixAPI() >>> apiinfo = z.do_request('apiinfo.version') """ request_json = { 'jsonrpc': '2.0', 'method': method, 'params': params or {}, 'id': '1', } # apiinfo.version and user.login doesn't require auth token if self.auth and (method not in ('apiinfo.version', 'user.login')): request_json['auth'] = self.auth logger.debug( 'urllib2.Request({0}, {1})'.format( self.url, json.dumps(request_json))) data = json.dumps(request_json) if not isinstance(data, bytes): data = data.encode("utf-8") req = urllib2.Request(self.url, data) req.get_method = lambda: 'POST' req.add_header('Content-Type', 'application/json-rpc') try: res = urlopen(req) res_str = res.read().decode('utf-8') res_json = json.loads(res_str) except ValueError as e: raise ZabbixAPIException("Unable to parse json: %s" % e.message) res_str = json.dumps(res_json, indent=4, separators=(',', ': ')) logger.debug("Response Body: %s", res_str) if 'error' in res_json: err = res_json['error'].copy() err.update({'json': str(request_json)}) raise ZabbixAPIException(err) return res_json
[ "def", "do_request", "(", "self", ",", "method", ",", "params", "=", "None", ")", ":", "request_json", "=", "{", "'jsonrpc'", ":", "'2.0'", ",", "'method'", ":", "method", ",", "'params'", ":", "params", "or", "{", "}", ",", "'id'", ":", "'1'", ",", "}", "# apiinfo.version and user.login doesn't require auth token", "if", "self", ".", "auth", "and", "(", "method", "not", "in", "(", "'apiinfo.version'", ",", "'user.login'", ")", ")", ":", "request_json", "[", "'auth'", "]", "=", "self", ".", "auth", "logger", ".", "debug", "(", "'urllib2.Request({0}, {1})'", ".", "format", "(", "self", ".", "url", ",", "json", ".", "dumps", "(", "request_json", ")", ")", ")", "data", "=", "json", ".", "dumps", "(", "request_json", ")", "if", "not", "isinstance", "(", "data", ",", "bytes", ")", ":", "data", "=", "data", ".", "encode", "(", "\"utf-8\"", ")", "req", "=", "urllib2", ".", "Request", "(", "self", ".", "url", ",", "data", ")", "req", ".", "get_method", "=", "lambda", ":", "'POST'", "req", ".", "add_header", "(", "'Content-Type'", ",", "'application/json-rpc'", ")", "try", ":", "res", "=", "urlopen", "(", "req", ")", "res_str", "=", "res", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "res_json", "=", "json", ".", "loads", "(", "res_str", ")", "except", "ValueError", "as", "e", ":", "raise", "ZabbixAPIException", "(", "\"Unable to parse json: %s\"", "%", "e", ".", "message", ")", "res_str", "=", "json", ".", "dumps", "(", "res_json", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", "logger", ".", "debug", "(", "\"Response Body: %s\"", ",", "res_str", ")", "if", "'error'", "in", "res_json", ":", "err", "=", "res_json", "[", "'error'", "]", ".", "copy", "(", ")", "err", ".", "update", "(", "{", "'json'", ":", "str", "(", "request_json", ")", "}", ")", "raise", "ZabbixAPIException", "(", "err", ")", "return", "res_json" ]
30.722222
18.185185
def hax(self): """ Returns the histogram axes, creating it only on demand. """ if make_axes_locatable is None: raise YellowbrickValueError(( "residuals histogram requires matplotlib 2.0.2 or greater " "please upgrade matplotlib or set hist=False on the visualizer" )) divider = make_axes_locatable(self.ax) hax = divider.append_axes("right", size=1, pad=0.1, sharey=self.ax) hax.yaxis.tick_right() hax.grid(False, axis='x') return hax
[ "def", "hax", "(", "self", ")", ":", "if", "make_axes_locatable", "is", "None", ":", "raise", "YellowbrickValueError", "(", "(", "\"residuals histogram requires matplotlib 2.0.2 or greater \"", "\"please upgrade matplotlib or set hist=False on the visualizer\"", ")", ")", "divider", "=", "make_axes_locatable", "(", "self", ".", "ax", ")", "hax", "=", "divider", ".", "append_axes", "(", "\"right\"", ",", "size", "=", "1", ",", "pad", "=", "0.1", ",", "sharey", "=", "self", ".", "ax", ")", "hax", ".", "yaxis", ".", "tick_right", "(", ")", "hax", ".", "grid", "(", "False", ",", "axis", "=", "'x'", ")", "return", "hax" ]
32.294118
20.647059
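The divider pattern used by hax works the same outside the visualizer; a self-contained matplotlib sketch:

import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

fig, ax = plt.subplots()
divider = make_axes_locatable(ax)
# Append a 1-inch axis on the right that shares the main y axis.
hax = divider.append_axes("right", size=1, pad=0.1, sharey=ax)
hax.yaxis.tick_right()
hax.grid(False, axis='x')
plt.show()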
def build(self, builder): """ Build XML by appending to builder """ if self.text is None: raise ValueError("Text is not set.") params = {} if self.sponsor_or_site is not None: params['SponsorOrSite'] = self.sponsor_or_site builder.start("Comment", params) builder.data(self.text) builder.end("Comment")
[ "def", "build", "(", "self", ",", "builder", ")", ":", "if", "self", ".", "text", "is", "None", ":", "raise", "ValueError", "(", "\"Text is not set.\"", ")", "params", "=", "{", "}", "if", "self", ".", "sponsor_or_site", "is", "not", "None", ":", "params", "[", "'SponsorOrSite'", "]", "=", "self", ".", "sponsor_or_site", "builder", ".", "start", "(", "\"Comment\"", ",", "params", ")", "builder", ".", "data", "(", "self", ".", "text", ")", "builder", ".", "end", "(", "\"Comment\"", ")" ]
29.769231
10.538462
def plot_residuals(self, plot=None):
        """ Plot normalized fit residuals.

        The sum of the squares of the residuals equals ``self.chi2``.
        Individual residuals should be distributed about one,
        in a Gaussian distribution.

        Args:
            plot: :mod:`matplotlib` plotter. If ``None``, uses
                ``matplotlib.pyplot``.

        Returns:
            Plotter ``plot``.
        """
        if plot is None:
            import matplotlib.pyplot as plot
        x = numpy.arange(1, len(self.residuals) + 1)
        y = _gvar.mean(self.residuals)
        yerr = _gvar.sdev(self.residuals)
        plot.errorbar(x=x, y=y, yerr=yerr, fmt='o', color='b')
        plot.ylabel('normalized residuals')
        xr = [x[0], x[-1]]
        plot.plot([x[0], x[-1]], [0, 0], 'r-')
        plot.fill_between(
            x=xr, y1=[-1,-1], y2=[1,1], color='r', alpha=0.075
            )
        return plot
[ "def", "plot_residuals", "(", "self", ",", "plot", "=", "None", ")", ":", "if", "plot", "is", "None", ":", "import", "matplotlib", ".", "pyplot", "as", "plot", "x", "=", "numpy", ".", "arange", "(", "1", ",", "len", "(", "self", ".", "residuals", ")", "+", "1", ")", "y", "=", "_gvar", ".", "mean", "(", "self", ".", "residuals", ")", "yerr", "=", "_gvar", ".", "sdev", "(", "self", ".", "residuals", ")", "plot", ".", "errorbar", "(", "x", "=", "x", ",", "y", "=", "y", ",", "yerr", "=", "yerr", ",", "fmt", "=", "'o'", ",", "color", "=", "'b'", ")", "plot", ".", "ylabel", "(", "'normalized residuals'", ")", "xr", "=", "[", "x", "[", "0", "]", ",", "x", "[", "-", "1", "]", "]", "plot", ".", "plot", "(", "[", "x", "[", "0", "]", ",", "x", "[", "-", "1", "]", "]", ",", "[", "0", ",", "0", "]", ",", "'r-'", ")", "plot", ".", "fill_between", "(", "x", "=", "xr", ",", "y1", "=", "[", "-", "1", ",", "-", "1", "]", ",", "y2", "=", "[", "1", ",", "1", "]", ",", "color", "=", "'r'", ",", "alpha", "=", "0.075", ")", "return", "plot" ]
33.592593
16.148148
def discover(timeout=1, retries=1): """Discover Raumfeld devices in the network :param timeout: The timeout in seconds :param retries: How often the search should be retried :returns: A list of raumfeld devices, sorted by name """ locations = [] group = ('239.255.255.250', 1900) service = 'ssdp:urn:schemas-upnp-org:device:MediaRenderer:1' # 'ssdp:all' message = '\r\n'.join(['M-SEARCH * HTTP/1.1', 'HOST: {group[0]}:{group[1]}', 'MAN: "ssdp:discover"', 'ST: {st}', 'MX: 1', '', '']).format(group=group, st=service) socket.setdefaulttimeout(timeout) for _ in range(retries): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) # socket options sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2) # send group multicast sock.sendto(message.encode('utf-8'), group) while True: try: response = sock.recv(2048).decode('utf-8') for line in response.split('\r\n'): if line.startswith('Location: '): location = line.split(' ')[1].strip() if not location in locations: locations.append(location) except socket.timeout: break devices = [RaumfeldDevice(location) for location in locations] # only return 'Virtual Media Player' and sort the list return sorted([device for device in devices if device.model_description == 'Virtual Media Player'], key=lambda device: device.friendly_name)
[ "def", "discover", "(", "timeout", "=", "1", ",", "retries", "=", "1", ")", ":", "locations", "=", "[", "]", "group", "=", "(", "'239.255.255.250'", ",", "1900", ")", "service", "=", "'ssdp:urn:schemas-upnp-org:device:MediaRenderer:1'", "# 'ssdp:all'", "message", "=", "'\\r\\n'", ".", "join", "(", "[", "'M-SEARCH * HTTP/1.1'", ",", "'HOST: {group[0]}:{group[1]}'", ",", "'MAN: \"ssdp:discover\"'", ",", "'ST: {st}'", ",", "'MX: 1'", ",", "''", ",", "''", "]", ")", ".", "format", "(", "group", "=", "group", ",", "st", "=", "service", ")", "socket", ".", "setdefaulttimeout", "(", "timeout", ")", "for", "_", "in", "range", "(", "retries", ")", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ",", "socket", ".", "IPPROTO_UDP", ")", "# socket options", "sock", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "sock", ".", "setsockopt", "(", "socket", ".", "IPPROTO_IP", ",", "socket", ".", "IP_MULTICAST_TTL", ",", "2", ")", "# send group multicast", "sock", ".", "sendto", "(", "message", ".", "encode", "(", "'utf-8'", ")", ",", "group", ")", "while", "True", ":", "try", ":", "response", "=", "sock", ".", "recv", "(", "2048", ")", ".", "decode", "(", "'utf-8'", ")", "for", "line", "in", "response", ".", "split", "(", "'\\r\\n'", ")", ":", "if", "line", ".", "startswith", "(", "'Location: '", ")", ":", "location", "=", "line", ".", "split", "(", "' '", ")", "[", "1", "]", ".", "strip", "(", ")", "if", "not", "location", "in", "locations", ":", "locations", ".", "append", "(", "location", ")", "except", "socket", ".", "timeout", ":", "break", "devices", "=", "[", "RaumfeldDevice", "(", "location", ")", "for", "location", "in", "locations", "]", "# only return 'Virtual Media Player' and sort the list", "return", "sorted", "(", "[", "device", "for", "device", "in", "devices", "if", "device", ".", "model_description", "==", "'Virtual Media Player'", "]", ",", "key", "=", "lambda", "device", ":", "device", ".", "friendly_name", ")" ]
40.488889
18.088889
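A usage sketch for discover, relying only on the attributes the function itself reads (friendly_name, model_description):

# Search twice with a 2-second timeout, then list what was found.
for device in discover(timeout=2, retries=2):
    print(device.friendly_name, '->', device.model_description)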
def make_command(self, ctx, name, info):
        """
        Make a click sub-command from command info retrieved
        from the xbahn engineer
        """
        @self.command()
        @click.option("--debug/--no-debug", default=False, help="Show debug information")
        @doc(info.get("description"))
        def func(*args, **kwargs):
            if "debug" in kwargs:
                del kwargs["debug"]
            fn = getattr(ctx.widget, name)
            result = fn(*args, **kwargs)
            click.echo("%s: %s> %s" % (ctx.params["host"],name,result))
            ctx.conn.close()

        ctx.info_name = "%s %s" % (ctx.info_name , ctx.params["host"])

        for a in info.get("arguments",[]):
            deco = click.argument(*a["args"], **a["kwargs"])
            func = deco(func)

        for o in info.get("options",[]):
            deco = click.option(*o["args"], **o["kwargs"])
            func = deco(func)

        return func
[ "def", "make_command", "(", "self", ",", "ctx", ",", "name", ",", "info", ")", ":", "@", "self", ".", "command", "(", ")", "@", "click", ".", "option", "(", "\"--debug/--no-debug\"", ",", "default", "=", "False", ",", "help", "=", "\"Show debug information\"", ")", "@", "doc", "(", "info", ".", "get", "(", "\"description\"", ")", ")", "def", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "\"debug\"", "in", "kwargs", ":", "del", "kwargs", "[", "\"debug\"", "]", "fn", "=", "getattr", "(", "ctx", ".", "widget", ",", "name", ")", "result", "=", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "click", ".", "echo", "(", "\"%s: %s> %s\"", "%", "(", "ctx", ".", "params", "[", "\"host\"", "]", ",", "name", ",", "result", ")", ")", "ctx", ".", "conn", ".", "close", "(", ")", "ctx", ".", "info_name", "=", "\"%s %s\"", "%", "(", "ctx", ".", "info_name", ",", "ctx", ".", "params", "[", "\"host\"", "]", ")", "for", "a", "in", "info", ".", "get", "(", "\"arguments\"", ",", "[", "]", ")", ":", "deco", "=", "click", ".", "argument", "(", "*", "a", "[", "\"args\"", "]", ",", "*", "*", "a", "[", "\"kwargs\"", "]", ")", "func", "=", "deco", "(", "func", ")", "for", "o", "in", "info", ".", "get", "(", "\"options\"", ",", "[", "]", ")", ":", "deco", "=", "click", ".", "option", "(", "*", "o", "[", "\"args\"", "]", ",", "*", "*", "o", "[", "\"kwargs\"", "]", ")", "func", "=", "deco", "(", "func", ")", "return", "func" ]
32.964286
16.392857
def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna """ # we may need to actually resample as if we are timestamps if self.kind == 'timestamp': return super()._upsample(method, limit=limit, fill_value=fill_value) self._set_binner() ax = self.ax obj = self.obj new_index = self.binner # Start vs. end of period memb = ax.asfreq(self.freq, how=self.convention) # Get the fill indexer indexer = memb.get_indexer(new_index, method=method, limit=limit) return self._wrap_result(_take_new_index( obj, indexer, new_index, axis=self.axis))
[ "def", "_upsample", "(", "self", ",", "method", ",", "limit", "=", "None", ",", "fill_value", "=", "None", ")", ":", "# we may need to actually resample as if we are timestamps", "if", "self", ".", "kind", "==", "'timestamp'", ":", "return", "super", "(", ")", ".", "_upsample", "(", "method", ",", "limit", "=", "limit", ",", "fill_value", "=", "fill_value", ")", "self", ".", "_set_binner", "(", ")", "ax", "=", "self", ".", "ax", "obj", "=", "self", ".", "obj", "new_index", "=", "self", ".", "binner", "# Start vs. end of period", "memb", "=", "ax", ".", "asfreq", "(", "self", ".", "freq", ",", "how", "=", "self", ".", "convention", ")", "# Get the fill indexer", "indexer", "=", "memb", ".", "get_indexer", "(", "new_index", ",", "method", "=", "method", ",", "limit", "=", "limit", ")", "return", "self", ".", "_wrap_result", "(", "_take_new_index", "(", "obj", ",", "indexer", ",", "new_index", ",", "axis", "=", "self", ".", "axis", ")", ")" ]
30.470588
18.823529
def allowed(self) -> Set[int]: """ Returns the set of constrained words that could follow this one. For unfinished phrasal constraints, it is the next word in the phrase. In other cases, it is the list of all unmet constraints. If all constraints are met, an empty set is returned. :return: The ID of the next required word, or -1 if any word can follow """ items = set() # type: Set[int] # Add extensions of a started-but-incomplete sequential constraint if self.last_met != -1 and self.is_sequence[self.last_met] == 1: word_id = self.constraints[self.last_met + 1] if word_id != self.eos_id or self.num_needed() == 1: items.add(word_id) # Add all constraints that aren't non-initial sequences else: for i, word_id in enumerate(self.constraints): if not self.met[i] and (i == 0 or not self.is_sequence[i - 1]): if word_id != self.eos_id or self.num_needed() == 1: items.add(word_id) return items
[ "def", "allowed", "(", "self", ")", "->", "Set", "[", "int", "]", ":", "items", "=", "set", "(", ")", "# type: Set[int]", "# Add extensions of a started-but-incomplete sequential constraint", "if", "self", ".", "last_met", "!=", "-", "1", "and", "self", ".", "is_sequence", "[", "self", ".", "last_met", "]", "==", "1", ":", "word_id", "=", "self", ".", "constraints", "[", "self", ".", "last_met", "+", "1", "]", "if", "word_id", "!=", "self", ".", "eos_id", "or", "self", ".", "num_needed", "(", ")", "==", "1", ":", "items", ".", "add", "(", "word_id", ")", "# Add all constraints that aren't non-initial sequences", "else", ":", "for", "i", ",", "word_id", "in", "enumerate", "(", "self", ".", "constraints", ")", ":", "if", "not", "self", ".", "met", "[", "i", "]", "and", "(", "i", "==", "0", "or", "not", "self", ".", "is_sequence", "[", "i", "-", "1", "]", ")", ":", "if", "word_id", "!=", "self", ".", "eos_id", "or", "self", ".", "num_needed", "(", ")", "==", "1", ":", "items", ".", "add", "(", "word_id", ")", "return", "items" ]
45.541667
23.291667
def warn(msg,level=2,exit_val=1): """Standard warning printer. Gives formatting consistency. Output is sent to io.stderr (sys.stderr by default). Options: -level(2): allows finer control: 0 -> Do nothing, dummy function. 1 -> Print message. 2 -> Print 'WARNING:' + message. (Default level). 3 -> Print 'ERROR:' + message. 4 -> Print 'FATAL ERROR:' + message and trigger a sys.exit(exit_val). -exit_val (1): exit value returned by sys.exit() for a level 4 warning. Ignored for all other levels.""" if level>0: header = ['','','WARNING: ','ERROR: ','FATAL ERROR: '] io.stderr.write('%s%s' % (header[level],msg)) if level == 4: print >> io.stderr,'Exiting.\n' sys.exit(exit_val)
[ "def", "warn", "(", "msg", ",", "level", "=", "2", ",", "exit_val", "=", "1", ")", ":", "if", "level", ">", "0", ":", "header", "=", "[", "''", ",", "''", ",", "'WARNING: '", ",", "'ERROR: '", ",", "'FATAL ERROR: '", "]", "io", ".", "stderr", ".", "write", "(", "'%s%s'", "%", "(", "header", "[", "level", "]", ",", "msg", ")", ")", "if", "level", "==", "4", ":", "print", ">>", "io", ".", "stderr", ",", "'Exiting.\\n'", "sys", ".", "exit", "(", "exit_val", ")" ]
33.217391
19.26087
def windowed_run_count(da, window, dim='time'):
    """Return the number of consecutive true values in array for runs at least as long as given duration.

    Parameters
    ----------
    da: N-dimensional Xarray data array (boolean)
      Input data array
    window : int
      Minimum run length.
    dim : Xarray dimension (default = 'time')
      Dimension along which to calculate consecutive run

    Returns
    -------
    out : N-dimensional xarray data array (int)
      Total number of true values that are part of consecutive runs at least `window` long.
    """
    d = rle(da, dim=dim)
    out = d.where(d >= window, 0).sum(dim=dim)
    return out
[ "def", "windowed_run_count", "(", "da", ",", "window", ",", "dim", "=", "'time'", ")", ":", "d", "=", "rle", "(", "da", ",", "dim", "=", "dim", ")", "out", "=", "d", ".", "where", "(", "d", ">=", "window", ",", "0", ")", ".", "sum", "(", "dim", "=", "dim", ")", "return", "out" ]
33
19.952381
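The same semantics without xarray or the rle helper, as a plain-Python sketch: sum the lengths of every True run that reaches the window size.

def windowed_run_count_1d(mask, window):
    """Count True values belonging to runs of length >= window."""
    total = run = 0
    for flag in list(mask) + [False]:  # trailing False flushes the last run
        if flag:
            run += 1
        else:
            if run >= window:
                total += run
            run = 0
    return total

print(windowed_run_count_1d([True, True, False, True, True, True], window=3))  # 3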
def enroll_users_in_program(cls, enterprise_customer, program_details, course_mode, emails, cohort=None):
        """
        Enroll existing users in all courses in a program, and create pending enrollments for nonexistent users.

        Args:
            enterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment
            program_details: The details of the program in which we're enrolling
            course_mode (str): The mode with which we're enrolling in the program
            emails: An iterable of email addresses which need to be enrolled

        Returns:
            successes: A list of users who were successfully enrolled in all courses of the program
            pending: A list of PendingEnterpriseCustomerUsers who were successfully linked and had pending
                enrollments created for them in the database
            failures: A list of users who could not be enrolled in the program
        """
        existing_users, unregistered_emails = cls.get_users_by_email(emails)
        course_ids = get_course_runs_from_program(program_details)

        successes = []
        pending = []
        failures = []

        for user in existing_users:
            succeeded = cls.enroll_user(enterprise_customer, user, course_mode, *course_ids)
            if succeeded:
                successes.append(user)
            else:
                failures.append(user)

        for email in unregistered_emails:
            pending_user = enterprise_customer.enroll_user_pending_registration(
                email,
                course_mode,
                *course_ids,
                cohort=cohort
            )
            pending.append(pending_user)

        return successes, pending, failures
[ "def", "enroll_users_in_program", "(", "cls", ",", "enterprise_customer", ",", "program_details", ",", "course_mode", ",", "emails", ",", "cohort", "=", "None", ")", ":", "existing_users", ",", "unregistered_emails", "=", "cls", ".", "get_users_by_email", "(", "emails", ")", "course_ids", "=", "get_course_runs_from_program", "(", "program_details", ")", "successes", "=", "[", "]", "pending", "=", "[", "]", "failures", "=", "[", "]", "for", "user", "in", "existing_users", ":", "succeeded", "=", "cls", ".", "enroll_user", "(", "enterprise_customer", ",", "user", ",", "course_mode", ",", "*", "course_ids", ")", "if", "succeeded", ":", "successes", ".", "append", "(", "user", ")", "else", ":", "failures", ".", "append", "(", "user", ")", "for", "email", "in", "unregistered_emails", ":", "pending_user", "=", "enterprise_customer", ".", "enroll_user_pending_registration", "(", "email", ",", "course_mode", ",", "*", "course_ids", ",", "cohort", "=", "cohort", ")", "pending", ".", "append", "(", "pending_user", ")", "return", "successes", ",", "pending", ",", "failures" ]
42.775
28.025
def to_XML(self, xml_declaration=True, xmlns=True):
        """
        Dumps object fields to an XML-formatted string. The 'xml_declaration'
        switch enables printing of a leading standard XML line containing XML
        version and encoding. The 'xmlns' switch enables printing of qualified
        XMLNS prefixes.

        :param xml_declaration: if ``True`` (default) prints a leading XML
            declaration line
        :type xml_declaration: bool
        :param xmlns: if ``True`` (default) prints full XMLNS prefixes
        :type xmlns: bool
        :returns: an XML-formatted string
        """
        root_node = self._to_DOM()
        if xmlns:
            xmlutils.annotate_with_XMLNS(root_node,
                                         OZONE_XMLNS_PREFIX,
                                         OZONE_XMLNS_URL)
        return xmlutils.DOM_node_to_XML(root_node, xml_declaration)
[ "def", "to_XML", "(", "self", ",", "xml_declaration", "=", "True", ",", "xmlns", "=", "True", ")", ":", "root_node", "=", "self", ".", "_to_DOM", "(", ")", "if", "xmlns", ":", "xmlutils", ".", "annotate_with_XMLNS", "(", "root_node", ",", "OZONE_XMLNS_PREFIX", ",", "OZONE_XMLNS_URL", ")", "return", "xmlutils", ".", "DOM_node_to_XML", "(", "root_node", ",", "xml_declaration", ")" ]
42.285714
20.095238
def sever_sink_ports(self, context, ports, connected_to=None):
        # type: (AContext, APortMap, str) -> None
        """Conditionally sever Sink Ports of the child. If connected_to
        is None then sever all, otherwise restrict to connected_to's
        Source Ports

        Args:
            context (Context): The context to use
            ports (dict): {part_name: [PortInfo]}
            connected_to (str): Restrict severing to this part
        """
        # Find the Source Ports to connect to
        if connected_to:
            # Calculate a lookup of the Source Port "name" to type
            source_port_lookup = self._source_port_lookup(
                ports.get(connected_to, []))
        else:
            source_port_lookup = True
        # Find our Sink Ports
        sink_ports = self._get_flowgraph_ports(ports, SinkPortInfo)
        # If we have Sunk Ports that need to be disconnected then do so
        if sink_ports and source_port_lookup:
            child = context.block_view(self.mri)
            attribute_values = {}
            for name, port_info in sink_ports.items():
                if source_port_lookup is True or source_port_lookup.get(
                        child[name].value, None) == port_info.port:
                    attribute_values[name] = port_info.disconnected_value
            child.put_attribute_values(attribute_values)
[ "def", "sever_sink_ports", "(", "self", ",", "context", ",", "ports", ",", "connected_to", "=", "None", ")", ":", "# type: (AContext, APortMap, str) -> None", "# Find the Source Ports to connect to", "if", "connected_to", ":", "# Calculate a lookup of the Source Port \"name\" to type", "source_port_lookup", "=", "self", ".", "_source_port_lookup", "(", "ports", ".", "get", "(", "connected_to", ",", "[", "]", ")", ")", "else", ":", "source_port_lookup", "=", "True", "# Find our Sink Ports", "sink_ports", "=", "self", ".", "_get_flowgraph_ports", "(", "ports", ",", "SinkPortInfo", ")", "# If we have Sunk Ports that need to be disconnected then do so", "if", "sink_ports", "and", "source_port_lookup", ":", "child", "=", "context", ".", "block_view", "(", "self", ".", "mri", ")", "attribute_values", "=", "{", "}", "for", "name", ",", "port_info", "in", "sink_ports", ".", "items", "(", ")", ":", "if", "source_port_lookup", "is", "True", "or", "source_port_lookup", ".", "get", "(", "child", "[", "name", "]", ".", "value", ",", "None", ")", "==", "port_info", ".", "port", ":", "attribute_values", "[", "name", "]", "=", "port_info", ".", "disconnected_value", "child", ".", "put_attribute_values", "(", "attribute_values", ")" ]
43.903226
18.741935
def vcfheader(data, names, ofile): """ Prints header for vcf files """ ## choose reference string if data.paramsdict["reference_sequence"]: reference = data.paramsdict["reference_sequence"] else: reference = "pseudo-reference (most common base at site)" ##FILTER=<ID=minCov,Description="Data shared across <{mincov} samples"> ##FILTER=<ID=maxSH,Description="Heterozygosous site shared across >{maxsh} samples"> header = """\ ##fileformat=VCFv4.0 ##fileDate={date} ##source=ipyrad_v.{version} ##reference={reference} ##phasing=unphased ##INFO=<ID=NS,Number=1,Type=Integer,Description="Number of Samples With Data"> ##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth"> ##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype"> ##FORMAT=<ID=DP,Number=1,Type=Integer,Description="Read Depth"> ##FORMAT=<ID=CATG,Number=1,Type=String,Description="Base Counts (CATG)"> #CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t{names} """.format(date=time.strftime("%Y/%m/%d"), version=__version__, reference=os.path.basename(reference), mincov=data.paramsdict["min_samples_locus"], maxsh=data.paramsdict["max_shared_Hs_locus"], names="\t".join(names)) ## WRITE ofile.write(header)
[ "def", "vcfheader", "(", "data", ",", "names", ",", "ofile", ")", ":", "## choose reference string", "if", "data", ".", "paramsdict", "[", "\"reference_sequence\"", "]", ":", "reference", "=", "data", ".", "paramsdict", "[", "\"reference_sequence\"", "]", "else", ":", "reference", "=", "\"pseudo-reference (most common base at site)\"", "##FILTER=<ID=minCov,Description=\"Data shared across <{mincov} samples\">", "##FILTER=<ID=maxSH,Description=\"Heterozygosous site shared across >{maxsh} samples\">", "header", "=", "\"\"\"\\\n##fileformat=VCFv4.0\n##fileDate={date}\n##source=ipyrad_v.{version}\n##reference={reference}\n##phasing=unphased\n##INFO=<ID=NS,Number=1,Type=Integer,Description=\"Number of Samples With Data\">\n##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Total Depth\">\n##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\"Read Depth\">\n##FORMAT=<ID=CATG,Number=1,Type=String,Description=\"Base Counts (CATG)\">\n#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t{names}\n\"\"\"", ".", "format", "(", "date", "=", "time", ".", "strftime", "(", "\"%Y/%m/%d\"", ")", ",", "version", "=", "__version__", ",", "reference", "=", "os", ".", "path", ".", "basename", "(", "reference", ")", ",", "mincov", "=", "data", ".", "paramsdict", "[", "\"min_samples_locus\"", "]", ",", "maxsh", "=", "data", ".", "paramsdict", "[", "\"max_shared_Hs_locus\"", "]", ",", "names", "=", "\"\\t\"", ".", "join", "(", "names", ")", ")", "## WRITE", "ofile", ".", "write", "(", "header", ")" ]
38.454545
18.727273
def linked_model_for_class(self, cls, make_constants_variable=False, **kwargs): """ Create a PriorModel wrapping the specified class with attributes from this instance. Priors can be overridden using keyword arguments. Any constructor arguments of the new class for which there is no attribute associated with this class and no keyword argument are created from config. If make_constants_variable is True then constants associated with this instance will be used to set the mean of priors in the new instance rather than overriding them. Parameters ---------- cls: class The class that the new PriorModel will wrap make_constants_variable: bool If True constants from this instance will be used to determine the mean values for priors in the new instance rather than overriding them kwargs Keyword arguments passed in here are used to override attributes from this instance or add new attributes Returns ------- new_model: PriorModel A new prior model with priors derived from this instance """ constructor_args = inspect.getfullargspec(cls).args attribute_tuples = self.attribute_tuples new_model = PriorModel(cls) for attribute_tuple in attribute_tuples: name = attribute_tuple.name if name in constructor_args or ( is_tuple_like_attribute_name(name) and tuple_name(name) in constructor_args): attribute = kwargs[name] if name in kwargs else attribute_tuple.value if make_constants_variable and isinstance(attribute, Constant): new_attribute = getattr(new_model, name) if isinstance(new_attribute, Prior): new_attribute.mean = attribute.value continue setattr(new_model, name, attribute) return new_model
[ "def", "linked_model_for_class", "(", "self", ",", "cls", ",", "make_constants_variable", "=", "False", ",", "*", "*", "kwargs", ")", ":", "constructor_args", "=", "inspect", ".", "getfullargspec", "(", "cls", ")", ".", "args", "attribute_tuples", "=", "self", ".", "attribute_tuples", "new_model", "=", "PriorModel", "(", "cls", ")", "for", "attribute_tuple", "in", "attribute_tuples", ":", "name", "=", "attribute_tuple", ".", "name", "if", "name", "in", "constructor_args", "or", "(", "is_tuple_like_attribute_name", "(", "name", ")", "and", "tuple_name", "(", "name", ")", "in", "constructor_args", ")", ":", "attribute", "=", "kwargs", "[", "name", "]", "if", "name", "in", "kwargs", "else", "attribute_tuple", ".", "value", "if", "make_constants_variable", "and", "isinstance", "(", "attribute", ",", "Constant", ")", ":", "new_attribute", "=", "getattr", "(", "new_model", ",", "name", ")", "if", "isinstance", "(", "new_attribute", ",", "Prior", ")", ":", "new_attribute", ".", "mean", "=", "attribute", ".", "value", "continue", "setattr", "(", "new_model", ",", "name", ",", "attribute", ")", "return", "new_model" ]
50.538462
27.717949
def Aitken(s):
    """Accelerate the convergence of a series using Aitken's
    delta-squared process (SICP calls it Euler).
    """
    def accel():
        s0, s1, s2 = s >> item[:3]
        while 1:
            yield s2 - (s2 - s1)**2 / (s0 - 2*s1 + s2)
            s0, s1, s2 = s1, s2, next(s)
    return accel()
[ "def", "Aitken", "(", "s", ")", ":", "def", "accel", "(", ")", ":", "s0", ",", "s1", ",", "s2", "=", "s", ">>", "item", "[", ":", "3", "]", "while", "1", ":", "yield", "s2", "-", "(", "s2", "-", "s1", ")", "**", "2", "/", "(", "s0", "-", "2", "*", "s1", "+", "s2", ")", "s0", ",", "s1", ",", "s2", "=", "s1", ",", "s2", ",", "next", "(", "s", ")", "return", "accel", "(", ")" ]
26.6
15.4
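The same transform without the stream DSL (s >> item[:3]), checked on the slowly converging Leibniz series for pi; a self-contained sketch:

import itertools

def aitken(seq):
    """Aitken's delta-squared acceleration of a scalar sequence."""
    s0, s1, s2 = next(seq), next(seq), next(seq)
    while True:
        yield s2 - (s2 - s1) ** 2 / (s0 - 2 * s1 + s2)
        s0, s1, s2 = s1, s2, next(seq)

def leibniz():
    """Partial sums of 4 * sum((-1)^k / (2k + 1)), converging slowly to pi."""
    total, k, sign = 0.0, 0, 1.0
    while True:
        total += sign * 4.0 / (2 * k + 1)
        yield total
        k, sign = k + 1, -sign

print(list(itertools.islice(aitken(leibniz()), 5)))
# first accelerated term is already ~3.1667 versus 3.4667 for the raw partial sum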
def print_meta(ds, ds_path=None): "Prints meta data for subjects in given dataset." print('\n#' + ds_path) for sub, cls in ds.classes.items(): print('{},{}'.format(sub, cls)) return
[ "def", "print_meta", "(", "ds", ",", "ds_path", "=", "None", ")", ":", "print", "(", "'\\n#'", "+", "ds_path", ")", "for", "sub", ",", "cls", "in", "ds", ".", "classes", ".", "items", "(", ")", ":", "print", "(", "'{},{}'", ".", "format", "(", "sub", ",", "cls", ")", ")", "return" ]
25
18.25
def __set_clear_button_visibility(self, text): """ Sets the clear button visibility. :param text: Current field text. :type text: QString """ if text: self.__clear_button.show() else: self.__clear_button.hide()
[ "def", "__set_clear_button_visibility", "(", "self", ",", "text", ")", ":", "if", "text", ":", "self", ".", "__clear_button", ".", "show", "(", ")", "else", ":", "self", ".", "__clear_button", ".", "hide", "(", ")" ]
23.416667
12.916667
def construct(parent=None, defaults=None, **kwargs):
    """
    Random variable constructor.

    Args:
        cdf: Cumulative distribution function. Optional if ``parent`` is used.
        bnd: Boundary interval. Optional if ``parent`` is used.
        parent (Dist): Distribution used as basis for new distribution. Any
            other argument that is omitted will instead take its function from
            ``parent``.
        doc (str): Documentation for the distribution.
        str (str, :py:data:typing.Callable): Pretty print of the variable.
        pdf: Probability density function.
        ppf: Point percentile function.
        mom: Raw moment generator.
        ttr: Three terms recursion coefficient generator.
        init: Custom initialiser method.
        defaults (dict): Default values to provide to initialiser.

    Returns:
        (Dist): New custom distribution.
    """
    for key in kwargs:
        assert key in LEGAL_ATTRS, "{} is not legal input".format(key)

    if parent is not None:
        for key, value in LEGAL_ATTRS.items():
            if key not in kwargs and hasattr(parent, value):
                kwargs[key] = getattr(parent, value)

    assert "cdf" in kwargs, "cdf function must be defined"
    assert "bnd" in kwargs, "bnd function must be defined"
    if "str" in kwargs and isinstance(kwargs["str"], str):
        string = kwargs.pop("str")
        kwargs["str"] = lambda *args, **kwargs: string
    defaults = defaults if defaults else {}
    for key in defaults:
        assert key in LEGAL_ATTRS, "invalid default value {}".format(key)

    def custom_distribution(**kws):
        prm = defaults.copy()
        prm.update(kws)
        dist = Dist(**prm)
        for key, function in kwargs.items():
            attr_name = LEGAL_ATTRS[key]
            setattr(dist, attr_name, types.MethodType(function, dist))
        return dist

    if "doc" in kwargs:
        custom_distribution.__doc__ = kwargs["doc"]

    return custom_distribution
[ "def", "construct", "(", "parent", "=", "None", ",", "defaults", "=", "None", ",", "*", "*", "kwargs", ")", ":", "for", "key", "in", "kwargs", ":", "assert", "key", "in", "LEGAL_ATTRS", ",", "\"{} is not legal input\"", ".", "format", "(", "key", ")", "if", "parent", "is", "not", "None", ":", "for", "key", ",", "value", "in", "LEGAL_ATTRS", ".", "items", "(", ")", ":", "if", "key", "not", "in", "kwargs", "and", "hasattr", "(", "parent", ",", "value", ")", ":", "kwargs", "[", "key", "]", "=", "getattr", "(", "parent", ",", "value", ")", "assert", "\"cdf\"", "in", "kwargs", ",", "\"cdf function must be defined\"", "assert", "\"bnd\"", "in", "kwargs", ",", "\"bnd function must be defined\"", "if", "\"str\"", "in", "kwargs", "and", "isinstance", "(", "kwargs", "[", "\"str\"", "]", ",", "str", ")", ":", "string", "=", "kwargs", ".", "pop", "(", "\"str\"", ")", "kwargs", "[", "\"str\"", "]", "=", "lambda", "*", "args", ",", "*", "*", "kwargs", ":", "string", "defaults", "=", "defaults", "if", "defaults", "else", "{", "}", "for", "key", "in", "defaults", ":", "assert", "key", "in", "LEGAL_ATTRS", ",", "\"invalid default value {}\"", ".", "format", "(", "key", ")", "def", "custom_distribution", "(", "*", "*", "kws", ")", ":", "prm", "=", "defaults", ".", "copy", "(", ")", "prm", ".", "update", "(", "kws", ")", "dist", "=", "Dist", "(", "*", "*", "prm", ")", "for", "key", ",", "function", "in", "kwargs", ".", "items", "(", ")", ":", "attr_name", "=", "LEGAL_ATTRS", "[", "key", "]", "setattr", "(", "dist", ",", "attr_name", ",", "types", ".", "MethodType", "(", "function", ",", "dist", ")", ")", "return", "dist", "if", "\"doc\"", "in", "kwargs", ":", "custom_distribution", ".", "__doc__", "=", "kwargs", "[", "\"doc\"", "]", "return", "custom_distribution" ]
31.439394
19.893939
def fetch(self): """ Fetch a ExecutionStepInstance :returns: Fetched ExecutionStepInstance :rtype: twilio.rest.studio.v1.flow.execution.execution_step.ExecutionStepInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return ExecutionStepInstance( self._version, payload, flow_sid=self._solution['flow_sid'], execution_sid=self._solution['execution_sid'], sid=self._solution['sid'], )
[ "def", "fetch", "(", "self", ")", ":", "params", "=", "values", ".", "of", "(", "{", "}", ")", "payload", "=", "self", ".", "_version", ".", "fetch", "(", "'GET'", ",", "self", ".", "_uri", ",", "params", "=", "params", ",", ")", "return", "ExecutionStepInstance", "(", "self", ".", "_version", ",", "payload", ",", "flow_sid", "=", "self", ".", "_solution", "[", "'flow_sid'", "]", ",", "execution_sid", "=", "self", ".", "_solution", "[", "'execution_sid'", "]", ",", "sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")" ]
26.818182
18
def register_job(self, job_details): """Register a job in this `JobArchive` """ # check to see if the job already exists try: job_details_old = self.get_details(job_details.jobname, job_details.jobkey) if job_details_old.status <= JobStatus.running: job_details_old.status = job_details.status job_details_old.update_table_row( self._table, job_details_old.dbkey - 1) job_details = job_details_old except KeyError: job_details.dbkey = len(self._table) + 1 job_details.get_file_ids( self._file_archive, creator=job_details.dbkey) job_details.append_to_tables(self._table, self._table_ids) self._table_id_array = self._table_ids['file_id'].data self._cache[job_details.fullkey] = job_details return job_details
[ "def", "register_job", "(", "self", ",", "job_details", ")", ":", "# check to see if the job already exists", "try", ":", "job_details_old", "=", "self", ".", "get_details", "(", "job_details", ".", "jobname", ",", "job_details", ".", "jobkey", ")", "if", "job_details_old", ".", "status", "<=", "JobStatus", ".", "running", ":", "job_details_old", ".", "status", "=", "job_details", ".", "status", "job_details_old", ".", "update_table_row", "(", "self", ".", "_table", ",", "job_details_old", ".", "dbkey", "-", "1", ")", "job_details", "=", "job_details_old", "except", "KeyError", ":", "job_details", ".", "dbkey", "=", "len", "(", "self", ".", "_table", ")", "+", "1", "job_details", ".", "get_file_ids", "(", "self", ".", "_file_archive", ",", "creator", "=", "job_details", ".", "dbkey", ")", "job_details", ".", "append_to_tables", "(", "self", ".", "_table", ",", "self", ".", "_table_ids", ")", "self", ".", "_table_id_array", "=", "self", ".", "_table_ids", "[", "'file_id'", "]", ".", "data", "self", ".", "_cache", "[", "job_details", ".", "fullkey", "]", "=", "job_details", "return", "job_details" ]
49.105263
15.421053
def create_docker_credentials_file(
        username,
        password,
        file_name='docker.tar.gz'):
    """ Create a docker credentials file.

        Docker username and password are used to create a `{file_name}`
        with `.docker/config.json` containing the credentials.

        :param username: docker username
        :type username: str
        :param password: docker password
        :type password: str
        :param file_name: credentials file name `docker.tar.gz` by default
        :type file_name: str
    """
    import base64
    auth_hash = base64.b64encode(
        '{}:{}'.format(username, password).encode()).decode()

    config_json = {
        "auths": {
            "https://index.docker.io/v1/": {"auth": auth_hash}
        }
    }

    config_json_filename = 'config.json'
    # Write config.json to file
    with open(config_json_filename, 'w') as f:
        json.dump(config_json, f, indent=4)

    try:
        # Create a docker.tar.gz
        import tarfile
        with tarfile.open(file_name, 'w:gz') as tar:
            tar.add(config_json_filename, arcname='.docker/config.json')
            tar.close()
    except Exception as e:
        print('Failed to create a docker credentials file {}'.format(e))
        raise e
    finally:
        os.remove(config_json_filename)
[ "def", "create_docker_credentials_file", "(", "username", ",", "password", ",", "file_name", "=", "'docker.tar.gz'", ")", ":", "import", "base64", "auth_hash", "=", "base64", ".", "b64encode", "(", "'{}:{}'", ".", "format", "(", "username", ",", "password", ")", ".", "encode", "(", ")", ")", ".", "decode", "(", ")", "config_json", "=", "{", "\"auths\"", ":", "{", "\"https://index.docker.io/v1/\"", ":", "{", "\"auth\"", ":", "auth_hash", "}", "}", "}", "config_json_filename", "=", "'config.json'", "# Write config.json to file", "with", "open", "(", "config_json_filename", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "config_json", ",", "f", ",", "indent", "=", "4", ")", "try", ":", "# Create a docker.tar.gz", "import", "tarfile", "with", "tarfile", ".", "open", "(", "file_name", ",", "'w:gz'", ")", "as", "tar", ":", "tar", ".", "add", "(", "config_json_filename", ",", "arcname", "=", "'.docker/config.json'", ")", "tar", ".", "close", "(", ")", "except", "Exception", "as", "e", ":", "print", "(", "'Failed to create a docker credentils file {}'", ".", "format", "(", "e", ")", ")", "raise", "e", "finally", ":", "os", ".", "remove", "(", "config_json_filename", ")" ]
30.404762
19.02381
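A usage sketch for the helper above; the username and password are placeholders, and the resulting tarball can be inspected directly:

import tarfile

create_docker_credentials_file('alice', 's3cret', file_name='docker.tar.gz')
with tarfile.open('docker.tar.gz', 'r:gz') as tar:
    print(tar.getnames())  # ['.docker/config.json']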
def _readable(self): """The readable parsed article""" if not self.candidates: logger.info("No candidates found in document.") return self._handle_no_candidates() # right now we return the highest scoring candidate content best_candidates = sorted( (c for c in self.candidates.values()), key=attrgetter("content_score"), reverse=True) printer = PrettyPrinter(indent=2) logger.debug(printer.pformat(best_candidates)) # since we have several candidates, check the winner's siblings # for extra content winner = best_candidates[0] updated_winner = check_siblings(winner, self.candidates) updated_winner.node = prep_article(updated_winner.node) if updated_winner.node is not None: dom = build_base_document( updated_winner.node, self._return_fragment) else: logger.info( 'Had candidates but failed to find a cleaned winning DOM.') dom = self._handle_no_candidates() return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
[ "def", "_readable", "(", "self", ")", ":", "if", "not", "self", ".", "candidates", ":", "logger", ".", "info", "(", "\"No candidates found in document.\"", ")", "return", "self", ".", "_handle_no_candidates", "(", ")", "# right now we return the highest scoring candidate content", "best_candidates", "=", "sorted", "(", "(", "c", "for", "c", "in", "self", ".", "candidates", ".", "values", "(", ")", ")", ",", "key", "=", "attrgetter", "(", "\"content_score\"", ")", ",", "reverse", "=", "True", ")", "printer", "=", "PrettyPrinter", "(", "indent", "=", "2", ")", "logger", ".", "debug", "(", "printer", ".", "pformat", "(", "best_candidates", ")", ")", "# since we have several candidates, check the winner's siblings", "# for extra content", "winner", "=", "best_candidates", "[", "0", "]", "updated_winner", "=", "check_siblings", "(", "winner", ",", "self", ".", "candidates", ")", "updated_winner", ".", "node", "=", "prep_article", "(", "updated_winner", ".", "node", ")", "if", "updated_winner", ".", "node", "is", "not", "None", ":", "dom", "=", "build_base_document", "(", "updated_winner", ".", "node", ",", "self", ".", "_return_fragment", ")", "else", ":", "logger", ".", "info", "(", "'Had candidates but failed to find a cleaned winning DOM.'", ")", "dom", "=", "self", ".", "_handle_no_candidates", "(", ")", "return", "self", ".", "_remove_orphans", "(", "dom", ".", "get_element_by_id", "(", "\"readabilityBody\"", ")", ")" ]
40.571429
19.035714
def validate_get_arguments(kwargs): # type: (Dict[Text, Any]) -> None """Verify that attribute filtering parameters are not found in the request. :raises InvalidArgumentError: if banned parameters are found """ for arg in ("AttributesToGet", "ProjectionExpression"): if arg in kwargs: raise InvalidArgumentError('"{}" is not supported for this operation'.format(arg)) if kwargs.get("Select", None) in ("SPECIFIC_ATTRIBUTES", "ALL_PROJECTED_ATTRIBUTES"): raise InvalidArgumentError('Scan "Select" value of "{}" is not supported'.format(kwargs["Select"]))
[ "def", "validate_get_arguments", "(", "kwargs", ")", ":", "# type: (Dict[Text, Any]) -> None", "for", "arg", "in", "(", "\"AttributesToGet\"", ",", "\"ProjectionExpression\"", ")", ":", "if", "arg", "in", "kwargs", ":", "raise", "InvalidArgumentError", "(", "'\"{}\" is not supported for this operation'", ".", "format", "(", "arg", ")", ")", "if", "kwargs", ".", "get", "(", "\"Select\"", ",", "None", ")", "in", "(", "\"SPECIFIC_ATTRIBUTES\"", ",", "\"ALL_PROJECTED_ATTRIBUTES\"", ")", ":", "raise", "InvalidArgumentError", "(", "'Scan \"Select\" value of \"{}\" is not supported'", ".", "format", "(", "kwargs", "[", "\"Select\"", "]", ")", ")" ]
49.666667
26.333333
def _set_repository_view(self, session): """Sets the underlying repository view to match current view""" if self._repository_view == FEDERATED: try: session.use_federated_repository_view() except AttributeError: pass else: try: session.use_isolated_repository_view() except AttributeError: pass
[ "def", "_set_repository_view", "(", "self", ",", "session", ")", ":", "if", "self", ".", "_repository_view", "==", "FEDERATED", ":", "try", ":", "session", ".", "use_federated_repository_view", "(", ")", "except", "AttributeError", ":", "pass", "else", ":", "try", ":", "session", ".", "use_isolated_repository_view", "(", ")", "except", "AttributeError", ":", "pass" ]
34.916667
13.5
def _get_orb_type_lobster(orb): """ Args: orb: string representation of orbital Returns: OrbitalType """ orb_labs = ["s", "p_y", "p_z", "p_x", "d_xy", "d_yz", "d_z^2", "d_xz", "d_x^2-y^2", "f_y(3x^2-y^2)", "f_xyz", "f_yz^2", "f_z^3", "f_xz^2", "f_z(x^2-y^2)", "f_x(x^2-3y^2)"] try: orbital = Orbital(orb_labs.index(orb[1:])) return orbital.orbital_type except AttributeError: print("Orb not in list")
[ "def", "_get_orb_type_lobster", "(", "orb", ")", ":", "orb_labs", "=", "[", "\"s\"", ",", "\"p_y\"", ",", "\"p_z\"", ",", "\"p_x\"", ",", "\"d_xy\"", ",", "\"d_yz\"", ",", "\"d_z^2\"", ",", "\"d_xz\"", ",", "\"d_x^2-y^2\"", ",", "\"f_y(3x^2-y^2)\"", ",", "\"f_xyz\"", ",", "\"f_yz^2\"", ",", "\"f_z^3\"", ",", "\"f_xz^2\"", ",", "\"f_z(x^2-y^2)\"", ",", "\"f_x(x^2-3y^2)\"", "]", "try", ":", "orbital", "=", "Orbital", "(", "orb_labs", ".", "index", "(", "orb", "[", "1", ":", "]", ")", ")", "return", "orbital", ".", "orbital_type", "except", "AttributeError", ":", "print", "(", "\"Orb not in list\"", ")" ]
30
18
def __get_stack_trace(self, depth = 16, bUseLabels = True,
                                                   bMakePretty = True):
        """
        Tries to get a stack trace for the current function using the
        debug helper API (dbghelp.dll).

        @type  depth: int
        @param depth: Maximum depth of stack trace.

        @type  bUseLabels: bool
        @param bUseLabels: C{True} to use labels, C{False} to use addresses.

        @type  bMakePretty: bool
        @param bMakePretty:
            C{True} for user readable labels,
            C{False} for labels that can be passed to L{Process.resolve_label}.

            "Pretty" labels look better when producing output for the user to
            read, while pure labels are more useful programmatically.

        @rtype:  tuple of tuple( int, int, str )
        @return: Stack trace of the thread as a tuple of
            ( return address, frame pointer address, module filename )
            when C{bUseLabels} is C{True}, or a tuple of
            ( return address, frame pointer label )
            when C{bUseLabels} is C{False}.

        @raise WindowsError: Raises an exception on error.
        """
        aProcess = self.get_process()
        arch = aProcess.get_arch()
        bits = aProcess.get_bits()

        if arch == win32.ARCH_I386:
            MachineType = win32.IMAGE_FILE_MACHINE_I386
        elif arch == win32.ARCH_AMD64:
            MachineType = win32.IMAGE_FILE_MACHINE_AMD64
        elif arch == win32.ARCH_IA64:
            MachineType = win32.IMAGE_FILE_MACHINE_IA64
        else:
            msg = "Stack walking is not available for this architecture: %s"
            raise NotImplementedError(msg % arch)

        hProcess = aProcess.get_handle( win32.PROCESS_VM_READ |
                                        win32.PROCESS_QUERY_INFORMATION )
        hThread  = self.get_handle( win32.THREAD_GET_CONTEXT |
                                    win32.THREAD_QUERY_INFORMATION )

        StackFrame = win32.STACKFRAME64()
        StackFrame.AddrPC = win32.ADDRESS64( self.get_pc() )
        StackFrame.AddrFrame = win32.ADDRESS64( self.get_fp() )
        StackFrame.AddrStack = win32.ADDRESS64( self.get_sp() )

        trace = list()
        while win32.StackWalk64(MachineType, hProcess, hThread, StackFrame):
            if depth <= 0:
                break
            fp = StackFrame.AddrFrame.Offset
            ra = aProcess.peek_pointer(fp + 4)
            if ra == 0:
                break
            lib = aProcess.get_module_at_address(ra)
            if lib is None:
                lib = ""
            else:
                if lib.fileName:
                    lib = lib.fileName
                else:
                    lib = "%s" % HexDump.address(lib.lpBaseOfDll, bits)
            if bUseLabels:
                label = aProcess.get_label_at_address(ra)
                if bMakePretty:
                    label = '%s (%s)' % (HexDump.address(ra, bits), label)
                trace.append( (fp, label) )
            else:
                trace.append( (fp, ra, lib) )
            fp = aProcess.peek_pointer(fp)
        return tuple(trace)
[ "def", "__get_stack_trace", "(", "self", ",", "depth", "=", "16", ",", "bUseLabels", "=", "True", ",", "bMakePretty", "=", "True", ")", ":", "aProcess", "=", "self", ".", "get_process", "(", ")", "arch", "=", "aProcess", ".", "get_arch", "(", ")", "bits", "=", "aProcess", ".", "get_bits", "(", ")", "if", "arch", "==", "win32", ".", "ARCH_I386", ":", "MachineType", "=", "win32", ".", "IMAGE_FILE_MACHINE_I386", "elif", "arch", "==", "win32", ".", "ARCH_AMD64", ":", "MachineType", "=", "win32", ".", "IMAGE_FILE_MACHINE_AMD64", "elif", "arch", "==", "win32", ".", "ARCH_IA64", ":", "MachineType", "=", "win32", ".", "IMAGE_FILE_MACHINE_IA64", "else", ":", "msg", "=", "\"Stack walking is not available for this architecture: %s\"", "raise", "NotImplementedError", "(", "msg", "%", "arch", ")", "hProcess", "=", "aProcess", ".", "get_handle", "(", "win32", ".", "PROCESS_VM_READ", "|", "win32", ".", "PROCESS_QUERY_INFORMATION", ")", "hThread", "=", "self", ".", "get_handle", "(", "win32", ".", "THREAD_GET_CONTEXT", "|", "win32", ".", "THREAD_QUERY_INFORMATION", ")", "StackFrame", "=", "win32", ".", "STACKFRAME64", "(", ")", "StackFrame", ".", "AddrPC", "=", "win32", ".", "ADDRESS64", "(", "self", ".", "get_pc", "(", ")", ")", "StackFrame", ".", "AddrFrame", "=", "win32", ".", "ADDRESS64", "(", "self", ".", "get_fp", "(", ")", ")", "StackFrame", ".", "AddrStack", "=", "win32", ".", "ADDRESS64", "(", "self", ".", "get_sp", "(", ")", ")", "trace", "=", "list", "(", ")", "while", "win32", ".", "StackWalk64", "(", "MachineType", ",", "hProcess", ",", "hThread", ",", "StackFrame", ")", ":", "if", "depth", "<=", "0", ":", "break", "fp", "=", "StackFrame", ".", "AddrFrame", ".", "Offset", "ra", "=", "aProcess", ".", "peek_pointer", "(", "fp", "+", "4", ")", "if", "ra", "==", "0", ":", "break", "lib", "=", "aProcess", ".", "get_module_at_address", "(", "ra", ")", "if", "lib", "is", "None", ":", "lib", "=", "\"\"", "else", ":", "if", "lib", ".", "fileName", ":", "lib", "=", "lib", ".", "fileName", "else", ":", "lib", "=", "\"%s\"", "%", "HexDump", ".", "address", "(", "lib", ".", "lpBaseOfDll", ",", "bits", ")", "if", "bUseLabels", ":", "label", "=", "aProcess", ".", "get_label_at_address", "(", "ra", ")", "if", "bMakePretty", ":", "label", "=", "'%s (%s)'", "%", "(", "HexDump", ".", "address", "(", "ra", ",", "bits", ")", ",", "label", ")", "trace", ".", "append", "(", "(", "fp", ",", "label", ")", ")", "else", ":", "trace", ".", "append", "(", "(", "fp", ",", "ra", ",", "lib", ")", ")", "fp", "=", "aProcess", ".", "peek_pointer", "(", "fp", ")", "return", "tuple", "(", "trace", ")" ]
39.21519
19.468354
def to_text(self): """Render a MessageElement queue as plain text. :returns: Plain text representation of the message. :rtype: str """ message = '' last_was_text = False for m in self.message: if last_was_text and not isinstance(m, Text): message += '\n' message += m.to_text() if isinstance(m, Text): last_was_text = True else: message += '\n' last_was_text = False return message
[ "def", "to_text", "(", "self", ")", ":", "message", "=", "''", "last_was_text", "=", "False", "for", "m", "in", "self", ".", "message", ":", "if", "last_was_text", "and", "not", "isinstance", "(", "m", ",", "Text", ")", ":", "message", "+=", "'\\n'", "message", "+=", "m", ".", "to_text", "(", ")", "if", "isinstance", "(", "m", ",", "Text", ")", ":", "last_was_text", "=", "True", "else", ":", "message", "+=", "'\\n'", "last_was_text", "=", "False", "return", "message" ]
27.05
15.85
def weave(*iterables): r"""weave(seq1 [, seq2] [...]) -> iter([seq1[0], seq2[0] ...]). >>> list(weave([1,2,3], [4,5,6,'A'], [6,7,8, 'B', 'C'])) [1, 4, 6, 2, 5, 7, 3, 6, 8] Any iterable will work. The first exhausted iterable determines when to stop. FIXME rethink stopping semantics. >>> list(weave(iter(('is','psu')), ('there','no', 'censorship'))) ['is', 'there', 'psu', 'no'] >>> list(weave(('there','no', 'censorship'), iter(('is','psu')))) ['there', 'is', 'no', 'psu', 'censorship'] """ iterables = map(iter, iterables) while True: for it in iterables: yield it.next()
[ "def", "weave", "(", "*", "iterables", ")", ":", "iterables", "=", "map", "(", "iter", ",", "iterables", ")", "while", "True", ":", "for", "it", "in", "iterables", ":", "yield", "it", ".", "next", "(", ")" ]
36.294118
18.235294
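weave as written is Python 2 (it.next(), list-returning map); a Python 3 port with the same stop-on-first-exhausted behaviour, as a sketch:

def weave3(*iterables):
    """Interleave items round-robin until the first iterable runs dry."""
    iterators = [iter(it) for it in iterables]
    while True:
        for it in iterators:
            try:
                yield next(it)
            except StopIteration:
                return  # PEP 479: use return, not StopIteration, in a generator

print(list(weave3([1, 2, 3], [4, 5, 6, 'A'], [6, 7, 8, 'B', 'C'])))
# [1, 4, 6, 2, 5, 7, 3, 6, 8]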
def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame: """Return a fully recoded dataframe. Args: table (pd.DataFrame): A dataframe on which to apply recoding logic. validate (bool): If ``True``, recoded table must pass validation tests. """ df = pd.DataFrame(index=table.index) for column in self.columns: df = column.update_dataframe(df, table=table, validate=validate) return df
[ "def", "recode", "(", "self", ",", "table", ":", "pd", ".", "DataFrame", ",", "validate", "=", "False", ")", "->", "pd", ".", "DataFrame", ":", "df", "=", "pd", ".", "DataFrame", "(", "index", "=", "table", ".", "index", ")", "for", "column", "in", "self", ".", "columns", ":", "df", "=", "column", ".", "update_dataframe", "(", "df", ",", "table", "=", "table", ",", "validate", "=", "validate", ")", "return", "df" ]
36.307692
25.153846
def cget(self, key): """ Query widget option. :param key: option name :type key: str :return: value of the option To get the list of options for this widget, call the method :meth:`~ScaleEntry.keys`. """ if key == 'scalewidth': return self._scale.cget('length') elif key == 'from': return self._scale.cget('from') elif key == 'to': return self._scale.cget('to') elif key == 'entrywidth': return self._entry.cget('width') elif key == 'entryscalepad': return self.__entryscalepad elif key == 'compound': return self.__compound elif key == 'orient': return str(self._scale.cget('orient')) else: return ttk.Frame.cget(self, key)
[ "def", "cget", "(", "self", ",", "key", ")", ":", "if", "key", "==", "'scalewidth'", ":", "return", "self", ".", "_scale", ".", "cget", "(", "'length'", ")", "elif", "key", "==", "'from'", ":", "return", "self", ".", "_scale", ".", "cget", "(", "'from'", ")", "elif", "key", "==", "'to'", ":", "return", "self", ".", "_scale", ".", "cget", "(", "'to'", ")", "elif", "key", "==", "'entrywidth'", ":", "return", "self", ".", "_entry", ".", "cget", "(", "'width'", ")", "elif", "key", "==", "'entryscalepad'", ":", "return", "self", ".", "__entryscalepad", "elif", "key", "==", "'compound'", ":", "return", "self", ".", "__compound", "elif", "key", "==", "'orient'", ":", "return", "str", "(", "self", ".", "_scale", ".", "cget", "(", "'orient'", ")", ")", "else", ":", "return", "ttk", ".", "Frame", ".", "cget", "(", "self", ",", "key", ")" ]
31.423077
12.5
def unindent(self): """ Un-indents text at cursor position. """ _logger().debug('unindent') cursor = self.editor.textCursor() _logger().debug('cursor has selection %r', cursor.hasSelection()) if cursor.hasSelection(): cursor.beginEditBlock() self.unindent_selection(cursor) cursor.endEditBlock() self.editor.setTextCursor(cursor) else: tab_len = self.editor.tab_length indentation = cursor.positionInBlock() indentation -= self.min_column if indentation == 0: return max_spaces = indentation % tab_len if max_spaces == 0: max_spaces = tab_len spaces = self.count_deletable_spaces(cursor, max_spaces) _logger().info('deleting %d space before cursor' % spaces) cursor.beginEditBlock() for _ in range(spaces): cursor.deletePreviousChar() cursor.endEditBlock() self.editor.setTextCursor(cursor) _logger().debug(cursor.block().text())
[ "def", "unindent", "(", "self", ")", ":", "_logger", "(", ")", ".", "debug", "(", "'unindent'", ")", "cursor", "=", "self", ".", "editor", ".", "textCursor", "(", ")", "_logger", "(", ")", ".", "debug", "(", "'cursor has selection %r'", ",", "cursor", ".", "hasSelection", "(", ")", ")", "if", "cursor", ".", "hasSelection", "(", ")", ":", "cursor", ".", "beginEditBlock", "(", ")", "self", ".", "unindent_selection", "(", "cursor", ")", "cursor", ".", "endEditBlock", "(", ")", "self", ".", "editor", ".", "setTextCursor", "(", "cursor", ")", "else", ":", "tab_len", "=", "self", ".", "editor", ".", "tab_length", "indentation", "=", "cursor", ".", "positionInBlock", "(", ")", "indentation", "-=", "self", ".", "min_column", "if", "indentation", "==", "0", ":", "return", "max_spaces", "=", "indentation", "%", "tab_len", "if", "max_spaces", "==", "0", ":", "max_spaces", "=", "tab_len", "spaces", "=", "self", ".", "count_deletable_spaces", "(", "cursor", ",", "max_spaces", ")", "_logger", "(", ")", ".", "info", "(", "'deleting %d space before cursor'", "%", "spaces", ")", "cursor", ".", "beginEditBlock", "(", ")", "for", "_", "in", "range", "(", "spaces", ")", ":", "cursor", ".", "deletePreviousChar", "(", ")", "cursor", ".", "endEditBlock", "(", ")", "self", ".", "editor", ".", "setTextCursor", "(", "cursor", ")", "_logger", "(", ")", ".", "debug", "(", "cursor", ".", "block", "(", ")", ".", "text", "(", ")", ")" ]
37.233333
10.366667