Columns: text (string, lengths 89 to 104k); code_tokens (sequence); avg_line_len (float64, 7.91 to 980); score (float64, 0 to 630)
def delete_episode(db, aid, episode): """Delete an episode.""" db.cursor().execute( 'DELETE FROM episode WHERE aid=:aid AND type=:type AND number=:number', { 'aid': aid, 'type': episode.type, 'number': episode.number, })
[ "def", "delete_episode", "(", "db", ",", "aid", ",", "episode", ")", ":", "db", ".", "cursor", "(", ")", ".", "execute", "(", "'DELETE FROM episode WHERE aid=:aid AND type=:type AND number=:number'", ",", "{", "'aid'", ":", "aid", ",", "'type'", ":", "episode", ".", "type", ",", "'number'", ":", "episode", ".", "number", ",", "}", ")" ]
31.111111
16.222222
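A minimal sketch of how the delete_episode function above could be exercised against an in-memory SQLite database. The episode table layout and the Episode record type are assumptions made for illustration; only the named-parameter DELETE statement comes from the row itself.
import sqlite3
from collections import namedtuple

# Hypothetical schema and record type, assumed only for this sketch.
Episode = namedtuple('Episode', 'type number')
db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE episode (aid INTEGER, type INTEGER, number INTEGER)')
db.execute('INSERT INTO episode VALUES (1, 1, 3)')

delete_episode(db, aid=1, episode=Episode(type=1, number=3))
assert db.execute('SELECT COUNT(*) FROM episode').fetchone()[0] == 0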
def describe_object(self, obj): """ Get the description of an object from Salesforce. This description is the object's schema and some extra metadata that Salesforce stores for each object. :param obj: The name of the Salesforce object that we are getting a description of. :type obj: str :return: the description of the Salesforce object. :rtype: dict """ conn = self.get_conn() return conn.__getattr__(obj).describe()
[ "def", "describe_object", "(", "self", ",", "obj", ")", ":", "conn", "=", "self", ".", "get_conn", "(", ")", "return", "conn", ".", "__getattr__", "(", "obj", ")", ".", "describe", "(", ")" ]
35.428571
19.142857
def echo_to_output_stream(self, email_messages): """ Write all messages to the stream in a thread-safe way. """ if not email_messages: return with self._lock: try: stream_created = self.open() for message in email_messages: self.write_to_stream(message) self.stream.flush() # flush after each message if stream_created: self.close() except Exception: if not self.fail_silently: raise
[ "def", "echo_to_output_stream", "(", "self", ",", "email_messages", ")", ":", "if", "not", "email_messages", ":", "return", "with", "self", ".", "_lock", ":", "try", ":", "stream_created", "=", "self", ".", "open", "(", ")", "for", "message", "in", "email_messages", ":", "self", ".", "write_to_stream", "(", "message", ")", "self", ".", "stream", ".", "flush", "(", ")", "# flush after each message", "if", "stream_created", ":", "self", ".", "close", "(", ")", "except", "Exception", ":", "if", "not", "self", ".", "fail_silently", ":", "raise" ]
38.266667
11.2
def _read_section(self): """Read and return an entire section""" lines = [self._last[self._last.find(":")+1:]] self._last = self._f.readline() while len(self._last) > 0 and len(self._last[0].strip()) == 0: lines.append(self._last) self._last = self._f.readline() return lines
[ "def", "_read_section", "(", "self", ")", ":", "lines", "=", "[", "self", ".", "_last", "[", "self", ".", "_last", ".", "find", "(", "\":\"", ")", "+", "1", ":", "]", "]", "self", ".", "_last", "=", "self", ".", "_f", ".", "readline", "(", ")", "while", "len", "(", "self", ".", "_last", ")", ">", "0", "and", "len", "(", "self", ".", "_last", "[", "0", "]", ".", "strip", "(", ")", ")", "==", "0", ":", "lines", ".", "append", "(", "self", ".", "_last", ")", "self", ".", "_last", "=", "self", ".", "_f", ".", "readline", "(", ")", "return", "lines" ]
41.5
10.875
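A runnable sketch of the _read_section method above on a minimal host class; the attributes _f (the open file) and _last (the most recently read raw line) are inferred from the method body, not documented in the row.
import io

class SectionReader:
    def __init__(self, f):
        self._f = f
        self._last = f.readline()  # prime the one-line lookahead buffer

SectionReader._read_section = _read_section  # attach the function above as a method

reader = SectionReader(io.StringIO("Header: one\n two\n three\nNext: x\n"))
print(reader._read_section())  # [' one\n', ' two\n', ' three\n']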
def _shorten_version(ver, num_components=2): """ If ``ver`` is a dot-separated string with at least (num_components + 1) components, return only the first ``num_components``. Else return the original string. :param ver: version string :type ver: str :return: shortened version string :rtype: str """ parts = ver.split('.') if len(parts) <= num_components: return ver return '.'.join(parts[:num_components])
[ "def", "_shorten_version", "(", "ver", ",", "num_components", "=", "2", ")", ":", "parts", "=", "ver", ".", "split", "(", "'.'", ")", "if", "len", "(", "parts", ")", "<=", "num_components", ":", "return", "ver", "return", "'.'", ".", "join", "(", "parts", "[", ":", "num_components", "]", ")" ]
34.714286
15
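The behavior of _shorten_version is easy to pin down with a few checks; the expected values below are derived by reading the function, not taken from its original test suite.
assert _shorten_version('3.10.4') == '3.10'                      # default keeps two components
assert _shorten_version('3.10.4', num_components=3) == '3.10.4'  # already short enough
assert _shorten_version('3.10') == '3.10'                        # nothing to trim
assert _shorten_version('3.10.4.1', num_components=1) == '3'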
def send_voice(self, *args, **kwargs): """See :func:`send_voice`""" return send_voice(*args, **self._merge_overrides(**kwargs)).run()
[ "def", "send_voice", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "send_voice", "(", "*", "args", ",", "*", "*", "self", ".", "_merge_overrides", "(", "*", "*", "kwargs", ")", ")", ".", "run", "(", ")" ]
49
11.666667
def _check_bullets(lines, **kwargs): """Check that the bullet point list is well formatted. Each bullet point shall have one space before and after it. The bullet character is the "*"; there is no space before it but one after it, meaning that continuation lines start with two blank spaces to respect the indentation. :param lines: all the lines of the message :type lines: list :param max_length: maximum length of any line. (Default 72) :return: errors as in (code, line number, *args) :rtype: list """ max_length = kwargs.get("max_length", 72) labels = {l for l, _ in kwargs.get("commit_msg_labels", tuple())} def _strip_ticket_directives(line): return re.sub(r'( \([^)]*\)){1,}$', '', line) errors = [] missed_lines = [] skipped = [] for (i, line) in enumerate(lines[1:]): if line.startswith('*'): dot_found = False if len(missed_lines) > 0: errors.append(("M130", i + 2)) if lines[i].strip() != '': errors.append(("M120", i + 2)) if _strip_ticket_directives(line).endswith('.'): dot_found = True label = _re_bullet_label.search(line) if label and label.group('label') not in labels: errors.append(("M122", i + 2, label.group('label'))) for (j, indented) in enumerate(lines[i + 2:]): if indented.strip() == '': break if not re.search(r"^ {2}\S", indented): errors.append(("M121", i + j + 3)) else: skipped.append(i + j + 1) stripped_line = _strip_ticket_directives(indented) if stripped_line.endswith('.'): dot_found = True elif stripped_line.strip(): dot_found = False if not dot_found: errors.append(("M123", i + 2)) elif i not in skipped and line.strip(): missed_lines.append((i + 2, line)) if len(line) > max_length: errors.append(("M190", i + 2, max_length, len(line))) return errors, missed_lines
[ "def", "_check_bullets", "(", "lines", ",", "*", "*", "kwargs", ")", ":", "max_length", "=", "kwargs", ".", "get", "(", "\"max_length\"", ",", "72", ")", "labels", "=", "{", "l", "for", "l", ",", "_", "in", "kwargs", ".", "get", "(", "\"commit_msg_labels\"", ",", "tuple", "(", ")", ")", "}", "def", "_strip_ticket_directives", "(", "line", ")", ":", "return", "re", ".", "sub", "(", "r'( \\([^)]*\\)){1,}$'", ",", "''", ",", "line", ")", "errors", "=", "[", "]", "missed_lines", "=", "[", "]", "skipped", "=", "[", "]", "for", "(", "i", ",", "line", ")", "in", "enumerate", "(", "lines", "[", "1", ":", "]", ")", ":", "if", "line", ".", "startswith", "(", "'*'", ")", ":", "dot_found", "=", "False", "if", "len", "(", "missed_lines", ")", ">", "0", ":", "errors", ".", "append", "(", "(", "\"M130\"", ",", "i", "+", "2", ")", ")", "if", "lines", "[", "i", "]", ".", "strip", "(", ")", "!=", "''", ":", "errors", ".", "append", "(", "(", "\"M120\"", ",", "i", "+", "2", ")", ")", "if", "_strip_ticket_directives", "(", "line", ")", ".", "endswith", "(", "'.'", ")", ":", "dot_found", "=", "True", "label", "=", "_re_bullet_label", ".", "search", "(", "line", ")", "if", "label", "and", "label", ".", "group", "(", "'label'", ")", "not", "in", "labels", ":", "errors", ".", "append", "(", "(", "\"M122\"", ",", "i", "+", "2", ",", "label", ".", "group", "(", "'label'", ")", ")", ")", "for", "(", "j", ",", "indented", ")", "in", "enumerate", "(", "lines", "[", "i", "+", "2", ":", "]", ")", ":", "if", "indented", ".", "strip", "(", ")", "==", "''", ":", "break", "if", "not", "re", ".", "search", "(", "r\"^ {2}\\S\"", ",", "indented", ")", ":", "errors", ".", "append", "(", "(", "\"M121\"", ",", "i", "+", "j", "+", "3", ")", ")", "else", ":", "skipped", ".", "append", "(", "i", "+", "j", "+", "1", ")", "stripped_line", "=", "_strip_ticket_directives", "(", "indented", ")", "if", "stripped_line", ".", "endswith", "(", "'.'", ")", ":", "dot_found", "=", "True", "elif", "stripped_line", ".", "strip", "(", ")", ":", "dot_found", "=", "False", "if", "not", "dot_found", ":", "errors", ".", "append", "(", "(", "\"M123\"", ",", "i", "+", "2", ")", ")", "elif", "i", "not", "in", "skipped", "and", "line", ".", "strip", "(", ")", ":", "missed_lines", ".", "append", "(", "(", "i", "+", "2", ",", "line", ")", ")", "if", "len", "(", "line", ")", ">", "max_length", ":", "errors", ".", "append", "(", "(", "\"M190\"", ",", "i", "+", "2", ",", "max_length", ",", "len", "(", "line", ")", ")", ")", "return", "errors", ",", "missed_lines" ]
35.209677
18.419355
def t_quotedvar_DOLLAR_OPEN_CURLY_BRACES(t): r'\$\{' if re.match(r'[A-Za-z_]', peek(t.lexer)): t.lexer.begin('varname') else: t.lexer.begin('php') return t
[ "def", "t_quotedvar_DOLLAR_OPEN_CURLY_BRACES", "(", "t", ")", ":", "if", "re", ".", "match", "(", "r'[A-Za-z_]'", ",", "peek", "(", "t", ".", "lexer", ")", ")", ":", "t", ".", "lexer", ".", "begin", "(", "'varname'", ")", "else", ":", "t", ".", "lexer", ".", "begin", "(", "'php'", ")", "return", "t" ]
25.857143
16.714286
def is_valid(self): """ Is every geometry connected to the root node? Returns ----------- is_valid : bool Does every geometry have a transform? """ if len(self.geometry) == 0: return True try: referenced = {self.graph[i][1] for i in self.graph.nodes_geometry} except BaseException: # if connectivity to world frame is broken return false return False # every geometry is referenced ok = referenced == set(self.geometry.keys()) return ok
[ "def", "is_valid", "(", "self", ")", ":", "if", "len", "(", "self", ".", "geometry", ")", "==", "0", ":", "return", "True", "try", ":", "referenced", "=", "{", "self", ".", "graph", "[", "i", "]", "[", "1", "]", "for", "i", "in", "self", ".", "graph", ".", "nodes_geometry", "}", "except", "BaseException", ":", "# if connectivity to world frame is broken return false", "return", "False", "# every geometry is referenced", "ok", "=", "referenced", "==", "set", "(", "self", ".", "geometry", ".", "keys", "(", ")", ")", "return", "ok" ]
25.956522
18.565217
def set_redraw_lag(self, lag_sec): """Set lag time for redrawing the canvas. Parameters ---------- lag_sec : float Number of seconds to wait. """ self.defer_redraw = (lag_sec > 0.0) if self.defer_redraw: self.defer_lagtime = lag_sec
[ "def", "set_redraw_lag", "(", "self", ",", "lag_sec", ")", ":", "self", ".", "defer_redraw", "=", "(", "lag_sec", ">", "0.0", ")", "if", "self", ".", "defer_redraw", ":", "self", ".", "defer_lagtime", "=", "lag_sec" ]
25.25
13.583333
def init(filename, order=3, tokenizer=None): """Initialize a brain. This brain's file must not already exist. Keyword arguments: order -- Order of the forward/reverse Markov chains (integer) tokenizer -- One of Cobe, MegaHAL (default Cobe). See documentation for cobe.tokenizers for details. (string)""" log.info("Initializing a cobe brain: %s" % filename) if tokenizer is None: tokenizer = "Cobe" if tokenizer not in ("Cobe", "MegaHAL"): log.info("Unknown tokenizer: %s. Using CobeTokenizer", tokenizer) tokenizer = "Cobe" graph = Graph(sqlite3.connect(filename)) with trace_us("Brain.init_time_us"): graph.init(order, tokenizer)
[ "def", "init", "(", "filename", ",", "order", "=", "3", ",", "tokenizer", "=", "None", ")", ":", "log", ".", "info", "(", "\"Initializing a cobe brain: %s\"", "%", "filename", ")", "if", "tokenizer", "is", "None", ":", "tokenizer", "=", "\"Cobe\"", "if", "tokenizer", "not", "in", "(", "\"Cobe\"", ",", "\"MegaHAL\"", ")", ":", "log", ".", "info", "(", "\"Unknown tokenizer: %s. Using CobeTokenizer\"", ",", "tokenizer", ")", "tokenizer", "=", "\"Cobe\"", "graph", "=", "Graph", "(", "sqlite3", ".", "connect", "(", "filename", ")", ")", "with", "trace_us", "(", "\"Brain.init_time_us\"", ")", ":", "graph", ".", "init", "(", "order", ",", "tokenizer", ")" ]
36.25
19.1
def genl_ctrl_resolve(sk, name): """Resolve Generic Netlink family name to numeric identifier. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L429 Resolves the Generic Netlink family name to the corresponding numeric family identifier. This function queries the kernel directly, use genl_ctrl_search_by_name() if you need to resolve multiple names. Positional arguments: sk -- Generic Netlink socket (nl_sock class instance). name -- name of Generic Netlink family (bytes). Returns: The numeric family identifier or a negative error code. """ family = genl_ctrl_probe_by_name(sk, name) if family is None: return -NLE_OBJ_NOTFOUND return int(genl_family_get_id(family))
[ "def", "genl_ctrl_resolve", "(", "sk", ",", "name", ")", ":", "family", "=", "genl_ctrl_probe_by_name", "(", "sk", ",", "name", ")", "if", "family", "is", "None", ":", "return", "-", "NLE_OBJ_NOTFOUND", "return", "int", "(", "genl_family_get_id", "(", "family", ")", ")" ]
36.65
24.75
def doppler_width(transition, Temperature): r"""Return the Doppler width of a transition at a given temperature (in angular frequency). For example, the usual Doppler FWHM of the rubidium D2 line (in MHz): >>> g = State("Rb", 87, 5, 0, 1/Integer(2), 2) >>> e = State("Rb", 87, 5, 1, 3/Integer(2)) >>> t = Transition(e, g) >>> omega = doppler_width(t, 273.15 + 22) >>> "{:2.3f}".format(omega/2/np.pi*1e-6) '522.477' """ atom = Atom(transition.e1.element, transition.e1.isotope) m = atom.mass omega = transition.omega return omega*np.log(8*np.sqrt(2))*np.sqrt(k_B*Temperature/m/c**2)
[ "def", "doppler_width", "(", "transition", ",", "Temperature", ")", ":", "atom", "=", "Atom", "(", "transition", ".", "e1", ".", "element", ",", "transition", ".", "e1", ".", "isotope", ")", "m", "=", "atom", ".", "mass", "omega", "=", "transition", ".", "omega", "return", "omega", "*", "np", ".", "log", "(", "8", "*", "np", ".", "sqrt", "(", "2", ")", ")", "*", "np", ".", "sqrt", "(", "k_B", "*", "Temperature", "/", "m", "/", "c", "**", "2", ")" ]
33.888889
17
def set_trace(frame=None, skip=0, server=None, port=None): """Set trace on current line, or on given frame""" frame = frame or sys._getframe().f_back for i in range(skip): if not frame.f_back: break frame = frame.f_back wdb = Wdb.get(server=server, port=port) wdb.set_trace(frame) return wdb
[ "def", "set_trace", "(", "frame", "=", "None", ",", "skip", "=", "0", ",", "server", "=", "None", ",", "port", "=", "None", ")", ":", "frame", "=", "frame", "or", "sys", ".", "_getframe", "(", ")", ".", "f_back", "for", "i", "in", "range", "(", "skip", ")", ":", "if", "not", "frame", ".", "f_back", ":", "break", "frame", "=", "frame", ".", "f_back", "wdb", "=", "Wdb", ".", "get", "(", "server", "=", "server", ",", "port", "=", "port", ")", "wdb", ".", "set_trace", "(", "frame", ")", "return", "wdb" ]
33.4
12.8
def use_project(self, project_id): """Creates an instance of [ProjectClient](#projectclient), providing session authentication. Parameters: * `project_id` - project identifier. Returns: Instance of [ProjectClient](#projectclient) with session authentication. Example: ```python client = Client('deform.io') session_client = client.auth( 'session', client.user.login('[email protected]', 'password') ) session_client.use_project('some-project-id') ``` """ return ProjectClient( base_uri=get_base_uri( project=project_id, host=self.host, port=self.port, secure=self.secure, api_base_path=self.api_base_path ), auth_header=self.auth_header, requests_session=self.requests_session, request_defaults=self.request_defaults, )
[ "def", "use_project", "(", "self", ",", "project_id", ")", ":", "return", "ProjectClient", "(", "base_uri", "=", "get_base_uri", "(", "project", "=", "project_id", ",", "host", "=", "self", ".", "host", ",", "port", "=", "self", ".", "port", ",", "secure", "=", "self", ".", "secure", ",", "api_base_path", "=", "self", ".", "api_base_path", ")", ",", "auth_header", "=", "self", ".", "auth_header", ",", "requests_session", "=", "self", ".", "requests_session", ",", "request_defaults", "=", "self", ".", "request_defaults", ",", ")" ]
26.756757
17.864865
def doc(func): """ Find the message shown when someone calls the help command Parameters ---------- func : function the function Returns ------- str The help message for this command """ stripped_chars = " \t" if hasattr(func, '__doc__'): docstring = func.__doc__.lstrip(" \n\t") if "\n" in docstring: i = docstring.index("\n") return docstring[:i].rstrip(stripped_chars) elif docstring: return docstring.rstrip(stripped_chars) return ""
[ "def", "doc", "(", "func", ")", ":", "stripped_chars", "=", "\" \\t\"", "if", "hasattr", "(", "func", ",", "'__doc__'", ")", ":", "docstring", "=", "func", ".", "__doc__", ".", "lstrip", "(", "\" \\n\\t\"", ")", "if", "\"\\n\"", "in", "docstring", ":", "i", "=", "docstring", ".", "index", "(", "\"\\n\"", ")", "return", "docstring", "[", ":", "i", "]", ".", "rstrip", "(", "stripped_chars", ")", "elif", "docstring", ":", "return", "docstring", ".", "rstrip", "(", "stripped_chars", ")", "return", "\"\"" ]
21.8
20.44
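A quick sketch of doc in action; it keeps only the first line of a multi-line docstring. One caveat visible in the code: a function whose __doc__ is None would raise AttributeError at the lstrip call, since hasattr(func, '__doc__') is true for every function.
def greet():
    """Say hello.

    Longer description that doc() drops.
    """

def helper():
    """One-liner with no trailing newline"""

print(doc(greet))   # 'Say hello.'
print(doc(helper))  # 'One-liner with no trailing newline'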
def retrieve_config_file(self): """ Retrieve config file """ try: if self.args["configfile"]: return self.args["configfile"] except KeyError: pass return os.path.expanduser('~/.config/greg/greg.conf')
[ "def", "retrieve_config_file", "(", "self", ")", ":", "try", ":", "if", "self", ".", "args", "[", "\"configfile\"", "]", ":", "return", "self", ".", "args", "[", "\"configfile\"", "]", "except", "KeyError", ":", "pass", "return", "os", ".", "path", ".", "expanduser", "(", "'~/.config/greg/greg.conf'", ")" ]
27.9
11.7
def is_gvcf_file(in_file): """Check if an input file is raw gVCF """ to_check = 100 n = 0 with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if not line.startswith("##"): if n > to_check: break n += 1 parts = line.split("\t") # GATK if parts[4] == "<NON_REF>": return True # strelka2 if parts[4] == "." and parts[7].startswith("BLOCKAVG"): return True # freebayes if parts[4] == "<*>": return True # platypus if parts[4] == "N" and parts[6] == "REFCALL": return True
[ "def", "is_gvcf_file", "(", "in_file", ")", ":", "to_check", "=", "100", "n", "=", "0", "with", "utils", ".", "open_gzipsafe", "(", "in_file", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "not", "line", ".", "startswith", "(", "\"##\"", ")", ":", "if", "n", ">", "to_check", ":", "break", "n", "+=", "1", "parts", "=", "line", ".", "split", "(", "\"\\t\"", ")", "# GATK", "if", "parts", "[", "4", "]", "==", "\"<NON_REF>\"", ":", "return", "True", "# strelka2", "if", "parts", "[", "4", "]", "==", "\".\"", "and", "parts", "[", "7", "]", ".", "startswith", "(", "\"BLOCKAVG\"", ")", ":", "return", "True", "# freebayes", "if", "parts", "[", "4", "]", "==", "\"<*>\"", ":", "return", "True", "# platypue", "if", "parts", "[", "4", "]", "==", "\"N\"", "and", "parts", "[", "6", "]", "==", "\"REFCALL\"", ":", "return", "True" ]
32.458333
11.791667
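A minimal check of is_gvcf_file, under the assumption that utils.open_gzipsafe behaves like plain open() for uncompressed files (its name suggests a gzip-aware fallback; that helper is not shown in the row).
import tempfile

# One header line plus a GATK-style gVCF record whose ALT column is <NON_REF>.
record = "1\t100\t.\tA\t<NON_REF>\t.\t.\tEND=200\n"
with tempfile.NamedTemporaryFile('w', suffix='.vcf', delete=False) as fh:
    fh.write("##fileformat=VCFv4.2\n")
    fh.write(record)
    path = fh.name

print(is_gvcf_file(path))  # True -- matched by the GATK branch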
def notify_slaves(self): """Checks to see if slaves should be notified, and notifies them if needed""" if self.disable_slave_notify is not None: LOGGER.debug('Slave notifications disabled') return False if self.zone_data()['kind'] == 'Master': response_code = self._put('/zones/' + self.domain + '/notify').status_code if response_code == 200: LOGGER.debug('Slave(s) notified') return True LOGGER.debug('Slave notification failed with code %i', response_code) else: LOGGER.debug('Zone type should be \'Master\' for slave notifications') return False
[ "def", "notify_slaves", "(", "self", ")", ":", "if", "self", ".", "disable_slave_notify", "is", "not", "None", ":", "LOGGER", ".", "debug", "(", "'Slave notifications disabled'", ")", "return", "False", "if", "self", ".", "zone_data", "(", ")", "[", "'kind'", "]", "==", "'Master'", ":", "response_code", "=", "self", ".", "_put", "(", "'/zones/'", "+", "self", ".", "domain", "+", "'/notify'", ")", ".", "status_code", "if", "response_code", "==", "200", ":", "LOGGER", ".", "debug", "(", "'Slave(s) notified'", ")", "return", "True", "LOGGER", ".", "debug", "(", "'Slave notification failed with code %i'", ",", "response_code", ")", "else", ":", "LOGGER", ".", "debug", "(", "'Zone type should be \\'Master\\' for slave notifications'", ")", "return", "False" ]
45.333333
20.466667
def _add_logical_operator(self, operator): """Adds a logical operator to the query :param operator: logical operator (str) :raise: - QueryExpressionError: if an expression hasn't been set """ if not self.c_oper: raise QueryExpressionError("Logical operators must be preceded by an expression") self.current_field = None self.c_oper = None self.l_oper = inspect.currentframe().f_back.f_code.co_name self._query.append(operator) return self
[ "def", "_add_logical_operator", "(", "self", ",", "operator", ")", ":", "if", "not", "self", ".", "c_oper", ":", "raise", "QueryExpressionError", "(", "\"Logical operators must be preceded by an expression\"", ")", "self", ".", "current_field", "=", "None", "self", ".", "c_oper", "=", "None", "self", ".", "l_oper", "=", "inspect", ".", "currentframe", "(", ")", ".", "f_back", ".", "f_code", ".", "co_name", "self", ".", "_query", ".", "append", "(", "operator", ")", "return", "self" ]
30.882353
21.117647
def schema_create(conn): """Create the index for storing profiles. This is idempotent; it can be called every time a database is opened to make sure it's ready to use and up-to-date. :param conn: A connection to an SQLite3 database. """ conn.execute(dedent("""\ CREATE TABLE IF NOT EXISTS profiles (id INTEGER PRIMARY KEY, name TEXT NOT NULL UNIQUE, data BLOB NOT NULL, selected BOOLEAN NOT NULL DEFAULT FALSE) """)) # Partial indexes are only available in >=3.8.0 and expressions in indexes # are only available in >=3.9.0 (https://www.sqlite.org/partialindex.html # & https://www.sqlite.org/expridx.html). Don't bother with any kind of # index before that because it would complicate upgrades. if sqlite3.sqlite_version_info >= (3, 9, 0): # This index is for data integrity -- ensuring that only one profile # is the default ("selected") profile -- and speed a distant second. conn.execute(dedent("""\ CREATE UNIQUE INDEX IF NOT EXISTS only_one_profile_selected ON profiles (selected IS NOT NULL) WHERE selected """))
[ "def", "schema_create", "(", "conn", ")", ":", "conn", ".", "execute", "(", "dedent", "(", "\"\"\"\\\n CREATE TABLE IF NOT EXISTS profiles\n (id INTEGER PRIMARY KEY,\n name TEXT NOT NULL UNIQUE,\n data BLOB NOT NULL,\n selected BOOLEAN NOT NULL DEFAULT FALSE)\n \"\"\"", ")", ")", "# Partial indexes are only available in >=3.8.0 and expressions in indexes", "# are only available in >=3.9.0 (https://www.sqlite.org/partialindex.html", "# & https://www.sqlite.org/expridx.html). Don't bother with any kind of", "# index before that because it would complicate upgrades.", "if", "sqlite3", ".", "sqlite_version_info", ">=", "(", "3", ",", "9", ",", "0", ")", ":", "# This index is for data integrity -- ensuring that only one profile", "# is the default (\"selected\") profile -- and speed a distant second.", "conn", ".", "execute", "(", "dedent", "(", "\"\"\"\\\n CREATE UNIQUE INDEX IF NOT EXISTS\n only_one_profile_selected ON profiles\n (selected IS NOT NULL) WHERE selected\n \"\"\"", ")", ")" ]
42
15.407407
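schema_create only needs a connection and textwrap.dedent in scope; a minimal sketch against an in-memory database (the INSERT and the expected output are illustrative):
import sqlite3
from textwrap import dedent  # the function above calls dedent()

conn = sqlite3.connect(':memory:')
schema_create(conn)
conn.execute("INSERT INTO profiles (name, data) VALUES ('default', x'00')")
print(conn.execute('SELECT name, selected FROM profiles').fetchall())
# [('default', 0)] -- 'selected' defaults to FALSE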
def lastpayout(delegate_address, blacklist=None): ''' Assumes that all send transactions from a delegate are payouts. Use blacklist to remove rewardwallet and other transactions if the address is not a voter. blacklist can contain both addresses and transactionIds''' if blacklist and len(blacklist) > 1: command_blacklist = 'NOT IN ' + str(tuple(blacklist)) elif blacklist and len(blacklist) == 1: command_blacklist = '!= ' + "'" + blacklist[0] + "'" else: command_blacklist = "!= 'nothing'" qry = DbCursor().execute_and_fetchall(""" SELECT ts."recipientId", ts."id", ts."timestamp" FROM transactions ts, (SELECT MAX(transactions."timestamp") AS max_timestamp, transactions."recipientId" FROM transactions WHERE transactions."senderId" = '{0}' AND transactions."id" {1} GROUP BY transactions."recipientId") maxresults WHERE ts."recipientId" = maxresults."recipientId" AND ts."recipientId" {1} AND ts."timestamp"= maxresults.max_timestamp; """.format(delegate_address, command_blacklist)) result = [] Payout = namedtuple( 'payout', 'address id timestamp') for i in qry: payout = Payout( address=i[0], id=i[1], timestamp=i[2] ) result.append(payout) return result
[ "def", "lastpayout", "(", "delegate_address", ",", "blacklist", "=", "None", ")", ":", "if", "blacklist", "and", "len", "(", "blacklist", ")", ">", "1", ":", "command_blacklist", "=", "'NOT IN '", "+", "str", "(", "tuple", "(", "blacklist", ")", ")", "elif", "blacklist", "and", "len", "(", "blacklist", ")", "==", "1", ":", "command_blacklist", "=", "'!= '", "+", "\"'\"", "+", "blacklist", "[", "0", "]", "+", "\"'\"", "else", ":", "command_blacklist", "=", "\"!= 'nothing'\"", "qry", "=", "DbCursor", "(", ")", ".", "execute_and_fetchall", "(", "\"\"\"\n SELECT ts.\"recipientId\", ts.\"id\", ts.\"timestamp\"\n FROM transactions ts,\n (SELECT MAX(transactions.\"timestamp\") AS max_timestamp, transactions.\"recipientId\"\n FROM transactions\n WHERE transactions.\"senderId\" = '{0}'\n AND transactions.\"id\" {1}\n GROUP BY transactions.\"recipientId\") maxresults\n WHERE ts.\"recipientId\" = maxresults.\"recipientId\"\n AND ts.\"recipientId\" {1}\n AND ts.\"timestamp\"= maxresults.max_timestamp;\n\n \"\"\"", ".", "format", "(", "delegate_address", ",", "command_blacklist", ")", ")", "result", "=", "[", "]", "Payout", "=", "namedtuple", "(", "'payout'", ",", "'address id timestamp'", ")", "for", "i", "in", "qry", ":", "payout", "=", "Payout", "(", "address", "=", "i", "[", "0", "]", ",", "id", "=", "i", "[", "1", "]", ",", "timestamp", "=", "i", "[", "2", "]", ")", "result", ".", "append", "(", "payout", ")", "return", "result" ]
42.157895
19.815789
def present(name, protocol=None, service_address=None, scheduler='wlc', ): ''' Ensure that the named service is present. name The LVS service name protocol The service protocol service_address The LVS service address scheduler Algorithm for allocating TCP connections and UDP datagrams to real servers. .. code-block:: yaml lvstest: lvs_service.present: - service_address: 1.1.1.1:80 - protocol: tcp - scheduler: rr ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} #check service service_check = __salt__['lvs.check_service'](protocol=protocol, service_address=service_address) if service_check is True: service_rule_check = __salt__['lvs.check_service'](protocol=protocol, service_address=service_address, scheduler=scheduler) if service_rule_check is True: ret['comment'] = 'LVS Service {0} is present'.format(name) return ret else: if __opts__['test']: ret['result'] = None ret['comment'] = 'LVS Service {0} is present but some options should update'.format(name) return ret else: service_edit = __salt__['lvs.edit_service'](protocol=protocol, service_address=service_address, scheduler=scheduler) if service_edit is True: ret['comment'] = 'LVS Service {0} has been updated'.format(name) ret['changes'][name] = 'Update' return ret else: ret['result'] = False ret['comment'] = 'LVS Service {0} update failed'.format(name) return ret else: if __opts__['test']: ret['comment'] = 'LVS Service {0} is not present and needs to be created'.format(name) ret['result'] = None return ret else: service_add = __salt__['lvs.add_service'](protocol=protocol, service_address=service_address, scheduler=scheduler) if service_add is True: ret['comment'] = 'LVS Service {0} has been created'.format(name) ret['changes'][name] = 'Present' return ret else: ret['comment'] = 'LVS Service {0} create failed({1})'.format(name, service_add) ret['result'] = False return ret
[ "def", "present", "(", "name", ",", "protocol", "=", "None", ",", "service_address", "=", "None", ",", "scheduler", "=", "'wlc'", ",", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "#check service", "service_check", "=", "__salt__", "[", "'lvs.check_service'", "]", "(", "protocol", "=", "protocol", ",", "service_address", "=", "service_address", ")", "if", "service_check", "is", "True", ":", "service_rule_check", "=", "__salt__", "[", "'lvs.check_service'", "]", "(", "protocol", "=", "protocol", ",", "service_address", "=", "service_address", ",", "scheduler", "=", "scheduler", ")", "if", "service_rule_check", "is", "True", ":", "ret", "[", "'comment'", "]", "=", "'LVS Service {0} is present'", ".", "format", "(", "name", ")", "return", "ret", "else", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'LVS Service {0} is present but some options should update'", ".", "format", "(", "name", ")", "return", "ret", "else", ":", "service_edit", "=", "__salt__", "[", "'lvs.edit_service'", "]", "(", "protocol", "=", "protocol", ",", "service_address", "=", "service_address", ",", "scheduler", "=", "scheduler", ")", "if", "service_edit", "is", "True", ":", "ret", "[", "'comment'", "]", "=", "'LVS Service {0} has been updated'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "'Update'", "return", "ret", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'LVS Service {0} update failed'", ".", "format", "(", "name", ")", "return", "ret", "else", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'LVS Service {0} is not present and needs to be created'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "else", ":", "service_add", "=", "__salt__", "[", "'lvs.add_service'", "]", "(", "protocol", "=", "protocol", ",", "service_address", "=", "service_address", ",", "scheduler", "=", "scheduler", ")", "if", "service_add", "is", "True", ":", "ret", "[", "'comment'", "]", "=", "'LVS Service {0} has been created'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "'Present'", "return", "ret", "else", ":", "ret", "[", "'comment'", "]", "=", "'LVS Service {0} create failed({1})'", ".", "format", "(", "name", ",", "service_add", ")", "ret", "[", "'result'", "]", "=", "False", "return", "ret" ]
37.558442
24.285714
def version_cmp(ver_a, ver_b): """ Compares two version strings in the dotted-numeric-label format. Returns -1 if a < b, 0 if a == b, and +1 if a > b. Inputs may include a prefix string that matches '^\w+[_-]', but both strings must start with the same prefix. If present, it is ignored for purposes of comparison. If the prefixes differ, straight lexicographical ordering is used for the entire strings. If a is None, it is considered less than b, unless b is also None, in which case they compare equal. The function also accepts the case where both args are ints or can be converted to ints. """ if ver_a is None and ver_b is None: return 0 if ver_a is None: return -1 elif ver_b is None: return 1 try: a = int(ver_a) b = int(ver_b) if a < b: return -1 elif a > b: return 1 else: return 0 except: pass m = re.match('^(\w+[_-])(\d.*)$', ver_a) if m: pref_a = m.group(1) a = m.group(2) else: pref_a = '' a = ver_a m = re.match('^(\w+[_-])(\d.*)$', ver_b) if m: pref_b = m.group(1) b = m.group(2) else: pref_b = '' b = ver_b if pref_a != pref_b: if ver_a < ver_b: return -1 else: return 1 a = a.split('.') b = b.split('.') restrip = re.compile(r'[^\d]+$') for i in range(0, max(len(a), len(b))): if i >= len(a): return -1 if i >= len(b): return 1 astr = restrip.sub('', a[i]) if not astr: astr = '0' bstr = restrip.sub('', b[i]) if not bstr: bstr = '0' try: aint = int(astr) except: return -1 # pragma: no cover try: bint = int(bstr) except: return -1 # pragma: no cover if aint < bint: return -1 elif aint > bint: return 1 return 0
[ "def", "version_cmp", "(", "ver_a", ",", "ver_b", ")", ":", "if", "ver_a", "is", "None", "and", "ver_b", "is", "None", ":", "return", "0", "if", "ver_a", "is", "None", ":", "return", "-", "1", "elif", "ver_b", "is", "None", ":", "return", "1", "try", ":", "a", "=", "int", "(", "ver_a", ")", "b", "=", "int", "(", "ver_b", ")", "if", "a", "<", "b", ":", "return", "-", "1", "elif", "a", ">", "b", ":", "return", "1", "else", ":", "return", "0", "except", ":", "pass", "m", "=", "re", ".", "match", "(", "'^(\\w+[_-])(\\d.*)$'", ",", "ver_a", ")", "if", "m", ":", "pref_a", "=", "m", ".", "group", "(", "1", ")", "a", "=", "m", ".", "group", "(", "2", ")", "else", ":", "pref_a", "=", "''", "a", "=", "ver_a", "m", "=", "re", ".", "match", "(", "'^(\\w+[_-])(\\d.*)$'", ",", "ver_b", ")", "if", "m", ":", "pref_b", "=", "m", ".", "group", "(", "1", ")", "b", "=", "m", ".", "group", "(", "2", ")", "else", ":", "pref_b", "=", "''", "b", "=", "ver_b", "if", "pref_a", "!=", "pref_b", ":", "if", "ver_a", "<", "ver_b", ":", "return", "-", "1", "else", ":", "return", "1", "a", "=", "a", ".", "split", "(", "'.'", ")", "b", "=", "b", ".", "split", "(", "'.'", ")", "restrip", "=", "re", ".", "compile", "(", "r'[^\\d]+$'", ")", "for", "i", "in", "range", "(", "0", ",", "max", "(", "len", "(", "a", ")", ",", "len", "(", "b", ")", ")", ")", ":", "if", "i", ">=", "len", "(", "a", ")", ":", "return", "-", "1", "if", "i", ">=", "len", "(", "b", ")", ":", "return", "1", "astr", "=", "restrip", ".", "sub", "(", "''", ",", "a", "[", "i", "]", ")", "if", "not", "astr", ":", "astr", "=", "'0'", "bstr", "=", "restrip", ".", "sub", "(", "''", ",", "b", "[", "i", "]", ")", "if", "not", "bstr", ":", "bstr", "=", "'0'", "try", ":", "aint", "=", "int", "(", "astr", ")", "except", ":", "return", "-", "1", "# pragma: no cover", "try", ":", "bint", "=", "int", "(", "bstr", ")", "except", ":", "return", "-", "1", "# pragma: no cover", "if", "aint", "<", "bint", ":", "return", "-", "1", "elif", "aint", ">", "bint", ":", "return", "1", "return", "0" ]
29.044776
20.477612
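A few checks that pin down version_cmp's corner cases; the expected results below are derived from the code above.
assert version_cmp('1.2.3', '1.10.0') == -1  # numeric, not lexicographic
assert version_cmp('v-1.2', 'v-1.2') == 0    # matching prefixes are ignored
assert version_cmp(None, '0.1') == -1        # None sorts before everything
assert version_cmp(None, None) == 0          # ...except another None
assert version_cmp(7, '3') == 1              # plain integers are accepted too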
def pack_small_tensors(tower_grads, max_bytes=0): """Concatenate gradients together more intelligently. Does binpacking Args: tower_grads: List of lists of (gradient, variable) tuples. max_bytes: Int giving max number of bytes in a tensor that may be considered small. """ assert max_bytes >= 0 orig_grads = [g for g, _ in tower_grads[0]] # Check to make sure sizes are accurate; not entirely important assert all(g.dtype == tf.float32 for g in orig_grads) sizes = [4 * g.shape.num_elements() for g in orig_grads] print_stats(sizes) small_ranges = [] large_indices = [] new_sizes = [] def end_interval(indices, small_ranges, large_indices): if len(indices) > 1: small_ranges.insert(0, [indices[0], indices[-1]]) else: large_indices.insert(0, indices[0]) cur_range = [] cur_size = 0 for i, s in reversed(list(enumerate(sizes))): if cur_size > max_bytes: end_interval(cur_range, small_ranges, large_indices) new_sizes.insert(0, cur_size) cur_range = [] cur_size = 0 cur_range.insert(0, i) cur_size += s end_interval(cur_range, small_ranges, large_indices) new_sizes.insert(0, cur_size) print_stats(new_sizes) num_gv = len(orig_grads) packing = {} if len(small_ranges): new_tower_grads = [] for dev_idx, gv_list in enumerate(tower_grads): assert len(gv_list) == num_gv, ( "Possible cause: " "Networks constructed on different workers " "don't have the same number of variables. " "If you use tf.GraphKeys or tf.global_variables() " "with multiple graphs per worker during network " "construction, you need to use " "appropriate scopes, see " "https://github.com/ray-project/ray/issues/3136") new_gv_list = [] for r in small_ranges: key = "%d:%d" % (dev_idx, len(new_gv_list)) new_gv_list.append((pack_range(key, packing, gv_list, r), "packing_var_placeholder")) for i in large_indices: new_gv_list.append(gv_list[i]) new_tower_grads.append(new_gv_list) return new_tower_grads, packing else: return tower_grads, None
[ "def", "pack_small_tensors", "(", "tower_grads", ",", "max_bytes", "=", "0", ")", ":", "assert", "max_bytes", ">=", "0", "orig_grads", "=", "[", "g", "for", "g", ",", "_", "in", "tower_grads", "[", "0", "]", "]", "# Check to make sure sizes are accurate; not entirely important", "assert", "all", "(", "g", ".", "dtype", "==", "tf", ".", "float32", "for", "g", "in", "orig_grads", ")", "sizes", "=", "[", "4", "*", "g", ".", "shape", ".", "num_elements", "(", ")", "for", "g", "in", "orig_grads", "]", "print_stats", "(", "sizes", ")", "small_ranges", "=", "[", "]", "large_indices", "=", "[", "]", "new_sizes", "=", "[", "]", "def", "end_interval", "(", "indices", ",", "small_ranges", ",", "large_indices", ")", ":", "if", "len", "(", "indices", ")", ">", "1", ":", "small_ranges", ".", "insert", "(", "0", ",", "[", "indices", "[", "0", "]", ",", "indices", "[", "-", "1", "]", "]", ")", "else", ":", "large_indices", ".", "insert", "(", "0", ",", "indices", "[", "0", "]", ")", "cur_range", "=", "[", "]", "cur_size", "=", "0", "for", "i", ",", "s", "in", "reversed", "(", "list", "(", "enumerate", "(", "sizes", ")", ")", ")", ":", "if", "cur_size", ">", "max_bytes", ":", "end_interval", "(", "cur_range", ",", "small_ranges", ",", "large_indices", ")", "new_sizes", ".", "insert", "(", "0", ",", "cur_size", ")", "cur_range", "=", "[", "]", "cur_size", "=", "0", "cur_range", ".", "insert", "(", "0", ",", "i", ")", "cur_size", "+=", "s", "end_interval", "(", "cur_range", ",", "small_ranges", ",", "large_indices", ")", "new_sizes", ".", "insert", "(", "0", ",", "cur_size", ")", "print_stats", "(", "new_sizes", ")", "num_gv", "=", "len", "(", "orig_grads", ")", "packing", "=", "{", "}", "if", "len", "(", "small_ranges", ")", ":", "new_tower_grads", "=", "[", "]", "for", "dev_idx", ",", "gv_list", "in", "enumerate", "(", "tower_grads", ")", ":", "assert", "len", "(", "gv_list", ")", "==", "num_gv", ",", "(", "\"Possible cause: \"", "\"Networks constructed on different workers \"", "\"don't have the same number of variables. \"", "\"If you use tf.GraphKeys or tf.global_variables() \"", "\"with multiple graphs per worker during network \"", "\"construction, you need to use \"", "\"appropriate scopes, see \"", "\"https://github.com/ray-project/ray/issues/3136\"", ")", "new_gv_list", "=", "[", "]", "for", "r", "in", "small_ranges", ":", "key", "=", "\"%d:%d\"", "%", "(", "dev_idx", ",", "len", "(", "new_gv_list", ")", ")", "new_gv_list", ".", "append", "(", "(", "pack_range", "(", "key", ",", "packing", ",", "gv_list", ",", "r", ")", ",", "\"packing_var_placeholder\"", ")", ")", "for", "i", "in", "large_indices", ":", "new_gv_list", ".", "append", "(", "gv_list", "[", "i", "]", ")", "new_tower_grads", ".", "append", "(", "new_gv_list", ")", "return", "new_tower_grads", ",", "packing", "else", ":", "return", "tower_grads", ",", "None" ]
37.078125
16.828125
def interp_head_addr(self): """Returns PtrTo(PtrTo(PyInterpreterState)) value""" if self._interp_head_addr is not None: return self._interp_head_addr try: interp_head_addr = self.get_interp_head_addr_through_symbol() except SymbolNotFound: logger.debug("Could not find interp_head symbol") # Hard way interp_head_addr = self.get_interp_head_addr_through_PyInterpreterState_Head() self._interp_head_addr = interp_head_addr return interp_head_addr
[ "def", "interp_head_addr", "(", "self", ")", ":", "if", "self", ".", "_interp_head_addr", "is", "not", "None", ":", "return", "self", ".", "_interp_head_addr", "try", ":", "interp_head_addr", "=", "self", ".", "get_interp_head_addr_through_symbol", "(", ")", "except", "SymbolNotFound", ":", "logger", ".", "debug", "(", "\"Could not find interp_head symbol\"", ")", "# Hard way", "interp_head_addr", "=", "self", ".", "get_interp_head_addr_through_PyInterpreterState_Head", "(", ")", "self", ".", "_interp_head_addr", "=", "interp_head_addr", "return", "interp_head_addr" ]
45.166667
16.5
def param_id(self, param, i): """Parse a node name in parameter list""" param.pair = (self.value(i), parsing.Node) return True
[ "def", "param_id", "(", "self", ",", "param", ",", "i", ")", ":", "param", ".", "pair", "=", "(", "self", ".", "value", "(", "i", ")", ",", "parsing", ".", "Node", ")", "return", "True" ]
33.75
10.5
def create(self, username, password, tags=''): """Create User. :param str username: Username :param str password: Password :param str tags: Comma-separated list of tags (e.g. monitoring) :rtype: None """ user_payload = json.dumps({ 'password': password, 'tags': tags }) return self.http_client.put(API_USER % username, payload=user_payload)
[ "def", "create", "(", "self", ",", "username", ",", "password", ",", "tags", "=", "''", ")", ":", "user_payload", "=", "json", ".", "dumps", "(", "{", "'password'", ":", "password", ",", "'tags'", ":", "tags", "}", ")", "return", "self", ".", "http_client", ".", "put", "(", "API_USER", "%", "username", ",", "payload", "=", "user_payload", ")" ]
30.6
15.533333
def _process_current(self, handle, op, dest_path=None, dest_name=None): """Process current member with 'op' operation.""" unrarlib.RARProcessFileW(handle, op, dest_path, dest_name)
[ "def", "_process_current", "(", "self", ",", "handle", ",", "op", ",", "dest_path", "=", "None", ",", "dest_name", "=", "None", ")", ":", "unrarlib", ".", "RARProcessFileW", "(", "handle", ",", "op", ",", "dest_path", ",", "dest_name", ")" ]
64.666667
19
def margin(file_, geometry_string): """ Returns the calculated margin for an image and geometry """ if not file_ or (sorl_settings.THUMBNAIL_DUMMY or isinstance(file_, DummyImageFile)): return 'auto' margin = [0, 0, 0, 0] image_file = default.kvstore.get_or_set(ImageFile(file_)) x, y = parse_geometry(geometry_string, image_file.ratio) ex = x - image_file.x margin[3] = ex / 2 margin[1] = ex / 2 if ex % 2: margin[1] += 1 ey = y - image_file.y margin[0] = ey / 2 margin[2] = ey / 2 if ey % 2: margin[2] += 1 return ' '.join(['%dpx' % n for n in margin])
[ "def", "margin", "(", "file_", ",", "geometry_string", ")", ":", "if", "not", "file_", "or", "(", "sorl_settings", ".", "THUMBNAIL_DUMMY", "or", "isinstance", "(", "file_", ",", "DummyImageFile", ")", ")", ":", "return", "'auto'", "margin", "=", "[", "0", ",", "0", ",", "0", ",", "0", "]", "image_file", "=", "default", ".", "kvstore", ".", "get_or_set", "(", "ImageFile", "(", "file_", ")", ")", "x", ",", "y", "=", "parse_geometry", "(", "geometry_string", ",", "image_file", ".", "ratio", ")", "ex", "=", "x", "-", "image_file", ".", "x", "margin", "[", "3", "]", "=", "ex", "/", "2", "margin", "[", "1", "]", "=", "ex", "/", "2", "if", "ex", "%", "2", ":", "margin", "[", "1", "]", "+=", "1", "ey", "=", "y", "-", "image_file", ".", "y", "margin", "[", "0", "]", "=", "ey", "/", "2", "margin", "[", "2", "]", "=", "ey", "/", "2", "if", "ey", "%", "2", ":", "margin", "[", "2", "]", "+=", "1", "return", "' '", ".", "join", "(", "[", "'%dpx'", "%", "n", "for", "n", "in", "margin", "]", ")" ]
22.25
23.821429
def horizontal_angle(C, P): """Return the angle to the horizontal for the connection from C to P. This uses the arcus sine function and resolves its inherent ambiguity by looking up in which quadrant vector S = P - C is located. """ S = Point(P - C).unit # unit vector 'C' -> 'P' alfa = math.asin(abs(S.y)) # absolute angle from horizontal if S.x < 0: # make arcsin result unique if S.y <= 0: # bottom-left alfa = -(math.pi - alfa) else: # top-left alfa = math.pi - alfa else: if S.y >= 0: # top-right pass else: # bottom-right alfa = - alfa return alfa
[ "def", "horizontal_angle", "(", "C", ",", "P", ")", ":", "S", "=", "Point", "(", "P", "-", "C", ")", ".", "unit", "# unit vector 'C' -> 'P'", "alfa", "=", "math", ".", "asin", "(", "abs", "(", "S", ".", "y", ")", ")", "# absolute angle from horizontal", "if", "S", ".", "x", "<", "0", ":", "# make arcsin result unique", "if", "S", ".", "y", "<=", "0", ":", "# bottom-left", "alfa", "=", "-", "(", "math", ".", "pi", "-", "alfa", ")", "else", ":", "# top-left", "alfa", "=", "math", ".", "pi", "-", "alfa", "else", ":", "if", "S", ".", "y", ">=", "0", ":", "# top-right", "pass", "else", ":", "# bottom-right", "alfa", "=", "-", "alfa", "return", "alfa" ]
49.166667
19.277778
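horizontal_angle expects a Point class that is not shown in the row; the stand-in below (copy-constructible, subtractable, with a normalizing .unit property) is an assumption. Away from the plus/minus pi boundary the result coincides with math.atan2(S.y, S.x).
import math

class Point:
    # Assumed stand-in for the Point class the function expects.
    def __init__(self, x, y=0.0):
        if hasattr(x, 'x'):  # copy-construct from another Point
            x, y = x.x, x.y
        self.x, self.y = x, y
    def __sub__(self, other):
        return Point(self.x - other.x, self.y - other.y)
    @property
    def unit(self):
        n = math.hypot(self.x, self.y)
        return Point(self.x / n, self.y / n)

C, P = Point(0, 0), Point(-1, 1)  # P lies up and to the left of C
print(horizontal_angle(C, P))     # 2.356... (3*pi/4)
print(math.atan2(1, -1))          # same value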
def changed(self, selection='all'): ''' Returns the list of changed values. The key is added to each item. selection Specifies the desired changes. Supported values are ``all`` - all changed items are included in the output ``intersect`` - changed items present in both lists are included ''' changed = [] if selection == 'all': for recursive_item in self._get_recursive_difference(type='all'): # We want the unset values as well recursive_item.ignore_unset_values = False key_val = six.text_type(recursive_item.past_dict[self._key]) \ if self._key in recursive_item.past_dict \ else six.text_type(recursive_item.current_dict[self._key]) for change in recursive_item.changed(): if change != self._key: changed.append('.'.join([self._key, key_val, change])) return changed elif selection == 'intersect': # We want the unset values as well for recursive_item in self._get_recursive_difference(type='intersect'): recursive_item.ignore_unset_values = False key_val = six.text_type(recursive_item.past_dict[self._key]) \ if self._key in recursive_item.past_dict \ else six.text_type(recursive_item.current_dict[self._key]) for change in recursive_item.changed(): if change != self._key: changed.append('.'.join([self._key, key_val, change])) return changed
[ "def", "changed", "(", "self", ",", "selection", "=", "'all'", ")", ":", "changed", "=", "[", "]", "if", "selection", "==", "'all'", ":", "for", "recursive_item", "in", "self", ".", "_get_recursive_difference", "(", "type", "=", "'all'", ")", ":", "# We want the unset values as well", "recursive_item", ".", "ignore_unset_values", "=", "False", "key_val", "=", "six", ".", "text_type", "(", "recursive_item", ".", "past_dict", "[", "self", ".", "_key", "]", ")", "if", "self", ".", "_key", "in", "recursive_item", ".", "past_dict", "else", "six", ".", "text_type", "(", "recursive_item", ".", "current_dict", "[", "self", ".", "_key", "]", ")", "for", "change", "in", "recursive_item", ".", "changed", "(", ")", ":", "if", "change", "!=", "self", ".", "_key", ":", "changed", ".", "append", "(", "'.'", ".", "join", "(", "[", "self", ".", "_key", ",", "key_val", ",", "change", "]", ")", ")", "return", "changed", "elif", "selection", "==", "'intersect'", ":", "# We want the unset values as well", "for", "recursive_item", "in", "self", ".", "_get_recursive_difference", "(", "type", "=", "'intersect'", ")", ":", "recursive_item", ".", "ignore_unset_values", "=", "False", "key_val", "=", "six", ".", "text_type", "(", "recursive_item", ".", "past_dict", "[", "self", ".", "_key", "]", ")", "if", "self", ".", "_key", "in", "recursive_item", ".", "past_dict", "else", "six", ".", "text_type", "(", "recursive_item", ".", "current_dict", "[", "self", ".", "_key", "]", ")", "for", "change", "in", "recursive_item", ".", "changed", "(", ")", ":", "if", "change", "!=", "self", ".", "_key", ":", "changed", ".", "append", "(", "'.'", ".", "join", "(", "[", "self", ".", "_key", ",", "key_val", ",", "change", "]", ")", ")", "return", "changed" ]
47.194444
22.527778
def make_gtf_url(ensembl_release, species, server=ENSEMBL_FTP_SERVER): """ Returns a URL and a filename, which can be joined together. """ ensembl_release, species, _ = \ normalize_release_properties(ensembl_release, species) subdir = _species_subdir( ensembl_release, species=species, filetype="gtf", server=server) url_subdir = urllib_parse.urljoin(server, subdir) filename = make_gtf_filename( ensembl_release=ensembl_release, species=species) return join(url_subdir, filename)
[ "def", "make_gtf_url", "(", "ensembl_release", ",", "species", ",", "server", "=", "ENSEMBL_FTP_SERVER", ")", ":", "ensembl_release", ",", "species", ",", "_", "=", "normalize_release_properties", "(", "ensembl_release", ",", "species", ")", "subdir", "=", "_species_subdir", "(", "ensembl_release", ",", "species", "=", "species", ",", "filetype", "=", "\"gtf\"", ",", "server", "=", "server", ")", "url_subdir", "=", "urllib_parse", ".", "urljoin", "(", "server", ",", "subdir", ")", "filename", "=", "make_gtf_filename", "(", "ensembl_release", "=", "ensembl_release", ",", "species", "=", "species", ")", "return", "join", "(", "url_subdir", ",", "filename", ")" ]
34.5625
12.3125
async def stop(self, **kwargs): """Stop pairing server and unpublish service.""" _LOGGER.debug('Shutting down pairing server') if self._web_server is not None: await self._web_server.shutdown() self._server.close() if self._server is not None: await self._server.wait_closed()
[ "async", "def", "stop", "(", "self", ",", "*", "*", "kwargs", ")", ":", "_LOGGER", ".", "debug", "(", "'Shutting down pairing server'", ")", "if", "self", ".", "_web_server", "is", "not", "None", ":", "await", "self", ".", "_web_server", ".", "shutdown", "(", ")", "self", ".", "_server", ".", "close", "(", ")", "if", "self", ".", "_server", "is", "not", "None", ":", "await", "self", ".", "_server", ".", "wait_closed", "(", ")" ]
37.444444
9.222222
def add_members(self, rtcs): '''Add other RT Components to this composite component as members. This component must be a composite component. ''' if not self.is_composite: raise exceptions.NotCompositeError(self.name) for rtc in rtcs: if self.is_member(rtc): raise exceptions.AlreadyInCompositionError(self.name, rtc.instance_name) org = self.organisations[0].obj org.add_members([x.object for x in rtcs]) # Force a reparse of the member information self._orgs = []
[ "def", "add_members", "(", "self", ",", "rtcs", ")", ":", "if", "not", "self", ".", "is_composite", ":", "raise", "exceptions", ".", "NotCompositeError", "(", "self", ".", "name", ")", "for", "rtc", "in", "rtcs", ":", "if", "self", ".", "is_member", "(", "rtc", ")", ":", "raise", "exceptions", ".", "AlreadyInCompositionError", "(", "self", ".", "name", ",", "rtc", ".", "instance_name", ")", "org", "=", "self", ".", "organisations", "[", "0", "]", ".", "obj", "org", ".", "add_members", "(", "[", "x", ".", "object", "for", "x", "in", "rtcs", "]", ")", "# Force a reparse of the member information", "self", ".", "_orgs", "=", "[", "]" ]
37.666667
19.933333
def _invalid_docstring_quote(self, quote, row, col=None): """Add a message for an invalid docstring quote. Args: quote: The quote characters that were found. row: The row number the quote characters were found on. col: The column the quote characters were found on. """ self.add_message( 'invalid-docstring-quote', line=row, args=(quote, TRIPLE_QUOTE_OPTS.get(self.config.docstring_quote)), **self.get_offset(col) )
[ "def", "_invalid_docstring_quote", "(", "self", ",", "quote", ",", "row", ",", "col", "=", "None", ")", ":", "self", ".", "add_message", "(", "'invalid-docstring-quote'", ",", "line", "=", "row", ",", "args", "=", "(", "quote", ",", "TRIPLE_QUOTE_OPTS", ".", "get", "(", "self", ".", "config", ".", "docstring_quote", ")", ")", ",", "*", "*", "self", ".", "get_offset", "(", "col", ")", ")" ]
37.642857
18.571429
def volume_attach(name, server_name, device='/dev/xvdb', **kwargs): ''' Attach block volume ''' conn = get_conn() return conn.volume_attach( name, server_name, device, timeout=300 )
[ "def", "volume_attach", "(", "name", ",", "server_name", ",", "device", "=", "'/dev/xvdb'", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "get_conn", "(", ")", "return", "conn", ".", "volume_attach", "(", "name", ",", "server_name", ",", "device", ",", "timeout", "=", "300", ")" ]
20.636364
24.272727
def getAppExt(self, loops=float('inf')): """ getAppExt(loops=float('inf')) Application extension. This part specifies the number of loops. If loops is 0 or inf, the GIF loops forever. """ if loops == 0 or loops == float('inf'): loops = 2 ** 16 - 1 bb = b"" if loops != 1: # omit the extension if we would like a nonlooping gif bb += b"\x21\xFF\x0B" # application extension bb += b"NETSCAPE2.0" bb += b"\x03\x01" bb += intToBin(loops) bb += b'\x00' # end return bb
[ "def", "getAppExt", "(", "self", ",", "loops", "=", "float", "(", "'inf'", ")", ")", ":", "if", "loops", "==", "0", "or", "loops", "==", "float", "(", "'inf'", ")", ":", "loops", "=", "2", "**", "16", "-", "1", "bb", "=", "b\"\"", "if", "loops", "!=", "1", ":", "# omit the extension if we would like a nonlooping gif", "bb", "+=", "b\"\\x21\\xFF\\x0B\"", "# application extension", "bb", "+=", "b\"NETSCAPE2.0\"", "bb", "+=", "b\"\\x03\\x01\"", "bb", "+=", "intToBin", "(", "loops", ")", "bb", "+=", "b'\\x00'", "# end", "return", "bb" ]
32.666667
17.555556
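getAppExt never touches self, so it can be attached to a throwaway class. intToBin is not shown in the row; the 16-bit little-endian encoding below is an assumption based on how GIF block fields are laid out.
import struct

def intToBin(i):
    return struct.pack('<H', i)  # assumed: 16-bit little-endian field

class Writer:
    pass
Writer.getAppExt = getAppExt  # attach the function above as a method

block = Writer().getAppExt()  # default: loop forever
print(block[:14])             # b'!\xff\x0bNETSCAPE2.0'
print(block[14:])             # b'\x03\x01\xff\xff\x00' -- 0xFFFF = 65535 loops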
def _fit_spatial(noise, noise_temporal, mask, template, spatial_sd, temporal_sd, noise_dict, fit_thresh, fit_delta, iterations, ): """ Fit the noise model to match the SNR of the data Parameters ---------- noise : multidimensional array, float Initial estimate of the noise noise_temporal : multidimensional array, float The temporal noise that was generated by _generate_temporal_noise mask : 3d array, binary The mask of the brain volume, distinguishing brain from non-brain template : 3d array, float A continuous (0 -> 1) volume describing the likelihood a voxel is in the brain. This can be used to contrast the brain and non brain. spatial_sd : float What is the standard deviation in space of the noise volume to be generated temporal_sd : float What is the standard deviation in time of the noise volume to be generated noise_dict : dict A dictionary specifying the types of noise in this experiment. The noise types interact in important ways. First, all noise types ending with sigma (e.g. motion sigma) are mixed together in _generate_temporal_noise. These values describe the proportion of mixing of these elements. However critically, SFNR is the parameter that describes how much noise these components contribute to the brain. If you set the noise dict to matched then it will fit the parameters to match the participant as best as possible. fit_thresh : float What proportion of the target parameter value is sufficient error to warrant finishing the fit search. fit_delta : float How much the parameters are attenuated during the fitting process, in terms of the proportion of difference between the target parameter and the actual parameter iterations : int How many steps of fitting the SNR value will be performed. Usually converges after < 5 iterations. Returns ------- noise : multidimensional array, float The noise volume generated with these parameters """ # Pull out information that is needed dim_tr = noise.shape base = template * noise_dict['max_activity'] base = base.reshape(dim_tr[0], dim_tr[1], dim_tr[2], 1) mean_signal = (base[mask > 0]).mean() target_snr = noise_dict['snr'] # Iterate through different parameters to fit SNR and SFNR spat_sd_orig = np.copy(spatial_sd) iteration = 0 for iteration in list(range(iterations)): # Calculate the new metrics new_snr = _calc_snr(noise, mask) # Calculate the difference between the real and simulated data diff_snr = abs(new_snr - target_snr) / target_snr # If the AR is sufficiently close then break the loop if diff_snr < fit_thresh: logger.info('Terminated SNR fit after ' + str( iteration) + ' iterations.') break # Convert the SFNR and SNR spat_sd_new = mean_signal / new_snr # Update the variable spatial_sd -= ((spat_sd_new - spat_sd_orig) * fit_delta) # Prevent these going out of range if spatial_sd < 0 or np.isnan(spatial_sd): spatial_sd = 10e-3 # Set up the machine noise noise_system = _generate_noise_system(dimensions_tr=dim_tr, spatial_sd=spatial_sd, temporal_sd=temporal_sd, ) # Sum up the noise of the brain noise = base + (noise_temporal * temporal_sd) + noise_system # Reject negative values (only happens outside of the brain) noise[noise < 0] = 0 # Failed to converge if iterations == 0: logger.info('No fitting iterations were run') elif iteration == iterations: logger.warning('SNR failed to converge.') # Return the updated noise return noise, spatial_sd
[ "def", "_fit_spatial", "(", "noise", ",", "noise_temporal", ",", "mask", ",", "template", ",", "spatial_sd", ",", "temporal_sd", ",", "noise_dict", ",", "fit_thresh", ",", "fit_delta", ",", "iterations", ",", ")", ":", "# Pull out information that is needed", "dim_tr", "=", "noise", ".", "shape", "base", "=", "template", "*", "noise_dict", "[", "'max_activity'", "]", "base", "=", "base", ".", "reshape", "(", "dim_tr", "[", "0", "]", ",", "dim_tr", "[", "1", "]", ",", "dim_tr", "[", "2", "]", ",", "1", ")", "mean_signal", "=", "(", "base", "[", "mask", ">", "0", "]", ")", ".", "mean", "(", ")", "target_snr", "=", "noise_dict", "[", "'snr'", "]", "# Iterate through different parameters to fit SNR and SFNR", "spat_sd_orig", "=", "np", ".", "copy", "(", "spatial_sd", ")", "iteration", "=", "0", "for", "iteration", "in", "list", "(", "range", "(", "iterations", ")", ")", ":", "# Calculate the new metrics", "new_snr", "=", "_calc_snr", "(", "noise", ",", "mask", ")", "# Calculate the difference between the real and simulated data", "diff_snr", "=", "abs", "(", "new_snr", "-", "target_snr", ")", "/", "target_snr", "# If the AR is sufficiently close then break the loop", "if", "diff_snr", "<", "fit_thresh", ":", "logger", ".", "info", "(", "'Terminated SNR fit after '", "+", "str", "(", "iteration", ")", "+", "' iterations.'", ")", "break", "# Convert the SFNR and SNR", "spat_sd_new", "=", "mean_signal", "/", "new_snr", "# Update the variable", "spatial_sd", "-=", "(", "(", "spat_sd_new", "-", "spat_sd_orig", ")", "*", "fit_delta", ")", "# Prevent these going out of range", "if", "spatial_sd", "<", "0", "or", "np", ".", "isnan", "(", "spatial_sd", ")", ":", "spatial_sd", "=", "10e-3", "# Set up the machine noise", "noise_system", "=", "_generate_noise_system", "(", "dimensions_tr", "=", "dim_tr", ",", "spatial_sd", "=", "spatial_sd", ",", "temporal_sd", "=", "temporal_sd", ",", ")", "# Sum up the noise of the brain", "noise", "=", "base", "+", "(", "noise_temporal", "*", "temporal_sd", ")", "+", "noise_system", "# Reject negative values (only happens outside of the brain)", "noise", "[", "noise", "<", "0", "]", "=", "0", "# Failed to converge", "if", "iterations", "==", "0", ":", "logger", ".", "info", "(", "'No fitting iterations were run'", ")", "elif", "iteration", "==", "iterations", ":", "logger", ".", "warning", "(", "'SNR failed to converge.'", ")", "# Return the updated noise", "return", "noise", ",", "spatial_sd" ]
35.576923
22.653846
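The loop above is a damped fixed-point iteration on the spatial noise standard deviation. A minimal standalone sketch of the same update rule, with a toy measure_snr callable standing in for _calc_snr on a regenerated noise volume; every number below is invented for illustration.

def fit_spatial_sd(measure_snr, mean_signal, target_snr, spatial_sd,
                   fit_thresh=0.05, fit_delta=0.5, iterations=20):
    spat_sd_orig = spatial_sd
    for _ in range(iterations):
        new_snr = measure_snr(spatial_sd)
        if abs(new_snr - target_snr) / target_snr < fit_thresh:
            break
        spat_sd_new = mean_signal / new_snr      # sd implied by measured SNR
        spatial_sd -= (spat_sd_new - spat_sd_orig) * fit_delta
        spatial_sd = max(spatial_sd, 10e-3)      # keep the value in range
    return spatial_sd

# Toy generator whose realised sd is only 80% of the requested one:
sd = fit_spatial_sd(lambda s: 100.0 / (0.8 * s),
                    mean_signal=100.0, target_snr=20.0, spatial_sd=5.0)
print(round(sd, 2))   # 5.98: the request is inflated until the target SNR is hit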
def get_value_prob(self, attr_name, value):
    """
    Returns the value probability of the given attribute at this node.
    """
    if attr_name not in self._attr_value_count_totals:
        return
    n = self._attr_value_counts[attr_name][value]
    d = self._attr_value_count_totals[attr_name]
    return n / float(d)
[ "def", "get_value_prob", "(", "self", ",", "attr_name", ",", "value", ")", ":", "if", "attr_name", "not", "in", "self", ".", "_attr_value_count_totals", ":", "return", "n", "=", "self", ".", "_attr_value_counts", "[", "attr_name", "]", "[", "value", "]", "d", "=", "self", ".", "_attr_value_count_totals", "[", "attr_name", "]", "return", "n", "/", "float", "(", "d", ")" ]
38.333333
13
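A hypothetical sketch of the count tables the method reads; the real tree builder fills _attr_value_counts and _attr_value_count_totals incrementally during training.

class Node:
    # Made-up counts: 4 observations of 'color', 3 of them 'red'.
    _attr_value_counts = {'color': {'red': 3, 'blue': 1}}
    _attr_value_count_totals = {'color': 4}

    def get_value_prob(self, attr_name, value):
        if attr_name not in self._attr_value_count_totals:
            return
        n = self._attr_value_counts[attr_name][value]
        d = self._attr_value_count_totals[attr_name]
        return n / float(d)

node = Node()
print(node.get_value_prob('color', 'red'))    # 0.75
print(node.get_value_prob('shape', 'round'))  # None for unseen attributes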
def intervalCreateSimulateAnalyze(netParams=None, simConfig=None, output=False, interval=None):
    ''' Sequence of commands to create, simulate and analyze the network '''
    import os
    from .. import sim

    (pops, cells, conns, stims, rxd, simData) = sim.create(netParams, simConfig, output=True)
    try:
        if sim.rank == 0:
            if os.path.exists('temp'):
                for f in os.listdir('temp'):
                    os.unlink('temp/{}'.format(f))
            else:
                os.mkdir('temp')
        sim.intervalSimulate(interval)
    except Exception as e:
        print(e)
        return
    sim.pc.barrier()
    sim.analyze()
    if output:
        return (pops, cells, conns, stims, simData)
[ "def", "intervalCreateSimulateAnalyze", "(", "netParams", "=", "None", ",", "simConfig", "=", "None", ",", "output", "=", "False", ",", "interval", "=", "None", ")", ":", "import", "os", "from", ".", ".", "import", "sim", "(", "pops", ",", "cells", ",", "conns", ",", "stims", ",", "rxd", ",", "simData", ")", "=", "sim", ".", "create", "(", "netParams", ",", "simConfig", ",", "output", "=", "True", ")", "try", ":", "if", "sim", ".", "rank", "==", "0", ":", "if", "os", ".", "path", ".", "exists", "(", "'temp'", ")", ":", "for", "f", "in", "os", ".", "listdir", "(", "'temp'", ")", ":", "os", ".", "unlink", "(", "'temp/{}'", ".", "format", "(", "f", ")", ")", "else", ":", "os", ".", "mkdir", "(", "'temp'", ")", "sim", ".", "intervalSimulate", "(", "interval", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "return", "sim", ".", "pc", ".", "barrier", "(", ")", "sim", ".", "analyze", "(", ")", "if", "output", ":", "return", "(", "pops", ",", "cells", ",", "conns", ",", "stims", ",", "simData", ")" ]
36.526316
21.368421
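A plausible driver for the wrapper above, assuming it is exposed on NetPyNE's sim package as its body suggests; building netParams/simConfig is elided and the 1000-ms save interval is an arbitrary choice.

from netpyne import sim, specs

netParams = specs.NetParams()
simConfig = specs.SimConfig()
# ... populate netParams and simConfig here ...

sim.intervalCreateSimulateAnalyze(netParams=netParams, simConfig=simConfig,
                                  interval=1000)   # save data every 1000 ms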
def rs_correct_msg_nofsynd(msg_in, nsym, fcr=0, generator=2, erase_pos=None, only_erasures=False):
    '''Reed-Solomon main decoding function, without using the modified Forney syndromes'''
    global field_charac
    if len(msg_in) > field_charac:
        raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in), field_charac))

    msg_out = bytearray(msg_in)  # copy of message
    # erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values)
    if erase_pos is None:
        erase_pos = []
    else:
        for e_pos in erase_pos:
            msg_out[e_pos] = 0
    # check if there are too many erasures
    if len(erase_pos) > nsym:
        raise ReedSolomonError("Too many erasures to correct")
    # prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions)
    synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
    # check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is.
    if max(synd) == 0:
        return msg_out[:-nsym], msg_out[-nsym:]  # no errors

    # prepare erasures locator and evaluator polynomials
    erase_loc = None
    #erase_eval = None
    erase_count = 0
    if erase_pos:
        erase_count = len(erase_pos)
        erase_pos_reversed = [len(msg_out)-1-eras for eras in erase_pos]
        erase_loc = rs_find_errata_locator(erase_pos_reversed, generator=generator)
        #erase_eval = rs_find_error_evaluator(synd[::-1], erase_loc, len(erase_loc)-1)

    # prepare errors/errata locator polynomial
    if only_erasures:
        err_loc = erase_loc[::-1]
        #err_eval = erase_eval[::-1]
    else:
        err_loc = rs_find_error_locator(synd, nsym, erase_loc=erase_loc, erase_count=erase_count)
        err_loc = err_loc[::-1]
        #err_eval = rs_find_error_evaluator(synd[::-1], err_loc[::-1], len(err_loc)-1)[::-1] # find error/errata evaluator polynomial (not really necessary since we already compute it at the same time as the error locator poly in BM)

    # locate the message errors
    err_pos = rs_find_errors(err_loc, len(msg_out), generator)  # find the roots of the errata locator polynomial (ie: the positions of the errors/errata)
    if err_pos is None:
        raise ReedSolomonError("Could not locate error")

    # compute errata evaluator and errata magnitude polynomials, then correct errors and erasures
    msg_out = rs_correct_errata(msg_out, synd, err_pos, fcr=fcr, generator=generator)
    # check if the final message is fully repaired
    synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
    if max(synd) > 0:
        raise ReedSolomonError("Could not correct message")
    # return the successfully decoded message
    return msg_out[:-nsym], msg_out[-nsym:]
[ "def", "rs_correct_msg_nofsynd", "(", "msg_in", ",", "nsym", ",", "fcr", "=", "0", ",", "generator", "=", "2", ",", "erase_pos", "=", "None", ",", "only_erasures", "=", "False", ")", ":", "global", "field_charac", "if", "len", "(", "msg_in", ")", ">", "field_charac", ":", "raise", "ValueError", "(", "\"Message is too long (%i when max is %i)\"", "%", "(", "len", "(", "msg_in", ")", ",", "field_charac", ")", ")", "msg_out", "=", "bytearray", "(", "msg_in", ")", "# copy of message", "# erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values)", "if", "erase_pos", "is", "None", ":", "erase_pos", "=", "[", "]", "else", ":", "for", "e_pos", "in", "erase_pos", ":", "msg_out", "[", "e_pos", "]", "=", "0", "# check if there are too many erasures", "if", "len", "(", "erase_pos", ")", ">", "nsym", ":", "raise", "ReedSolomonError", "(", "\"Too many erasures to correct\"", ")", "# prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions)", "synd", "=", "rs_calc_syndromes", "(", "msg_out", ",", "nsym", ",", "fcr", ",", "generator", ")", "# check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is.", "if", "max", "(", "synd", ")", "==", "0", ":", "return", "msg_out", "[", ":", "-", "nsym", "]", ",", "msg_out", "[", "-", "nsym", ":", "]", "# no errors", "# prepare erasures locator and evaluator polynomials", "erase_loc", "=", "None", "#erase_eval = None", "erase_count", "=", "0", "if", "erase_pos", ":", "erase_count", "=", "len", "(", "erase_pos", ")", "erase_pos_reversed", "=", "[", "len", "(", "msg_out", ")", "-", "1", "-", "eras", "for", "eras", "in", "erase_pos", "]", "erase_loc", "=", "rs_find_errata_locator", "(", "erase_pos_reversed", ",", "generator", "=", "generator", ")", "#erase_eval = rs_find_error_evaluator(synd[::-1], erase_loc, len(erase_loc)-1)", "# prepare errors/errata locator polynomial", "if", "only_erasures", ":", "err_loc", "=", "erase_loc", "[", ":", ":", "-", "1", "]", "#err_eval = erase_eval[::-1]", "else", ":", "err_loc", "=", "rs_find_error_locator", "(", "synd", ",", "nsym", ",", "erase_loc", "=", "erase_loc", ",", "erase_count", "=", "erase_count", ")", "err_loc", "=", "err_loc", "[", ":", ":", "-", "1", "]", "#err_eval = rs_find_error_evaluator(synd[::-1], err_loc[::-1], len(err_loc)-1)[::-1] # find error/errata evaluator polynomial (not really necessary since we already compute it at the same time as the error locator poly in BM)", "# locate the message errors", "err_pos", "=", "rs_find_errors", "(", "err_loc", ",", "len", "(", "msg_out", ")", ",", "generator", ")", "# find the roots of the errata locator polynomial (ie: the positions of the errors/errata)", "if", "err_pos", "is", "None", ":", "raise", "ReedSolomonError", "(", "\"Could not locate error\"", ")", "# compute errata evaluator and errata magnitude polynomials, then correct errors and erasures", "msg_out", "=", "rs_correct_errata", "(", "msg_out", ",", "synd", ",", "err_pos", ",", "fcr", "=", "fcr", ",", "generator", "=", "generator", ")", "# check if the final message is fully repaired", "synd", "=", "rs_calc_syndromes", "(", "msg_out", ",", "nsym", ",", "fcr", ",", "generator", ")", "if", "max", "(", "synd", 
")", ">", "0", ":", "raise", "ReedSolomonError", "(", "\"Could not correct message\"", ")", "# return the successfully decoded message", "return", "msg_out", "[", ":", "-", "nsym", "]", ",", "msg_out", "[", "-", "nsym", ":", "]" ]
56.867925
36.679245
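A hypothetical round-trip, assuming the sibling reedsolo-style helpers (init_tables, rs_encode_msg) are in scope with their usual signatures.

init_tables(0x11d)                  # build the GF(2^8) log/antilog tables
msg = bytearray(b"hello world")
enc = rs_encode_msg(msg, nsym=10)   # append 10 parity symbols

enc[0] ^= 0xFF                      # corrupt two symbols...
enc[5] ^= 0x42

decoded, parity = rs_correct_msg_nofsynd(enc, nsym=10)
assert decoded == msg               # up to nsym//2 = 5 errors are correctable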
def tdSensor(self):
    """Get the next sensor while iterating.

    :return: a dict with the keys: protocol, model, id, datatypes.
    """
    protocol = create_string_buffer(20)
    model = create_string_buffer(20)
    sid = c_int()
    datatypes = c_int()

    self._lib.tdSensor(protocol, sizeof(protocol), model, sizeof(model),
                       byref(sid), byref(datatypes))
    return {'protocol': self._to_str(protocol), 'model': self._to_str(model),
            'id': sid.value, 'datatypes': datatypes.value}
[ "def", "tdSensor", "(", "self", ")", ":", "protocol", "=", "create_string_buffer", "(", "20", ")", "model", "=", "create_string_buffer", "(", "20", ")", "sid", "=", "c_int", "(", ")", "datatypes", "=", "c_int", "(", ")", "self", ".", "_lib", ".", "tdSensor", "(", "protocol", ",", "sizeof", "(", "protocol", ")", ",", "model", ",", "sizeof", "(", "model", ")", ",", "byref", "(", "sid", ")", ",", "byref", "(", "datatypes", ")", ")", "return", "{", "'protocol'", ":", "self", ".", "_to_str", "(", "protocol", ")", ",", "'model'", ":", "self", ".", "_to_str", "(", "model", ")", ",", "'id'", ":", "sid", ".", "value", ",", "'datatypes'", ":", "datatypes", ".", "value", "}" ]
37.866667
17.066667
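A sketch of how the wrapper might be polled. TelldusCore is a hypothetical owner of self._lib, and since the raw tdSensor return code (which signals the end of the sensor list) is not exposed by this method, the loop count must be known or bounded by the caller.

core = TelldusCore()
for _ in range(3):      # e.g. three known sensors
    info = core.tdSensor()
    print(info['protocol'], info['model'], info['id'], info['datatypes'])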
def stress(syllabified_simplex_word):
    '''Assign primary and secondary stress to 'syllabified_simplex_word'.'''
    syllables = syllabified_simplex_word.split('.')

    stressed = '\'' + syllables[0]  # primary stress

    try:
        n = 0
        medial = syllables[1:-1]

        for i, syll in enumerate(medial):
            if (i + n) % 2 == 0:
                stressed += '.' + syll
            else:
                try:
                    if is_light(syll) and is_heavy(medial[i + 1]):
                        stressed += '.' + syll
                        n += 1
                        continue
                except IndexError:
                    pass

                # secondary stress
                stressed += '.`' + syll

    except IndexError:
        pass

    if len(syllables) > 1:
        stressed += '.' + syllables[-1]

    return stressed
[ "def", "stress", "(", "syllabified_simplex_word", ")", ":", "syllables", "=", "syllabified_simplex_word", ".", "split", "(", "'.'", ")", "stressed", "=", "'\\''", "+", "syllables", "[", "0", "]", "# primary stress", "try", ":", "n", "=", "0", "medial", "=", "syllables", "[", "1", ":", "-", "1", "]", "for", "i", ",", "syll", "in", "enumerate", "(", "medial", ")", ":", "if", "(", "i", "+", "n", ")", "%", "2", "==", "0", ":", "stressed", "+=", "'.'", "+", "syll", "else", ":", "try", ":", "if", "is_light", "(", "syll", ")", "and", "is_heavy", "(", "medial", "[", "i", "+", "1", "]", ")", ":", "stressed", "+=", "'.'", "+", "syll", "n", "+=", "1", "continue", "except", "IndexError", ":", "pass", "# secondary stress", "stressed", "+=", "'.`'", "+", "syll", "except", "IndexError", ":", "pass", "if", "len", "(", "syllables", ")", ">", "1", ":", "stressed", "+=", "'.'", "+", "syllables", "[", "-", "1", "]", "return", "stressed" ]
24.705882
20.705882
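Worked example with stub weight predicates, assuming stress() is in scope. For this input ('kalastaja', Finnish for fisherman) the else-branch either short-circuits or raises IndexError, so the stubs' exact definitions do not change the output.

def is_light(syll):
    return syll[-1] in 'aeiouy'   # hypothetical: open syllable counts as light

def is_heavy(syll):
    return not is_light(syll)

print(stress('ka.las.ta.ja'))     # 'ka.las.`ta.ja  (primary, then secondary)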
def bit_by_bit(self, in_data):
    """
    Classic simple and slow CRC implementation.  This function iterates
    bit by bit over the augmented input message and returns the calculated
    CRC value at the end.
    """
    # If the input data is a string, convert to bytes.
    if isinstance(in_data, str):
        in_data = [ord(c) for c in in_data]

    register = self.NonDirectInit
    for octet in in_data:
        if self.ReflectIn:
            octet = self.reflect(octet, 8)
        for i in range(8):
            topbit = register & self.MSB_Mask
            register = ((register << 1) & self.Mask) | ((octet >> (7 - i)) & 0x01)
            if topbit:
                register ^= self.Poly

    for i in range(self.Width):
        topbit = register & self.MSB_Mask
        register = ((register << 1) & self.Mask)
        if topbit:
            register ^= self.Poly

    if self.ReflectOut:
        register = self.reflect(register, self.Width)
    return register ^ self.XorOut
[ "def", "bit_by_bit", "(", "self", ",", "in_data", ")", ":", "# If the input data is a string, convert to bytes.", "if", "isinstance", "(", "in_data", ",", "str", ")", ":", "in_data", "=", "[", "ord", "(", "c", ")", "for", "c", "in", "in_data", "]", "register", "=", "self", ".", "NonDirectInit", "for", "octet", "in", "in_data", ":", "if", "self", ".", "ReflectIn", ":", "octet", "=", "self", ".", "reflect", "(", "octet", ",", "8", ")", "for", "i", "in", "range", "(", "8", ")", ":", "topbit", "=", "register", "&", "self", ".", "MSB_Mask", "register", "=", "(", "(", "register", "<<", "1", ")", "&", "self", ".", "Mask", ")", "|", "(", "(", "octet", ">>", "(", "7", "-", "i", ")", ")", "&", "0x01", ")", "if", "topbit", ":", "register", "^=", "self", ".", "Poly", "for", "i", "in", "range", "(", "self", ".", "Width", ")", ":", "topbit", "=", "register", "&", "self", ".", "MSB_Mask", "register", "=", "(", "(", "register", "<<", "1", ")", "&", "self", ".", "Mask", ")", "if", "topbit", ":", "register", "^=", "self", ".", "Poly", "if", "self", ".", "ReflectOut", ":", "register", "=", "self", ".", "reflect", "(", "register", ",", "self", ".", "Width", ")", "return", "register", "^", "self", ".", "XorOut" ]
36.586207
15.068966
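For comparison, a self-contained reflected CRC-32 (polynomial 0xEDB88320, init and final XOR 0xFFFFFFFF) built around the same per-bit shift/XOR core; it reproduces the standard check value for "123456789".

def crc32_bitwise(data: bytes) -> int:
    crc = 0xFFFFFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            if crc & 1:
                crc = (crc >> 1) ^ 0xEDB88320
            else:
                crc >>= 1
    return crc ^ 0xFFFFFFFF

assert crc32_bitwise(b"123456789") == 0xCBF43926  # standard check value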
def get_impls(interfaces):
    """Get impls from their interfaces."""
    if interfaces is None:
        return None
    elif isinstance(interfaces, Mapping):
        return {name: interfaces[name]._impl for name in interfaces}
    elif isinstance(interfaces, Sequence):
        return [interface._impl for interface in interfaces]
    else:
        return interfaces._impl
[ "def", "get_impls", "(", "interfaces", ")", ":", "if", "interfaces", "is", "None", ":", "return", "None", "elif", "isinstance", "(", "interfaces", ",", "Mapping", ")", ":", "return", "{", "name", ":", "interfaces", "[", "name", "]", ".", "_impl", "for", "name", "in", "interfaces", "}", "elif", "isinstance", "(", "interfaces", ",", "Sequence", ")", ":", "return", "[", "interfaces", ".", "_impl", "for", "interfaces", "in", "interfaces", "]", "else", ":", "return", "interfaces", ".", "_impl" ]
28.153846
20.153846
def refill(self, from_address, to_address, nfees, ntokens, password, min_confirmations=6, sync=False):
    """
    Refill wallets with the necessary fuel to perform spool transactions

    Args:
        from_address (Tuple[str]): Federation wallet address. Fuels the
            wallets with tokens and fees. All transactions to wallets
            holding a particular piece should come from the Federation wallet
        to_address (str): Wallet address that needs to perform a spool transaction
        nfees (int): Number of fees to transfer. Each fee is 10000
            satoshi. Used to pay for the transactions
        ntokens (int): Number of tokens to transfer. Each token is 600
            satoshi. Used to register hashes in the blockchain
        password (str): Password for the Federation wallet. Used to sign
            the transaction
        min_confirmations (int): Number of confirmations when choosing
            the inputs of the transaction. Defaults to 6
        sync (bool): Perform the transaction in synchronous mode, the call
            to the function will block until there is at least one
            confirmation on the blockchain. Defaults to False

    Returns:
        str: transaction id

    """
    path, from_address = from_address
    verb = Spoolverb()
    # nfees + 1: nfees to refill plus one fee for the refill transaction itself
    inputs = self.select_inputs(from_address, nfees + 1, ntokens, min_confirmations=min_confirmations)
    outputs = [{'address': to_address, 'value': self.token}] * ntokens
    outputs += [{'address': to_address, 'value': self.fee}] * nfees
    outputs += [{'script': self._t._op_return_hex(verb.fuel), 'value': 0}]
    unsigned_tx = self._t.build_transaction(inputs, outputs)
    signed_tx = self._t.sign_transaction(unsigned_tx, password, path=path)
    txid = self._t.push(signed_tx)
    return txid
[ "def", "refill", "(", "self", ",", "from_address", ",", "to_address", ",", "nfees", ",", "ntokens", ",", "password", ",", "min_confirmations", "=", "6", ",", "sync", "=", "False", ")", ":", "path", ",", "from_address", "=", "from_address", "verb", "=", "Spoolverb", "(", ")", "# nfees + 1: nfees to refill plus one fee for the refill transaction itself", "inputs", "=", "self", ".", "select_inputs", "(", "from_address", ",", "nfees", "+", "1", ",", "ntokens", ",", "min_confirmations", "=", "min_confirmations", ")", "outputs", "=", "[", "{", "'address'", ":", "to_address", ",", "'value'", ":", "self", ".", "token", "}", "]", "*", "ntokens", "outputs", "+=", "[", "{", "'address'", ":", "to_address", ",", "'value'", ":", "self", ".", "fee", "}", "]", "*", "nfees", "outputs", "+=", "[", "{", "'script'", ":", "self", ".", "_t", ".", "_op_return_hex", "(", "verb", ".", "fuel", ")", ",", "'value'", ":", "0", "}", "]", "unsigned_tx", "=", "self", ".", "_t", ".", "build_transaction", "(", "inputs", ",", "outputs", ")", "signed_tx", "=", "self", ".", "_t", ".", "sign_transaction", "(", "unsigned_tx", ",", "password", ",", "path", "=", "path", ")", "txid", "=", "self", ".", "_t", ".", "push", "(", "signed_tx", ")", "return", "txid" ]
62.733333
39.133333
def get_index(self, value):
    """
    Return the index (or indices) of the given value (or values) in
    `state_values`.

    Parameters
    ----------
    value
        Value(s) to get the index (indices) for.

    Returns
    -------
    idx : int or ndarray(int)
        Index of `value` if `value` is a single state value; array of
        indices if `value` is an array_like of state values.

    """
    if self.state_values is None:
        state_values_ndim = 1
    else:
        state_values_ndim = self.state_values.ndim

    values = np.asarray(value)

    if values.ndim <= state_values_ndim - 1:
        return self._get_index(value)
    elif values.ndim == state_values_ndim:  # array of values
        k = values.shape[0]
        idx = np.empty(k, dtype=int)
        for i in range(k):
            idx[i] = self._get_index(values[i])
        return idx
    else:
        raise ValueError('invalid value')
[ "def", "get_index", "(", "self", ",", "value", ")", ":", "if", "self", ".", "state_values", "is", "None", ":", "state_values_ndim", "=", "1", "else", ":", "state_values_ndim", "=", "self", ".", "state_values", ".", "ndim", "values", "=", "np", ".", "asarray", "(", "value", ")", "if", "values", ".", "ndim", "<=", "state_values_ndim", "-", "1", ":", "return", "self", ".", "_get_index", "(", "value", ")", "elif", "values", ".", "ndim", "==", "state_values_ndim", ":", "# array of values", "k", "=", "values", ".", "shape", "[", "0", "]", "idx", "=", "np", ".", "empty", "(", "k", ",", "dtype", "=", "int", ")", "for", "i", "in", "range", "(", "k", ")", ":", "idx", "[", "i", "]", "=", "self", ".", "_get_index", "(", "values", "[", "i", "]", ")", "return", "idx", "else", ":", "raise", "ValueError", "(", "'invalid value'", ")" ]
29.441176
18.5
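A minimal sketch of the scalar/array dispatch, with _get_index stubbed as a plain linear search over 1-d state values (the real class does more than this).

import numpy as np

class States:
    def __init__(self, state_values):
        self.state_values = np.asarray(state_values)

    def _get_index(self, value):
        matches = np.where(self.state_values == value)[0]
        if matches.size == 0:
            raise ValueError('value not found')
        return int(matches[0])

    def get_index(self, value):
        values = np.asarray(value)
        if values.ndim == 0:                 # single state value
            return self._get_index(value)
        idx = np.empty(values.shape[0], dtype=int)
        for i in range(values.shape[0]):
            idx[i] = self._get_index(values[i])
        return idx

s = States(['low', 'mid', 'high'])
print(s.get_index('mid'))             # 1
print(s.get_index(['high', 'low']))   # [2 0]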
def replace_random_tokens_bow(self,
                              n_samples,          # type: int
                              replacement='',     # type: str
                              random_state=None,
                              min_replace=1,      # type: Union[int, float]
                              max_replace=1.0,    # type: Union[int, float]
                              ):
    # type: (...) -> List[Tuple[str, int, np.ndarray]]
    """
    Return a list of ``(text, replaced_words_count, mask)``
    tuples with n_samples versions of text with some words replaced.
    If a word is replaced, all duplicate words are also replaced
    from the text. By default words are replaced with '', i.e. removed.
    """
    if not self.vocab:
        nomask = np.array([], dtype=int)
        return [('', 0, nomask)] * n_samples
    min_replace, max_replace = self._get_min_max(min_replace, max_replace,
                                                 len(self.vocab))
    rng = check_random_state(random_state)
    replace_sizes = rng.randint(low=min_replace, high=max_replace + 1,
                                size=n_samples)
    res = []
    for num_to_replace in replace_sizes:
        tokens_to_replace = set(rng.choice(self.vocab, num_to_replace,
                                           replace=False))
        idx_to_replace = [idx for idx, token in enumerate(self.tokens)
                          if token in tokens_to_replace]
        mask = indices_to_bool_mask(idx_to_replace, len(self.tokens))
        s = self.split.masked(idx_to_replace, replacement)
        res.append((s.text, num_to_replace, mask))
    return res
[ "def", "replace_random_tokens_bow", "(", "self", ",", "n_samples", ",", "# type: int", "replacement", "=", "''", ",", "# type: str", "random_state", "=", "None", ",", "min_replace", "=", "1", ",", "# type: Union[int, float]", "max_replace", "=", "1.0", ",", "# type: Union[int, float]", ")", ":", "# type: (...) -> List[Tuple[str, int, np.ndarray]]", "if", "not", "self", ".", "vocab", ":", "nomask", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "int", ")", "return", "[", "(", "''", ",", "0", ",", "nomask", ")", "]", "*", "n_samples", "min_replace", ",", "max_replace", "=", "self", ".", "_get_min_max", "(", "min_replace", ",", "max_replace", ",", "len", "(", "self", ".", "vocab", ")", ")", "rng", "=", "check_random_state", "(", "random_state", ")", "replace_sizes", "=", "rng", ".", "randint", "(", "low", "=", "min_replace", ",", "high", "=", "max_replace", "+", "1", ",", "size", "=", "n_samples", ")", "res", "=", "[", "]", "for", "num_to_replace", "in", "replace_sizes", ":", "tokens_to_replace", "=", "set", "(", "rng", ".", "choice", "(", "self", ".", "vocab", ",", "num_to_replace", ",", "replace", "=", "False", ")", ")", "idx_to_replace", "=", "[", "idx", "for", "idx", ",", "token", "in", "enumerate", "(", "self", ".", "tokens", ")", "if", "token", "in", "tokens_to_replace", "]", "mask", "=", "indices_to_bool_mask", "(", "idx_to_replace", ",", "len", "(", "self", ".", "tokens", ")", ")", "s", "=", "self", ".", "split", ".", "masked", "(", "idx_to_replace", ",", "replacement", ")", "res", ".", "append", "(", "(", "s", ".", "text", ",", "num_to_replace", ",", "mask", ")", ")", "return", "res" ]
52.242424
20.606061
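A toy illustration of the bag-of-words semantics, independent of the class above: replacing a token type removes all of its occurrences, and the boolean mask marks the affected positions in the original token sequence.

import numpy as np

tokens = ['the', 'cat', 'sat', 'on', 'the', 'mat']
to_replace = {'the'}
idx = [i for i, t in enumerate(tokens) if t in to_replace]
mask = np.zeros(len(tokens), dtype=bool)
mask[idx] = True
print(' '.join(t for i, t in enumerate(tokens) if not mask[i]))  # cat sat on mat
print(mask)  # [ True False False False  True False]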
def r_large(self, x, r0):
    """
    Approximate trajectory function for large (:math:`r_0 > \\sigma_r`)
    oscillations.
    """
    return r0*_np.cos(x*self.omega_big(r0))
[ "def", "r_large", "(", "self", ",", "x", ",", "r0", ")", ":", "return", "r0", "*", "_np", ".", "cos", "(", "x", "*", "self", ".", "omega_big", "(", "r0", ")", ")" ]
36.6
14.2
def reset(self):
    """Remove all the information from previous dataset before loading a
    new dataset.

    """
    # store current dataset
    max_dataset_history = self.value('max_dataset_history')
    keep_recent_datasets(max_dataset_history, self.info)

    # reset all the widgets
    self.labels.reset()
    self.channels.reset()
    self.info.reset()
    self.notes.reset()
    self.overview.reset()
    self.spectrum.reset()
    self.traces.reset()
[ "def", "reset", "(", "self", ")", ":", "# store current dataset", "max_dataset_history", "=", "self", ".", "value", "(", "'max_dataset_history'", ")", "keep_recent_datasets", "(", "max_dataset_history", ",", "self", ".", "info", ")", "# reset all the widgets", "self", ".", "labels", ".", "reset", "(", ")", "self", ".", "channels", ".", "reset", "(", ")", "self", ".", "info", ".", "reset", "(", ")", "self", ".", "notes", ".", "reset", "(", ")", "self", ".", "overview", ".", "reset", "(", ")", "self", ".", "spectrum", ".", "reset", "(", ")", "self", ".", "traces", ".", "reset", "(", ")" ]
29.411765
16.058824
def list_user_participants(self, appointment_group, **kwargs):
    """
    List user participants in this appointment group.

    .. warning::
        .. deprecated:: 0.10.0
            Use :func:`canvasapi.canvas.Canvas.get_user_participants` instead.

    :calls: `GET /api/v1/appointment_groups/:id/users \
        <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.users>`_

    :param appointment_group: The object or ID of the appointment group.
    :type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.user.User`
    """
    warnings.warn(
        "`list_user_participants` is being deprecated and will be removed in a future version."
        " Use `get_user_participants` instead",
        DeprecationWarning
    )

    return self.get_user_participants(appointment_group, **kwargs)
[ "def", "list_user_participants", "(", "self", ",", "appointment_group", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"`list_user_participants` is being deprecated and will be removed in a future version.\"", "\" Use `get_user_participants` instead\"", ",", "DeprecationWarning", ")", "return", "self", ".", "get_user_participants", "(", "appointment_group", ",", "*", "*", "kwargs", ")" ]
42.956522
30.26087
def create_menu(self, menu_data):
    """
    Create a custom menu ::

        # -*- coding: utf-8 -*-
        wechat = WechatBasic(appid='appid', appsecret='appsecret')
        wechat.create_menu({
            'button':[
                {
                    'type': 'click',
                    'name': 'Song of the Day',
                    'key': 'V1001_TODAY_MUSIC'
                },
                {
                    'type': 'click',
                    'name': 'Singer Info',
                    'key': 'V1001_TODAY_SINGER'
                },
                {
                    'name': 'Menu',
                    'sub_button': [
                        {
                            'type': 'view',
                            'name': 'Search',
                            'url': 'http://www.soso.com/'
                        },
                        {
                            'type': 'view',
                            'name': 'Video',
                            'url': 'http://v.qq.com/'
                        },
                        {
                            'type': 'click',
                            'name': 'Like Us',
                            'key': 'V1001_GOOD'
                        }
                    ]
                }
            ]})

    For details see http://mp.weixin.qq.com/wiki/13/43de8269be54a0a6f64413e4dfa94f39.html

    :param menu_data: a Python dict with the menu definition
    :return: the JSON data returned by the API
    """
    menu_data = self._transcoding_dict(menu_data)
    return self.request.post(
        url='https://api.weixin.qq.com/cgi-bin/menu/create',
        data=menu_data
    )
[ "def", "create_menu", "(", "self", ",", "menu_data", ")", ":", "menu_data", "=", "self", ".", "_transcoding_dict", "(", "menu_data", ")", "return", "self", ".", "request", ".", "post", "(", "url", "=", "'https://api.weixin.qq.com/cgi-bin/menu/create'", ",", "data", "=", "menu_data", ")" ]
34.693878
13.102041
def _get_dbid2goids(associations):
    """Return a mapping of database IDs (DB_ID) to sets of GO IDs for the given associations."""
    id2gos = cx.defaultdict(set)
    for ntd in associations:
        id2gos[ntd.DB_ID].add(ntd.GO_ID)
    return dict(id2gos)
[ "def", "_get_dbid2goids", "(", "associations", ")", ":", "id2gos", "=", "cx", ".", "defaultdict", "(", "set", ")", "for", "ntd", "in", "associations", ":", "id2gos", "[", "ntd", ".", "DB_ID", "]", ".", "add", "(", "ntd", ".", "GO_ID", ")", "return", "dict", "(", "id2gos", ")" ]
38.833333
5.833333
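Usage sketch, assuming the function above is in scope; the association records are hypothetical namedtuples with the DB_ID and GO_ID fields it expects.

from collections import namedtuple

Assoc = namedtuple('Assoc', 'DB_ID GO_ID')
assocs = [Assoc('P12345', 'GO:0005737'),
          Assoc('P12345', 'GO:0006412'),
          Assoc('Q67890', 'GO:0005737')]
print(_get_dbid2goids(assocs))
# {'P12345': {'GO:0005737', 'GO:0006412'}, 'Q67890': {'GO:0005737'}}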
def parse_pair_results(data, sample, res):
    """ parse results from cutadapt for paired data"""
    LOGGER.info("in parse pair mod results\n%s", res)
    ## set default values
    sample.stats_dfs.s2["trim_adapter_bp_read1"] = 0
    sample.stats_dfs.s2["trim_adapter_bp_read2"] = 0
    sample.stats_dfs.s2["trim_quality_bp_read1"] = 0
    sample.stats_dfs.s2["trim_quality_bp_read2"] = 0
    sample.stats_dfs.s2["reads_filtered_by_Ns"] = 0
    sample.stats_dfs.s2["reads_filtered_by_minlen"] = 0
    sample.stats_dfs.s2["reads_passed_filter"] = 0

    lines = res.strip().split("\n")
    qprimed = 0
    for line in lines:
        ## set primer to catch next line
        if "Quality-trimmed" in line:
            qprimed = 1

        ## grab read1 and read2 lines when qprimed
        if "Read 1:" in line:
            if qprimed:
                value = int(line.split()[2].replace(",", ""))
                sample.stats_dfs.s2["trim_quality_bp_read1"] = value

        if "Read 2:" in line:
            if qprimed:
                value = int(line.split()[2].replace(",", ""))
                sample.stats_dfs.s2["trim_quality_bp_read2"] = value
                qprimed = 0

        if "Read 1 with adapter:" in line:
            value = int(line.split()[4].replace(",", ""))
            sample.stats_dfs.s2["trim_adapter_bp_read1"] = value

        if "Read 2 with adapter:" in line:
            value = int(line.split()[4].replace(",", ""))
            sample.stats_dfs.s2["trim_adapter_bp_read2"] = value

        if "Total read pairs processed:" in line:
            value = int(line.split()[4].replace(",", ""))
            sample.stats_dfs.s2["reads_raw"] = value

        if "Pairs that were too short" in line:
            value = int(line.split()[5].replace(",", ""))
            sample.stats_dfs.s2["reads_filtered_by_minlen"] = value

        if "Pairs with too many N" in line:
            value = int(line.split()[5].replace(",", ""))
            sample.stats_dfs.s2["reads_filtered_by_Ns"] = value

        if "Pairs written (passing filters):" in line:
            value = int(line.split()[4].replace(",", ""))
            sample.stats_dfs.s2["reads_passed_filter"] = value

    ## save to stats summary
    if sample.stats_dfs.s2.reads_passed_filter:
        sample.stats.state = 2
        sample.stats.reads_passed_filter = sample.stats_dfs.s2.reads_passed_filter
        sample.files.edits = [(
            OPJ(data.dirs.edits, sample.name+".trimmed_R1_.fastq.gz"),
            OPJ(data.dirs.edits, sample.name+".trimmed_R2_.fastq.gz")
            )]
    else:
        print("No reads passed filtering in Sample: {}".format(sample.name))
[ "def", "parse_pair_results", "(", "data", ",", "sample", ",", "res", ")", ":", "LOGGER", ".", "info", "(", "\"in parse pair mod results\\n%s\"", ",", "res", ")", "## set default values", "sample", ".", "stats_dfs", ".", "s2", "[", "\"trim_adapter_bp_read1\"", "]", "=", "0", "sample", ".", "stats_dfs", ".", "s2", "[", "\"trim_adapter_bp_read2\"", "]", "=", "0", "sample", ".", "stats_dfs", ".", "s2", "[", "\"trim_quality_bp_read1\"", "]", "=", "0", "sample", ".", "stats_dfs", ".", "s2", "[", "\"trim_quality_bp_read2\"", "]", "=", "0", "sample", ".", "stats_dfs", ".", "s2", "[", "\"reads_filtered_by_Ns\"", "]", "=", "0", "sample", ".", "stats_dfs", ".", "s2", "[", "\"reads_filtered_by_minlen\"", "]", "=", "0", "sample", ".", "stats_dfs", ".", "s2", "[", "\"reads_passed_filter\"", "]", "=", "0", "lines", "=", "res", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "qprimed", "=", "0", "for", "line", "in", "lines", ":", "## set primer to catch next line", "if", "\"Quality-trimmed\"", "in", "line", ":", "qprimed", "=", "1", "## grab read1 and read2 lines when qprimed", "if", "\"Read 1:\"", "in", "line", ":", "if", "qprimed", ":", "value", "=", "int", "(", "line", ".", "split", "(", ")", "[", "2", "]", ".", "replace", "(", "\",\"", ",", "\"\"", ")", ")", "sample", ".", "stats_dfs", ".", "s2", "[", "\"trim_quality_bp_read1\"", "]", "=", "value", "if", "\"Read 2:\"", "in", "line", ":", "if", "qprimed", ":", "value", "=", "int", "(", "line", ".", "split", "(", ")", "[", "2", "]", ".", "replace", "(", "\",\"", ",", "\"\"", ")", ")", "sample", ".", "stats_dfs", ".", "s2", "[", "\"trim_quality_bp_read2\"", "]", "=", "value", "qprimed", "=", "0", "if", "\"Read 1 with adapter:\"", "in", "line", ":", "value", "=", "int", "(", "line", ".", "split", "(", ")", "[", "4", "]", ".", "replace", "(", "\",\"", ",", "\"\"", ")", ")", "sample", ".", "stats_dfs", ".", "s2", "[", "\"trim_adapter_bp_read1\"", "]", "=", "value", "if", "\"Read 2 with adapter:\"", "in", "line", ":", "value", "=", "int", "(", "line", ".", "split", "(", ")", "[", "4", "]", ".", "replace", "(", "\",\"", ",", "\"\"", ")", ")", "sample", ".", "stats_dfs", ".", "s2", "[", "\"trim_adapter_bp_read2\"", "]", "=", "value", "if", "\"Total read pairs processed:\"", "in", "line", ":", "value", "=", "int", "(", "line", ".", "split", "(", ")", "[", "4", "]", ".", "replace", "(", "\",\"", ",", "\"\"", ")", ")", "sample", ".", "stats_dfs", ".", "s2", "[", "\"reads_raw\"", "]", "=", "value", "if", "\"Pairs that were too short\"", "in", "line", ":", "value", "=", "int", "(", "line", ".", "split", "(", ")", "[", "5", "]", ".", "replace", "(", "\",\"", ",", "\"\"", ")", ")", "sample", ".", "stats_dfs", ".", "s2", "[", "\"reads_filtered_by_minlen\"", "]", "=", "value", "if", "\"Pairs with too many N\"", "in", "line", ":", "value", "=", "int", "(", "line", ".", "split", "(", ")", "[", "5", "]", ".", "replace", "(", "\",\"", ",", "\"\"", ")", ")", "sample", ".", "stats_dfs", ".", "s2", "[", "\"reads_filtered_by_Ns\"", "]", "=", "value", "if", "\"Pairs written (passing filters):\"", "in", "line", ":", "value", "=", "int", "(", "line", ".", "split", "(", ")", "[", "4", "]", ".", "replace", "(", "\",\"", ",", "\"\"", ")", ")", "sample", ".", "stats_dfs", ".", "s2", "[", "\"reads_passed_filter\"", "]", "=", "value", "## save to stats summary", "if", "sample", ".", "stats_dfs", ".", "s2", ".", "reads_passed_filter", ":", "sample", ".", "stats", ".", "state", "=", "2", "sample", ".", "stats", ".", "reads_passed_filter", "=", "sample", ".", "stats_dfs", 
".", "s2", ".", "reads_passed_filter", "sample", ".", "files", ".", "edits", "=", "[", "(", "OPJ", "(", "data", ".", "dirs", ".", "edits", ",", "sample", ".", "name", "+", "\".trimmed_R1_.fastq.gz\"", ")", ",", "OPJ", "(", "data", ".", "dirs", ".", "edits", ",", "sample", ".", "name", "+", "\".trimmed_R2_.fastq.gz\"", ")", ")", "]", "else", ":", "print", "(", "\"No reads passed filtering in Sample: {}\"", ".", "format", "(", "sample", ".", "name", ")", ")" ]
39.560606
19.924242
def configure_sbi(self, sbi_config: dict, schema_path: str = None):
    """Add a new SBI to the database associated with this subarray.

    Args:
        sbi_config (dict): SBI configuration.
        schema_path (str, optional): Path to the SBI config schema.

    """
    if not self.active:
        raise RuntimeError("Unable to add SBIs to inactive subarray!")
    sbi_config['subarray_id'] = self._id
    sbi = SchedulingBlockInstance.from_config(sbi_config, schema_path)
    self._add_sbi_id(sbi_config['id'])
    return sbi
[ "def", "configure_sbi", "(", "self", ",", "sbi_config", ":", "dict", ",", "schema_path", ":", "str", "=", "None", ")", ":", "if", "not", "self", ".", "active", ":", "raise", "RuntimeError", "(", "\"Unable to add SBIs to inactive subarray!\"", ")", "sbi_config", "[", "'subarray_id'", "]", "=", "self", ".", "_id", "sbi", "=", "SchedulingBlockInstance", ".", "from_config", "(", "sbi_config", ",", "schema_path", ")", "self", ".", "_add_sbi_id", "(", "sbi_config", "[", "'id'", "]", ")", "return", "sbi" ]
40.071429
20.214286
def _export_table(dataset, column_names=None, byteorder="=", shuffle=False, selection=False,
                  progress=None, virtual=True, sort=None, ascending=True):
    """
    :param DatasetLocal dataset: dataset to export
    :param list[str] column_names: list of column names to export or None for all columns
    :param str byteorder: = for native, < for little endian and > for big endian
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should
        return True to continue; pass progress=True for a default progress bar
    :param bool virtual: When True, export virtual columns
    :return:
    """
    column_names = column_names or dataset.get_column_names(virtual=virtual, strings=True)
    for name in column_names:
        if name not in dataset.columns:
            warnings.warn('Exporting to arrow with virtual columns is not efficient')

    N = len(dataset) if not selection else dataset.selected_length(selection)
    if N == 0:
        raise ValueError("Cannot export empty table")

    if shuffle and sort:
        raise ValueError("Cannot shuffle and sort at the same time")

    if shuffle:
        random_index_column = "random_index"
        while random_index_column in dataset.get_column_names():
            random_index_column += "_new"

    partial_shuffle = shuffle and len(dataset) != N

    order_array = None
    if partial_shuffle:
        # if we only export a portion, we need to create the full length random_index array, and
        shuffle_array_full = np.random.choice(len(dataset), len(dataset), replace=False)
        # then take a section of it
        # shuffle_array[:] = shuffle_array_full[:N]
        shuffle_array = shuffle_array_full[shuffle_array_full < N]
        del shuffle_array_full
        order_array = shuffle_array
    elif shuffle:
        shuffle_array = np.random.choice(N, N, replace=False)
        order_array = shuffle_array

    if sort:
        if selection:
            raise ValueError("sorting selections not yet supported")
        logger.info("sorting...")
        indices = np.argsort(dataset.evaluate(sort))
        order_array = indices if ascending else indices[::-1]
        logger.info("sorting done")

    if selection:
        full_mask = dataset.evaluate_selection_mask(selection)
    else:
        full_mask = None

    arrow_arrays = []
    for column_name in column_names:
        mask = full_mask
        if selection:
            values = dataset.evaluate(column_name, filtered=False)
            values = values[mask]
        else:
            values = dataset.evaluate(column_name)
        if shuffle or sort:
            indices = order_array
            values = values[indices]
        arrow_arrays.append(arrow_array_from_numpy_array(values))
    if shuffle:
        arrow_arrays.append(arrow_array_from_numpy_array(order_array))
        column_names = column_names + [random_index_column]
    table = pa.Table.from_arrays(arrow_arrays, column_names)
    return table
[ "def", "_export_table", "(", "dataset", ",", "column_names", "=", "None", ",", "byteorder", "=", "\"=\"", ",", "shuffle", "=", "False", ",", "selection", "=", "False", ",", "progress", "=", "None", ",", "virtual", "=", "True", ",", "sort", "=", "None", ",", "ascending", "=", "True", ")", ":", "column_names", "=", "column_names", "or", "dataset", ".", "get_column_names", "(", "virtual", "=", "virtual", ",", "strings", "=", "True", ")", "for", "name", "in", "column_names", ":", "if", "name", "not", "in", "dataset", ".", "columns", ":", "warnings", ".", "warn", "(", "'Exporting to arrow with virtual columns is not efficient'", ")", "N", "=", "len", "(", "dataset", ")", "if", "not", "selection", "else", "dataset", ".", "selected_length", "(", "selection", ")", "if", "N", "==", "0", ":", "raise", "ValueError", "(", "\"Cannot export empty table\"", ")", "if", "shuffle", "and", "sort", ":", "raise", "ValueError", "(", "\"Cannot shuffle and sort at the same time\"", ")", "if", "shuffle", ":", "random_index_column", "=", "\"random_index\"", "while", "random_index_column", "in", "dataset", ".", "get_column_names", "(", ")", ":", "random_index_column", "+=", "\"_new\"", "partial_shuffle", "=", "shuffle", "and", "len", "(", "dataset", ")", "!=", "N", "order_array", "=", "None", "if", "partial_shuffle", ":", "# if we only export a portion, we need to create the full length random_index array, and", "shuffle_array_full", "=", "np", ".", "random", ".", "choice", "(", "len", "(", "dataset", ")", ",", "len", "(", "dataset", ")", ",", "replace", "=", "False", ")", "# then take a section of it", "# shuffle_array[:] = shuffle_array_full[:N]", "shuffle_array", "=", "shuffle_array_full", "[", "shuffle_array_full", "<", "N", "]", "del", "shuffle_array_full", "order_array", "=", "shuffle_array", "elif", "shuffle", ":", "shuffle_array", "=", "np", ".", "random", ".", "choice", "(", "N", ",", "N", ",", "replace", "=", "False", ")", "order_array", "=", "shuffle_array", "if", "sort", ":", "if", "selection", ":", "raise", "ValueError", "(", "\"sorting selections not yet supported\"", ")", "logger", ".", "info", "(", "\"sorting...\"", ")", "indices", "=", "np", ".", "argsort", "(", "dataset", ".", "evaluate", "(", "sort", ")", ")", "order_array", "=", "indices", "if", "ascending", "else", "indices", "[", ":", ":", "-", "1", "]", "logger", ".", "info", "(", "\"sorting done\"", ")", "if", "selection", ":", "full_mask", "=", "dataset", ".", "evaluate_selection_mask", "(", "selection", ")", "else", ":", "full_mask", "=", "None", "arrow_arrays", "=", "[", "]", "for", "column_name", "in", "column_names", ":", "mask", "=", "full_mask", "if", "selection", ":", "values", "=", "dataset", ".", "evaluate", "(", "column_name", ",", "filtered", "=", "False", ")", "values", "=", "values", "[", "mask", "]", "else", ":", "values", "=", "dataset", ".", "evaluate", "(", "column_name", ")", "if", "shuffle", "or", "sort", ":", "indices", "=", "order_array", "values", "=", "values", "[", "indices", "]", "arrow_arrays", ".", "append", "(", "arrow_array_from_numpy_array", "(", "values", ")", ")", "if", "shuffle", ":", "arrow_arrays", ".", "append", "(", "arrow_array_from_numpy_array", "(", "order_array", ")", ")", "column_names", "=", "column_names", "+", "[", "random_index_column", "]", "table", "=", "pa", ".", "Table", ".", "from_arrays", "(", "arrow_arrays", ",", "column_names", ")", "return", "table" ]
41.972603
22.547945
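The partial-shuffle trick above in isolation: draw a full-length permutation, then keep only the entries that index the first N rows, which yields a random order over exactly those rows.

import numpy as np

full_len, N = 10, 4
perm = np.random.choice(full_len, full_len, replace=False)
order = perm[perm < N]                    # random order over rows 0..N-1
assert sorted(order) == list(range(N))
print(order)                              # e.g. [2 0 3 1]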
def _set_dpi_awareness(self):
    """ Set DPI aware to capture full screen on Hi-DPI monitors. """
    version = sys.getwindowsversion()[:2]  # pylint: disable=no-member
    if version >= (6, 3):
        # Windows 8.1+
        # Here 2 = PROCESS_PER_MONITOR_DPI_AWARE, which means:
        # per monitor DPI aware. This app checks for the DPI when it is
        # created and adjusts the scale factor whenever the DPI changes.
        # These applications are not automatically scaled by the system.
        ctypes.windll.shcore.SetProcessDpiAwareness(2)
    elif (6, 0) <= version < (6, 3):
        # Windows Vista, 7, 8 and Server 2012
        self.user32.SetProcessDPIAware()
[ "def", "_set_dpi_awareness", "(", "self", ")", ":", "version", "=", "sys", ".", "getwindowsversion", "(", ")", "[", ":", "2", "]", "# pylint: disable=no-member", "if", "version", ">=", "(", "6", ",", "3", ")", ":", "# Windows 8.1+", "# Here 2 = PROCESS_PER_MONITOR_DPI_AWARE, which means:", "# per monitor DPI aware. This app checks for the DPI when it is", "# created and adjusts the scale factor whenever the DPI changes.", "# These applications are not automatically scaled by the system.", "ctypes", ".", "windll", ".", "shcore", ".", "SetProcessDpiAwareness", "(", "2", ")", "elif", "(", "6", ",", "0", ")", "<=", "version", "<", "(", "6", ",", "3", ")", ":", "# Windows Vista, 7, 8 and Server 2012", "self", ".", "user32", ".", "SetProcessDPIAware", "(", ")" ]
51.857143
20.428571
def thing_type_exists(thingTypeName,
                      region=None, key=None, keyid=None, profile=None):
    '''
    Given a thing type name, check to see if the given thing type exists

    Returns {'exists': True} if the given thing type exists and
    {'exists': False} if the given thing type does not exist.

    .. versionadded:: 2016.11.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iot.thing_type_exists mythingtype

    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        res = conn.describe_thing_type(thingTypeName=thingTypeName)
        if res.get('thingTypeName'):
            return {'exists': True}
        else:
            return {'exists': False}
    except ClientError as e:
        err = __utils__['boto3.get_error'](e)
        if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
            return {'exists': False}
        return {'error': err}
[ "def", "thing_type_exists", "(", "thingTypeName", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "res", "=", "conn", ".", "describe_thing_type", "(", "thingTypeName", "=", "thingTypeName", ")", "if", "res", ".", "get", "(", "'thingTypeName'", ")", ":", "return", "{", "'exists'", ":", "True", "}", "else", ":", "return", "{", "'exists'", ":", "False", "}", "except", "ClientError", "as", "e", ":", "err", "=", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "if", "e", ".", "response", ".", "get", "(", "'Error'", ",", "{", "}", ")", ".", "get", "(", "'Code'", ")", "==", "'ResourceNotFoundException'", ":", "return", "{", "'exists'", ":", "False", "}", "return", "{", "'error'", ":", "err", "}" ]
30.5
23.966667
def get_groups_of_account_apikey(self, account_id, api_key, **kwargs):  # noqa: E501
    """Get groups of the API key.  # noqa: E501

    An endpoint for retrieving groups of the API key.  **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey}/groups -H 'Authorization: Bearer API_KEY'`  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.get_groups_of_account_apikey(account_id, api_key, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param str api_key: The ID of the API key whose details are retrieved. (required)
    :param int limit: The number of results to return (2-1000), default is 50.
    :param str after: The entity ID to fetch after the given one.
    :param str order: The order of the records based on creation time, ASC or DESC; by default ASC
    :param str include: Comma separated additional data to return. Currently supported: total_count
    :return: GroupSummaryList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.get_groups_of_account_apikey_with_http_info(account_id, api_key, **kwargs)  # noqa: E501
    else:
        (data) = self.get_groups_of_account_apikey_with_http_info(account_id, api_key, **kwargs)  # noqa: E501
        return data
[ "def", "get_groups_of_account_apikey", "(", "self", ",", "account_id", ",", "api_key", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "get_groups_of_account_apikey_with_http_info", "(", "account_id", ",", "api_key", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "get_groups_of_account_apikey_with_http_info", "(", "account_id", ",", "api_key", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
63.076923
35.153846
def dist_abs(self, src, tar, max_offset=5, max_distance=0):
    """Return the "common" Sift4 distance between two terms.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    max_offset : int
        The number of characters to search for matching letters
    max_distance : int
        The distance at which to stop and exit

    Returns
    -------
    int
        The Sift4 distance according to the common formula

    Examples
    --------
    >>> cmp = Sift4()
    >>> cmp.dist_abs('cat', 'hat')
    1
    >>> cmp.dist_abs('Niall', 'Neil')
    2
    >>> cmp.dist_abs('Colin', 'Cuilen')
    3
    >>> cmp.dist_abs('ATCG', 'TAGC')
    2

    """
    if not src:
        return len(tar)

    if not tar:
        return len(src)

    src_len = len(src)
    tar_len = len(tar)

    src_cur = 0
    tar_cur = 0
    lcss = 0
    local_cs = 0
    trans = 0
    offset_arr = []

    while (src_cur < src_len) and (tar_cur < tar_len):
        if src[src_cur] == tar[tar_cur]:
            local_cs += 1
            is_trans = False
            i = 0
            while i < len(offset_arr):
                ofs = offset_arr[i]
                if src_cur <= ofs['src_cur'] or tar_cur <= ofs['tar_cur']:
                    is_trans = abs(tar_cur - src_cur) >= abs(
                        ofs['tar_cur'] - ofs['src_cur']
                    )
                    if is_trans:
                        trans += 1
                    elif not ofs['trans']:
                        ofs['trans'] = True
                        trans += 1
                    break
                elif src_cur > ofs['tar_cur'] and tar_cur > ofs['src_cur']:
                    del offset_arr[i]
                else:
                    i += 1

            offset_arr.append(
                {'src_cur': src_cur, 'tar_cur': tar_cur, 'trans': is_trans}
            )
        else:
            lcss += local_cs
            local_cs = 0
            if src_cur != tar_cur:
                src_cur = tar_cur = min(src_cur, tar_cur)
            for i in range(max_offset):
                if not (
                    (src_cur + i < src_len) or (tar_cur + i < tar_len)
                ):
                    break
                if (src_cur + i < src_len) and (
                    src[src_cur + i] == tar[tar_cur]
                ):
                    src_cur += i - 1
                    tar_cur -= 1
                    break
                if (tar_cur + i < tar_len) and (
                    src[src_cur] == tar[tar_cur + i]
                ):
                    src_cur -= 1
                    tar_cur += i - 1
                    break

        src_cur += 1
        tar_cur += 1

        if max_distance:
            temporary_distance = max(src_cur, tar_cur) - lcss + trans
            if temporary_distance >= max_distance:
                return round(temporary_distance)

        if (src_cur >= src_len) or (tar_cur >= tar_len):
            lcss += local_cs
            local_cs = 0
            src_cur = tar_cur = min(src_cur, tar_cur)

    lcss += local_cs
    return round(max(src_len, tar_len) - lcss + trans)
[ "def", "dist_abs", "(", "self", ",", "src", ",", "tar", ",", "max_offset", "=", "5", ",", "max_distance", "=", "0", ")", ":", "if", "not", "src", ":", "return", "len", "(", "tar", ")", "if", "not", "tar", ":", "return", "len", "(", "src", ")", "src_len", "=", "len", "(", "src", ")", "tar_len", "=", "len", "(", "tar", ")", "src_cur", "=", "0", "tar_cur", "=", "0", "lcss", "=", "0", "local_cs", "=", "0", "trans", "=", "0", "offset_arr", "=", "[", "]", "while", "(", "src_cur", "<", "src_len", ")", "and", "(", "tar_cur", "<", "tar_len", ")", ":", "if", "src", "[", "src_cur", "]", "==", "tar", "[", "tar_cur", "]", ":", "local_cs", "+=", "1", "is_trans", "=", "False", "i", "=", "0", "while", "i", "<", "len", "(", "offset_arr", ")", ":", "ofs", "=", "offset_arr", "[", "i", "]", "if", "src_cur", "<=", "ofs", "[", "'src_cur'", "]", "or", "tar_cur", "<=", "ofs", "[", "'tar_cur'", "]", ":", "is_trans", "=", "abs", "(", "tar_cur", "-", "src_cur", ")", ">=", "abs", "(", "ofs", "[", "'tar_cur'", "]", "-", "ofs", "[", "'src_cur'", "]", ")", "if", "is_trans", ":", "trans", "+=", "1", "elif", "not", "ofs", "[", "'trans'", "]", ":", "ofs", "[", "'trans'", "]", "=", "True", "trans", "+=", "1", "break", "elif", "src_cur", ">", "ofs", "[", "'tar_cur'", "]", "and", "tar_cur", ">", "ofs", "[", "'src_cur'", "]", ":", "del", "offset_arr", "[", "i", "]", "else", ":", "i", "+=", "1", "offset_arr", ".", "append", "(", "{", "'src_cur'", ":", "src_cur", ",", "'tar_cur'", ":", "tar_cur", ",", "'trans'", ":", "is_trans", "}", ")", "else", ":", "lcss", "+=", "local_cs", "local_cs", "=", "0", "if", "src_cur", "!=", "tar_cur", ":", "src_cur", "=", "tar_cur", "=", "min", "(", "src_cur", ",", "tar_cur", ")", "for", "i", "in", "range", "(", "max_offset", ")", ":", "if", "not", "(", "(", "src_cur", "+", "i", "<", "src_len", ")", "or", "(", "tar_cur", "+", "i", "<", "tar_len", ")", ")", ":", "break", "if", "(", "src_cur", "+", "i", "<", "src_len", ")", "and", "(", "src", "[", "src_cur", "+", "i", "]", "==", "tar", "[", "tar_cur", "]", ")", ":", "src_cur", "+=", "i", "-", "1", "tar_cur", "-=", "1", "break", "if", "(", "tar_cur", "+", "i", "<", "tar_len", ")", "and", "(", "src", "[", "src_cur", "]", "==", "tar", "[", "tar_cur", "+", "i", "]", ")", ":", "src_cur", "-=", "1", "tar_cur", "+=", "i", "-", "1", "break", "src_cur", "+=", "1", "tar_cur", "+=", "1", "if", "max_distance", ":", "temporary_distance", "=", "max", "(", "src_cur", ",", "tar_cur", ")", "-", "lcss", "+", "trans", "if", "temporary_distance", ">=", "max_distance", ":", "return", "round", "(", "temporary_distance", ")", "if", "(", "src_cur", ">=", "src_len", ")", "or", "(", "tar_cur", ">=", "tar_len", ")", ":", "lcss", "+=", "local_cs", "local_cs", "=", "0", "src_cur", "=", "tar_cur", "=", "min", "(", "src_cur", ",", "tar_cur", ")", "lcss", "+=", "local_cs", "return", "round", "(", "max", "(", "src_len", ",", "tar_len", ")", "-", "lcss", "+", "trans", ")" ]
31.117117
17.990991
def get_mapping_format(self):
    """Return the mapping format name corresponding to this data format."""
    if self.format == DataFormat.json or self.format == DataFormat.avro:
        return self.format.name
    else:
        return DataFormat.csv.name
[ "def", "get_mapping_format", "(", "self", ")", ":", "if", "self", ".", "format", "==", "DataFormat", ".", "json", "or", "self", ".", "format", "==", "DataFormat", ".", "avro", ":", "return", "self", ".", "format", ".", "name", "else", ":", "return", "DataFormat", ".", "csv", ".", "name" ]
42.5
13.5
def get_min_max_mag(self):
    """
    :returns: minimum and maximum magnitudes from the underlying MFDs
    """
    m1s, m2s = [], []
    for mfd in self:
        m1, m2 = mfd.get_min_max_mag()
        m1s.append(m1)
        m2s.append(m2)
    return min(m1s), max(m2s)
[ "def", "get_min_max_mag", "(", "self", ")", ":", "m1s", ",", "m2s", "=", "[", "]", ",", "[", "]", "for", "mfd", "in", "self", ":", "m1", ",", "m2", "=", "mfd", ".", "get_min_max_mag", "(", ")", "m1s", ".", "append", "(", "m1", ")", "m2s", ".", "append", "(", "m2", ")", "return", "min", "(", "m1s", ")", ",", "max", "(", "m2s", ")" ]
29.7
11.5
def lilypond(point):
    """
    Generate lilypond representation for a point
    """
    # If lilypond already computed, leave as is
    if "lilypond" in point:
        return point

    # Defaults:
    pitch_string = ""
    octave_string = ""
    duration_string = ""
    preamble = ""
    dynamic_string = ""

    if "pitch" in point:
        octave = point["octave"]
        pitch = point["pitch"]
        if octave > 4:
            octave_string = "'" * (octave - 4)
        elif octave < 4:
            octave_string = "," * (4 - octave)
        else:
            octave_string = ""
        m = modifiers(pitch)
        if m > 0:
            modifier_string = "is" * m
        elif m < 0:
            modifier_string = "es" * -m
        else:
            modifier_string = ""
        pitch_string = letter(pitch).lower() + modifier_string

    if DURATION_64 in point:
        duration = point[DURATION_64]
        if duration > 0:
            if duration % 3 == 0:  # dotted note
                duration_string = str(192 // (2 * duration)) + "."
            else:
                duration_string = str(64 // duration)

        # TODO: for now, if we have a duration but no pitch,
        # show a 'c' with an x note
        if duration_string:
            if not pitch_string:
                pitch_string = "c"
                octave_string = "'"
                preamble = r'\xNote '

    if "dynamic" in point:
        dynamic = point["dynamic"]
        if dynamic == "crescendo":
            dynamic_string = "\\<"
        elif dynamic == "diminuendo":
            dynamic_string = "\\>"
        else:
            dynamic_string = "\\%s" % (dynamic,)

    point["lilypond"] = "%s%s%s%s%s" % (preamble, pitch_string, octave_string, duration_string, dynamic_string)
    return point
[ "def", "lilypond", "(", "point", ")", ":", "#If lilypond already computed, leave as is", "if", "\"lilypond\"", "in", "point", ":", "return", "point", "#Defaults:", "pitch_string", "=", "\"\"", "octave_string", "=", "\"\"", "duration_string", "=", "\"\"", "preamble", "=", "\"\"", "dynamic_string", "=", "\"\"", "if", "\"pitch\"", "in", "point", ":", "octave", "=", "point", "[", "\"octave\"", "]", "pitch", "=", "point", "[", "\"pitch\"", "]", "if", "octave", ">", "4", ":", "octave_string", "=", "\"'\"", "*", "(", "octave", "-", "4", ")", "elif", "octave", "<", "4", ":", "octave_string", "=", "\",\"", "*", "(", "4", "-", "octave", ")", "else", ":", "octave_string", "=", "\"\"", "m", "=", "modifiers", "(", "pitch", ")", "if", "m", ">", "0", ":", "modifier_string", "=", "\"is\"", "*", "m", "elif", "m", "<", "0", ":", "modifier_string", "=", "\"es\"", "*", "-", "m", "else", ":", "modifier_string", "=", "\"\"", "pitch_string", "=", "letter", "(", "pitch", ")", ".", "lower", "(", ")", "+", "modifier_string", "if", "DURATION_64", "in", "point", ":", "duration", "=", "point", "[", "DURATION_64", "]", "if", "duration", ">", "0", ":", "if", "duration", "%", "3", "==", "0", ":", "# dotted note", "duration_string", "=", "str", "(", "192", "//", "(", "2", "*", "duration", ")", ")", "+", "\".\"", "else", ":", "duration_string", "=", "str", "(", "64", "//", "duration", ")", "#TODO: for now, if we have a duration but no pitch, show a 'c' with an x note", "if", "duration_string", ":", "if", "not", "pitch_string", ":", "pitch_string", "=", "\"c\"", "octave_string", "=", "\"'\"", "preamble", "=", "r'\\xNote '", "if", "\"dynamic\"", "in", "point", ":", "dynamic", "=", "point", "[", "\"dynamic\"", "]", "if", "dynamic", "==", "\"crescendo\"", ":", "dynamic_string", "=", "\"\\<\"", "elif", "dynamic", "==", "\"diminuendo\"", ":", "dynamic_string", "=", "\"\\>\"", "else", ":", "dynamic_string", "=", "\"\\%s\"", "%", "(", "dynamic", ",", ")", "point", "[", "\"lilypond\"", "]", "=", "\"%s%s%s%s%s\"", "%", "(", "preamble", ",", "pitch_string", ",", "octave_string", ",", "duration_string", ",", "dynamic_string", ")", "return", "point" ]
29.62069
16.758621
def _set_advertisement_interval(self, v, load=False):
    """
    Setter method for advertisement_interval, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/advertisement_interval (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_advertisement_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_advertisement_interval() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=advertisement_interval.advertisement_interval, is_container='container', presence=False, yang_name="advertisement-interval", rest_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Minimum interval between sending BGP routing updates', u'cli-compact-syntax': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """advertisement_interval must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=advertisement_interval.advertisement_interval, is_container='container', presence=False, yang_name="advertisement-interval", rest_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Minimum interval between sending BGP routing updates', u'cli-compact-syntax': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
        })

    self.__advertisement_interval = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_advertisement_interval", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "advertisement_interval", ".", "advertisement_interval", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"advertisement-interval\"", ",", "rest_name", "=", "\"advertisement-interval\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Minimum interval between sending BGP routing updates'", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-bgp'", ",", "defining_module", "=", "'brocade-bgp'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"advertisement_interval must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=advertisement_interval.advertisement_interval, is_container='container', presence=False, yang_name=\"advertisement-interval\", rest_name=\"advertisement-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Minimum interval between sending BGP routing updates', u'cli-compact-syntax': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__advertisement_interval", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
94.090909
47.272727
def predict( self, user_ids, item_ids, item_features=None, user_features=None, num_threads=1 ): """ Compute the recommendation score for user-item pairs. For details on how to use feature matrices, see the documentation on the :class:`lightfm.LightFM` class. Arguments --------- user_ids: integer or np.int32 array of shape [n_pairs,] single user id or an array containing the user ids for the user-item pairs for which a prediction is to be computed. Note that these are LightFM's internal id's, i.e. the index of the user in the interaction matrix used for fitting the model. item_ids: np.int32 array of shape [n_pairs,] an array containing the item ids for the user-item pairs for which a prediction is to be computed. Note that these are LightFM's internal id's, i.e. the index of the item in the interaction matrix used for fitting the model. user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional Each row contains that user's weights over features. item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional Each row contains that item's weights over features. num_threads: int, optional Number of parallel computation threads to use. Should not be higher than the number of physical cores. Returns ------- np.float32 array of shape [n_pairs,] Numpy array containing the recommendation scores for pairs defined by the inputs. """ self._check_initialized() if not isinstance(user_ids, np.ndarray): user_ids = np.repeat(np.int32(user_ids), len(item_ids)) if isinstance(item_ids, (list, tuple)): item_ids = np.array(item_ids, dtype=np.int32) assert len(user_ids) == len(item_ids) if user_ids.dtype != np.int32: user_ids = user_ids.astype(np.int32) if item_ids.dtype != np.int32: item_ids = item_ids.astype(np.int32) if num_threads < 1: raise ValueError("Number of threads must be 1 or larger.") if user_ids.min() < 0 or item_ids.min() < 0: raise ValueError( "User or item ids cannot be negative. " "Check your inputs for negative numbers " "or very large numbers that can overflow." ) n_users = user_ids.max() + 1 n_items = item_ids.max() + 1 (user_features, item_features) = self._construct_feature_matrices( n_users, n_items, user_features, item_features ) lightfm_data = self._get_lightfm_data() predictions = np.empty(len(user_ids), dtype=np.float64) predict_lightfm( CSRMatrix(item_features), CSRMatrix(user_features), user_ids, item_ids, predictions, lightfm_data, num_threads, ) return predictions
[ "def", "predict", "(", "self", ",", "user_ids", ",", "item_ids", ",", "item_features", "=", "None", ",", "user_features", "=", "None", ",", "num_threads", "=", "1", ")", ":", "self", ".", "_check_initialized", "(", ")", "if", "not", "isinstance", "(", "user_ids", ",", "np", ".", "ndarray", ")", ":", "user_ids", "=", "np", ".", "repeat", "(", "np", ".", "int32", "(", "user_ids", ")", ",", "len", "(", "item_ids", ")", ")", "if", "isinstance", "(", "item_ids", ",", "(", "list", ",", "tuple", ")", ")", ":", "item_ids", "=", "np", ".", "array", "(", "item_ids", ",", "dtype", "=", "np", ".", "int32", ")", "assert", "len", "(", "user_ids", ")", "==", "len", "(", "item_ids", ")", "if", "user_ids", ".", "dtype", "!=", "np", ".", "int32", ":", "user_ids", "=", "user_ids", ".", "astype", "(", "np", ".", "int32", ")", "if", "item_ids", ".", "dtype", "!=", "np", ".", "int32", ":", "item_ids", "=", "item_ids", ".", "astype", "(", "np", ".", "int32", ")", "if", "num_threads", "<", "1", ":", "raise", "ValueError", "(", "\"Number of threads must be 1 or larger.\"", ")", "if", "user_ids", ".", "min", "(", ")", "<", "0", "or", "item_ids", ".", "min", "(", ")", "<", "0", ":", "raise", "ValueError", "(", "\"User or item ids cannot be negative. \"", "\"Check your inputs for negative numbers \"", "\"or very large numbers that can overflow.\"", ")", "n_users", "=", "user_ids", ".", "max", "(", ")", "+", "1", "n_items", "=", "item_ids", ".", "max", "(", ")", "+", "1", "(", "user_features", ",", "item_features", ")", "=", "self", ".", "_construct_feature_matrices", "(", "n_users", ",", "n_items", ",", "user_features", ",", "item_features", ")", "lightfm_data", "=", "self", ".", "_get_lightfm_data", "(", ")", "predictions", "=", "np", ".", "empty", "(", "len", "(", "user_ids", ")", ",", "dtype", "=", "np", ".", "float64", ")", "predict_lightfm", "(", "CSRMatrix", "(", "item_features", ")", ",", "CSRMatrix", "(", "user_features", ")", ",", "user_ids", ",", "item_ids", ",", "predictions", ",", "lightfm_data", ",", "num_threads", ",", ")", "return", "predictions" ]
36.117647
23.882353
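As a usage sketch for the predict() above: the snippet below fits a LightFM model on a toy interaction matrix and scores every item for one user, relying on the broadcast of a scalar user id. The interaction data is made up purely for illustration.

import numpy as np
from scipy.sparse import coo_matrix
from lightfm import LightFM

# Toy implicit-feedback matrix: 3 users x 4 items (made-up data).
interactions = coo_matrix(np.array([
    [1, 0, 0, 1],
    [0, 1, 1, 0],
    [1, 1, 0, 0],
], dtype=np.float32))

model = LightFM(loss='warp')
model.fit(interactions, epochs=5)

# A scalar user id is broadcast against the item id array, exactly as
# the np.repeat() branch in predict() above handles it.
scores = model.predict(0, np.arange(4, dtype=np.int32))
print(scores)  # one score per item; higher means more recommended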
def new_event(event): """ Wrap a raw gRPC event in a friendlier containing class. This picks the appropriate class from one of PutEvent or DeleteEvent and returns a new instance. """ op_name = event.EventType.DESCRIPTOR.values_by_number[event.type].name if op_name == 'PUT': cls = PutEvent elif op_name == 'DELETE': cls = DeleteEvent else: raise Exception('Invalid op_name') return cls(event)
[ "def", "new_event", "(", "event", ")", ":", "op_name", "=", "event", ".", "EventType", ".", "DESCRIPTOR", ".", "values_by_number", "[", "event", ".", "type", "]", ".", "name", "if", "op_name", "==", "'PUT'", ":", "cls", "=", "PutEvent", "elif", "op_name", "==", "'DELETE'", ":", "cls", "=", "DeleteEvent", "else", ":", "raise", "Exception", "(", "'Invalid op_name'", ")", "return", "cls", "(", "event", ")" ]
27.6875
19.5625
def shareItem(sharedItem, toRole=None, toName=None, shareID=None, interfaces=ALL_IMPLEMENTED): """ Share an item with a given role. This provides a way to expose items to users for later retrieval with L{Role.getShare}. This API is slated for deprecation. Prefer L{Role.shareItem} in new code. @param sharedItem: an item to be shared. @param toRole: a L{Role} instance which represents the group that has access to the given item. May not be specified if toName is also specified. @param toName: a unicode string which uniquely identifies a L{Role} in the same store as the sharedItem. @param shareID: a unicode string. If provided, specify the ID under which the shared item will be shared. @param interfaces: a list of Interface objects which specify the methods and attributes accessible to C{toRole} on C{sharedItem}. @return: a L{Share} which records the ability of the given role to access the given item. """ warnings.warn("Use Role.shareItem() instead of sharing.shareItem().", PendingDeprecationWarning, stacklevel=2) if toRole is None: if toName is not None: toRole = getPrimaryRole(sharedItem.store, toName, True) else: toRole = getEveryoneRole(sharedItem.store) return toRole.shareItem(sharedItem, shareID, interfaces)
[ "def", "shareItem", "(", "sharedItem", ",", "toRole", "=", "None", ",", "toName", "=", "None", ",", "shareID", "=", "None", ",", "interfaces", "=", "ALL_IMPLEMENTED", ")", ":", "warnings", ".", "warn", "(", "\"Use Role.shareItem() instead of sharing.shareItem().\"", ",", "PendingDeprecationWarning", ",", "stacklevel", "=", "2", ")", "if", "toRole", "is", "None", ":", "if", "toName", "is", "not", "None", ":", "toRole", "=", "getPrimaryRole", "(", "sharedItem", ".", "store", ",", "toName", ",", "True", ")", "else", ":", "toRole", "=", "getEveryoneRole", "(", "sharedItem", ".", "store", ")", "return", "toRole", ".", "shareItem", "(", "sharedItem", ",", "shareID", ",", "interfaces", ")" ]
39.342857
24.257143
def notify(self, name, job):
        """
        Concrete method of Subject.notify().
        Notifies observers of a change in the Subject's status.
        This method calls Observer.update().
        In this program, ConfigReader.notify() calls JobObserver.update().
        For example, register threads.redis.ConcreateJob to JobObserver.jobs.
        """
        for observer in self._observers:
            observer.update(name, job)
[ "def", "notify", "(", "self", ",", "name", ",", "job", ")", ":", "for", "observer", "in", "self", ".", "_observers", ":", "observer", ".", "update", "(", "name", ",", "job", ")" ]
42.5
11.1
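The record above is the classic observer pattern. Here is a minimal, self-contained sketch of how a notify() like this is typically wired up; Subject and PrintObserver are illustrative stand-ins, not the document's actual ConfigReader/JobObserver classes.

# Minimal observer-pattern sketch; class names are illustrative only.
class Subject:
    def __init__(self):
        self._observers = []

    def register(self, observer):
        self._observers.append(observer)

    def notify(self, name, job):
        # Same loop as the notify() above: fan the event out to observers.
        for observer in self._observers:
            observer.update(name, job)


class PrintObserver:
    def update(self, name, job):
        print('observed:', name, job)


subject = Subject()
subject.register(PrintObserver())
subject.notify('redis', object())  # -> observed: redis <object ...>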
def passwordReset1to2(old): """ Power down and delete the item """ new = old.upgradeVersion(old.typeName, 1, 2, installedOn=None) for iface in new.store.interfacesFor(new): new.store.powerDown(new, iface) new.deleteFromStore()
[ "def", "passwordReset1to2", "(", "old", ")", ":", "new", "=", "old", ".", "upgradeVersion", "(", "old", ".", "typeName", ",", "1", ",", "2", ",", "installedOn", "=", "None", ")", "for", "iface", "in", "new", ".", "store", ".", "interfacesFor", "(", "new", ")", ":", "new", ".", "store", ".", "powerDown", "(", "new", ",", "iface", ")", "new", ".", "deleteFromStore", "(", ")" ]
31.375
8.375
def send(self, group_id=None, message_dict=None):
        """
        Send this current message to a group. `message_dict` can be a
        dictionary formatted according to
        http://docs.fiesta.cc/list-management-api.html#messages

        If `message_dict` is provided, this method ignores the object-level
        subject/text/markdown attributes.
        """
        if self.group is not None and self.group.id is not None:
            group_id = self.group.id
        path = 'message/%s' % group_id
        if message_dict is not None:
            request_data = {
                'message': message_dict,
            }
        else:
            subject = self.subject
            text = self.text
            markdown = self.markdown
            request_data = {
                'message': {},
            }
            if subject:
                request_data['message']['subject'] = subject
            if text:
                request_data['message']['text'] = text
            if markdown:
                request_data['message']['markdown'] = markdown
        response_data = self.api.request(path, request_data)
        self.id = response_data['message_id']
        self.thread_id = response_data['thread_id']
        self.sent_message = FiestaMessage(self.api, response_data['message'])
[ "def", "send", "(", "self", ",", "group_id", "=", "None", ",", "message_dict", "=", "None", ")", ":", "if", "self", ".", "group", "is", "not", "None", "and", "self", ".", "group", ".", "id", "is", "not", "None", ":", "group_id", "=", "self", ".", "group", ".", "id", "path", "=", "'message/%s'", "%", "group_id", "if", "message_dict", "is", "not", "None", ":", "request_data", "=", "{", "'message'", ":", "message_dict", ",", "}", "else", ":", "subject", "=", "self", ".", "subject", "text", "=", "self", ".", "text", "markdown", "=", "self", ".", "markdown", "request_data", "=", "{", "'message'", ":", "{", "}", ",", "}", "if", "subject", ":", "request_data", "[", "'message'", "]", "[", "'subject'", "]", "=", "subject", "if", "text", ":", "request_data", "[", "'message'", "]", "[", "'text'", "]", "=", "text", "if", "markdown", ":", "request_data", "[", "'message'", "]", "[", "'markdown'", "]", "=", "markdown", "response_data", "=", "self", ".", "api", ".", "request", "(", "path", ",", "request_data", ")", "self", ".", "id", "=", "response_data", "[", "'message_id'", "]", "self", ".", "thread_id", "=", "response_data", "[", "'thread_id'", "]", "self", ".", "sent_message", "=", "FiestaMessage", "(", "self", ".", "api", ",", "response_data", "[", "'message'", "]", ")" ]
34.138889
20.194444
def _set_task_uuid(self, dependencies):
        """ Adds universally unique identifiers (UUIDs) to each task of the workflow

        :param dependencies: The list of dependencies between tasks defining the computational graph
        :type dependencies: list(Dependency)
        :return: A dictionary mapping UUID to dependencies
        :rtype: dict(str: Dependency)
        """
        uuid_dict = {}
        for dep in dependencies:
            task = dep.task
            if task.private_task_config.uuid in uuid_dict:
                raise ValueError('EOWorkflow cannot execute the same instance of EOTask multiple times')

            task.private_task_config.uuid = self.id_gen.next()
            uuid_dict[task.private_task_config.uuid] = dep

        return uuid_dict
[ "def", "_set_task_uuid", "(", "self", ",", "dependencies", ")", ":", "uuid_dict", "=", "{", "}", "for", "dep", "in", "dependencies", ":", "task", "=", "dep", ".", "task", "if", "task", ".", "private_task_config", ".", "uuid", "in", "uuid_dict", ":", "raise", "ValueError", "(", "'EOWorkflow cannot execute the same instance of EOTask multiple times'", ")", "task", ".", "private_task_config", ".", "uuid", "=", "self", ".", "id_gen", ".", "next", "(", ")", "uuid_dict", "[", "task", ".", "private_task_config", ".", "uuid", "]", "=", "dep", "return", "uuid_dict" ]
42.944444
21.222222
def from_status(cls, http_status, code_index=0, message=None, developer_message=None, meta=None):
        # type: (HTTPStatus, int, AnyStr, AnyStr, dict) -> Error
        """
        Automatically build an Error from the HTTP status code.

        :param http_status: the HTTPStatus member to derive the error from.
        :param code_index: offset added to (status * 100) to form the application error code.
        :param message: user-facing message; defaults to the status description.
        :param developer_message: developer-facing message; defaults to the status description.
        :param meta: optional metadata dictionary.
        """
        return cls(http_status.value,
                   (http_status.value * 100) + code_index,
                   message or http_status.description,
                   developer_message or http_status.description,
                   meta)
[ "def", "from_status", "(", "cls", ",", "http_status", ",", "code_index", "=", "0", ",", "message", "=", "None", ",", "developer_message", "=", "None", ",", "meta", "=", "None", ")", ":", "# type: (HTTPStatus, int, AnyStr, AnyStr, dict) -> Error", "return", "cls", "(", "http_status", ".", "value", ",", "(", "http_status", ".", "value", "*", "100", ")", "+", "code_index", ",", "message", "or", "http_status", ".", "description", ",", "developer_message", "or", "http_status", ".", "description", ",", "meta", ")" ]
37.235294
19.117647
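To make the code-index arithmetic above concrete, here is a self-contained sketch with a stand-in Error class; the real class's constructor signature is assumed from the cls(...) call above.

from http import HTTPStatus

# Stand-in Error class; the constructor signature is inferred from the
# cls(...) call in from_status() above, not taken from the real library.
class Error:
    def __init__(self, status, code, message, developer_message, meta):
        self.status = status
        self.code = code
        self.message = message
        self.developer_message = developer_message
        self.meta = meta

status = HTTPStatus.NOT_FOUND
err = Error(
    status.value,
    (status.value * 100) + 1,  # code_index=1 -> 40401
    'Resource not found',
    status.description,
    None,
)
print(err.status, err.code)  # 404 40401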
def detail(self):
        """
        Personal information; calls ``get_detail()`` automatically if it has
        not been called yet.

        :return: information of student
        :rtype: dict
        """
        if hasattr(self, '_detail'):
            return self._detail
        else:
            self.get_detail()
            return self._detail
[ "def", "detail", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_detail'", ")", ":", "return", "self", ".", "_detail", "else", ":", "self", ".", "get_detail", "(", ")", "return", "self", ".", "_detail" ]
23.166667
12
def get_top_sentences(self): ''' getter ''' if isinstance(self.__top_sentences, int) is False: raise TypeError("The type of __top_sentences must be int.") return self.__top_sentences
[ "def", "get_top_sentences", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "__top_sentences", ",", "int", ")", "is", "False", ":", "raise", "TypeError", "(", "\"The type of __top_sentences must be int.\"", ")", "return", "self", ".", "__top_sentences" ]
42.8
16.8
def rgb_tuple_from_str(color_string):
    """Convert a color in the format 'rgb(RRR,GGG,BBB)', 'rgba(RRR,GGG,BBB,alpha)',
    '#RRGGBB', or a limited English color name (e.g. 'red') to the tuple
    (RRR, GGG, BBB)
    """
    try:
        # English color names (limited)
        rgb_string = common_html_colors[color_string]
        return tuple([float(x) for x in re.findall(r'\d{1,3}', rgb_string)])
    except KeyError:
        try:
            # HEX color code
            hex_string = color_string.lstrip('#')
            return tuple(int(hex_string[i:i+2], 16) for i in (0, 2, 4))
        except ValueError:
            # RGB or RGBA formatted strings
            return tuple([int(x) if float(x) > 1 else float(x) for x in re.findall(r"[-+]?\d*\.*\d+", color_string)])
[ "def", "rgb_tuple_from_str", "(", "color_string", ")", ":", "try", ":", "# English color names (limited)", "rgb_string", "=", "common_html_colors", "[", "color_string", "]", "return", "tuple", "(", "[", "float", "(", "x", ")", "for", "x", "in", "re", ".", "findall", "(", "r'\\d{1,3}'", ",", "rgb_string", ")", "]", ")", "except", "KeyError", ":", "try", ":", "# HEX color code", "hex_string", "=", "color_string", ".", "lstrip", "(", "'#'", ")", "return", "tuple", "(", "int", "(", "hex_string", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "for", "i", "in", "(", "0", ",", "2", ",", "4", ")", ")", "except", "ValueError", ":", "# RGB or RGBA formatted strings", "return", "tuple", "(", "[", "int", "(", "x", ")", "if", "float", "(", "x", ")", ">", "1", "else", "float", "(", "x", ")", "for", "x", "in", "re", ".", "findall", "(", "r\"[-+]?\\d*\\.*\\d+\"", ",", "color_string", ")", "]", ")" ]
41.263158
19.684211
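A self-contained version of the parser above, with common_html_colors stubbed to a single entry so all three branches can be exercised (the real module's color table is larger):

import re

# Stub of the module-level color table; one entry is enough here.
common_html_colors = {'red': 'rgb(255,0,0)'}

def rgb_tuple_from_str(color_string):
    try:  # English color names (limited)
        rgb_string = common_html_colors[color_string]
        return tuple(float(x) for x in re.findall(r'\d{1,3}', rgb_string))
    except KeyError:
        try:  # HEX color code
            hex_string = color_string.lstrip('#')
            return tuple(int(hex_string[i:i + 2], 16) for i in (0, 2, 4))
        except ValueError:  # RGB/RGBA formatted strings
            return tuple(int(x) if float(x) > 1 else float(x)
                         for x in re.findall(r"[-+]?\d*\.*\d+", color_string))

print(rgb_tuple_from_str('red'))                # (255.0, 0.0, 0.0)
print(rgb_tuple_from_str('#00ff00'))            # (0, 255, 0)
print(rgb_tuple_from_str('rgba(0,0,255,0.5)'))  # (0.0, 0.0, 255, 0.5)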
def evaluate(self, text):
        """
        Given a string of `text`, compute the confusion matrix for the
        classification task.
        """
        cx = BinaryConfusion()
        for (L, P, R, gold, _) in Detector.candidates(text):
            guess = self.predict(L, P, R)
            cx.update(gold, guess)
            if not gold and guess:
                logger.debug("False pos.: L='{}', R='{}'.".format(L, R))
            elif gold and not guess:
                logger.debug("False neg.: L='{}', R='{}'.".format(L, R))
        return cx
[ "def", "evaluate", "(", "self", ",", "text", ")", ":", "cx", "=", "BinaryConfusion", "(", ")", "for", "(", "L", ",", "P", ",", "R", ",", "gold", ",", "_", ")", "in", "Detector", ".", "candidates", "(", "text", ")", ":", "guess", "=", "self", ".", "predict", "(", "L", ",", "P", ",", "R", ")", "cx", ".", "update", "(", "gold", ",", "guess", ")", "if", "not", "gold", "and", "guess", ":", "logger", ".", "debug", "(", "\"False pos.: L='{}', R='{}'.\"", ".", "format", "(", "L", ",", "R", ")", ")", "elif", "gold", "and", "not", "guess", ":", "logger", ".", "debug", "(", "\"False neg.: L='{}', R='{}'.\"", ".", "format", "(", "L", ",", "R", ")", ")", "return", "cx" ]
38.357143
13.357143
def _numeric_summary(arg, exact_nunique=False, prefix=None):
    """
    Compute a set of summary metrics from the input numeric value expression

    Parameters
    ----------
    arg : numeric value expression
    exact_nunique : boolean, default False
        If True, compute the exact number of unique values; otherwise use
        an approximation.
    prefix : string, default None
        String prefix for metric names

    Returns
    -------
    summary : (count, # nulls, min, max, sum, mean, nunique)
    """
    metrics = [
        arg.count(),
        arg.isnull().sum().name('nulls'),
        arg.min(),
        arg.max(),
        arg.sum(),
        arg.mean(),
    ]

    if exact_nunique:
        unique_metric = arg.nunique().name('nunique')
    else:
        unique_metric = arg.approx_nunique().name('approx_nunique')

    metrics.append(unique_metric)
    return _wrap_summary_metrics(metrics, prefix)
[ "def", "_numeric_summary", "(", "arg", ",", "exact_nunique", "=", "False", ",", "prefix", "=", "None", ")", ":", "metrics", "=", "[", "arg", ".", "count", "(", ")", ",", "arg", ".", "isnull", "(", ")", ".", "sum", "(", ")", ".", "name", "(", "'nulls'", ")", ",", "arg", ".", "min", "(", ")", ",", "arg", ".", "max", "(", ")", ",", "arg", ".", "sum", "(", ")", ",", "arg", ".", "mean", "(", ")", ",", "]", "if", "exact_nunique", ":", "unique_metric", "=", "arg", ".", "nunique", "(", ")", ".", "name", "(", "'nunique'", ")", "else", ":", "unique_metric", "=", "arg", ".", "approx_nunique", "(", ")", ".", "name", "(", "'approx_nunique'", ")", "metrics", ".", "append", "(", "unique_metric", ")", "return", "_wrap_summary_metrics", "(", "metrics", ",", "prefix", ")" ]
25.516129
20.612903
def RCL(input_shape, rec_conv_layers, dense_layers, output_layer=[1, 'sigmoid'], padding='same', optimizer='adam', loss='binary_crossentropy'):
    """Build a recurrent convolutional (RCL) Keras model.

    Args:
        input_shape (tuple): The shape of the input layer.
        output_layer (list): [nodes, activation] for the output layer. The
            number of nodes depends on the loss function used.
        rec_conv_layers (list): RCL descriptor [
                                                [
                                                 [(filter, kernel), (pool_size, stride), leak, drop],
                                                 [(filter, kernel), (pool_size, stride), leak, drop],
                                                 [(filter, kernel), (pool_size, stride), leak, drop, timesteps],
                                                ],
                                                 ...
                                                [ [],[],[] ]
                                               ]
        dense_layers (TYPE): Dense layer descriptor [[fully_connected, leak, drop], ... []]
        padding (str, optional): Type of padding for conv and pooling layers
        optimizer (str or object, optional): Keras optimizer, as a string
            name or an optimizer instance

    Returns:
        model: The compiled Keras model, ready for training.
    """
    inputs = Input(shape=input_shape)

    for i, c in enumerate(rec_conv_layers):
        conv = Conv1D(c[0][0][0], c[0][0][1], padding=padding)(inputs)
        batch = BatchNormalization()(conv)
        act = LeakyReLU(alpha=c[0][2])(batch)
        pool = MaxPooling1D(pool_size=c[0][1][0],
                            strides=c[0][1][1],
                            padding=padding)(act)
        d1 = Dropout(c[0][3])(pool)

        inner = time_steps(
            input=d1, conv_layer=c[1], time_conv_layer=c[2], padding=padding)

    drop = Flatten()(inner)
    for i, d in enumerate(dense_layers):
        dense = Dense(d[0], activation='relu')(drop)
        bn = BatchNormalization()(dense)
        act = LeakyReLU(alpha=d[1])(bn)
        drop = Dropout(d[2])(act)

    output = Dense(output_layer[0], activation=output_layer[1])(drop)

    model = Model(inputs=inputs, outputs=output)
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    return model
[ "def", "RCL", "(", "input_shape", ",", "rec_conv_layers", ",", "dense_layers", ",", "output_layer", "=", "[", "1", ",", "'sigmoid'", "]", ",", "padding", "=", "'same'", ",", "optimizer", "=", "'adam'", ",", "loss", "=", "'binary_crossentropy'", ")", ":", "inputs", "=", "Input", "(", "shape", "=", "input_shape", ")", "for", "i", ",", "c", "in", "enumerate", "(", "rec_conv_layers", ")", ":", "conv", "=", "Conv1D", "(", "c", "[", "0", "]", "[", "0", "]", "[", "0", "]", ",", "c", "[", "0", "]", "[", "0", "]", "[", "1", "]", ",", "padding", "=", "padding", ")", "(", "inputs", ")", "batch", "=", "BatchNormalization", "(", ")", "(", "conv", ")", "act", "=", "LeakyReLU", "(", "alpha", "=", "c", "[", "0", "]", "[", "2", "]", ")", "(", "batch", ")", "pool", "=", "MaxPooling1D", "(", "pool_size", "=", "c", "[", "0", "]", "[", "1", "]", "[", "0", "]", ",", "strides", "=", "c", "[", "0", "]", "[", "1", "]", "[", "1", "]", ",", "padding", "=", "padding", ")", "(", "act", ")", "d1", "=", "Dropout", "(", "c", "[", "0", "]", "[", "3", "]", ")", "(", "pool", ")", "inner", "=", "time_steps", "(", "input", "=", "d1", ",", "conv_layer", "=", "c", "[", "1", "]", ",", "time_conv_layer", "=", "c", "[", "2", "]", ",", "padding", "=", "padding", ")", "drop", "=", "Flatten", "(", ")", "(", "inner", ")", "for", "i", ",", "d", "in", "enumerate", "(", "dense_layers", ")", ":", "dense", "=", "Dense", "(", "d", "[", "0", "]", ",", "activation", "=", "'relu'", ")", "(", "drop", ")", "bn", "=", "BatchNormalization", "(", ")", "(", "dense", ")", "act", "=", "LeakyReLU", "(", "alpha", "=", "d", "[", "1", "]", ")", "(", "bn", ")", "drop", "=", "Dropout", "(", "d", "[", "2", "]", ")", "(", "act", ")", "output", "=", "Dense", "(", "output_layer", "[", "0", "]", ",", "activation", "=", "output_layer", "[", "1", "]", ")", "(", "drop", ")", "model", "=", "Model", "(", "inputs", "=", "inputs", ",", "outputs", "=", "output", ")", "model", ".", "compile", "(", "loss", "=", "loss", ",", "optimizer", "=", "optimizer", ",", "metrics", "=", "[", "'accuracy'", "]", ")", "return", "model" ]
37.774194
20.274194
def run(self, ipyclient=None, ):
        """
        Run a batch of dstat tests on a list of tests, where each test is
        a dictionary mapping sample names to {p1 - p4} (and sometimes p5).
        Parameters modifying the behavior of the run, such as the number
        of bootstrap replicates (nboots) or the minimum coverage for loci
        (mincov), can be set in {object}.params.

        Parameters:
        -----------
        ipyclient (ipyparallel.Client object):
            An ipyparallel client object to distribute jobs to a cluster.
        """
        self.results_table, self.results_boots = batch(self, ipyclient)

        ## skip this for 5-part test results
        if not isinstance(self.results_table, list):
            self.results_table.nloci = np.nan_to_num(self.results_table.nloci)\
                .astype(int)
[ "def", "run", "(", "self", ",", "ipyclient", "=", "None", ",", ")", ":", "self", ".", "results_table", ",", "self", ".", "results_boots", "=", "batch", "(", "self", ",", "ipyclient", ")", "## skip this for 5-part test results", "if", "not", "isinstance", "(", "self", ".", "results_table", ",", "list", ")", ":", "self", ".", "results_table", ".", "nloci", "=", "np", ".", "nan_to_num", "(", "self", ".", "results_table", ".", "nloci", ")", ".", "astype", "(", "int", ")" ]
41.714286
23.047619
def heatmap_seaborn(dfr, outfilename=None, title=None, params=None): """Returns seaborn heatmap with cluster dendrograms. - dfr - pandas DataFrame with relevant data - outfilename - path to output file (indicates output format) """ # Decide on figure layout size: a minimum size is required for # aesthetics, and a maximum to avoid core dumps on rendering. # If we hit the maximum size, we should modify font size. maxfigsize = 120 calcfigsize = dfr.shape[0] * 1.1 figsize = min(max(8, calcfigsize), maxfigsize) if figsize == maxfigsize: scale = maxfigsize / calcfigsize sns.set_context("notebook", font_scale=scale) # Add a colorbar? if params.classes is None: col_cb = None else: col_cb = get_seaborn_colorbar(dfr, params.classes) # Labels are defined before we build the clustering # If a label mapping is missing, use the key text as fall back params.labels = get_safe_seaborn_labels(dfr, params.labels) # Add attributes to parameter object, and draw heatmap params.colorbar = col_cb params.figsize = figsize params.linewidths = 0.25 fig = get_seaborn_clustermap(dfr, params, title=title) # Save to file if outfilename: fig.savefig(outfilename) # Return clustermap return fig
[ "def", "heatmap_seaborn", "(", "dfr", ",", "outfilename", "=", "None", ",", "title", "=", "None", ",", "params", "=", "None", ")", ":", "# Decide on figure layout size: a minimum size is required for", "# aesthetics, and a maximum to avoid core dumps on rendering.", "# If we hit the maximum size, we should modify font size.", "maxfigsize", "=", "120", "calcfigsize", "=", "dfr", ".", "shape", "[", "0", "]", "*", "1.1", "figsize", "=", "min", "(", "max", "(", "8", ",", "calcfigsize", ")", ",", "maxfigsize", ")", "if", "figsize", "==", "maxfigsize", ":", "scale", "=", "maxfigsize", "/", "calcfigsize", "sns", ".", "set_context", "(", "\"notebook\"", ",", "font_scale", "=", "scale", ")", "# Add a colorbar?", "if", "params", ".", "classes", "is", "None", ":", "col_cb", "=", "None", "else", ":", "col_cb", "=", "get_seaborn_colorbar", "(", "dfr", ",", "params", ".", "classes", ")", "# Labels are defined before we build the clustering", "# If a label mapping is missing, use the key text as fall back", "params", ".", "labels", "=", "get_safe_seaborn_labels", "(", "dfr", ",", "params", ".", "labels", ")", "# Add attributes to parameter object, and draw heatmap", "params", ".", "colorbar", "=", "col_cb", "params", ".", "figsize", "=", "figsize", "params", ".", "linewidths", "=", "0.25", "fig", "=", "get_seaborn_clustermap", "(", "dfr", ",", "params", ",", "title", "=", "title", ")", "# Save to file", "if", "outfilename", ":", "fig", ".", "savefig", "(", "outfilename", ")", "# Return clustermap", "return", "fig" ]
34
19.921053
def from_page_xml(cls, page_xml): """ Constructs a :class:`~mwxml.iteration.dump.Dump` from a <page> block. :Parameters: page_xml : `str` | `file` Either a plain string or a file containing <page> block XML to process """ header = """ <mediawiki xmlns="http://www.mediawiki.org/xml/export-0.5/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.mediawiki.org/xml/export-0.5/ http://www.mediawiki.org/xml/export-0.5.xsd" version="0.5" xml:lang="en"> <siteinfo> </siteinfo> """ footer = "</mediawiki>" return cls.from_file(mwtypes.files.concat(header, page_xml, footer))
[ "def", "from_page_xml", "(", "cls", ",", "page_xml", ")", ":", "header", "=", "\"\"\"\n <mediawiki xmlns=\"http://www.mediawiki.org/xml/export-0.5/\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"http://www.mediawiki.org/xml/export-0.5/\n http://www.mediawiki.org/xml/export-0.5.xsd\" version=\"0.5\"\n xml:lang=\"en\">\n <siteinfo>\n </siteinfo>\n \"\"\"", "footer", "=", "\"</mediawiki>\"", "return", "cls", ".", "from_file", "(", "mwtypes", ".", "files", ".", "concat", "(", "header", ",", "page_xml", ",", "footer", ")", ")" ]
36.136364
21.545455
def stop_volume(name, force=False): ''' Stop a gluster volume name Volume name force Force stop the volume .. versionadded:: 2015.8.4 CLI Example: .. code-block:: bash salt '*' glusterfs.stop_volume mycluster ''' volinfo = info() if name not in volinfo: log.error('Cannot stop non-existing volume %s', name) return False if int(volinfo[name]['status']) != 1: log.warning('Attempt to stop already stopped volume %s', name) return True cmd = 'volume stop {0}'.format(name) if force: cmd += ' force' return _gluster(cmd)
[ "def", "stop_volume", "(", "name", ",", "force", "=", "False", ")", ":", "volinfo", "=", "info", "(", ")", "if", "name", "not", "in", "volinfo", ":", "log", ".", "error", "(", "'Cannot stop non-existing volume %s'", ",", "name", ")", "return", "False", "if", "int", "(", "volinfo", "[", "name", "]", "[", "'status'", "]", ")", "!=", "1", ":", "log", ".", "warning", "(", "'Attempt to stop already stopped volume %s'", ",", "name", ")", "return", "True", "cmd", "=", "'volume stop {0}'", ".", "format", "(", "name", ")", "if", "force", ":", "cmd", "+=", "' force'", "return", "_gluster", "(", "cmd", ")" ]
19.967742
23.903226
def rename_tables(db, table_mapping, reverse=False): """ renames tables from source to destination name, if the source exists and the destination does not exist yet. """ from django.db import connection if reverse: table_mapping = [(dst, src) for src, dst in table_mapping] table_names = connection.introspection.table_names() for source, destination in table_mapping: if source in table_names and destination in table_names: print(u" WARNING: not renaming {0} to {1}, because both tables already exist.".format(source, destination)) elif source in table_names and destination not in table_names: print(u" - renaming {0} to {1}".format(source, destination)) db.rename_table(source, destination)
[ "def", "rename_tables", "(", "db", ",", "table_mapping", ",", "reverse", "=", "False", ")", ":", "from", "django", ".", "db", "import", "connection", "if", "reverse", ":", "table_mapping", "=", "[", "(", "dst", ",", "src", ")", "for", "src", ",", "dst", "in", "table_mapping", "]", "table_names", "=", "connection", ".", "introspection", ".", "table_names", "(", ")", "for", "source", ",", "destination", "in", "table_mapping", ":", "if", "source", "in", "table_names", "and", "destination", "in", "table_names", ":", "print", "(", "u\" WARNING: not renaming {0} to {1}, because both tables already exist.\"", ".", "format", "(", "source", ",", "destination", ")", ")", "elif", "source", "in", "table_names", "and", "destination", "not", "in", "table_names", ":", "print", "(", "u\" - renaming {0} to {1}\"", ".", "format", "(", "source", ",", "destination", ")", ")", "db", ".", "rename_table", "(", "source", ",", "destination", ")" ]
51.933333
23.133333
def warning_implicit_type(lineno, id_, type_=None): """ Warning: Using default implicit type 'x' """ if OPTIONS.strict.value: syntax_error_undeclared_type(lineno, id_) return if type_ is None: type_ = global_.DEFAULT_TYPE warning(lineno, "Using default implicit type '%s' for '%s'" % (type_, id_))
[ "def", "warning_implicit_type", "(", "lineno", ",", "id_", ",", "type_", "=", "None", ")", ":", "if", "OPTIONS", ".", "strict", ".", "value", ":", "syntax_error_undeclared_type", "(", "lineno", ",", "id_", ")", "return", "if", "type_", "is", "None", ":", "type_", "=", "global_", ".", "DEFAULT_TYPE", "warning", "(", "lineno", ",", "\"Using default implicit type '%s' for '%s'\"", "%", "(", "type_", ",", "id_", ")", ")" ]
30.272727
18.181818
def find_best_instruction(addr, cpu_name, meta=None):
    """Given an instruction and meta information this attempts to find
    the best instruction for the frame.  In some circumstances we can
    fix it up a bit to improve the accuracy.  For more information see
    `symbolize_frame`.
    """
    addr = rv = parse_addr(addr)

    # In case we're not on the crashing frame we apply a simple heuristic:
    # since we're most likely dealing with return addresses we just assume
    # that the call is one instruction behind the current one.
    if not meta or meta.get('frame_number') != 0:
        rv = get_previous_instruction(addr, cpu_name)

    # In case registers are available we can check if the PC register
    # does not match the given address we have from the first frame.
    # If that is the case and we got one of a few signals that are likely
    # it seems that going with one instruction back is actually the
    # correct thing to do.
    else:
        regs = meta.get('registers')
        ip = get_ip_register(regs, cpu_name)
        if ip is not None and ip != addr and \
           meta.get('signal') in (SIGILL, SIGBUS, SIGSEGV):
            rv = get_previous_instruction(addr, cpu_name)

    # Don't ask me why we do this, but apparently on arm we get better
    # hits if we look at the end of an instruction in the DWARF file than
    # the beginning.
    return round_to_instruction_end(rv, cpu_name)
[ "def", "find_best_instruction", "(", "addr", ",", "cpu_name", ",", "meta", "=", "None", ")", ":", "addr", "=", "rv", "=", "parse_addr", "(", "addr", ")", "# In case we're not on the crashing frame we apply a simple heuristic:", "# since we're most likely dealing with return addresses we just assume", "# that the call is one instruction behind the current one.", "if", "not", "meta", "or", "meta", ".", "get", "(", "'frame_number'", ")", "!=", "0", ":", "rv", "=", "get_previous_instruction", "(", "addr", ",", "cpu_name", ")", "# In case registers are available we can check if the PC register", "# does not match the given address we have from the first frame.", "# If that is the case and we got one of a few signals taht are likely", "# it seems that going with one instruction back is actually the", "# correct thing to do.", "else", ":", "regs", "=", "meta", ".", "get", "(", "'registers'", ")", "ip", "=", "get_ip_register", "(", "regs", ",", "cpu_name", ")", "if", "ip", "is", "not", "None", "and", "ip", "!=", "addr", "and", "meta", ".", "get", "(", "'signal'", ")", "in", "(", "SIGILL", ",", "SIGBUS", ",", "SIGSEGV", ")", ":", "rv", "=", "get_previous_instruction", "(", "addr", ",", "cpu_name", ")", "# Don't ask me why we do this, but apparently on arm we get better", "# hits if we look at the end of an instruction in the DWARF file than", "# the beginning.", "return", "round_to_instruction_end", "(", "rv", ",", "cpu_name", ")" ]
46.7
21.133333
def _c_base_var(self): """Return the name of the module base variable.""" if self.opts.no_structs: return self.name return 'windll->{}.{}'.format( self.name, self.opts.base )
[ "def", "_c_base_var", "(", "self", ")", ":", "if", "self", ".", "opts", ".", "no_structs", ":", "return", "self", ".", "name", "return", "'windll->{}.{}'", ".", "format", "(", "self", ".", "name", ",", "self", ".", "opts", ".", "base", ")" ]
32
10.571429
def booklet_nup_pdf(input_filename: str, output_filename: str, latex_paper_size: str = LATEX_PAPER_SIZE_A4) -> str: """ Takes a PDF (e.g. A4) and makes a 2x1 booklet (e.g. 2xA5 per A4). The booklet can be folded like a book and the final pages will be in order. Returns the output filename. """ log.info("Creating booklet") log.debug("... {!r} -> {!r}", input_filename, output_filename) require(PDFJAM, HELP_MISSING_PDFJAM) n_pages = get_page_count(input_filename) n_sheets = calc_n_sheets(n_pages) log.debug("{} pages => {} sheets", n_pages, n_sheets) pagenums = page_sequence(n_sheets, one_based=True) pagespeclist = [str(p) if p <= n_pages else "{}" for p in pagenums] # ... switches empty pages to "{}", which is pdfjam notation for # an empty page. pagespec = ",".join(pagespeclist) pdfjam_tidy = True # clean up after yourself? args = [ PDFJAM, "--paper", latex_paper_size, "--landscape", "--nup", "2x1", "--keepinfo", # e.g. author information "--outfile", output_filename, "--tidy" if pdfjam_tidy else "--no-tidy", "--", # "no more options" input_filename, pagespec ] run(args) return output_filename
[ "def", "booklet_nup_pdf", "(", "input_filename", ":", "str", ",", "output_filename", ":", "str", ",", "latex_paper_size", ":", "str", "=", "LATEX_PAPER_SIZE_A4", ")", "->", "str", ":", "log", ".", "info", "(", "\"Creating booklet\"", ")", "log", ".", "debug", "(", "\"... {!r} -> {!r}\"", ",", "input_filename", ",", "output_filename", ")", "require", "(", "PDFJAM", ",", "HELP_MISSING_PDFJAM", ")", "n_pages", "=", "get_page_count", "(", "input_filename", ")", "n_sheets", "=", "calc_n_sheets", "(", "n_pages", ")", "log", ".", "debug", "(", "\"{} pages => {} sheets\"", ",", "n_pages", ",", "n_sheets", ")", "pagenums", "=", "page_sequence", "(", "n_sheets", ",", "one_based", "=", "True", ")", "pagespeclist", "=", "[", "str", "(", "p", ")", "if", "p", "<=", "n_pages", "else", "\"{}\"", "for", "p", "in", "pagenums", "]", "# ... switches empty pages to \"{}\", which is pdfjam notation for", "# an empty page.", "pagespec", "=", "\",\"", ".", "join", "(", "pagespeclist", ")", "pdfjam_tidy", "=", "True", "# clean up after yourself?", "args", "=", "[", "PDFJAM", ",", "\"--paper\"", ",", "latex_paper_size", ",", "\"--landscape\"", ",", "\"--nup\"", ",", "\"2x1\"", ",", "\"--keepinfo\"", ",", "# e.g. author information", "\"--outfile\"", ",", "output_filename", ",", "\"--tidy\"", "if", "pdfjam_tidy", "else", "\"--no-tidy\"", ",", "\"--\"", ",", "# \"no more options\"", "input_filename", ",", "pagespec", "]", "run", "(", "args", ")", "return", "output_filename" ]
38.636364
14.515152
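The key step above is page_sequence, which the source does not show. A plausible saddle-stitch ordering (an assumption, not the document's actual helper) is sketched below: each physical sheet carries four logical pages, and the 2-up sides alternate outer/inner pairs so the folded booklet reads in order.

# Assumed implementation of a saddle-stitch page order; the document's
# real page_sequence() may differ in details such as zero-based output.
def page_sequence(n_sheets, one_based=True):
    n = n_sheets * 4  # four logical pages per physical sheet
    lo, hi = (1, n) if one_based else (0, n - 1)
    seq = []
    while lo < hi:
        # front side: [last, first]; back side: [second, second-to-last]
        seq += [hi, lo, lo + 1, hi - 1]
        lo += 2
        hi -= 2
    return seq

print(page_sequence(2))  # [8, 1, 2, 7, 6, 3, 4, 5]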
def get_many(self, content_id_list): '''Yield (content_id, data) tuples for ids in list. As with :meth:`get`, if a content_id in the list is missing, then it is yielded with a data value of `None`. :type content_id_list: list<str> :rtype: yields tuple(str, :class:`dossier.fc.FeatureCollection`) ''' content_id_keys = [tuplify(x) for x in content_id_list] for row in self.kvl.get(self.TABLE, *content_id_keys): content_id = row[0][0] data = row[1] if data is not None: data = fc_loads(data) yield (content_id, data)
[ "def", "get_many", "(", "self", ",", "content_id_list", ")", ":", "content_id_keys", "=", "[", "tuplify", "(", "x", ")", "for", "x", "in", "content_id_list", "]", "for", "row", "in", "self", ".", "kvl", ".", "get", "(", "self", ".", "TABLE", ",", "*", "content_id_keys", ")", ":", "content_id", "=", "row", "[", "0", "]", "[", "0", "]", "data", "=", "row", "[", "1", "]", "if", "data", "is", "not", "None", ":", "data", "=", "fc_loads", "(", "data", ")", "yield", "(", "content_id", ",", "data", ")" ]
37.058824
19.294118
def theta_limit(theta):
    """
    Angle theta is periodic with period pi. Constrain theta such that
    -pi/2 < theta <= pi/2.

    Parameters
    ----------
    theta : float
        Input angle.

    Returns
    -------
    theta : float
        Constrained angle.
    """
    while theta <= -1 * np.pi / 2:
        theta += np.pi
    while theta > np.pi / 2:
        theta -= np.pi
    return theta
[ "def", "theta_limit", "(", "theta", ")", ":", "while", "theta", "<=", "-", "1", "*", "np", ".", "pi", "/", "2", ":", "theta", "+=", "np", ".", "pi", "while", "theta", ">", "np", ".", "pi", "/", "2", ":", "theta", "-=", "np", ".", "pi", "return", "theta" ]
18.75
19.05
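A quick check of the folding behaviour above, as a self-contained copy so it runs standalone:

import numpy as np

def theta_limit(theta):
    # Fold a periodic angle (period pi) into the interval (-pi/2, pi/2].
    while theta <= -1 * np.pi / 2:
        theta += np.pi
    while theta > np.pi / 2:
        theta -= np.pi
    return theta

assert np.isclose(theta_limit(3 * np.pi / 4), -np.pi / 4)
assert np.isclose(theta_limit(-np.pi), 0.0)
assert theta_limit(np.pi / 2) == np.pi / 2  # upper bound is inclusive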
def addBlock(self,branch=None,btype=None,mtype=None,attributes=None):
        '''
        Add a block definition to the list of blocks in the material.

        Order for a list entry:
          - branch level (0 = root)
          - block type (material, solid, fluid, etc.)
          - matid (integer if root, False otherwise)
          - material name (string if root, False otherwise)
          - material type
          - dictionary of attributes, or False if none
        '''
        if branch == 0: attributes = self.attributes
        blk = {'branch': branch,
               'btype': btype,
               'mtype': mtype,
               'attributes': attributes}
        self.blocks.append(blk)
[ "def", "addBlock", "(", "self", ",", "branch", "=", "None", ",", "btype", "=", "None", ",", "mtype", "=", "None", ",", "attributes", "=", "None", ")", ":", "if", "branch", "==", "0", ":", "attributes", "=", "self", ".", "attributes", "blk", "=", "{", "'branch'", ":", "branch", ",", "'btype'", ":", "btype", ",", "'mtype'", ":", "mtype", ",", "'attributes'", ":", "attributes", "}", "self", ".", "blocks", ".", "append", "(", "blk", ")" ]
45.285714
25.857143
def sanitize_for_archive(url, headers, payload):
        """Sanitize the payload of an HTTP request by removing the token information
        before storing/retrieving archived items

        :param: url: HTTP url request
        :param: headers: HTTP headers request
        :param: payload: HTTP payload request

        :returns url, headers and the sanitized payload
        """
        if DiscourseClient.PKEY in payload:
            payload.pop(DiscourseClient.PKEY)

        return url, headers, payload
[ "def", "sanitize_for_archive", "(", "url", ",", "headers", ",", "payload", ")", ":", "if", "DiscourseClient", ".", "PKEY", "in", "payload", ":", "payload", ".", "pop", "(", "DiscourseClient", ".", "PKEY", ")", "return", "url", ",", "headers", ",", "payload" ]
35.142857
12.571429
def to_mesh(self): """ Return a copy of the Primitive object as a Trimesh object. """ result = Trimesh(vertices=self.vertices.copy(), faces=self.faces.copy(), face_normals=self.face_normals.copy(), process=False) return result
[ "def", "to_mesh", "(", "self", ")", ":", "result", "=", "Trimesh", "(", "vertices", "=", "self", ".", "vertices", ".", "copy", "(", ")", ",", "faces", "=", "self", ".", "faces", ".", "copy", "(", ")", ",", "face_normals", "=", "self", ".", "face_normals", ".", "copy", "(", ")", ",", "process", "=", "False", ")", "return", "result" ]
37
12.777778
def _assemble_flowtable(self, values): """ generate a flowtable from a tuple of descriptors. """ values = map(lambda x: [] if x is None else x, values) src = values[0] + values[1] dst = values[2] + values[3] thistable = dict() for s in src: thistable[s] = dst return thistable
[ "def", "_assemble_flowtable", "(", "self", ",", "values", ")", ":", "values", "=", "map", "(", "lambda", "x", ":", "[", "]", "if", "x", "is", "None", "else", "x", ",", "values", ")", "src", "=", "values", "[", "0", "]", "+", "values", "[", "1", "]", "dst", "=", "values", "[", "2", "]", "+", "values", "[", "3", "]", "thistable", "=", "dict", "(", ")", "for", "s", "in", "src", ":", "thistable", "[", "s", "]", "=", "dst", "return", "thistable" ]
31.181818
12.090909
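Note that map() returns a lazy iterator on Python 3, so the indexing values[0] above only works on Python 2. The self-contained sketch below uses a list comprehension instead and shows the resulting table shape.

# Python 3-safe rewrite of the flow-table assembly above.
def assemble_flowtable(values):
    values = [[] if x is None else x for x in values]
    src = values[0] + values[1]
    dst = values[2] + values[3]
    # Every source key maps to the full destination list.
    return {s: dst for s in src}

print(assemble_flowtable(([1], None, ['a'], ['b'])))
# {1: ['a', 'b']}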
def qtag(tag): """ Return fully qualified (Clark notation) tagname corresponding to short-form prefixed tagname *tag*. """ prefix, tagroot = tag.split(':') uri = nsmap[prefix] return '{%s}%s' % (uri, tagroot)
[ "def", "qtag", "(", "tag", ")", ":", "prefix", ",", "tagroot", "=", "tag", ".", "split", "(", "':'", ")", "uri", "=", "nsmap", "[", "prefix", "]", "return", "'{%s}%s'", "%", "(", "uri", ",", "tagroot", ")" ]
28.625
10.125
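A self-contained example of the Clark-notation expansion above; the nsmap entry is illustrative, not the document's actual namespace table.

# Illustrative namespace map; the real module defines its own nsmap.
nsmap = {'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'}

def qtag(tag):
    prefix, tagroot = tag.split(':')
    return '{%s}%s' % (nsmap[prefix], tagroot)

print(qtag('w:body'))
# {http://schemas.openxmlformats.org/wordprocessingml/2006/main}body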
def model_counts_map(self, name=None, exclude=None, use_mask=False):
        """Return the model counts map for a single source, a list of
        sources, or for the sum of all sources in the ROI. The
        exclude parameter can be used to exclude one or more
        components when generating the model map.

        Parameters
        ----------
        name : str or list of str
           Parameter controlling the set of sources for which the
           model counts map will be calculated.  If name=None the
           model map will be generated for all sources in the ROI.

        exclude : str or list of str
           List of sources that will be excluded when calculating the
           model map.

        use_mask : bool
           Parameter that specifies whether the model counts map should
           include mask pixels (i.e., ones whose weights are <= 0)

        Returns
        -------
        map : `~gammapy.maps.Map`
        """
        maps = [c.model_counts_map(name, exclude, use_mask=use_mask)
                for c in self.components]
        return skymap.coadd_maps(self.geom, maps)
[ "def", "model_counts_map", "(", "self", ",", "name", "=", "None", ",", "exclude", "=", "None", ",", "use_mask", "=", "False", ")", ":", "maps", "=", "[", "c", ".", "model_counts_map", "(", "name", ",", "exclude", ",", "use_mask", "=", "use_mask", ")", "for", "c", "in", "self", ".", "components", "]", "return", "skymap", ".", "coadd_maps", "(", "self", ".", "geom", ",", "maps", ")" ]
35.322581
22.677419
def _check_and_execute(func, *args, **kwargs):
    """
    Check the type of all parameters with type information, converting
    as appropriate, and then execute the function.
    """

    convargs = []

    # Convert and validate all arguments
    for i, arg in enumerate(args):
        val = func.metadata.convert_positional_argument(i, arg)
        convargs.append(val)

    convkw = {}
    for key, val in kwargs.items():
        convkw[key] = func.metadata.convert_argument(key, val)

    if not func.metadata.spec_filled(convargs, convkw):
        raise ValidationError("Not enough parameters specified to call function",
                              function=func.metadata.name,
                              signature=func.metadata.signature())

    retval = func(*convargs, **convkw)
    return retval
[ "def", "_check_and_execute", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "convargs", "=", "[", "]", "#Convert and validate all arguments", "for", "i", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "val", "=", "func", ".", "metadata", ".", "convert_positional_argument", "(", "i", ",", "arg", ")", "convargs", ".", "append", "(", "val", ")", "convkw", "=", "{", "}", "for", "key", ",", "val", "in", "kwargs", ":", "convkw", "[", "key", "]", "=", "func", ".", "metadata", ".", "convert_argument", "(", "key", ",", "val", ")", "if", "not", "func", ".", "metadata", ".", "spec_filled", "(", "convargs", ",", "convkw", ")", ":", "raise", "ValidationError", "(", "\"Not enough parameters specified to call function\"", ",", "function", "=", "func", ".", "metadata", ".", "name", ",", "signature", "=", "func", ".", "metadata", ".", "signature", "(", ")", ")", "retval", "=", "func", "(", "*", "convargs", ",", "*", "*", "convkw", ")", "return", "retval" ]
32.772727
23.5
def _QueryHash(self, digest):
    """Queries the Viper Server for a specific hash.

    Args:
      digest (str): hash to look up.

    Returns:
      dict[str, object]: JSON response or None on error.
    """
    if not self._url:
      self._url = '{0:s}://{1:s}:{2:d}/file/find'.format(
          self._protocol, self._host, self._port)

    request_data = {self.lookup_hash: digest}

    try:
      json_response = self.MakeRequestAndDecodeJSON(
          self._url, 'POST', data=request_data)

    except errors.ConnectionError as exception:
      json_response = None
      logger.error('Unable to query Viper with error: {0!s}.'.format(
          exception))

    return json_response
[ "def", "_QueryHash", "(", "self", ",", "digest", ")", ":", "if", "not", "self", ".", "_url", ":", "self", ".", "_url", "=", "'{0:s}://{1:s}:{2:d}/file/find'", ".", "format", "(", "self", ".", "_protocol", ",", "self", ".", "_host", ",", "self", ".", "_port", ")", "request_data", "=", "{", "self", ".", "lookup_hash", ":", "digest", "}", "try", ":", "json_response", "=", "self", ".", "MakeRequestAndDecodeJSON", "(", "self", ".", "_url", ",", "'POST'", ",", "data", "=", "request_data", ")", "except", "errors", ".", "ConnectionError", "as", "exception", ":", "json_response", "=", "None", "logger", ".", "error", "(", "'Unable to query Viper with error: {0!s}.'", ".", "format", "(", "exception", ")", ")", "return", "json_response" ]
26.64
20.64
def logp_gradient(variable, calculation_set=None): """ Calculates the gradient of the joint log posterior with respect to variable. Calculation of the log posterior is restricted to the variables in calculation_set. """ return variable.logp_partial_gradient(variable, calculation_set) + sum( [child.logp_partial_gradient(variable, calculation_set) for child in variable.children])
[ "def", "logp_gradient", "(", "variable", ",", "calculation_set", "=", "None", ")", ":", "return", "variable", ".", "logp_partial_gradient", "(", "variable", ",", "calculation_set", ")", "+", "sum", "(", "[", "child", ".", "logp_partial_gradient", "(", "variable", ",", "calculation_set", ")", "for", "child", "in", "variable", ".", "children", "]", ")" ]
57.428571
26.857143
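The decomposition implemented above follows from the factorization of the joint density over a node and its children: the joint log posterior restricted to the variable's Markov blanket splits into the node's own conditional term plus one term per child. In generic notation (a sketch, with x the variable's value):

% Gradient of the joint log posterior with respect to x:
\frac{\partial}{\partial x} \log p(x, \mathrm{children}(x))
  = \frac{\partial}{\partial x} \log p\bigl(x \mid \mathrm{parents}(x)\bigr)
  + \sum_{c \in \mathrm{children}(x)}
      \frac{\partial}{\partial x} \log p\bigl(c \mid \mathrm{parents}(c)\bigr)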
def add_exception(self, exception, stack, remote=False):
        """
        Add an exception to trace entities.

        :param Exception exception: the caught exception.
        :param list stack: the output from the Python built-in
            `traceback.extract_stack()`.
        :param bool remote: If False, the exception was raised by the client
            itself rather than by a downstream service.
        """
        self._check_ended()
        self.add_fault_flag()

        if hasattr(exception, '_recorded'):
            setattr(self, 'cause', getattr(exception, '_cause_id'))
            return

        exceptions = []
        exceptions.append(Throwable(exception, stack, remote))

        self.cause['exceptions'] = exceptions
        self.cause['working_directory'] = os.getcwd()
[ "def", "add_exception", "(", "self", ",", "exception", ",", "stack", ",", "remote", "=", "False", ")", ":", "self", ".", "_check_ended", "(", ")", "self", ".", "add_fault_flag", "(", ")", "if", "hasattr", "(", "exception", ",", "'_recorded'", ")", ":", "setattr", "(", "self", ",", "'cause'", ",", "getattr", "(", "exception", ",", "'_cause_id'", ")", ")", "return", "exceptions", "=", "[", "]", "exceptions", ".", "append", "(", "Throwable", "(", "exception", ",", "stack", ",", "remote", ")", ")", "self", ".", "cause", "[", "'exceptions'", "]", "=", "exceptions", "self", ".", "cause", "[", "'working_directory'", "]", "=", "os", ".", "getcwd", "(", ")" ]
34.227273
17.136364