Dataset columns (name, dtype, observed range):

    text           string    lengths 89 to 104k
    code_tokens    list
    avg_line_len   float64   7.91 to 980
    score          float64   0 to 630
text:
def number_arg(ctx, obj):
    ''' Handles LiteralObjects as well as computable arguments '''
    if hasattr(obj, 'compute'):
        obj = next(obj.compute(ctx), False)
    return to_number(obj)
[ "def", "number_arg", "(", "ctx", ",", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "'compute'", ")", ":", "obj", "=", "next", "(", "obj", ".", "compute", "(", "ctx", ")", ",", "False", ")", "return", "to_number", "(", "obj", ")" ]
avg_line_len: 28
score: 18
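This helper normalizes either a literal or a computable argument before coercion. A minimal sketch of both call paths, with a hypothetical to_number stub standing in for the real coercion helper (which is not part of this snippet):

def to_number(obj):
    # hypothetical stand-in for the real coercion helper
    return float(obj)

class Two:
    def compute(self, ctx):
        yield 2  # a computable argument: a generator yielding its value

print(number_arg(None, Two()))   # 2.0 -- computed first, then coerced
print(number_arg(None, "3.5"))   # 3.5 -- a plain literal, coerced directly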
text:
def process(in_path, annot_beats=False, feature="mfcc", framesync=False,
            boundaries_id="gt", labels_id=None, n_jobs=4, config=None):
    """Sweeps parameters across the specified algorithm."""
    results_file = "results_sweep_boundsE%s_labelsE%s.csv" % (boundaries_id,
                                                              labels_id)

    if labels_id == "cnmf3" or boundaries_id == "cnmf3":
        config = io.get_configuration(feature, annot_beats, framesync,
                                      boundaries_id, labels_id)
        hh = range(15, 33)
        RR = range(15, 40)
        ranks = range(3, 6)
        RR_labels = range(11, 12)
        ranks_labels = range(6, 7)
        all_results = pd.DataFrame()
        for rank in ranks:
            for h in hh:
                for R in RR:
                    for rank_labels in ranks_labels:
                        for R_labels in RR_labels:
                            config["h"] = h
                            config["R"] = R
                            config["rank"] = rank
                            config["rank_labels"] = rank_labels
                            config["R_labels"] = R_labels
                            config["features"] = None

                            # Run process
                            msaf.run.process(
                                in_path, n_jobs=n_jobs,
                                boundaries_id=boundaries_id,
                                labels_id=labels_id, config=config)

                            # Compute evaluations
                            results = msaf.eval.process(
                                in_path, boundaries_id, labels_id,
                                save=True, n_jobs=n_jobs, config=config)

                            # Save avg results
                            new_columns = {"config_h": h, "config_R": R,
                                           "config_rank": rank,
                                           "config_R_labels": R_labels,
                                           "config_rank_labels": rank_labels}
                            results = results.append([new_columns],
                                                     ignore_index=True)
                            all_results = all_results.append(
                                results.mean(), ignore_index=True)
                            all_results.to_csv(results_file)
    elif labels_id is None and boundaries_id == "sf":
        config = io.get_configuration(feature, annot_beats, framesync,
                                      boundaries_id, labels_id)
        MM = range(20, 32)
        mm = range(3, 4)
        kk = np.arange(0.03, 0.1, 0.01)
        Mpp = range(16, 32)
        ott = np.arange(0.02, 0.1, 0.01)
        all_results = pd.DataFrame()
        for k in kk:
            for ot in ott:
                for m in mm:
                    for M in MM:
                        for Mp in Mpp:
                            config["M_gaussian"] = M
                            config["m_embedded"] = m
                            config["k_nearest"] = k
                            config["Mp_adaptive"] = Mp
                            config["offset_thres"] = ot
                            config["features"] = None

                            # Run process
                            msaf.run.process(
                                in_path, n_jobs=n_jobs,
                                boundaries_id=boundaries_id,
                                labels_id=labels_id, config=config)

                            # Compute evaluations
                            results = msaf.eval.process(
                                in_path, boundaries_id, labels_id,
                                save=True, n_jobs=n_jobs, config=config)

                            # Save avg results
                            new_columns = {"config_M": M, "config_m": m,
                                           "config_k": k, "config_Mp": Mp,
                                           "config_ot": ot}
                            results = results.append([new_columns],
                                                     ignore_index=True)
                            all_results = all_results.append(
                                results.mean(), ignore_index=True)
                            all_results.to_csv(results_file)
    else:
        # Fixed: the original passed no argument for the %s placeholder,
        # so the literal "%s" was logged.
        logging.error("Can't sweep parameters for %s algorithm. "
                      "Implement me! :D", boundaries_id)
[ "def", "process", "(", "in_path", ",", "annot_beats", "=", "False", ",", "feature", "=", "\"mfcc\"", ",", "framesync", "=", "False", ",", "boundaries_id", "=", "\"gt\"", ",", "labels_id", "=", "None", ",", "n_jobs", "=", "4", ",", "config", "=", "None", ")", ":", "results_file", "=", "\"results_sweep_boundsE%s_labelsE%s.csv\"", "%", "(", "boundaries_id", ",", "labels_id", ")", "if", "labels_id", "==", "\"cnmf3\"", "or", "boundaries_id", "==", "\"cnmf3\"", ":", "config", "=", "io", ".", "get_configuration", "(", "feature", ",", "annot_beats", ",", "framesync", ",", "boundaries_id", ",", "labels_id", ")", "hh", "=", "range", "(", "15", ",", "33", ")", "RR", "=", "range", "(", "15", ",", "40", ")", "ranks", "=", "range", "(", "3", ",", "6", ")", "RR_labels", "=", "range", "(", "11", ",", "12", ")", "ranks_labels", "=", "range", "(", "6", ",", "7", ")", "all_results", "=", "pd", ".", "DataFrame", "(", ")", "for", "rank", "in", "ranks", ":", "for", "h", "in", "hh", ":", "for", "R", "in", "RR", ":", "for", "rank_labels", "in", "ranks_labels", ":", "for", "R_labels", "in", "RR_labels", ":", "config", "[", "\"h\"", "]", "=", "h", "config", "[", "\"R\"", "]", "=", "R", "config", "[", "\"rank\"", "]", "=", "rank", "config", "[", "\"rank_labels\"", "]", "=", "rank_labels", "config", "[", "\"R_labels\"", "]", "=", "R_labels", "config", "[", "\"features\"", "]", "=", "None", "# Run process", "msaf", ".", "run", ".", "process", "(", "in_path", ",", "n_jobs", "=", "n_jobs", ",", "boundaries_id", "=", "boundaries_id", ",", "labels_id", "=", "labels_id", ",", "config", "=", "config", ")", "# Compute evaluations", "results", "=", "msaf", ".", "eval", ".", "process", "(", "in_path", ",", "boundaries_id", ",", "labels_id", ",", "save", "=", "True", ",", "n_jobs", "=", "n_jobs", ",", "config", "=", "config", ")", "# Save avg results", "new_columns", "=", "{", "\"config_h\"", ":", "h", ",", "\"config_R\"", ":", "R", ",", "\"config_rank\"", ":", "rank", ",", "\"config_R_labels\"", ":", "R_labels", ",", "\"config_rank_labels\"", ":", "rank_labels", "}", "results", "=", "results", ".", "append", "(", "[", "new_columns", "]", ",", "ignore_index", "=", "True", ")", "all_results", "=", "all_results", ".", "append", "(", "results", ".", "mean", "(", ")", ",", "ignore_index", "=", "True", ")", "all_results", ".", "to_csv", "(", "results_file", ")", "elif", "labels_id", "is", "None", "and", "boundaries_id", "==", "\"sf\"", ":", "config", "=", "io", ".", "get_configuration", "(", "feature", ",", "annot_beats", ",", "framesync", ",", "boundaries_id", ",", "labels_id", ")", "MM", "=", "range", "(", "20", ",", "32", ")", "mm", "=", "range", "(", "3", ",", "4", ")", "kk", "=", "np", ".", "arange", "(", "0.03", ",", "0.1", ",", "0.01", ")", "Mpp", "=", "range", "(", "16", ",", "32", ")", "ott", "=", "np", ".", "arange", "(", "0.02", ",", "0.1", ",", "0.01", ")", "all_results", "=", "pd", ".", "DataFrame", "(", ")", "for", "k", "in", "kk", ":", "for", "ot", "in", "ott", ":", "for", "m", "in", "mm", ":", "for", "M", "in", "MM", ":", "for", "Mp", "in", "Mpp", ":", "config", "[", "\"M_gaussian\"", "]", "=", "M", "config", "[", "\"m_embedded\"", "]", "=", "m", "config", "[", "\"k_nearest\"", "]", "=", "k", "config", "[", "\"Mp_adaptive\"", "]", "=", "Mp", "config", "[", "\"offset_thres\"", "]", "=", "ot", "config", "[", "\"features\"", "]", "=", "None", "# Run process", "msaf", ".", "run", ".", "process", "(", "in_path", ",", "n_jobs", "=", "n_jobs", ",", "boundaries_id", "=", "boundaries_id", ",", "labels_id", "=", "labels_id", 
",", "config", "=", "config", ")", "# Compute evaluations", "results", "=", "msaf", ".", "eval", ".", "process", "(", "in_path", ",", "boundaries_id", ",", "labels_id", ",", "save", "=", "True", ",", "n_jobs", "=", "n_jobs", ",", "config", "=", "config", ")", "# Save avg results", "new_columns", "=", "{", "\"config_M\"", ":", "M", ",", "\"config_m\"", ":", "m", ",", "\"config_k\"", ":", "k", ",", "\"config_Mp\"", ":", "Mp", ",", "\"config_ot\"", ":", "ot", "}", "results", "=", "results", ".", "append", "(", "[", "new_columns", "]", ",", "ignore_index", "=", "True", ")", "all_results", "=", "all_results", ".", "append", "(", "results", ".", "mean", "(", ")", ",", "ignore_index", "=", "True", ")", "all_results", ".", "to_csv", "(", "results_file", ")", "else", ":", "logging", ".", "error", "(", "\"Can't sweep parameters for %s algorithm. \"", "\"Implement me! :D\"", ")" ]
avg_line_len: 46.092784
score: 20.639175
text:
def fetch(self):
    """Fetch the data for the model from Redis and assign the values.

    :rtype: bool

    """
    raw = yield gen.Task(self._redis_client.get, self._key)
    if raw:
        self.loads(base64.b64decode(raw))
        raise gen.Return(True)
    raise gen.Return(False)
[ "def", "fetch", "(", "self", ")", ":", "raw", "=", "yield", "gen", ".", "Task", "(", "self", ".", "_redis_client", ".", "get", ",", "self", ".", "_key", ")", "if", "raw", ":", "self", ".", "loads", "(", "base64", ".", "b64decode", "(", "raw", ")", ")", "raise", "gen", ".", "Return", "(", "True", ")", "raise", "gen", ".", "Return", "(", "False", ")" ]
avg_line_len: 28
score: 17.454545
text:
def _connect(self):
    """
    Connect to the graphite server
    """
    if self.proto == 'udp':
        stream = socket.SOCK_DGRAM
    else:
        stream = socket.SOCK_STREAM

    if self.proto[-1] == '4':
        family = socket.AF_INET
        connection_struct = (self.host, self.port)
    elif self.proto[-1] == '6':
        family = socket.AF_INET6
        connection_struct = (self.host, self.port,
                             self.flow_info, self.scope_id)
    else:
        connection_struct = (self.host, self.port)
        try:
            addrinfo = socket.getaddrinfo(self.host, self.port, 0, stream)
        except socket.gaierror as ex:
            self.log.error("GraphiteHandler: Error looking up graphite host"
                           " '%s' - %s", self.host, ex)
            return
        if len(addrinfo) > 0:
            family = addrinfo[0][0]
            if family == socket.AF_INET6:
                connection_struct = (self.host, self.port,
                                     self.flow_info, self.scope_id)
        else:
            family = socket.AF_INET

    # Create socket
    self.socket = socket.socket(family, stream)
    if self.socket is None:
        # Log Error
        self.log.error("GraphiteHandler: Unable to create socket.")
        # Close Socket
        self._close()
        return

    # Enable keepalives?
    if self.proto != 'udp' and self.keepalive:
        # Fixed: this is an informational message; the original logged it
        # at error level.
        self.log.debug("GraphiteHandler: Setting socket keepalives...")
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                               self.keepaliveinterval)
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
                               self.keepaliveinterval)
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3)

    # Set socket timeout
    self.socket.settimeout(self.timeout)

    # Connect to graphite server
    try:
        self.socket.connect(connection_struct)
        # Log
        self.log.debug("GraphiteHandler: Established connection to "
                       "graphite server %s:%d.", self.host, self.port)
        self.last_connect_timestamp = time.time()
    except Exception as ex:
        # Log Error
        self._throttle_error("GraphiteHandler: Failed to connect to "
                             "%s:%i. %s.", self.host, self.port, ex)
        # Close Socket
        self._close()
        return
[ "def", "_connect", "(", "self", ")", ":", "if", "(", "self", ".", "proto", "==", "'udp'", ")", ":", "stream", "=", "socket", ".", "SOCK_DGRAM", "else", ":", "stream", "=", "socket", ".", "SOCK_STREAM", "if", "(", "self", ".", "proto", "[", "-", "1", "]", "==", "'4'", ")", ":", "family", "=", "socket", ".", "AF_INET", "connection_struct", "=", "(", "self", ".", "host", ",", "self", ".", "port", ")", "elif", "(", "self", ".", "proto", "[", "-", "1", "]", "==", "'6'", ")", ":", "family", "=", "socket", ".", "AF_INET6", "connection_struct", "=", "(", "self", ".", "host", ",", "self", ".", "port", ",", "self", ".", "flow_info", ",", "self", ".", "scope_id", ")", "else", ":", "connection_struct", "=", "(", "self", ".", "host", ",", "self", ".", "port", ")", "try", ":", "addrinfo", "=", "socket", ".", "getaddrinfo", "(", "self", ".", "host", ",", "self", ".", "port", ",", "0", ",", "stream", ")", "except", "socket", ".", "gaierror", "as", "ex", ":", "self", ".", "log", ".", "error", "(", "\"GraphiteHandler: Error looking up graphite host\"", "\" '%s' - %s\"", ",", "self", ".", "host", ",", "ex", ")", "return", "if", "(", "len", "(", "addrinfo", ")", ">", "0", ")", ":", "family", "=", "addrinfo", "[", "0", "]", "[", "0", "]", "if", "(", "family", "==", "socket", ".", "AF_INET6", ")", ":", "connection_struct", "=", "(", "self", ".", "host", ",", "self", ".", "port", ",", "self", ".", "flow_info", ",", "self", ".", "scope_id", ")", "else", ":", "family", "=", "socket", ".", "AF_INET", "# Create socket", "self", ".", "socket", "=", "socket", ".", "socket", "(", "family", ",", "stream", ")", "if", "self", ".", "socket", "is", "None", ":", "# Log Error", "self", ".", "log", ".", "error", "(", "\"GraphiteHandler: Unable to create socket.\"", ")", "# Close Socket", "self", ".", "_close", "(", ")", "return", "# Enable keepalives?", "if", "self", ".", "proto", "!=", "'udp'", "and", "self", ".", "keepalive", ":", "self", ".", "log", ".", "error", "(", "\"GraphiteHandler: Setting socket keepalives...\"", ")", "self", ".", "socket", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_KEEPALIVE", ",", "1", ")", "self", ".", "socket", ".", "setsockopt", "(", "socket", ".", "IPPROTO_TCP", ",", "socket", ".", "TCP_KEEPIDLE", ",", "self", ".", "keepaliveinterval", ")", "self", ".", "socket", ".", "setsockopt", "(", "socket", ".", "IPPROTO_TCP", ",", "socket", ".", "TCP_KEEPINTVL", ",", "self", ".", "keepaliveinterval", ")", "self", ".", "socket", ".", "setsockopt", "(", "socket", ".", "IPPROTO_TCP", ",", "socket", ".", "TCP_KEEPCNT", ",", "3", ")", "# Set socket timeout", "self", ".", "socket", ".", "settimeout", "(", "self", ".", "timeout", ")", "# Connect to graphite server", "try", ":", "self", ".", "socket", ".", "connect", "(", "connection_struct", ")", "# Log", "self", ".", "log", ".", "debug", "(", "\"GraphiteHandler: Established connection to \"", "\"graphite server %s:%d.\"", ",", "self", ".", "host", ",", "self", ".", "port", ")", "self", ".", "last_connect_timestamp", "=", "time", ".", "time", "(", ")", "except", "Exception", "as", "ex", ":", "# Log Error", "self", ".", "_throttle_error", "(", "\"GraphiteHandler: Failed to connect to \"", "\"%s:%i. %s.\"", ",", "self", ".", "host", ",", "self", ".", "port", ",", "ex", ")", "# Close Socket", "self", ".", "_close", "(", ")", "return" ]
avg_line_len: 40.910448
score: 16.880597
text:
def RqueryBM(query_filter, query_items, query_attributes, dataset, database,
             host=rbiomart_host):
    """
    Queries BioMart.

    :param query_filter: one BioMart filter associated with the items being queried
    :param query_items: list of items to be queried (must associate with given filter)
    :param query_attributes: list of attributes to recover from BioMart
    :param dataset: dataset to query
    :param database: database to query
    :param host: address of the host server, default='www.ensembl.org'

    :return: a Pandas dataframe of the queried attributes
    """
    biomaRt = importr("biomaRt")
    # Fixed: use the host argument; the original always passed the
    # module-level rbiomart_host default, silently ignoring the parameter.
    ensemblMart = biomaRt.useMart(database, host=host)
    ensembl = biomaRt.useDataset(dataset, mart=ensemblMart)
    df = biomaRt.getBM(attributes=query_attributes, filters=query_filter,
                       values=query_items, mart=ensembl)
    output = [tuple([df[j][i] for j in range(df.ncol)])
              for i in range(df.nrow)]
    output = pd.DataFrame(output)
    output.columns = query_attributes
    return output
[ "def", "RqueryBM", "(", "query_filter", ",", "query_items", ",", "query_attributes", ",", "dataset", ",", "database", ",", "host", "=", "rbiomart_host", ")", ":", "biomaRt", "=", "importr", "(", "\"biomaRt\"", ")", "ensemblMart", "=", "biomaRt", ".", "useMart", "(", "database", ",", "host", "=", "rbiomart_host", ")", "ensembl", "=", "biomaRt", ".", "useDataset", "(", "dataset", ",", "mart", "=", "ensemblMart", ")", "df", "=", "biomaRt", ".", "getBM", "(", "attributes", "=", "query_attributes", ",", "filters", "=", "query_filter", ",", "values", "=", "query_items", ",", "mart", "=", "ensembl", ")", "output", "=", "[", "tuple", "(", "[", "df", "[", "j", "]", "[", "i", "]", "for", "j", "in", "range", "(", "df", ".", "ncol", ")", "]", ")", "for", "i", "in", "range", "(", "df", ".", "nrow", ")", "]", "output", "=", "pd", ".", "DataFrame", "(", "output", ")", "output", ".", "columns", "=", "query_attributes", "return", "output" ]
avg_line_len: 42.956522
score: 25.565217
text:
def _transform_item(self, content_metadata_item):
    """
    Transform the provided content metadata item to the schema expected
    by the integrated channel.
    """
    content_metadata_type = content_metadata_item['content_type']
    transformed_item = {}
    for integrated_channel_schema_key, edx_data_schema_key in self.DATA_TRANSFORM_MAPPING.items():
        # Look for transformer functions defined on subclasses.
        # Favor content type-specific functions.
        transformer = (
            getattr(
                self,
                'transform_{content_type}_{edx_data_schema_key}'.format(
                    content_type=content_metadata_type,
                    edx_data_schema_key=edx_data_schema_key
                ),
                None
            )
            or
            getattr(
                self,
                'transform_{edx_data_schema_key}'.format(
                    edx_data_schema_key=edx_data_schema_key
                ),
                None
            )
        )
        if transformer:
            transformed_item[integrated_channel_schema_key] = transformer(content_metadata_item)
        else:
            # The concrete subclass does not define an override for the given field,
            # so just use the data key to index the content metadata item dictionary.
            try:
                transformed_item[integrated_channel_schema_key] = content_metadata_item[edx_data_schema_key]
            except KeyError:
                # There may be a problem with the DATA_TRANSFORM_MAPPING on
                # the concrete subclass, or the concrete subclass does not
                # implement the appropriate field transformer function.
                LOGGER.exception(
                    'Failed to transform content metadata item field [%s] for [%s]: [%s]',
                    edx_data_schema_key,
                    self.enterprise_customer.name,
                    content_metadata_item,
                )

    return transformed_item
[ "def", "_transform_item", "(", "self", ",", "content_metadata_item", ")", ":", "content_metadata_type", "=", "content_metadata_item", "[", "'content_type'", "]", "transformed_item", "=", "{", "}", "for", "integrated_channel_schema_key", ",", "edx_data_schema_key", "in", "self", ".", "DATA_TRANSFORM_MAPPING", ".", "items", "(", ")", ":", "# Look for transformer functions defined on subclasses.", "# Favor content type-specific functions.", "transformer", "=", "(", "getattr", "(", "self", ",", "'transform_{content_type}_{edx_data_schema_key}'", ".", "format", "(", "content_type", "=", "content_metadata_type", ",", "edx_data_schema_key", "=", "edx_data_schema_key", ")", ",", "None", ")", "or", "getattr", "(", "self", ",", "'transform_{edx_data_schema_key}'", ".", "format", "(", "edx_data_schema_key", "=", "edx_data_schema_key", ")", ",", "None", ")", ")", "if", "transformer", ":", "transformed_item", "[", "integrated_channel_schema_key", "]", "=", "transformer", "(", "content_metadata_item", ")", "else", ":", "# The concrete subclass does not define an override for the given field,", "# so just use the data key to index the content metadata item dictionary.", "try", ":", "transformed_item", "[", "integrated_channel_schema_key", "]", "=", "content_metadata_item", "[", "edx_data_schema_key", "]", "except", "KeyError", ":", "# There may be a problem with the DATA_TRANSFORM_MAPPING on", "# the concrete subclass or the concrete subclass does not implement", "# the appropriate field tranformer function.", "LOGGER", ".", "exception", "(", "'Failed to transform content metadata item field [%s] for [%s]: [%s]'", ",", "edx_data_schema_key", ",", "self", ".", "enterprise_customer", ".", "name", ",", "content_metadata_item", ",", ")", "return", "transformed_item" ]
avg_line_len: 46.478261
score: 24.434783
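The interesting part of _transform_item is the getattr chain: a content-type-specific transform_<type>_<field> method wins over a generic transform_<field> fallback. A stripped-down, self-contained illustration of that lookup, using hypothetical names:

class Exporter:
    def transform_course_title(self, item):   # content-type-specific override
        return item['title'].upper()

    def transform_title(self, item):          # generic fallback
        return item['title']

exporter = Exporter()
item = {'content_type': 'course', 'title': 'Linear Algebra'}
transformer = (
    getattr(exporter, 'transform_{}_{}'.format(item['content_type'], 'title'), None)
    or getattr(exporter, 'transform_title', None)
)
print(transformer(item))  # LINEAR ALGEBRA -- the course-specific method won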
text:
def visit_dictcomp(self, node, parent):
    """visit a DictComp node by returning a fresh instance of it"""
    newnode = nodes.DictComp(node.lineno, node.col_offset, parent)
    newnode.postinit(
        self.visit(node.key, newnode),
        self.visit(node.value, newnode),
        [self.visit(child, newnode) for child in node.generators],
    )
    return newnode
[ "def", "visit_dictcomp", "(", "self", ",", "node", ",", "parent", ")", ":", "newnode", "=", "nodes", ".", "DictComp", "(", "node", ".", "lineno", ",", "node", ".", "col_offset", ",", "parent", ")", "newnode", ".", "postinit", "(", "self", ".", "visit", "(", "node", ".", "key", ",", "newnode", ")", ",", "self", ".", "visit", "(", "node", ".", "value", ",", "newnode", ")", ",", "[", "self", ".", "visit", "(", "child", ",", "newnode", ")", "for", "child", "in", "node", ".", "generators", "]", ",", ")", "return", "newnode" ]
avg_line_len: 43.555556
score: 14.555556
text:
def get(self, number):
    """Return a pattern for a number.

    @param number (int) Number of pattern

    @return (set) Indices of on bits
    """
    if number not in self._patterns:
        raise IndexError("Invalid number")

    return self._patterns[number]
[ "def", "get", "(", "self", ",", "number", ")", ":", "if", "not", "number", "in", "self", ".", "_patterns", ":", "raise", "IndexError", "(", "\"Invalid number\"", ")", "return", "self", ".", "_patterns", "[", "number", "]" ]
avg_line_len: 21.333333
score: 13.333333
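Usage is straightforward; a hypothetical holder class shows both the success and failure paths (it assumes the function above is defined at module level so the class can reuse it):

class PatternStore:
    def __init__(self):
        self._patterns = {0: {1, 4, 7}}

    get = get  # reuse the function above

store = PatternStore()
print(store.get(0))    # {1, 4, 7}
# store.get(5)         # would raise IndexError("Invalid number")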
text:
def command(state, args):
    """Watch an anime."""
    if len(args) < 2:
        print(f'Usage: {args[0]} {{ID|aid:AID}} [EPISODE]')
        return
    aid = state.results.parse_aid(args[1], default_key='db')
    anime = query.select.lookup(state.db, aid)
    if len(args) < 3:
        episode = anime.watched_episodes + 1
    else:
        episode = int(args[2])
    anime_files = query.files.get_files(state.db, aid)
    files = anime_files[episode]
    if not files:
        print('No files.')
        return
    file = state.file_picker.pick(files)
    ret = subprocess.call(state.config['anime'].getargs('player') + [file])
    if ret == 0 and episode == anime.watched_episodes + 1:
        user_input = input('Bump? [Yn]')
        if user_input.lower() in ('n', 'no'):
            print('Not bumped.')
        else:
            query.update.bump(state.db, aid)
            print('Bumped.')
[ "def", "command", "(", "state", ",", "args", ")", ":", "if", "len", "(", "args", ")", "<", "2", ":", "print", "(", "f'Usage: {args[0]} {{ID|aid:AID}} [EPISODE]'", ")", "return", "aid", "=", "state", ".", "results", ".", "parse_aid", "(", "args", "[", "1", "]", ",", "default_key", "=", "'db'", ")", "anime", "=", "query", ".", "select", ".", "lookup", "(", "state", ".", "db", ",", "aid", ")", "if", "len", "(", "args", ")", "<", "3", ":", "episode", "=", "anime", ".", "watched_episodes", "+", "1", "else", ":", "episode", "=", "int", "(", "args", "[", "2", "]", ")", "anime_files", "=", "query", ".", "files", ".", "get_files", "(", "state", ".", "db", ",", "aid", ")", "files", "=", "anime_files", "[", "episode", "]", "if", "not", "files", ":", "print", "(", "'No files.'", ")", "return", "file", "=", "state", ".", "file_picker", ".", "pick", "(", "files", ")", "ret", "=", "subprocess", ".", "call", "(", "state", ".", "config", "[", "'anime'", "]", ".", "getargs", "(", "'player'", ")", "+", "[", "file", "]", ")", "if", "ret", "==", "0", "and", "episode", "==", "anime", ".", "watched_episodes", "+", "1", ":", "user_input", "=", "input", "(", "'Bump? [Yn]'", ")", "if", "user_input", ".", "lower", "(", ")", "in", "(", "'n'", ",", "'no'", ")", ":", "print", "(", "'Not bumped.'", ")", "else", ":", "query", ".", "update", ".", "bump", "(", "state", ".", "db", ",", "aid", ")", "print", "(", "'Bumped.'", ")" ]
avg_line_len: 32.296296
score: 16.407407
text:
def _find_path(start, end, path=tuple()):
    """Return the path from start to end.

    If no path exists, return None.
    """
    path = path + (start, )
    if start is end:
        return path
    else:
        ret = None
        if start.lo is not None:
            ret = _find_path(start.lo, end, path)
        if ret is None and start.hi is not None:
            ret = _find_path(start.hi, end, path)
        return ret
[ "def", "_find_path", "(", "start", ",", "end", ",", "path", "=", "tuple", "(", ")", ")", ":", "path", "=", "path", "+", "(", "start", ",", ")", "if", "start", "is", "end", ":", "return", "path", "else", ":", "ret", "=", "None", "if", "start", ".", "lo", "is", "not", "None", ":", "ret", "=", "_find_path", "(", "start", ".", "lo", ",", "end", ",", "path", ")", "if", "ret", "is", "None", "and", "start", ".", "hi", "is", "not", "None", ":", "ret", "=", "_find_path", "(", "start", ".", "hi", ",", "end", ",", "path", ")", "return", "ret" ]
avg_line_len: 27.533333
score: 13.933333
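A self-contained check on a three-node path; the minimal Node class here is assumed for the demo, since the real tree type is not part of this snippet:

class Node:
    def __init__(self, lo=None, hi=None):
        self.lo = lo
        self.hi = hi

leaf = Node()
root = Node(lo=Node(), hi=Node(lo=leaf))
path = _find_path(root, leaf)
print(path == (root, root.hi, leaf))  # True -- root -> right child -> leaf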
text:
def iterate(self, max_number_of_files=None):
    """iterate([max_number_of_files]) -> image, bounding_boxes, image_file

    Yields the image and the bounding boxes stored in the training set as an iterator.
    This function loads the images and converts them to gray-scale.
    It yields the image, the list of bounding boxes and the original image file name.

    **Parameters:**

    ``max_number_of_files`` : int or ``None``
      If specified, limit the number of returned data by sub-selection using :py:func:`quasi_random_indices`

    **Yields:**

    ``image`` : array_like(2D)
      The image loaded from file and converted to gray scale

    ``bounding_boxes`` : [:py:class:`BoundingBox`]
      A list of bounding boxes, where faces are found in the image; might be empty (in case of pure background images)

    ``image_file`` : str
      The name of the original image that was read
    """
    indices = quasi_random_indices(len(self), max_number_of_files)
    for index in indices:
        image = bob.io.base.load(self.image_paths[index])
        if len(image.shape) == 3:
            image = bob.ip.color.rgb_to_gray(image)
        # return image and bounding box as iterator
        yield image, self.bounding_boxes[index], self.image_paths[index]
[ "def", "iterate", "(", "self", ",", "max_number_of_files", "=", "None", ")", ":", "indices", "=", "quasi_random_indices", "(", "len", "(", "self", ")", ",", "max_number_of_files", ")", "for", "index", "in", "indices", ":", "image", "=", "bob", ".", "io", ".", "base", ".", "load", "(", "self", ".", "image_paths", "[", "index", "]", ")", "if", "len", "(", "image", ".", "shape", ")", "==", "3", ":", "image", "=", "bob", ".", "ip", ".", "color", ".", "rgb_to_gray", "(", "image", ")", "# return image and bounding box as iterator", "yield", "image", ",", "self", ".", "bounding_boxes", "[", "index", "]", ",", "self", ".", "image_paths", "[", "index", "]" ]
avg_line_len: 39.548387
score: 26.290323
text:
def rhymes(word):
    """Get words rhyming with a given word.

    This function may return an empty list if no rhyming words are found in
    the dictionary, or if the word you pass to the function is itself not
    found in the dictionary.

    .. doctest::

        >>> import pronouncing
        >>> pronouncing.rhymes("conditioner")
        ['commissioner', 'parishioner', 'petitioner', 'practitioner']

    :param word: a word
    :returns: a list of rhyming words
    """
    phones = phones_for_word(word)
    combined_rhymes = []
    if phones:
        for element in phones:
            combined_rhymes.append([w for w in rhyme_lookup.get(rhyming_part(
                element), []) if w != word])
        combined_rhymes = list(chain.from_iterable(combined_rhymes))
        unique_combined_rhymes = sorted(set(combined_rhymes))
        return unique_combined_rhymes
    else:
        return []
[ "def", "rhymes", "(", "word", ")", ":", "phones", "=", "phones_for_word", "(", "word", ")", "combined_rhymes", "=", "[", "]", "if", "phones", ":", "for", "element", "in", "phones", ":", "combined_rhymes", ".", "append", "(", "[", "w", "for", "w", "in", "rhyme_lookup", ".", "get", "(", "rhyming_part", "(", "element", ")", ",", "[", "]", ")", "if", "w", "!=", "word", "]", ")", "combined_rhymes", "=", "list", "(", "chain", ".", "from_iterable", "(", "combined_rhymes", ")", ")", "unique_combined_rhymes", "=", "sorted", "(", "set", "(", "combined_rhymes", ")", ")", "return", "unique_combined_rhymes", "else", ":", "return", "[", "]" ]
avg_line_len: 33.259259
score: 21.333333
text:
def setCurrentQuery(self, query):
    """
    Sets the query for the current container widget.  This will only
    change the active container, not parent containers.  You should
    use the setQuery method to completely assign a query to this widget.

    :param      query | <orb.Query>
    """
    container = self.currentContainer()
    if container:
        container.setQuery(query)
[ "def", "setCurrentQuery", "(", "self", ",", "query", ")", ":", "container", "=", "self", ".", "currentContainer", "(", ")", "if", "container", ":", "container", ".", "setQuery", "(", "query", ")" ]
avg_line_len: 39.272727
score: 14.909091
text:
def element_tree_oai_records(tree, header_subs=None):
    """Take an ElementTree and converts the nodes into BibRecord records.

    This expects a clean OAI response with the tree root as ListRecords
    or GetRecord and record as the subtag like so:

    <ListRecords|GetRecord>
      <record>
        <header>
          <!-- Record Information -->
        </header>
        <metadata>
          <record>
            <!-- MARCXML -->
          </record>
        </metadata>
      </record>
      <record> ... </record>
    </ListRecords|GetRecord>

    :param tree: ElementTree object corresponding to GetRecord node from
                 OAI request
    :param header_subs: OAI header subfields, if any

    :yield: (record, is_deleted) A tuple, with first a BibRecord found and
            second a boolean value saying if this is a deleted record or
            not.
    """
    from .bibrecord import record_add_field, create_record

    if not header_subs:
        header_subs = []
    # Make it a tuple, this information should not be changed
    header_subs = tuple(header_subs)

    oai_records = tree.getroot()
    # Fixed: iterate the element directly; getchildren() is deprecated and
    # was removed in Python 3.9.
    for record_element in list(oai_records):
        header = record_element.find('header')

        # Add to OAI subfield
        datestamp = header.find('datestamp')
        identifier = header.find('identifier')
        identifier = identifier.text

        # The record's subfield is based on header information
        subs = list(header_subs)
        subs.append(("a", identifier))
        subs.append(("d", datestamp.text))

        if "status" in header.attrib and header.attrib["status"] == "deleted":
            # Record was deleted - create delete record
            deleted_record = {}
            record_add_field(deleted_record, "037", subfields=subs)
            yield deleted_record, True
        else:
            marc_root = record_element.find('metadata').find('record')
            marcxml = ET.tostring(marc_root, encoding="utf-8")
            record, status, errors = create_record(marcxml)
            if status == 1:
                # Add OAI request information
                record_add_field(record, "035", subfields=subs)
                yield record, False
[ "def", "element_tree_oai_records", "(", "tree", ",", "header_subs", "=", "None", ")", ":", "from", ".", "bibrecord", "import", "record_add_field", ",", "create_record", "if", "not", "header_subs", ":", "header_subs", "=", "[", "]", "# Make it a tuple, this information should not be changed", "header_subs", "=", "tuple", "(", "header_subs", ")", "oai_records", "=", "tree", ".", "getroot", "(", ")", "for", "record_element", "in", "oai_records", ".", "getchildren", "(", ")", ":", "header", "=", "record_element", ".", "find", "(", "'header'", ")", "# Add to OAI subfield", "datestamp", "=", "header", ".", "find", "(", "'datestamp'", ")", "identifier", "=", "header", ".", "find", "(", "'identifier'", ")", "identifier", "=", "identifier", ".", "text", "# The record's subfield is based on header information", "subs", "=", "list", "(", "header_subs", ")", "subs", ".", "append", "(", "(", "\"a\"", ",", "identifier", ")", ")", "subs", ".", "append", "(", "(", "\"d\"", ",", "datestamp", ".", "text", ")", ")", "if", "\"status\"", "in", "header", ".", "attrib", "and", "header", ".", "attrib", "[", "\"status\"", "]", "==", "\"deleted\"", ":", "# Record was deleted - create delete record", "deleted_record", "=", "{", "}", "record_add_field", "(", "deleted_record", ",", "\"037\"", ",", "subfields", "=", "subs", ")", "yield", "deleted_record", ",", "True", "else", ":", "marc_root", "=", "record_element", ".", "find", "(", "'metadata'", ")", ".", "find", "(", "'record'", ")", "marcxml", "=", "ET", ".", "tostring", "(", "marc_root", ",", "encoding", "=", "\"utf-8\"", ")", "record", ",", "status", ",", "errors", "=", "create_record", "(", "marcxml", ")", "if", "status", "==", "1", ":", "# Add OAI request information", "record_add_field", "(", "record", ",", "\"035\"", ",", "subfields", "=", "subs", ")", "yield", "record", ",", "False" ]
avg_line_len: 36.566667
score: 18.2
text:
def get_page_object_by_name(context, name):
    """
    **Arguments**

    ``name``
        name for object selection

    :return: selected object
    """
    selected_object = None
    try:
        for obj_type in context['page']['content']:
            for obj in context['page']['content'][obj_type]:
                if obj.name == name:
                    selected_object = obj
                    break
        if selected_object is None:
            # Fixed: iterate over ext_content's own keys; the original
            # iterated content's keys while indexing into ext_content.
            for obj_type in context['page']['ext_content']:
                for obj in context['page']['ext_content'][obj_type]:
                    if obj.name == name:
                        selected_object = obj
                        break
    except TypeError:
        pass
    return selected_object
[ "def", "get_page_object_by_name", "(", "context", ",", "name", ")", ":", "selected_object", "=", "None", "try", ":", "for", "obj_type", "in", "context", "[", "'page'", "]", "[", "'content'", "]", ":", "for", "obj", "in", "context", "[", "'page'", "]", "[", "'content'", "]", "[", "obj_type", "]", ":", "if", "obj", ".", "name", "==", "name", ":", "selected_object", "=", "obj", "break", "if", "selected_object", "is", "None", ":", "for", "obj_type", "in", "context", "[", "'page'", "]", "[", "'content'", "]", ":", "for", "obj", "in", "context", "[", "'page'", "]", "[", "'ext_content'", "]", "[", "obj_type", "]", ":", "if", "obj", ".", "name", "==", "name", ":", "selected_object", "=", "obj", "break", "except", "TypeError", ":", "pass", "return", "selected_object" ]
avg_line_len: 27.807692
score: 16.038462
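A hedged usage sketch with a fabricated context dict; SimpleNamespace stands in for whatever page objects carry a .name attribute:

from types import SimpleNamespace

ctx = {'page': {
    'content': {'blocks': [SimpleNamespace(name='hero')]},
    'ext_content': {'blocks': [SimpleNamespace(name='footer')]},
}}
print(get_page_object_by_name(ctx, 'hero').name)    # hero (found in content)
print(get_page_object_by_name(ctx, 'footer').name)  # footer (ext_content fallback)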
text:
def init_validator(required, cls, *additional_validators):
    """
    Create an attrs validator based on the cls provided and required setting.

    :param bool required: whether the field is required in a given model.
    :param cls: the expected class type of object value.
    :return: attrs validator chained correctly (e.g. optional(instance_of))
    """
    validator = validators.instance_of(cls)
    if additional_validators:
        additional_validators = list(additional_validators)
        additional_validators.append(validator)
        validator = composite(*additional_validators)
    return validator if required else validators.optional(validator)
[ "def", "init_validator", "(", "required", ",", "cls", ",", "*", "additional_validators", ")", ":", "validator", "=", "validators", ".", "instance_of", "(", "cls", ")", "if", "additional_validators", ":", "additional_validators", "=", "list", "(", "additional_validators", ")", "additional_validators", ".", "append", "(", "validator", ")", "validator", "=", "composite", "(", "*", "additional_validators", ")", "return", "validator", "if", "required", "else", "validators", ".", "optional", "(", "validator", ")" ]
avg_line_len: 43.466667
score: 20
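A usage sketch with attrs; note that composite is a project-local combinator not shown here, so a plausible stand-in is stubbed (it is only exercised when extra validators are passed):

import attr
from attr import validators

def composite(*vs):
    # hypothetical stand-in for the project-local combinator
    def _run(inst, attrib, value):
        for v in vs:
            v(inst, attrib, value)
    return _run

@attr.s
class Point:
    x = attr.ib(validator=init_validator(True, int))                  # required int
    y = attr.ib(validator=init_validator(False, int), default=None)   # optional int

Point(x=1)          # ok; y may stay None
# Point(x='one')    # would raise TypeError from instance_of(int)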
text:
def mremove(self, class_name, names):
    """
    Removes multiple components from the network.

    Removes them from component DataFrames.

    Parameters
    ----------
    class_name : string
        Component class name
    names : list-like
        Component names

    Examples
    --------
    >>> network.mremove("Line", ["line x", "line y"])

    """
    if class_name not in self.components:
        logger.error("Component class {} not found".format(class_name))
        return None
    if not isinstance(names, pd.Index):
        names = pd.Index(names)
    cls_df = self.df(class_name)
    cls_df.drop(names, inplace=True)
    pnl = self.pnl(class_name)
    for df in itervalues(pnl):
        df.drop(df.columns.intersection(names), axis=1, inplace=True)
[ "def", "mremove", "(", "self", ",", "class_name", ",", "names", ")", ":", "if", "class_name", "not", "in", "self", ".", "components", ":", "logger", ".", "error", "(", "\"Component class {} not found\"", ".", "format", "(", "class_name", ")", ")", "return", "None", "if", "not", "isinstance", "(", "names", ",", "pd", ".", "Index", ")", ":", "names", "=", "pd", ".", "Index", "(", "names", ")", "cls_df", "=", "self", ".", "df", "(", "class_name", ")", "cls_df", ".", "drop", "(", "names", ",", "inplace", "=", "True", ")", "pnl", "=", "self", ".", "pnl", "(", "class_name", ")", "for", "df", "in", "itervalues", "(", "pnl", ")", ":", "df", ".", "drop", "(", "df", ".", "columns", ".", "intersection", "(", "names", ")", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")" ]
avg_line_len: 24.470588
score: 20.470588
text:
def closeContentsWidget(self):
    """
    Closes the current contents widget.
    """
    widget = self.currentContentsWidget()
    if not widget:
        return

    widget.close()
    widget.setParent(None)
    widget.deleteLater()
[ "def", "closeContentsWidget", "(", "self", ")", ":", "widget", "=", "self", ".", "currentContentsWidget", "(", ")", "if", "(", "not", "widget", ")", ":", "return", "widget", ".", "close", "(", ")", "widget", ".", "setParent", "(", "None", ")", "widget", ".", "deleteLater", "(", ")" ]
avg_line_len: 26.181818
score: 10.545455
text:
def to(self, target: typing.Union[types.Message, types.Chat,
                                  types.base.Integer, types.base.String]):
    """
    Send to chat

    :param target: message or chat or id
    :return:
    """
    if isinstance(target, types.Message):
        chat_id = target.chat.id
    elif isinstance(target, types.Chat):
        chat_id = target.id
    elif isinstance(target, (int, str)):
        chat_id = target
    else:
        raise TypeError(f"Bad type of target. ({type(target)})")

    setattr(self, 'chat_id', chat_id)
    return self
[ "def", "to", "(", "self", ",", "target", ":", "typing", ".", "Union", "[", "types", ".", "Message", ",", "types", ".", "Chat", ",", "types", ".", "base", ".", "Integer", ",", "types", ".", "base", ".", "String", "]", ")", ":", "if", "isinstance", "(", "target", ",", "types", ".", "Message", ")", ":", "chat_id", "=", "target", ".", "chat", ".", "id", "elif", "isinstance", "(", "target", ",", "types", ".", "Chat", ")", ":", "chat_id", "=", "target", ".", "id", "elif", "isinstance", "(", "target", ",", "(", "int", ",", "str", ")", ")", ":", "chat_id", "=", "target", "else", ":", "raise", "TypeError", "(", "f\"Bad type of target. ({type(target)})\"", ")", "setattr", "(", "self", ",", "'chat_id'", ",", "chat_id", ")", "return", "self" ]
avg_line_len: 31.777778
score: 16.888889
text:
def _generate_url(self, regex, arguments):
    """
    Uses the regex (of the type defined in Django's url patterns) and the
    arguments to return a relative URL.

    For example, if the regex is '^/api/shreddr/job/(?P<id>[\d]+)$' and
    arguments is ['23'] then the return value would be '/api/shreddr/job/23'
    """
    regex_tokens = _split_regex(regex)
    result = ''
    for i in range(len(arguments)):
        result = result + str(regex_tokens[i]) + str(arguments[i])
    if len(regex_tokens) > len(arguments):
        result += regex_tokens[-1]
    return result
[ "def", "_generate_url", "(", "self", ",", "regex", ",", "arguments", ")", ":", "regex_tokens", "=", "_split_regex", "(", "regex", ")", "result", "=", "''", "for", "i", "in", "range", "(", "len", "(", "arguments", ")", ")", ":", "result", "=", "result", "+", "str", "(", "regex_tokens", "[", "i", "]", ")", "+", "str", "(", "arguments", "[", "i", "]", ")", "if", "len", "(", "regex_tokens", ")", ">", "len", "(", "arguments", ")", ":", "result", "+=", "regex_tokens", "[", "-", "1", "]", "return", "result" ]
avg_line_len: 45.846154
score: 16.923077
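_split_regex is not included in this snippet, so the sketch below assumes a plausible stand-in that splits the pattern around named capture groups and strips the ^/$ anchors; the expected output matches the docstring example:

import re

def _split_regex(regex):
    # hypothetical: '^/api/shreddr/job/(?P<id>[\d]+)$' -> ['/api/shreddr/job/', '']
    return [part.strip('^$') for part in re.split(r'\(\?P<\w+>[^)]*\)', regex)]

def _generate_url(regex, arguments):   # standalone variant of the method above
    regex_tokens = _split_regex(regex)
    result = ''
    for i in range(len(arguments)):
        result = result + str(regex_tokens[i]) + str(arguments[i])
    if len(regex_tokens) > len(arguments):
        result += regex_tokens[-1]
    return result

print(_generate_url(r'^/api/shreddr/job/(?P<id>[\d]+)$', ['23']))  # /api/shreddr/job/23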
text:
def visualRect(self, index):
    """
    Returns the visual rectangle for the inputted index.

    :param      index | <QModelIndex>

    :return     <QtCore.QRect>
    """
    rect = super(XTreeWidget, self).visualRect(index)
    item = self.itemFromIndex(index)
    if not rect.isNull() and item and item.isFirstColumnSpanned():
        vpos = self.viewport().mapFromParent(QtCore.QPoint(0, 0))
        rect.setX(vpos.x())
        rect.setWidth(self.width())
    # Single exit point; the original had a redundant early return here.
    return rect
[ "def", "visualRect", "(", "self", ",", "index", ")", ":", "rect", "=", "super", "(", "XTreeWidget", ",", "self", ")", ".", "visualRect", "(", "index", ")", "item", "=", "self", ".", "itemFromIndex", "(", "index", ")", "if", "not", "rect", ".", "isNull", "(", ")", "and", "item", "and", "item", ".", "isFirstColumnSpanned", "(", ")", ":", "vpos", "=", "self", ".", "viewport", "(", ")", ".", "mapFromParent", "(", "QtCore", ".", "QPoint", "(", "0", ",", "0", ")", ")", "rect", ".", "setX", "(", "vpos", ".", "x", "(", ")", ")", "rect", ".", "setWidth", "(", "self", ".", "width", "(", ")", ")", "return", "rect", "return", "rect" ]
avg_line_len: 35.1875
score: 14.0625
text:
def clear_commentarea_cache(comment):
    """
    Clean the plugin output cache of a rendered plugin.
    """
    parent = comment.content_object
    for instance in CommentsAreaItem.objects.parent(parent):
        instance.clear_cache()
[ "def", "clear_commentarea_cache", "(", "comment", ")", ":", "parent", "=", "comment", ".", "content_object", "for", "instance", "in", "CommentsAreaItem", ".", "objects", ".", "parent", "(", "parent", ")", ":", "instance", ".", "clear_cache", "(", ")" ]
avg_line_len: 33
score: 7.571429
text:
def _verify_any(self):
    """
    Verify that an initial request has been made, or failing that, that
    the request is in the session

    :raises: LTIException
    """
    log.debug('verify_any enter')

    # Check to see if there is a new LTI launch request incoming
    newrequest = False
    if flask_request.method == 'POST':
        params = flask_request.form.to_dict()
        initiation = "basic-lti-launch-request"
        if params.get("lti_message_type", None) == initiation:
            newrequest = True
            # Scrub the session of the old authentication
            for prop in LTI_PROPERTY_LIST:
                if session.get(prop, None):
                    del session[prop]
            session[LTI_SESSION_KEY] = False

    # Attempt the appropriate validation
    # Both of these methods raise LTIException as necessary
    if newrequest:
        self.verify_request()
    else:
        self._verify_session()
[ "def", "_verify_any", "(", "self", ")", ":", "log", ".", "debug", "(", "'verify_any enter'", ")", "# Check to see if there is a new LTI launch request incoming", "newrequest", "=", "False", "if", "flask_request", ".", "method", "==", "'POST'", ":", "params", "=", "flask_request", ".", "form", ".", "to_dict", "(", ")", "initiation", "=", "\"basic-lti-launch-request\"", "if", "params", ".", "get", "(", "\"lti_message_type\"", ",", "None", ")", "==", "initiation", ":", "newrequest", "=", "True", "# Scrub the session of the old authentication", "for", "prop", "in", "LTI_PROPERTY_LIST", ":", "if", "session", ".", "get", "(", "prop", ",", "None", ")", ":", "del", "session", "[", "prop", "]", "session", "[", "LTI_SESSION_KEY", "]", "=", "False", "# Attempt the appropriate validation", "# Both of these methods raise LTIException as necessary", "if", "newrequest", ":", "self", ".", "verify_request", "(", ")", "else", ":", "self", ".", "_verify_session", "(", ")" ]
avg_line_len: 37.37037
score: 13.888889
text:
def bs_values_df(run_list, estimator_list, estimator_names, n_simulate,
                 **kwargs):
    """Computes a data frame of bootstrap resampled values.

    Parameters
    ----------
    run_list: list of dicts
        List of nested sampling run dicts.
    estimator_list: list of functions
        Estimators to apply to runs.
    estimator_names: list of strs
        Name of each func in estimator_list.
    n_simulate: int
        Number of bootstrap replications to use on each run.
    kwargs:
        Kwargs to pass to parallel_apply.

    Returns
    -------
    bs_values_df: pandas data frame
        Columns represent estimators and rows represent runs.
        Each cell contains a 1d array of bootstrap resampled values for
        the run and estimator.
    """
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'bs values'})
    assert len(estimator_list) == len(estimator_names), (
        # Fixed: the original message was missing the closing parenthesis
        # after estimator_names.
        'len(estimator_list) = {0} != len(estimator_names) = {1}'
        .format(len(estimator_list), len(estimator_names)))
    bs_values_list = pu.parallel_apply(
        nestcheck.error_analysis.run_bootstrap_values, run_list,
        func_args=(estimator_list,), func_kwargs={'n_simulate': n_simulate},
        tqdm_kwargs=tqdm_kwargs, **kwargs)
    df = pd.DataFrame()
    for i, name in enumerate(estimator_names):
        df[name] = [arr[i, :] for arr in bs_values_list]
    # Check there are the correct number of bootstrap replications in each
    # cell
    for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
        assert vals_shape == (n_simulate,), (
            'Should be n_simulate=' + str(n_simulate) + ' values in '
            'each cell. The cell contains array with shape '
            + str(vals_shape))
    return df
[ "def", "bs_values_df", "(", "run_list", ",", "estimator_list", ",", "estimator_names", ",", "n_simulate", ",", "*", "*", "kwargs", ")", ":", "tqdm_kwargs", "=", "kwargs", ".", "pop", "(", "'tqdm_kwargs'", ",", "{", "'desc'", ":", "'bs values'", "}", ")", "assert", "len", "(", "estimator_list", ")", "==", "len", "(", "estimator_names", ")", ",", "(", "'len(estimator_list) = {0} != len(estimator_names = {1}'", ".", "format", "(", "len", "(", "estimator_list", ")", ",", "len", "(", "estimator_names", ")", ")", ")", "bs_values_list", "=", "pu", ".", "parallel_apply", "(", "nestcheck", ".", "error_analysis", ".", "run_bootstrap_values", ",", "run_list", ",", "func_args", "=", "(", "estimator_list", ",", ")", ",", "func_kwargs", "=", "{", "'n_simulate'", ":", "n_simulate", "}", ",", "tqdm_kwargs", "=", "tqdm_kwargs", ",", "*", "*", "kwargs", ")", "df", "=", "pd", ".", "DataFrame", "(", ")", "for", "i", ",", "name", "in", "enumerate", "(", "estimator_names", ")", ":", "df", "[", "name", "]", "=", "[", "arr", "[", "i", ",", ":", "]", "for", "arr", "in", "bs_values_list", "]", "# Check there are the correct number of bootstrap replications in each cell", "for", "vals_shape", "in", "df", ".", "loc", "[", "0", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "shape", ")", ".", "values", ":", "assert", "vals_shape", "==", "(", "n_simulate", ",", ")", ",", "(", "'Should be n_simulate='", "+", "str", "(", "n_simulate", ")", "+", "' values in '", "+", "'each cell. The cell contains array with shape '", "+", "str", "(", "vals_shape", ")", ")", "return", "df" ]
avg_line_len: 40.833333
score: 18.309524
text:
def get(self, key=NOT_SET, index=NOT_SET, d=None):
    """Return value with given key or index.

    If no value is found, return d (None by default).
    """
    if index is NOT_SET and key is not NOT_SET:
        try:
            index, value = self._dict[key]
        except KeyError:
            return d
        else:
            return value
    elif index is not NOT_SET and key is NOT_SET:
        try:
            key, value = self._list[index]
        except IndexError:
            return d
        else:
            return value
    else:
        raise KEY_EQ_INDEX_ERROR
[ "def", "get", "(", "self", ",", "key", "=", "NOT_SET", ",", "index", "=", "NOT_SET", ",", "d", "=", "None", ")", ":", "if", "index", "is", "NOT_SET", "and", "key", "is", "not", "NOT_SET", ":", "try", ":", "index", ",", "value", "=", "self", ".", "_dict", "[", "key", "]", "except", "KeyError", ":", "return", "d", "else", ":", "return", "value", "elif", "index", "is", "not", "NOT_SET", "and", "key", "is", "NOT_SET", ":", "try", ":", "key", ",", "value", "=", "self", ".", "_list", "[", "index", "]", "except", "IndexError", ":", "return", "d", "else", ":", "return", "value", "else", ":", "raise", "KEY_EQ_INDEX_ERROR" ]
avg_line_len: 22.285714
score: 19.285714
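To make the behavior concrete, here is a self-contained restatement with the NOT_SET sentinel and error object assumed (both live elsewhere in the real module), plus the dual dict/list bookkeeping the method expects:

NOT_SET = object()
KEY_EQ_INDEX_ERROR = TypeError('pass exactly one of key or index')

class KeyIndexMap:
    def __init__(self):
        self._dict = {'a': (0, 1)}   # key -> (index, value)
        self._list = [('a', 1)]      # index -> (key, value)

    def get(self, key=NOT_SET, index=NOT_SET, d=None):
        if index is NOT_SET and key is not NOT_SET:
            try:
                index, value = self._dict[key]
            except KeyError:
                return d
            return value
        elif index is not NOT_SET and key is NOT_SET:
            try:
                key, value = self._list[index]
            except IndexError:
                return d
            return value
        raise KEY_EQ_INDEX_ERROR

m = KeyIndexMap()
print(m.get(key='a'))   # 1
print(m.get(index=0))   # 1
print(m.get(key='z'))   # None -- falls back to d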
text:
def setup_menus():
    '''setup console menus'''
    menu = MPMenuTop([])
    menu.add(MPMenuSubMenu('MAVExplorer',
                           items=[MPMenuItem('Settings', 'Settings', 'menuSettings'),
                                  MPMenuItem('Map', 'Map', '# map'),
                                  MPMenuItem('Save Graph', 'Save', '# save'),
                                  MPMenuItem('Reload Graphs', 'Reload', '# reload')]))
    menu.add(graph_menus())
    menu.add(MPMenuSubMenu('FlightMode', items=flightmode_menu()))
    mestate.console.set_menu(menu, menu_callback)
[ "def", "setup_menus", "(", ")", ":", "menu", "=", "MPMenuTop", "(", "[", "]", ")", "menu", ".", "add", "(", "MPMenuSubMenu", "(", "'MAVExplorer'", ",", "items", "=", "[", "MPMenuItem", "(", "'Settings'", ",", "'Settings'", ",", "'menuSettings'", ")", ",", "MPMenuItem", "(", "'Map'", ",", "'Map'", ",", "'# map'", ")", ",", "MPMenuItem", "(", "'Save Graph'", ",", "'Save'", ",", "'# save'", ")", ",", "MPMenuItem", "(", "'Reload Graphs'", ",", "'Reload'", ",", "'# reload'", ")", "]", ")", ")", "menu", ".", "add", "(", "graph_menus", "(", ")", ")", "menu", ".", "add", "(", "MPMenuSubMenu", "(", "'FlightMode'", ",", "items", "=", "flightmode_menu", "(", ")", ")", ")", "mestate", ".", "console", ".", "set_menu", "(", "menu", ",", "menu_callback", ")" ]
avg_line_len: 47.5
score: 24.5
text:
def data_item(self) -> DataItem:
    """Return the data item associated with this display panel.

    .. versionadded:: 1.0

    Scriptable: Yes
    """
    display_panel = self.__display_panel
    if not display_panel:
        return None
    data_item = display_panel.data_item
    return DataItem(data_item) if data_item else None
[ "def", "data_item", "(", "self", ")", "->", "DataItem", ":", "display_panel", "=", "self", ".", "__display_panel", "if", "not", "display_panel", ":", "return", "None", "data_item", "=", "display_panel", ".", "data_item", "return", "DataItem", "(", "data_item", ")", "if", "data_item", "else", "None" ]
avg_line_len: 29.833333
score: 14
text:
def safe_import_module(module_name):
    """
    Like :func:`importlib.import_module`, except it does not raise
    ``ImportError`` if the requested ``module_name`` was not found.
    """
    try:
        return import_module(module_name)
    except ImportError as e:
        m = re.match(r"No module named '([\w\.]+)'", str(e))
        if not m or not module_name.startswith(m.group(1)):
            raise e
[ "def", "safe_import_module", "(", "module_name", ")", ":", "try", ":", "return", "import_module", "(", "module_name", ")", "except", "ImportError", "as", "e", ":", "m", "=", "re", ".", "match", "(", "r\"No module named '([\\w\\.]+)'\"", ",", "str", "(", "e", ")", ")", "if", "not", "m", "or", "not", "module_name", ".", "startswith", "(", "m", ".", "group", "(", "1", ")", ")", ":", "raise", "e" ]
avg_line_len: 36.181818
score: 14.727273
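A quick usage sketch (assumes `import re` and `from importlib import import_module` are in scope, as the function body requires):

import re
from importlib import import_module

print(safe_import_module('json'))            # <module 'json' ...>
print(safe_import_module('no_such_module'))  # None -- the ImportError is swallowed
# An existing module whose *own* imports fail still re-raises, by design.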
text:
def binned_pixelrange(self, waverange, **kwargs):
    """Calculate the number of pixels within the given wavelength
    range and `binset`.

    Parameters
    ----------
    waverange : tuple of float or `~astropy.units.quantity.Quantity`
        Lower and upper limits of the desired wavelength range.
        If not a Quantity, assumed to be in Angstrom.

    kwargs : dict
        Keywords accepted by :func:`synphot.binning.pixel_range`.

    Returns
    -------
    npix : int
        Number of pixels.

    """
    x = units.validate_quantity(
        waverange, self._internal_wave_unit, equivalencies=u.spectral())
    return binning.pixel_range(self.binset.value, x.value, **kwargs)
[ "def", "binned_pixelrange", "(", "self", ",", "waverange", ",", "*", "*", "kwargs", ")", ":", "x", "=", "units", ".", "validate_quantity", "(", "waverange", ",", "self", ".", "_internal_wave_unit", ",", "equivalencies", "=", "u", ".", "spectral", "(", ")", ")", "return", "binning", ".", "pixel_range", "(", "self", ".", "binset", ".", "value", ",", "x", ".", "value", ",", "*", "*", "kwargs", ")" ]
avg_line_len: 33.590909
score: 22.954545
text:
def sendmail(self, msg_from, msg_to, msg):
    """Remember the recipients."""
    SMTP_dummy.msg_from = msg_from
    SMTP_dummy.msg_to = msg_to
    SMTP_dummy.msg = msg
[ "def", "sendmail", "(", "self", ",", "msg_from", ",", "msg_to", ",", "msg", ")", ":", "SMTP_dummy", ".", "msg_from", "=", "msg_from", "SMTP_dummy", ".", "msg_to", "=", "msg_to", "SMTP_dummy", ".", "msg", "=", "msg" ]
avg_line_len: 36
score: 4.4
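This is a test double: it records the last message on the class instead of sending anything. A hypothetical test sketch:

class SMTP_dummy:
    msg_from = msg_to = msg = None

    def sendmail(self, msg_from, msg_to, msg):
        """Remember the recipients."""
        SMTP_dummy.msg_from = msg_from
        SMTP_dummy.msg_to = msg_to
        SMTP_dummy.msg = msg

smtp = SMTP_dummy()
smtp.sendmail('a@example.com', ['b@example.com'], 'hello')
assert SMTP_dummy.msg_to == ['b@example.com']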
text:
def format_python2_stmts(python_stmts, show_tokens=False, showast=False,
                         showgrammar=False, compile_mode='exec'):
    """
    formats python2 statements
    """
    parser_debug = {'rules': False, 'transition': False,
                    'reduce': showgrammar,
                    'errorstack': True, 'context': True, 'dups': True}
    parsed = parse_python2(python_stmts, show_tokens=show_tokens,
                           parser_debug=parser_debug)
    assert parsed == 'file_input', 'Should have parsed grammar start'

    formatter = Python2Formatter()

    if showast:
        print(parsed)

    # What we've been waiting for: Generate source from AST!
    python2_formatted_str = formatter.traverse(parsed)

    return python2_formatted_str
[ "def", "format_python2_stmts", "(", "python_stmts", ",", "show_tokens", "=", "False", ",", "showast", "=", "False", ",", "showgrammar", "=", "False", ",", "compile_mode", "=", "'exec'", ")", ":", "parser_debug", "=", "{", "'rules'", ":", "False", ",", "'transition'", ":", "False", ",", "'reduce'", ":", "showgrammar", ",", "'errorstack'", ":", "True", ",", "'context'", ":", "True", ",", "'dups'", ":", "True", "}", "parsed", "=", "parse_python2", "(", "python_stmts", ",", "show_tokens", "=", "show_tokens", ",", "parser_debug", "=", "parser_debug", ")", "assert", "parsed", "==", "'file_input'", ",", "'Should have parsed grammar start'", "formatter", "=", "Python2Formatter", "(", ")", "if", "showast", ":", "print", "(", "parsed", ")", "# What we've been waiting for: Generate source from AST!", "python2_formatted_str", "=", "formatter", ".", "traverse", "(", "parsed", ")", "return", "python2_formatted_str" ]
avg_line_len: 34.227273
score: 21.590909
text:
def _addsub_int_array(self, other, op):
    """
    Add or subtract array-like of integers equivalent to applying
    `_time_shift` pointwise.

    Parameters
    ----------
    other : Index, ExtensionArray, np.ndarray
        integer-dtype
    op : {operator.add, operator.sub}

    Returns
    -------
    result : same class as self
    """
    # _addsub_int_array is overridden by PeriodArray
    assert not is_period_dtype(self)
    assert op in [operator.add, operator.sub]

    if self.freq is None:
        # GH#19123
        raise NullFrequencyError("Cannot shift with no freq")

    elif isinstance(self.freq, Tick):
        # easy case where we can convert to timedelta64 operation
        td = Timedelta(self.freq)
        return op(self, td * other)

    # We should only get here with DatetimeIndex; dispatch
    # to _addsub_offset_array
    assert not is_timedelta64_dtype(self)
    return op(self, np.array(other) * self.freq)
[ "def", "_addsub_int_array", "(", "self", ",", "other", ",", "op", ")", ":", "# _addsub_int_array is overriden by PeriodArray", "assert", "not", "is_period_dtype", "(", "self", ")", "assert", "op", "in", "[", "operator", ".", "add", ",", "operator", ".", "sub", "]", "if", "self", ".", "freq", "is", "None", ":", "# GH#19123", "raise", "NullFrequencyError", "(", "\"Cannot shift with no freq\"", ")", "elif", "isinstance", "(", "self", ".", "freq", ",", "Tick", ")", ":", "# easy case where we can convert to timedelta64 operation", "td", "=", "Timedelta", "(", "self", ".", "freq", ")", "return", "op", "(", "self", ",", "td", "*", "other", ")", "# We should only get here with DatetimeIndex; dispatch", "# to _addsub_offset_array", "assert", "not", "is_timedelta64_dtype", "(", "self", ")", "return", "op", "(", "self", ",", "np", ".", "array", "(", "other", ")", "*", "self", ".", "freq", ")" ]
avg_line_len: 31.75
score: 16.25
text:
def showtraceback(self, *args, **kwargs):
    """Display the exception that just occurred.

    We remove the first stack item because it is our own code.

    The output is written by self.write(), below.
    """
    try:
        type, value, tb = sys.exc_info()
        sys.last_type = type
        sys.last_value = value
        sys.last_traceback = tb
        tblist = traceback.extract_tb(tb)
        del tblist[:1]
        list = traceback.format_list(tblist)
        if list:
            list.insert(0, "Traceback (most recent call last):\n")
        list[len(list):] = traceback.format_exception_only(type, value)
    finally:
        tblist = tb = None
    # Fixed: the original used a bare map(self.write, list), which is lazy
    # on Python 3 and would write nothing; iterate explicitly instead.
    for line in list:
        self.write(line)
[ "def", "showtraceback", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "type", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "sys", ".", "last_type", "=", "type", "sys", ".", "last_value", "=", "value", "sys", ".", "last_traceback", "=", "tb", "tblist", "=", "traceback", ".", "extract_tb", "(", "tb", ")", "del", "tblist", "[", ":", "1", "]", "list", "=", "traceback", ".", "format_list", "(", "tblist", ")", "if", "list", ":", "list", ".", "insert", "(", "0", ",", "\"Traceback (most recent call last):\\n\"", ")", "list", "[", "len", "(", "list", ")", ":", "]", "=", "traceback", ".", "format_exception_only", "(", "type", ",", "value", ")", "finally", ":", "tblist", "=", "tb", "=", "None", "map", "(", "self", ".", "write", ",", "list", ")" ]
avg_line_len: 33.590909
score: 16.727273
text:
def prefix_all(self, prefix='#', *lines):
    """
    Same as :func:`~prefix`, for multiple lines.

    :param prefix: Dockerfile command to use, e.g. ``ENV`` or ``RUN``.
    :type prefix: unicode | str
    :param lines: Lines with arguments to be prefixed.
    :type lines: collections.Iterable[unicode | str]
    """
    for line in lines:
        if isinstance(line, (tuple, list)):
            self.prefix(prefix, *line)
        elif line:
            self.prefix(prefix, line)
        else:
            self.blank()
[ "def", "prefix_all", "(", "self", ",", "prefix", "=", "'#'", ",", "*", "lines", ")", ":", "for", "line", "in", "lines", ":", "if", "isinstance", "(", "line", ",", "(", "tuple", ",", "list", ")", ")", ":", "self", ".", "prefix", "(", "prefix", ",", "*", "line", ")", "elif", "line", ":", "self", ".", "prefix", "(", "prefix", ",", "line", ")", "else", ":", "self", ".", "blank", "(", ")" ]
avg_line_len: 35.0625
score: 12.6875
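A hedged sketch of how prefix_all fans out over tuples, plain lines, and empties, with minimal prefix/blank stand-ins that print Dockerfile-style lines (the real class buffers them); it assumes prefix_all above is defined at module level so the class can reuse it:

class Dockerfile:
    def prefix(self, prefix, *args):
        print(prefix, *args)   # stand-in: the real class buffers lines

    def blank(self):
        print()

    prefix_all = prefix_all    # reuse the function above

d = Dockerfile()
d.prefix_all('ENV', ('PATH', '/usr/local/bin'), 'DEBUG=1', '')
# ENV PATH /usr/local/bin
# ENV DEBUG=1
# (blank line)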
text:
def __process_parameter(self, param_name, param_type, ancestry=None):
    """Collect values for a given web service operation input parameter."""
    assert self.active()
    param_optional = param_type.optional()
    has_argument, value = self.__get_param_value(param_name)
    if has_argument:
        self.__params_with_arguments.add(param_name)
    self.__update_context(ancestry)
    self.__stack[-1].process_parameter(param_optional, value is not None)
    self.__external_param_processor(param_name, param_type,
                                    self.__in_choice_context(), value)
[ "def", "__process_parameter", "(", "self", ",", "param_name", ",", "param_type", ",", "ancestry", "=", "None", ")", ":", "assert", "self", ".", "active", "(", ")", "param_optional", "=", "param_type", ".", "optional", "(", ")", "has_argument", ",", "value", "=", "self", ".", "__get_param_value", "(", "param_name", ")", "if", "has_argument", ":", "self", ".", "__params_with_arguments", ".", "add", "(", "param_name", ")", "self", ".", "__update_context", "(", "ancestry", ")", "self", ".", "__stack", "[", "-", "1", "]", ".", "process_parameter", "(", "param_optional", ",", "value", "is", "not", "None", ")", "self", ".", "__external_param_processor", "(", "param_name", ",", "param_type", ",", "self", ".", "__in_choice_context", "(", ")", ",", "value", ")" ]
avg_line_len: 53.727273
score: 15.454545
text:
def list_symbols(self, all_symbols=False, snapshot=None, regex=None, **kwargs):
    """
    Return the symbols in this library.

    Parameters
    ----------
    all_symbols : `bool`
        If True returns all symbols under all snapshots, even if the symbol
        has been deleted in the current version (i.e. it exists under a
        snapshot... Default: False
    snapshot : `str`
        Return the symbols available under the snapshot.
    regex : `str`
        filter symbols by the passed in regular expression
    kwargs : kwarg keys are used as fields to query for symbols with
        metadata matching the kwargs query

    Returns
    -------
    String list of symbols in the library
    """
    query = {}
    if regex is not None:
        query['symbol'] = {'$regex': regex}
    if kwargs:
        for k, v in six.iteritems(kwargs):
            # TODO: this doesn't work as expected as it ignores the versions
            # with metadata.deleted set; as a result it will return symbols
            # with matching metadata which have been deleted.
            # Maybe better add a match step in the pipeline instead of
            # making it part of the query.
            query['metadata.' + k] = v

    if snapshot is not None:
        try:
            query['parent'] = self._snapshots.find_one({'name': snapshot})['_id']
        except TypeError:
            raise NoDataFoundException('No snapshot %s in library %s' %
                                       (snapshot, self._arctic_lib.get_name()))
    elif all_symbols:
        return self._versions.find(query).distinct('symbol')

    # Return just the symbols which aren't deleted in the 'trunk' of this
    # library
    pipeline = []
    if query:
        # Match based on user criteria first
        pipeline.append({'$match': query})
    pipeline.extend([
        # version_custom value is: 2*version + (0 if deleted else 1)
        # This is used to optimize aggregation query:
        # - avoid sorting
        # - be able to rely on the latest version (max) for the deleted
        #   status
        #
        # Be aware of that if you don't use custom sort or if use a sort
        # before $group which utilizes exactly an existing index, the $group
        # will do best effort to utilize this index:
        # - https://jira.mongodb.org/browse/SERVER-4507
        {'$group': {
            '_id': '$symbol',
            'version_custom': {
                '$max': {
                    '$add': [
                        {'$multiply': ['$version', 2]},
                        {'$cond': [{'$eq': ['$metadata.deleted', True]}, 1, 0]}
                    ]
                }
            },
        }},
        # Don't include symbols which are part of some snapshot, but really
        # deleted...
        {'$match': {'version_custom': {'$mod': [2, 0]}}}
    ])

    # We may hit the group memory limit (100MB), so use allowDiskUse to
    # circumvent this
    # - https://docs.mongodb.com/manual/reference/operator/aggregation/group/#group-memory-limit
    return sorted([x['_id'] for x in self._versions.aggregate(pipeline,
                                                              allowDiskUse=True)])
[ "def", "list_symbols", "(", "self", ",", "all_symbols", "=", "False", ",", "snapshot", "=", "None", ",", "regex", "=", "None", ",", "*", "*", "kwargs", ")", ":", "query", "=", "{", "}", "if", "regex", "is", "not", "None", ":", "query", "[", "'symbol'", "]", "=", "{", "'$regex'", ":", "regex", "}", "if", "kwargs", ":", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "kwargs", ")", ":", "# TODO: this doesn't work as expected as it ignores the versions with metadata.deleted set", "# as a result it will return symbols with matching metadata which have been deleted", "# Maybe better add a match step in the pipeline instead of making it part of the query", "query", "[", "'metadata.'", "+", "k", "]", "=", "v", "if", "snapshot", "is", "not", "None", ":", "try", ":", "query", "[", "'parent'", "]", "=", "self", ".", "_snapshots", ".", "find_one", "(", "{", "'name'", ":", "snapshot", "}", ")", "[", "'_id'", "]", "except", "TypeError", ":", "raise", "NoDataFoundException", "(", "'No snapshot %s in library %s'", "%", "(", "snapshot", ",", "self", ".", "_arctic_lib", ".", "get_name", "(", ")", ")", ")", "elif", "all_symbols", ":", "return", "self", ".", "_versions", ".", "find", "(", "query", ")", ".", "distinct", "(", "'symbol'", ")", "# Return just the symbols which aren't deleted in the 'trunk' of this library", "pipeline", "=", "[", "]", "if", "query", ":", "# Match based on user criteria first", "pipeline", ".", "append", "(", "{", "'$match'", ":", "query", "}", ")", "pipeline", ".", "extend", "(", "[", "# version_custom value is: 2*version + (0 if deleted else 1)", "# This is used to optimize aggregation query:", "# - avoid sorting", "# - be able to rely on the latest version (max) for the deleted status", "#", "# Be aware of that if you don't use custom sort or if use a sort before $group which utilizes", "# exactly an existing index, the $group will do best effort to utilize this index:", "# - https://jira.mongodb.org/browse/SERVER-4507", "{", "'$group'", ":", "{", "'_id'", ":", "'$symbol'", ",", "'version_custom'", ":", "{", "'$max'", ":", "{", "'$add'", ":", "[", "{", "'$multiply'", ":", "[", "'$version'", ",", "2", "]", "}", ",", "{", "'$cond'", ":", "[", "{", "'$eq'", ":", "[", "'$metadata.deleted'", ",", "True", "]", "}", ",", "1", ",", "0", "]", "}", "]", "}", "}", ",", "}", "}", ",", "# Don't include symbols which are part of some snapshot, but really deleted...", "{", "'$match'", ":", "{", "'version_custom'", ":", "{", "'$mod'", ":", "[", "2", ",", "0", "]", "}", "}", "}", "]", ")", "# We may hit the group memory limit (100MB), so use allowDiskUse to circumvent this", "# - https://docs.mongodb.com/manual/reference/operator/aggregation/group/#group-memory-limit", "return", "sorted", "(", "[", "x", "[", "'_id'", "]", "for", "x", "in", "self", ".", "_versions", ".", "aggregate", "(", "pipeline", ",", "allowDiskUse", "=", "True", ")", "]", ")" ]
46.071429
26.928571
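The version_custom trick in list_symbols above is easy to sanity-check outside MongoDB: encoding each version as 2*version + (1 if deleted else 0) and taking the per-symbol maximum makes the parity of that maximum carry the deleted flag of the highest version, so keeping even values keeps exactly the symbols whose latest version is live. A minimal sketch in plain Python (the rows are made up for illustration):

from collections import defaultdict

# Hypothetical stand-ins for documents in the versions collection:
# (symbol, version, deleted)
rows = [('EURUSD', 1, False), ('EURUSD', 2, False),
        ('GBPUSD', 1, False), ('GBPUSD', 2, True)]

best = defaultdict(int)
for symbol, version, deleted in rows:
    # Same encoding as the $group stage: 2*version + deleted flag.
    best[symbol] = max(best[symbol], 2 * version + (1 if deleted else 0))

# Same filter as the $match stage: keep even values, i.e. latest version not deleted.
print(sorted(s for s, v in best.items() if v % 2 == 0))  # ['EURUSD']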
def grey_transmittance(extinction_coefficient, molar_density, length, base=e): r'''Calculates the transmittance of a grey body, given the extinction coefficient of the material, its molar density, and the path length of the radiation. .. math:: \tau = base^{(-\epsilon \cdot l\cdot \rho_m )} Parameters ---------- extinction_coefficient : float The extinction coefficient of the material the radiation is passing at the modeled frequency, [m^2/mol] molar_density : float The molar density of the material the radiation is passing through, [mol/m^3] length : float The length of the body the radiation is transmitted through, [m] base : float, optional The exponent used in calculations; `e` is more theoretically sound but 10 is often used as a base by chemists, [-] Returns ------- transmittance : float The fraction of spectral radiance which is transmitted through a grey body (can be liquid, gas, or even solid ex. in the case of glasses) [-] Notes ----- For extinction coefficients, see the HITRAN database. They are temperature and pressure dependent for each chemical and phase. Examples -------- Overall transmission loss through 1 cm of precipitable water equivalent atmospheric water vapor at a frequency of 1.3 um [2]_: >>> grey_transmittance(3.8e-4, molar_density=55300, length=1e-2) 0.8104707721191062 References ---------- .. [1] Modest, Michael F. Radiative Heat Transfer, Third Edition. 3rd edition. New York: Academic Press, 2013. .. [2] Eldridge, Ralph G. "Water Vapor Absorption of Visible and Near Infrared Radiation." Applied Optics 6, no. 4 (April 1, 1967): 709-13. https://doi.org/10.1364/AO.6.000709. ''' transmittance = molar_density*extinction_coefficient*length return base**(-transmittance)
[ "def", "grey_transmittance", "(", "extinction_coefficient", ",", "molar_density", ",", "length", ",", "base", "=", "e", ")", ":", "transmittance", "=", "molar_density", "*", "extinction_coefficient", "*", "length", "return", "base", "**", "(", "-", "transmittance", ")" ]
37.392157
27.117647
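The closed form in grey_transmittance is simple enough to verify the docstring example with the standard library alone; a minimal check (the inputs are the ones from the example above):

import math

extinction_coefficient = 3.8e-4  # m^2/mol
molar_density = 55300.0          # mol/m^3
length = 1e-2                    # m

# tau = e**(-epsilon * rho_m * l), the default base=e case
tau = math.exp(-extinction_coefficient * molar_density * length)
print(tau)  # ~0.810471, matching grey_transmittance(3.8e-4, 55300, 1e-2)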
def set_sticker_position_in_set(self, sticker, position): """ Use this method to move a sticker in a set created by the bot to a specific position. Returns True on success. :param sticker: File identifier of the sticker. :param position: New sticker position in the set, zero-based. :return: True on success. """ return apihelper.set_sticker_position_in_set(self.token, sticker, position)
[ "def", "set_sticker_position_in_set", "(", "self", ",", "sticker", ",", "position", ")", ":", "return", "apihelper", ".", "set_sticker_position_in_set", "(", "self", ".", "token", ",", "sticker", ",", "position", ")" ]
43
24.5
def changes(since, out_file, eager): """Show changes since a specific date.""" root = get_root() history_data = defaultdict(lambda: {'lines': deque(), 'releasers': set()}) with chdir(root): result = run_command( ( 'git log "--pretty=format:%H %s" --date-order --date=iso8601 ' '--since="{}T00:00:00" */CHANGELOG.md'.format(since) ), capture=True, check=True, ) for result_line in result.stdout.splitlines(): commit_hash, commit_subject = result_line.split(' ', 1) if not eager and 'release' not in commit_subject.lower(): continue result = run_command( 'git show "--pretty=format:%an%n" -U0 {} */CHANGELOG.md'.format(commit_hash), capture=True, check=True ) # Example: # # <AUTHOR NAME> # diff --git a/<INTEGRATION NAME 1>/CHANGELOG.md b/<INTEGRATION NAME 1>/CHANGELOG.md # index 89b5a3441..9534019a9 100644 # --- a/<INTEGRATION NAME 1>/CHANGELOG.md # +++ b/<INTEGRATION NAME 1>/CHANGELOG.md # @@ -2,0 +3,5 @@ # +## <RELEASE VERSION> / <RELEASE DATE> # + # +* <ENTRY> # +* <ENTRY> # + # diff --git a/<INTEGRATION NAME 2>/CHANGELOG.md b/<INTEGRATION NAME 2>/CHANGELOG.md # index 89b5a3441..9534019a9 100644 # --- a/<INTEGRATION NAME 2>/CHANGELOG.md # +++ b/<INTEGRATION NAME 2>/CHANGELOG.md # @@ -2,0 +3,4 @@ # +## <RELEASE VERSION> / <RELEASE DATE> # + # +* <ENTRY> # + lines = deque(result.stdout.splitlines()) author_name = lines.popleft().strip() patches = [] for line in lines: if line: # New patch if line.startswith('diff --git'): patches.append([]) patches[-1].append(line) for patch in patches: integration = patch[0].split('/')[-2].strip() history_data[integration]['releasers'].add(author_name) additions = deque() for line in reversed(patch): if line.startswith('+'): line = line[1:] # Demote releases to h3 if line.startswith('##'): line = '#{}'.format(line) additions.append(line) elif line.startswith('@@'): break # Get rid of the header for new integrations if additions[-1].startswith('# '): additions.pop() # Get rid of blank lines to ensure consistency while not additions[0].strip(): additions.popleft() while not additions[-1].strip(): additions.pop() history_data[integration]['lines'].appendleft('') history_data[integration]['lines'].extendleft(additions) output_lines = ['# Changes since {}'.format(since), ''] for integration, history in sorted(iteritems(history_data)): display_name = load_manifest(integration).get('display_name', integration) output_lines.append('## {}'.format(display_name)) output_lines.append('released by: {}'.format(', '.join(sorted(history['releasers'])))) output_lines.append('') output_lines.extend(history['lines']) output = '\n'.join(output_lines) if out_file: write_file(out_file, output) else: echo_info(output)
[ "def", "changes", "(", "since", ",", "out_file", ",", "eager", ")", ":", "root", "=", "get_root", "(", ")", "history_data", "=", "defaultdict", "(", "lambda", ":", "{", "'lines'", ":", "deque", "(", ")", ",", "'releasers'", ":", "set", "(", ")", "}", ")", "with", "chdir", "(", "root", ")", ":", "result", "=", "run_command", "(", "(", "'git log \"--pretty=format:%H %s\" --date-order --date=iso8601 '", "'--since=\"{}T00:00:00\" */CHANGELOG.md'", ".", "format", "(", "since", ")", ")", ",", "capture", "=", "True", ",", "check", "=", "True", ",", ")", "for", "result_line", "in", "result", ".", "stdout", ".", "splitlines", "(", ")", ":", "commit_hash", ",", "commit_subject", "=", "result_line", ".", "split", "(", "' '", ",", "1", ")", "if", "not", "eager", "and", "'release'", "not", "in", "commit_subject", ".", "lower", "(", ")", ":", "continue", "result", "=", "run_command", "(", "'git show \"--pretty=format:%an%n\" -U0 {} */CHANGELOG.md'", ".", "format", "(", "commit_hash", ")", ",", "capture", "=", "True", ",", "check", "=", "True", ")", "# Example:", "#", "# <AUTHOR NAME>", "# diff --git a/<INTEGRATION NAME 1>/CHANGELOG.md b/<INTEGRATION NAME 1>/CHANGELOG.md", "# index 89b5a3441..9534019a9 100644", "# --- a/<INTEGRATION NAME 1>/CHANGELOG.md", "# +++ b/<INTEGRATION NAME 1>/CHANGELOG.md", "# @@ -2,0 +3,5 @@", "# +## <RELEASE VERSION> / <RELEASE DATE>", "# +", "# +* <ENTRY>", "# +* <ENTRY>", "# +", "# diff --git a/<INTEGRATION NAME 2>/CHANGELOG.md b/<INTEGRATION NAME 2>/CHANGELOG.md", "# index 89b5a3441..9534019a9 100644", "# --- a/<INTEGRATION NAME 2>/CHANGELOG.md", "# +++ b/<INTEGRATION NAME 2>/CHANGELOG.md", "# @@ -2,0 +3,4 @@", "# +## <RELEASE VERSION> / <RELEASE DATE>", "# +", "# +* <ENTRY>", "# +", "lines", "=", "deque", "(", "result", ".", "stdout", ".", "splitlines", "(", ")", ")", "author_name", "=", "lines", ".", "popleft", "(", ")", ".", "strip", "(", ")", "patches", "=", "[", "]", "for", "line", "in", "lines", ":", "if", "line", ":", "# New patch", "if", "line", ".", "startswith", "(", "'diff --git'", ")", ":", "patches", ".", "append", "(", "[", "]", ")", "patches", "[", "-", "1", "]", ".", "append", "(", "line", ")", "for", "patch", "in", "patches", ":", "integration", "=", "patch", "[", "0", "]", ".", "split", "(", "'/'", ")", "[", "-", "2", "]", ".", "strip", "(", ")", "history_data", "[", "integration", "]", "[", "'releasers'", "]", ".", "add", "(", "author_name", ")", "additions", "=", "deque", "(", ")", "for", "line", "in", "reversed", "(", "patch", ")", ":", "if", "line", ".", "startswith", "(", "'+'", ")", ":", "line", "=", "line", "[", "1", ":", "]", "# Demote releases to h3", "if", "line", ".", "startswith", "(", "'##'", ")", ":", "line", "=", "'#{}'", ".", "format", "(", "line", ")", "additions", ".", "append", "(", "line", ")", "elif", "line", ".", "startswith", "(", "'@@'", ")", ":", "break", "# Get rid of the header for new integrations", "if", "additions", "[", "-", "1", "]", ".", "startswith", "(", "'# '", ")", ":", "additions", ".", "pop", "(", ")", "# Get rid of blank lines to ensure consistency", "while", "not", "additions", "[", "0", "]", ".", "strip", "(", ")", ":", "additions", ".", "popleft", "(", ")", "while", "not", "additions", "[", "-", "1", "]", ".", "strip", "(", ")", ":", "additions", ".", "pop", "(", ")", "history_data", "[", "integration", "]", "[", "'lines'", "]", ".", "appendleft", "(", "''", ")", "history_data", "[", "integration", "]", "[", "'lines'", "]", ".", "extendleft", "(", "additions", ")", "output_lines", "=", "[", "'# Changes since 
{}'", ".", "format", "(", "since", ")", ",", "''", "]", "for", "integration", ",", "history", "in", "sorted", "(", "iteritems", "(", "history_data", ")", ")", ":", "display_name", "=", "load_manifest", "(", "integration", ")", ".", "get", "(", "'display_name'", ",", "integration", ")", "output_lines", ".", "append", "(", "'## {}'", ".", "format", "(", "display_name", ")", ")", "output_lines", ".", "append", "(", "'released by: {}'", ".", "format", "(", "', '", ".", "join", "(", "sorted", "(", "history", "[", "'releasers'", "]", ")", ")", ")", ")", "output_lines", ".", "append", "(", "''", ")", "output_lines", ".", "extend", "(", "history", "[", "'lines'", "]", ")", "output", "=", "'\\n'", ".", "join", "(", "output_lines", ")", "if", "out_file", ":", "write_file", "(", "out_file", ",", "output", ")", "else", ":", "echo_info", "(", "output", ")" ]
36.343137
21
def parse(self, request): """ Parse incoming request and return an email instance. Args: request: an HttpRequest object, containing the forwarded email, as per the SendGrid specification for inbound emails. Returns: an EmailMultiAlternatives instance, containing the parsed contents of the inbound email. TODO: non-UTF8 charset handling. TODO: handle headers. """ assert isinstance(request, HttpRequest), "Invalid request type: %s" % type(request) try: # from_email should never be a list (unless we change our API) from_email = self._get_addresses([_decode_POST_value(request, 'from')])[0] # ...but all these can and will be a list to_email = self._get_addresses([_decode_POST_value(request, 'to')]) cc = self._get_addresses([_decode_POST_value(request, 'cc', default='')]) bcc = self._get_addresses([_decode_POST_value(request, 'bcc', default='')]) subject = _decode_POST_value(request, 'subject') text = _decode_POST_value(request, 'text', default='') html = _decode_POST_value(request, 'html', default='') except IndexError as ex: raise RequestParseError( "Inbound request lacks a valid from address: %s." % request.get('from') ) except MultiValueDictKeyError as ex: raise RequestParseError("Inbound request is missing required value: %s." % ex) if "@" not in from_email: # Light sanity check for potential issues related to taking just the # first element of the 'from' address list raise RequestParseError("Could not get a valid from address out of: %s." % request) email = EmailMultiAlternatives( subject=subject, body=text, from_email=from_email, to=to_email, cc=cc, bcc=bcc, ) if html is not None and len(html) > 0: email.attach_alternative(html, "text/html") # TODO: this won't cope with big files - should really read in in chunks for n, f in list(request.FILES.items()): if f.size > self.max_file_size: logger.debug( "File attachment %s is too large to process (%sB)", f.name, f.size ) raise AttachmentTooLargeError( email=email, filename=f.name, size=f.size ) else: email.attach(f.name, f.read(), f.content_type) return email
[ "def", "parse", "(", "self", ",", "request", ")", ":", "assert", "isinstance", "(", "request", ",", "HttpRequest", ")", ",", "\"Invalid request type: %s\"", "%", "type", "(", "request", ")", "try", ":", "# from_email should never be a list (unless we change our API)", "from_email", "=", "self", ".", "_get_addresses", "(", "[", "_decode_POST_value", "(", "request", ",", "'from'", ")", "]", ")", "[", "0", "]", "# ...but all these can and will be a list", "to_email", "=", "self", ".", "_get_addresses", "(", "[", "_decode_POST_value", "(", "request", ",", "'to'", ")", "]", ")", "cc", "=", "self", ".", "_get_addresses", "(", "[", "_decode_POST_value", "(", "request", ",", "'cc'", ",", "default", "=", "''", ")", "]", ")", "bcc", "=", "self", ".", "_get_addresses", "(", "[", "_decode_POST_value", "(", "request", ",", "'bcc'", ",", "default", "=", "''", ")", "]", ")", "subject", "=", "_decode_POST_value", "(", "request", ",", "'subject'", ")", "text", "=", "_decode_POST_value", "(", "request", ",", "'text'", ",", "default", "=", "''", ")", "html", "=", "_decode_POST_value", "(", "request", ",", "'html'", ",", "default", "=", "''", ")", "except", "IndexError", "as", "ex", ":", "raise", "RequestParseError", "(", "\"Inbound request lacks a valid from address: %s.\"", "%", "request", ".", "get", "(", "'from'", ")", ")", "except", "MultiValueDictKeyError", "as", "ex", ":", "raise", "RequestParseError", "(", "\"Inbound request is missing required value: %s.\"", "%", "ex", ")", "if", "\"@\"", "not", "in", "from_email", ":", "# Light sanity check for potential issues related to taking just the", "# first element of the 'from' address list", "raise", "RequestParseError", "(", "\"Could not get a valid from address out of: %s.\"", "%", "request", ")", "email", "=", "EmailMultiAlternatives", "(", "subject", "=", "subject", ",", "body", "=", "text", ",", "from_email", "=", "from_email", ",", "to", "=", "to_email", ",", "cc", "=", "cc", ",", "bcc", "=", "bcc", ",", ")", "if", "html", "is", "not", "None", "and", "len", "(", "html", ")", ">", "0", ":", "email", ".", "attach_alternative", "(", "html", ",", "\"text/html\"", ")", "# TODO: this won't cope with big files - should really read in in chunks", "for", "n", ",", "f", "in", "list", "(", "request", ".", "FILES", ".", "items", "(", ")", ")", ":", "if", "f", ".", "size", ">", "self", ".", "max_file_size", ":", "logger", ".", "debug", "(", "\"File attachment %s is too large to process (%sB)\"", ",", "f", ".", "name", ",", "f", ".", "size", ")", "raise", "AttachmentTooLargeError", "(", "email", "=", "email", ",", "filename", "=", "f", ".", "name", ",", "size", "=", "f", ".", "size", ")", "else", ":", "email", ".", "attach", "(", "f", ".", "name", ",", "f", ".", "read", "(", ")", ",", "f", ".", "content_type", ")", "return", "email" ]
38.797101
24.014493
def move_roles(self, roles): """ Moves roles to this role config group. The roles can be moved from any role config group belonging to the same service. The role type of the destination group must match the role type of the roles. @param roles: The names of the roles to move. @return: List of roles which have been moved successfully. """ return move_roles(self._get_resource_root(), self.serviceRef.serviceName, self.name, roles, self.serviceRef.clusterName)
[ "def", "move_roles", "(", "self", ",", "roles", ")", ":", "return", "move_roles", "(", "self", ".", "_get_resource_root", "(", ")", ",", "self", ".", "serviceRef", ".", "serviceName", ",", "self", ".", "name", ",", "roles", ",", "self", ".", "serviceRef", ".", "clusterName", ")" ]
38
17.230769
def _set_defaults(self): """ Set configuration parameters for drawing guide """ valid_locations = {'top', 'bottom', 'left', 'right'} horizontal_locations = {'left', 'right'} get_property = self.theme.themeables.property margin_location_lookup = {'t': 'b', 'b': 't', 'l': 'r', 'r': 'l'} # label position self.label_position = self.label_position or 'right' if self.label_position not in valid_locations: msg = "label position '{}' is invalid" raise PlotnineError(msg.format(self.label_position)) # label margin # legend_text_legend or legend_text_colorbar name = 'legend_text_{}'.format( self.__class__.__name__.split('_')[-1]) loc = margin_location_lookup[self.label_position[0]] try: margin = get_property(name, 'margin') except KeyError: self._label_margin = 3 else: self._label_margin = margin.get_as(loc, 'pt') # direction of guide if self.direction is None: if self.label_position in horizontal_locations: self.direction = 'vertical' else: self.direction = 'horizontal' # title position if self.title_position is None: if self.direction == 'vertical': self.title_position = 'top' elif self.direction == 'horizontal': self.title_position = 'left' if self.title_position not in valid_locations: msg = "title position '{}' is invalid" raise PlotnineError(msg.format(self.title_position)) # title alignment tmp = 'left' if self.direction == 'vertical' else 'center' self._title_align = self._default('legend_title_align', tmp) # by default, direction of each guide depends on # the position all the guides try: position = get_property('legend_position') except KeyError: position = 'right' if position in {'top', 'bottom'}: tmp = 'horizontal' else: # left, right, (default) tmp = 'vertical' self.direction = self._default('legend_direction', tmp) # title margin loc = margin_location_lookup[self.title_position[0]] try: margin = get_property('legend_title', 'margin') except KeyError: self._title_margin = 8 else: self._title_margin = margin.get_as(loc, 'pt') # legend_margin try: self._legend_margin = get_property('legend_margin') except KeyError: self._legend_margin = 10 # legend_entry_spacing try: self._legend_entry_spacing_x = get_property( 'legend_entry_spacing_x') except KeyError: self._legend_entry_spacing_x = 5 try: self._legend_entry_spacing_y = get_property( 'legend_entry_spacing_y') except KeyError: self._legend_entry_spacing_y = 2
[ "def", "_set_defaults", "(", "self", ")", ":", "valid_locations", "=", "{", "'top'", ",", "'bottom'", ",", "'left'", ",", "'right'", "}", "horizontal_locations", "=", "{", "'left'", ",", "'right'", "}", "get_property", "=", "self", ".", "theme", ".", "themeables", ".", "property", "margin_location_lookup", "=", "{", "'t'", ":", "'b'", ",", "'b'", ":", "'t'", ",", "'l'", ":", "'r'", ",", "'r'", ":", "'l'", "}", "# label position", "self", ".", "label_position", "=", "self", ".", "label_position", "or", "'right'", "if", "self", ".", "label_position", "not", "in", "valid_locations", ":", "msg", "=", "\"label position '{}' is invalid\"", "raise", "PlotnineError", "(", "msg", ".", "format", "(", "self", ".", "label_position", ")", ")", "# label margin", "# legend_text_legend or legend_text_colorbar", "name", "=", "'legend_text_{}'", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ".", "split", "(", "'_'", ")", "[", "-", "1", "]", ")", "loc", "=", "margin_location_lookup", "[", "self", ".", "label_position", "[", "0", "]", "]", "try", ":", "margin", "=", "get_property", "(", "name", ",", "'margin'", ")", "except", "KeyError", ":", "self", ".", "_label_margin", "=", "3", "else", ":", "self", ".", "_label_margin", "=", "margin", ".", "get_as", "(", "loc", ",", "'pt'", ")", "# direction of guide", "if", "self", ".", "direction", "is", "None", ":", "if", "self", ".", "label_position", "in", "horizontal_locations", ":", "self", ".", "direction", "=", "'vertical'", "else", ":", "self", ".", "direction", "=", "'horizontal'", "# title position", "if", "self", ".", "title_position", "is", "None", ":", "if", "self", ".", "direction", "==", "'vertical'", ":", "self", ".", "title_position", "=", "'top'", "elif", "self", ".", "direction", "==", "'horizontal'", ":", "self", ".", "title_position", "=", "'left'", "if", "self", ".", "title_position", "not", "in", "valid_locations", ":", "msg", "=", "\"title position '{}' is invalid\"", "raise", "PlotnineError", "(", "msg", ".", "format", "(", "self", ".", "title_position", ")", ")", "# title alignment", "tmp", "=", "'left'", "if", "self", ".", "direction", "==", "'vertical'", "else", "'center'", "self", ".", "_title_align", "=", "self", ".", "_default", "(", "'legend_title_align'", ",", "tmp", ")", "# by default, direction of each guide depends on", "# the position all the guides", "try", ":", "position", "=", "get_property", "(", "'legend_position'", ")", "except", "KeyError", ":", "position", "=", "'right'", "if", "position", "in", "{", "'top'", ",", "'bottom'", "}", ":", "tmp", "=", "'horizontal'", "else", ":", "# left, right, (default)", "tmp", "=", "'vertical'", "self", ".", "direction", "=", "self", ".", "_default", "(", "'legend_direction'", ",", "tmp", ")", "# title margin", "loc", "=", "margin_location_lookup", "[", "self", ".", "title_position", "[", "0", "]", "]", "try", ":", "margin", "=", "get_property", "(", "'legend_title'", ",", "'margin'", ")", "except", "KeyError", ":", "self", ".", "_title_margin", "=", "8", "else", ":", "self", ".", "_title_margin", "=", "margin", ".", "get_as", "(", "loc", ",", "'pt'", ")", "# legend_margin", "try", ":", "self", ".", "_legend_margin", "=", "get_property", "(", "'legend_margin'", ")", "except", "KeyError", ":", "self", ".", "_legend_margin", "=", "10", "# legend_entry_spacing", "try", ":", "self", ".", "_legend_entry_spacing_x", "=", "get_property", "(", "'legend_entry_spacing_x'", ")", "except", "KeyError", ":", "self", ".", "_legend_entry_spacing_x", "=", "5", "try", ":", "self", ".", 
"_legend_entry_spacing_y", "=", "get_property", "(", "'legend_entry_spacing_y'", ")", "except", "KeyError", ":", "self", ".", "_legend_entry_spacing_y", "=", "2" ]
34.426966
17.078652
def image_get(fingerprint, remote_addr=None, cert=None, key=None, verify_cert=True, _raw=False): ''' Get an image by its fingerprint fingerprint : The fingerprint of the image to retrieve remote_addr : A URL to a remote server; you also have to give cert and key if you provide remote_addr and it's a TCP address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Whether to verify the cert; this is True by default, but in most cases you want to turn it off as LXD normally uses self-signed certificates. _raw : False Return the raw pylxd object or a dict of it? CLI Examples: .. code-block:: bash $ salt '*' lxd.image_get <fingerprint> ''' client = pylxd_client_get(remote_addr, cert, key, verify_cert) image = None try: image = client.images.get(fingerprint) except pylxd.exceptions.LXDAPIException: raise SaltInvocationError( 'Image with fingerprint \'{0}\' not found'.format(fingerprint) ) if _raw: return image return _pylxd_model_to_dict(image)
[ "def", "image_get", "(", "fingerprint", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ",", "_raw", "=", "False", ")", ":", "client", "=", "pylxd_client_get", "(", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ")", "image", "=", "None", "try", ":", "image", "=", "client", ".", "images", ".", "get", "(", "fingerprint", ")", "except", "pylxd", ".", "exceptions", ".", "LXDAPIException", ":", "raise", "SaltInvocationError", "(", "'Image with fingerprint \\'{0}\\' not found'", ".", "format", "(", "fingerprint", ")", ")", "if", "_raw", ":", "return", "image", "return", "_pylxd_model_to_dict", "(", "image", ")" ]
25.491525
22.033898
def equals(self, other): """Field-based equality for SSAEvents.""" if isinstance(other, SSAEvent): return self.as_dict() == other.as_dict() else: raise TypeError("Cannot compare to non-SSAEvent object")
[ "def", "equals", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "SSAEvent", ")", ":", "return", "self", ".", "as_dict", "(", ")", "==", "other", ".", "as_dict", "(", ")", "else", ":", "raise", "TypeError", "(", "\"Cannot compare to non-SSAEvent object\"", ")" ]
40.833333
14
def stringify(self, string, phrases, parent=None): """ Stringifies phrases. After parsing of the string via self.parse(), this method takes the escaped string and the list of phrases returned by self.parse() and replaces the original phrases (with tags) with the Phrase-objects in the list and adds the appropriate flag-combinations as determined by the string or the position of the phrase (the string if it's in self.always, i.e. an 'always' argument). This method also works recursively to handle nested phrases (and resetting of parent-phrase styles). Arguments: string (str): The escaped string returned by self.parse(). phrases (list): The list of Phrase-objects returned by self.parse(). parent (Phrase): For recursive calls, the current parent Phrase. Returns: The finished, beautifully beautified string. Raises: errors.ArgumentError: If more positional arguments are requested than were supplied. """ last_tag = 0 beauty = "" for phrase in phrases: beauty += string[last_tag : phrase.opening] if phrase.string in self.always and not phrase.override: phrase.style = self.always[phrase.string] if phrase.arguments: combination = 0 for i in phrase.arguments: try: combination |= self.positional[i] except IndexError: raise errors.ArgumentError("Positional argument '{0}' " "is out of range" "!".format(i)) phrase.style |= combination elif (phrase.string not in self.always or phrase.increment or phrase.override): try: combination = self.positional[self.counter] if phrase.increment or not phrase.override: self.counter += 1 except IndexError: self.raise_not_enough_arguments(phrase.string) phrase.style |= combination phrase.style = flags.codify(phrase.style) if phrase.nested: phrase.string = self.stringify(phrase.string, phrase.nested, phrase) # After a nested phrase is over, we reset the style to the # parent style, this gives the notion of nested styles. reset = parent.style if parent else "" # \033[ signifies the start of a command-line escape-sequence beauty += "\033[{0}m{1}\033[0;{2}m".format(phrase.style, phrase, reset) last_tag = phrase.closing + 1 beauty += string[last_tag:] return beauty
[ "def", "stringify", "(", "self", ",", "string", ",", "phrases", ",", "parent", "=", "None", ")", ":", "last_tag", "=", "0", "beauty", "=", "\"\"", "for", "phrase", "in", "phrases", ":", "beauty", "+=", "string", "[", "last_tag", ":", "phrase", ".", "opening", "]", "if", "phrase", ".", "string", "in", "self", ".", "always", "and", "not", "phrase", ".", "override", ":", "phrase", ".", "style", "=", "self", ".", "always", "[", "phrase", ".", "string", "]", "if", "phrase", ".", "arguments", ":", "combination", "=", "0", "for", "i", "in", "phrase", ".", "arguments", ":", "try", ":", "combination", "|=", "self", ".", "positional", "[", "i", "]", "except", "IndexError", ":", "raise", "errors", ".", "ArgumentError", "(", "\"Positional argument '{0}' \"", "\"is out of range\"", "\"!\"", ".", "format", "(", "i", ")", ")", "phrase", ".", "style", "|=", "combination", "elif", "(", "phrase", ".", "string", "not", "in", "self", ".", "always", "or", "phrase", ".", "increment", "or", "phrase", ".", "override", ")", ":", "try", ":", "combination", "=", "self", ".", "positional", "[", "self", ".", "counter", "]", "if", "phrase", ".", "increment", "or", "not", "phrase", ".", "override", ":", "self", ".", "counter", "+=", "1", "except", "IndexError", ":", "self", ".", "raise_not_enough_arguments", "(", "phrase", ".", "string", ")", "phrase", ".", "style", "|=", "combination", "phrase", ".", "style", "=", "flags", ".", "codify", "(", "phrase", ".", "style", ")", "if", "phrase", ".", "nested", ":", "phrase", ".", "string", "=", "self", ".", "stringify", "(", "phrase", ".", "string", ",", "phrase", ".", "nested", ",", "phrase", ")", "# After a nested phrase is over, we reset the style to the", "# parent style, this gives the notion of nested styles.", "reset", "=", "parent", ".", "style", "if", "parent", "else", "\"\"", "# \\033[ signifies the start of a command-line escape-sequence", "beauty", "+=", "\"\\033[{0}m{1}\\033[0;{2}m\"", ".", "format", "(", "phrase", ".", "style", ",", "phrase", ",", "reset", ")", "last_tag", "=", "phrase", ".", "closing", "+", "1", "beauty", "+=", "string", "[", "last_tag", ":", "]", "return", "beauty" ]
28.365854
23.02439
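The \033[{0}m ... \033[0;{2}m strings that stringify emits are standard ANSI SGR escape sequences, so the effect can be checked directly in any terminal that supports them; a quick standalone demo (1 and 31 are the standard SGR codes for bold and red):

# 1 = bold, 31 = red foreground, 0 = reset -- standard SGR parameters.
print("\033[1;31mbold red\033[0m back to normal")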
def new_metric(self, meta): """ Create and register a metric, find subscribers for this metric (using meta as a filter) and subscribe them. Returns: metric: an instance of one of the `available_metrics` classes """ type_ = meta.get('type') if not type_: raise ValueError('Metric type should be defined.') if type_ in available_metrics: metric_obj = available_metrics[type_](meta, self.routing_queue) # create metric object metric_meta = pd.DataFrame({metric_obj.local_id: meta}).T # create metric meta self.metrics_meta = self.metrics_meta.append(metric_meta) # register metric meta self.metrics[metric_obj.local_id] = metric_obj # register metric object # find subscribers for this metric this_metric_subscribers = self.__reversed_filter(self.subscribers, meta) if this_metric_subscribers.empty: logger.debug('subscriber for metric %s not found', metric_obj.local_id) else: logger.debug('Found subscribers for this metric, subscribing...: %s', this_metric_subscribers) # attach this metric id to discovered subscribers and select id <-> callbacks this_metric_subscribers['id'] = metric_obj.local_id found_callbacks = this_metric_subscribers[['id', 'callback']].set_index('id') # add this metric callbacks to DataManager's callbacks self.callbacks = self.callbacks.append(found_callbacks) return metric_obj else: raise NotImplementedError('Unknown metric type: %s' % type_)
[ "def", "new_metric", "(", "self", ",", "meta", ")", ":", "type_", "=", "meta", ".", "get", "(", "'type'", ")", "if", "not", "type_", ":", "raise", "ValueError", "(", "'Metric type should be defined.'", ")", "if", "type_", "in", "available_metrics", ":", "metric_obj", "=", "available_metrics", "[", "type_", "]", "(", "meta", ",", "self", ".", "routing_queue", ")", "# create metric object", "metric_meta", "=", "pd", ".", "DataFrame", "(", "{", "metric_obj", ".", "local_id", ":", "meta", "}", ")", ".", "T", "# create metric meta", "self", ".", "metrics_meta", "=", "self", ".", "metrics_meta", ".", "append", "(", "metric_meta", ")", "# register metric meta", "self", ".", "metrics", "[", "metric_obj", ".", "local_id", "]", "=", "metric_obj", "# register metric object", "# find subscribers for this metric", "this_metric_subscribers", "=", "self", ".", "__reversed_filter", "(", "self", ".", "subscribers", ",", "meta", ")", "if", "this_metric_subscribers", ".", "empty", ":", "logger", ".", "debug", "(", "'subscriber for metric %s not found'", ",", "metric_obj", ".", "local_id", ")", "else", ":", "logger", ".", "debug", "(", "'Found subscribers for this metric, subscribing...: %s'", ",", "this_metric_subscribers", ")", "# attach this metric id to discovered subscribers and select id <-> callbacks", "this_metric_subscribers", "[", "'id'", "]", "=", "metric_obj", ".", "local_id", "found_callbacks", "=", "this_metric_subscribers", "[", "[", "'id'", ",", "'callback'", "]", "]", ".", "set_index", "(", "'id'", ")", "# add this metric callbacks to DataManager's callbacks", "self", ".", "callbacks", "=", "self", ".", "callbacks", ".", "append", "(", "found_callbacks", ")", "return", "metric_obj", "else", ":", "raise", "NotImplementedError", "(", "'Unknown metric type: %s'", "%", "type_", ")" ]
51.53125
29.15625
def update_monitor(self): """Update the monitor and monitor status from the ZM server.""" result = self._client.get_state(self._monitor_url) self._raw_result = result['monitor']
[ "def", "update_monitor", "(", "self", ")", ":", "result", "=", "self", ".", "_client", ".", "get_state", "(", "self", ".", "_monitor_url", ")", "self", ".", "_raw_result", "=", "result", "[", "'monitor'", "]" ]
49.5
9.25
def emit(self, span_datas): """ :type span_datas: list of :class: `~opencensus.trace.span_data.SpanData` :param list of opencensus.trace.span_data.SpanData span_datas: SpanData tuples to emit """ envelopes = [self.span_data_to_envelope(sd) for sd in span_datas] result = self._transmit(envelopes) if result > 0: self.storage.put(envelopes, result)
[ "def", "emit", "(", "self", ",", "span_datas", ")", ":", "envelopes", "=", "[", "self", ".", "span_data_to_envelope", "(", "sd", ")", "for", "sd", "in", "span_datas", "]", "result", "=", "self", ".", "_transmit", "(", "envelopes", ")", "if", "result", ">", "0", ":", "self", ".", "storage", ".", "put", "(", "envelopes", ",", "result", ")" ]
39
10.818182
def tell(self): """Returns the current position of read head. """ pos = ctypes.c_size_t() check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos))) return pos.value
[ "def", "tell", "(", "self", ")", ":", "pos", "=", "ctypes", ".", "c_size_t", "(", ")", "check_call", "(", "_LIB", ".", "MXRecordIOReaderTell", "(", "self", ".", "handle", ",", "ctypes", ".", "byref", "(", "pos", ")", ")", ")", "return", "pos", ".", "value" ]
35.166667
14.5
def toplevel(func): """A sync tasklet that sets a fresh default Context. Use this for toplevel view functions such as webapp.RequestHandler.get() or Django view functions. """ synctaskletfunc = synctasklet(func) # wrap at declaration time. @utils.wrapping(func) def add_context_wrapper(*args, **kwds): # pylint: disable=invalid-name __ndb_debug__ = utils.func_info(func) _state.clear_all_pending() # Create and install a new context. ctx = make_default_context() try: set_context(ctx) return synctaskletfunc(*args, **kwds) finally: set_context(None) ctx.flush().check_success() eventloop.run() # Ensure writes are flushed, etc. return add_context_wrapper
[ "def", "toplevel", "(", "func", ")", ":", "synctaskletfunc", "=", "synctasklet", "(", "func", ")", "# wrap at declaration time.", "@", "utils", ".", "wrapping", "(", "func", ")", "def", "add_context_wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# pylint: disable=invalid-name", "__ndb_debug__", "=", "utils", ".", "func_info", "(", "func", ")", "_state", ".", "clear_all_pending", "(", ")", "# Create and install a new context.", "ctx", "=", "make_default_context", "(", ")", "try", ":", "set_context", "(", "ctx", ")", "return", "synctaskletfunc", "(", "*", "args", ",", "*", "*", "kwds", ")", "finally", ":", "set_context", "(", "None", ")", "ctx", ".", "flush", "(", ")", ".", "check_success", "(", ")", "eventloop", ".", "run", "(", ")", "# Ensure writes are flushed, etc.", "return", "add_context_wrapper" ]
30.869565
14.130435
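Typical usage of toplevel is on an entry-point handler method, where it guarantees pending async NDB work completes before the request finishes; a hedged sketch of the pattern (webapp2-style handler; the Visit model and handler are hypothetical):

import webapp2  # assumed available in a GAE runtime

class Visit(ndb.Model):  # assumes `ndb` is the package this decorator lives in
    when = ndb.DateTimeProperty(auto_now_add=True)

class MainHandler(webapp2.RequestHandler):
    @toplevel  # fresh Context; pending async writes are flushed when get() returns
    def get(self):
        Visit().put_async()  # fire-and-forget write; toplevel ensures it lands
        self.response.write('ok')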
def try_int(o:Any)->Any: "Try to convert `o` to int, default to `o` if not possible." # NB: single-item rank-1 array/tensor can be converted to int, but we don't want to do this if isinstance(o, (np.ndarray,Tensor)): return o if o.ndim else int(o) if isinstance(o, collections.Sized) or getattr(o,'__array_interface__',False): return o try: return int(o) except: return o
[ "def", "try_int", "(", "o", ":", "Any", ")", "->", "Any", ":", "# NB: single-item rank-1 array/tensor can be converted to int, but we don't want to do this", "if", "isinstance", "(", "o", ",", "(", "np", ".", "ndarray", ",", "Tensor", ")", ")", ":", "return", "o", "if", "o", ".", "ndim", "else", "int", "(", "o", ")", "if", "isinstance", "(", "o", ",", "collections", ".", "Sized", ")", "or", "getattr", "(", "o", ",", "'__array_interface__'", ",", "False", ")", ":", "return", "o", "try", ":", "return", "int", "(", "o", ")", "except", ":", "return", "o" ]
55.571429
31
def get_requires(self, requires_types): """Extracts requires of the given types from the metadata file, filtering out Windows-specific requires. """ if not isinstance(requires_types, list): requires_types = list(requires_types) extracted_requires = [] for requires_name in requires_types: for requires in self.json_metadata.get(requires_name, []): if 'win' in requires.get('environment', {}): continue extracted_requires.extend(requires['requires']) return extracted_requires
[ "def", "get_requires", "(", "self", ",", "requires_types", ")", ":", "if", "not", "isinstance", "(", "requires_types", ",", "list", ")", ":", "requires_types", "=", "list", "(", "requires_types", ")", "extracted_requires", "=", "[", "]", "for", "requires_name", "in", "requires_types", ":", "for", "requires", "in", "self", ".", "json_metadata", ".", "get", "(", "requires_name", ",", "[", "]", ")", ":", "if", "'win'", "in", "requires", ".", "get", "(", "'environment'", ",", "{", "}", ")", ":", "continue", "extracted_requires", ".", "extend", "(", "requires", "[", "'requires'", "]", ")", "return", "extracted_requires" ]
44.615385
10.538462
def pop_rule_nodes(self) -> bool: """Pop the context variables that store rule nodes""" self.rule_nodes = self.rule_nodes.parents self.tag_cache = self.tag_cache.parents self.id_cache = self.id_cache.parents return True
[ "def", "pop_rule_nodes", "(", "self", ")", "->", "bool", ":", "self", ".", "rule_nodes", "=", "self", ".", "rule_nodes", ".", "parents", "self", ".", "tag_cache", "=", "self", ".", "tag_cache", ".", "parents", "self", ".", "id_cache", "=", "self", ".", "id_cache", ".", "parents", "return", "True" ]
41.5
8.166667
def earthsun_distance(unixtime, delta_t, numthreads): """ Calculates the distance from the earth to the sun using the NREL SPA algorithm described in [1]. Parameters ---------- unixtime : numpy array Array of unix/epoch timestamps to calculate solar position for. Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC. A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9 delta_t : float Difference between terrestrial time and UT. USNO has tables. numthreads : int Number of threads to use for calculation (if using numba) Returns ------- R : array Earth-Sun distance in AU. References ---------- [1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar radiation applications. Technical report: NREL/TP-560- 34302. Golden, USA, http://www.nrel.gov. """ R = solar_position(unixtime, 0, 0, 0, 0, 0, delta_t, 0, numthreads, esd=True)[0] return R
[ "def", "earthsun_distance", "(", "unixtime", ",", "delta_t", ",", "numthreads", ")", ":", "R", "=", "solar_position", "(", "unixtime", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "delta_t", ",", "0", ",", "numthreads", ",", "esd", "=", "True", ")", "[", "0", "]", "return", "R" ]
31.5
24.1875
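A hedged usage sketch following the docstring's own unixtime conversion (assumes numpy and pandas are available, as elsewhere in this module; delta_t=67.0 is a typical recent value from the USNO tables):

import numpy as np
import pandas as pd

times = pd.date_range('2020-01-01', periods=3, freq='D', tz='UTC')
unixtime = times.astype(np.int64) / 10**9  # conversion suggested in the docstring
R = earthsun_distance(unixtime, delta_t=67.0, numthreads=4)
print(R)  # Earth-Sun distance in AU per timestamp; ~0.983 near perihelion in early January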
def get_named_tensor(self, name): """ Returns a named tensor if available. Returns: valid: True if named tensor found, False otherwise tensor: If valid, will be a tensor, otherwise None """ if name in self.named_tensors: return True, self.named_tensors[name] else: return False, None
[ "def", "get_named_tensor", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "named_tensors", ":", "return", "True", ",", "self", ".", "named_tensors", "[", "name", "]", "else", ":", "return", "False", ",", "None" ]
30.75
13.916667
def get_id_attr(self, value): """Check if value has id attribute and return it. :param value: The value to get id from. :return: The value.id. """ if not hasattr(value, "id") and hasattr(value, "value"): value = value.value return value.id
[ "def", "get_id_attr", "(", "self", ",", "value", ")", ":", "if", "not", "hasattr", "(", "value", ",", "\"id\"", ")", "and", "hasattr", "(", "value", ",", "\"value\"", ")", ":", "value", "=", "value", ".", "value", "return", "value", ".", "id" ]
32.444444
13.111111
def roundedSpecClass(self): """ Spectral class with the class number rounded, i.e. A8.7V is A9 (note numpy rounds halves to even) """ try: classnumber = str(int(np.around(self.classNumber))) except TypeError: classnumber = str(self.classNumber) return self.classLetter + classnumber
[ "def", "roundedSpecClass", "(", "self", ")", ":", "try", ":", "classnumber", "=", "str", "(", "int", "(", "np", ".", "around", "(", "self", ".", "classNumber", ")", ")", ")", "except", "TypeError", ":", "classnumber", "=", "str", "(", "self", ".", "classNumber", ")", "return", "self", ".", "classLetter", "+", "classnumber" ]
36.25
16.375
def open_image(fn): """ Opens an image using OpenCV given the file path. Arguments: fn: the file path of the image Returns: The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0 """ flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR if not os.path.exists(fn) and not str(fn).startswith("http"): raise OSError('No such file or directory: {}'.format(fn)) elif os.path.isdir(fn) and not str(fn).startswith("http"): raise OSError('Is a directory: {}'.format(fn)) elif isdicom(fn): slice = pydicom.read_file(fn) if slice.PhotometricInterpretation.startswith('MONOCHROME'): # Make a fake RGB image im = np.stack([slice.pixel_array]*3,-1) return im / ((1 << slice.BitsStored)-1) else: # No support for RGB yet, as it involves various color spaces. # It shouldn't be too difficult to add though, if needed. raise OSError('Unsupported DICOM image with PhotometricInterpretation=={}'.format(slice.PhotometricInterpretation)) else: #res = np.array(Image.open(fn), dtype=np.float32)/255 #if len(res.shape)==2: res = np.repeat(res[...,None],3,2) #return res try: if str(fn).startswith("http"): req = urllib.urlopen(str(fn)) image = np.asarray(bytearray(req.read()), dtype="uint8") im = cv2.imdecode(image, flags).astype(np.float32)/255 else: im = cv2.imread(str(fn), flags).astype(np.float32)/255 if im is None: raise OSError(f'File not recognized by opencv: {fn}') return cv2.cvtColor(im, cv2.COLOR_BGR2RGB) except Exception as e: raise OSError('Error handling image at: {}'.format(fn)) from e
[ "def", "open_image", "(", "fn", ")", ":", "flags", "=", "cv2", ".", "IMREAD_UNCHANGED", "+", "cv2", ".", "IMREAD_ANYDEPTH", "+", "cv2", ".", "IMREAD_ANYCOLOR", "if", "not", "os", ".", "path", ".", "exists", "(", "fn", ")", "and", "not", "str", "(", "fn", ")", ".", "startswith", "(", "\"http\"", ")", ":", "raise", "OSError", "(", "'No such file or directory: {}'", ".", "format", "(", "fn", ")", ")", "elif", "os", ".", "path", ".", "isdir", "(", "fn", ")", "and", "not", "str", "(", "fn", ")", ".", "startswith", "(", "\"http\"", ")", ":", "raise", "OSError", "(", "'Is a directory: {}'", ".", "format", "(", "fn", ")", ")", "elif", "isdicom", "(", "fn", ")", ":", "slice", "=", "pydicom", ".", "read_file", "(", "fn", ")", "if", "slice", ".", "PhotometricInterpretation", ".", "startswith", "(", "'MONOCHROME'", ")", ":", "# Make a fake RGB image", "im", "=", "np", ".", "stack", "(", "[", "slice", ".", "pixel_array", "]", "*", "3", ",", "-", "1", ")", "return", "im", "/", "(", "(", "1", "<<", "slice", ".", "BitsStored", ")", "-", "1", ")", "else", ":", "# No support for RGB yet, as it involves various color spaces.", "# It shouldn't be too difficult to add though, if needed.", "raise", "OSError", "(", "'Unsupported DICOM image with PhotometricInterpretation=={}'", ".", "format", "(", "slice", ".", "PhotometricInterpretation", ")", ")", "else", ":", "#res = np.array(Image.open(fn), dtype=np.float32)/255", "#if len(res.shape)==2: res = np.repeat(res[...,None],3,2)", "#return res", "try", ":", "if", "str", "(", "fn", ")", ".", "startswith", "(", "\"http\"", ")", ":", "req", "=", "urllib", ".", "urlopen", "(", "str", "(", "fn", ")", ")", "image", "=", "np", ".", "asarray", "(", "bytearray", "(", "req", ".", "read", "(", ")", ")", ",", "dtype", "=", "\"uint8\"", ")", "im", "=", "cv2", ".", "imdecode", "(", "image", ",", "flags", ")", ".", "astype", "(", "np", ".", "float32", ")", "/", "255", "else", ":", "im", "=", "cv2", ".", "imread", "(", "str", "(", "fn", ")", ",", "flags", ")", ".", "astype", "(", "np", ".", "float32", ")", "/", "255", "if", "im", "is", "None", ":", "raise", "OSError", "(", "f'File not recognized by opencv: {fn}'", ")", "return", "cv2", ".", "cvtColor", "(", "im", ",", "cv2", ".", "COLOR_BGR2RGB", ")", "except", "Exception", "as", "e", ":", "raise", "OSError", "(", "'Error handling image at: {}'", ".", "format", "(", "fn", ")", ")", "from", "e" ]
46.769231
23.820513
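A minimal usage sketch for open_image (the path is hypothetical); the contract worth remembering is that the result is RGB, float32, and scaled into [0, 1] whether the source is a local file, a URL, or a monochrome DICOM:

im = open_image('data/cat.jpg')  # hypothetical path
print(im.shape, im.dtype)        # e.g. (256, 256, 3) float32
print(im.min(), im.max())        # pixel values normalized into [0.0, 1.0]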
def pad_to_multiple(self, factor): """ Pad the pianoroll with zeros at the end along the time axis with the minimum length that makes the resulting pianoroll length a multiple of `factor`. Parameters ---------- factor : int The value which the length of the resulting pianoroll will be a multiple of. """ remainder = self.pianoroll.shape[0] % factor if remainder: pad_width = ((0, (factor - remainder)), (0, 0)) self.pianoroll = np.pad(self.pianoroll, pad_width, 'constant')
[ "def", "pad_to_multiple", "(", "self", ",", "factor", ")", ":", "remainder", "=", "self", ".", "pianoroll", ".", "shape", "[", "0", "]", "%", "factor", "if", "remainder", ":", "pad_width", "=", "(", "(", "0", ",", "(", "factor", "-", "remainder", ")", ")", ",", "(", "0", ",", "0", ")", ")", "self", ".", "pianoroll", "=", "np", ".", "pad", "(", "self", ".", "pianoroll", ",", "pad_width", ",", "'constant'", ")" ]
34.588235
22.235294
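The padding arithmetic in pad_to_multiple is easy to check standalone; a minimal numpy sketch of the same logic as a free function (the array shapes are made up):

import numpy as np

def pad_rows_to_multiple(arr, factor):
    # Same arithmetic as pad_to_multiple: zero-pad only the time axis (axis 0).
    remainder = arr.shape[0] % factor
    if remainder:
        arr = np.pad(arr, ((0, factor - remainder), (0, 0)), 'constant')
    return arr

roll = np.ones((10, 128))
print(pad_rows_to_multiple(roll, 4).shape)  # (12, 128): 10 padded up to a multiple of 4
print(pad_rows_to_multiple(roll, 5).shape)  # (10, 128): already a multiple, unchanged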
def setup_config( config_directories=None, config_file=None, default_filename="opentc.yml" ): """Setup configuration """ config_found = False config_file_path = None if config_file: config_file_path = config_file if os.path.isfile(config_file_path) and os.access(config_file_path, os.R_OK): config_found = True else: for directory in config_directories: if directory is None: continue config_file_path = os.path.join(directory, default_filename) if os.path.isfile(config_file_path) and os.access(config_file_path, os.R_OK): config_found = True break if config_found: with open(config_file_path, 'rt') as ymlfile: config = yaml.safe_load(ymlfile.read()) return config else: print("The configuration file is not found.") exit(1)
[ "def", "setup_config", "(", "config_directories", "=", "None", ",", "config_file", "=", "None", ",", "default_filename", "=", "\"opentc.yml\"", ")", ":", "config_found", "=", "False", "config_file_path", "=", "None", "if", "config_file", ":", "config_file_path", "=", "config_file", "if", "os", ".", "path", ".", "isfile", "(", "config_file_path", ")", "and", "os", ".", "access", "(", "config_file_path", ",", "os", ".", "R_OK", ")", ":", "config_found", "=", "True", "else", ":", "for", "directory", "in", "config_directories", ":", "if", "directory", "is", "None", ":", "continue", "config_file_path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "default_filename", ")", "if", "os", ".", "path", ".", "isfile", "(", "config_file_path", ")", "and", "os", ".", "access", "(", "config_file_path", ",", "os", ".", "R_OK", ")", ":", "config_found", "=", "True", "break", "if", "config_found", ":", "with", "open", "(", "config_file_path", ",", "'rt'", ")", "as", "ymlfile", ":", "config", "=", "yaml", ".", "safe_load", "(", "ymlfile", ".", "read", "(", ")", ")", "return", "config", "else", ":", "print", "(", "\"The configuration file is not found.\"", ")", "exit", "(", "1", ")" ]
31.689655
18.206897
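A hedged usage sketch for setup_config (the directories are hypothetical); an explicit config_file wins if it is readable, otherwise the directories are searched in order for opentc.yml:

import os

config = setup_config(
    config_directories=[os.path.expanduser('~/.opentc'), '/etc/opentc'],  # hypothetical search path
)
print(config)  # parsed contents of the first readable opentc.yml found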
def best_motif_in_cluster(single_pwm, clus_pwm, clusters, fg_fa, background, stats=None, metrics=("roc_auc", "recall_at_fdr")): """Return the best motif per cluster for a clustering results. The motif can be either the average motif or one of the clustered motifs. Parameters ---------- single_pwm : str Filename of motifs. clus_pwm : str Filename of motifs. clusters : Motif clustering result. fg_fa : str Filename of FASTA file. background : dict Dictionary for background file names. stats : dict, optional If statistics are not supplied they will be computed. metrics : sequence, optional Metrics to use for motif evaluation. Default are "roc_auc" and "recall_at_fdr". Returns ------- motifs : list List of Motif instances. """ # combine original and clustered motifs motifs = read_motifs(single_pwm) + read_motifs(clus_pwm) motifs = dict([(str(m), m) for m in motifs]) # get the statistics for those motifs that were not yet checked clustered_motifs = [] for clus,singles in clusters: for motif in set([clus] + singles): if str(motif) not in stats: clustered_motifs.append(motifs[str(motif)]) new_stats = {} for bg, bg_fa in background.items(): for m,s in calc_stats(clustered_motifs, fg_fa, bg_fa).items(): if m not in new_stats: new_stats[m] = {} new_stats[m][bg] = s stats.update(new_stats) rank = rank_motifs(stats, metrics) # rank the motifs best_motifs = [] for clus, singles in clusters: if len(singles) > 1: eval_motifs = singles if clus not in motifs: eval_motifs.append(clus) eval_motifs = [motifs[str(e)] for e in eval_motifs] best_motif = sorted(eval_motifs, key=lambda x: rank[str(x)])[-1] best_motifs.append(best_motif) else: best_motifs.append(clus) for bg in background: stats[str(best_motifs[-1])][bg]["num_cluster"] = len(singles) best_motifs = sorted(best_motifs, key=lambda x: rank[str(x)], reverse=True) return best_motifs
[ "def", "best_motif_in_cluster", "(", "single_pwm", ",", "clus_pwm", ",", "clusters", ",", "fg_fa", ",", "background", ",", "stats", "=", "None", ",", "metrics", "=", "(", "\"roc_auc\"", ",", "\"recall_at_fdr\"", ")", ")", ":", "# combine original and clustered motifs", "motifs", "=", "read_motifs", "(", "single_pwm", ")", "+", "read_motifs", "(", "clus_pwm", ")", "motifs", "=", "dict", "(", "[", "(", "str", "(", "m", ")", ",", "m", ")", "for", "m", "in", "motifs", "]", ")", "# get the statistics for those motifs that were not yet checked", "clustered_motifs", "=", "[", "]", "for", "clus", ",", "singles", "in", "clusters", ":", "for", "motif", "in", "set", "(", "[", "clus", "]", "+", "singles", ")", ":", "if", "str", "(", "motif", ")", "not", "in", "stats", ":", "clustered_motifs", ".", "append", "(", "motifs", "[", "str", "(", "motif", ")", "]", ")", "new_stats", "=", "{", "}", "for", "bg", ",", "bg_fa", "in", "background", ".", "items", "(", ")", ":", "for", "m", ",", "s", "in", "calc_stats", "(", "clustered_motifs", ",", "fg_fa", ",", "bg_fa", ")", ".", "items", "(", ")", ":", "if", "m", "not", "in", "new_stats", ":", "new_stats", "[", "m", "]", "=", "{", "}", "new_stats", "[", "m", "]", "[", "bg", "]", "=", "s", "stats", ".", "update", "(", "new_stats", ")", "rank", "=", "rank_motifs", "(", "stats", ",", "metrics", ")", "# rank the motifs", "best_motifs", "=", "[", "]", "for", "clus", ",", "singles", "in", "clusters", ":", "if", "len", "(", "singles", ")", ">", "1", ":", "eval_motifs", "=", "singles", "if", "clus", "not", "in", "motifs", ":", "eval_motifs", ".", "append", "(", "clus", ")", "eval_motifs", "=", "[", "motifs", "[", "str", "(", "e", ")", "]", "for", "e", "in", "eval_motifs", "]", "best_motif", "=", "sorted", "(", "eval_motifs", ",", "key", "=", "lambda", "x", ":", "rank", "[", "str", "(", "x", ")", "]", ")", "[", "-", "1", "]", "best_motifs", ".", "append", "(", "best_motif", ")", "else", ":", "best_motifs", ".", "append", "(", "clus", ")", "for", "bg", "in", "background", ":", "stats", "[", "str", "(", "best_motifs", "[", "-", "1", "]", ")", "]", "[", "bg", "]", "[", "\"num_cluster\"", "]", "=", "len", "(", "singles", ")", "best_motifs", "=", "sorted", "(", "best_motifs", ",", "key", "=", "lambda", "x", ":", "rank", "[", "str", "(", "x", ")", "]", ",", "reverse", "=", "True", ")", "return", "best_motifs" ]
30.625
21.055556
def _listify(collection): """This is a workaround where Collections are no longer iterable when using JPype.""" new_list = [] for index in range(len(collection)): new_list.append(collection[index]) return new_list
[ "def", "_listify", "(", "collection", ")", ":", "new_list", "=", "[", "]", "for", "index", "in", "range", "(", "len", "(", "collection", ")", ")", ":", "new_list", ".", "append", "(", "collection", "[", "index", "]", ")", "return", "new_list" ]
37
8.714286
def add_module_plugin_filters(self, module_plugin_filters): """ Adds `module_plugin_filters` to the internal module filters. May be a single object or an iterable. Every module filter must be a callable that takes in a list of plugins and their associated names. """ module_plugin_filters = util.return_list(module_plugin_filters) self.module_plugin_filters.extend(module_plugin_filters)
[ "def", "add_module_plugin_filters", "(", "self", ",", "module_plugin_filters", ")", ":", "module_plugin_filters", "=", "util", ".", "return_list", "(", "module_plugin_filters", ")", "self", ".", "module_plugin_filters", ".", "extend", "(", "module_plugin_filters", ")" ]
44.2
18
def get_data(self, data_split="train"): """ Get specified split of data. """ if data_split == 'train': return self._current_train_set elif data_split == 'valid': return self._current_valid_set elif data_split == 'test': return self._current_test_set else: return None
[ "def", "get_data", "(", "self", ",", "data_split", "=", "\"train\"", ")", ":", "if", "data_split", "==", "'train'", ":", "return", "self", ".", "_current_train_set", "elif", "data_split", "==", "'valid'", ":", "return", "self", ".", "_current_valid_set", "elif", "data_split", "==", "'test'", ":", "return", "self", ".", "_current_test_set", "else", ":", "return", "None" ]
30
6
def _json_path_search(self, json_dict, expr): """ Scan a JSON dictionary using a json-path string of the format $.element..element1[index] etc. *Args:*\n _json_dict_ - JSON dictionary;\n _expr_ - string for fuzzy search of items within the dictionary;\n *Returns:*\n List of DatumInContext objects: ``[DatumInContext(value=..., path=..., context=[DatumInContext])]`` - value - found value - path - value selector inside context.value (in implementation of jsonpath-rw: class Index or Fields) *Raises:*\n JsonValidatorError """ path = parse(expr) results = path.find(json_dict) if len(results) == 0: raise JsonValidatorError("Nothing found in the dictionary {0} using the given path {1}".format( str(json_dict), str(expr))) return results
[ "def", "_json_path_search", "(", "self", ",", "json_dict", ",", "expr", ")", ":", "path", "=", "parse", "(", "expr", ")", "results", "=", "path", ".", "find", "(", "json_dict", ")", "if", "len", "(", "results", ")", "is", "0", ":", "raise", "JsonValidatorError", "(", "\"Nothing found in the dictionary {0} using the given path {1}\"", ".", "format", "(", "str", "(", "json_dict", ")", ",", "str", "(", "expr", ")", ")", ")", "return", "results" ]
35.68
24.8
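A standalone sketch of the jsonpath-rw calls that _json_path_search wraps (assumes the jsonpath-rw package, which provides parse and DatumInContext; the sample document is made up):

from jsonpath_rw import parse

doc = {'store': {'book': [{'title': 'A'}, {'title': 'B'}]}}
matches = parse('$.store.book[*].title').find(doc)
print([m.value for m in matches])           # ['A', 'B']
print([str(m.full_path) for m in matches])  # selector of each match inside doc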
def add_job(self, id, func, **kwargs): """ Add the given job to the job list and wakes up the scheduler if it's already running. :param str id: explicit identifier for the job (for modifying it later) :param func: callable (or a textual reference to one) to run at the given time """ job_def = dict(kwargs) job_def['id'] = id job_def['func'] = func job_def['name'] = job_def.get('name') or id fix_job_def(job_def) return self._scheduler.add_job(**job_def)
[ "def", "add_job", "(", "self", ",", "id", ",", "func", ",", "*", "*", "kwargs", ")", ":", "job_def", "=", "dict", "(", "kwargs", ")", "job_def", "[", "'id'", "]", "=", "id", "job_def", "[", "'func'", "]", "=", "func", "job_def", "[", "'name'", "]", "=", "job_def", ".", "get", "(", "'name'", ")", "or", "id", "fix_job_def", "(", "job_def", ")", "return", "self", ".", "_scheduler", ".", "add_job", "(", "*", "*", "job_def", ")" ]
33.25
22.875
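Since `fix_job_def` and `self._scheduler` are external to this snippet, here is a hedged sketch of the equivalent call against a plain APScheduler scheduler, which the wrapper appears to delegate to; the job id and trigger are hypothetical:

import time
from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print('tick')

scheduler = BackgroundScheduler()
# same shape as the wrapper above: explicit id, a callable, trigger kwargs
scheduler.add_job(tick, 'interval', seconds=10, id='tick-job', name='tick-job')
scheduler.start()
time.sleep(30)  # keep the process alive long enough to see a few ticks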
def createGraph(self, name, createCollections=True, isSmart=False, numberOfShards=None, smartGraphAttribute=None) :
    """Creates a graph and returns it. 'name' must be the name of a class inheriting from Graph.
    Checks will be performed to make sure that every collection mentioned in the edges definition exists.
    Raises a ValueError in case of a non-existing collection."""

    def _checkCollectionList(lst) :
        for colName in lst :
            if not COL.isCollection(colName) :
                raise ValueError("'%s' is not a defined Collection" % colName)

    graphClass = GR.getGraphClass(name)

    ed = []
    for e in graphClass._edgeDefinitions :
        if not COL.isEdgeCollection(e.edgesCollection) :
            raise ValueError("'%s' is not a defined Edge Collection" % e.edgesCollection)
        _checkCollectionList(e.fromCollections)
        _checkCollectionList(e.toCollections)

        ed.append(e.toJson())

    _checkCollectionList(graphClass._orphanedCollections)

    options = {}
    if numberOfShards:
        options['numberOfShards'] = numberOfShards
    if smartGraphAttribute:
        options['smartGraphAttribute'] = smartGraphAttribute

    payload = {
        "name": name,
        "edgeDefinitions": ed,
        "orphanCollections": graphClass._orphanedCollections
    }
    if isSmart :
        payload['isSmart'] = isSmart
    if options:
        payload['options'] = options

    payload = json.dumps(payload)
    r = self.connection.session.post(self.graphsURL, data=payload)
    data = r.json()

    if r.status_code == 201 or r.status_code == 202 :
        self.graphs[name] = graphClass(self, data["graph"])
    else :
        raise CreationError(data["errorMessage"], data)
    return self.graphs[name]
[ "def", "createGraph", "(", "self", ",", "name", ",", "createCollections", "=", "True", ",", "isSmart", "=", "False", ",", "numberOfShards", "=", "None", ",", "smartGraphAttribute", "=", "None", ")", ":", "def", "_checkCollectionList", "(", "lst", ")", ":", "for", "colName", "in", "lst", ":", "if", "not", "COL", ".", "isCollection", "(", "colName", ")", ":", "raise", "ValueError", "(", "\"'%s' is not a defined Collection\"", "%", "colName", ")", "graphClass", "=", "GR", ".", "getGraphClass", "(", "name", ")", "ed", "=", "[", "]", "for", "e", "in", "graphClass", ".", "_edgeDefinitions", ":", "if", "not", "COL", ".", "isEdgeCollection", "(", "e", ".", "edgesCollection", ")", ":", "raise", "ValueError", "(", "\"'%s' is not a defined Edge Collection\"", "%", "e", ".", "edgesCollection", ")", "_checkCollectionList", "(", "e", ".", "fromCollections", ")", "_checkCollectionList", "(", "e", ".", "toCollections", ")", "ed", ".", "append", "(", "e", ".", "toJson", "(", ")", ")", "_checkCollectionList", "(", "graphClass", ".", "_orphanedCollections", ")", "options", "=", "{", "}", "if", "numberOfShards", ":", "options", "[", "'numberOfShards'", "]", "=", "numberOfShards", "if", "smartGraphAttribute", ":", "options", "[", "'smartGraphAttribute'", "]", "=", "smartGraphAttribute", "payload", "=", "{", "\"name\"", ":", "name", ",", "\"edgeDefinitions\"", ":", "ed", ",", "\"orphanCollections\"", ":", "graphClass", ".", "_orphanedCollections", "}", "if", "isSmart", ":", "payload", "[", "'isSmart'", "]", "=", "isSmart", "if", "options", ":", "payload", "[", "'options'", "]", "=", "options", "payload", "=", "json", ".", "dumps", "(", "payload", ")", "r", "=", "self", ".", "connection", ".", "session", ".", "post", "(", "self", ".", "graphsURL", ",", "data", "=", "payload", ")", "data", "=", "r", ".", "json", "(", ")", "if", "r", ".", "status_code", "==", "201", "or", "r", ".", "status_code", "==", "202", ":", "self", ".", "graphs", "[", "name", "]", "=", "graphClass", "(", "self", ",", "data", "[", "\"graph\"", "]", ")", "else", ":", "raise", "CreationError", "(", "data", "[", "\"errorMessage\"", "]", ",", "data", ")", "return", "self", ".", "graphs", "[", "name", "]" ]
37.176471
24.294118
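Because `createGraph` resolves `name` against registered Graph subclasses, a usage sketch needs the full pyArango class-registration pattern. The collection names, fields, and credentials below are placeholders, and the imports follow the pyArango layout to the best of my knowledge rather than a verified version:

from pyArango.connection import Connection
from pyArango.collection import Collection, Edges, Field
from pyArango.graph import Graph, EdgeDefinition

class Humans(Collection):
    _fields = {'name': Field()}

class Knows(Edges):
    _fields = {'since': Field()}

class Social(Graph):
    _edgeDefinitions = [EdgeDefinition('Knows',
                                       fromCollections=['Humans'],
                                       toCollections=['Humans'])]
    _orphanedCollections = []

conn = Connection(username='root', password='root')  # placeholder credentials
db = conn['_system']
db.createCollection('Humans')   # edge-definition checks require these to exist
db.createCollection('Knows')
graph = db.createGraph('Social')  # the method shown above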
def get_upload_path(self, filename):
    '''
    Override this in a proxy subclass to customize the upload path.
    The default upload path is
    :file:`/media/images/<user.id>/<filename>.<ext>` or
    :file:`/media/images/common/<filename>.<ext>` if the user is not set.

    ``<filename>`` is returned by
    :meth:`~generic_images.models.AbstractAttachedImage.get_file_name`
    method. By default it is the probable id of the new image
    (predicted, as the actual id is unknown at this stage).
    '''
    user_folder = str(self.user.pk) if self.user else 'common'
    root, ext = os.path.splitext(filename)
    return os.path.join('media', 'images',
                        user_folder,
                        self.get_file_name(filename) + ext)
[ "def", "get_upload_path", "(", "self", ",", "filename", ")", ":", "user_folder", "=", "str", "(", "self", ".", "user", ".", "pk", ")", "if", "self", ".", "user", "else", "'common'", "root", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "return", "os", ".", "path", ".", "join", "(", "'media'", ",", "'images'", ",", "user_folder", ",", "self", ".", "get_file_name", "(", "filename", ")", "+", "ext", ")" ]
47.8125
22.6875
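A tiny self-contained sketch of the same path logic, with a stub object standing in for the model instance; the predicted id '42' is made up:

import os

class _FakeImage:
    user = None  # no user, so the 'common' folder is used
    def get_file_name(self, filename):
        return '42'  # stand-in for the predicted image id

image = _FakeImage()
root, ext = os.path.splitext('photo.jpg')
user_folder = 'common' if image.user is None else str(image.user.pk)
print(os.path.join('media', 'images', user_folder,
                   image.get_file_name('photo.jpg') + ext))
# media/images/common/42.jpg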
def summary(processors, metrics, context):
    """Print the summary"""
    # display aggregated metric values on language level

    def display_header(processors, before='', after=''):
        """Display the header for the summary results."""
        print(before, end=' ')
        for processor in processors:
            processor.display_header()
        print(after)

    def display_separator(processors, before='', after=''):
        """Display the separator for the summary results."""
        print(before, end=' ')
        for processor in processors:
            processor.display_separator()
        print(after)

    def display_metrics(processors, before='', after='', metrics=[]):
        """Display the metrics for the summary results."""
        print(before, end=' ')
        for processor in processors:
            processor.display_metrics(metrics)
        print(after)

    summary = {}
    for m in metrics:
        lang = metrics[m]['language']
        has_key = lang in summary
        if not has_key:
            summary[lang] = {'file_count': 0, 'language': lang}
        summary[lang]['file_count'] += 1
        for i in metrics[m]:
            if i not in ['sloc', 'comments', 'mccabe']:  # include metrics to be used
                continue
            if not has_key:
                summary[lang][i] = 0
            summary[lang][i] += metrics[m][i]

    total = {'language': 'Total'}
    for m in summary:
        for i in summary[m]:
            if i == 'language':
                continue
            if i not in total:
                total[i] = 0
            total[i] += summary[m][i]

    print('Metrics Summary:')

    display_header(processors, 'Files', '')
    display_separator(processors, '-'*5, '')
    for k in sorted(summary.keys(), key=str.lower):
        display_metrics(processors, '%5d' % summary[k]['file_count'], '', summary[k])
    display_separator(processors, '-'*5, '')
    display_metrics(processors, '%5d' % total['file_count'], '', total)
[ "def", "summary", "(", "processors", ",", "metrics", ",", "context", ")", ":", "# display aggregated metric values on language level", "def", "display_header", "(", "processors", ",", "before", "=", "''", ",", "after", "=", "''", ")", ":", "\"\"\"Display the header for the summary results.\"\"\"", "print", "(", "before", ",", "end", "=", "' '", ")", "for", "processor", "in", "processors", ":", "processor", ".", "display_header", "(", ")", "print", "(", "after", ")", "def", "display_separator", "(", "processors", ",", "before", "=", "''", ",", "after", "=", "''", ")", ":", "\"\"\"Display the header for the summary results.\"\"\"", "print", "(", "before", ",", "end", "=", "' '", ")", "for", "processor", "in", "processors", ":", "processor", ".", "display_separator", "(", ")", "print", "(", "after", ")", "def", "display_metrics", "(", "processors", ",", "before", "=", "''", ",", "after", "=", "''", ",", "metrics", "=", "[", "]", ")", ":", "\"\"\"Display the header for the summary results.\"\"\"", "print", "(", "before", ",", "end", "=", "' '", ")", "for", "processor", "in", "processors", ":", "processor", ".", "display_metrics", "(", "metrics", ")", "print", "(", "after", ")", "summary", "=", "{", "}", "for", "m", "in", "metrics", ":", "lang", "=", "metrics", "[", "m", "]", "[", "'language'", "]", "has_key", "=", "lang", "in", "summary", "if", "not", "has_key", ":", "summary", "[", "lang", "]", "=", "{", "'file_count'", ":", "0", ",", "'language'", ":", "lang", "}", "summary", "[", "lang", "]", "[", "'file_count'", "]", "+=", "1", "for", "i", "in", "metrics", "[", "m", "]", ":", "if", "i", "not", "in", "[", "'sloc'", ",", "'comments'", ",", "'mccabe'", "]", ":", "# include metrics to be used", "continue", "if", "not", "has_key", ":", "summary", "[", "lang", "]", "[", "i", "]", "=", "0", "summary", "[", "lang", "]", "[", "i", "]", "+=", "metrics", "[", "m", "]", "[", "i", "]", "total", "=", "{", "'language'", ":", "'Total'", "}", "for", "m", "in", "summary", ":", "for", "i", "in", "summary", "[", "m", "]", ":", "if", "i", "==", "'language'", ":", "continue", "if", "i", "not", "in", "total", ":", "total", "[", "i", "]", "=", "0", "total", "[", "i", "]", "+=", "summary", "[", "m", "]", "[", "i", "]", "print", "(", "'Metrics Summary:'", ")", "display_header", "(", "processors", ",", "'Files'", ",", "''", ")", "display_separator", "(", "processors", ",", "'-'", "*", "5", ",", "''", ")", "for", "k", "in", "sorted", "(", "summary", ".", "keys", "(", ")", ",", "key", "=", "str", ".", "lower", ")", ":", "display_metrics", "(", "processors", ",", "'%5d'", "%", "summary", "[", "k", "]", "[", "'file_count'", "]", ",", "''", ",", "summary", "[", "k", "]", ")", "display_separator", "(", "processors", ",", "'-'", "*", "5", ",", "''", ")", "display_metrics", "(", "processors", ",", "'%5d'", "%", "total", "[", "'file_count'", "]", ",", "''", ",", "total", ")" ]
34.77193
14.035088
def universal_transformer_encoder(encoder_input,
                                  encoder_self_attention_bias,
                                  hparams,
                                  name="encoder",
                                  nonpadding=None,
                                  save_weights_to=None,
                                  make_image_summary=True):
  """Universal Transformer encoder function.

  Prepares all the arguments and the inputs and passes it to a
  universal_transformer_layer to encode the encoder_input.

  Args:
    encoder_input: a Tensor
    encoder_self_attention_bias: bias Tensor for self-attention
      (see common_attention.attention_bias())
    hparams: hyperparameters for model
    name: a string
    nonpadding: optional Tensor with shape [batch_size, encoder_length]
      indicating what positions are not padding. This must either be
      passed in, which we do for "packed" datasets, or inferred from
      encoder_self_attention_bias. The knowledge about padding is used
      for pad_remover (efficiency) and to mask out padding in
      convolutional layers.
    save_weights_to: an optional dictionary to capture attention weights
      for visualization; the weights tensor will be appended there under
      a string key created from the variable scope (including name).
    make_image_summary: Whether to make an attention image summary.

  Returns:
    y: a Tensor as the output of the encoder
    extra_output: which can be used to pass extra information to the body
  """

  x = encoder_input
  attention_dropout_broadcast_dims = (
      common_layers.comma_separated_string_to_integer_list(
          getattr(hparams, "attention_dropout_broadcast_dims", "")))
  with tf.variable_scope(name):
    if nonpadding is not None:
      padding = 1.0 - nonpadding
    else:
      padding = common_attention.attention_bias_to_padding(
          encoder_self_attention_bias)
      nonpadding = 1.0 - padding
    pad_remover = None
    if hparams.use_pad_remover and not common_layers.is_xla_compiled():
      pad_remover = expert_utils.PadRemover(padding)

    ffn_unit = functools.partial(
        transformer_encoder_ffn_unit,
        hparams=hparams,
        nonpadding_mask=nonpadding,
        pad_remover=pad_remover)

    attention_unit = functools.partial(
        transformer_encoder_attention_unit,
        hparams=hparams,
        encoder_self_attention_bias=encoder_self_attention_bias,
        attention_dropout_broadcast_dims=attention_dropout_broadcast_dims,
        save_weights_to=save_weights_to,
        make_image_summary=make_image_summary)

    x, extra_output = universal_transformer_layer(
        x, hparams, ffn_unit, attention_unit, pad_remover=pad_remover)

    return common_layers.layer_preprocess(x, hparams), extra_output
[ "def", "universal_transformer_encoder", "(", "encoder_input", ",", "encoder_self_attention_bias", ",", "hparams", ",", "name", "=", "\"encoder\"", ",", "nonpadding", "=", "None", ",", "save_weights_to", "=", "None", ",", "make_image_summary", "=", "True", ")", ":", "x", "=", "encoder_input", "attention_dropout_broadcast_dims", "=", "(", "common_layers", ".", "comma_separated_string_to_integer_list", "(", "getattr", "(", "hparams", ",", "\"attention_dropout_broadcast_dims\"", ",", "\"\"", ")", ")", ")", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "if", "nonpadding", "is", "not", "None", ":", "padding", "=", "1.0", "-", "nonpadding", "else", ":", "padding", "=", "common_attention", ".", "attention_bias_to_padding", "(", "encoder_self_attention_bias", ")", "nonpadding", "=", "1.0", "-", "padding", "pad_remover", "=", "None", "if", "hparams", ".", "use_pad_remover", "and", "not", "common_layers", ".", "is_xla_compiled", "(", ")", ":", "pad_remover", "=", "expert_utils", ".", "PadRemover", "(", "padding", ")", "ffn_unit", "=", "functools", ".", "partial", "(", "transformer_encoder_ffn_unit", ",", "hparams", "=", "hparams", ",", "nonpadding_mask", "=", "nonpadding", ",", "pad_remover", "=", "pad_remover", ")", "attention_unit", "=", "functools", ".", "partial", "(", "transformer_encoder_attention_unit", ",", "hparams", "=", "hparams", ",", "encoder_self_attention_bias", "=", "encoder_self_attention_bias", ",", "attention_dropout_broadcast_dims", "=", "attention_dropout_broadcast_dims", ",", "save_weights_to", "=", "save_weights_to", ",", "make_image_summary", "=", "make_image_summary", ")", "x", ",", "extra_output", "=", "universal_transformer_layer", "(", "x", ",", "hparams", ",", "ffn_unit", ",", "attention_unit", ",", "pad_remover", "=", "pad_remover", ")", "return", "common_layers", ".", "layer_preprocess", "(", "x", ",", "hparams", ")", ",", "extra_output" ]
40.970149
19.701493
def _mod_run_check(cmd_kwargs, onlyif, unless):
    '''
    Execute the onlyif and unless logic.
    Return a result dict if:
    * onlyif failed (onlyif != 0)
    * unless succeeded (unless == 0)
    else return True
    '''
    if onlyif:
        if __salt__['cmd.retcode'](onlyif, **cmd_kwargs) != 0:
            return {'comment': 'onlyif condition is false',
                    'skip_watch': True,
                    'result': True}

    if unless:
        if __salt__['cmd.retcode'](unless, **cmd_kwargs) == 0:
            return {'comment': 'unless condition is true',
                    'skip_watch': True,
                    'result': True}

    # No reason to stop, return True
    return True
[ "def", "_mod_run_check", "(", "cmd_kwargs", ",", "onlyif", ",", "unless", ")", ":", "if", "onlyif", ":", "if", "__salt__", "[", "'cmd.retcode'", "]", "(", "onlyif", ",", "*", "*", "cmd_kwargs", ")", "!=", "0", ":", "return", "{", "'comment'", ":", "'onlyif condition is false'", ",", "'skip_watch'", ":", "True", ",", "'result'", ":", "True", "}", "if", "unless", ":", "if", "__salt__", "[", "'cmd.retcode'", "]", "(", "unless", ",", "*", "*", "cmd_kwargs", ")", "==", "0", ":", "return", "{", "'comment'", ":", "'unless condition is true'", ",", "'skip_watch'", ":", "True", ",", "'result'", ":", "True", "}", "# No reason to stop, return True", "return", "True" ]
31.181818
16.818182
def unlock_queue_message(self, queue_name, sequence_number, lock_token):
    '''
    Unlocks a message for processing by other receivers on a given
    queue. This operation deletes the lock object, causing the message
    to be unlocked. A message must have first been locked by a receiver
    before this operation is called.

    queue_name:
        Name of the queue.
    sequence_number:
        The sequence number of the message to be unlocked as returned in
        BrokerProperties['SequenceNumber'] by the Peek Message operation.
    lock_token:
        The ID of the lock as returned by the Peek Message operation in
        BrokerProperties['LockToken']
    '''
    _validate_not_none('queue_name', queue_name)
    _validate_not_none('sequence_number', sequence_number)
    _validate_not_none('lock_token', lock_token)
    request = HTTPRequest()
    request.method = 'PUT'
    request.host = self._get_host()
    request.path = '/' + _str(queue_name) + \
                   '/messages/' + _str(sequence_number) + \
                   '/' + _str(lock_token) + ''
    request.path, request.query = self._httpclient._update_request_uri_query(request)  # pylint: disable=protected-access
    request.headers = self._update_service_bus_header(request)
    self._perform_request(request)
[ "def", "unlock_queue_message", "(", "self", ",", "queue_name", ",", "sequence_number", ",", "lock_token", ")", ":", "_validate_not_none", "(", "'queue_name'", ",", "queue_name", ")", "_validate_not_none", "(", "'sequence_number'", ",", "sequence_number", ")", "_validate_not_none", "(", "'lock_token'", ",", "lock_token", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'PUT'", "request", ".", "host", "=", "self", ".", "_get_host", "(", ")", "request", ".", "path", "=", "'/'", "+", "_str", "(", "queue_name", ")", "+", "'/messages/'", "+", "_str", "(", "sequence_number", ")", "+", "'/'", "+", "_str", "(", "lock_token", ")", "+", "''", "request", ".", "path", ",", "request", ".", "query", "=", "self", ".", "_httpclient", ".", "_update_request_uri_query", "(", "request", ")", "# pylint: disable=protected-access", "request", ".", "headers", "=", "self", ".", "_update_service_bus_header", "(", "request", ")", "self", ".", "_perform_request", "(", "request", ")" ]
48.964286
22.392857
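A hedged end-to-end sketch against the legacy (0.x) azure-servicebus SDK this method belongs to. The namespace, queue name, and key are placeholders, and reading the sequence number and lock token out of `msg.broker_properties` is an assumption based on the docstring's references to BrokerProperties:

from azure.servicebus import ServiceBusService  # legacy 0.x SDK

sbs = ServiceBusService('mynamespace',
                        shared_access_key_name='RootManageSharedAccessKey',
                        shared_access_key_value='<key>')  # placeholders
msg = sbs.receive_queue_message('taskqueue', peek_lock=True)
props = msg.broker_properties  # assumed: parsed BrokerProperties header
sbs.unlock_queue_message('taskqueue',
                         props['SequenceNumber'],
                         props['LockToken'])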
def output_stream_http(plugin, initial_streams, external=False, port=0): """Continuously output the stream over HTTP.""" global output if not external: if not args.player: console.exit("The default player (VLC) does not seem to be " "installed. You must specify the path to a player " "executable with --player.") title = create_title(plugin) server = create_http_server() player = output = PlayerOutput(args.player, args=args.player_args, filename=server.url, quiet=not args.verbose_player, title=title) try: log.info("Starting player: {0}", args.player) if player: player.open() except OSError as err: console.exit("Failed to start player: {0} ({1})", args.player, err) else: server = create_http_server(host=None, port=port) player = None log.info("Starting server, access with one of:") for url in server.urls: log.info(" " + url) for req in iter_http_requests(server, player): user_agent = req.headers.get("User-Agent") or "unknown player" log.info("Got HTTP request from {0}".format(user_agent)) stream_fd = prebuffer = None while not stream_fd and (not player or player.running): try: streams = initial_streams or fetch_streams(plugin) initial_streams = None for stream_name in (resolve_stream_name(streams, s) for s in args.stream): if stream_name in streams: stream = streams[stream_name] break else: log.info("Stream not available, will re-fetch " "streams in 10 sec") sleep(10) continue except PluginError as err: log.error(u"Unable to fetch new streams: {0}", err) continue try: log.info("Opening stream: {0} ({1})", stream_name, type(stream).shortname()) stream_fd, prebuffer = open_stream(stream) except StreamError as err: log.error("{0}", err) if stream_fd and prebuffer: log.debug("Writing stream to player") read_stream(stream_fd, server, prebuffer) server.close(True) player.close() server.close()
[ "def", "output_stream_http", "(", "plugin", ",", "initial_streams", ",", "external", "=", "False", ",", "port", "=", "0", ")", ":", "global", "output", "if", "not", "external", ":", "if", "not", "args", ".", "player", ":", "console", ".", "exit", "(", "\"The default player (VLC) does not seem to be \"", "\"installed. You must specify the path to a player \"", "\"executable with --player.\"", ")", "title", "=", "create_title", "(", "plugin", ")", "server", "=", "create_http_server", "(", ")", "player", "=", "output", "=", "PlayerOutput", "(", "args", ".", "player", ",", "args", "=", "args", ".", "player_args", ",", "filename", "=", "server", ".", "url", ",", "quiet", "=", "not", "args", ".", "verbose_player", ",", "title", "=", "title", ")", "try", ":", "log", ".", "info", "(", "\"Starting player: {0}\"", ",", "args", ".", "player", ")", "if", "player", ":", "player", ".", "open", "(", ")", "except", "OSError", "as", "err", ":", "console", ".", "exit", "(", "\"Failed to start player: {0} ({1})\"", ",", "args", ".", "player", ",", "err", ")", "else", ":", "server", "=", "create_http_server", "(", "host", "=", "None", ",", "port", "=", "port", ")", "player", "=", "None", "log", ".", "info", "(", "\"Starting server, access with one of:\"", ")", "for", "url", "in", "server", ".", "urls", ":", "log", ".", "info", "(", "\" \"", "+", "url", ")", "for", "req", "in", "iter_http_requests", "(", "server", ",", "player", ")", ":", "user_agent", "=", "req", ".", "headers", ".", "get", "(", "\"User-Agent\"", ")", "or", "\"unknown player\"", "log", ".", "info", "(", "\"Got HTTP request from {0}\"", ".", "format", "(", "user_agent", ")", ")", "stream_fd", "=", "prebuffer", "=", "None", "while", "not", "stream_fd", "and", "(", "not", "player", "or", "player", ".", "running", ")", ":", "try", ":", "streams", "=", "initial_streams", "or", "fetch_streams", "(", "plugin", ")", "initial_streams", "=", "None", "for", "stream_name", "in", "(", "resolve_stream_name", "(", "streams", ",", "s", ")", "for", "s", "in", "args", ".", "stream", ")", ":", "if", "stream_name", "in", "streams", ":", "stream", "=", "streams", "[", "stream_name", "]", "break", "else", ":", "log", ".", "info", "(", "\"Stream not available, will re-fetch \"", "\"streams in 10 sec\"", ")", "sleep", "(", "10", ")", "continue", "except", "PluginError", "as", "err", ":", "log", ".", "error", "(", "u\"Unable to fetch new streams: {0}\"", ",", "err", ")", "continue", "try", ":", "log", ".", "info", "(", "\"Opening stream: {0} ({1})\"", ",", "stream_name", ",", "type", "(", "stream", ")", ".", "shortname", "(", ")", ")", "stream_fd", ",", "prebuffer", "=", "open_stream", "(", "stream", ")", "except", "StreamError", "as", "err", ":", "log", ".", "error", "(", "\"{0}\"", ",", "err", ")", "if", "stream_fd", "and", "prebuffer", ":", "log", ".", "debug", "(", "\"Writing stream to player\"", ")", "read_stream", "(", "stream_fd", ",", "server", ",", "prebuffer", ")", "server", ".", "close", "(", "True", ")", "player", ".", "close", "(", ")", "server", ".", "close", "(", ")" ]
36.857143
20.442857
def create(self, repo_slug=None, key=None, label=None):
    """ Associate an ssh key with your repo and return it.
    """
    key = '%s' % key
    repo_slug = repo_slug or self.bitbucket.repo_slug or ''
    url = self.bitbucket.url('SET_DEPLOY_KEY',
                             username=self.bitbucket.username,
                             repo_slug=repo_slug)
    return self.bitbucket.dispatch('POST',
                                   url,
                                   auth=self.bitbucket.auth,
                                   key=key,
                                   label=label)
[ "def", "create", "(", "self", ",", "repo_slug", "=", "None", ",", "key", "=", "None", ",", "label", "=", "None", ")", ":", "key", "=", "'%s'", "%", "key", "repo_slug", "=", "repo_slug", "or", "self", ".", "bitbucket", ".", "repo_slug", "or", "''", "url", "=", "self", ".", "bitbucket", ".", "url", "(", "'SET_DEPLOY_KEY'", ",", "username", "=", "self", ".", "bitbucket", ".", "username", ",", "repo_slug", "=", "repo_slug", ")", "return", "self", ".", "bitbucket", ".", "dispatch", "(", "'POST'", ",", "url", ",", "auth", "=", "self", ".", "bitbucket", ".", "auth", ",", "key", "=", "key", ",", "label", "=", "label", ")" ]
48.846154
11.846154
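A hedged call sketch only: `deploy_keys_service` stands in for an instance of whatever class defines this method (client wiring omitted), the key material is a placeholder, and unpacking a (status, payload) pair assumes `dispatch` returns one, as the bitbucket-api client appears to:

status, data = deploy_keys_service.create(repo_slug='myrepo',
                                          key='ssh-rsa AAAA... user@host',
                                          label='deploy key')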
def create_key_pair(self, key_name):
    """
    Create a new key pair for your account.
    This will create the key pair within the region you
    are currently connected to.

    :type key_name: string
    :param key_name: The name of the new keypair

    :rtype: :class:`boto.ec2.keypair.KeyPair`
    :return: The newly created :class:`boto.ec2.keypair.KeyPair`.
             The material attribute of the new KeyPair object
             will contain the unencrypted PEM encoded RSA private key.
    """
    params = {'KeyName': key_name}
    return self.get_object('CreateKeyPair', params, KeyPair, verb='POST')
[ "def", "create_key_pair", "(", "self", ",", "key_name", ")", ":", "params", "=", "{", "'KeyName'", ":", "key_name", "}", "return", "self", ".", "get_object", "(", "'CreateKeyPair'", ",", "params", ",", "KeyPair", ",", "verb", "=", "'POST'", ")" ]
41
17.375
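Usage with the boto 2 EC2 connection this method lives on; the region, key name, and save directory are placeholders:

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')  # placeholder region
key_pair = conn.create_key_pair('my-key')
print(key_pair.material)          # unencrypted PEM private key
key_pair.save('/home/user/.ssh')  # hypothetical destination directory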
def to_graphviz(booster, fmap='', num_trees=0, rankdir='UT',
                yes_color='#0000FF', no_color='#FF0000',
                condition_node_params=None, leaf_node_params=None, **kwargs):
    """Convert specified tree to graphviz instance. IPython can automatically plot
    the returned graphviz instance. Otherwise, you should call .render() method
    of the returned graphviz instance.

    Parameters
    ----------
    booster : Booster, XGBModel
        Booster or XGBModel instance
    fmap: str (optional)
        The name of feature map file
    num_trees : int, default 0
        Specify the ordinal number of target tree
    rankdir : str, default "UT"
        Passed to graphviz via graph_attr
    yes_color : str, default '#0000FF'
        Edge color when meets the node condition.
    no_color : str, default '#FF0000'
        Edge color when doesn't meet the node condition.
    condition_node_params : dict (optional)
        condition node configuration,
        {'shape':'box',
         'style':'filled,rounded',
         'fillcolor':'#78bceb'}
    leaf_node_params : dict (optional)
        leaf node configuration
        {'shape':'box',
         'style':'filled',
         'fillcolor':'#e48038'}
    kwargs :
        Other keywords passed to graphviz graph_attr

    Returns
    -------
    graph : graphviz.Digraph
        The tree rendered as a graphviz instance
    """
    if condition_node_params is None:
        condition_node_params = {}
    if leaf_node_params is None:
        leaf_node_params = {}

    try:
        from graphviz import Digraph
    except ImportError:
        raise ImportError('You must install graphviz to plot tree')

    if not isinstance(booster, (Booster, XGBModel)):
        raise ValueError('booster must be Booster or XGBModel instance')

    if isinstance(booster, XGBModel):
        booster = booster.get_booster()

    tree = booster.get_dump(fmap=fmap)[num_trees]
    tree = tree.split()

    kwargs = kwargs.copy()
    kwargs.update({'rankdir': rankdir})
    graph = Digraph(graph_attr=kwargs)

    for i, text in enumerate(tree):
        if text[0].isdigit():
            node = _parse_node(
                graph, text, condition_node_params=condition_node_params,
                leaf_node_params=leaf_node_params)
        else:
            if i == 0:
                # 1st string must be node
                raise ValueError('Unable to parse given string as tree')
            _parse_edge(graph, node, text, yes_color=yes_color,
                        no_color=no_color)

    return graph
[ "def", "to_graphviz", "(", "booster", ",", "fmap", "=", "''", ",", "num_trees", "=", "0", ",", "rankdir", "=", "'UT'", ",", "yes_color", "=", "'#0000FF'", ",", "no_color", "=", "'#FF0000'", ",", "condition_node_params", "=", "None", ",", "leaf_node_params", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "condition_node_params", "is", "None", ":", "condition_node_params", "=", "{", "}", "if", "leaf_node_params", "is", "None", ":", "leaf_node_params", "=", "{", "}", "try", ":", "from", "graphviz", "import", "Digraph", "except", "ImportError", ":", "raise", "ImportError", "(", "'You must install graphviz to plot tree'", ")", "if", "not", "isinstance", "(", "booster", ",", "(", "Booster", ",", "XGBModel", ")", ")", ":", "raise", "ValueError", "(", "'booster must be Booster or XGBModel instance'", ")", "if", "isinstance", "(", "booster", ",", "XGBModel", ")", ":", "booster", "=", "booster", ".", "get_booster", "(", ")", "tree", "=", "booster", ".", "get_dump", "(", "fmap", "=", "fmap", ")", "[", "num_trees", "]", "tree", "=", "tree", ".", "split", "(", ")", "kwargs", "=", "kwargs", ".", "copy", "(", ")", "kwargs", ".", "update", "(", "{", "'rankdir'", ":", "rankdir", "}", ")", "graph", "=", "Digraph", "(", "graph_attr", "=", "kwargs", ")", "for", "i", ",", "text", "in", "enumerate", "(", "tree", ")", ":", "if", "text", "[", "0", "]", ".", "isdigit", "(", ")", ":", "node", "=", "_parse_node", "(", "graph", ",", "text", ",", "condition_node_params", "=", "condition_node_params", ",", "leaf_node_params", "=", "leaf_node_params", ")", "else", ":", "if", "i", "==", "0", ":", "# 1st string must be node", "raise", "ValueError", "(", "'Unable to parse given string as tree'", ")", "_parse_edge", "(", "graph", ",", "node", ",", "text", ",", "yes_color", "=", "yes_color", ",", "no_color", "=", "no_color", ")", "return", "graph" ]
32.155844
16.766234
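This is part of xgboost's public plotting API, so a usage sketch is straightforward; the tiny synthetic dataset and hyperparameters are made up for illustration:

import numpy as np
import xgboost as xgb

X = np.random.rand(100, 4)
y = (X[:, 0] > 0.5).astype(int)
booster = xgb.train({'max_depth': 2, 'objective': 'binary:logistic'},
                    xgb.DMatrix(X, label=y), num_boost_round=3)
graph = xgb.to_graphviz(booster, num_trees=0,
                        condition_node_params={'shape': 'box'})
graph.render('tree0')  # writes tree0.pdf next to the script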
def activate(name, lbn, target, profile='default', tgt_type='glob'):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Activate the named worker from the lbn load balancers at the targeted
    minions

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.activate:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    '''
    return _talk2modjk(name, lbn, target, 'worker_activate', profile, tgt_type)
[ "def", "activate", "(", "name", ",", "lbn", ",", "target", ",", "profile", "=", "'default'", ",", "tgt_type", "=", "'glob'", ")", ":", "return", "_talk2modjk", "(", "name", ",", "lbn", ",", "target", ",", "'worker_activate'", ",", "profile", ",", "tgt_type", ")" ]
29.809524
23.142857
def find_covalent_bonds(ampal, max_range=2.2, threshold=1.1, tag=True):
    """Finds all covalent bonds in the AMPAL object.

    Parameters
    ----------
    ampal : AMPAL Object
        Any AMPAL object with a `get_atoms` method.
    max_range : float, optional
        Used to define the sector size, so interactions at longer ranges
        will not be found.
    threshold : float, optional
        Allows deviation from ideal covalent bond distance to be included.
        For example, a value of 1.1 would allow interactions up to 10% further
        from the ideal distance to be included.
    tag : bool, optional
        If `True`, will add the covalent bond to the tags dictionary of
        each `Atom` involved in the interaction under the `covalent_bonds`
        key.
    """
    sectors = gen_sectors(ampal.get_atoms(), max_range * 1.1)
    bonds = []
    for sector in sectors.values():
        atoms = itertools.combinations(sector, 2)
        bonds.extend(covalent_bonds(atoms, threshold=threshold))
    bond_set = list(set(bonds))
    if tag:
        for bond in bond_set:
            a, b = bond.a, bond.b
            if 'covalent_bonds' not in a.tags:
                a.tags['covalent_bonds'] = [b]
            else:
                a.tags['covalent_bonds'].append(b)
            if 'covalent_bonds' not in b.tags:
                b.tags['covalent_bonds'] = [a]
            else:
                b.tags['covalent_bonds'].append(a)
    return bond_set
[ "def", "find_covalent_bonds", "(", "ampal", ",", "max_range", "=", "2.2", ",", "threshold", "=", "1.1", ",", "tag", "=", "True", ")", ":", "sectors", "=", "gen_sectors", "(", "ampal", ".", "get_atoms", "(", ")", ",", "max_range", "*", "1.1", ")", "bonds", "=", "[", "]", "for", "sector", "in", "sectors", ".", "values", "(", ")", ":", "atoms", "=", "itertools", ".", "combinations", "(", "sector", ",", "2", ")", "bonds", ".", "extend", "(", "covalent_bonds", "(", "atoms", ",", "threshold", "=", "threshold", ")", ")", "bond_set", "=", "list", "(", "set", "(", "bonds", ")", ")", "if", "tag", ":", "for", "bond", "in", "bond_set", ":", "a", ",", "b", "=", "bond", ".", "a", ",", "bond", ".", "b", "if", "'covalent_bonds'", "not", "in", "a", ".", "tags", ":", "a", ".", "tags", "[", "'covalent_bonds'", "]", "=", "[", "b", "]", "else", ":", "a", ".", "tags", "[", "'covalent_bonds'", "]", ".", "append", "(", "b", ")", "if", "'covalent_bonds'", "not", "in", "b", ".", "tags", ":", "b", ".", "tags", "[", "'covalent_bonds'", "]", "=", "[", "a", "]", "else", ":", "b", ".", "tags", "[", "'covalent_bonds'", "]", ".", "append", "(", "a", ")", "return", "bond_set" ]
38.459459
17.621622
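A hedged usage sketch with the ampal package this function operates on; the PDB file path is a placeholder, and `ampal.load_pdb` is assumed to be the loader exposed by the package:

import ampal

structure = ampal.load_pdb('1ubq.pdb')  # placeholder path to a local PDB file
bonds = find_covalent_bonds(structure)
print(len(bonds), 'covalent bonds found')
# with tag=True (the default) each Atom records its bonded partners
an_atom = bonds[0].a
print(an_atom.tags['covalent_bonds'])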
def _get_enrollments_list_page(self, params=None):
    """
    Submit request to retrieve enrollments list.

    Args:
        params (dict): Query parameters to use in the request. Valid parameters are:
            * course_id: Filters the result to course enrollments for the course
              corresponding to the given course ID. The value must be URL encoded.
              Optional.
            * username: List of comma-separated usernames. Filters the result to
              the course enrollments of the given users. Optional.
    """
    req_url = urljoin(self.base_url, self.enrollment_list_url)
    resp = self.requester.get(req_url, params=params)
    resp.raise_for_status()
    resp_json = resp.json()
    results = resp_json['results']
    next_url_str = resp_json.get('next')
    cursor = None
    qstr_cursor = None
    if next_url_str:
        next_url = urlparse(next_url_str)
        qstr = parse_qs(next_url.query)
        qstr_cursor = qstr.get('cursor')
    if qstr_cursor and isinstance(qstr_cursor, list):
        cursor = qstr_cursor[0]
    return results, cursor
[ "def", "_get_enrollments_list_page", "(", "self", ",", "params", "=", "None", ")", ":", "req_url", "=", "urljoin", "(", "self", ".", "base_url", ",", "self", ".", "enrollment_list_url", ")", "resp", "=", "self", ".", "requester", ".", "get", "(", "req_url", ",", "params", "=", "params", ")", "resp", ".", "raise_for_status", "(", ")", "resp_json", "=", "resp", ".", "json", "(", ")", "results", "=", "resp_json", "[", "'results'", "]", "next_url_str", "=", "resp_json", ".", "get", "(", "'next'", ")", "cursor", "=", "None", "qstr_cursor", "=", "None", "if", "next_url_str", ":", "next_url", "=", "urlparse", "(", "next_url_str", ")", "qstr", "=", "parse_qs", "(", "next_url", ".", "query", ")", "qstr_cursor", "=", "qstr", ".", "get", "(", "'cursor'", ")", "if", "qstr_cursor", "and", "isinstance", "(", "qstr_cursor", ",", "list", ")", ":", "cursor", "=", "qstr_cursor", "[", "0", "]", "return", "results", ",", "cursor" ]
40.827586
19.517241
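Since the method returns a (results, cursor) pair, a natural driver loops until the cursor runs out. This is a hypothetical helper, and passing the cursor back as a 'cursor' query parameter is inferred from the query-string parsing above:

def iter_enrollments(client, course_id):
    """Yield every enrollment across all pages (hypothetical helper)."""
    params = {'course_id': course_id}
    results, cursor = client._get_enrollments_list_page(params)
    for enrollment in results:
        yield enrollment
    while cursor:
        params['cursor'] = cursor  # parameter name inferred from parse_qs above
        results, cursor = client._get_enrollments_list_page(params)
        for enrollment in results:
            yield enrollment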
def bezier_unit_tangent(seg, t):
    """Returns the unit tangent of the segment at t.

    Notes
    -----
    If you receive a RuntimeWarning, try the following:
    >>> import numpy
    >>> old_numpy_error_settings = numpy.seterr(invalid='raise')
    This can be undone with:
    >>> numpy.seterr(**old_numpy_error_settings)
    """
    assert 0 <= t <= 1
    dseg = seg.derivative(t)

    # Note: dseg might be numpy value, use np.seterr(invalid='raise')
    try:
        unit_tangent = dseg/abs(dseg)
    except (ZeroDivisionError, FloatingPointError):
        # This may be a removable singularity, if so we just need to compute
        # the limit.
        # Note: limit{dseg / abs(dseg)} = sqrt(limit{dseg**2 / abs(dseg)**2})
        dseg_poly = seg.poly().deriv()
        dseg_abs_squared_poly = (real(dseg_poly) ** 2 +
                                 imag(dseg_poly) ** 2)
        try:
            unit_tangent = csqrt(rational_limit(dseg_poly**2,
                                                dseg_abs_squared_poly, t))
        except ValueError:
            bef = seg.poly().deriv()(t - 1e-4)
            aft = seg.poly().deriv()(t + 1e-4)
            mes = ("Unit tangent appears to not be well-defined at "
                   "t = {}, \n".format(t) +
                   "seg.poly().deriv()(t - 1e-4) = {}\n".format(bef) +
                   "seg.poly().deriv()(t + 1e-4) = {}".format(aft))
            raise ValueError(mes)
    return unit_tangent
[ "def", "bezier_unit_tangent", "(", "seg", ",", "t", ")", ":", "assert", "0", "<=", "t", "<=", "1", "dseg", "=", "seg", ".", "derivative", "(", "t", ")", "# Note: dseg might be numpy value, use np.seterr(invalid='raise')", "try", ":", "unit_tangent", "=", "dseg", "/", "abs", "(", "dseg", ")", "except", "(", "ZeroDivisionError", ",", "FloatingPointError", ")", ":", "# This may be a removable singularity, if so we just need to compute", "# the limit.", "# Note: limit{{dseg / abs(dseg)} = sqrt(limit{dseg**2 / abs(dseg)**2})", "dseg_poly", "=", "seg", ".", "poly", "(", ")", ".", "deriv", "(", ")", "dseg_abs_squared_poly", "=", "(", "real", "(", "dseg_poly", ")", "**", "2", "+", "imag", "(", "dseg_poly", ")", "**", "2", ")", "try", ":", "unit_tangent", "=", "csqrt", "(", "rational_limit", "(", "dseg_poly", "**", "2", ",", "dseg_abs_squared_poly", ",", "t", ")", ")", "except", "ValueError", ":", "bef", "=", "seg", ".", "poly", "(", ")", ".", "deriv", "(", ")", "(", "t", "-", "1e-4", ")", "aft", "=", "seg", ".", "poly", "(", ")", ".", "deriv", "(", ")", "(", "t", "+", "1e-4", ")", "mes", "=", "(", "\"Unit tangent appears to not be well-defined at \"", "\"t = {}, \\n\"", ".", "format", "(", "t", ")", "+", "\"seg.poly().deriv()(t - 1e-4) = {}\\n\"", ".", "format", "(", "bef", ")", "+", "\"seg.poly().deriv()(t + 1e-4) = {}\"", ".", "format", "(", "aft", ")", ")", "raise", "ValueError", "(", "mes", ")", "return", "unit_tangent" ]
39.583333
18.777778
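The non-singular branch is just the normalized derivative. A standalone sketch of that computation for a cubic Bezier, using the same complex-number representation of points that the `seg` objects use; the control points are made up:

# derivative of a cubic Bezier at t, with control points as complex numbers
def cubic_derivative(p0, p1, p2, p3, t):
    return (3 * (1 - t)**2 * (p1 - p0)
            + 6 * (1 - t) * t * (p2 - p1)
            + 3 * t**2 * (p3 - p2))

d = cubic_derivative(0j, 1 + 1j, 2 - 1j, 3 + 0j, 0.5)
print(d / abs(d))  # unit tangent at t = 0.5, a complex number of modulus 1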
def _set_auth(self, v, load=False): """ Setter method for auth, mapped from YANG variable /rbridge_id/snmp_server/user/auth (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_auth is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_auth() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'sha': {'value': 1}, u'noauth': {'value': 2}, u'md5': {'value': 0}},), default=unicode("noauth"), is_leaf=True, yang_name="auth", rest_name="auth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authorization protocol for username\n(Default=noauth)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """auth must be of a type compatible with enumeration""", 'defined-type': "brocade-snmp:enumeration", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'sha': {'value': 1}, u'noauth': {'value': 2}, u'md5': {'value': 0}},), default=unicode("noauth"), is_leaf=True, yang_name="auth", rest_name="auth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authorization protocol for username\n(Default=noauth)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True)""", }) self.__auth = t if hasattr(self, '_set'): self._set()
[ "def", "_set_auth", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "unicode", ",", "restriction_type", "=", "\"dict_key\"", ",", "restriction_arg", "=", "{", "u'sha'", ":", "{", "'value'", ":", "1", "}", ",", "u'noauth'", ":", "{", "'value'", ":", "2", "}", ",", "u'md5'", ":", "{", "'value'", ":", "0", "}", "}", ",", ")", ",", "default", "=", "unicode", "(", "\"noauth\"", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"auth\"", ",", "rest_name", "=", "\"auth\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Authorization protocol for username\\n(Default=noauth)'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-snmp'", ",", "defining_module", "=", "'brocade-snmp'", ",", "yang_type", "=", "'enumeration'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"auth must be of a type compatible with enumeration\"\"\"", ",", "'defined-type'", ":", "\"brocade-snmp:enumeration\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'sha': {'value': 1}, u'noauth': {'value': 2}, u'md5': {'value': 0}},), default=unicode(\"noauth\"), is_leaf=True, yang_name=\"auth\", rest_name=\"auth\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authorization protocol for username\\n(Default=noauth)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__auth", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
92.136364
43.954545
def connect_get_namespaced_pod_attach(self, name, namespace, **kwargs): # noqa: E501 """connect_get_namespaced_pod_attach # noqa: E501 connect GET requests to attach of Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_get_namespaced_pod_attach(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodAttachOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str container: The container in which to execute the command. Defaults to only container if there is only one container in the pod. :param bool stderr: Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true. :param bool stdin: Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false. :param bool stdout: Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true. :param bool tty: TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false. :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.connect_get_namespaced_pod_attach_with_http_info(name, namespace, **kwargs) # noqa: E501 else: (data) = self.connect_get_namespaced_pod_attach_with_http_info(name, namespace, **kwargs) # noqa: E501 return data
[ "def", "connect_get_namespaced_pod_attach", "(", "self", ",", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "connect_get_namespaced_pod_attach_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "connect_get_namespaced_pod_attach_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
69.407407
42.185185
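A usage sketch with the kubernetes Python client this generated method belongs to; the pod name, namespace, and container are placeholders. WebSocket-based calls such as attach and exec are normally driven through kubernetes.stream.stream rather than called directly:

from kubernetes import client, config
from kubernetes.stream import stream

config.load_kube_config()
v1 = client.CoreV1Api()
resp = stream(v1.connect_get_namespaced_pod_attach,
              'my-pod', 'default',          # placeholder pod and namespace
              container='app', stderr=True,
              stdin=False, stdout=True, tty=False)
print(resp)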
def invoke_hook_bolt_ack(self, heron_tuple, process_latency_ns):
  """invoke task hooks for every time bolt acks a tuple

  :type heron_tuple: HeronTuple
  :param heron_tuple: tuple that is acked
  :type process_latency_ns: float
  :param process_latency_ns: process latency in nano seconds
  """
  if len(self.task_hooks) > 0:
    bolt_ack_info = BoltAckInfo(heron_tuple=heron_tuple,
                                acking_task_id=self.get_task_id(),
                                process_latency_ms=process_latency_ns *
                                system_constants.NS_TO_MS)
    for task_hook in self.task_hooks:
      task_hook.bolt_ack(bolt_ack_info)
[ "def", "invoke_hook_bolt_ack", "(", "self", ",", "heron_tuple", ",", "process_latency_ns", ")", ":", "if", "len", "(", "self", ".", "task_hooks", ")", ">", "0", ":", "bolt_ack_info", "=", "BoltAckInfo", "(", "heron_tuple", "=", "heron_tuple", ",", "acking_task_id", "=", "self", ".", "get_task_id", "(", ")", ",", "process_latency_ms", "=", "process_latency_ns", "*", "system_constants", ".", "NS_TO_MS", ")", "for", "task_hook", "in", "self", ".", "task_hooks", ":", "task_hook", ".", "bolt_ack", "(", "bolt_ack_info", ")" ]
45.642857
15.5
def setup(self, level=None, log_file=None, json=None):
    '''
    Load everything up. Note that any arg here will override both
    default and custom settings

    @param level: the log level
    @param log_file: boolean t/f whether to log to a file, else stdout
    @param json: boolean t/f whether to write the logs in json
    '''
    self.settings = self.wrapper.load(self.settings_name)

    my_level = level if level else self.settings['LOG_LEVEL']
    # negate because logger wants True for std out
    my_output = not log_file if log_file else self.settings['LOG_STDOUT']
    my_json = json if json else self.settings['LOG_JSON']
    self.logger = LogFactory.get_instance(json=my_json, stdout=my_output,
                                          level=my_level,
                                          name=self.settings['LOGGER_NAME'],
                                          dir=self.settings['LOG_DIR'],
                                          file=self.settings['LOG_FILE'],
                                          bytes=self.settings['LOG_MAX_BYTES'],
                                          backups=self.settings['LOG_BACKUPS'])

    self.redis_conn = redis.StrictRedis(host=self.settings['REDIS_HOST'],
                                        port=self.settings['REDIS_PORT'],
                                        db=self.settings['REDIS_DB'])

    try:
        self.redis_conn.info()
        self.logger.debug("Successfully connected to Redis")
    except ConnectionError:
        self.logger.error("Failed to connect to Redis")
        # essential to functionality
        sys.exit(1)

    self._load_plugins()
    self._setup_stats()
[ "def", "setup", "(", "self", ",", "level", "=", "None", ",", "log_file", "=", "None", ",", "json", "=", "None", ")", ":", "self", ".", "settings", "=", "self", ".", "wrapper", ".", "load", "(", "self", ".", "settings_name", ")", "my_level", "=", "level", "if", "level", "else", "self", ".", "settings", "[", "'LOG_LEVEL'", "]", "# negate because logger wants True for std out", "my_output", "=", "not", "log_file", "if", "log_file", "else", "self", ".", "settings", "[", "'LOG_STDOUT'", "]", "my_json", "=", "json", "if", "json", "else", "self", ".", "settings", "[", "'LOG_JSON'", "]", "self", ".", "logger", "=", "LogFactory", ".", "get_instance", "(", "json", "=", "my_json", ",", "stdout", "=", "my_output", ",", "level", "=", "my_level", ",", "name", "=", "self", ".", "settings", "[", "'LOGGER_NAME'", "]", ",", "dir", "=", "self", ".", "settings", "[", "'LOG_DIR'", "]", ",", "file", "=", "self", ".", "settings", "[", "'LOG_FILE'", "]", ",", "bytes", "=", "self", ".", "settings", "[", "'LOG_MAX_BYTES'", "]", ",", "backups", "=", "self", ".", "settings", "[", "'LOG_BACKUPS'", "]", ")", "self", ".", "redis_conn", "=", "redis", ".", "StrictRedis", "(", "host", "=", "self", ".", "settings", "[", "'REDIS_HOST'", "]", ",", "port", "=", "self", ".", "settings", "[", "'REDIS_PORT'", "]", ",", "db", "=", "self", ".", "settings", "[", "'REDIS_DB'", "]", ")", "try", ":", "self", ".", "redis_conn", ".", "info", "(", ")", "self", ".", "logger", ".", "debug", "(", "\"Successfully connected to Redis\"", ")", "except", "ConnectionError", ":", "self", ".", "logger", ".", "error", "(", "\"Failed to connect to Redis\"", ")", "# essential to functionality", "sys", ".", "exit", "(", "1", ")", "self", ".", "_load_plugins", "(", ")", "self", ".", "_setup_stats", "(", ")" ]
47.081081
26.162162
def _setResult(self, result):
    """
    Sets the result. If the result is already set, raises C{AlreadyCalledError}.

    @raise AlreadyCalledError: The result was already set.

    @return: C{None}, if the result was successfully set.
    """
    if self._result is not _NO_RESULT:
        raise AlreadyCalledError()
    self._result = result
[ "def", "_setResult", "(", "self", ",", "result", ")", ":", "if", "self", ".", "_result", "is", "not", "_NO_RESULT", ":", "raise", "AlreadyCalledError", "(", ")", "self", ".", "_result", "=", "result" ]
31.166667
15
def packSeptets(octets, padBits=0):
    """ Packs the specified octets into septets

    Typically the output of encodeGsm7 would be used as input to this function.
    The resulting bytearray contains the original GSM-7 characters packed into
    septets ready for transmission.

    :rtype: bytearray
    """
    result = bytearray()
    if type(octets) == str:
        octets = iter(rawStrToByteArray(octets))
    elif type(octets) == bytearray:
        octets = iter(octets)
    shift = padBits
    if padBits == 0:
        prevSeptet = next(octets)
    else:
        prevSeptet = 0x00
    for octet in octets:
        septet = octet & 0x7f
        if shift == 7:
            # prevSeptet has already been fully added to result
            shift = 0
            prevSeptet = septet
            continue
        b = ((septet << (7 - shift)) & 0xFF) | (prevSeptet >> shift)
        prevSeptet = septet
        shift += 1
        result.append(b)
    if shift != 7:
        # There is a bit "left over" from prevSeptet
        result.append(prevSeptet >> shift)
    return result
[ "def", "packSeptets", "(", "octets", ",", "padBits", "=", "0", ")", ":", "result", "=", "bytearray", "(", ")", "if", "type", "(", "octets", ")", "==", "str", ":", "octets", "=", "iter", "(", "rawStrToByteArray", "(", "octets", ")", ")", "elif", "type", "(", "octets", ")", "==", "bytearray", ":", "octets", "=", "iter", "(", "octets", ")", "shift", "=", "padBits", "if", "padBits", "==", "0", ":", "prevSeptet", "=", "next", "(", "octets", ")", "else", ":", "prevSeptet", "=", "0x00", "for", "octet", "in", "octets", ":", "septet", "=", "octet", "&", "0x7f", "if", "shift", "==", "7", ":", "# prevSeptet has already been fully added to result", "shift", "=", "0", "prevSeptet", "=", "septet", "continue", "b", "=", "(", "(", "septet", "<<", "(", "7", "-", "shift", ")", ")", "&", "0xFF", ")", "|", "(", "prevSeptet", ">>", "shift", ")", "prevSeptet", "=", "septet", "shift", "+=", "1", "result", ".", "append", "(", "b", ")", "if", "shift", "!=", "7", ":", "# There is a bit \"left over\" from prevSeptet", "result", ".", "append", "(", "prevSeptet", ">>", "shift", ")", "return", "result" ]
32.757576
17.484848
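A quick sanity check using the well-known GSM-7 packing of "hello" (lowercase ASCII letters share their codes with GSM 03.38, so the bytes can be fed in directly), given the definition above:

text = bytearray(b'hello')
packed = packSeptets(text)
print(''.join('%02X' % b for b in packed))  # E8329BFD06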
def create_issue(self, project_id, summary, issue_type_id, priority_id, extra_request_params=None):
    """
    client = BacklogClient("your_space_name", "your_api_key")
    project_key = "YOUR_PROJECT"
    project_id = client.get_project_id(project_key)
    issue_type_id = client.project_issue_types(project_key)[0][u"id"]
    priority_id = client.priorities()[0][u"id"]
    client.create_issue(project_id,
                        u"some summary",
                        issue_type_id,
                        priority_id,
                        {"description": u"a is b and c or d."})
    """
    # a mutable default ({}) would be shared and mutated across calls,
    # so build a fresh dict on every invocation instead
    request_params = dict(extra_request_params or {})
    request_params["projectId"] = project_id
    request_params["summary"] = summary
    request_params["issueTypeId"] = issue_type_id
    request_params["priorityId"] = priority_id
    return self.do("POST", "issues",
                   request_params=request_params,
                   )
[ "def", "create_issue", "(", "self", ",", "project_id", ",", "summary", ",", "issue_type_id", ",", "priority_id", ",", "extra_request_params", "=", "{", "}", ")", ":", "request_params", "=", "extra_request_params", "request_params", "[", "\"projectId\"", "]", "=", "project_id", "request_params", "[", "\"summary\"", "]", "=", "summary", "request_params", "[", "\"issueTypeId\"", "]", "=", "issue_type_id", "request_params", "[", "\"priorityId\"", "]", "=", "priority_id", "return", "self", ".", "do", "(", "\"POST\"", ",", "\"issues\"", ",", "request_params", "=", "request_params", ",", ")" ]
41.125
15.291667
def to_team(team):
    """Serializes team to id string
    :param team: object to serialize
    :return: string id
    """
    from sevenbridges.models.team import Team
    if not team:
        raise SbgError('Team is required!')
    elif isinstance(team, Team):
        return team.id
    elif isinstance(team, six.string_types):
        return team
    else:
        raise SbgError('Invalid team parameter!')
[ "def", "to_team", "(", "team", ")", ":", "from", "sevenbridges", ".", "models", ".", "team", "import", "Team", "if", "not", "team", ":", "raise", "SbgError", "(", "'Team is required!'", ")", "elif", "isinstance", "(", "team", ",", "Team", ")", ":", "return", "team", ".", "id", "elif", "isinstance", "(", "team", ",", "six", ".", "string_types", ")", ":", "return", "team", "else", ":", "raise", "SbgError", "(", "'Invalid team parameter!'", ")" ]
32.071429
11.071429
def __build_config_block(self, config_block_node):
    """parse `config_block` in each section

    Args:
        config_block_node (TreeNode): Description

    Returns:
        [line_node1, line_node2, ...]
    """
    node_lists = []

    for line_node in config_block_node:
        if isinstance(line_node, pegnode.ConfigLine):
            node_lists.append(self.__build_config(line_node))
        elif isinstance(line_node, pegnode.OptionLine):
            node_lists.append(self.__build_option(line_node))
        elif isinstance(line_node, pegnode.ServerLine):
            node_lists.append(self.__build_server(line_node))
        elif isinstance(line_node, pegnode.BindLine):
            node_lists.append(self.__build_bind(line_node))
        elif isinstance(line_node, pegnode.AclLine):
            node_lists.append(self.__build_acl(line_node))
        elif isinstance(line_node, pegnode.BackendLine):
            node_lists.append(self.__build_usebackend(line_node))
        elif isinstance(line_node, pegnode.UserLine):
            node_lists.append(self.__build_user(line_node))
        elif isinstance(line_node, pegnode.GroupLine):
            node_lists.append(self.__build_group(line_node))
        else:
            # may be a blank_line or comment_line
            pass
    return node_lists
[ "def", "__build_config_block", "(", "self", ",", "config_block_node", ")", ":", "node_lists", "=", "[", "]", "for", "line_node", "in", "config_block_node", ":", "if", "isinstance", "(", "line_node", ",", "pegnode", ".", "ConfigLine", ")", ":", "node_lists", ".", "append", "(", "self", ".", "__build_config", "(", "line_node", ")", ")", "elif", "isinstance", "(", "line_node", ",", "pegnode", ".", "OptionLine", ")", ":", "node_lists", ".", "append", "(", "self", ".", "__build_option", "(", "line_node", ")", ")", "elif", "isinstance", "(", "line_node", ",", "pegnode", ".", "ServerLine", ")", ":", "node_lists", ".", "append", "(", "self", ".", "__build_server", "(", "line_node", ")", ")", "elif", "isinstance", "(", "line_node", ",", "pegnode", ".", "BindLine", ")", ":", "node_lists", ".", "append", "(", "self", ".", "__build_bind", "(", "line_node", ")", ")", "elif", "isinstance", "(", "line_node", ",", "pegnode", ".", "AclLine", ")", ":", "node_lists", ".", "append", "(", "self", ".", "__build_acl", "(", "line_node", ")", ")", "elif", "isinstance", "(", "line_node", ",", "pegnode", ".", "BackendLine", ")", ":", "node_lists", ".", "append", "(", "self", ".", "__build_usebackend", "(", "line_node", ")", ")", "elif", "isinstance", "(", "line_node", ",", "pegnode", ".", "UserLine", ")", ":", "node_lists", ".", "append", "(", "self", ".", "__build_user", "(", "line_node", ")", ")", "elif", "isinstance", "(", "line_node", ",", "pegnode", ".", "GroupLine", ")", ":", "node_lists", ".", "append", "(", "self", ".", "__build_group", "(", "line_node", ")", ")", "else", ":", "# may blank_line, comment_line", "pass", "return", "node_lists" ]
39.578947
15
def index(self):
    """Retrieve the attribute index number.

    Args::

        no argument

    Returns::

        attribute index number (starting at 0)

    C library equivalent : SDfindattr
    """
    self._index = _C.SDfindattr(self._obj._id, self._name)
    _checkErr('find', self._index, 'illegal attribute name')
    return self._index
[ "def", "index", "(", "self", ")", ":", "self", ".", "_index", "=", "_C", ".", "SDfindattr", "(", "self", ".", "_obj", ".", "_id", ",", "self", ".", "_name", ")", "_checkErr", "(", "'find'", ",", "self", ".", "_index", ",", "'illegal attribute name'", ")", "return", "self", ".", "_index" ]
23.764706
23.588235
def _pretty_print_event(event, colored):
    """
    Basic formatter to convert an event object to string
    """
    event.timestamp = colored.yellow(event.timestamp)
    event.log_stream_name = colored.cyan(event.log_stream_name)
    return ' '.join([event.log_stream_name, event.timestamp, event.message])
[ "def", "_pretty_print_event", "(", "event", ",", "colored", ")", ":", "event", ".", "timestamp", "=", "colored", ".", "yellow", "(", "event", ".", "timestamp", ")", "event", ".", "log_stream_name", "=", "colored", ".", "cyan", "(", "event", ".", "log_stream_name", ")", "return", "' '", ".", "join", "(", "[", "event", ".", "log_stream_name", ",", "event", ".", "timestamp", ",", "event", ".", "message", "]", ")" ]
40.75
18
def option_configure(debug=False, path=None):
    """
    Summary:
        Initiate configuration menu to customize metal runtime options.
        Console script ```keyconfig``` invokes this option_configure directly
        in debug mode to display the contents of the local config file (if exists)

    Args:
        :path (str): full path to default local configuration file location
        :debug (bool): debug flag, when True prints out contents of local config file

    Returns:
        TYPE (bool): Configuration Success | Failure
    """
    if CONFIG_SCRIPT in sys.argv[0]:
        debug = True    # set debug mode if invoked from CONFIG_SCRIPT
    if path is None:
        path = local_config['PROJECT']['CONFIG_PATH']
    if debug:
        if os.path.isfile(path):
            debug_mode('local_config file: ', local_config, debug, halt=True)
        else:
            msg = """  Local config file does not yet exist. Run:

            $ metal --configure """
            debug_mode(msg, {'CONFIG_PATH': path}, debug, halt=True)
    r = configuration.init(debug, path)
    return r
[ "def", "option_configure", "(", "debug", "=", "False", ",", "path", "=", "None", ")", ":", "if", "CONFIG_SCRIPT", "in", "sys", ".", "argv", "[", "0", "]", ":", "debug", "=", "True", "# set debug mode if invoked from CONFIG_SCRIPT", "if", "path", "is", "None", ":", "path", "=", "local_config", "[", "'PROJECT'", "]", "[", "'CONFIG_PATH'", "]", "if", "debug", ":", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "debug_mode", "(", "'local_config file: '", ",", "local_config", ",", "debug", ",", "halt", "=", "True", ")", "else", ":", "msg", "=", "\"\"\" Local config file does not yet exist. Run:\n\n $ metal --configure \"\"\"", "debug_mode", "(", "msg", ",", "{", "'CONFIG_PATH'", ":", "path", "}", ",", "debug", ",", "halt", "=", "True", ")", "r", "=", "configuration", ".", "init", "(", "debug", ",", "path", ")", "return", "r" ]
39.851852
20.962963
def recursive_index_decode(int_array, max=32767, min=-32768):
    """Unpack an array of integers using recursive indexing.

    :param int_array: the input array of integers
    :param max: the maximum integer size
    :param min: the minimum integer size
    :return the array of integers after recursive index decoding"""
    out_arr = []
    decoded_val = 0
    for item in int_array.tolist():
        if item == max or item == min:
            decoded_val += item
        else:
            decoded_val += item
            out_arr.append(decoded_val)
            decoded_val = 0
    return numpy.asarray(out_arr, dtype=numpy.int32)
[ "def", "recursive_index_decode", "(", "int_array", ",", "max", "=", "32767", ",", "min", "=", "-", "32768", ")", ":", "out_arr", "=", "[", "]", "decoded_val", "=", "0", "for", "item", "in", "int_array", ".", "tolist", "(", ")", ":", "if", "item", "==", "max", "or", "item", "==", "min", ":", "decoded_val", "+=", "item", "else", ":", "decoded_val", "+=", "item", "out_arr", ".", "append", "(", "decoded_val", ")", "decoded_val", "=", "0", "return", "numpy", ".", "asarray", "(", "out_arr", ",", "dtype", "=", "numpy", ".", "int32", ")" ]
36.058824
11.529412
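A worked example of the decoding rule above: the sentinel values max/min chain into the following element, so one decoded integer can span several encoded ones (the function is assumed importable as defined):

import numpy

# 32767 chains into the next value: 32767 + 100 -> 32867;
# -32768 chains into -1 -> -32769; 5 stands alone.
encoded = numpy.array([32767, 100, -32768, -1, 5], dtype=numpy.int16)
print(recursive_index_decode(encoded))  # -> [32867, -32769, 5]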
def _interpolate_missing_data(data, mask, method='cubic'): """ Interpolate missing data as identified by the ``mask`` keyword. Parameters ---------- data : 2D `~numpy.ndarray` An array containing the 2D image. mask : 2D bool `~numpy.ndarray` A 2D boolean mask array with the same shape as the input ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. The masked data points are those that will be interpolated. method : {'cubic', 'nearest'}, optional The method used to interpolate the missing data: * ``'cubic'``: Masked data are interpolated using 2D cubic splines. This is the default. * ``'nearest'``: Masked data are interpolated using nearest-neighbor interpolation. Returns ------- data_interp : 2D `~numpy.ndarray` The interpolated 2D image. """ from scipy import interpolate data_interp = np.array(data, copy=True) if len(data_interp.shape) != 2: raise ValueError('data must be a 2D array.') if mask.shape != data.shape: raise ValueError('mask and data must have the same shape.') y, x = np.indices(data_interp.shape) xy = np.dstack((x[~mask].ravel(), y[~mask].ravel()))[0] z = data_interp[~mask].ravel() if method == 'nearest': interpol = interpolate.NearestNDInterpolator(xy, z) elif method == 'cubic': interpol = interpolate.CloughTocher2DInterpolator(xy, z) else: raise ValueError('Unsupported interpolation method.') xy_missing = np.dstack((x[mask].ravel(), y[mask].ravel()))[0] data_interp[mask] = interpol(xy_missing) return data_interp
[ "def", "_interpolate_missing_data", "(", "data", ",", "mask", ",", "method", "=", "'cubic'", ")", ":", "from", "scipy", "import", "interpolate", "data_interp", "=", "np", ".", "array", "(", "data", ",", "copy", "=", "True", ")", "if", "len", "(", "data_interp", ".", "shape", ")", "!=", "2", ":", "raise", "ValueError", "(", "'data must be a 2D array.'", ")", "if", "mask", ".", "shape", "!=", "data", ".", "shape", ":", "raise", "ValueError", "(", "'mask and data must have the same shape.'", ")", "y", ",", "x", "=", "np", ".", "indices", "(", "data_interp", ".", "shape", ")", "xy", "=", "np", ".", "dstack", "(", "(", "x", "[", "~", "mask", "]", ".", "ravel", "(", ")", ",", "y", "[", "~", "mask", "]", ".", "ravel", "(", ")", ")", ")", "[", "0", "]", "z", "=", "data_interp", "[", "~", "mask", "]", ".", "ravel", "(", ")", "if", "method", "==", "'nearest'", ":", "interpol", "=", "interpolate", ".", "NearestNDInterpolator", "(", "xy", ",", "z", ")", "elif", "method", "==", "'cubic'", ":", "interpol", "=", "interpolate", ".", "CloughTocher2DInterpolator", "(", "xy", ",", "z", ")", "else", ":", "raise", "ValueError", "(", "'Unsupported interpolation method.'", ")", "xy_missing", "=", "np", ".", "dstack", "(", "(", "x", "[", "mask", "]", ".", "ravel", "(", ")", ",", "y", "[", "mask", "]", ".", "ravel", "(", ")", ")", ")", "[", "0", "]", "data_interp", "[", "mask", "]", "=", "interpol", "(", "xy_missing", ")", "return", "data_interp" ]
30.709091
20.818182
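A short sketch of the interpolator on a linear ramp, where cubic interpolation recovers the masked pixel essentially exactly; it assumes scipy is installed, the helper is importable, and `np` is bound at module level as the function expects:

import numpy as np

data = np.arange(25, dtype=float).reshape(5, 5)  # smooth ramp image, data[2, 2] == 12
mask = np.zeros((5, 5), dtype=bool)
mask[2, 2] = True                                # mark the centre pixel as missing
fixed = _interpolate_missing_data(data, mask, method='cubic')
print(fixed[2, 2])                               # ~12.0, the ramp value at (2, 2)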
def encode(B): """ Encode data using Hamming(7, 4) code. E.g.: encode([0, 0, 1, 1]) encode([[0, 0, 0, 1], [0, 1, 0, 1]]) :param array B: binary data to encode (must be shaped as (4, ) or (-1, 4)). """ B = array(B) flatten = False if len(B.shape) == 1: flatten = True B = B.reshape(1, -1) if B.shape[1] != data_size: raise ValueError('Data must be shaped as (4, ) or (-1, 4)') C = dot(G, B.T).T % 2 if flatten: C = C.flatten() return C
[ "def", "encode", "(", "B", ")", ":", "B", "=", "array", "(", "B", ")", "flatten", "=", "False", "if", "len", "(", "B", ".", "shape", ")", "==", "1", ":", "flatten", "=", "True", "B", "=", "B", ".", "reshape", "(", "1", ",", "-", "1", ")", "if", "B", ".", "shape", "[", "1", "]", "!=", "data_size", ":", "raise", "ValueError", "(", "'Data must be shaped as (4, ) or (-1, 4)'", ")", "C", "=", "dot", "(", "G", ",", "B", ".", "T", ")", ".", "T", "%", "2", "if", "flatten", ":", "C", "=", "C", ".", "flatten", "(", ")", "return", "C" ]
18.714286
25
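A worked call of the encoder above. G and data_size are module globals in the source; the standard Hamming(7,4) generator below is a stand-in so the snippet runs on its own:

from numpy import array, dot

data_size = 4
G = array([[1, 1, 0, 1],   # p1 = d1 + d2 + d4
           [1, 0, 1, 1],   # p2 = d1 + d3 + d4
           [1, 0, 0, 0],   # d1
           [0, 1, 1, 1],   # p3 = d2 + d3 + d4
           [0, 1, 0, 0],   # d2
           [0, 0, 1, 0],   # d3
           [0, 0, 0, 1]])  # d4
print(encode([0, 0, 1, 1]))  # -> [1 0 0 0 0 1 1] with this G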
def init_argparser_working_dir( self, argparser, explanation='', help_template=( 'the working directory; %(explanation)s' 'default is current working directory (%(cwd)s)'), ): """ Subclasses could add an extra explanation of how this is used. Arguments explanation Explanation text for the default help template help_template A standard help message for this option. """ cwd = self.toolchain.join_cwd() argparser.add_argument( '--working-dir', dest=WORKING_DIR, metavar=metavar(WORKING_DIR), default=cwd, help=help_template % {'explanation': explanation, 'cwd': cwd}, )
[ "def", "init_argparser_working_dir", "(", "self", ",", "argparser", ",", "explanation", "=", "''", ",", "help_template", "=", "(", "'the working directory; %(explanation)s'", "'default is current working directory (%(cwd)s)'", ")", ",", ")", ":", "cwd", "=", "self", ".", "toolchain", ".", "join_cwd", "(", ")", "argparser", ".", "add_argument", "(", "'--working-dir'", ",", "dest", "=", "WORKING_DIR", ",", "metavar", "=", "metavar", "(", "WORKING_DIR", ")", ",", "default", "=", "cwd", ",", "help", "=", "help_template", "%", "{", "'explanation'", ":", "explanation", ",", "'cwd'", ":", "cwd", "}", ",", ")" ]
30.6
17.96
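A standalone approximation of the option this method registers; WORKING_DIR and metavar() come from the calmjs runtime, so plain stand-ins are used here:

import argparse
import os

cwd = os.getcwd()
parser = argparse.ArgumentParser()
parser.add_argument('--working-dir', dest='working_dir', metavar='<working_dir>', default=cwd, help='the working directory; default is current working directory (%s)' % cwd)
print(parser.parse_args(['--working-dir', '/tmp']).working_dir)  # /tmp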
def free (self): """ Returns free properties which are not dependency properties. """ result = [p for p in self.lazy_properties if not p.feature.incidental and p.feature.free] result.extend(self.free_) return result
[ "def", "free", "(", "self", ")", ":", "result", "=", "[", "p", "for", "p", "in", "self", ".", "lazy_properties", "if", "not", "p", ".", "feature", ".", "incidental", "and", "p", ".", "feature", ".", "free", "]", "result", ".", "extend", "(", "self", ".", "free_", ")", "return", "result" ]
38.142857
12
def readline(self, size=-1): "Ignore the `size` since a complete line must be processed." try: record = next(self.reader) return self.outdel.join(self.process_line(record)) + '\n' except StopIteration: return ''
[ "def", "readline", "(", "self", ",", "size", "=", "-", "1", ")", ":", "try", ":", "record", "=", "next", "(", "self", ".", "reader", ")", "return", "self", ".", "outdel", ".", "join", "(", "self", ".", "process_line", "(", "record", ")", ")", "+", "'\\n'", "except", "StopIteration", ":", "return", "''" ]
37.857143
18.428571
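A tiny harness for the method above, showing the contract it assumes: self.reader yields record lists, self.process_line transforms one record, and self.outdel joins the fields. All stand-ins are hypothetical, and readline is assumed reachable as a plain function:

class _Demo:
    outdel = ','
    reader = iter([['a', 'b'], ['c', 'd']])
    process_line = staticmethod(lambda record: [field.upper() for field in record])
    readline = readline  # reuse the method defined above

d = _Demo()
print(repr(d.readline()))  # 'A,B\n'
print(repr(d.readline()))  # 'C,D\n'
print(repr(d.readline()))  # '' once the reader is exhausted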
def translate_features_to_letter_annotations(protein, more_sites=None): """Store select uniprot features (sites) as letter annotations with the key as the type of site and the values as a list of booleans""" from ssbio.databases.uniprot import longname_sites from collections import defaultdict sites = longname_sites ## longname_sites = ["active site", "binding site", "metal ion-binding site", "site"] sites.append('nucleotide phosphate-binding region') sites.append('DNA-binding region') sites.append('intramembrane region') sites.append("transmembrane region") sites.append("catalyticResidue") ## ADD MORE IF YOU WANT if more_sites: more_sites = ssbio.utils.force_list(more_sites) sites.extend(more_sites) sites = list(set(sites)) for site in sites: protein.representative_sequence.letter_annotations[site] = [False] * protein.representative_sequence.seq_len to_store = defaultdict(list) for f in protein.representative_sequence.features: if f.type in sites: to_store[f.type].append(f) for site, feature in to_store.items(): try: positions = [int(f.location.start) for f in feature] except TypeError: log.error('Protein {}, SeqProp {}: unable to translate feature {} into letter annotation'.format(protein.id, protein.representative_sequence.id, site)) continue feat_letter_anno = [] for x in range(protein.representative_sequence.seq_len): if x in positions: idx = positions.index(x) if 'description' in feature[idx].qualifiers: feat_letter_anno.append(feature[idx].qualifiers['description']) else: feat_letter_anno.append(True) else: feat_letter_anno.append(False) protein.representative_sequence.letter_annotations[site] = feat_letter_anno
[ "def", "translate_features_to_letter_annotations", "(", "protein", ",", "more_sites", "=", "None", ")", ":", "from", "ssbio", ".", "databases", ".", "uniprot", "import", "longname_sites", "from", "collections", "import", "defaultdict", "sites", "=", "longname_sites", "## longname_sites = [\"active site\", \"binding site\", \"metal ion-binding site\", \"site\"]", "sites", ".", "append", "(", "'nucleotide phosphate-binding region'", ")", "sites", ".", "append", "(", "'DNA-binding region'", ")", "sites", ".", "append", "(", "'intramembrane region'", ")", "sites", ".", "append", "(", "\"transmembrane region\"", ")", "sites", ".", "append", "(", "\"catalyticResidue\"", ")", "## ADD MORE IF YOU WANT", "if", "more_sites", ":", "more_sites", "=", "ssbio", ".", "utils", ".", "force_list", "(", "more_sites", ")", "sites", ".", "extend", "(", "more_sites", ")", "sites", "=", "list", "(", "set", "(", "sites", ")", ")", "for", "site", "in", "sites", ":", "protein", ".", "representative_sequence", ".", "letter_annotations", "[", "site", "]", "=", "[", "False", "]", "*", "protein", ".", "representative_sequence", ".", "seq_len", "to_store", "=", "defaultdict", "(", "list", ")", "for", "f", "in", "protein", ".", "representative_sequence", ".", "features", ":", "if", "f", ".", "type", "in", "sites", ":", "to_store", "[", "f", ".", "type", "]", ".", "append", "(", "f", ")", "for", "site", ",", "feature", "in", "to_store", ".", "items", "(", ")", ":", "try", ":", "positions", "=", "[", "int", "(", "f", ".", "location", ".", "start", ")", "for", "f", "in", "feature", "]", "except", "TypeError", ":", "log", ".", "error", "(", "'Protein {}, SeqProp {}: unable to translate feature {} into letter annotation'", ".", "format", "(", "protein", ".", "id", ",", "protein", ".", "representative_sequence", ".", "id", ",", "site", ")", ")", "continue", "feat_letter_anno", "=", "[", "]", "for", "x", "in", "range", "(", "protein", ".", "representative_sequence", ".", "seq_len", ")", ":", "if", "x", "in", "positions", ":", "idx", "=", "positions", ".", "index", "(", "x", ")", "if", "'description'", "in", "feature", "[", "idx", "]", ".", "qualifiers", ":", "feat_letter_anno", ".", "append", "(", "feature", "[", "idx", "]", ".", "qualifiers", "[", "'description'", "]", ")", "else", ":", "feat_letter_anno", ".", "append", "(", "True", ")", "else", ":", "feat_letter_anno", ".", "append", "(", "False", ")", "protein", ".", "representative_sequence", ".", "letter_annotations", "[", "site", "]", "=", "feat_letter_anno" ]
46.155556
22.555556
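A hedged call shape for the routine above; it assumes `protein` is an ssbio Protein whose representative sequence and UniProt features were already loaded upstream, and the extra site name is illustrative:

translate_features_to_letter_annotations(protein, more_sites=['modified residue'])
ann = protein.representative_sequence.letter_annotations
print(sum(ann['transmembrane region']))  # number of residues flagged as transmembrane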
def list_subscriptions(self, target_id=None, ids=None, query_flags=None): """ListSubscriptions. [Preview API] :param str target_id: :param [str] ids: :param str query_flags: :rtype: [NotificationSubscription] """ query_parameters = {} if target_id is not None: query_parameters['targetId'] = self._serialize.query('target_id', target_id, 'str') if ids is not None: ids = ",".join(ids) query_parameters['ids'] = self._serialize.query('ids', ids, 'str') if query_flags is not None: query_parameters['queryFlags'] = self._serialize.query('query_flags', query_flags, 'str') response = self._send(http_method='GET', location_id='70f911d6-abac-488c-85b3-a206bf57e165', version='5.0-preview.1', query_parameters=query_parameters) return self._deserialize('[NotificationSubscription]', self._unwrap_collection(response))
[ "def", "list_subscriptions", "(", "self", ",", "target_id", "=", "None", ",", "ids", "=", "None", ",", "query_flags", "=", "None", ")", ":", "query_parameters", "=", "{", "}", "if", "target_id", "is", "not", "None", ":", "query_parameters", "[", "'targetId'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'target_id'", ",", "target_id", ",", "'str'", ")", "if", "ids", "is", "not", "None", ":", "ids", "=", "\",\"", ".", "join", "(", "ids", ")", "query_parameters", "[", "'ids'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'ids'", ",", "ids", ",", "'str'", ")", "if", "query_flags", "is", "not", "None", ":", "query_parameters", "[", "'queryFlags'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'query_flags'", ",", "query_flags", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'70f911d6-abac-488c-85b3-a206bf57e165'", ",", "version", "=", "'5.0-preview.1'", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[NotificationSubscription]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
49.238095
20.571429
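A hedged usage sketch through the azure-devops client factory; the organization URL and token are placeholders:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/myorg', creds=BasicAuthentication('', '<personal-access-token>'))
client = connection.clients.get_notification_client()
for sub in client.list_subscriptions():
    print(sub.id, sub.description)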