Dataset schema:

    text           string   (lengths 89 to 104k)
    code_tokens    list
    avg_line_len   float64  (7.91 to 980)
    score          float64  (0 to 630)
def merged_cell_ranges(self):
    """Generates the sequence of merged cell ranges in the format:
    ((col_low, row_low), (col_hi, row_hi))
    """
    for row_number, row in enumerate(self.raw_sheet.rows()):
        for col_number, cell in enumerate(row):
            rspan, cspan = cell.span
            if (rspan, cspan) != (1, 1):
                yield ((col_number, row_number),
                       (col_number + cspan, row_number + rspan))
avg_line_len: 40.1 | score: 13.4
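A minimal usage sketch for the generator above, with stand-in Cell/Sheet classes that are assumed here (not the library's real API), just to show the ((col_low, row_low), (col_hi, row_hi)) format it yields:

class Cell:
    def __init__(self, span=(1, 1)):
        self.span = span  # (row_span, col_span); (1, 1) means unmerged

class Sheet:
    def __init__(self, grid):
        self._grid = grid
        self.raw_sheet = self  # stand-in: the sheet serves as its own raw sheet

    def rows(self):
        return self._grid

    merged_cell_ranges = merged_cell_ranges  # bind the function above as a method

sheet = Sheet([[Cell(), Cell(span=(2, 2))],
               [Cell(), Cell()]])
print(list(sheet.merged_cell_ranges()))  # [((1, 0), (3, 2))]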
def get_number_of_agents_for_scheduling(self, context):
    """Return number of agents on which the router will be scheduled."""
    num_agents = len(self.get_l3_agents(
        context, active=True,
        filters={'agent_modes': [bc.constants.L3_AGENT_MODE_LEGACY,
                                 bc.constants.L3_AGENT_MODE_DVR_SNAT]}))
    max_agents = cfg.CONF.max_l3_agents_per_router
    if max_agents:
        if max_agents > num_agents:
            LOG.info("Number of active agents lower than "
                     "max_l3_agents_per_router. L3 agents "
                     "available: %s", num_agents)
        else:
            num_agents = max_agents
    return num_agents
avg_line_len: 44.8125 | score: 19.8125
def track_from_filename(filename, filetype=None,
                        timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False):
    """Create a track object from a filename.

    NOTE: Does not create the detailed analysis for the Track.
    Call Track.get_analysis() for that.

    Args:
        filename: A string containing the path to the input file.
        filetype: A string indicating the filetype; defaults to None
            (type determined by file extension).
        force_upload: skip the MD5 shortcut path, force an upload+analysis

    Example:
        >>> t = track.track_from_filename("Miaow-01-Tempered-song.mp3")
        >>> t
        < Track >
        >>>
    """
    filetype = filetype or filename.split('.')[-1]
    file_object = open(filename, 'rb')
    result = track_from_file(file_object, filetype, timeout, force_upload)
    file_object.close()
    return result
avg_line_len: 36.434783 | score: 25.913043
def logs_sidecars_jobs(job_uuid: str, job_name: str,
                       log_lines: Optional[Union[str, Iterable[str]]]) -> None:
    """Signal handling for sidecars logs."""
    handle_job_logs(job_uuid=job_uuid, job_name=job_name, log_lines=log_lines)
    publisher.publish_job_log(
        log_lines=log_lines, job_uuid=job_uuid, job_name=job_name,
        send_task=False
    )
avg_line_len: 34.692308 | score: 11.615385
def setText(self, text):
    """
    Sets the text for this instance to the inputted text.

    :param      text | <str>
    """
    super(XTextEdit, self).setText(projex.text.toAscii(text))
avg_line_len: 31 | score: 14.142857
def random_pos(self, context_iterable, num_permutations):
    """Obtains random positions w/ replacement which match sequence context.

    Parameters
    ----------
    context_iterable : iterable containing two-element tuples
        Records number of mutations in each context. context_iterable
        should be something like [('AA', 5), ...].
    num_permutations : int
        Number of permutations used in the permutation test.

    Returns
    -------
    position_list : list
        Contains context string and the randomly chosen positions
        for that context.
    """
    position_list = []
    for contxt, n in context_iterable:
        pos_array = self.random_context_pos(n, num_permutations, contxt)
        position_list.append([contxt, pos_array])
    return position_list
avg_line_len: 38.590909 | score: 19.272727
def save(self, obj):
    """Required functionality."""
    if not obj.id:
        obj.id = uuid()

    stored_data = {
        '_id': obj.id,
        'value': json.loads(obj.to_data())
    }

    index_vals = obj.indexes() or {}
    for key in obj.__class__.index_names() or []:
        val = index_vals.get(key, '')
        stored_data[key] = str(val)

    coll = self.get_collection(obj.__class__.get_table_name())
    coll.update({"_id": obj.id}, stored_data, upsert=True)
avg_line_len: 30.058824 | score: 17.764706
def nameop_put_collision(cls, collisions, nameop):
    """
    Record a nameop as collided with another nameop in this block.
    """
    # these are supposed to have been put here by nameop_set_collided
    history_id_key = nameop.get('__collided_history_id_key__', None)
    history_id = nameop.get('__collided_history_id__', None)
    try:
        assert cls.nameop_is_collided(nameop), "Nameop not collided"
        assert history_id_key is not None, "Nameop missing collision info"
        assert history_id is not None, "Nameop missing collision info"
    except Exception as e:
        log.exception(e)
        log.error("FATAL: BUG: bad collision info")
        os.abort()

    if history_id_key not in collisions:
        collisions[history_id_key] = [history_id]
    else:
        collisions[history_id_key].append(history_id)
avg_line_len: 42.809524 | score: 22.428571
async def rename_conversation(self, rename_conversation_request):
    """Rename a conversation.

    Both group and one-to-one conversations may be renamed, but the
    official Hangouts clients have mixed support for one-to-one
    conversations with custom names.
    """
    response = hangouts_pb2.RenameConversationResponse()
    await self._pb_request('conversations/renameconversation',
                           rename_conversation_request, response)
    return response
avg_line_len: 45.909091 | score: 19.545455
def copyHdfsDirectoryToLocal(hdfsDirectory, localDirectory, hdfsClient):
    '''Copy directory from HDFS to local'''
    if not os.path.exists(localDirectory):
        os.makedirs(localDirectory)
    try:
        listing = hdfsClient.list_status(hdfsDirectory)
    except Exception as exception:
        nni_log(LogType.Error,
                'List hdfs directory {0} error: {1}'.format(hdfsDirectory, str(exception)))
        raise exception

    for f in listing:
        if f.type == 'DIRECTORY':
            subHdfsDirectory = posixpath.join(hdfsDirectory, f.pathSuffix)
            subLocalDirectory = os.path.join(localDirectory, f.pathSuffix)
            copyHdfsDirectoryToLocal(subHdfsDirectory, subLocalDirectory, hdfsClient)
        elif f.type == 'FILE':
            hdfsFilePath = posixpath.join(hdfsDirectory, f.pathSuffix)
            localFilePath = os.path.join(localDirectory, f.pathSuffix)
            copyHdfsFileToLocal(hdfsFilePath, localFilePath, hdfsClient)
        else:
            raise AssertionError('unexpected type {}'.format(f.type))
avg_line_len: 49.619048 | score: 25.047619
def get_users_info(self, user_id_list, lang="zh_CN"):
    """
    Batch-fetch basic user information.

    :param user_id_list: list of user IDs
    :param lang: language for the returned country/region fields:
        zh_CN Simplified Chinese, zh_TW Traditional Chinese, en English
    :return: the returned JSON payload
    """
    return self.post(
        url="https://api.weixin.qq.com/cgi-bin/user/info/batchget",
        data={
            "user_list": [
                {
                    "openid": user_id,
                    "lang": lang
                } for user_id in user_id_list
            ]
        }
    )
avg_line_len: 28.789474 | score: 15.526316
def image_size(self, pnmfile):
    """Get width and height of pnm file.

    simeon@homebox src>pnmfile /tmp/214-2.png
    /tmp/214-2.png:PPM raw, 100 by 100  maxval 255
    """
    pout = os.popen(self.shellsetup + self.pnmfile + ' ' + pnmfile, 'r')
    pnmfileout = pout.read(200)
    pout.close()
    m = re.search(r', (\d+) by (\d+) ', pnmfileout)
    if m is None:
        raise IIIFError(
            text="Bad output from pnmfile when trying to get size.")
    w = int(m.group(1))
    h = int(m.group(2))
    # print "pnmfile output = %s" % (pnmfileout)
    # print "image size = %d,%d" % (w,h)
    return (w, h)
avg_line_len: 37 | score: 15.055556
def infer_dict_fromkeys(node, context=None):
    """Infer dict.fromkeys

    :param nodes.Call node: dict.fromkeys() call to infer
    :param context.InferenceContext: node context
    :rtype nodes.Dict:
        a Dictionary containing the values that astroid was able to infer.
        In case the inference failed for any reason, an empty dictionary
        will be inferred instead.
    """
    def _build_dict_with_elements(elements):
        new_node = nodes.Dict(
            col_offset=node.col_offset, lineno=node.lineno, parent=node.parent
        )
        new_node.postinit(elements)
        return new_node

    call = arguments.CallSite.from_call(node)
    if call.keyword_arguments:
        raise UseInferenceDefault("TypeError: dict.fromkeys() takes no keyword arguments")
    if len(call.positional_arguments) not in {1, 2}:
        raise UseInferenceDefault(
            "TypeError: Needs between 1 and 2 positional arguments"
        )

    default = nodes.Const(None)
    values = call.positional_arguments[0]
    try:
        inferred_values = next(values.infer(context=context))
    except InferenceError:
        return _build_dict_with_elements([])
    if inferred_values is util.Uninferable:
        return _build_dict_with_elements([])

    # Limit to a couple of potential values, as this can become pretty complicated
    accepted_iterable_elements = (nodes.Const,)
    if isinstance(inferred_values, (nodes.List, nodes.Set, nodes.Tuple)):
        elements = inferred_values.elts
        for element in elements:
            if not isinstance(element, accepted_iterable_elements):
                # Fallback to an empty dict
                return _build_dict_with_elements([])

        elements_with_value = [(element, default) for element in elements]
        return _build_dict_with_elements(elements_with_value)
    elif isinstance(inferred_values, nodes.Const) and isinstance(
        inferred_values.value, (str, bytes)
    ):
        elements = [
            (nodes.Const(element), default) for element in inferred_values.value
        ]
        return _build_dict_with_elements(elements)
    elif isinstance(inferred_values, nodes.Dict):
        keys = inferred_values.itered()
        for key in keys:
            if not isinstance(key, accepted_iterable_elements):
                # Fallback to an empty dict
                return _build_dict_with_elements([])

        elements_with_value = [(element, default) for element in keys]
        return _build_dict_with_elements(elements_with_value)

    # Fallback to an empty dictionary
    return _build_dict_with_elements([])
avg_line_len: 38.530303 | score: 19.363636
def get_latex(ink_filename):
    """Get the LaTeX string from a file by the *.ink filename."""
    tex_file = os.path.splitext(ink_filename)[0] + ".tex"
    with open(tex_file) as f:
        tex_content = f.read().strip()

    pattern = re.compile(r"\\begin\{displaymath\}(.*?)\\end\{displaymath\}",
                         re.DOTALL)
    matches = pattern.findall(tex_content)

    if len(matches) == 0:
        # The dollars must be escaped: a bare `$` is the end-of-string anchor
        # and would never match a literal `$$...$$` formula.
        pattern = re.compile(r"\$\$(.*?)\$\$", re.DOTALL)
        matches = pattern.findall(tex_content)
    if len(matches) != 1:
        raise Exception("%s: Found not one match, but %i: %s" %
                        (ink_filename, len(matches), matches))
    formula_in_latex = matches[0].strip()
    formula_in_latex = remove_matching_braces(formula_in_latex)
    # repl = []
    # for letter in string.letters:
    #     repl.append(('\\mbox{%s}' % letter, letter))
    # for search, replace in repl:
    #     formula_in_latex = formula_in_latex.replace(search, replace)
    return formula_in_latex
avg_line_len: 38.846154 | score: 15.576923
def on_raise(self, node):    # ('type', 'inst', 'tback')
    """Raise statement: note difference for python 2 and 3."""
    if version_info[0] == 3:
        excnode = node.exc
        msgnode = node.cause
    else:
        excnode = node.type
        msgnode = node.inst
    out = self.run(excnode)
    msg = ' '.join(out.args)
    msg2 = self.run(msgnode)
    if msg2 not in (None, 'None'):
        msg = "%s: %s" % (msg, msg2)
    self.raise_exception(None, exc=out.__class__, msg=msg, expr='')
avg_line_len: 38.214286 | score: 10.357143
def outputs(ctx, client, revision, paths):
    r"""Show output files in the repository.

    <PATHS>    Files to show. If no files are given all output files are shown.
    """
    graph = Graph(client)
    filter = graph.build(paths=paths, revision=revision)
    output_paths = graph.output_paths

    click.echo('\n'.join(graph._format_path(path) for path in output_paths))

    if paths:
        if not output_paths:
            ctx.exit(1)

        from renku.models._datastructures import DirectoryTree
        tree = DirectoryTree.from_list(item.path for item in filter)

        for output in output_paths:
            if tree.get(output) is None:
                ctx.exit(1)
                return
avg_line_len: 31.090909 | score: 20.590909
def _sequential_topk(timestep: int,
                     beam_size: int,
                     inactive: mx.nd.NDArray,
                     scores: mx.nd.NDArray,
                     hypotheses: List[ConstrainedHypothesis],
                     best_ids: mx.nd.NDArray,
                     best_word_ids: mx.nd.NDArray,
                     sequence_scores: mx.nd.NDArray) -> Tuple[np.array, np.array, np.array,
                                                              List[ConstrainedHypothesis], mx.nd.NDArray]:
    """
    Builds a new topk list such that the beam contains hypotheses having completed
    different numbers of constraints. These items are built from three different types:
    (1) the best items across the whole scores matrix, (2) the set of words that must
    follow existing constraints, and (3) k-best items from each row.

    :param timestep: The current decoder timestep.
    :param beam_size: The length of the beam for each segment.
    :param inactive: Array listing inactive rows (shape: (beam_size,)).
    :param scores: The scores array (shape: (beam_size, target_vocab_size)).
    :param hypotheses: The list of hypothesis objects.
    :param best_ids: The current list of best hypotheses (shape: (beam_size,)).
    :param best_word_ids: The parallel list of best word IDs (shape: (beam_size,)).
    :param sequence_scores: (shape: (beam_size, 1)).
    :return: A tuple containing the best hypothesis rows, the best hypothesis words,
        the scores, the updated constrained hypotheses, and the updated set of
        inactive hypotheses.
    """
    num_constraints = hypotheses[0].size()

    candidates = set()
    # (1) Add all of the top-k items (which were passed) in as long as they pass the constraints
    for row, col, seq_score in zip(best_ids, best_word_ids, sequence_scores):
        row = int(row.asscalar())
        col = int(col.asscalar())
        if hypotheses[row] is not None and hypotheses[row].is_valid(col):
            seq_score = float(seq_score.asscalar())
            new_item = hypotheses[row].advance(col)
            cand = ConstrainedCandidate(row, col, seq_score, new_item)
            candidates.add(cand)

    # For each hypothesis, we add (2) all the constraints that could follow it and
    # (3) the best item (constrained or not) in that row
    best_next = mx.nd.argmin(scores, axis=1)
    for row in range(beam_size):
        if inactive[row]:
            continue

        hyp = hypotheses[row]

        # (2) add all the constraints that could extend this
        nextones = hyp.allowed()

        # (3) add the single-best item after this (if it's valid)
        col = int(best_next[row].asscalar())
        if hyp.is_valid(col):
            nextones.add(col)

        # Now, create new candidates for each of these items
        for col in nextones:
            new_item = hyp.advance(col)
            score = scores[row, col].asscalar()
            cand = ConstrainedCandidate(row, col, score, new_item)
            candidates.add(cand)

    # Sort the candidates. After allocating the beam across the banks, we will
    # pick the top items for each bank from this list
    sorted_candidates = sorted(candidates, key=attrgetter('score'))

    # The number of hypotheses in each bank
    counts = [0 for _ in range(num_constraints + 1)]
    for cand in sorted_candidates:
        counts[cand.hypothesis.num_met()] += 1

    # Adjust allocated bank sizes if there are too few candidates in any of them
    bank_sizes = get_bank_sizes(num_constraints, beam_size, counts)

    # Sort the candidates into the allocated banks
    pruned_candidates = []  # type: List[ConstrainedCandidate]
    for i, cand in enumerate(sorted_candidates):
        bank = cand.hypothesis.num_met()

        if bank_sizes[bank] > 0:
            pruned_candidates.append(cand)
            bank_sizes[bank] -= 1

    num_pruned_candidates = len(pruned_candidates)

    inactive[:num_pruned_candidates] = 0

    # Pad the beam so array assignment still works
    if num_pruned_candidates < beam_size:
        inactive[num_pruned_candidates:] = 1
        pruned_candidates += [pruned_candidates[num_pruned_candidates - 1]] * \
            (beam_size - num_pruned_candidates)

    return (np.array([x.row for x in pruned_candidates]),
            np.array([x.col for x in pruned_candidates]),
            np.array([[x.score] for x in pruned_candidates]),
            [x.hypothesis for x in pruned_candidates],
            inactive)
avg_line_len: 44.581633 | score: 23.214286
def extend(self, *args):
    """
    Extend a given object with all the properties in passed-in object(s).
    """
    args = list(args)
    for i in args:
        self.obj.update(i)
    return self._wrap(self.obj)
avg_line_len: 24.2 | score: 13.2
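A minimal sketch of the dict-merging behaviour, with a tiny stand-in wrapper (the real class and its _wrap are assumed, and simplified here):

class Wrapper:
    def __init__(self, obj):
        self.obj = obj

    def _wrap(self, obj):
        return obj  # simplified: return the plain dict instead of re-wrapping

    extend = extend  # bind the method above

w = Wrapper({"a": 1})
print(w.extend({"b": 2}, {"a": 3}))  # {'a': 3, 'b': 2}: later args win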
def link(target, link_to):
    """
    Create a link to a target file or a folder.

    For simplicity's sake, both target and link_to must be absolute paths and
    must include the filename of the file or folder.
    Also do not include any trailing slash.

    e.g. link('/path/to/file', '/path/to/link')
    But not: link('/path/to/file', 'path/to/')
    or link('/path/to/folder/', '/path/to/link')

    Args:
        target (str): file or folder the link will point to
        link_to (str): Link to create
    """
    assert isinstance(target, str)
    assert os.path.exists(target)
    assert isinstance(link_to, str)

    # Create the path to the link if it does not exist
    abs_path = os.path.dirname(os.path.abspath(link_to))
    if not os.path.isdir(abs_path):
        os.makedirs(abs_path)

    # Make sure the file or folder recursively has the good mode
    chmod(target)

    # Create the link to target
    os.symlink(target, link_to)
avg_line_len: 29.741935 | score: 17.870968
def send_event(self, name, *args, **kwargs):
    """ Send an event to the native handler. This call is queued and batched.

    Parameters
    ----------
    name : str
        The event name to be processed by MainActivity.processMessages.
    *args : args
        The arguments required by the event.
    **kwargs : kwargs
        Options for sending. These are:

        now: boolean
            Send the event now
    """
    n = len(self._bridge_queue)

    # Add to queue
    self._bridge_queue.append((name, args))

    if n == 0:
        # First event, send at next available time
        self._bridge_last_scheduled = time()
        self.deferred_call(self._bridge_send)
        return
    elif kwargs.get('now'):
        self._bridge_send(now=True)
        return

    # If it's been over 5 ms since we last scheduled, run now
    dt = time() - self._bridge_last_scheduled
    if dt > self._bridge_max_delay:
        self._bridge_send(now=True)
avg_line_len: 29.6 | score: 17.485714
def isDescendantOf(self, other):
    '''Returns whether this Key is a descendant of `other`.

    >>> Key('/Comedy/MontyPython').isDescendantOf(Key('/Comedy'))
    True
    '''
    if isinstance(other, Key):
        return other.isAncestorOf(self)
    raise TypeError('%s is not of type %s' % (other, Key))
avg_line_len: 30.4 | score: 22.8
def create_transformation(self, rotation=None, translation=None):
    """
    Creates a transformation matrix with rotations and translation.

    Args:
        rotation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`
        translation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`

    Returns:
        A 4x4 matrix as a :py:class:`numpy.array`
    """
    mat = None
    if rotation is not None:
        mat = Matrix44.from_eulers(Vector3(rotation))

    if translation is not None:
        trans = matrix44.create_from_translation(Vector3(translation))
        if mat is None:
            mat = trans
        else:
            mat = matrix44.multiply(mat, trans)

    return mat
avg_line_len: 33.521739 | score: 23.608696
def history_forward(self, count=1):
    """
    Move forwards through the history.

    :param count: Amount of items to move forward.
    """
    self._set_history_search()

    # Go forward in history.
    found_something = False

    for i in range(self.working_index + 1, len(self._working_lines)):
        if self._history_matches(i):
            self.working_index = i
            count -= 1
            found_something = True
        if count == 0:
            break

    # If we found an entry, move cursor to the end of the first line.
    if found_something:
        self.cursor_position = 0
        self.cursor_position += self.document.get_end_of_line_position()
avg_line_len: 31.478261 | score: 16.26087
def list_jobs(config, *, status=JobStatus.Active,
              filter_by_type=None, filter_by_worker=None):
    """ Return a list of Celery jobs.

    Args:
        config (Config): Reference to the configuration object from which the
            settings are retrieved.
        status (JobStatus): The status of the jobs that should be returned.
        filter_by_type (list): Restrict the returned jobs to the types in this list.
        filter_by_worker (list): Only return jobs that were registered, reserved
            or are running on the workers given in this list of worker names.
            Using this option will increase the performance.

    Returns:
        list: A list of JobStats.
    """
    celery_app = create_app(config)

    # option to filter by the worker (improves performance)
    if filter_by_worker is not None:
        inspect = celery_app.control.inspect(
            destination=filter_by_worker if isinstance(filter_by_worker, list)
            else [filter_by_worker])
    else:
        inspect = celery_app.control.inspect()

    # get active, registered or reserved jobs
    if status == JobStatus.Active:
        job_map = inspect.active()
    elif status == JobStatus.Registered:
        job_map = inspect.registered()
    elif status == JobStatus.Reserved:
        job_map = inspect.reserved()
    elif status == JobStatus.Scheduled:
        job_map = inspect.scheduled()
    else:
        job_map = None

    if job_map is None:
        return []

    result = []
    for worker_name, jobs in job_map.items():
        for job in jobs:
            try:
                job_stats = JobStats.from_celery(worker_name, job, celery_app)
                if (filter_by_type is None) or (job_stats.type == filter_by_type):
                    result.append(job_stats)
            except JobStatInvalid:
                pass

    return result
avg_line_len: 34.415094 | score: 20.075472
def register(self, name, path, description, final_words=None):
    """
    Registers a new recipe in the context of the current plugin.

    :param name: Name of the recipe
    :param path: Absolute path of the recipe folder
    :param description: A meaningful description of the recipe
    :param final_words: A string, which gets printed after the recipe was built.
    """
    return self.__app.recipes.register(name, path, self._plugin,
                                       description, final_words)
avg_line_len: 49 | score: 23
def handleNotification(self, req):
    """handles a notification request by calling the appropriate method the
    service exposes"""
    name = req["method"]
    params = req["params"]
    try:  # to get a callable obj
        obj = getMethodByName(self.service, name)
        rslt = obj(*params)
    except:
        pass
avg_line_len: 37.888889 | score: 11.444444
def analyze(self, text, index=None, analyzer=None, tokenizer=None,
            filters=None, field=None):
    """
    Performs the analysis process on a text and returns the tokens breakdown
    of the text.

    (See :ref:`es-guide-reference-api-admin-indices-optimize`)
    """
    if filters is None:
        filters = []
    argsets = 0
    args = {}

    if analyzer:
        args['analyzer'] = analyzer
        argsets += 1
    if tokenizer or filters:
        if tokenizer:
            args['tokenizer'] = tokenizer
        if filters:
            args['filters'] = ','.join(filters)
        argsets += 1
    if field:
        args['field'] = field
        argsets += 1

    if argsets > 1:
        raise ValueError('Argument conflict: Specify either analyzer, tokenizer/filters or field')

    if field and index is None:
        raise ValueError('field can only be specified with an index')

    path = make_path(index, '_analyze')
    return self.conn._send_request('POST', path, text, args)
avg_line_len: 32 | score: 22.484848
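A hedged usage sketch of the exclusive-argument contract above (the es client object is assumed):

tokens = es.analyze("Quick Brown Foxes", index="my-index", analyzer="snowball")
# Mixing argument sets raises:
# es.analyze("text", analyzer="snowball", field="title")  -> ValueError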
def _get_window(self, other=None):
    """
    Get the window length over which to perform some operation.

    Parameters
    ----------
    other : object, default None
        The other object that is involved in the operation.
        Such an object is involved for operations like covariance.

    Returns
    -------
    window : int
        The window length.
    """
    axis = self.obj._get_axis(self.axis)
    length = len(axis) + (other is not None) * len(axis)

    other = self.min_periods or -1
    return max(length, other)
avg_line_len: 29.15 | score: 18.35
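A worked check of the length arithmetic above, inlined without pandas internals:

axis_len = 5                             # len(axis)
length = axis_len + True * axis_len      # another object involved -> doubled
min_periods = 3
print(max(length, min_periods or -1))    # 10: the window spans both inputs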
def _actionsFreqsAngles(self, *args, **kwargs):
    """
    NAME:
       actionsFreqsAngles (_actionsFreqsAngles)
    PURPOSE:
       evaluate the actions, frequencies, and angles
       (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
    INPUT:
       Either:
          a) R,vR,vT,z,vz[,phi]:
             1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
             2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
          b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
       maxn= (default: object-wide default) Use a grid in vec(n) up to this n (zero-based)
       ts= if set, the phase-space points correspond to these times (IF NOT SET, WE ASSUME THAT ts IS THE ONE ASSOCIATED WITH THIS OBJECT)
       _firstFlip= (False) if True and Orbits are given, the backward part of the orbit is integrated first and stored in the Orbit object
    OUTPUT:
       (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
    HISTORY:
       2013-09-10 - Written - Bovy (IAS)
    """
    from galpy.orbit import Orbit
    _firstFlip = kwargs.get('_firstFlip', False)
    # If the orbit was already integrated, set ts to the integration times
    if isinstance(args[0], Orbit) and hasattr(args[0]._orb, 'orbit') \
            and not 'ts' in kwargs:
        kwargs['ts'] = args[0]._orb.t
    elif (isinstance(args[0], list) and isinstance(args[0][0], Orbit)) \
            and hasattr(args[0][0]._orb, 'orbit') \
            and not 'ts' in kwargs:
        kwargs['ts'] = args[0][0]._orb.t
    R, vR, vT, z, vz, phi = self._parse_args(True, _firstFlip, *args)
    if 'ts' in kwargs and not kwargs['ts'] is None:
        ts = kwargs['ts']
        if _APY_LOADED and isinstance(ts, units.Quantity):
            ts = ts.to(units.Gyr).value / time_in_Gyr(self._vo, self._ro)
    else:
        ts = nu.empty(R.shape[1])
        ts[self._ntintJ - 1:] = self._tsJ
        ts[:self._ntintJ - 1] = -self._tsJ[1:][::-1]
    maxn = kwargs.get('maxn', self._maxn)
    if self._c:  # pragma: no cover
        pass
    else:
        # Use self._aAI to calculate the actions and angles in the isochrone potential
        if '_acfs' in kwargs:
            acfs = kwargs['_acfs']
        else:
            acfs = self._aAI._actionsFreqsAngles(R.flatten(),
                                                 vR.flatten(),
                                                 vT.flatten(),
                                                 z.flatten(),
                                                 vz.flatten(),
                                                 phi.flatten())
        jrI = nu.reshape(acfs[0], R.shape)[:, :-1]
        jzI = nu.reshape(acfs[2], R.shape)[:, :-1]
        anglerI = nu.reshape(acfs[6], R.shape)
        anglezI = nu.reshape(acfs[8], R.shape)
        if nu.any((nu.fabs(nu.amax(anglerI, axis=1) - _TWOPI) > _ANGLETOL)
                  * (nu.fabs(nu.amin(anglerI, axis=1)) > _ANGLETOL)):  # pragma: no cover
            warnings.warn("Full radial angle range not covered for at least one object; actions are likely not reliable", galpyWarning)
        if nu.any((nu.fabs(nu.amax(anglezI, axis=1) - _TWOPI) > _ANGLETOL)
                  * (nu.fabs(nu.amin(anglezI, axis=1)) > _ANGLETOL)):  # pragma: no cover
            warnings.warn("Full vertical angle range not covered for at least one object; actions are likely not reliable", galpyWarning)
        danglerI = ((nu.roll(anglerI, -1, axis=1) - anglerI) % _TWOPI)[:, :-1]
        danglezI = ((nu.roll(anglezI, -1, axis=1) - anglezI) % _TWOPI)[:, :-1]
        jr = nu.sum(jrI * danglerI, axis=1) / nu.sum(danglerI, axis=1)
        jz = nu.sum(jzI * danglezI, axis=1) / nu.sum(danglezI, axis=1)
        if _isNonAxi(self._pot):  # pragma: no cover
            lzI = nu.reshape(acfs[1], R.shape)[:, :-1]
            anglephiI = nu.reshape(acfs[7], R.shape)
            if nu.any((nu.fabs(nu.amax(anglephiI, axis=1) - _TWOPI) > _ANGLETOL)
                      * (nu.fabs(nu.amin(anglephiI, axis=1)) > _ANGLETOL)):  # pragma: no cover
                warnings.warn("Full azimuthal angle range not covered for at least one object; actions are likely not reliable", galpyWarning)
            danglephiI = ((nu.roll(anglephiI, -1, axis=1) - anglephiI) % _TWOPI)[:, :-1]
            lz = nu.sum(lzI * danglephiI, axis=1) / nu.sum(danglephiI, axis=1)
        else:
            lz = R[:, len(ts) // 2] * vT[:, len(ts) // 2]
        # Now do an 'angle-fit'
        angleRT = dePeriod(nu.reshape(acfs[6], R.shape))
        acfs7 = nu.reshape(acfs[7], R.shape)
        negFreqIndx = nu.median(acfs7 - nu.roll(acfs7, 1, axis=1), axis=1) < 0.  # anglephi is decreasing
        anglephiT = nu.empty(acfs7.shape)
        anglephiT[negFreqIndx, :] = dePeriod(_TWOPI - acfs7[negFreqIndx, :])
        negFreqPhi = nu.zeros(R.shape[0], dtype='bool')
        negFreqPhi[negFreqIndx] = True
        anglephiT[True ^ negFreqIndx, :] = dePeriod(acfs7[True ^ negFreqIndx, :])
        angleZT = dePeriod(nu.reshape(acfs[8], R.shape))
        # Write the angle-fit as Y=AX, build A and Y
        nt = len(ts)
        no = R.shape[0]
        # remove 0,0,0 and half-plane
        if _isNonAxi(self._pot):
            nn = (2 * maxn - 1) ** 2 * maxn - (maxn - 1) * (2 * maxn - 1) - maxn
        else:
            nn = maxn * (2 * maxn - 1) - maxn
        A = nu.zeros((no, nt, 2 + nn))
        A[:, :, 0] = 1.
        A[:, :, 1] = ts
        # sorting the phi and Z grids this way makes it easy to exclude the origin
        phig = list(nu.arange(-maxn + 1, maxn, 1))
        phig.sort(key=lambda x: abs(x))
        phig = nu.array(phig, dtype='int')
        if _isNonAxi(self._pot):
            grid = nu.meshgrid(nu.arange(maxn), phig, phig)
        else:
            grid = nu.meshgrid(nu.arange(maxn), phig)
        gridR = grid[0].T.flatten()[1:]  # remove 0,0,0
        gridZ = grid[1].T.flatten()[1:]
        mask = nu.ones(len(gridR), dtype=bool)  # excludes axis that is not in half-space
        if _isNonAxi(self._pot):
            gridphi = grid[2].T.flatten()[1:]
            mask = (True
                    ^ (gridR == 0) * ((gridphi < 0) + ((gridphi == 0) * (gridZ < 0))))
        else:
            mask[:2 * maxn - 3:2] = False
        gridR = gridR[mask]
        gridZ = gridZ[mask]
        tangleR = nu.tile(angleRT.T, (nn, 1, 1)).T
        tgridR = nu.tile(gridR, (no, nt, 1))
        tangleZ = nu.tile(angleZT.T, (nn, 1, 1)).T
        tgridZ = nu.tile(gridZ, (no, nt, 1))
        if _isNonAxi(self._pot):
            gridphi = gridphi[mask]
            tgridphi = nu.tile(gridphi, (no, nt, 1))
            tanglephi = nu.tile(anglephiT.T, (nn, 1, 1)).T
            sinnR = nu.sin(tgridR * tangleR + tgridphi * tanglephi + tgridZ * tangleZ)
        else:
            sinnR = nu.sin(tgridR * tangleR + tgridZ * tangleZ)
        A[:, :, 2:] = sinnR
        # Matrix magic
        atainv = nu.empty((no, 2 + nn, 2 + nn))
        AT = nu.transpose(A, axes=(0, 2, 1))
        for ii in range(no):
            atainv[ii, :, :] = linalg.inv(nu.dot(AT[ii, :, :], A[ii, :, :]))
        ATAR = nu.sum(AT * nu.transpose(nu.tile(angleRT, (2 + nn, 1, 1)), axes=(1, 0, 2)), axis=2)
        ATAT = nu.sum(AT * nu.transpose(nu.tile(anglephiT, (2 + nn, 1, 1)), axes=(1, 0, 2)), axis=2)
        ATAZ = nu.sum(AT * nu.transpose(nu.tile(angleZT, (2 + nn, 1, 1)), axes=(1, 0, 2)), axis=2)
        angleR = nu.sum(atainv[:, 0, :] * ATAR, axis=1)
        OmegaR = nu.sum(atainv[:, 1, :] * ATAR, axis=1)
        anglephi = nu.sum(atainv[:, 0, :] * ATAT, axis=1)
        Omegaphi = nu.sum(atainv[:, 1, :] * ATAT, axis=1)
        angleZ = nu.sum(atainv[:, 0, :] * ATAZ, axis=1)
        OmegaZ = nu.sum(atainv[:, 1, :] * ATAZ, axis=1)
        Omegaphi[negFreqIndx] = -Omegaphi[negFreqIndx]
        anglephi[negFreqIndx] = _TWOPI - anglephi[negFreqIndx]
        if kwargs.get('_retacfs', False):
            return (jr, lz, jz, OmegaR, Omegaphi, OmegaZ,  # pragma: no cover
                    angleR % _TWOPI,
                    anglephi % _TWOPI,
                    angleZ % _TWOPI, acfs)
        else:
            return (jr, lz, jz, OmegaR, Omegaphi, OmegaZ,
                    angleR % _TWOPI,
                    anglephi % _TWOPI,
                    angleZ % _TWOPI)
"1", ",", "0", ",", "2", ")", ")", ",", "axis", "=", "2", ")", "ATAT", "=", "nu", ".", "sum", "(", "AT", "*", "nu", ".", "transpose", "(", "nu", ".", "tile", "(", "anglephiT", ",", "(", "2", "+", "nn", ",", "1", ",", "1", ")", ")", ",", "axes", "=", "(", "1", ",", "0", ",", "2", ")", ")", ",", "axis", "=", "2", ")", "ATAZ", "=", "nu", ".", "sum", "(", "AT", "*", "nu", ".", "transpose", "(", "nu", ".", "tile", "(", "angleZT", ",", "(", "2", "+", "nn", ",", "1", ",", "1", ")", ")", ",", "axes", "=", "(", "1", ",", "0", ",", "2", ")", ")", ",", "axis", "=", "2", ")", "angleR", "=", "nu", ".", "sum", "(", "atainv", "[", ":", ",", "0", ",", ":", "]", "*", "ATAR", ",", "axis", "=", "1", ")", "OmegaR", "=", "nu", ".", "sum", "(", "atainv", "[", ":", ",", "1", ",", ":", "]", "*", "ATAR", ",", "axis", "=", "1", ")", "anglephi", "=", "nu", ".", "sum", "(", "atainv", "[", ":", ",", "0", ",", ":", "]", "*", "ATAT", ",", "axis", "=", "1", ")", "Omegaphi", "=", "nu", ".", "sum", "(", "atainv", "[", ":", ",", "1", ",", ":", "]", "*", "ATAT", ",", "axis", "=", "1", ")", "angleZ", "=", "nu", ".", "sum", "(", "atainv", "[", ":", ",", "0", ",", ":", "]", "*", "ATAZ", ",", "axis", "=", "1", ")", "OmegaZ", "=", "nu", ".", "sum", "(", "atainv", "[", ":", ",", "1", ",", ":", "]", "*", "ATAZ", ",", "axis", "=", "1", ")", "Omegaphi", "[", "negFreqIndx", "]", "=", "-", "Omegaphi", "[", "negFreqIndx", "]", "anglephi", "[", "negFreqIndx", "]", "=", "_TWOPI", "-", "anglephi", "[", "negFreqIndx", "]", "if", "kwargs", ".", "get", "(", "'_retacfs'", ",", "False", ")", ":", "return", "(", "jr", ",", "lz", ",", "jz", ",", "OmegaR", ",", "Omegaphi", ",", "OmegaZ", ",", "#pragma: no cover", "angleR", "%", "_TWOPI", ",", "anglephi", "%", "_TWOPI", ",", "angleZ", "%", "_TWOPI", ",", "acfs", ")", "else", ":", "return", "(", "jr", ",", "lz", ",", "jz", ",", "OmegaR", ",", "Omegaphi", ",", "OmegaZ", ",", "angleR", "%", "_TWOPI", ",", "anglephi", "%", "_TWOPI", ",", "angleZ", "%", "_TWOPI", ")" ]
54.115385
21.833333
def _to_dict(self): """Return keyrange's state as a dict. :rtype: dict :returns: state of this instance. """ mapping = {} if self.start_open: mapping["start_open"] = self.start_open if self.start_closed: mapping["start_closed"] = self.start_closed if self.end_open: mapping["end_open"] = self.end_open if self.end_closed: mapping["end_closed"] = self.end_closed return mapping
[ "def", "_to_dict", "(", "self", ")", ":", "mapping", "=", "{", "}", "if", "self", ".", "start_open", ":", "mapping", "[", "\"start_open\"", "]", "=", "self", ".", "start_open", "if", "self", ".", "start_closed", ":", "mapping", "[", "\"start_closed\"", "]", "=", "self", ".", "start_closed", "if", "self", ".", "end_open", ":", "mapping", "[", "\"end_open\"", "]", "=", "self", ".", "end_open", "if", "self", ".", "end_closed", ":", "mapping", "[", "\"end_closed\"", "]", "=", "self", ".", "end_closed", "return", "mapping" ]
23.333333
19.809524
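A minimal usage sketch of the same include-only-set-fields pattern, using a hypothetical stand-in class (not the library's actual KeyRange):

class FakeKeyRange:
    # Hypothetical stand-in for illustration only.
    def __init__(self, start_closed=None, end_open=None):
        self.start_open = None
        self.start_closed = start_closed
        self.end_open = end_open
        self.end_closed = None

    def _to_dict(self):
        # Same pattern as above: include only the bounds that are set.
        mapping = {}
        if self.start_open:
            mapping["start_open"] = self.start_open
        if self.start_closed:
            mapping["start_closed"] = self.start_closed
        if self.end_open:
            mapping["end_open"] = self.end_open
        if self.end_closed:
            mapping["end_closed"] = self.end_closed
        return mapping

print(FakeKeyRange(start_closed=["a"], end_open=["b"])._to_dict())
# {'start_closed': ['a'], 'end_open': ['b']}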
def post_headline(self, name, level, message): """Asynchronously update the sticky headline for a service. Args: name (string): The name of the service level (int): A message level in states.*_LEVEL message (string): The user facing error message that will be stored for the service and can be queried later. """ self._client.post_headline(name, level, message)
[ "def", "post_headline", "(", "self", ",", "name", ",", "level", ",", "message", ")", ":", "self", ".", "_client", ".", "post_headline", "(", "name", ",", "level", ",", "message", ")" ]
39.727273
19.363636
def value_to_bool(config_val, evar): """ Massages the 'true' and 'false' strings to bool equivalents. :param str config_val: The env var value. :param EnvironmentVariable evar: The EVar object we are validating a value for. :rtype: bool :return: True or False, depending on the value. """ if not config_val: return False if config_val.strip().lower() == 'true': return True else: return False
[ "def", "value_to_bool", "(", "config_val", ",", "evar", ")", ":", "if", "not", "config_val", ":", "return", "False", "if", "config_val", ".", "strip", "(", ")", ".", "lower", "(", ")", "==", "'true'", ":", "return", "True", "else", ":", "return", "False" ]
28.125
17
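The function above is self-contained, so its edge cases can be checked as-is (assuming only that the unused evar argument can be passed as None):

def value_to_bool(config_val, evar):
    if not config_val:
        return False
    if config_val.strip().lower() == 'true':
        return True
    else:
        return False

assert value_to_bool('true', None) is True
assert value_to_bool('  TRUE ', None) is True   # whitespace and case are ignored
assert value_to_bool('false', None) is False
assert value_to_bool('', None) is False         # empty string counts as False
assert value_to_bool(None, None) is False       # missing value counts as False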
def get_sphinx_autodoc(
    self,
    depth=None,
    exclude=None,
    width=72,
    error=False,
    raised=False,
    no_comment=False,
):
    r"""
    Return the exceptions list in `reStructuredText`_, auto-determining the
    callable name.

    :param depth: Hierarchy levels to include in the exceptions list
                  (overrides default **depth** argument; see
                  :py:attr:`pexdoc.ExDoc.depth`). If None, exceptions at all
                  depths are included
    :type  depth: non-negative integer or None

    :param exclude: List of (potentially partial) module and callable names
                    to exclude from exceptions list (overrides default
                    **exclude** argument, see
                    :py:attr:`pexdoc.ExDoc.exclude`). If None, all callables
                    are included
    :type  exclude: list of strings or None

    :param width: Maximum width of the lines of text (minimum 40)
    :type  width: integer

    :param error: Flag that indicates whether an exception should be raised
                  if the callable is not found in the callables exceptions
                  database (True) or not (False)
    :type  error: boolean

    :param raised: Flag that indicates whether only exceptions that were
                   raised (and presumably caught) should be documented
                   (True) or all registered exceptions should be documented
                   (False)
    :type  raised: boolean

    :param no_comment: Flag that indicates whether a `reStructuredText`_
                       comment labeling the callable (method, function or
                       class property) should be printed (False) or not
                       (True) before the exceptions documentation
    :type  no_comment: boolean

    :raises:
     * RuntimeError (Argument \\`depth\\` is not valid)

     * RuntimeError (Argument \\`error\\` is not valid)

     * RuntimeError (Argument \\`exclude\\` is not valid)

     * RuntimeError (Argument \\`no_comment\\` is not valid)

     * RuntimeError (Argument \\`raised\\` is not valid)

     * RuntimeError (Argument \\`width\\` is not valid)

     * RuntimeError (Callable not found in exception list: *[name]*)

     * RuntimeError (Unable to determine callable name)
    """
    # This code is cog-specific: cog code file name is the module
    # file name, a plus (+), and then the line number where the
    # cog function is
    frame = sys._getframe(1)
    index = frame.f_code.co_filename.rfind("+")
    fname = os.path.abspath(frame.f_code.co_filename[:index])
    # Find name of callable based on module name and line number
    # within that module, then get the exceptions by using the
    # get_sphinx_doc() method with this information
    line_num = int(frame.f_code.co_filename[index + 1 :])
    module_db = self._module_obj_db[fname]
    names = [callable_dict["name"] for callable_dict in module_db]
    line_nums = [callable_dict["line"] for callable_dict in module_db]
    name = names[bisect.bisect(line_nums, line_num) - 1]
    return self.get_sphinx_doc(
        name=name,
        depth=depth,
        exclude=exclude,
        width=width,
        error=error,
        raised=raised,
        no_comment=no_comment,
    )
[ "def", "get_sphinx_autodoc", "(", "self", ",", "depth", "=", "None", ",", "exclude", "=", "None", ",", "width", "=", "72", ",", "error", "=", "False", ",", "raised", "=", "False", ",", "no_comment", "=", "False", ",", ")", ":", "# This code is cog-specific: cog code file name is the module", "# file name, a plus (+), and then the line number where the", "# cog function is", "frame", "=", "sys", ".", "_getframe", "(", "1", ")", "index", "=", "frame", ".", "f_code", ".", "co_filename", ".", "rfind", "(", "\"+\"", ")", "fname", "=", "os", ".", "path", ".", "abspath", "(", "frame", ".", "f_code", ".", "co_filename", "[", ":", "index", "]", ")", "# Find name of callable based on module name and line number", "# within that module, then get the exceptions by using the", "# get_sphinx_doc() method with this information", "line_num", "=", "int", "(", "frame", ".", "f_code", ".", "co_filename", "[", "index", "+", "1", ":", "]", ")", "module_db", "=", "self", ".", "_module_obj_db", "[", "fname", "]", "names", "=", "[", "callable_dict", "[", "\"name\"", "]", "for", "callable_dict", "in", "module_db", "]", "line_nums", "=", "[", "callable_dict", "[", "\"line\"", "]", "for", "callable_dict", "in", "module_db", "]", "name", "=", "names", "[", "bisect", ".", "bisect", "(", "line_nums", ",", "line_num", ")", "-", "1", "]", "return", "self", ".", "get_sphinx_doc", "(", "name", "=", "name", ",", "depth", "=", "depth", ",", "exclude", "=", "exclude", ",", "width", "=", "width", ",", "error", "=", "error", ",", "raised", "=", "raised", ",", "no_comment", "=", "no_comment", ",", ")" ]
39.574713
24.45977
def primitive_datatype(self, t: URIRef) -> Optional[URIRef]: """ Return the data type for primitive type t, if any :param t: type :return: corresponding data type """ for sco in self._o.objects(t, RDFS.subClassOf): sco_type = self._o.value(sco, RDF.type) sco_prop = self._o.value(sco, OWL.onProperty) if sco_type == OWL.Restriction and sco_prop == FHIR.value: # The older versions of fhir.ttl (incorrectly) referenced the datatype directly restriction_type = self._o.value(sco, OWL.allValuesFrom) if not restriction_type: restriction_dt_entry = self._o.value(sco, OWL.someValuesFrom) restriction_type = self._o.value(restriction_dt_entry, OWL.onDatatype) return restriction_type return None
[ "def", "primitive_datatype", "(", "self", ",", "t", ":", "URIRef", ")", "->", "Optional", "[", "URIRef", "]", ":", "for", "sco", "in", "self", ".", "_o", ".", "objects", "(", "t", ",", "RDFS", ".", "subClassOf", ")", ":", "sco_type", "=", "self", ".", "_o", ".", "value", "(", "sco", ",", "RDF", ".", "type", ")", "sco_prop", "=", "self", ".", "_o", ".", "value", "(", "sco", ",", "OWL", ".", "onProperty", ")", "if", "sco_type", "==", "OWL", ".", "Restriction", "and", "sco_prop", "==", "FHIR", ".", "value", ":", "# The older versions of fhir.ttl (incorrectly) referenced the datatype directly", "restriction_type", "=", "self", ".", "_o", ".", "value", "(", "sco", ",", "OWL", ".", "allValuesFrom", ")", "if", "not", "restriction_type", ":", "restriction_dt_entry", "=", "self", ".", "_o", ".", "value", "(", "sco", ",", "OWL", ".", "someValuesFrom", ")", "restriction_type", "=", "self", ".", "_o", ".", "value", "(", "restriction_dt_entry", ",", "OWL", ".", "onDatatype", ")", "return", "restriction_type", "return", "None" ]
51.176471
19.294118
def _leave_status(self, subreddit, statusurl): """Abdicate status in a subreddit. :param subreddit: The name of the subreddit to leave `status` from. :param statusurl: The API URL which will be used in the leave request. Please use :meth:`leave_contributor` or :meth:`leave_moderator` rather than setting this directly. :returns: the json response from the server. """ if isinstance(subreddit, six.string_types): subreddit = self.get_subreddit(subreddit) data = {'id': subreddit.fullname} return self.request_json(statusurl, data=data)
[ "def", "_leave_status", "(", "self", ",", "subreddit", ",", "statusurl", ")", ":", "if", "isinstance", "(", "subreddit", ",", "six", ".", "string_types", ")", ":", "subreddit", "=", "self", ".", "get_subreddit", "(", "subreddit", ")", "data", "=", "{", "'id'", ":", "subreddit", ".", "fullname", "}", "return", "self", ".", "request_json", "(", "statusurl", ",", "data", "=", "data", ")" ]
41.6
19.4
def _get_bs4_string(soup):
    """
    Output a BeautifulSoup object as a string, modifying the original
    markup as little as possible
    """
    if len(soup.find_all("script")) == 0:
        soup_str = soup.prettify(formatter=None).strip()
    else:
        soup_str = str(soup.html)
    soup_str = re.sub("&amp;", "&", soup_str)
    soup_str = re.sub("&lt;", "<", soup_str)
    soup_str = re.sub("&gt;", ">", soup_str)
    return soup_str
[ "def", "_get_bs4_string", "(", "soup", ")", ":", "if", "len", "(", "soup", ".", "find_all", "(", "\"script\"", ")", ")", "==", "0", ":", "soup_str", "=", "soup", ".", "prettify", "(", "formatter", "=", "None", ")", ".", "strip", "(", ")", "else", ":", "soup_str", "=", "str", "(", "soup", ".", "html", ")", "soup_str", "=", "re", ".", "sub", "(", "\"&amp;\"", ",", "\"&\"", ",", "soup_str", ")", "soup_str", "=", "re", ".", "sub", "(", "\"&lt;\"", ",", "\"<\"", ",", "soup_str", ")", "soup_str", "=", "re", ".", "sub", "(", "\"&gt;\"", ",", "\">\"", ",", "soup_str", ")", "return", "soup_str" ]
36.083333
13.75
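The entity-unescaping step can be exercised on its own with only the standard library (the BeautifulSoup parts are omitted here):

import re

def unescape_basic_entities(s):
    # Mirrors the three re.sub calls above. Because &amp; is replaced
    # first, a literal '&amp;lt;' in the input ends up as '<'
    # (double-unescaped), which matches the original's behavior.
    s = re.sub("&amp;", "&", s)
    s = re.sub("&lt;", "<", s)
    s = re.sub("&gt;", ">", s)
    return s

print(unescape_basic_entities("if a &lt; b &amp;&amp; b &gt; c"))
# if a < b && b > c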
def get_saml_assertion(cls, ticket): """ http://www.jasig.org/cas/protocol#samlvalidate-cas-3.0 SAML request values: RequestID [REQUIRED]: unique identifier for the request IssueInstant [REQUIRED]: timestamp of the request samlp:AssertionArtifact [REQUIRED]: the valid CAS Service Ticket obtained as a response parameter at login. """ # RequestID [REQUIRED] - unique identifier for the request request_id = uuid4() # e.g. 2014-06-02T09:21:03.071189 timestamp = datetime.datetime.now().isoformat() return SAML_ASSERTION_TEMPLATE.format( request_id=request_id, timestamp=timestamp, ticket=ticket, ).encode('utf8')
[ "def", "get_saml_assertion", "(", "cls", ",", "ticket", ")", ":", "# RequestID [REQUIRED] - unique identifier for the request", "request_id", "=", "uuid4", "(", ")", "# e.g. 2014-06-02T09:21:03.071189", "timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "return", "SAML_ASSERTION_TEMPLATE", ".", "format", "(", "request_id", "=", "request_id", ",", "timestamp", "=", "timestamp", ",", "ticket", "=", "ticket", ",", ")", ".", "encode", "(", "'utf8'", ")" ]
32
15.666667
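A standalone sketch of the same request-building steps; the template below is made up for illustration, since the real SAML_ASSERTION_TEMPLATE is not shown in this record:

import datetime
from uuid import uuid4

# Hypothetical, heavily trimmed template for illustration only.
TEMPLATE = ('<samlp:Request RequestID="{request_id}" '
            'IssueInstant="{timestamp}">'
            '<samlp:AssertionArtifact>{ticket}</samlp:AssertionArtifact>'
            '</samlp:Request>')

def build_assertion(ticket):
    request_id = uuid4()                             # unique per request
    timestamp = datetime.datetime.now().isoformat()  # e.g. 2014-06-02T09:21:03.071189
    return TEMPLATE.format(request_id=request_id,
                           timestamp=timestamp,
                           ticket=ticket).encode('utf8')

print(build_assertion('ST-1-abcdef'))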
def foreground(color, content, readline=False): """ Color the text of the content :param color: pick a constant, any constant :type color: int :param content: Whatever you want to say... :type content: unicode :return: ansi string :rtype: unicode """ return encode(color, readline=readline) + content + encode(DEFAULT, readline=readline)
[ "def", "foreground", "(", "color", ",", "content", ",", "readline", "=", "False", ")", ":", "return", "encode", "(", "color", ",", "readline", "=", "readline", ")", "+", "content", "+", "encode", "(", "DEFAULT", ",", "readline", "=", "readline", ")" ]
33.090909
16.545455
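A guess at what encode might look like for plain ANSI escape codes; the readline=True variant (which would wrap the codes in readline's \x01/\x02 markers) is left out, and the constants are assumptions:

DEFAULT = 39          # assumed: ANSI SGR code for the default foreground
RED = 31              # assumed: ANSI SGR code for red

def encode(color, readline=False):
    # Assumed implementation: emit an SGR escape sequence.
    return '\x1b[{}m'.format(color)

def foreground(color, content, readline=False):
    return encode(color, readline=readline) + content + encode(DEFAULT, readline=readline)

print(foreground(RED, 'error!'))   # renders 'error!' in red on ANSI terminals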
def dump(self, *args, **kwargs): """ Build a list of dicts, by calling :meth:`Node.dump` on each item. Each keyword provides a function that extracts a value from a Node. Examples: >>> c = Collection([Scalar(1), Scalar(2)]) >>> c.dump(x2=Q*2, m1=Q-1).val() [{'x2': 2, 'm1': 0}, {'x2': 4, 'm1': 1}] """ return self.each(Q.dump(*args, **kwargs))
[ "def", "dump", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "each", "(", "Q", ".", "dump", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
28.8
18
def predict_survival_function(self, X, times=None): """ Predict the survival function for individuals, given their covariates. This assumes that the individual just entered the study (that is, we do not condition on how long they have already lived for.) Parameters ---------- X: numpy array or DataFrame a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. times: iterable, optional an iterable of increasing times to predict the cumulative hazard at. Default is the set of all durations (observed and unobserved). Uses a linear interpolation if points in time are not in the index. Returns ------- survival_function : DataFrame the survival probabilities of individuals over the timeline """ return np.exp(-self.predict_cumulative_hazard(X, times=times))
[ "def", "predict_survival_function", "(", "self", ",", "X", ",", "times", "=", "None", ")", ":", "return", "np", ".", "exp", "(", "-", "self", ".", "predict_cumulative_hazard", "(", "X", ",", "times", "=", "times", ")", ")" ]
42.791667
27.625
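The one-line body is the standard identity S(t) = exp(-H(t)); a numeric check with plain numpy:

import numpy as np

cumulative_hazard = np.array([0.0, 0.1, 0.5, 1.0, 2.0])  # H(t) at some times
survival = np.exp(-cumulative_hazard)                     # S(t) = exp(-H(t))
print(survival)   # [1.  0.9048  0.6065  0.3679  0.1353] (rounded)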
def all_ip_address_in_subnet(ip_net, cidr):
    """
    Function to return every IP address in a subnet
    :param ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
    :param cidr: CIDR value of 0 to 32
    :return: A list of IP addresses
    """
    ip_address_list = list()
    if not ip_mask('{ip_net}/{cidr}'.format(ip_net=ip_net, cidr=cidr), return_tuple=False):
        LOGGER.critical('{network} is not a valid IPv4 network'.format(network='{ip_net}/{cidr}'.format(ip_net=ip_net, cidr=cidr)))
        raise ValueError('{network} is not a valid IPv4 network'.format(network='{ip_net}/{cidr}'.format(ip_net=ip_net, cidr=cidr)))
    else:
        ip_net = whole_subnet_maker(ip_net, cidr)
        net = __ipaddress.ip_network('{ip_net}/{cidr}'.format(ip_net=ip_net, cidr=cidr))
        for single_ip in net:
            ip_address_list.append(str(single_ip))
        return ip_address_list
[ "def", "all_ip_address_in_subnet", "(", "ip_net", ",", "cidr", ")", ":", "ip_address_list", "=", "list", "(", ")", "if", "not", "ip_mask", "(", "'{ip_net}/{cidr}'", ".", "format", "(", "ip_net", "=", "ip_net", ",", "cidr", "=", "cidr", ")", ",", "return_tuple", "=", "False", ")", ":", "LOGGER", ".", "critical", "(", "'{network} is not a valid IPv4 network'", ".", "format", "(", "network", "=", "'{ip_net}/{cidr}'", ".", "format", "(", "ip_net", "=", "ip_net", ",", "cidr", "=", "cidr", ")", ")", ")", "raise", "ValueError", "(", "'{network} is not a valid IPv4 network'", ".", "format", "(", "network", "=", "'{ip_net}/{cidr}'", ".", "format", "(", "ip_net", "=", "ip_net", ",", "cidr", "=", "cidr", ")", ")", ")", "else", ":", "ip_net", "=", "whole_subnet_maker", "(", "ip_net", ",", "cidr", ")", "net", "=", "__ipaddress", ".", "ip_network", "(", "'{ip_net}/{cidr}'", ".", "format", "(", "ip_net", "=", "ip_net", ",", "cidr", "=", "cidr", ")", ")", "for", "single_ip", "in", "net", ":", "ip_address_list", ".", "append", "(", "str", "(", "single_ip", ")", ")", "return", "ip_address_list" ]
49.391304
31.304348
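The core enumeration relies on the standard library's ipaddress module; the project-specific validation and normalisation helpers (ip_mask, whole_subnet_maker) are skipped in this sketch:

import ipaddress

def every_address(ip_net, cidr):
    # strict=False lets ipaddress normalise a host address to its network,
    # roughly what whole_subnet_maker does above.
    net = ipaddress.ip_network('{}/{}'.format(ip_net, cidr), strict=False)
    return [str(ip) for ip in net]

print(every_address('192.168.1.37', 30))
# ['192.168.1.36', '192.168.1.37', '192.168.1.38', '192.168.1.39']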
def ensure(user, action, subject):
    """ Similar to ``can`` but raises an AccessDenied exception if the user does not have access"""
    ability = Ability(user, get_authorization_method())
    if ability.cannot(action, subject):
        raise AccessDenied()
[ "def", "ensure", "(", "user", ",", "action", ",", "subject", ")", ":", "ability", "=", "Ability", "(", "user", ",", "get_authorization_method", "(", ")", ")", "if", "ability", ".", "cannot", "(", "action", ",", "subject", ")", ":", "raise", "AccessDenied", "(", ")" ]
49.8
6.8
def actor_url(parser, token): """ Renders the URL for a particular actor instance :: <a href="{% actor_url request.user %}">View your actions</a> <a href="{% actor_url another_user %}">{{ another_user }}'s actions</a> """ bits = token.split_contents() if len(bits) != 2: raise TemplateSyntaxError("Accepted format " "{% actor_url [actor_instance] %}") else: return DisplayActivityActorUrl(*bits[1:])
[ "def", "actor_url", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", "!=", "2", ":", "raise", "TemplateSyntaxError", "(", "\"Accepted format \"", "\"{% actor_url [actor_instance] %}\"", ")", "else", ":", "return", "DisplayActivityActorUrl", "(", "*", "bits", "[", "1", ":", "]", ")" ]
30.0625
21.8125
def authorgroup(self):
    """A list of namedtuples representing the article's authors organized by
    affiliation, in the form (affiliation_id, dptid, organization, city,
    postalcode, addresspart, country, auid, indexed_name, surname, given_name).
    If "given_name" is not present, fall back to initials.
    Note: Affiliation information might be missing or mal-assigned even
    when it looks correct in the web view.  In this case please request
    a correction.
    """
    out = []
    fields = 'affiliation_id dptid organization city postalcode '\
             'addresspart country auid indexed_name surname given_name'
    auth = namedtuple('Author', fields)
    items = listify(self._head.get('author-group', []))
    for item in items:
        # Affiliation information
        aff = item.get('affiliation', {})
        try:
            aff_ids = listify(aff['affiliation-id'])
            aff_id = ", ".join([a["@afid"] for a in aff_ids])
        except KeyError:
            aff_id = aff.get("@afid")
        org = _get_org(aff)
        # Author information (might relate to collaborations)
        authors = listify(item.get('author', item.get('collaboration', [])))
        for au in authors:
            try:
                given = au.get('ce:given-name', au['ce:initials'])
            except KeyError:  # Collaboration
                given = au.get('ce:text')
            new = auth(affiliation_id=aff_id, organization=org,
                       city=aff.get('city'), dptid=aff.get("@dptid"),
                       postalcode=aff.get('postal-code'),
                       addresspart=aff.get('address-part'),
                       country=aff.get('country'), auid=au.get('@auid'),
                       surname=au.get('ce:surname'), given_name=given,
                       indexed_name=chained_get(au, ['preferred-name', 'ce:indexed-name']))
            out.append(new)
    return out or None
[ "def", "authorgroup", "(", "self", ")", ":", "out", "=", "[", "]", "fields", "=", "'affiliation_id dptid organization city postalcode '", "'addresspart country auid indexed_name surname given_name'", "auth", "=", "namedtuple", "(", "'Author'", ",", "fields", ")", "items", "=", "listify", "(", "self", ".", "_head", ".", "get", "(", "'author-group'", ",", "[", "]", ")", ")", "for", "item", "in", "items", ":", "# Affiliation information", "aff", "=", "item", ".", "get", "(", "'affiliation'", ",", "{", "}", ")", "try", ":", "aff_ids", "=", "listify", "(", "aff", "[", "'affiliation-id'", "]", ")", "aff_id", "=", "\", \"", ".", "join", "(", "[", "a", "[", "\"@afid\"", "]", "for", "a", "in", "aff_ids", "]", ")", "except", "KeyError", ":", "aff_id", "=", "aff", ".", "get", "(", "\"@afid\"", ")", "org", "=", "_get_org", "(", "aff", ")", "# Author information (might relate to collaborations)", "authors", "=", "listify", "(", "item", ".", "get", "(", "'author'", ",", "item", ".", "get", "(", "'collaboration'", ",", "[", "]", ")", ")", ")", "for", "au", "in", "authors", ":", "try", ":", "given", "=", "au", ".", "get", "(", "'ce:given-name'", ",", "au", "[", "'ce:initials'", "]", ")", "except", "KeyError", ":", "# Collaboration", "given", "=", "au", ".", "get", "(", "'ce:text'", ")", "new", "=", "auth", "(", "affiliation_id", "=", "aff_id", ",", "organization", "=", "org", ",", "city", "=", "aff", ".", "get", "(", "'city'", ")", ",", "dptid", "=", "aff", ".", "get", "(", "\"@dptid\"", ")", ",", "postalcode", "=", "aff", ".", "get", "(", "'postal-code'", ")", ",", "addresspart", "=", "aff", ".", "get", "(", "'address-part'", ")", ",", "country", "=", "aff", ".", "get", "(", "'country'", ")", ",", "auid", "=", "au", ".", "get", "(", "'@auid'", ")", ",", "surname", "=", "au", ".", "get", "(", "'ce:surname'", ")", ",", "given_name", "=", "given", ",", "indexed_name", "=", "chained_get", "(", "au", ",", "[", "'preferred-name'", ",", "'ce:indexed-name'", "]", ")", ")", "out", ".", "append", "(", "new", ")", "return", "out", "or", "None" ]
51.45
20.6
def _parse_arg(valid_classifications): """ Command line parser for coco Parameters ---------- valid_classifications: list Available classifications, used for checking input parameters. Returns ------- args : ArgumentParser namespace """ parser = argparse.ArgumentParser( description=('The country converter (coco): a Python package for ' 'converting country names between ' 'different classifications schemes. ' 'Version: {}'.format(__version__) ), prog='coco', usage=('%(prog)s --names --src --to]')) parser.add_argument('names', help=('List of countries to convert ' '(space separated, country names consisting of ' 'multiple words must be put in quotation marks).' 'Possible classifications: ' + ', '.join(valid_classifications) + '; NB: long, official and short are provided ' 'as shortcuts for the names classifications' ), nargs='*') parser.add_argument('-s', '--src', '--source', '-f', '--from', help=('Classification of the names given, ' '(default: inferred from names)')) parser.add_argument('-t', '--to', help=('Required classification of the passed names' '(default: "ISO3"')) parser.add_argument('-o', '--output_sep', help=('Seperator for output names ' '(default: space), e.g. "," ')) parser.add_argument('-n', '--not_found', default='not found', help=('Fill in value for none found entries. ' 'If "None" (string), keep the input value ' '(default: not found)')) parser.add_argument('-a', '--additional_data', help=('Data file with additional country data' '(Same format as the original data file - ' 'utf-8 encoded tab separated data, same ' 'column headers as in the general country ' 'data file; default: not found)')) args = parser.parse_args() args.src = args.src or None args.to = args.to or 'ISO3' args.not_found = args.not_found if args.not_found != 'None' else None args.output_sep = args.output_sep or ' ' return args
[ "def", "_parse_arg", "(", "valid_classifications", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "(", "'The country converter (coco): a Python package for '", "'converting country names between '", "'different classifications schemes. '", "'Version: {}'", ".", "format", "(", "__version__", ")", ")", ",", "prog", "=", "'coco'", ",", "usage", "=", "(", "'%(prog)s --names --src --to]'", ")", ")", "parser", ".", "add_argument", "(", "'names'", ",", "help", "=", "(", "'List of countries to convert '", "'(space separated, country names consisting of '", "'multiple words must be put in quotation marks).'", "'Possible classifications: '", "+", "', '", ".", "join", "(", "valid_classifications", ")", "+", "'; NB: long, official and short are provided '", "'as shortcuts for the names classifications'", ")", ",", "nargs", "=", "'*'", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--src'", ",", "'--source'", ",", "'-f'", ",", "'--from'", ",", "help", "=", "(", "'Classification of the names given, '", "'(default: inferred from names)'", ")", ")", "parser", ".", "add_argument", "(", "'-t'", ",", "'--to'", ",", "help", "=", "(", "'Required classification of the passed names'", "'(default: \"ISO3\"'", ")", ")", "parser", ".", "add_argument", "(", "'-o'", ",", "'--output_sep'", ",", "help", "=", "(", "'Seperator for output names '", "'(default: space), e.g. \",\" '", ")", ")", "parser", ".", "add_argument", "(", "'-n'", ",", "'--not_found'", ",", "default", "=", "'not found'", ",", "help", "=", "(", "'Fill in value for none found entries. '", "'If \"None\" (string), keep the input value '", "'(default: not found)'", ")", ")", "parser", ".", "add_argument", "(", "'-a'", ",", "'--additional_data'", ",", "help", "=", "(", "'Data file with additional country data'", "'(Same format as the original data file - '", "'utf-8 encoded tab separated data, same '", "'column headers as in the general country '", "'data file; default: not found)'", ")", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "args", ".", "src", "=", "args", ".", "src", "or", "None", "args", ".", "to", "=", "args", ".", "to", "or", "'ISO3'", "args", ".", "not_found", "=", "args", ".", "not_found", "if", "args", ".", "not_found", "!=", "'None'", "else", "None", "args", ".", "output_sep", "=", "args", ".", "output_sep", "or", "' '", "return", "args" ]
44.457627
22.474576
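A much smaller argparse sketch showing the same post-processing idiom used above (applying defaults after parse_args rather than inside add_argument):

import argparse

parser = argparse.ArgumentParser(prog='coco-mini')
parser.add_argument('names', nargs='*')
parser.add_argument('-t', '--to')
parser.add_argument('-n', '--not_found', default='not found')

args = parser.parse_args(['Germany', 'Spain', '-n', 'None'])
args.to = args.to or 'ISO3'                                     # fallback default
args.not_found = args.not_found if args.not_found != 'None' else None
print(args.names, args.to, args.not_found)
# ['Germany', 'Spain'] ISO3 None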
def valuefrompostdata(self, postdata):
    """This parameter method searches the POST data and retrieves the values it
    needs. It does not set the value itself, but simply returns it; the result
    needs to be explicitly passed to parameter.set()"""
    if self.multi: #multi parameters can be passed as parameterid=choiceid1,choiceid2 or by setting parameterid[choiceid]=1 (or whatever other non-zero value)
        found = False
        if self.id in postdata:
            found = True
            passedvalues = postdata[self.id].split(',')
            values = []
            for choicekey in [x[0] for x in self.choices]:
                if choicekey in passedvalues:
                    found = True
                    values.append(choicekey)
        else:
            values = []
            for choicekey in [x[0] for x in self.choices]:
                if self.id+'['+choicekey+']' in postdata:
                    found = True
                    if postdata[self.id+'['+choicekey+']']:
                        values.append(choicekey)
        if not found:
            return None
        else:
            return values
    else:
        if self.id in postdata:
            return postdata[self.id]
        else:
            return None
[ "def", "valuefrompostdata", "(", "self", ",", "postdata", ")", ":", "if", "self", ".", "multi", ":", "#multi parameters can be passed as parameterid=choiceid1,choiceid2 or by setting parameterid[choiceid]=1 (or whatever other non-zero value)", "found", "=", "False", "if", "self", ".", "id", "in", "postdata", ":", "found", "=", "True", "passedvalues", "=", "postdata", "[", "self", ".", "id", "]", ".", "split", "(", "','", ")", "values", "=", "[", "]", "for", "choicekey", "in", "[", "x", "[", "0", "]", "for", "x", "in", "self", ".", "choices", "]", ":", "if", "choicekey", "in", "passedvalues", ":", "found", "=", "True", "values", ".", "append", "(", "choicekey", ")", "else", ":", "values", "=", "[", "]", "for", "choicekey", "in", "[", "x", "[", "0", "]", "for", "x", "in", "self", ".", "choices", "]", ":", "if", "self", ".", "id", "+", "'['", "+", "choicekey", "+", "']'", "in", "postdata", ":", "found", "=", "True", "if", "postdata", "[", "self", ".", "id", "+", "'['", "+", "choicekey", "+", "']'", "]", ":", "values", ".", "append", "(", "choicekey", ")", "if", "not", "found", ":", "return", "None", "else", ":", "return", "values", "else", ":", "if", "self", ".", "id", "in", "postdata", ":", "return", "postdata", "[", "self", ".", "id", "]", "else", ":", "return", "None" ]
47.178571
17.142857
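The two accepted POST encodings can be demonstrated with a plain dict standing in for the postdata mapping; this is a simplified sketch (the surrounding parameter class is omitted, and edge cases around present-but-falsy keys differ slightly from the original):

choices = [('a', 'Apple'), ('b', 'Banana'), ('c', 'Cherry')]

def multi_value(param_id, postdata):
    # Form 1: id=choice1,choice2    Form 2: id[choice]=1
    if param_id in postdata:
        passed = postdata[param_id].split(',')
        return [key for key, _ in choices if key in passed]
    values = [key for key, _ in choices
              if postdata.get(param_id + '[' + key + ']')]
    return values or None

print(multi_value('fruit', {'fruit': 'a,c'}))    # ['a', 'c']
print(multi_value('fruit', {'fruit[b]': '1'}))   # ['b']
print(multi_value('fruit', {}))                  # None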
def CreateControls(self): """Create our sub-controls""" wx.EVT_LIST_COL_CLICK(self, self.GetId(), self.OnReorder) wx.EVT_LIST_ITEM_SELECTED(self, self.GetId(), self.OnNodeSelected) wx.EVT_MOTION(self, self.OnMouseMove) wx.EVT_LIST_ITEM_ACTIVATED(self, self.GetId(), self.OnNodeActivated) self.CreateColumns()
[ "def", "CreateControls", "(", "self", ")", ":", "wx", ".", "EVT_LIST_COL_CLICK", "(", "self", ",", "self", ".", "GetId", "(", ")", ",", "self", ".", "OnReorder", ")", "wx", ".", "EVT_LIST_ITEM_SELECTED", "(", "self", ",", "self", ".", "GetId", "(", ")", ",", "self", ".", "OnNodeSelected", ")", "wx", ".", "EVT_MOTION", "(", "self", ",", "self", ".", "OnMouseMove", ")", "wx", ".", "EVT_LIST_ITEM_ACTIVATED", "(", "self", ",", "self", ".", "GetId", "(", ")", ",", "self", ".", "OnNodeActivated", ")", "self", ".", "CreateColumns", "(", ")" ]
50
18.142857
def get_name(self, func): ''' Get the name to reference a function by :param func: function to get the name of :type func: callable ''' if hasattr(func, 'name'): return func.name return '%s.%s' % ( func.__module__, func.__name__ )
[ "def", "get_name", "(", "self", ",", "func", ")", ":", "if", "hasattr", "(", "func", ",", "'name'", ")", ":", "return", "func", ".", "name", "return", "'%s.%s'", "%", "(", "func", ".", "__module__", ",", "func", ".", "__name__", ")" ]
22.785714
19.357143
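This one is easy to exercise directly; any callable without a name attribute falls back to its dotted module path:

import json

def get_name(func):
    if hasattr(func, 'name'):
        return func.name
    return '%s.%s' % (func.__module__, func.__name__)

print(get_name(json.dumps))        # json.dumps

class Named:
    name = 'custom-name'

print(get_name(Named()))           # custom-name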
def tojsonarrays(table, source=None, prefix=None, suffix=None, output_header=False, *args, **kwargs): """ Write a table in JSON format, with rows output as JSON arrays. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2], ... ['c', 2]] >>> etl.tojsonarrays(table1, 'example.json') >>> # check what it did ... print(open('example.json').read()) [["a", 1], ["b", 2], ["c", 2]] Note that this is currently not streaming, all data is loaded into memory before being written to the file. """ if output_header: obj = list(table) else: obj = list(data(table)) _writejson(source, obj, prefix, suffix, *args, **kwargs)
[ "def", "tojsonarrays", "(", "table", ",", "source", "=", "None", ",", "prefix", "=", "None", ",", "suffix", "=", "None", ",", "output_header", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "output_header", ":", "obj", "=", "list", "(", "table", ")", "else", ":", "obj", "=", "list", "(", "data", "(", "table", ")", ")", "_writejson", "(", "source", ",", "obj", ",", "prefix", ",", "suffix", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
31.64
17.32
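The underlying write is just JSON serialisation of row lists; a standard-library sketch of what _writejson presumably boils down to (the prefix/suffix handling here is an assumption):

import json

table = [['foo', 'bar'], ['a', 1], ['b', 2], ['c', 2]]

def write_json_arrays(rows, path, output_header=False, prefix=None, suffix=None):
    obj = rows if output_header else rows[1:]   # drop the header row by default
    with open(path, 'w') as f:
        if prefix:
            f.write(prefix)
        json.dump(obj, f)
        if suffix:
            f.write(suffix)

write_json_arrays(table, 'example.json')
print(open('example.json').read())   # [["a", 1], ["b", 2], ["c", 2]]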
def get_pair(self, pair):
    """get the internal node pair corresponding to a pair of named nodes

    Parameters
    ----------
    pair : :any:`list` of nodes
        Pair of nodes to look up by name

    Returns
    -------
    tuple
        The two internal nodes matching the names of the given pair
    """
    i, j = pair
    return (self._nodes[i.name()], self._nodes[j.name()])
[ "def", "get_pair", "(", "self", ",", "pair", ")", ":", "i", ",", "j", "=", "pair", "return", "(", "self", ".", "_nodes", "[", "i", ".", "name", "(", ")", "]", ",", "self", ".", "_nodes", "[", "j", ".", "name", "(", ")", "]", ")" ]
20.933333
19.333333
def duration_minutes(duration):
    """Return the number of minutes in a duration (or a list of durations),
    so the same arithmetic is not repeated all over the codebase"""
    if isinstance(duration, list):
        res = dt.timedelta()
        for entry in duration:
            res += entry
        return duration_minutes(res)
    elif isinstance(duration, dt.timedelta):
        return duration.total_seconds() / 60
    else:
        return duration
[ "def", "duration_minutes", "(", "duration", ")", ":", "if", "isinstance", "(", "duration", ",", "list", ")", ":", "res", "=", "dt", ".", "timedelta", "(", ")", "for", "entry", "in", "duration", ":", "res", "+=", "entry", "return", "duration_minutes", "(", "res", ")", "elif", "isinstance", "(", "duration", ",", "dt", ".", "timedelta", ")", ":", "return", "duration", ".", "total_seconds", "(", ")", "/", "60", "else", ":", "return", "duration" ]
31.833333
12.75
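A self-contained version runs on plain datetime.timedelta values:

import datetime as dt

def duration_minutes(duration):
    if isinstance(duration, list):
        res = dt.timedelta()
        for entry in duration:
            res += entry
        return duration_minutes(res)
    elif isinstance(duration, dt.timedelta):
        return duration.total_seconds() / 60
    else:
        return duration

print(duration_minutes(dt.timedelta(hours=1, minutes=30)))   # 90.0
print(duration_minutes([dt.timedelta(minutes=20),
                        dt.timedelta(minutes=25)]))          # 45.0
print(duration_minutes(15))                                  # 15 (passed through)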
def CheckApproversForLabel(self, token, client_urn, requester, approvers, label): """Checks if requester and approvers have approval privileges for labels. Checks against list of approvers for each label defined in approvers.yaml to determine if the list of approvers is sufficient. Args: token: user token client_urn: ClientURN object of the client requester: username string of person requesting approval. approvers: list of username strings that have approved this client. label: label strings to check approval privs for. Returns: True if access is allowed, raises otherwise. """ auth = self.reader.GetAuthorizationForSubject(label) if not auth: # This label isn't listed in approvers.yaml return True if auth.requester_must_be_authorized: if not self.CheckPermissions(requester, label): raise access_control.UnauthorizedAccess( "User %s not in %s or groups:%s for %s" % (requester, auth.users, auth.groups, label), subject=client_urn, requested_access=token.requested_access) approved_count = 0 for approver in approvers: if self.CheckPermissions(approver, label) and approver != requester: approved_count += 1 if approved_count < auth.num_approvers_required: raise access_control.UnauthorizedAccess( "Found %s approvers for %s, needed %s" % (approved_count, label, auth.num_approvers_required), subject=client_urn, requested_access=token.requested_access) return True
[ "def", "CheckApproversForLabel", "(", "self", ",", "token", ",", "client_urn", ",", "requester", ",", "approvers", ",", "label", ")", ":", "auth", "=", "self", ".", "reader", ".", "GetAuthorizationForSubject", "(", "label", ")", "if", "not", "auth", ":", "# This label isn't listed in approvers.yaml", "return", "True", "if", "auth", ".", "requester_must_be_authorized", ":", "if", "not", "self", ".", "CheckPermissions", "(", "requester", ",", "label", ")", ":", "raise", "access_control", ".", "UnauthorizedAccess", "(", "\"User %s not in %s or groups:%s for %s\"", "%", "(", "requester", ",", "auth", ".", "users", ",", "auth", ".", "groups", ",", "label", ")", ",", "subject", "=", "client_urn", ",", "requested_access", "=", "token", ".", "requested_access", ")", "approved_count", "=", "0", "for", "approver", "in", "approvers", ":", "if", "self", ".", "CheckPermissions", "(", "approver", ",", "label", ")", "and", "approver", "!=", "requester", ":", "approved_count", "+=", "1", "if", "approved_count", "<", "auth", ".", "num_approvers_required", ":", "raise", "access_control", ".", "UnauthorizedAccess", "(", "\"Found %s approvers for %s, needed %s\"", "%", "(", "approved_count", ",", "label", ",", "auth", ".", "num_approvers_required", ")", ",", "subject", "=", "client_urn", ",", "requested_access", "=", "token", ".", "requested_access", ")", "return", "True" ]
39.804878
19.853659
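Stripped of the GRR-specific plumbing, the quorum rule reduces to counting approvers with permission while excluding the requester; a hypothetical standalone sketch:

def quorum_met(requester, approvers, has_permission, num_required):
    # has_permission: callable(username) -> bool, standing in for
    # CheckPermissions(user, label) above.
    approved = sum(1 for a in approvers
                   if has_permission(a) and a != requester)
    return approved >= num_required

admins = {'alice', 'bob', 'carol'}
print(quorum_met('dave', ['alice', 'bob'], admins.__contains__, 2))   # True
print(quorum_met('alice', ['alice', 'bob'], admins.__contains__, 2))  # False, self-approval excluded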
def _get_param_names(self): """ Get mappable parameters from YAML. """ template = Template(self.yaml_string) names = ['yaml_string'] # always include the template for match in re.finditer(template.pattern, template.template): name = match.group('named') or match.group('braced') assert name is not None names.append(name) return names
[ "def", "_get_param_names", "(", "self", ")", ":", "template", "=", "Template", "(", "self", ".", "yaml_string", ")", "names", "=", "[", "'yaml_string'", "]", "# always include the template", "for", "match", "in", "re", ".", "finditer", "(", "template", ".", "pattern", ",", "template", ".", "template", ")", ":", "name", "=", "match", ".", "group", "(", "'named'", ")", "or", "match", ".", "group", "(", "'braced'", ")", "assert", "name", "is", "not", "None", "names", ".", "append", "(", "name", ")", "return", "names" ]
37.909091
11.909091
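string.Template exposes its compiled regex as Template.pattern, with named groups for the $name and ${name} forms, which is exactly what the loop above relies on; a quick demonstration:

import re
from string import Template

template = Template("model:\n  lr: $learning_rate\n  layers: ${num_layers}\n")
names = ['yaml_string']              # always include the template itself
for match in re.finditer(template.pattern, template.template):
    name = match.group('named') or match.group('braced')
    assert name is not None          # holds here: no $$ escapes in this template
    names.append(name)
print(names)   # ['yaml_string', 'learning_rate', 'num_layers']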
def gapfill(model, universal=None, lower_bound=0.05,
            penalties=None, demand_reactions=True, exchange_reactions=False,
            iterations=1):
    """Perform gapfilling on a model.

    See documentation for the class GapFiller.

    Parameters
    ----------
    model : cobra.Model
        The model to perform gap filling on.
    universal : cobra.Model, None
        A universal model with reactions that can be used to complete the
        model. Only gapfill considering demand and exchange reactions if
        left missing.
    lower_bound : float
        The minimally accepted flux for the objective in the filled model.
    penalties : dict, None
        A dictionary with keys being 'universal' (all reactions included in
        the universal model), 'exchange' and 'demand' (all additionally
        added exchange and demand reactions) for the three reaction types.
        Can also have reaction identifiers for reaction specific costs.
        Defaults are 1, 100 and 1 respectively.
    iterations : int
        The number of rounds of gapfilling to perform. For every iteration,
        the penalty for every used reaction increases linearly. This way,
        the algorithm is encouraged to search for alternative solutions
        which may include previously used reactions. I.e., with enough
        iterations pathways including 10 steps will eventually be reported
        even if the shortest pathway is a single reaction.
    exchange_reactions : bool
        Consider adding exchange (uptake) reactions for all metabolites
        in the model.
    demand_reactions : bool
        Consider adding demand reactions for all metabolites.

    Returns
    -------
    iterable
        list of lists with one set of reactions that completes the model per
        requested iteration.

    Examples
    --------
    >>> import cobra.test as ct
    >>> from cobra import Model
    >>> from cobra.flux_analysis import gapfill
    >>> model = ct.create_test_model("salmonella")
    >>> universal = Model('universal')
    >>> universal.add_reactions(model.reactions.GF6PTA.copy())
    >>> model.remove_reactions([model.reactions.GF6PTA])
    >>> gapfill(model, universal)
    """
    gapfiller = GapFiller(model, universal=universal,
                          lower_bound=lower_bound, penalties=penalties,
                          demand_reactions=demand_reactions,
                          exchange_reactions=exchange_reactions)
    return gapfiller.fill(iterations=iterations)
[ "def", "gapfill", "(", "model", ",", "universal", "=", "None", ",", "lower_bound", "=", "0.05", ",", "penalties", "=", "None", ",", "demand_reactions", "=", "True", ",", "exchange_reactions", "=", "False", ",", "iterations", "=", "1", ")", ":", "gapfiller", "=", "GapFiller", "(", "model", ",", "universal", "=", "universal", ",", "lower_bound", "=", "lower_bound", ",", "penalties", "=", "penalties", ",", "demand_reactions", "=", "demand_reactions", ",", "exchange_reactions", "=", "exchange_reactions", ")", "return", "gapfiller", ".", "fill", "(", "iterations", "=", "iterations", ")" ]
42.310345
21.689655
def home(self) -> str: """ Return the robot to the home position and update the position tracker """ self.hardware.home() self.current_position = self._position() return 'Homed'
[ "def", "home", "(", "self", ")", "->", "str", ":", "self", ".", "hardware", ".", "home", "(", ")", "self", ".", "current_position", "=", "self", ".", "_position", "(", ")", "return", "'Homed'" ]
31.285714
13.285714
def reduce_by_window(self, window_config, reduce_function):
    """Return a new Streamlet in which the elements of this Streamlet are collected
    over a window defined by window_config and then reduced using reduce_function.
    reduce_function takes two elements at a time and reduces them to one element
    that is used in the subsequent operations.
    """
    from heronpy.streamlet.impl.reducebywindowbolt import ReduceByWindowStreamlet
    reduce_streamlet = ReduceByWindowStreamlet(window_config, reduce_function, self)
    self._add_child(reduce_streamlet)
    return reduce_streamlet
[ "def", "reduce_by_window", "(", "self", ",", "window_config", ",", "reduce_function", ")", ":", "from", "heronpy", ".", "streamlet", ".", "impl", ".", "reducebywindowbolt", "import", "ReduceByWindowStreamlet", "reduce_streamlet", "=", "ReduceByWindowStreamlet", "(", "window_config", ",", "reduce_function", ",", "self", ")", "self", ".", "_add_child", "(", "reduce_streamlet", ")", "return", "reduce_streamlet" ]
59.6
21.8
def _create_event(self, event_state, event_type, event_value,
                  proc_list, proc_desc, peak_time):
    """Add a new item in the log list.

    Item is added only if the criticality (event_state) is WARNING or CRITICAL.
    """
    if event_state == "WARNING" or event_state == "CRITICAL":
        # Define the automatic process sort key
        self.set_process_sort(event_type)

        # Create the new log item
        # Time is stored in Epoch format
        # Epoch -> DMYHMS = datetime.fromtimestamp(epoch)
        item = [
            time.mktime(datetime.now().timetuple()),  # START DATE
            -1,  # END DATE
            event_state,  # STATE: WARNING|CRITICAL
            event_type,  # TYPE: CPU, LOAD, MEM...
            event_value,  # MAX
            event_value,  # AVG
            event_value,  # MIN
            event_value,  # SUM
            1,  # COUNT
            [],  # TOP 3 PROCESS LIST
            proc_desc,  # MONITORED PROCESSES DESC
            glances_processes.sort_key]  # TOP PROCESS SORTKEY

        # Add the item to the list
        self.events_list.insert(0, item)

        # Limit the list to 'events_max' items
        if self.len() > self.events_max:
            self.events_list.pop()

        return True
    else:
        return False
[ "def", "_create_event", "(", "self", ",", "event_state", ",", "event_type", ",", "event_value", ",", "proc_list", ",", "proc_desc", ",", "peak_time", ")", ":", "if", "event_state", "==", "\"WARNING\"", "or", "event_state", "==", "\"CRITICAL\"", ":", "# Define the automatic process sort key", "self", ".", "set_process_sort", "(", "event_type", ")", "# Create the new log item", "# Time is stored in Epoch format", "# Epoch -> DMYHMS = datetime.fromtimestamp(epoch)", "item", "=", "[", "time", ".", "mktime", "(", "datetime", ".", "now", "(", ")", ".", "timetuple", "(", ")", ")", ",", "# START DATE", "-", "1", ",", "# END DATE", "event_state", ",", "# STATE: WARNING|CRITICAL", "event_type", ",", "# TYPE: CPU, LOAD, MEM...", "event_value", ",", "# MAX", "event_value", ",", "# AVG", "event_value", ",", "# MIN", "event_value", ",", "# SUM", "1", ",", "# COUNT", "[", "]", ",", "# TOP 3 PROCESS LIST", "proc_desc", ",", "# MONITORED PROCESSES DESC", "glances_processes", ".", "sort_key", "]", "# TOP PROCESS SORTKEY", "# Add the item to the list", "self", ".", "events_list", ".", "insert", "(", "0", ",", "item", ")", "# Limit the list to 'events_max' items", "if", "self", ".", "len", "(", ")", ">", "self", ".", "events_max", ":", "self", ".", "events_list", ".", "pop", "(", ")", "return", "True", "else", ":", "return", "False" ]
37.432432
15.945946
def _get_footer(self, footer): """ Gets the html footer """ if footer is None: html = self.footer() else: html = footer return html
[ "def", "_get_footer", "(", "self", ",", "footer", ")", ":", "if", "footer", "is", "None", ":", "html", "=", "self", ".", "footer", "(", ")", "else", ":", "html", "=", "footer", "return", "html" ]
21.666667
11.888889
def pattern(ref, est, **kwargs): r'''Pattern detection evaluation Parameters ---------- ref : jams.Annotation Reference annotation object est : jams.Annotation Estimated annotation object kwargs Additional keyword arguments Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved. See Also -------- mir_eval.pattern.evaluate Examples -------- >>> # Load in the JAMS objects >>> ref_jam = jams.load('reference.jams') >>> est_jam = jams.load('estimated.jams') >>> # Select the first relevant annotations >>> ref_ann = ref_jam.search(namespace='pattern_jku')[0] >>> est_ann = est_jam.search(namespace='pattern_jku')[0] >>> scores = jams.eval.pattern(ref_ann, est_ann) ''' namespace = 'pattern_jku' ref = coerce_annotation(ref, namespace) est = coerce_annotation(est, namespace) ref_patterns = pattern_to_mireval(ref) est_patterns = pattern_to_mireval(est) return mir_eval.pattern.evaluate(ref_patterns, est_patterns, **kwargs)
[ "def", "pattern", "(", "ref", ",", "est", ",", "*", "*", "kwargs", ")", ":", "namespace", "=", "'pattern_jku'", "ref", "=", "coerce_annotation", "(", "ref", ",", "namespace", ")", "est", "=", "coerce_annotation", "(", "est", ",", "namespace", ")", "ref_patterns", "=", "pattern_to_mireval", "(", "ref", ")", "est_patterns", "=", "pattern_to_mireval", "(", "est", ")", "return", "mir_eval", ".", "pattern", ".", "evaluate", "(", "ref_patterns", ",", "est_patterns", ",", "*", "*", "kwargs", ")" ]
27.341463
20.121951
def get_categorical_feature_names(example): """Returns a list of feature names for byte type features. Args: example: An example. Returns: A list of categorical feature names (e.g. ['education', 'marital_status'] ) """ features = get_example_features(example) return sorted([ feature_name for feature_name in features if features[feature_name].WhichOneof('kind') == 'bytes_list' ])
[ "def", "get_categorical_feature_names", "(", "example", ")", ":", "features", "=", "get_example_features", "(", "example", ")", "return", "sorted", "(", "[", "feature_name", "for", "feature_name", "in", "features", "if", "features", "[", "feature_name", "]", ".", "WhichOneof", "(", "'kind'", ")", "==", "'bytes_list'", "]", ")" ]
28.857143
21.071429
def grouplabelencode(data, mapping, nacode=None, nastate=False): """Encode data array with grouped labels Parameters: ----------- data : list array with labels mapping : dict, list of list the index of each element is used as encoding. Each element is a single label (str) or list of labels that are mapped to the encoding. nacode : integer (Default: None) Encoding for unmapped states. nastate : bool If False (Default) unmatched data labels are encoded as nacode. If nastate=True (and nacode=None) then unmatched data labels are encoded with the integer nacode=len(mapping). """ # What value is used for missing data? if nastate: if nacode is None: nacode = len(mapping) # Process depending on the data type of the data mapping variable if isinstance(mapping, list): m = mapping e = range(len(mapping)) elif isinstance(mapping, dict): m = list(mapping.values()) e = list(mapping.keys()) else: raise Exception("'data' must be list-of-list or dict.") # Loop over 'data' array return grouplabelencode_loop(data, m, e, nacode=nacode)
[ "def", "grouplabelencode", "(", "data", ",", "mapping", ",", "nacode", "=", "None", ",", "nastate", "=", "False", ")", ":", "# What value is used for missing data?", "if", "nastate", ":", "if", "nacode", "is", "None", ":", "nacode", "=", "len", "(", "mapping", ")", "# Process depending on the data type of the data mapping variable", "if", "isinstance", "(", "mapping", ",", "list", ")", ":", "m", "=", "mapping", "e", "=", "range", "(", "len", "(", "mapping", ")", ")", "elif", "isinstance", "(", "mapping", ",", "dict", ")", ":", "m", "=", "list", "(", "mapping", ".", "values", "(", ")", ")", "e", "=", "list", "(", "mapping", ".", "keys", "(", ")", ")", "else", ":", "raise", "Exception", "(", "\"'data' must be list-of-list or dict.\"", ")", "# Loop over 'data' array", "return", "grouplabelencode_loop", "(", "data", ",", "m", ",", "e", ",", "nacode", "=", "nacode", ")" ]
31.263158
19.657895
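The helper grouplabelencode_loop is not shown in this record; a hedged standalone equivalent of the whole mapping step, written from the docstring's description:

def encode_with_groups(data, mapping, nacode=None, nastate=False):
    # mapping: dict like {0: ['apple', 'pear'], 1: 'carrot'}; keys are the codes.
    if nastate and nacode is None:
        nacode = len(mapping)
    codes = list(mapping.keys())
    groups = [g if isinstance(g, list) else [g] for g in mapping.values()]
    # Invert group -> code into a flat label -> code lookup table.
    lookup = {label: code for code, group in zip(codes, groups) for label in group}
    return [lookup.get(x, nacode) for x in data]

mapping = {0: ['apple', 'pear'], 1: 'carrot'}
print(encode_with_groups(['apple', 'carrot', 'kiwi'], mapping, nastate=True))
# [0, 1, 2]   ('kiwi' is unmatched, so it gets nacode = len(mapping) = 2)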
def install_file_legacy(path, sudo=False, from_path=None, **substitutions):
    '''Install file with path on the host target.

    The source file is the first entry in this list that exists:
     * custom file
     * custom file.template
     * common file
     * common file.template
    '''
    # source paths 'from_custom' and 'from_common'
    from_path = from_path or path
    # remove beginning '/' (if any), eg '/foo/bar' -> 'foo/bar'
    from_tail = join('files', from_path.lstrip(os.sep))
    if from_path.startswith('~/'):
        from_tail = join('files', 'home', 'USERNAME',
                         from_path[2:])  # without beginning '~/'
    from_common = join(FABFILE_DATA_DIR, from_tail)
    from_custom = join(FABSETUP_CUSTOM_DIR, from_tail)

    # target path 'to_' (path or tempfile)
    for subst in ['SITENAME', 'USER', 'ADDON', 'TASK']:
        sitename = substitutions.get(subst, False)
        if sitename:
            path = path.replace(subst, sitename)
    to_ = path
    if sudo:
        to_ = join(os.sep, 'tmp', 'fabsetup_' + os.path.basename(path))
    path_dir = dirname(path)

    # copy file
    if isfile(from_custom):
        run(flo('mkdir -p {path_dir}'))
        put(from_custom, to_)
    elif isfile(from_custom + '.template'):
        _install_file_from_template_legacy(from_custom + '.template', to_=to_,
                                           **substitutions)
    elif isfile(from_common):
        run(flo('mkdir -p {path_dir}'))
        put(from_common, to_)
    else:
        _install_file_from_template_legacy(from_common + '.template', to_=to_,
                                           **substitutions)
    if sudo:
        run(flo('sudo mv --force {to_} {path}'))
[ "def", "install_file_legacy", "(", "path", ",", "sudo", "=", "False", ",", "from_path", "=", "None", ",", "*", "*", "substitutions", ")", ":", "# source paths 'from_custom' and 'from_common'", "from_path", "=", "from_path", "or", "path", "# remove beginning '/' (if any), eg '/foo/bar' -> 'foo/bar'", "from_tail", "=", "join", "(", "'files'", ",", "from_path", ".", "lstrip", "(", "os", ".", "sep", ")", ")", "if", "from_path", ".", "startswith", "(", "'~/'", ")", ":", "from_tail", "=", "join", "(", "'files'", ",", "'home'", ",", "'USERNAME'", ",", "from_path", "[", "2", ":", "]", ")", "# without beginning '~/'", "from_common", "=", "join", "(", "FABFILE_DATA_DIR", ",", "from_tail", ")", "from_custom", "=", "join", "(", "FABSETUP_CUSTOM_DIR", ",", "from_tail", ")", "# target path 'to_' (path or tempfile)", "for", "subst", "in", "[", "'SITENAME'", ",", "'USER'", ",", "'ADDON'", ",", "'TASK'", "]", ":", "sitename", "=", "substitutions", ".", "get", "(", "subst", ",", "False", ")", "if", "sitename", ":", "path", "=", "path", ".", "replace", "(", "subst", ",", "sitename", ")", "to_", "=", "path", "if", "sudo", ":", "to_", "=", "join", "(", "os", ".", "sep", ",", "'tmp'", ",", "'fabsetup_'", "+", "os", ".", "path", ".", "basename", "(", "path", ")", ")", "path_dir", "=", "dirname", "(", "path", ")", "# copy file", "if", "isfile", "(", "from_custom", ")", ":", "run", "(", "flo", "(", "'mkdir -p {path_dir}'", ")", ")", "put", "(", "from_custom", ",", "to_", ")", "elif", "isfile", "(", "from_custom", "+", "'.template'", ")", ":", "_install_file_from_template_legacy", "(", "from_custom", "+", "'.template'", ",", "to_", "=", "to_", ",", "*", "*", "substitutions", ")", "elif", "isfile", "(", "from_common", ")", ":", "run", "(", "flo", "(", "'mkdir -p {path_dir}'", ")", ")", "put", "(", "from_common", ",", "to_", ")", "else", ":", "_install_file_from_template_legacy", "(", "from_common", "+", "'.template'", ",", "to_", "=", "to_", ",", "*", "*", "substitutions", ")", "if", "sudo", ":", "run", "(", "flo", "(", "'sudo mv --force {to_} {path}'", ")", ")" ]
37.704545
18.25
def child_added(self, child): """ Overwrite the content view """ view = child.widget if view is not None: self.dialog.setContentView(view)
[ "def", "child_added", "(", "self", ",", "child", ")", ":", "view", "=", "child", ".", "widget", "if", "view", "is", "not", "None", ":", "self", ".", "dialog", ".", "setContentView", "(", "view", ")" ]
34
8
def Delete(self): """Delete disk. This request will error if disk is protected and cannot be removed (e.g. a system disk) >>> clc.v2.Server("WA1BTDIX01").Disks().disks[2].Delete().WaitUntilComplete() 0 """ disk_set = [{'diskId': o.id, 'sizeGB': o.size} for o in self.parent.disks if o!=self] self.parent.disks = [o for o in self.parent.disks if o!=self] self.parent.server.dirty = True return(clc.v2.Requests(clc.v2.API.Call('PATCH','servers/%s/%s' % (self.parent.server.alias,self.parent.server.id), json.dumps([{"op": "set", "member": "disks", "value": disk_set}]), session=self.session), alias=self.parent.server.alias, session=self.session))
[ "def", "Delete", "(", "self", ")", ":", "disk_set", "=", "[", "{", "'diskId'", ":", "o", ".", "id", ",", "'sizeGB'", ":", "o", ".", "size", "}", "for", "o", "in", "self", ".", "parent", ".", "disks", "if", "o", "!=", "self", "]", "self", ".", "parent", ".", "disks", "=", "[", "o", "for", "o", "in", "self", ".", "parent", ".", "disks", "if", "o", "!=", "self", "]", "self", ".", "parent", ".", "server", ".", "dirty", "=", "True", "return", "(", "clc", ".", "v2", ".", "Requests", "(", "clc", ".", "v2", ".", "API", ".", "Call", "(", "'PATCH'", ",", "'servers/%s/%s'", "%", "(", "self", ".", "parent", ".", "server", ".", "alias", ",", "self", ".", "parent", ".", "server", ".", "id", ")", ",", "json", ".", "dumps", "(", "[", "{", "\"op\"", ":", "\"set\"", ",", "\"member\"", ":", "\"disks\"", ",", "\"value\"", ":", "disk_set", "}", "]", ")", ",", "session", "=", "self", ".", "session", ")", ",", "alias", "=", "self", ".", "parent", ".", "server", ".", "alias", ",", "session", "=", "self", ".", "session", ")", ")" ]
40.277778
30.055556
def available_state_for_gene(self, gene: Gene, state: State) -> Tuple[State, ...]:
    """
    Return the states reachable from a given state for a particular gene.
    """
    result: List[State] = []
    active_multiplex: Tuple[Multiplex] = gene.active_multiplex(state)
    transition: Transition = self.find_transition(gene, active_multiplex)
    current_state: int = state[gene]
    done = set()
    for target_state in transition.states:
        target_state: int = self._state_after_transition(current_state, target_state)
        if target_state not in done:
            done.add(target_state)
            new_state: State = state.copy()
            new_state[gene] = target_state
            result.append(new_state)
    return tuple(result)
[ "def", "available_state_for_gene", "(", "self", ",", "gene", ":", "Gene", ",", "state", ":", "State", ")", "->", "Tuple", "[", "State", ",", "...", "]", ":", "result", ":", "List", "[", "State", "]", "=", "[", "]", "active_multiplex", ":", "Tuple", "[", "Multiplex", "]", "=", "gene", ".", "active_multiplex", "(", "state", ")", "transition", ":", "Transition", "=", "self", ".", "find_transition", "(", "gene", ",", "active_multiplex", ")", "current_state", ":", "int", "=", "state", "[", "gene", "]", "done", "=", "set", "(", ")", "for", "target_state", "in", "transition", ".", "states", ":", "target_state", ":", "int", "=", "self", ".", "_state_after_transition", "(", "current_state", ",", "target_state", ")", "if", "target_state", "not", "in", "done", ":", "done", ".", "add", "(", "target_state", ")", "new_state", ":", "State", "=", "state", ".", "copy", "(", ")", "new_state", "[", "gene", "]", "=", "target_state", "result", ".", "append", "(", "new_state", ")", "return", "tuple", "(", "result", ")" ]
52.133333
14.8
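How the de-duplication above behaves, sketched with a plain dict standing in for the State class and bare ints for the transition targets (all names here are illustrative):

state = {'g1': 0, 'g2': 1}
targets = [1, 1, 2]            # raw targets produced by one transition
done, successors = set(), []
for t in targets:
    if t not in done:          # each distinct target yields one successor
        done.add(t)
        new_state = dict(state)
        new_state['g1'] = t
        successors.append(new_state)
assert successors == [{'g1': 1, 'g2': 1}, {'g1': 2, 'g2': 1}]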
def _render(self, request, template=None, status=200, context={}, headers={}, prefix_template_path=True): """ Render a HTTP response. :param request: A django.http.HttpRequest instance. :param template: A string describing the path to a template. :param status: An integer describing the HTTP status code to respond with. :param context: A dictionary describing variables to populate the template with. :param headers: A dictionary describing HTTP headers. :param prefix_template_path: A boolean describing whether to prefix the template with the view's template path. Please note that ``template`` must not specify an extension, as one will be appended according to the request format. For example, a value of ``blog/posts/index`` would populate ``blog/posts/index.html`` for requests that query the resource's HTML representation. If no template that matches the request format exists at the given location, or if ``template`` is ``None``, Respite will attempt to serialize the template context automatically. You can change the way your models are serialized by defining ``serialize`` methods that return a dictionary:: class NuclearMissile(models.Model): serial_number = models.IntegerField() is_armed = models.BooleanField() launch_code = models.IntegerField() def serialize(self): return { 'serial_number': self.serial_number, 'is_armed': self.is_armed } If the request format is not supported by the view (as determined by the ``supported_formats`` property or a specific view's ``override_supported_formats`` decorator), this function will yield HTTP 406 Not Acceptable. """ format = self._get_format(request) # Render 406 Not Acceptable if the requested format isn't supported. if not format: return HttpResponse(status=406) if template: if prefix_template_path: template_path = '%s.%s' % (self.template_path + template, format.extension) else: template_path = '%s.%s' % (template, format.extension) try: response = render( request = request, template_name = template_path, dictionary = context, status = status, content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET) ) except TemplateDoesNotExist: try: response = HttpResponse( content = serializers.find(format)(context).serialize(request), content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET), status = status ) except serializers.UnknownSerializer: raise self.Error( 'No template exists at %(template_path)s, and no serializer found for %(format)s' % { 'template_path': template_path, 'format': format } ) else: response = HttpResponse( content = serializers.find(format)(context).serialize(request), content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET), status = status ) for header, value in headers.items(): response[header] = value return response
[ "def", "_render", "(", "self", ",", "request", ",", "template", "=", "None", ",", "status", "=", "200", ",", "context", "=", "{", "}", ",", "headers", "=", "{", "}", ",", "prefix_template_path", "=", "True", ")", ":", "format", "=", "self", ".", "_get_format", "(", "request", ")", "# Render 406 Not Acceptable if the requested format isn't supported.", "if", "not", "format", ":", "return", "HttpResponse", "(", "status", "=", "406", ")", "if", "template", ":", "if", "prefix_template_path", ":", "template_path", "=", "'%s.%s'", "%", "(", "self", ".", "template_path", "+", "template", ",", "format", ".", "extension", ")", "else", ":", "template_path", "=", "'%s.%s'", "%", "(", "template", ",", "format", ".", "extension", ")", "try", ":", "response", "=", "render", "(", "request", "=", "request", ",", "template_name", "=", "template_path", ",", "dictionary", "=", "context", ",", "status", "=", "status", ",", "content_type", "=", "'%s; charset=%s'", "%", "(", "format", ".", "content_type", ",", "settings", ".", "DEFAULT_CHARSET", ")", ")", "except", "TemplateDoesNotExist", ":", "try", ":", "response", "=", "HttpResponse", "(", "content", "=", "serializers", ".", "find", "(", "format", ")", "(", "context", ")", ".", "serialize", "(", "request", ")", ",", "content_type", "=", "'%s; charset=%s'", "%", "(", "format", ".", "content_type", ",", "settings", ".", "DEFAULT_CHARSET", ")", ",", "status", "=", "status", ")", "except", "serializers", ".", "UnknownSerializer", ":", "raise", "self", ".", "Error", "(", "'No template exists at %(template_path)s, and no serializer found for %(format)s'", "%", "{", "'template_path'", ":", "template_path", ",", "'format'", ":", "format", "}", ")", "else", ":", "response", "=", "HttpResponse", "(", "content", "=", "serializers", ".", "find", "(", "format", ")", "(", "context", ")", ".", "serialize", "(", "request", ")", ",", "content_type", "=", "'%s; charset=%s'", "%", "(", "format", ".", "content_type", ",", "settings", ".", "DEFAULT_CHARSET", ")", ",", "status", "=", "status", ")", "for", "header", ",", "value", "in", "headers", ".", "items", "(", ")", ":", "response", "[", "header", "]", "=", "value", "return", "response" ]
45.109756
26.963415
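How the template name is assembled before the render/serialize fallback above, reduced to a pure function (a sketch for illustration; the function name is not part of Respite's API):

def template_name(view_template_path, template, extension, prefix=True):
    base = view_template_path + template if prefix else template
    return '%s.%s' % (base, extension)

assert template_name('blog/posts/', 'index', 'html') == 'blog/posts/index.html'
assert template_name('blog/posts/', 'shared/list', 'json', prefix=False) == 'shared/list.json'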
def addarchive(self, name): """ Add (i.e. copy) the contents of another tarball to this one. :param name: File path to the tar archive. :type name: unicode | str """ with tarfile.open(name, 'r') as st: for member in st.getmembers(): self.tarfile.addfile(member, st.extractfile(member.name))
[ "def", "addarchive", "(", "self", ",", "name", ")", ":", "with", "tarfile", ".", "open", "(", "name", ",", "'r'", ")", "as", "st", ":", "for", "member", "in", "st", ".", "getmembers", "(", ")", ":", "self", ".", "tarfile", ".", "addfile", "(", "member", ",", "st", ".", "extractfile", "(", "member", ".", "name", ")", ")" ]
35.8
13.6
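Hedged usage sketch: merging one tarball into another with the same member-by-member copy as the method above (the file names are placeholders, and like the original this is written with regular-file members in mind):

import tarfile

with tarfile.open('combined.tar', 'w') as out:
    with tarfile.open('extra.tar', 'r') as src:
        for member in src.getmembers():
            out.addfile(member, src.extractfile(member.name))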
def _load_compiled(self, file_path): """ Accepts a path to a compiled plugin and returns a module object. file_path: A string that represents a complete file path to a compiled plugin. """ name = os.path.splitext(os.path.split(file_path)[-1])[0] plugin_directory = os.sep.join(os.path.split(file_path)[0:-1]) compiled_directory = os.path.join(plugin_directory, '__pycache__') # Use glob to autocomplete the filename. compiled_file = glob.glob(os.path.join(compiled_directory, (name + '.*')))[0] plugin = imp.load_compiled(name, compiled_file) return plugin
[ "def", "_load_compiled", "(", "self", ",", "file_path", ")", ":", "name", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "file_path", ")", "[", "-", "1", "]", ")", "[", "0", "]", "plugin_directory", "=", "os", ".", "sep", ".", "join", "(", "os", ".", "path", ".", "split", "(", "file_path", ")", "[", "0", ":", "-", "1", "]", ")", "compiled_directory", "=", "os", ".", "path", ".", "join", "(", "plugin_directory", ",", "'__pycache__'", ")", "# Use glob to autocomplete the filename.", "compiled_file", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "compiled_directory", ",", "(", "name", "+", "'.*'", ")", ")", ")", "[", "0", "]", "plugin", "=", "imp", ".", "load_compiled", "(", "name", ",", "compiled_file", ")", "return", "plugin" ]
45.714286
22.428571
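The `imp.load_compiled` call used above is deprecated and removed in recent Python; a rough importlib-based equivalent, offered as a sketch rather than a drop-in replacement:

import importlib.util

def load_compiled(name, compiled_file):
    spec = importlib.util.spec_from_file_location(name, compiled_file)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)   # executes the .pyc's code object
    return module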
def start_server(self, datacenter_id, server_id): """ Starts the server. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param server_id: The unique ID of the server. :type server_id: ``str`` """ response = self._perform_request( url='/datacenters/%s/servers/%s/start' % ( datacenter_id, server_id), method='POST-ACTION') return response
[ "def", "start_server", "(", "self", ",", "datacenter_id", ",", "server_id", ")", ":", "response", "=", "self", ".", "_perform_request", "(", "url", "=", "'/datacenters/%s/servers/%s/start'", "%", "(", "datacenter_id", ",", "server_id", ")", ",", "method", "=", "'POST-ACTION'", ")", "return", "response" ]
28.444444
16.444444
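Hypothetical call site; `client` stands for an instance of the class defining start_server, and both IDs are placeholders. The underlying request is a POST-action against /datacenters/<dc>/servers/<srv>/start, as built by the URL template above.

response = client.start_server(
    datacenter_id='dc-uuid-placeholder',
    server_id='srv-uuid-placeholder',
)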
def upload(self, href, vobject_item): """Upload a new or replace an existing item.""" if self.is_fake: return content = vobject_item.serialize() try: item = self.get(href) etesync_item = item.etesync_item etesync_item.content = content except api.exceptions.DoesNotExist: etesync_item = self.collection.get_content_class().create(self.collection, content) etesync_item.save() return self.get(href)
[ "def", "upload", "(", "self", ",", "href", ",", "vobject_item", ")", ":", "if", "self", ".", "is_fake", ":", "return", "content", "=", "vobject_item", ".", "serialize", "(", ")", "try", ":", "item", "=", "self", ".", "get", "(", "href", ")", "etesync_item", "=", "item", ".", "etesync_item", "etesync_item", ".", "content", "=", "content", "except", "api", ".", "exceptions", ".", "DoesNotExist", ":", "etesync_item", "=", "self", ".", "collection", ".", "get_content_class", "(", ")", ".", "create", "(", "self", ".", "collection", ",", "content", ")", "etesync_item", ".", "save", "(", ")", "return", "self", ".", "get", "(", "href", ")" ]
31.3125
17.875
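The method above is an upsert: overwrite the content when the href already exists, create a fresh item otherwise, and save in either case. The same pattern as a generic sketch (KeyError stands in for api.exceptions.DoesNotExist, and `create` is assumed to return objects with a save() method):

def upsert(items, href, content, create):
    try:
        item = items[href]             # replace an existing item...
        item.content = content
    except KeyError:
        item = create(content)         # ...or create a new one
        items[href] = item
    item.save()
    return item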
def _check_stop(self, iters, elapsed_time, converged):
    """
    Defines the stopping criterion: stop when `converged` is 0, or when the
    configured iteration or wall-time (minutes) budget is exhausted; a value
    of 'NA' disables the corresponding limit.
    """
    r_c = self.config['resources']
    stop = False
    if converged == 0:
        stop = True
    if r_c['maximum-iterations'] != 'NA' and iters >= r_c['maximum-iterations']:
        stop = True
    if r_c['max-run-time'] != 'NA' and elapsed_time / 60. >= r_c['max-run-time']:
        stop = True
    return stop
[ "def", "_check_stop", "(", "self", ",", "iters", ",", "elapsed_time", ",", "converged", ")", ":", "r_c", "=", "self", ".", "config", "[", "'resources'", "]", "stop", "=", "False", "if", "converged", "==", "0", ":", "stop", "=", "True", "if", "r_c", "[", "'maximum-iterations'", "]", "!=", "'NA'", "and", "iters", ">=", "r_c", "[", "'maximum-iterations'", "]", ":", "stop", "=", "True", "if", "r_c", "[", "'max-run-time'", "]", "!=", "'NA'", "and", "elapsed_time", "/", "60.", ">=", "r_c", "[", "'max-run-time'", "]", ":", "stop", "=", "True", "return", "stop" ]
30.333333
18.866667
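Illustrative `config['resources']` values consumed by _check_stop; the key names are taken from the method above, the values are examples, and 'NA' disables a limit:

config = {
    'resources': {
        'maximum-iterations': 500,   # stop after 500 iterations
        'max-run-time': 30,          # stop after 30 minutes (elapsed_time is in seconds)
    }
}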
def match(self, node, results=None): """ Does this pattern exactly match a node? Returns True if it matches, False if not. If results is not None, it must be a dict which will be updated with the nodes matching named subpatterns. Default implementation for non-wildcard patterns. """ if self.type is not None and node.type != self.type: return False if self.content is not None: r = None if results is not None: r = {} if not self._submatch(node, r): return False if r: results.update(r) if results is not None and self.name: results[self.name] = node return True
[ "def", "match", "(", "self", ",", "node", ",", "results", "=", "None", ")", ":", "if", "self", ".", "type", "is", "not", "None", "and", "node", ".", "type", "!=", "self", ".", "type", ":", "return", "False", "if", "self", ".", "content", "is", "not", "None", ":", "r", "=", "None", "if", "results", "is", "not", "None", ":", "r", "=", "{", "}", "if", "not", "self", ".", "_submatch", "(", "node", ",", "r", ")", ":", "return", "False", "if", "r", ":", "results", ".", "update", "(", "r", ")", "if", "results", "is", "not", "None", "and", "self", ".", "name", ":", "results", "[", "self", ".", "name", "]", "=", "node", "return", "True" ]
31.291667
14.791667
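A self-contained illustration of the match() contract using minimal stand-in classes (not the real pytree types): a pattern with no content matches purely on node type and records the node under its name.

class FakeNode:
    type = 7

class TypeOnlyPattern:
    type, content, name = 7, None, 'hit'
    def match(self, node, results=None):
        if self.type is not None and node.type != self.type:
            return False
        if results is not None and self.name:
            results[self.name] = node
        return True

node, results = FakeNode(), {}
assert TypeOnlyPattern().match(node, results)
assert results['hit'] is node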
def set_remote_events(enable): ''' Set whether the server responds to events sent by other computers (such as AppleScripts) :param bool enable: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' system.set_remote_events On ''' state = __utils__['mac_utils.validate_enabled'](enable) cmd = 'systemsetup -setremoteappleevents {0}'.format(state) __utils__['mac_utils.execute_return_success'](cmd) return __utils__['mac_utils.confirm_updated']( state, get_remote_events, normalize_ret=True, )
[ "def", "set_remote_events", "(", "enable", ")", ":", "state", "=", "__utils__", "[", "'mac_utils.validate_enabled'", "]", "(", "enable", ")", "cmd", "=", "'systemsetup -setremoteappleevents {0}'", ".", "format", "(", "state", ")", "__utils__", "[", "'mac_utils.execute_return_success'", "]", "(", "cmd", ")", "return", "__utils__", "[", "'mac_utils.confirm_updated'", "]", "(", "state", ",", "get_remote_events", ",", "normalize_ret", "=", "True", ",", ")" ]
27.535714
25.821429
def check_secure():
    """Check the request; return True when it uses SSL or comes from
    localhost, otherwise raise a 403 MeteorError."""
    if this.request.is_secure():
        return True  # using SSL
    elif this.request.META['REMOTE_ADDR'] in [
        'localhost',
        '127.0.0.1',
    ]:
        return True  # localhost
    raise MeteorError(403, 'Authentication refused without SSL.')
[ "def", "check_secure", "(", ")", ":", "if", "this", ".", "request", ".", "is_secure", "(", ")", ":", "return", "True", "# using SSL", "elif", "this", ".", "request", ".", "META", "[", "'REMOTE_ADDR'", "]", "in", "[", "'localhost'", ",", "'127.0.0.1'", ",", "]", ":", "return", "True", "# localhost", "raise", "MeteorError", "(", "403", ",", "'Authentication refused without SSL.'", ")" ]
38.7
12.6
def calc_login_v1(self):
    """Refresh the input log sequence for the different MA processes.

    Required derived parameters:
      |Nmb|
      |MA_Order|

    Required flux sequence:
      |QPIn|

    Updated log sequence:
      |LogIn|

    Example:

        Assume there are three response functions, involving one, two and
        three MA coefficients respectively:

        >>> from hydpy.models.arma import *
        >>> parameterstep()
        >>> derived.nmb(3)
        >>> derived.ma_order.shape = 3
        >>> derived.ma_order = 1, 2, 3
        >>> fluxes.qpin.shape = 3
        >>> logs.login.shape = (3, 3)

        The "memory values" of the different MA processes are defined as
        follows (one row for each process):

        >>> logs.login = ((1.0, nan, nan),
        ...               (2.0, 3.0, nan),
        ...               (4.0, 5.0, 6.0))

        These are the new inflow discharge portions to be included in the
        memories of the different processes:

        >>> fluxes.qpin = 7.0, 8.0, 9.0

        Applying method |calc_login_v1| shifts all existing values one
        position to the right ("into the past").  Values that are no longer
        required, due to the limited order of the different MA processes,
        are discarded.  The new values are inserted in the first column:

        >>> model.calc_login_v1()
        >>> logs.login
        login([[7.0, nan, nan],
               [8.0, 2.0, nan],
               [9.0, 4.0, 5.0]])
    """
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    log = self.sequences.logs.fastaccess
    for idx in range(der.nmb):
        for jdx in range(der.ma_order[idx]-2, -1, -1):
            log.login[idx, jdx+1] = log.login[idx, jdx]
    for idx in range(der.nmb):
        log.login[idx, 0] = flu.qpin[idx]
[ "def", "calc_login_v1", "(", "self", ")", ":", "der", "=", "self", ".", "parameters", ".", "derived", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "log", "=", "self", ".", "sequences", ".", "logs", ".", "fastaccess", "for", "idx", "in", "range", "(", "der", ".", "nmb", ")", ":", "for", "jdx", "in", "range", "(", "der", ".", "ma_order", "[", "idx", "]", "-", "2", ",", "-", "1", ",", "-", "1", ")", ":", "log", ".", "login", "[", "idx", ",", "jdx", "+", "1", "]", "=", "log", ".", "login", "[", "idx", ",", "jdx", "]", "for", "idx", "in", "range", "(", "der", ".", "nmb", ")", ":", "log", ".", "login", "[", "idx", ",", "0", "]", "=", "flu", ".", "qpin", "[", "idx", "]" ]
30.965517
18.068966
def check(self, instance):
    """
    Returns a dictionary that looks a lot like what's sent back by
    db.serverStatus()
    """

    def total_seconds(td):
        """
        Returns total seconds of a timedelta in a way that's safe for
        Python < 2.7
        """
        if hasattr(td, 'total_seconds'):
            return td.total_seconds()
        else:
            return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6

    if 'server' not in instance:
        raise Exception("Missing 'server' in mongo config")

    # x.509 authentication
    ssl_params = {
        'ssl': instance.get('ssl', None),
        'ssl_keyfile': instance.get('ssl_keyfile', None),
        'ssl_certfile': instance.get('ssl_certfile', None),
        'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),
        'ssl_ca_certs': instance.get('ssl_ca_certs', None),
    }

    for key, param in list(iteritems(ssl_params)):
        if param is None:
            del ssl_params[key]

    server = instance['server']
    username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(
        server, sanitize_username=bool(ssl_params)
    )

    additional_metrics = instance.get('additional_metrics', [])

    # Get the list of metrics to collect
    collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics
    metrics_to_collect = self._get_metrics_to_collect(server, additional_metrics)

    # Tagging
    tags = instance.get('tags', [])
    # ...de-dupe tags to avoid a memory leak
    tags = list(set(tags))

    if not db_name:
        self.log.info('No MongoDB database found in URI. Defaulting to admin.')
        db_name = 'admin'

    service_check_tags = ["db:%s" % db_name]
    service_check_tags.extend(tags)

    # ...add the `server` tag to the metrics' tags only
    # (it's added in the backend for service checks)
    tags.append('server:%s' % clean_server_name)

    if nodelist:
        host = nodelist[0][0]
        port = nodelist[0][1]
        service_check_tags = service_check_tags + ["host:%s" % host, "port:%s" % port]

    timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000
    try:
        cli = pymongo.mongo_client.MongoClient(
            server,
            socketTimeoutMS=timeout,
            connectTimeoutMS=timeout,
            serverSelectionTimeoutMS=timeout,
            read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,
            **ssl_params
        )
        # some commands can only go against the admin DB
        admindb = cli['admin']
        db = cli[db_name]
    except Exception:
        self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags)
        raise

    # Authenticate
    do_auth = True
    use_x509 = ssl_params and not password

    if not username:
        self.log.debug(u"A username is required to authenticate to `%s`", server)
        do_auth = False

    if do_auth:
        if auth_source:
            msg = "authSource was specified in the server URL: using '%s' as the authentication database"
            self.log.info(msg, auth_source)
            self._authenticate(
                cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags
            )
        else:
            self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)

    try:
        status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)
    except Exception:
        self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags)
        raise
    else:
        self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)

    if status['ok'] == 0:
        raise Exception(status['errmsg'].__str__())

    ops = db.current_op()
    status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0

    status['stats'] = db.command('dbstats')
    dbstats = {db_name: {'stats': status['stats']}}

    # Handle replica data, if any
    # See
    # http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus  # noqa
    if is_affirmative(instance.get('replica_check', True)):
        try:
            data = {}
            replSet = admindb.command('replSetGetStatus')
            if replSet:
                primary = None
                current = None

                # need a new connection to deal with replica sets
                setname = replSet.get('set')
                cli_rs = pymongo.mongo_client.MongoClient(
                    server,
                    socketTimeoutMS=timeout,
                    connectTimeoutMS=timeout,
                    serverSelectionTimeoutMS=timeout,
                    replicaset=setname,
                    read_preference=pymongo.ReadPreference.NEAREST,
                    **ssl_params
                )
                if do_auth:
                    if auth_source:
                        self._authenticate(
                            cli_rs[auth_source], username, password, use_x509, server, service_check_tags
                        )
                    else:
                        self._authenticate(
                            cli_rs[db_name], username, password, use_x509, server, service_check_tags
                        )

                # Replication set information
                replset_name = replSet['set']
                replset_state = self.get_state_name(replSet['myState']).lower()

                tags.extend([u"replset_name:{0}".format(replset_name), u"replset_state:{0}".format(replset_state)])

                # Find nodes: master and current node (ourself)
                for member in replSet.get('members'):
                    if member.get('self'):
                        current = member
                    if int(member.get('state')) == 1:
                        primary = member

                # Compute a lag time
                if current is not None and primary is not None:
                    if 'optimeDate' in primary and 'optimeDate' in current:
                        lag = primary['optimeDate'] - current['optimeDate']
                        data['replicationLag'] = total_seconds(lag)

                if current is not None:
                    data['health'] = current['health']

                data['state'] = replSet['myState']

                if current is not None:
                    total = 0.0
                    cfg = cli_rs['local']['system.replset'].find_one()
                    for member in cfg.get('members'):
                        total += member.get('votes', 1)
                        if member['_id'] == current['_id']:
                            data['votes'] = member.get('votes', 1)
                    data['voteFraction'] = data['votes'] / total

                status['replSet'] = data

                # Submit events
                self._report_replica_set_state(data['state'], clean_server_name, replset_name)
        except Exception as e:
            if "OperationFailure" in repr(e) and (
                "not running with --replSet" in str(e) or "replSetGetStatus" in str(e)
            ):
                pass
            else:
                raise e

    # If these keys exist, remove them for now as they cannot be serialized
    try:
        status['backgroundFlushing'].pop('last_finished')
    except KeyError:
        pass
    try:
        status.pop('localTime')
    except KeyError:
        pass

    dbnames = cli.database_names()
    self.gauge('mongodb.dbs', len(dbnames), tags=tags)

    for db_n in dbnames:
        db_aux = cli[db_n]
        dbstats[db_n] = {'stats': db_aux.command('dbstats')}

    # Go through the metrics and save the values
    for metric_name in metrics_to_collect:
        # each metric is of the form: x.y.z with z optional
        # and can be found at status[x][y][z]
        value = status

        if metric_name.startswith('stats'):
            continue
        else:
            try:
                for c in metric_name.split("."):
                    value = value[c]
            except KeyError:
                continue

        # value is now status[x][y][z]
        if not isinstance(value, (int, long, float)):
            raise TypeError(
                u"{0} value is a {1}, it should be an int, a float or a long instead.".format(
                    metric_name, type(value)
                )
            )

        # Submit the metric
        submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)
        submit_method(self, metric_name_alias, value, tags=tags)

    for st, value in iteritems(dbstats):
        for metric_name in metrics_to_collect:
            if not metric_name.startswith('stats.'):
                continue

            try:
                val = value['stats'][metric_name.split('.')[1]]
            except KeyError:
                continue

            # value is now status[x][y][z]
            if not isinstance(val, (int, long, float)):
                raise TypeError(
                    u"{0} value is a {1}, it should be an int, a float or a long instead.".format(
                        metric_name, type(val)
                    )
                )

            # Submit the metric
            metrics_tags = tags + [
                u"cluster:db:{0}".format(st),  # FIXME 6.0 - keep for backward compatibility
                u"db:{0}".format(st),
            ]

            submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)
            submit_method(self, metric_name_alias, val, tags=metrics_tags)

    if is_affirmative(instance.get('collections_indexes_stats')):
        mongo_version = cli.server_info().get('version', '0.0')
        if LooseVersion(mongo_version) >= LooseVersion("3.2"):
            self._collect_indexes_stats(instance, db, tags)
        else:
            msg = "'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s"
            self.log.error(msg, mongo_version)

    # Report the usage metrics for dbs/collections
    if 'top' in additional_metrics:
        try:
            dbtop = admindb.command('top')
            for ns, ns_metrics in iteritems(dbtop['totals']):
                if "." not in ns:
                    continue

                # configure tags for db name and collection name
                dbname, collname = ns.split(".", 1)
                ns_tags = tags + ["db:%s" % dbname, "collection:%s" % collname]

                # iterate over DBTOP metrics
                for m in self.TOP_METRICS:
                    # each metric is of the form: x.y.z with z optional
                    # and can be found at ns_metrics[x][y][z]
                    value = ns_metrics
                    try:
                        for c in m.split("."):
                            value = value[c]
                    except Exception:
                        continue

                    # value is now status[x][y][z]
                    if not isinstance(value, (int, long, float)):
                        raise TypeError(
                            u"{0} value is a {1}, it should be an int, a float or a long instead.".format(
                                m, type(value)
                            )
                        )

                    # Submit the metric
                    submit_method, metric_name_alias = self._resolve_metric(m, metrics_to_collect, prefix="usage")
                    submit_method(self, metric_name_alias, value, tags=ns_tags)
                    # Keep old incorrect metric
                    if metric_name_alias.endswith('countps'):
                        GAUGE(self, metric_name_alias[:-2], value, tags=ns_tags)
        except Exception as e:
            self.log.warning('Failed to record `top` metrics %s' % str(e))

    if 'local' in dbnames:  # it might not be if we are connecting through mongos
        # Fetch information analogous to Mongo's db.getReplicationInfo()
        localdb = cli['local']

        oplog_data = {}

        for ol_collection_name in ("oplog.rs", "oplog.$main"):
            ol_options = localdb[ol_collection_name].options()
            if ol_options:
                break

        if ol_options:
            try:
                oplog_data['logSizeMB'] = round_value(ol_options['size'] / 2.0 ** 20, 2)

                oplog = localdb[ol_collection_name]

                oplog_data['usedSizeMB'] = round_value(
                    localdb.command("collstats", ol_collection_name)['size'] / 2.0 ** 20, 2
                )

                op_asc_cursor = oplog.find({"ts": {"$exists": 1}}).sort("$natural", pymongo.ASCENDING).limit(1)
                op_dsc_cursor = oplog.find({"ts": {"$exists": 1}}).sort("$natural", pymongo.DESCENDING).limit(1)

                try:
                    first_timestamp = op_asc_cursor[0]['ts'].as_datetime()
                    last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()
                    oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)
                except (IndexError, KeyError):
                    # if the oplog collection doesn't have any entries
                    # if an object in the collection doesn't have a ts value, we ignore it
                    pass
            except KeyError:
                # encountered an error trying to access options.size for the oplog collection
                self.log.warning(u"Failed to record `ReplicationInfo` metrics.")

        for m, value in iteritems(oplog_data):
            submit_method, metric_name_alias = self._resolve_metric('oplog.%s' % m, metrics_to_collect)
            submit_method(self, metric_name_alias, value, tags=tags)

    else:
        self.log.debug('"local" database not in dbnames. Not collecting ReplicationInfo metrics')

    # get collection level stats
    try:
        # Ensure that you're on the right db
        db = cli[db_name]
        # grab the collections from the configuration
        coll_names = instance.get('collections', [])
        # loop through the collections
        for coll_name in coll_names:
            # grab the stats from the collection
            stats = db.command("collstats", coll_name)
            # loop through the metrics
            for m in self.collection_metrics_names:
                coll_tags = tags + ["db:%s" % db_name, "collection:%s" % coll_name]
                value = stats.get(m, None)
                if not value:
                    continue

                # if it's the index sizes, then it's a dict.
                if m == 'indexSizes':
                    submit_method, metric_name_alias = self._resolve_metric(
                        'collection.%s' % m, self.COLLECTION_METRICS
                    )
                    # loop through the indexes
                    for idx, val in iteritems(value):
                        # we tag the index
                        idx_tags = coll_tags + ["index:%s" % idx]
                        submit_method(self, metric_name_alias, val, tags=idx_tags)
                else:
                    submit_method, metric_name_alias = self._resolve_metric(
                        'collection.%s' % m, self.COLLECTION_METRICS
                    )
                    submit_method(self, metric_name_alias, value, tags=coll_tags)
    except Exception as e:
        self.log.warning(u"Failed to record `collection` metrics.")
        self.log.exception(e)
[ "def", "check", "(", "self", ",", "instance", ")", ":", "def", "total_seconds", "(", "td", ")", ":", "\"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"", "if", "hasattr", "(", "td", ",", "'total_seconds'", ")", ":", "return", "td", ".", "total_seconds", "(", ")", "else", ":", "return", "(", "lag", ".", "microseconds", "+", "(", "lag", ".", "seconds", "+", "lag", ".", "days", "*", "24", "*", "3600", ")", "*", "10", "**", "6", ")", "/", "10.0", "**", "6", "if", "'server'", "not", "in", "instance", ":", "raise", "Exception", "(", "\"Missing 'server' in mongo config\"", ")", "# x.509 authentication", "ssl_params", "=", "{", "'ssl'", ":", "instance", ".", "get", "(", "'ssl'", ",", "None", ")", ",", "'ssl_keyfile'", ":", "instance", ".", "get", "(", "'ssl_keyfile'", ",", "None", ")", ",", "'ssl_certfile'", ":", "instance", ".", "get", "(", "'ssl_certfile'", ",", "None", ")", ",", "'ssl_cert_reqs'", ":", "instance", ".", "get", "(", "'ssl_cert_reqs'", ",", "None", ")", ",", "'ssl_ca_certs'", ":", "instance", ".", "get", "(", "'ssl_ca_certs'", ",", "None", ")", ",", "}", "for", "key", ",", "param", "in", "list", "(", "iteritems", "(", "ssl_params", ")", ")", ":", "if", "param", "is", "None", ":", "del", "ssl_params", "[", "key", "]", "server", "=", "instance", "[", "'server'", "]", "username", ",", "password", ",", "db_name", ",", "nodelist", ",", "clean_server_name", ",", "auth_source", "=", "self", ".", "_parse_uri", "(", "server", ",", "sanitize_username", "=", "bool", "(", "ssl_params", ")", ")", "additional_metrics", "=", "instance", ".", "get", "(", "'additional_metrics'", ",", "[", "]", ")", "# Get the list of metrics to collect", "collect_tcmalloc_metrics", "=", "'tcmalloc'", "in", "additional_metrics", "metrics_to_collect", "=", "self", ".", "_get_metrics_to_collect", "(", "server", ",", "additional_metrics", ")", "# Tagging", "tags", "=", "instance", ".", "get", "(", "'tags'", ",", "[", "]", ")", "# ...de-dupe tags to avoid a memory leak", "tags", "=", "list", "(", "set", "(", "tags", ")", ")", "if", "not", "db_name", ":", "self", ".", "log", ".", "info", "(", "'No MongoDB database found in URI. 
Defaulting to admin.'", ")", "db_name", "=", "'admin'", "service_check_tags", "=", "[", "\"db:%s\"", "%", "db_name", "]", "service_check_tags", ".", "extend", "(", "tags", ")", "# ...add the `server` tag to the metrics' tags only", "# (it's added in the backend for service checks)", "tags", ".", "append", "(", "'server:%s'", "%", "clean_server_name", ")", "if", "nodelist", ":", "host", "=", "nodelist", "[", "0", "]", "[", "0", "]", "port", "=", "nodelist", "[", "0", "]", "[", "1", "]", "service_check_tags", "=", "service_check_tags", "+", "[", "\"host:%s\"", "%", "host", ",", "\"port:%s\"", "%", "port", "]", "timeout", "=", "float", "(", "instance", ".", "get", "(", "'timeout'", ",", "DEFAULT_TIMEOUT", ")", ")", "*", "1000", "try", ":", "cli", "=", "pymongo", ".", "mongo_client", ".", "MongoClient", "(", "server", ",", "socketTimeoutMS", "=", "timeout", ",", "connectTimeoutMS", "=", "timeout", ",", "serverSelectionTimeoutMS", "=", "timeout", ",", "read_preference", "=", "pymongo", ".", "ReadPreference", ".", "PRIMARY_PREFERRED", ",", "*", "*", "ssl_params", ")", "# some commands can only go against the admin DB", "admindb", "=", "cli", "[", "'admin'", "]", "db", "=", "cli", "[", "db_name", "]", "except", "Exception", ":", "self", ".", "service_check", "(", "self", ".", "SERVICE_CHECK_NAME", ",", "AgentCheck", ".", "CRITICAL", ",", "tags", "=", "service_check_tags", ")", "raise", "# Authenticate", "do_auth", "=", "True", "use_x509", "=", "ssl_params", "and", "not", "password", "if", "not", "username", ":", "self", ".", "log", ".", "debug", "(", "u\"A username is required to authenticate to `%s`\"", ",", "server", ")", "do_auth", "=", "False", "if", "do_auth", ":", "if", "auth_source", ":", "msg", "=", "\"authSource was specified in the the server URL: using '%s' as the authentication database\"", "self", ".", "log", ".", "info", "(", "msg", ",", "auth_source", ")", "self", ".", "_authenticate", "(", "cli", "[", "auth_source", "]", ",", "username", ",", "password", ",", "use_x509", ",", "clean_server_name", ",", "service_check_tags", ")", "else", ":", "self", ".", "_authenticate", "(", "db", ",", "username", ",", "password", ",", "use_x509", ",", "clean_server_name", ",", "service_check_tags", ")", "try", ":", "status", "=", "db", ".", "command", "(", "'serverStatus'", ",", "tcmalloc", "=", "collect_tcmalloc_metrics", ")", "except", "Exception", ":", "self", ".", "service_check", "(", "self", ".", "SERVICE_CHECK_NAME", ",", "AgentCheck", ".", "CRITICAL", ",", "tags", "=", "service_check_tags", ")", "raise", "else", ":", "self", ".", "service_check", "(", "self", ".", "SERVICE_CHECK_NAME", ",", "AgentCheck", ".", "OK", ",", "tags", "=", "service_check_tags", ")", "if", "status", "[", "'ok'", "]", "==", "0", ":", "raise", "Exception", "(", "status", "[", "'errmsg'", "]", ".", "__str__", "(", ")", ")", "ops", "=", "db", ".", "current_op", "(", ")", "status", "[", "'fsyncLocked'", "]", "=", "1", "if", "ops", ".", "get", "(", "'fsyncLock'", ")", "else", "0", "status", "[", "'stats'", "]", "=", "db", ".", "command", "(", "'dbstats'", ")", "dbstats", "=", "{", "db_name", ":", "{", "'stats'", ":", "status", "[", "'stats'", "]", "}", "}", "# Handle replica data, if any", "# See", "# http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa", "if", "is_affirmative", "(", "instance", ".", "get", "(", "'replica_check'", ",", "True", ")", ")", ":", "try", ":", "data", "=", "{", "}", "replSet", "=", "admindb", ".", "command", "(", "'replSetGetStatus'", ")", "if", 
"replSet", ":", "primary", "=", "None", "current", "=", "None", "# need a new connection to deal with replica sets", "setname", "=", "replSet", ".", "get", "(", "'set'", ")", "cli_rs", "=", "pymongo", ".", "mongo_client", ".", "MongoClient", "(", "server", ",", "socketTimeoutMS", "=", "timeout", ",", "connectTimeoutMS", "=", "timeout", ",", "serverSelectionTimeoutMS", "=", "timeout", ",", "replicaset", "=", "setname", ",", "read_preference", "=", "pymongo", ".", "ReadPreference", ".", "NEAREST", ",", "*", "*", "ssl_params", ")", "if", "do_auth", ":", "if", "auth_source", ":", "self", ".", "_authenticate", "(", "cli_rs", "[", "auth_source", "]", ",", "username", ",", "password", ",", "use_x509", ",", "server", ",", "service_check_tags", ")", "else", ":", "self", ".", "_authenticate", "(", "cli_rs", "[", "db_name", "]", ",", "username", ",", "password", ",", "use_x509", ",", "server", ",", "service_check_tags", ")", "# Replication set information", "replset_name", "=", "replSet", "[", "'set'", "]", "replset_state", "=", "self", ".", "get_state_name", "(", "replSet", "[", "'myState'", "]", ")", ".", "lower", "(", ")", "tags", ".", "extend", "(", "[", "u\"replset_name:{0}\"", ".", "format", "(", "replset_name", ")", ",", "u\"replset_state:{0}\"", ".", "format", "(", "replset_state", ")", "]", ")", "# Find nodes: master and current node (ourself)", "for", "member", "in", "replSet", ".", "get", "(", "'members'", ")", ":", "if", "member", ".", "get", "(", "'self'", ")", ":", "current", "=", "member", "if", "int", "(", "member", ".", "get", "(", "'state'", ")", ")", "==", "1", ":", "primary", "=", "member", "# Compute a lag time", "if", "current", "is", "not", "None", "and", "primary", "is", "not", "None", ":", "if", "'optimeDate'", "in", "primary", "and", "'optimeDate'", "in", "current", ":", "lag", "=", "primary", "[", "'optimeDate'", "]", "-", "current", "[", "'optimeDate'", "]", "data", "[", "'replicationLag'", "]", "=", "total_seconds", "(", "lag", ")", "if", "current", "is", "not", "None", ":", "data", "[", "'health'", "]", "=", "current", "[", "'health'", "]", "data", "[", "'state'", "]", "=", "replSet", "[", "'myState'", "]", "if", "current", "is", "not", "None", ":", "total", "=", "0.0", "cfg", "=", "cli_rs", "[", "'local'", "]", "[", "'system.replset'", "]", ".", "find_one", "(", ")", "for", "member", "in", "cfg", ".", "get", "(", "'members'", ")", ":", "total", "+=", "member", ".", "get", "(", "'votes'", ",", "1", ")", "if", "member", "[", "'_id'", "]", "==", "current", "[", "'_id'", "]", ":", "data", "[", "'votes'", "]", "=", "member", ".", "get", "(", "'votes'", ",", "1", ")", "data", "[", "'voteFraction'", "]", "=", "data", "[", "'votes'", "]", "/", "total", "status", "[", "'replSet'", "]", "=", "data", "# Submit events", "self", ".", "_report_replica_set_state", "(", "data", "[", "'state'", "]", ",", "clean_server_name", ",", "replset_name", ")", "except", "Exception", "as", "e", ":", "if", "\"OperationFailure\"", "in", "repr", "(", "e", ")", "and", "(", "\"not running with --replSet\"", "in", "str", "(", "e", ")", "or", "\"replSetGetStatus\"", "in", "str", "(", "e", ")", ")", ":", "pass", "else", ":", "raise", "e", "# If these keys exist, remove them for now as they cannot be serialized", "try", ":", "status", "[", "'backgroundFlushing'", "]", ".", "pop", "(", "'last_finished'", ")", "except", "KeyError", ":", "pass", "try", ":", "status", ".", "pop", "(", "'localTime'", ")", "except", "KeyError", ":", "pass", "dbnames", "=", "cli", ".", "database_names", "(", ")", "self", ".", 
"gauge", "(", "'mongodb.dbs'", ",", "len", "(", "dbnames", ")", ",", "tags", "=", "tags", ")", "for", "db_n", "in", "dbnames", ":", "db_aux", "=", "cli", "[", "db_n", "]", "dbstats", "[", "db_n", "]", "=", "{", "'stats'", ":", "db_aux", ".", "command", "(", "'dbstats'", ")", "}", "# Go through the metrics and save the values", "for", "metric_name", "in", "metrics_to_collect", ":", "# each metric is of the form: x.y.z with z optional", "# and can be found at status[x][y][z]", "value", "=", "status", "if", "metric_name", ".", "startswith", "(", "'stats'", ")", ":", "continue", "else", ":", "try", ":", "for", "c", "in", "metric_name", ".", "split", "(", "\".\"", ")", ":", "value", "=", "value", "[", "c", "]", "except", "KeyError", ":", "continue", "# value is now status[x][y][z]", "if", "not", "isinstance", "(", "value", ",", "(", "int", ",", "long", ",", "float", ")", ")", ":", "raise", "TypeError", "(", "u\"{0} value is a {1}, it should be an int, a float or a long instead.\"", ".", "format", "(", "metric_name", ",", "type", "(", "value", ")", ")", ")", "# Submit the metric", "submit_method", ",", "metric_name_alias", "=", "self", ".", "_resolve_metric", "(", "metric_name", ",", "metrics_to_collect", ")", "submit_method", "(", "self", ",", "metric_name_alias", ",", "value", ",", "tags", "=", "tags", ")", "for", "st", ",", "value", "in", "iteritems", "(", "dbstats", ")", ":", "for", "metric_name", "in", "metrics_to_collect", ":", "if", "not", "metric_name", ".", "startswith", "(", "'stats.'", ")", ":", "continue", "try", ":", "val", "=", "value", "[", "'stats'", "]", "[", "metric_name", ".", "split", "(", "'.'", ")", "[", "1", "]", "]", "except", "KeyError", ":", "continue", "# value is now status[x][y][z]", "if", "not", "isinstance", "(", "val", ",", "(", "int", ",", "long", ",", "float", ")", ")", ":", "raise", "TypeError", "(", "u\"{0} value is a {1}, it should be an int, a float or a long instead.\"", ".", "format", "(", "metric_name", ",", "type", "(", "val", ")", ")", ")", "# Submit the metric", "metrics_tags", "=", "tags", "+", "[", "u\"cluster:db:{0}\"", ".", "format", "(", "st", ")", ",", "# FIXME 6.0 - keep for backward compatibility", "u\"db:{0}\"", ".", "format", "(", "st", ")", ",", "]", "submit_method", ",", "metric_name_alias", "=", "self", ".", "_resolve_metric", "(", "metric_name", ",", "metrics_to_collect", ")", "submit_method", "(", "self", ",", "metric_name_alias", ",", "val", ",", "tags", "=", "metrics_tags", ")", "if", "is_affirmative", "(", "instance", ".", "get", "(", "'collections_indexes_stats'", ")", ")", ":", "mongo_version", "=", "cli", ".", "server_info", "(", ")", ".", "get", "(", "'version'", ",", "'0.0'", ")", "if", "LooseVersion", "(", "mongo_version", ")", ">=", "LooseVersion", "(", "\"3.2\"", ")", ":", "self", ".", "_collect_indexes_stats", "(", "instance", ",", "db", ",", "tags", ")", "else", ":", "msg", "=", "\"'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s\"", "self", ".", "log", ".", "error", "(", "msg", ",", "mongo_version", ")", "# Report the usage metrics for dbs/collections", "if", "'top'", "in", "additional_metrics", ":", "try", ":", "dbtop", "=", "admindb", ".", "command", "(", "'top'", ")", "for", "ns", ",", "ns_metrics", "in", "iteritems", "(", "dbtop", "[", "'totals'", "]", ")", ":", "if", "\".\"", "not", "in", "ns", ":", "continue", "# configure tags for db name and collection name", "dbname", ",", "collname", "=", "ns", ".", "split", "(", "\".\"", ",", "1", ")", "ns_tags", "=", "tags", "+", 
"[", "\"db:%s\"", "%", "dbname", ",", "\"collection:%s\"", "%", "collname", "]", "# iterate over DBTOP metrics", "for", "m", "in", "self", ".", "TOP_METRICS", ":", "# each metric is of the form: x.y.z with z optional", "# and can be found at ns_metrics[x][y][z]", "value", "=", "ns_metrics", "try", ":", "for", "c", "in", "m", ".", "split", "(", "\".\"", ")", ":", "value", "=", "value", "[", "c", "]", "except", "Exception", ":", "continue", "# value is now status[x][y][z]", "if", "not", "isinstance", "(", "value", ",", "(", "int", ",", "long", ",", "float", ")", ")", ":", "raise", "TypeError", "(", "u\"{0} value is a {1}, it should be an int, a float or a long instead.\"", ".", "format", "(", "m", ",", "type", "(", "value", ")", ")", ")", "# Submit the metric", "submit_method", ",", "metric_name_alias", "=", "self", ".", "_resolve_metric", "(", "m", ",", "metrics_to_collect", ",", "prefix", "=", "\"usage\"", ")", "submit_method", "(", "self", ",", "metric_name_alias", ",", "value", ",", "tags", "=", "ns_tags", ")", "# Keep old incorrect metric", "if", "metric_name_alias", ".", "endswith", "(", "'countps'", ")", ":", "GAUGE", "(", "self", ",", "metric_name_alias", "[", ":", "-", "2", "]", ",", "value", ",", "tags", "=", "ns_tags", ")", "except", "Exception", "as", "e", ":", "self", ".", "log", ".", "warning", "(", "'Failed to record `top` metrics %s'", "%", "str", "(", "e", ")", ")", "if", "'local'", "in", "dbnames", ":", "# it might not be if we are connectiing through mongos", "# Fetch information analogous to Mongo's db.getReplicationInfo()", "localdb", "=", "cli", "[", "'local'", "]", "oplog_data", "=", "{", "}", "for", "ol_collection_name", "in", "(", "\"oplog.rs\"", ",", "\"oplog.$main\"", ")", ":", "ol_options", "=", "localdb", "[", "ol_collection_name", "]", ".", "options", "(", ")", "if", "ol_options", ":", "break", "if", "ol_options", ":", "try", ":", "oplog_data", "[", "'logSizeMB'", "]", "=", "round_value", "(", "ol_options", "[", "'size'", "]", "/", "2.0", "**", "20", ",", "2", ")", "oplog", "=", "localdb", "[", "ol_collection_name", "]", "oplog_data", "[", "'usedSizeMB'", "]", "=", "round_value", "(", "localdb", ".", "command", "(", "\"collstats\"", ",", "ol_collection_name", ")", "[", "'size'", "]", "/", "2.0", "**", "20", ",", "2", ")", "op_asc_cursor", "=", "oplog", ".", "find", "(", "{", "\"ts\"", ":", "{", "\"$exists\"", ":", "1", "}", "}", ")", ".", "sort", "(", "\"$natural\"", ",", "pymongo", ".", "ASCENDING", ")", ".", "limit", "(", "1", ")", "op_dsc_cursor", "=", "oplog", ".", "find", "(", "{", "\"ts\"", ":", "{", "\"$exists\"", ":", "1", "}", "}", ")", ".", "sort", "(", "\"$natural\"", ",", "pymongo", ".", "DESCENDING", ")", ".", "limit", "(", "1", ")", "try", ":", "first_timestamp", "=", "op_asc_cursor", "[", "0", "]", "[", "'ts'", "]", ".", "as_datetime", "(", ")", "last_timestamp", "=", "op_dsc_cursor", "[", "0", "]", "[", "'ts'", "]", ".", "as_datetime", "(", ")", "oplog_data", "[", "'timeDiff'", "]", "=", "total_seconds", "(", "last_timestamp", "-", "first_timestamp", ")", "except", "(", "IndexError", ",", "KeyError", ")", ":", "# if the oplog collection doesn't have any entries", "# if an object in the collection doesn't have a ts value, we ignore it", "pass", "except", "KeyError", ":", "# encountered an error trying to access options.size for the oplog collection", "self", ".", "log", ".", "warning", "(", "u\"Failed to record `ReplicationInfo` metrics.\"", ")", "for", "m", ",", "value", "in", "iteritems", "(", "oplog_data", ")", ":", "submit_method", ",", 
"metric_name_alias", "=", "self", ".", "_resolve_metric", "(", "'oplog.%s'", "%", "m", ",", "metrics_to_collect", ")", "submit_method", "(", "self", ",", "metric_name_alias", ",", "value", ",", "tags", "=", "tags", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "'\"local\" database not in dbnames. Not collecting ReplicationInfo metrics'", ")", "# get collection level stats", "try", ":", "# Ensure that you're on the right db", "db", "=", "cli", "[", "db_name", "]", "# grab the collections from the configutation", "coll_names", "=", "instance", ".", "get", "(", "'collections'", ",", "[", "]", ")", "# loop through the collections", "for", "coll_name", "in", "coll_names", ":", "# grab the stats from the collection", "stats", "=", "db", ".", "command", "(", "\"collstats\"", ",", "coll_name", ")", "# loop through the metrics", "for", "m", "in", "self", ".", "collection_metrics_names", ":", "coll_tags", "=", "tags", "+", "[", "\"db:%s\"", "%", "db_name", ",", "\"collection:%s\"", "%", "coll_name", "]", "value", "=", "stats", ".", "get", "(", "m", ",", "None", ")", "if", "not", "value", ":", "continue", "# if it's the index sizes, then it's a dict.", "if", "m", "==", "'indexSizes'", ":", "submit_method", ",", "metric_name_alias", "=", "self", ".", "_resolve_metric", "(", "'collection.%s'", "%", "m", ",", "self", ".", "COLLECTION_METRICS", ")", "# loop through the indexes", "for", "idx", ",", "val", "in", "iteritems", "(", "value", ")", ":", "# we tag the index", "idx_tags", "=", "coll_tags", "+", "[", "\"index:%s\"", "%", "idx", "]", "submit_method", "(", "self", ",", "metric_name_alias", ",", "val", ",", "tags", "=", "idx_tags", ")", "else", ":", "submit_method", ",", "metric_name_alias", "=", "self", ".", "_resolve_metric", "(", "'collection.%s'", "%", "m", ",", "self", ".", "COLLECTION_METRICS", ")", "submit_method", "(", "self", ",", "metric_name_alias", ",", "value", ",", "tags", "=", "coll_tags", ")", "except", "Exception", "as", "e", ":", "self", ".", "log", ".", "warning", "(", "u\"Failed to record `collection` metrics.\"", ")", "self", ".", "log", ".", "exception", "(", "e", ")" ]
41.86514
24.145038
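An illustrative `instance` dict exercising the options the check reads above; the connection string and tag values are placeholders:

instance = {
    'server': 'mongodb://datadog:sekret@localhost:27017/admin',
    'timeout': 30,                        # seconds; converted to ms above
    'tags': ['env:dev'],
    'additional_metrics': ['top'],        # enables the dbtop usage metrics
    'replica_check': True,
    'collections': ['orders'],            # per-collection collstats metrics
    'collections_indexes_stats': True,    # requires mongo >= 3.2
}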
def sympy_to_py(func, args):
    """
    Turn a symbolic expression into a Python lambda function,
    which has the names of the variables and parameters as its argument names.

    :param func: sympy expression
    :param args: variables and parameters in this model
    :return: lambda function to be used for numerical evaluation of the model.
    """
    # replace the derivatives with printable variables.
    derivatives = {var: Variable(var.name) for var in args
                   if isinstance(var, sympy.Derivative)}
    func = func.xreplace(derivatives)
    args = [derivatives[var] if isinstance(var, sympy.Derivative) else var
            for var in args]
    lambdafunc = lambdify(args, func, printer=SymfitNumPyPrinter,
                          dummify=False)
    # Check if the names of the lambda function are what we expect
    signature = inspect_sig.signature(lambdafunc)
    sig_parameters = OrderedDict(signature.parameters)
    for arg, lambda_arg in zip(args, sig_parameters):
        if arg.name != lambda_arg:
            break
    else:  # Lambdifying successful!
        return lambdafunc

    # If we are here (very rare), then one of the lambda args is still a Dummy.
    # In this case we will manually handle the naming.
    lambda_names = sig_parameters.keys()
    arg_names = [arg.name for arg in args]
    conversion = dict(zip(arg_names, lambda_names))

    # Wrap the lambda such that arg names are translated into the correct dummy
    # symbol names
    @wraps(lambdafunc)
    def wrapped_lambdafunc(*ordered_args, **kwargs):
        converted_kwargs = {conversion[k]: v for k, v in kwargs.items()}
        return lambdafunc(*ordered_args, **converted_kwargs)

    # Update the signature of wrapped_lambdafunc to match our args
    new_sig_parameters = OrderedDict()
    for arg_name, dummy_name in conversion.items():
        if arg_name == dummy_name:  # Already has the correct name
            new_sig_parameters[arg_name] = sig_parameters[arg_name]
        else:  # Change the dummy inspect.Parameter to the correct name
            param = sig_parameters[dummy_name]
            param = param.replace(name=arg_name)
            new_sig_parameters[arg_name] = param

    wrapped_lambdafunc.__signature__ = signature.replace(
        parameters=new_sig_parameters.values()
    )
    return wrapped_lambdafunc
[ "def", "sympy_to_py", "(", "func", ",", "args", ")", ":", "# replace the derivatives with printable variables.", "derivatives", "=", "{", "var", ":", "Variable", "(", "var", ".", "name", ")", "for", "var", "in", "args", "if", "isinstance", "(", "var", ",", "sympy", ".", "Derivative", ")", "}", "func", "=", "func", ".", "xreplace", "(", "derivatives", ")", "args", "=", "[", "derivatives", "[", "var", "]", "if", "isinstance", "(", "var", ",", "sympy", ".", "Derivative", ")", "else", "var", "for", "var", "in", "args", "]", "lambdafunc", "=", "lambdify", "(", "args", ",", "func", ",", "printer", "=", "SymfitNumPyPrinter", ",", "dummify", "=", "False", ")", "# Check if the names of the lambda function are what we expect", "signature", "=", "inspect_sig", ".", "signature", "(", "lambdafunc", ")", "sig_parameters", "=", "OrderedDict", "(", "signature", ".", "parameters", ")", "for", "arg", ",", "lambda_arg", "in", "zip", "(", "args", ",", "sig_parameters", ")", ":", "if", "arg", ".", "name", "!=", "lambda_arg", ":", "break", "else", ":", "# Lambdifying succesful!", "return", "lambdafunc", "# If we are here (very rare), then one of the lambda arg is still a Dummy.", "# In this case we will manually handle the naming.", "lambda_names", "=", "sig_parameters", ".", "keys", "(", ")", "arg_names", "=", "[", "arg", ".", "name", "for", "arg", "in", "args", "]", "conversion", "=", "dict", "(", "zip", "(", "arg_names", ",", "lambda_names", ")", ")", "# Wrap the lambda such that arg names are translated into the correct dummy", "# symbol names", "@", "wraps", "(", "lambdafunc", ")", "def", "wrapped_lambdafunc", "(", "*", "ordered_args", ",", "*", "*", "kwargs", ")", ":", "converted_kwargs", "=", "{", "conversion", "[", "k", "]", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "}", "return", "lambdafunc", "(", "*", "ordered_args", ",", "*", "*", "converted_kwargs", ")", "# Update the signature of wrapped_lambdafunc to math our args", "new_sig_parameters", "=", "OrderedDict", "(", ")", "for", "arg_name", ",", "dummy_name", "in", "conversion", ".", "items", "(", ")", ":", "if", "arg_name", "==", "dummy_name", ":", "# Already has the correct name", "new_sig_parameters", "[", "arg_name", "]", "=", "sig_parameters", "[", "arg_name", "]", "else", ":", "# Change the dummy inspect.Parameter to the correct name", "param", "=", "sig_parameters", "[", "dummy_name", "]", "param", "=", "param", ".", "replace", "(", "name", "=", "arg_name", ")", "new_sig_parameters", "[", "arg_name", "]", "=", "param", "wrapped_lambdafunc", ".", "__signature__", "=", "signature", ".", "replace", "(", "parameters", "=", "new_sig_parameters", ".", "values", "(", ")", ")", "return", "wrapped_lambdafunc" ]
43.377358
18.622642
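Hedged usage sketch: in the source the arguments are symfit Variable/Parameter objects, but plain sympy symbols (which also expose the .name attribute the wrapper inspects) approximate the call for illustration; `sympy_to_py` is assumed importable.

import sympy

x, a = sympy.symbols('x a')
f = sympy_to_py(sympy.exp(-a * x), [x, a])
f(2.0, 0.5)   # evaluates exp(-1.0), roughly 0.3679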
def square_edge_grill(alpha, l=None, Dh=None, fd=None):
    r'''Returns the loss coefficient for a square grill or square bar
    screen or perforated plate with squared edges of thickness l, as shown in
    [1]_.

    For Dh < l < 50Dh:

    .. math::
        K = \frac{0.5(1-\alpha) + (1-\alpha^2)}{\alpha^2}

    else:

    .. math::
        K = \frac{0.5(1-\alpha) + (1-\alpha^2) + \frac{f l}{D_h}}{\alpha^2}

    Parameters
    ----------
    alpha : float
        Fraction of grill open to flow [-]
    l : float, optional
        Thickness of the grill or plate [m]
    Dh : float, optional
        Hydraulic diameter of gap in grill, [m]
    fd : float, optional
        Darcy friction factor [-]

    Returns
    -------
    K : float
        Loss coefficient [-]

    Notes
    -----
    If l, Dh, or fd is not provided, the first expression is used instead.
    The alteration of the expression to include friction factor is there
    if the grill is long enough to have considerable friction along the
    surface of the grill.

    Examples
    --------
    >>> square_edge_grill(.45)
    5.296296296296296
    >>> square_edge_grill(.45, l=.15, Dh=.002, fd=.0185)
    12.148148148148147

    References
    ----------
    .. [1] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York, N.Y.:
       Van Nostrand Reinhold Co., 1984.
    '''
    if Dh and l and fd and l > 50*Dh:
        return (0.5*(1-alpha) + (1-alpha**2) + fd*l/Dh)/alpha**2
    else:
        return (0.5*(1-alpha) + (1-alpha**2))/alpha**2
[ "def", "square_edge_grill", "(", "alpha", ",", "l", "=", "None", ",", "Dh", "=", "None", ",", "fd", "=", "None", ")", ":", "if", "Dh", "and", "l", "and", "fd", "and", "l", ">", "50", "*", "Dh", ":", "return", "(", "0.5", "*", "(", "1", "-", "alpha", ")", "+", "(", "1", "-", "alpha", "**", "2", ")", "+", "fd", "*", "l", "/", "Dh", ")", "/", "alpha", "**", "2", "else", ":", "return", "(", "0.5", "*", "(", "1", "-", "alpha", ")", "+", "(", "1", "-", "alpha", "**", "2", ")", ")", "/", "alpha", "**", "2" ]
27.055556
24.981481
def uploadFile(self, filename, ispickle=False, athome=False):
    """
    Uploads a single file to Redunda.

    :param str filename: The name of the file to upload
    :param bool ispickle: Optional; set to True if the file is a pickle. Default is False.
    :param bool athome: Optional; set to True to resolve the file relative to the home directory. Default is False.
    :returns: nothing
    """
    print("Uploading file {} to Redunda.".format(filename))

    _, tail = os.path.split(filename)

    url = "https://redunda.sobotics.org/bots/data/{}?key={}".format(tail, self.key)

    #Set the content type to 'application/octet-stream'
    header = {"Content-type": "application/octet-stream"}

    filedata = ""

    if athome:
        filename = str(os.path.expanduser("~")) + filename

    #Read the data from a file to a string.
    if filename.endswith(".pickle") or ispickle:
        try:
            with open(filename, "rb") as fileToRead:
                data = pickle.load(fileToRead)
        except pickle.PickleError as perr:
            print("Pickling error occurred: {}".format(perr))
            return
        filedata = json.dumps(data)
    else:
        try:
            with open(filename, "r") as fileToRead:
                filedata = fileToRead.read()
        except IOError as ioerr:
            print("IOError occurred: {}".format(ioerr))
            return

    requestToMake = request.Request(url, data=filedata.encode("utf-8"), headers=header)

    #Make the request.
    response = request.urlopen(requestToMake)

    if response.code >= 400:
        print("Error occurred while uploading file '{}' with error code {}.".format(filename, response.code))
[ "def", "uploadFile", "(", "self", ",", "filename", ",", "ispickle", "=", "False", ",", "athome", "=", "False", ")", ":", "print", "(", "\"Uploading file {} to Redunda.\"", ".", "format", "(", "filename", ")", ")", "_", ",", "tail", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "url", "=", "\"https://redunda.sobotics.org/bots/data/{}?key={}\"", ".", "format", "(", "tail", ",", "self", ".", "key", ")", "#Set the content type to 'application/octet-stream'", "header", "=", "{", "\"Content-type\"", ":", "\"application/octet-stream\"", "}", "filedata", "=", "\"\"", "if", "athome", ":", "filename", "=", "str", "(", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", ")", "+", "filename", "#Read the data from a file to a string.", "if", "filename", ".", "endswith", "(", "\".pickle\"", ")", "or", "ispickle", ":", "try", ":", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "fileToRead", ":", "data", "=", "pickle", ".", "load", "(", "fileToRead", ")", "except", "pickle", ".", "PickleError", "as", "perr", ":", "print", "(", "\"Pickling error occurred: {}\"", ".", "format", "(", "perr", ")", ")", "return", "filedata", "=", "json", ".", "dumps", "(", "data", ")", "else", ":", "try", ":", "with", "open", "(", "filename", ",", "\"r\"", ")", "as", "fileToRead", ":", "filedata", "=", "fileToRead", ".", "read", "(", ")", "except", "IOError", "as", "ioerr", ":", "print", "(", "\"IOError occurred: {}\"", ".", "format", "(", "ioerr", ")", ")", "return", "requestToMake", "=", "request", ".", "Request", "(", "url", ",", "data", "=", "filedata", ".", "encode", "(", "\"utf-8\"", ")", ",", "headers", "=", "header", ")", "#Make the request.", "response", "=", "request", ".", "urlopen", "(", "requestToMake", ")", "if", "response", ".", "code", ">=", "400", ":", "print", "(", "\"Error occurred while uploading file '{}' with error code {}.\"", ".", "format", "(", "filename", ",", "response", ".", "code", ")", ")" ]
37.217391
22.913043
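Hypothetical calls; `RedundaClient` is a stand-in name for the class defining uploadFile, and the key and paths are placeholders:

bot = RedundaClient(key='YOUR_REDUNDA_KEY')
bot.uploadFile('state.pickle', ispickle=True)      # pickled data is re-sent as JSON
bot.uploadFile('/.mybot.json', athome=True)        # resolved under the home directory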
def grad(self, X, mean=None, lenscale=None):
    r"""
    Get the gradients of this basis w.r.t.\ the mean and length scales.

    Parameters
    ----------
    X: ndarray
        (n, d) array of observations where n is the number of samples, and
        d is the dimensionality of X.
    mean: ndarray, optional
        array of shape (d,) frequency means (one for each dimension of X).
        If not input, this uses the value of the initial mean.
    lenscale: ndarray, optional
        array of shape (d,) length scales (one for each dimension of X). If
        not input, this uses the value of the initial length scale.

    Returns
    -------
    ndarray:
        shape (n, 4*nbases) where nbases is the number of random RBF bases,
        rounded up to the next power of two. This is
        :math:`\partial \phi(\mathbf{x}) / \partial \boldsymbol\mu`
    ndarray:
        shape (n, 4*nbases) where nbases is the number of random RBF bases,
        rounded up to the next power of two. This is
        :math:`\partial \phi(\mathbf{x}) / \partial \mathbf{l}`
    """
    d = X.shape[1]
    mean = self._check_dim(d, mean, paramind=0)
    lenscale = self._check_dim(d, lenscale, paramind=1)

    VX = self._makeVX(X / lenscale)
    mX = X.dot(mean)[:, np.newaxis]

    sinVXpmX = - np.sin(VX + mX)
    sinVXmmX = - np.sin(VX - mX)
    cosVXpmX = np.cos(VX + mX)
    cosVXmmX = np.cos(VX - mX)

    dPhi_len = []
    dPhi_mean = []
    for i, l in enumerate(lenscale):
        # Means
        dmX = X[:, [i]]
        dPhi_mean.append(np.hstack((dmX * sinVXpmX, dmX * cosVXpmX,
                                    -dmX * sinVXmmX, -dmX * cosVXmmX)) /
                         np.sqrt(2 * self.n))

        # Lenscales
        indlen = np.zeros(d)
        indlen[i] = 1. / l**2
        dVX = - self._makeVX(X * indlen)  # FIXME make this more efficient?
        dPhi_len.append(np.hstack((dVX * sinVXpmX, dVX * cosVXpmX,
                                   dVX * sinVXmmX, dVX * cosVXmmX)) /
                        np.sqrt(2 * self.n))

    dPhi_mean = np.dstack(dPhi_mean) if d != 1 else dPhi_mean[0]
    dPhi_len = np.dstack(dPhi_len) if d != 1 else dPhi_len[0]
    return dPhi_mean, dPhi_len
[ "def", "grad", "(", "self", ",", "X", ",", "mean", "=", "None", ",", "lenscale", "=", "None", ")", ":", "d", "=", "X", ".", "shape", "[", "1", "]", "mean", "=", "self", ".", "_check_dim", "(", "d", ",", "mean", ",", "paramind", "=", "0", ")", "lenscale", "=", "self", ".", "_check_dim", "(", "d", ",", "lenscale", ",", "paramind", "=", "1", ")", "VX", "=", "self", ".", "_makeVX", "(", "X", "/", "lenscale", ")", "mX", "=", "X", ".", "dot", "(", "mean", ")", "[", ":", ",", "np", ".", "newaxis", "]", "sinVXpmX", "=", "-", "np", ".", "sin", "(", "VX", "+", "mX", ")", "sinVXmmX", "=", "-", "np", ".", "sin", "(", "VX", "-", "mX", ")", "cosVXpmX", "=", "np", ".", "cos", "(", "VX", "+", "mX", ")", "cosVXmmX", "=", "np", ".", "cos", "(", "VX", "-", "mX", ")", "dPhi_len", "=", "[", "]", "dPhi_mean", "=", "[", "]", "for", "i", ",", "l", "in", "enumerate", "(", "lenscale", ")", ":", "# Means", "dmX", "=", "X", "[", ":", ",", "[", "i", "]", "]", "dPhi_mean", ".", "append", "(", "np", ".", "hstack", "(", "(", "dmX", "*", "sinVXpmX", ",", "dmX", "*", "cosVXpmX", ",", "-", "dmX", "*", "sinVXmmX", ",", "-", "dmX", "*", "cosVXmmX", ")", ")", "/", "np", ".", "sqrt", "(", "2", "*", "self", ".", "n", ")", ")", "# Lenscales", "indlen", "=", "np", ".", "zeros", "(", "d", ")", "indlen", "[", "i", "]", "=", "1.", "/", "l", "**", "2", "dVX", "=", "-", "self", ".", "_makeVX", "(", "X", "*", "indlen", ")", "# FIXME make this more efficient?", "dPhi_len", ".", "append", "(", "np", ".", "hstack", "(", "(", "dVX", "*", "sinVXpmX", ",", "dVX", "*", "cosVXpmX", ",", "dVX", "*", "sinVXmmX", ",", "dVX", "*", "cosVXmmX", ")", ")", "/", "np", ".", "sqrt", "(", "2", "*", "self", ".", "n", ")", ")", "dPhi_mean", "=", "np", ".", "dstack", "(", "dPhi_mean", ")", "if", "d", "!=", "1", "else", "dPhi_mean", "[", "0", "]", "dPhi_len", "=", "np", ".", "dstack", "(", "dPhi_len", ")", "if", "d", "!=", "1", "else", "dPhi_len", "[", "0", "]", "return", "dPhi_mean", ",", "dPhi_len" ]
39.016667
21.533333
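The length-scale branch above encodes the chain-rule factor d/dl cos(x/l) = (x / l**2) * sin(x / l) through `indlen[i] = 1. / l**2`. A self-contained finite-difference check of that identity, independent of the class:

# Verify d/dl cos(x/l) == (x / l**2) * sin(x / l) numerically.
import numpy as np

x, l, eps = 1.7, 0.8, 1e-6
analytic = (x / l**2) * np.sin(x / l)
numeric = (np.cos(x / (l + eps)) - np.cos(x / (l - eps))) / (2 * eps)
assert np.isclose(analytic, numeric), (analytic, numeric)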
def _handle_next_task(self): """ We have to catch three ways a task can be "done": 1. normal execution: the task runs/fails and puts a result back on the queue, 2. new dependencies: the task yielded new deps that were not complete and will be rescheduled and dependencies added, 3. child process dies: we need to catch this separately. """ self._idle_since = None while True: self._purge_children() # Deal with subprocess failures try: task_id, status, expl, missing, new_requirements = ( self._task_result_queue.get( timeout=self._config.wait_interval)) except Queue.Empty: return task = self._scheduled_tasks[task_id] if not task or task_id not in self._running_tasks: continue # Not a running task. Probably already removed. # Maybe it yielded something? # external task if run not implemented, retry-able if config option is enabled. external_task_retryable = _is_external(task) and self._config.retry_external_tasks if status == FAILED and not external_task_retryable: self._email_task_failure(task, expl) new_deps = [] if new_requirements: new_req = [load_task(module, name, params) for module, name, params in new_requirements] for t in new_req: self.add(t) new_deps = [t.task_id for t in new_req] self._add_task(worker=self._id, task_id=task_id, status=status, expl=json.dumps(expl), resources=task.process_resources(), runnable=None, params=task.to_str_params(), family=task.task_family, module=task.task_module, new_deps=new_deps, assistant=self._assistant, retry_policy_dict=_get_retry_policy_dict(task)) self._running_tasks.pop(task_id) # re-add task to reschedule missing dependencies if missing: reschedule = True # keep out of infinite loops by not rescheduling too many times for task_id in missing: self.unfulfilled_counts[task_id] += 1 if (self.unfulfilled_counts[task_id] > self._config.max_reschedules): reschedule = False if reschedule: self.add(task) self.run_succeeded &= (status == DONE) or (len(new_deps) > 0) return
[ "def", "_handle_next_task", "(", "self", ")", ":", "self", ".", "_idle_since", "=", "None", "while", "True", ":", "self", ".", "_purge_children", "(", ")", "# Deal with subprocess failures", "try", ":", "task_id", ",", "status", ",", "expl", ",", "missing", ",", "new_requirements", "=", "(", "self", ".", "_task_result_queue", ".", "get", "(", "timeout", "=", "self", ".", "_config", ".", "wait_interval", ")", ")", "except", "Queue", ".", "Empty", ":", "return", "task", "=", "self", ".", "_scheduled_tasks", "[", "task_id", "]", "if", "not", "task", "or", "task_id", "not", "in", "self", ".", "_running_tasks", ":", "continue", "# Not a running task. Probably already removed.", "# Maybe it yielded something?", "# external task if run not implemented, retry-able if config option is enabled.", "external_task_retryable", "=", "_is_external", "(", "task", ")", "and", "self", ".", "_config", ".", "retry_external_tasks", "if", "status", "==", "FAILED", "and", "not", "external_task_retryable", ":", "self", ".", "_email_task_failure", "(", "task", ",", "expl", ")", "new_deps", "=", "[", "]", "if", "new_requirements", ":", "new_req", "=", "[", "load_task", "(", "module", ",", "name", ",", "params", ")", "for", "module", ",", "name", ",", "params", "in", "new_requirements", "]", "for", "t", "in", "new_req", ":", "self", ".", "add", "(", "t", ")", "new_deps", "=", "[", "t", ".", "task_id", "for", "t", "in", "new_req", "]", "self", ".", "_add_task", "(", "worker", "=", "self", ".", "_id", ",", "task_id", "=", "task_id", ",", "status", "=", "status", ",", "expl", "=", "json", ".", "dumps", "(", "expl", ")", ",", "resources", "=", "task", ".", "process_resources", "(", ")", ",", "runnable", "=", "None", ",", "params", "=", "task", ".", "to_str_params", "(", ")", ",", "family", "=", "task", ".", "task_family", ",", "module", "=", "task", ".", "task_module", ",", "new_deps", "=", "new_deps", ",", "assistant", "=", "self", ".", "_assistant", ",", "retry_policy_dict", "=", "_get_retry_policy_dict", "(", "task", ")", ")", "self", ".", "_running_tasks", ".", "pop", "(", "task_id", ")", "# re-add task to reschedule missing dependencies", "if", "missing", ":", "reschedule", "=", "True", "# keep out of infinite loops by not rescheduling too many times", "for", "task_id", "in", "missing", ":", "self", ".", "unfulfilled_counts", "[", "task_id", "]", "+=", "1", "if", "(", "self", ".", "unfulfilled_counts", "[", "task_id", "]", ">", "self", ".", "_config", ".", "max_reschedules", ")", ":", "reschedule", "=", "False", "if", "reschedule", ":", "self", ".", "add", "(", "task", ")", "self", ".", "run_succeeded", "&=", "(", "status", "==", "DONE", ")", "or", "(", "len", "(", "new_deps", ")", ">", "0", ")", "return" ]
41.434783
19.985507
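The loop above assumes each child process pushes a five-tuple onto the result queue. A minimal sketch of that message protocol (illustrative only, not Luigi itself; the task id and status string are made up):

import queue

q = queue.Queue()
# (task_id, status, expl, missing, new_requirements)
q.put(("MyTask__99", "DONE", "", [], []))
task_id, status, expl, missing, new_requirements = q.get(timeout=1)
print(task_id, status)   # MyTask__99 DONE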
def find(value, find, start=0):
    """
    Return the index of `find` in `value`, searching from `start`
    :param value: string to search in
    :param find: a string, or a list of strings, to look for
    :param start: index at which to begin the search
    :return: index of the first match; if NOT found, return the length of the `value` string
    """
    l = len(value)
    if is_list(find):
        m = l
        for f in find:
            i = value.find(f, start)
            if i == -1:
                continue
            m = min(m, i)
        return m
    else:
        i = value.find(find, start)
        if i == -1:
            return l
        return i
[ "def", "find", "(", "value", ",", "find", ",", "start", "=", "0", ")", ":", "l", "=", "len", "(", "value", ")", "if", "is_list", "(", "find", ")", ":", "m", "=", "l", "for", "f", "in", "find", ":", "i", "=", "value", ".", "find", "(", "f", ",", "start", ")", "if", "i", "==", "-", "1", ":", "continue", "m", "=", "min", "(", "m", ",", "i", ")", "return", "m", "else", ":", "i", "=", "value", ".", "find", "(", "find", ",", "start", ")", "if", "i", "==", "-", "1", ":", "return", "l", "return", "i" ]
23.272727
17.363636
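A runnable restatement of the contract above, with a local `is_list` stand-in for the library helper. Note the miss case returns len(value) rather than -1:

def is_list(v):                      # local stand-in for the library helper
    return isinstance(v, list)

def find(value, needle, start=0):
    l = len(value)
    if is_list(needle):
        m = l
        for f in needle:
            i = value.find(f, start)
            if i == -1:
                continue
            m = min(m, i)
        return m
    i = value.find(needle, start)
    return l if i == -1 else i

assert find("a=b,c", ["=", ","]) == 1    # earliest of several needles
assert find("abc", "x") == 3             # miss returns len(value), not -1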
def run(self):
        """ Run the task - compose full series + add to our results """
        empty = False
        while not empty:
            try:
                s = self.series.get()
                result_dict = itunes.get_rss_feed_data_from_series(s)
                self.storer.store(result_dict)
                self.logger.info('Retrieved and stored %s', str(s.id))
            except Exception as e:  # pylint: disable=W0703
                print(e)
            finally:
                self.series.task_done()
                empty = self.series.empty()
[ "def", "run", "(", "self", ")", ":", "empty", "=", "False", "while", "not", "empty", ":", "try", ":", "s", "=", "self", ".", "series", ".", "get", "(", ")", "result_dict", "=", "itunes", ".", "get_rss_feed_data_from_series", "(", "s", ")", "self", ".", "storer", ".", "store", "(", "result_dict", ")", "self", ".", "logger", ".", "info", "(", "'Retrieved and stored %s'", ",", "str", "(", "s", ".", "id", ")", ")", "except", "Exception", "as", "e", ":", "# pylint: disable=W0703", "print", "e", "finally", ":", "self", ".", "series", ".", "task_done", "(", ")", "empty", "=", "self", ".", "series", ".", "empty", "(", ")" ]
29.4375
15.6875
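A minimal runnable sketch of the drain-until-empty worker pattern above, with the iTunes fetch and the storer replaced by a print stub:

import queue

series = queue.Queue()
for sid in (1, 2, 3):
    series.put(sid)

empty = False
while not empty:
    try:
        s = series.get()
        print("fetched and stored", s)   # stand-in for the RSS fetch + store
    finally:
        series.task_done()
        empty = series.empty()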
def _read_coll( ctx: ReaderContext, f: Callable[[Collection[Any]], Union[llist.List, lset.Set, vector.Vector]], end_token: str, coll_name: str, ): """Read a collection from the input stream and create the collection using f.""" coll: List = [] reader = ctx.reader while True: token = reader.peek() if token == "": raise SyntaxError(f"Unexpected EOF in {coll_name}") if whitespace_chars.match(token): reader.advance() continue if token == end_token: reader.next_token() return f(coll) elem = _read_next(ctx) if elem is COMMENT: continue coll.append(elem)
[ "def", "_read_coll", "(", "ctx", ":", "ReaderContext", ",", "f", ":", "Callable", "[", "[", "Collection", "[", "Any", "]", "]", ",", "Union", "[", "llist", ".", "List", ",", "lset", ".", "Set", ",", "vector", ".", "Vector", "]", "]", ",", "end_token", ":", "str", ",", "coll_name", ":", "str", ",", ")", ":", "coll", ":", "List", "=", "[", "]", "reader", "=", "ctx", ".", "reader", "while", "True", ":", "token", "=", "reader", ".", "peek", "(", ")", "if", "token", "==", "\"\"", ":", "raise", "SyntaxError", "(", "f\"Unexpected EOF in {coll_name}\"", ")", "if", "whitespace_chars", ".", "match", "(", "token", ")", ":", "reader", ".", "advance", "(", ")", "continue", "if", "token", "==", "end_token", ":", "reader", ".", "next_token", "(", ")", "return", "f", "(", "coll", ")", "elem", "=", "_read_next", "(", "ctx", ")", "if", "elem", "is", "COMMENT", ":", "continue", "coll", ".", "append", "(", "elem", ")" ]
28.916667
16.625
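The reader above is a peek/advance scanner that accumulates elements until it sees the closing token, raising on EOF. A much-simplified standalone version over a pre-split token list (whitespace and comment handling elided):

def read_coll(tokens, end_token, coll_name="list"):
    coll, i = [], 0
    while True:
        if i >= len(tokens):
            raise SyntaxError("Unexpected EOF in {}".format(coll_name))
        tok = tokens[i]
        i += 1
        if tok == end_token:           # closing token ends the collection
            return coll
        coll.append(tok)

assert read_coll(["1", "2", ")"], ")") == ["1", "2"]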
def single_line_stdout(cmd, expected_errors=(), shell=True, sudo=False, quiet=False): """ Runs a command and returns the first line of the result, that would be written to `stdout`, as a string. The output itself can be suppressed. :param cmd: Command to run. :type cmd: unicode :param expected_errors: If the return code is non-zero, but found in this tuple, it will be ignored. ``None`` is returned in this case. :type expected_errors: tuple :param shell: Use a shell. :type shell: bool :param sudo: Use `sudo`. :type sudo: bool :param quiet: If set to ``True``, does not show any output. :type quiet: bool :return: The result of the command as would be written to `stdout`. :rtype: unicode """ return single_line(stdout_result(cmd, expected_errors, shell, sudo, quiet))
[ "def", "single_line_stdout", "(", "cmd", ",", "expected_errors", "=", "(", ")", ",", "shell", "=", "True", ",", "sudo", "=", "False", ",", "quiet", "=", "False", ")", ":", "return", "single_line", "(", "stdout_result", "(", "cmd", ",", "expected_errors", ",", "shell", ",", "sudo", ",", "quiet", ")", ")" ]
41.4
23.5
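A rough standalone analogue using subprocess in place of the library's stdout_result helper; the expected_errors whitelist and sudo handling are elided:

import subprocess

def single_line_stdout(cmd):
    res = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    lines = res.stdout.splitlines()
    return lines[0] if lines else None   # first line only, None if no output

print(single_line_stdout("echo hello"))  # hello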
def clone(self, **kwargs): """ Clone a part. .. versionadded:: 2.3 :param kwargs: (optional) additional keyword=value arguments :type kwargs: dict :return: cloned :class:`models.Part` :raises APIError: if the `Part` could not be cloned Example ------- >>> bike = client.model('Bike') >>> bike2 = bike.clone() """ parent = self.parent() return self._client._create_clone(parent, self, **kwargs)
[ "def", "clone", "(", "self", ",", "*", "*", "kwargs", ")", ":", "parent", "=", "self", ".", "parent", "(", ")", "return", "self", ".", "_client", ".", "_create_clone", "(", "parent", ",", "self", ",", "*", "*", "kwargs", ")" ]
25.842105
19.105263
def initialize_bfd(self, abfd): """Initialize underlying libOpcodes library using BFD.""" self._ptr = _opcodes.initialize_bfd(abfd._ptr) # Already done inside opcodes.c #self.architecture = abfd.architecture #self.machine = abfd.machine #self.endian = abfd.endian # force intel syntax if self.architecture == ARCH_I386: if abfd.arch_size == 32: self.machine = MACH_I386_I386_INTEL_SYNTAX #abfd.machine = MACH_I386_I386_INTEL_SYNTAX elif abfd.arch_size == 64: self.machine = MACH_X86_64_INTEL_SYNTAX
[ "def", "initialize_bfd", "(", "self", ",", "abfd", ")", ":", "self", ".", "_ptr", "=", "_opcodes", ".", "initialize_bfd", "(", "abfd", ".", "_ptr", ")", "# Already done inside opcodes.c", "#self.architecture = abfd.architecture", "#self.machine = abfd.machine", "#self.endian = abfd.endian", "# force intel syntax", "if", "self", ".", "architecture", "==", "ARCH_I386", ":", "if", "abfd", ".", "arch_size", "==", "32", ":", "self", ".", "machine", "=", "MACH_I386_I386_INTEL_SYNTAX", "#abfd.machine = MACH_I386_I386_INTEL_SYNTAX", "elif", "abfd", ".", "arch_size", "==", "64", ":", "self", ".", "machine", "=", "MACH_X86_64_INTEL_SYNTAX" ]
39.6875
12.875
def evolve(self, new_date): """ evolve to the new process state at the next date, i.e. do one step in the simulation :param date new_date: date of the new state :return State: """ if self.state.date == new_date and not self.initial_state.date == new_date: return self.state self.state.value = self.func(self.state, new_date) self.state.date = new_date return self.state
[ "def", "evolve", "(", "self", ",", "new_date", ")", ":", "if", "self", ".", "state", ".", "date", "==", "new_date", "and", "not", "self", ".", "initial_state", ".", "date", "==", "new_date", ":", "return", "self", ".", "state", "self", ".", "state", ".", "value", "=", "self", ".", "func", "(", "self", ".", "state", ",", "new_date", ")", "self", ".", "state", ".", "date", "=", "new_date", "return", "self", ".", "state" ]
36.916667
18.916667
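A tiny runnable sketch of the evolve contract above, with stub State and process classes; the step function and integer dates are made up:

class State(object):
    def __init__(self, date, value):
        self.date, self.value = date, value

class Process(object):
    def __init__(self, func, state):
        self.func = func
        self.state = state
        self.initial_state = State(state.date, state.value)  # snapshot copy

    def evolve(self, new_date):
        if self.state.date == new_date and not self.initial_state.date == new_date:
            return self.state            # already at new_date: no-op
        self.state.value = self.func(self.state, new_date)
        self.state.date = new_date
        return self.state

p = Process(lambda s, d: s.value + 1.0, State(0, 0.0))
print(p.evolve(1).value)   # 1.0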
def _JMS_to_Bern_I(C, qq):
    """From JMS to BernI basis (= traditional SUSY basis in this case)
    for $\Delta F=2$ operators.

    `qq` should be 'sb', 'db', 'ds' or 'cu'"""
    if qq in ['sb', 'db', 'ds']:
        dd = 'dd'
        ij = tuple(dflav[q] for q in qq)
    elif qq == 'cu':
        dd = 'uu'
        ij = tuple(uflav[q] for q in qq)
    else:
        raise ValueError("not in Bern_I: {}".format(qq))
    ji = (ij[1], ij[0])
    d = {
        '1' + 2 * qq : C["V{}LL".format(dd)][ij + ij],
        '2' + 2 * qq : C["S1{}RR".format(dd)][ji + ji].conj()
                       - C["S8{}RR".format(dd)][ji + ji].conj() / (2 * Nc),
        '3' + 2 * qq : C["S8{}RR".format(dd)][ji + ji].conj() / 2,
        '4' + 2 * qq : -C["V8{}LR".format(dd)][ij + ij],
        '5' + 2 * qq : -2 * C["V1{}LR".format(dd)][ij + ij]
                       + C["V8{}LR".format(dd)][ij + ij] / Nc,
        '1p' + 2 * qq : C["V{}RR".format(dd)][ij + ij],
        '2p' + 2 * qq : C["S1{}RR".format(dd)][ij + ij]
                        - C["S8{}RR".format(dd)][ij + ij] / (2 * Nc),
        '3p' + 2 * qq : C["S8{}RR".format(dd)][ij + ij] / 2
    }
    return d
[ "def", "_JMS_to_Bern_I", "(", "C", ",", "qq", ")", ":", "if", "qq", "in", "[", "'sb'", ",", "'db'", ",", "'ds'", "]", ":", "dd", "=", "'dd'", "ij", "=", "tuple", "(", "dflav", "[", "q", "]", "for", "q", "in", "qq", ")", "elif", "qq", "==", "'cu'", ":", "dd", "=", "'uu'", "ij", "=", "tuple", "(", "uflav", "[", "q", "]", "for", "q", "in", "qq", ")", "else", ":", "raise", "ValueError", "(", "\"not in Bern_I: \"", ".", "format", "(", "qq", ")", ")", "ji", "=", "(", "ij", "[", "1", "]", ",", "ij", "[", "0", "]", ")", "d", "=", "{", "'1'", "+", "2", "*", "qq", ":", "C", "[", "\"V{}LL\"", ".", "format", "(", "dd", ")", "]", "[", "ij", "+", "ij", "]", ",", "'2'", "+", "2", "*", "qq", ":", "C", "[", "\"S1{}RR\"", ".", "format", "(", "dd", ")", "]", "[", "ji", "+", "ji", "]", ".", "conj", "(", ")", "-", "C", "[", "\"S8{}RR\"", ".", "format", "(", "dd", ")", "]", "[", "ji", "+", "ji", "]", ".", "conj", "(", ")", "/", "(", "2", "*", "Nc", ")", ",", "'3'", "+", "2", "*", "qq", ":", "C", "[", "\"S8{}RR\"", ".", "format", "(", "dd", ")", "]", "[", "ji", "+", "ji", "]", ".", "conj", "(", ")", "/", "2", ",", "'4'", "+", "2", "*", "qq", ":", "-", "C", "[", "\"V8{}LR\"", ".", "format", "(", "dd", ")", "]", "[", "ij", "+", "ij", "]", ",", "'5'", "+", "2", "*", "qq", ":", "-", "2", "*", "C", "[", "\"V1{}LR\"", ".", "format", "(", "dd", ")", "]", "[", "ij", "+", "ij", "]", "+", "C", "[", "\"V8{}LR\"", ".", "format", "(", "dd", ")", "]", "[", "ij", "+", "ij", "]", "/", "Nc", ",", "'1p'", "+", "2", "*", "qq", ":", "C", "[", "\"V{}RR\"", ".", "format", "(", "dd", ")", "]", "[", "ij", "+", "ij", "]", ",", "'2p'", "+", "2", "*", "qq", ":", "C", "[", "\"S1{}RR\"", ".", "format", "(", "dd", ")", "]", "[", "ij", "+", "ij", "]", "-", "C", "[", "\"S8{}RR\"", ".", "format", "(", "dd", ")", "]", "[", "ij", "+", "ij", "]", "/", "(", "2", "*", "Nc", ")", ",", "'3p'", "+", "2", "*", "qq", ":", "C", "[", "\"S8{}RR\"", ".", "format", "(", "dd", ")", "]", "[", "ij", "+", "ij", "]", "/", "2", "}", "return", "d" ]
41.814815
17.592593
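The dictionary keys above follow the scheme label + 2 * qq, i.e. the flavour pair repeated twice. A one-liner to see the resulting names per sector:

# Show the Wilson-coefficient key names the function above produces.
for qq in ("sb", "db", "ds", "cu"):
    print([n + 2 * qq for n in ("1", "2", "3", "4", "5", "1p", "2p", "3p")])
# e.g. for "sb": ['1sbsb', '2sbsb', '3sbsb', '4sbsb', '5sbsb', '1psbsb', ...]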
def list_l3_agent_hosting_routers(self, router, **_params): """Fetches a list of L3 agents hosting a router.""" return self.get((self.router_path + self.L3_AGENTS) % router, params=_params)
[ "def", "list_l3_agent_hosting_routers", "(", "self", ",", "router", ",", "*", "*", "_params", ")", ":", "return", "self", ".", "get", "(", "(", "self", ".", "router_path", "+", "self", ".", "L3_AGENTS", ")", "%", "router", ",", "params", "=", "_params", ")" ]
56.5
12.25
def _write_ccr(self, f, g, level: int):
        '''
        Write a CCR to file "g" from file "f" with compression level "level".
        Currently, only handles gzip compression.

        Parameters:
            f : file
                Uncompressed file to read from
            g : file
                File to write the compressed data into
            level : int
                The gzip compression level, from 0 to 9

        Returns: None
        '''
        f.seek(8)
        data = f.read()
        uSize = len(data)
        section_type = CDF.CCR_
        rfuA = 0
        cData = gzip.compress(data, level)
        block_size = CDF.CCR_BASE_SIZE64 + len(cData)
        cprOffset = 0
        ccr1 = bytearray(32)
        #ccr1[0:4] = binascii.unhexlify(CDF.V3magicNUMBER_1)
        #ccr1[4:8] = binascii.unhexlify(CDF.V3magicNUMBER_2c)
        ccr1[0:8] = struct.pack('>q', block_size)
        ccr1[8:12] = struct.pack('>i', section_type)
        ccr1[12:20] = struct.pack('>q', cprOffset)
        ccr1[20:28] = struct.pack('>q', uSize)
        ccr1[28:32] = struct.pack('>i', rfuA)
        g.seek(0, 2)
        g.write(ccr1)
        g.write(cData)
        cprOffset = self._write_cpr(g, CDF.GZIP_COMPRESSION, level)
        self._update_offset_value(g, 20, 8, cprOffset)
[ "def", "_write_ccr", "(", "self", ",", "f", ",", "g", ",", "level", ":", "int", ")", ":", "f", ".", "seek", "(", "8", ")", "data", "=", "f", ".", "read", "(", ")", "uSize", "=", "len", "(", "data", ")", "section_type", "=", "CDF", ".", "CCR_", "rfuA", "=", "0", "cData", "=", "gzip", ".", "compress", "(", "data", ",", "level", ")", "block_size", "=", "CDF", ".", "CCR_BASE_SIZE64", "+", "len", "(", "cData", ")", "cprOffset", "=", "0", "ccr1", "=", "bytearray", "(", "32", ")", "#ccr1[0:4] = binascii.unhexlify(CDF.V3magicNUMBER_1)", "#ccr1[4:8] = binascii.unhexlify(CDF.V3magicNUMBER_2c)", "ccr1", "[", "0", ":", "8", "]", "=", "struct", ".", "pack", "(", "'>q'", ",", "block_size", ")", "ccr1", "[", "8", ":", "12", "]", "=", "struct", ".", "pack", "(", "'>i'", ",", "section_type", ")", "ccr1", "[", "12", ":", "20", "]", "=", "struct", ".", "pack", "(", "'>q'", ",", "cprOffset", ")", "ccr1", "[", "20", ":", "28", "]", "=", "struct", ".", "pack", "(", "'>q'", ",", "uSize", ")", "ccr1", "[", "28", ":", "32", "]", "=", "struct", ".", "pack", "(", "'>i'", ",", "rfuA", ")", "g", ".", "seek", "(", "0", ",", "2", ")", "g", ".", "write", "(", "ccr1", ")", "g", ".", "write", "(", "cData", ")", "cprOffset", "=", "self", ".", "_write_cpr", "(", "g", ",", "CDF", ".", "GZIP_COMPRESSION", ",", "level", ")", "self", ".", "_update_offset_value", "(", "g", ",", "20", ",", "8", ",", "cprOffset", ")" ]
33.405405
17.837838
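The header written above packs five big-endian fields into 32 bytes: int64 block size, int32 section type, int64 CPR offset, int64 uncompressed size, int32 reserved. A standalone repack with placeholder values (the real constants, such as CDF.CCR_, come from the library):

import struct

# Placeholder field values; layout matches the 32-byte ccr1 buffer above.
block_size, section_type, cpr_offset, u_size, rfu_a = 44, 10, 0, 12, 0
hdr = struct.pack(">qiqqi", block_size, section_type, cpr_offset, u_size, rfu_a)
assert len(hdr) == 32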
def _const_node_to_py_ast(ctx: GeneratorContext, lisp_ast: Const) -> GeneratedPyAST: """Generate Python AST nodes for a :const Lisp AST node. Nested values in collections for :const nodes are not parsed. Consequently, this function cannot be called recursively for those nested values. Instead, call `_const_val_to_py_ast` on nested values.""" assert lisp_ast.op == NodeOp.CONST node_type = lisp_ast.type handle_const_node = _CONSTANT_HANDLER.get(node_type) assert handle_const_node is not None, f"No :const AST type handler for {node_type}" node_val = lisp_ast.val return handle_const_node(ctx, node_val)
[ "def", "_const_node_to_py_ast", "(", "ctx", ":", "GeneratorContext", ",", "lisp_ast", ":", "Const", ")", "->", "GeneratedPyAST", ":", "assert", "lisp_ast", ".", "op", "==", "NodeOp", ".", "CONST", "node_type", "=", "lisp_ast", ".", "type", "handle_const_node", "=", "_CONSTANT_HANDLER", ".", "get", "(", "node_type", ")", "assert", "handle_const_node", "is", "not", "None", ",", "f\"No :const AST type handler for {node_type}\"", "node_val", "=", "lisp_ast", ".", "val", "return", "handle_const_node", "(", "ctx", ",", "node_val", ")" ]
52.916667
21.25
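A standalone sketch of the dispatch-table pattern the function above uses: look the node type up in a dict of handlers and assert loudly on a missing entry. The handler names and return values here are invented:

_HANDLERS = {
    "int": lambda v: ("Num", v),
    "str": lambda v: ("Str", v),
}

def const_to_ast(node_type, val):
    handler = _HANDLERS.get(node_type)
    assert handler is not None, "No :const AST type handler for {}".format(node_type)
    return handler(val)

print(const_to_ast("int", 3))   # ('Num', 3)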
def safe_re_encode(s, encoding_to, errors="backslashreplace"):
    """Re-encode str or binary so that it is compatible with a given encoding
    (replacing unsupported chars).

    ASCII is used as the default, so every character > 127 comes out as an
    escape like \x99 or \u9999, which makes the output easier to debug
    (e.g. if we don't know the encoding, see #87, #96).
    """
    # prev = s
    if not encoding_to:
        encoding_to = "ASCII"
    if compat.is_bytes(s):
        s = s.decode(encoding_to, errors=errors).encode(encoding_to)
    else:
        s = s.encode(encoding_to, errors=errors).decode(encoding_to)
    # print("safe_re_encode({}, {}) => {}".format(prev, encoding_to, s))
    return s
[ "def", "safe_re_encode", "(", "s", ",", "encoding_to", ",", "errors", "=", "\"backslashreplace\"", ")", ":", "# prev = s", "if", "not", "encoding_to", ":", "encoding_to", "=", "\"ASCII\"", "if", "compat", ".", "is_bytes", "(", "s", ")", ":", "s", "=", "s", ".", "decode", "(", "encoding_to", ",", "errors", "=", "errors", ")", ".", "encode", "(", "encoding_to", ")", "else", ":", "s", "=", "s", ".", "encode", "(", "encoding_to", ",", "errors", "=", "errors", ")", ".", "decode", "(", "encoding_to", ")", "# print(\"safe_re_encode({}, {}) => {}\".format(prev, encoding_to, s))", "return", "s" ]
40.529412
21.470588
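A runnable demo of the round-trip above, with the library's compat shim inlined as a plain isinstance check:

def safe_re_encode(s, encoding_to="ASCII", errors="backslashreplace"):
    if isinstance(s, bytes):          # inlined compat.is_bytes
        return s.decode(encoding_to, errors=errors).encode(encoding_to)
    return s.encode(encoding_to, errors=errors).decode(encoding_to)

print(safe_re_encode("héllo"))        # h\xe9llo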
def get_host(name=None, ipv4addr=None, mac=None, return_fields=None, **api_opts): ''' Get host information CLI Examples: .. code-block:: bash salt-call infoblox.get_host hostname.domain.ca salt-call infoblox.get_host ipv4addr=123.123.122.12 salt-call infoblox.get_host mac=00:50:56:84:6e:ae ''' infoblox = _get_infoblox(**api_opts) host = infoblox.get_host(name=name, mac=mac, ipv4addr=ipv4addr, return_fields=return_fields) return host
[ "def", "get_host", "(", "name", "=", "None", ",", "ipv4addr", "=", "None", ",", "mac", "=", "None", ",", "return_fields", "=", "None", ",", "*", "*", "api_opts", ")", ":", "infoblox", "=", "_get_infoblox", "(", "*", "*", "api_opts", ")", "host", "=", "infoblox", ".", "get_host", "(", "name", "=", "name", ",", "mac", "=", "mac", ",", "ipv4addr", "=", "ipv4addr", ",", "return_fields", "=", "return_fields", ")", "return", "host" ]
32.066667
27.533333
def graft_neuron(root_section): '''Returns a neuron starting at root_section''' assert isinstance(root_section, Section) return Neuron(soma=Soma(root_section.points[:1]), neurites=[Neurite(root_section)])
[ "def", "graft_neuron", "(", "root_section", ")", ":", "assert", "isinstance", "(", "root_section", ",", "Section", ")", "return", "Neuron", "(", "soma", "=", "Soma", "(", "root_section", ".", "points", "[", ":", "1", "]", ")", ",", "neurites", "=", "[", "Neurite", "(", "root_section", ")", "]", ")" ]
53.25
17.75
def file(self, path): """ Reads the body to match from a disk file. Arguments: path (str): relative or absolute path to file to read from. Returns: self: current Mock instance. """ with open(path, 'r') as f: self.body(str(f.read()))
[ "def", "file", "(", "self", ",", "path", ")", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "self", ".", "body", "(", "str", "(", "f", ".", "read", "(", ")", ")", ")" ]
25.583333
16.25
def list_adresposities_by_subadres_and_huisnummer(self, subadres, huisnummer): ''' List all `adresposities` for a subadres and a :class:`Huisnummer`. :param subadres: A string representing a certain subadres. :param huisnummer: The :class:`Huisnummer` for which the \ `adresposities` are wanted. OR A huisnummer id. :rtype: A :class:`list` of :class:`Adrespositie` ''' try: hid = huisnummer.id except AttributeError: hid = huisnummer def creator(): res = crab_gateway_request( self.client, 'ListAdrespositiesBySubadres', subadres, hid ) try: return [Adrespositie( r.AdrespositieId, r.HerkomstAdrespositie )for r in res.AdrespositieItem] except AttributeError: return [] if self.caches['short'].is_configured: key = 'ListAdrespositiesBySubadres#%s%s' % (subadres, hid) adresposities = self.caches['short'].get_or_create(key, creator) else: adresposities = creator() for a in adresposities: a.set_gateway(self) return adresposities
[ "def", "list_adresposities_by_subadres_and_huisnummer", "(", "self", ",", "subadres", ",", "huisnummer", ")", ":", "try", ":", "hid", "=", "huisnummer", ".", "id", "except", "AttributeError", ":", "hid", "=", "huisnummer", "def", "creator", "(", ")", ":", "res", "=", "crab_gateway_request", "(", "self", ".", "client", ",", "'ListAdrespositiesBySubadres'", ",", "subadres", ",", "hid", ")", "try", ":", "return", "[", "Adrespositie", "(", "r", ".", "AdrespositieId", ",", "r", ".", "HerkomstAdrespositie", ")", "for", "r", "in", "res", ".", "AdrespositieItem", "]", "except", "AttributeError", ":", "return", "[", "]", "if", "self", ".", "caches", "[", "'short'", "]", ".", "is_configured", ":", "key", "=", "'ListAdrespositiesBySubadres#%s%s'", "%", "(", "subadres", ",", "hid", ")", "adresposities", "=", "self", ".", "caches", "[", "'short'", "]", ".", "get_or_create", "(", "key", ",", "creator", ")", "else", ":", "adresposities", "=", "creator", "(", ")", "for", "a", "in", "adresposities", ":", "a", ".", "set_gateway", "(", "self", ")", "return", "adresposities" ]
38.71875
18.34375
def add_role(self, role, description=None):
        """
        Creates a new auth group for the given role. Returns True on
        success, or False if the role already exists. The `description`
        argument is currently unused.
        """
        new_group = AuthGroup(role=role, creator=self.client)
        try:
            new_group.save()
            return True
        except NotUniqueError:
            return False
[ "def", "add_role", "(", "self", ",", "role", ",", "description", "=", "None", ")", ":", "new_group", "=", "AuthGroup", "(", "role", "=", "role", ",", "creator", "=", "self", ".", "client", ")", "try", ":", "new_group", ".", "save", "(", ")", "return", "True", "except", "NotUniqueError", ":", "return", "False" ]
32
13.375
def to_list(item_or_list): """ Convert a single item, a tuple, a generator or anything else to a list. :param item_or_list: single item or iterable to convert :return: a list """ if isinstance(item_or_list, list): return item_or_list elif isinstance(item_or_list, (str, bytes)): return [item_or_list] elif isinstance(item_or_list, Iterable): return list(item_or_list) else: return [item_or_list]
[ "def", "to_list", "(", "item_or_list", ")", ":", "if", "isinstance", "(", "item_or_list", ",", "list", ")", ":", "return", "item_or_list", "elif", "isinstance", "(", "item_or_list", ",", "(", "str", ",", "bytes", ")", ")", ":", "return", "[", "item_or_list", "]", "elif", "isinstance", "(", "item_or_list", ",", "Iterable", ")", ":", "return", "list", "(", "item_or_list", ")", "else", ":", "return", "[", "item_or_list", "]" ]
30
14.4
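A quick demo of the coercion rules above, with the Iterable import the original snippet assumes already in scope:

from collections.abc import Iterable

def to_list(item_or_list):
    if isinstance(item_or_list, list):
        return item_or_list
    if isinstance(item_or_list, (str, bytes)):
        return [item_or_list]
    if isinstance(item_or_list, Iterable):
        return list(item_or_list)
    return [item_or_list]

assert to_list([1, 2]) == [1, 2]                  # lists pass through
assert to_list("ab") == ["ab"]                    # strings stay whole
assert to_list(x * x for x in (1, 2)) == [1, 4]   # generators drain
assert to_list(3) == [3]                          # scalars get wrapped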
def build_constraints(self, coef, constraint_lam, constraint_l2):
        """
        builds the GAM block-diagonal constraint matrix in quadratic form
        out of constraint matrices specified for each feature.

        Parameters
        ----------
        coef : array-like
            containing the coefficients of a term

        constraint_lam : float,
            penalty to impose on the constraint.

            typically this is a very large number.

        constraint_l2 : float,
            loading to improve the numerical conditioning of the constraint
            matrix.

            typically this is a very small number.

        Returns
        -------
        C : sparse CSC matrix containing the model constraints in quadratic form
        """
        C = sp.sparse.csc_matrix(np.zeros((self.n_coefs, self.n_coefs)))
        for i in range(len(self._terms)):
            C += self._build_marginal_constraints(i, coef,
                                                  constraint_lam,
                                                  constraint_l2)

        return sp.sparse.csc_matrix(C)
[ "def", "build_constraints", "(", "self", ",", "coef", ",", "constraint_lam", ",", "constraint_l2", ")", ":", "C", "=", "sp", ".", "sparse", ".", "csc_matrix", "(", "np", ".", "zeros", "(", "(", "self", ".", "n_coefs", ",", "self", ".", "n_coefs", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_terms", ")", ")", ":", "C", "+=", "self", ".", "_build_marginal_constraints", "(", "i", ",", "coef", ",", "constraint_lam", ",", "constraint_l2", ")", "return", "sp", ".", "sparse", ".", "csc_matrix", "(", "C", ")" ]
33.62069
24.310345
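A standalone sketch of accumulating per-term constraint blocks into one sparse block-diagonal quadratic form, as the loop above does; the two identity blocks and the 1e6 penalty are made-up stand-ins for the _build_marginal_constraints output:

import numpy as np
import scipy.sparse as sp

n_coefs = 4
C = sp.csc_matrix((n_coefs, n_coefs))
for block, offset in ((np.eye(2) * 1e6, 0), (np.eye(2) * 1e6, 2)):
    P = sp.lil_matrix((n_coefs, n_coefs))
    P[offset:offset + 2, offset:offset + 2] = block   # place term's block
    C = C + P.tocsc()                                 # accumulate in CSC form
print(sp.csc_matrix(C).toarray())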