Columns:
  text: string (lengths 89 to 104k)
  code_tokens: list
  avg_line_len: float64 (7.91 to 980)
  score: float64 (0 to 630)
def addGagUser(self, userId, chatroomId, minute):
    """Gag (mute) a chatroom member.

    If you do not want a user to speak in a chatroom, you can gag that user
    there; a gagged user can still receive and read other users' chat
    messages, but cannot send messages.

    @param userId: user Id. (required)
    @param chatroomId: chatroom Id. (required)
    @param minute: gag duration in minutes, at most 43200. (required)
    @return code: return code, 200 means success.
    @return errorMessage: error message.
    """
    desc = {
        "name": "CodeSuccessReslut",
        "desc": " http 成功返回结果",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "返回码,200 为正常。"
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "错误信息。"
        }]
    }

    r = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/chatroom/user/gag/add.json',
        params={
            "userId": userId,
            "chatroomId": chatroomId,
            "minute": minute
        })
    return Response(r, desc)
[ "def", "addGagUser", "(", "self", ",", "userId", ",", "chatroomId", ",", "minute", ")", ":", "desc", "=", "{", "\"name\"", ":", "\"CodeSuccessReslut\"", ",", "\"desc\"", ":", "\" http 成功返回结果\",", "", "\"fields\"", ":", "[", "{", "\"name\"", ":", "\"code\"", ",", "\"type\"", ":", "\"Integer\"", ",", "\"desc\"", ":", "\"返回码,200 为正常。\"", "}", ",", "{", "\"name\"", ":", "\"errorMessage\"", ",", "\"type\"", ":", "\"String\"", ",", "\"desc\"", ":", "\"错误信息。\"", "}", "]", "}", "r", "=", "self", ".", "call_api", "(", "method", "=", "(", "'API'", ",", "'POST'", ",", "'application/x-www-form-urlencoded'", ")", ",", "action", "=", "'/chatroom/user/gag/add.json'", ",", "params", "=", "{", "\"userId\"", ":", "userId", ",", "\"chatroomId\"", ":", "chatroomId", ",", "\"minute\"", ":", "minute", "}", ")", "return", "Response", "(", "r", ",", "desc", ")" ]
avg_line_len: 31.212121
score: 13.848485
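A hypothetical call sketch for addGagUser above; the client object and the identifiers are placeholders, not taken from the source:

resp = client.addGagUser(userId='user01', chatroomId='room01', minute=10)
# resp wraps the HTTP result described by desc; code == 200 indicates success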
def create(self, metric_id, value, timestamp=None):
    """Add a Metric Point to a Metric

    :param int metric_id: Metric ID
    :param int value: Value to plot on the metric graph
    :param str timestamp: Unix timestamp when the point was measured
    :return: Created metric point data (:class:`dict`)

    .. seealso:: https://docs.cachethq.io/reference#post-metric-points
    """
    data = ApiParams()
    data['value'] = value
    data['timestamp'] = timestamp
    return self._post('metrics/%s/points' % metric_id, data=data)['data']
[ "def", "create", "(", "self", ",", "metric_id", ",", "value", ",", "timestamp", "=", "None", ")", ":", "data", "=", "ApiParams", "(", ")", "data", "[", "'value'", "]", "=", "value", "data", "[", "'timestamp'", "]", "=", "timestamp", "return", "self", ".", "_post", "(", "'metrics/%s/points'", "%", "metric_id", ",", "data", "=", "data", ")", "[", "'data'", "]" ]
avg_line_len: 40.857143
score: 18.428571
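A hedged usage sketch for the metric-point create method; the object exposing it is called `points` here purely for illustration:

point = points.create(metric_id=1, value=42)  # timestamp defaults to None
# point is the created metric point data as a dict, per the docstring
print(point)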
def addAsn1MibSource(self, *asn1Sources, **kwargs):
    """Adds path to a repository to search ASN.1 MIB files.

    Parameters
    ----------
    *asn1Sources :
        one or more URLs in form of :py:obj:`str` identifying local or
        remote ASN.1 MIB repositories. Path must include the *@mib@*
        component which will be replaced with MIB module name at the time
        of search.

    Returns
    -------
    : :py:class:`~pysnmp.smi.rfc1902.ObjectIdentity`
        reference to itself

    Notes
    -----
    Please refer to :py:class:`~pysmi.reader.localfile.FileReader`,
    :py:class:`~pysmi.reader.httpclient.HttpReader` and
    :py:class:`~pysmi.reader.ftpclient.FtpReader` classes for in-depth
    information on ASN.1 MIB lookup.

    Examples
    --------
    >>> ObjectIdentity('SNMPv2-MIB', 'sysDescr').addAsn1MibSource('http://mibs.snmplabs.com/asn1/@mib@')
    ObjectIdentity('SNMPv2-MIB', 'sysDescr')
    >>>
    """
    if self._asn1SourcesToAdd is None:
        self._asn1SourcesToAdd = asn1Sources
    else:
        self._asn1SourcesToAdd += asn1Sources

    if self._asn1SourcesOptions:
        self._asn1SourcesOptions.update(kwargs)
    else:
        self._asn1SourcesOptions = kwargs

    return self
[ "def", "addAsn1MibSource", "(", "self", ",", "*", "asn1Sources", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_asn1SourcesToAdd", "is", "None", ":", "self", ".", "_asn1SourcesToAdd", "=", "asn1Sources", "else", ":", "self", ".", "_asn1SourcesToAdd", "+=", "asn1Sources", "if", "self", ".", "_asn1SourcesOptions", ":", "self", ".", "_asn1SourcesOptions", ".", "update", "(", "kwargs", ")", "else", ":", "self", ".", "_asn1SourcesOptions", "=", "kwargs", "return", "self" ]
avg_line_len: 31.571429
score: 23.333333
def plot(self, title=LABEL_DEFAULT, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT):
    """
    Create a Plot object representing the SArray.

    Notes
    -----
    - The plot will render either inline in a Jupyter Notebook, or in a
      native GUI window, depending on the value provided in
      `turicreate.visualization.set_target` (defaults to 'auto').

    Parameters
    ----------
    title : str
        The plot title to show for the resulting visualization. If the
        title is None, the title will be omitted.

    xlabel : str
        The X axis label to show for the resulting visualization. If the
        xlabel is None, the X axis label will be omitted.

    ylabel : str
        The Y axis label to show for the resulting visualization. If the
        ylabel is None, the Y axis label will be omitted.

    Returns
    -------
    out : Plot
        A :class:`Plot` object that is the visualization of the SArray.

    Examples
    --------
    Suppose 'sa' is an SArray; we can create a plot of it using:

    >>> plt = sa.plot()

    To override the default plot title and axis labels:

    >>> plt = sa.plot(title="My Plot Title", xlabel="My X Axis", ylabel="My Y Axis")

    We can then visualize the plot using:

    >>> plt.show()
    """
    if title == "":
        title = " "
    if xlabel == "":
        xlabel = " "
    if ylabel == "":
        ylabel = " "

    if title is None:
        title = ""  # C++ otherwise gets "None" as std::string
    if xlabel is None:
        xlabel = ""
    if ylabel is None:
        ylabel = ""

    return Plot(self.__proxy__.plot(title, xlabel, ylabel))
[ "def", "plot", "(", "self", ",", "title", "=", "LABEL_DEFAULT", ",", "xlabel", "=", "LABEL_DEFAULT", ",", "ylabel", "=", "LABEL_DEFAULT", ")", ":", "if", "title", "==", "\"\"", ":", "title", "=", "\" \"", "if", "xlabel", "==", "\"\"", ":", "xlabel", "=", "\" \"", "if", "ylabel", "==", "\"\"", ":", "ylabel", "=", "\" \"", "if", "title", "is", "None", ":", "title", "=", "\"\"", "# C++ otherwise gets \"None\" as std::string", "if", "xlabel", "is", "None", ":", "xlabel", "=", "\"\"", "if", "ylabel", "is", "None", ":", "ylabel", "=", "\"\"", "return", "Plot", "(", "self", ".", "__proxy__", ".", "plot", "(", "title", ",", "xlabel", ",", "ylabel", ")", ")" ]
avg_line_len: 29.423729
score: 25.830508
def get_edge_list(self):
    r"""Return an edge list, an alternative representation of the graph.

    Each edge :math:`e_k = (v_i, v_j) \in \mathcal{E}` from :math:`v_i` to
    :math:`v_j` is associated with the weight :math:`W[i, j]`. For each edge
    :math:`e_k`, the method returns :math:`(i, j, W[i, j])` as
    `(sources[k], targets[k], weights[k])`, with :math:`i \in [0, |\mathcal{V}|-1],
    j \in [0, |\mathcal{V}|-1], k \in [0, |\mathcal{E}|-1]`.

    Returns
    -------
    sources : vector of int
        Source node indices.
    targets : vector of int
        Target node indices.
    weights : vector of float
        Edge weights.

    Notes
    -----
    The weighted adjacency matrix is the canonical form used in this
    package to represent a graph as it is the easiest to work with when
    considering spectral methods.

    Edge orientation (i.e., which node is the source or the target) is
    arbitrary for undirected graphs. The implementation uses the upper
    triangular part of the adjacency matrix, hence :math:`i \leq j \ \forall k`.

    Examples
    --------
    Edge list of a directed graph.

    >>> graph = graphs.Graph([
    ...     [0, 3, 0],
    ...     [3, 0, 4],
    ...     [0, 0, 0],
    ... ])
    >>> sources, targets, weights = graph.get_edge_list()
    >>> list(sources), list(targets), list(weights)
    ([0, 1, 1], [1, 0, 2], [3, 3, 4])

    Edge list of an undirected graph.

    >>> graph = graphs.Graph([
    ...     [0, 3, 0],
    ...     [3, 0, 4],
    ...     [0, 4, 0],
    ... ])
    >>> sources, targets, weights = graph.get_edge_list()
    >>> list(sources), list(targets), list(weights)
    ([0, 1], [1, 2], [3, 4])
    """
    if self.is_directed():
        W = self.W.tocoo()
    else:
        W = sparse.triu(self.W, format='coo')

    sources = W.row
    targets = W.col
    weights = W.data

    assert self.n_edges == sources.size == targets.size == weights.size
    return sources, targets, weights
[ "def", "get_edge_list", "(", "self", ")", ":", "if", "self", ".", "is_directed", "(", ")", ":", "W", "=", "self", ".", "W", ".", "tocoo", "(", ")", "else", ":", "W", "=", "sparse", ".", "triu", "(", "self", ".", "W", ",", "format", "=", "'coo'", ")", "sources", "=", "W", ".", "row", "targets", "=", "W", ".", "col", "weights", "=", "W", ".", "data", "assert", "self", ".", "n_edges", "==", "sources", ".", "size", "==", "targets", ".", "size", "==", "weights", ".", "size", "return", "sources", ",", "targets", ",", "weights" ]
avg_line_len: 31.338235
score: 21.117647
def run_recipe(recipe, task, rinput, workenv, logger_control):
    """Recipe execution mode of numina."""

    # Creating custom logger file
    recipe_logger = logging.getLogger(logger_control['default'])
    if logger_control['enabled']:
        logfile = os.path.join(workenv.resultsdir, logger_control['logfile'])
        logformat = logger_control['format']
        _logger.debug('creating file logger %r from Recipe logger', logfile)
        fh = create_recipe_file_logger(recipe_logger, logfile, logformat)
    else:
        fh = logging.NullHandler()

    recipe_logger.addHandler(fh)

    with working_directory(workenv.workdir):
        try:
            run_recipe_timed(task, recipe, rinput)
            return task
        finally:
            recipe_logger.removeHandler(fh)
[ "def", "run_recipe", "(", "recipe", ",", "task", ",", "rinput", ",", "workenv", ",", "logger_control", ")", ":", "# Creating custom logger file", "recipe_logger", "=", "logging", ".", "getLogger", "(", "logger_control", "[", "'default'", "]", ")", "if", "logger_control", "[", "'enabled'", "]", ":", "logfile", "=", "os", ".", "path", ".", "join", "(", "workenv", ".", "resultsdir", ",", "logger_control", "[", "'logfile'", "]", ")", "logformat", "=", "logger_control", "[", "'format'", "]", "_logger", ".", "debug", "(", "'creating file logger %r from Recipe logger'", ",", "logfile", ")", "fh", "=", "create_recipe_file_logger", "(", "recipe_logger", ",", "logfile", ",", "logformat", ")", "else", ":", "fh", "=", "logging", ".", "NullHandler", "(", ")", "recipe_logger", ".", "addHandler", "(", "fh", ")", "with", "working_directory", "(", "workenv", ".", "workdir", ")", ":", "try", ":", "run_recipe_timed", "(", "task", ",", "recipe", ",", "rinput", ")", "return", "task", "finally", ":", "recipe_logger", ".", "removeHandler", "(", "fh", ")" ]
avg_line_len: 34.863636
score: 20.954545
def regex_to_sql_like(regex_text: str,
                      single_wildcard: str = "_",
                      zero_or_more_wildcard: str = "%") -> List[str]:
    """
    Converts regular expression text to a reasonably close fragment
    for the SQL ``LIKE`` operator.

    NOT PERFECT, but works for current built-in regular expressions.

    Args:
        regex_text: regular expression text to work with
        single_wildcard: SQL single wildcard, typically an underscore
        zero_or_more_wildcard: SQL "zero/one/many" wildcard,
            probably always a percent symbol

    Returns:
        list of strings for SQL string literals

    Raises:
        :exc:`ValueError` for some regex text that it doesn't understand
        properly
    """
    def append_to_all(new_content: str) -> None:
        nonlocal results
        results = [r + new_content for r in results]

    def split_and_append(new_options: List[str]) -> None:
        nonlocal results
        newresults = []  # type: List[str]
        for option in new_options:
            newresults.extend([r + option for r in results])
        results = newresults

    def deduplicate_wildcards(text: str) -> str:
        while zero_or_more_wildcard + zero_or_more_wildcard in text:
            text = text.replace(
                zero_or_more_wildcard + zero_or_more_wildcard,
                zero_or_more_wildcard)
        return text

    # Basic processing
    working = regex_text  # strings are immutable
    results = [zero_or_more_wildcard]  # start with a wildcard
    while working:
        if working.startswith(".*"):  # e.g. ".*ozapi"
            append_to_all(zero_or_more_wildcard)
            working = working[2:]
        elif working.startswith("["):  # e.g. "[io]peridol"
            close_bracket = working.index("]")  # may raise
            bracketed = working[1:close_bracket]
            option_groups = bracketed.split("|")
            options = [c for group in option_groups for c in group]
            split_and_append(options)
            working = working[close_bracket + 1:]
        elif len(working) > 1 and working[1] == "?":  # e.g. "r?azole"
            split_and_append(["", working[0]])
            # ... regex "optional character"
            # ... SQL: some results with a single wildcard, some without
            working = working[2:]
        elif working.startswith("."):  # single character wildcard
            append_to_all(single_wildcard)
            working = working[1:]
        else:
            append_to_all(working[0])
            working = working[1:]
    append_to_all(zero_or_more_wildcard)  # end with a wildcard

    # Remove any duplicate (consecutive) % wildcards:
    results = [deduplicate_wildcards(r) for r in results]

    # Done
    return results
[ "def", "regex_to_sql_like", "(", "regex_text", ":", "str", ",", "single_wildcard", ":", "str", "=", "\"_\"", ",", "zero_or_more_wildcard", ":", "str", "=", "\"%\"", ")", "->", "List", "[", "str", "]", ":", "def", "append_to_all", "(", "new_content", ":", "str", ")", "->", "None", ":", "nonlocal", "results", "results", "=", "[", "r", "+", "new_content", "for", "r", "in", "results", "]", "def", "split_and_append", "(", "new_options", ":", "List", "[", "str", "]", ")", "->", "None", ":", "nonlocal", "results", "newresults", "=", "[", "]", "# type: List[str]", "for", "option", "in", "new_options", ":", "newresults", ".", "extend", "(", "[", "r", "+", "option", "for", "r", "in", "results", "]", ")", "results", "=", "newresults", "def", "deduplicate_wildcards", "(", "text", ":", "str", ")", "->", "str", ":", "while", "zero_or_more_wildcard", "+", "zero_or_more_wildcard", "in", "text", ":", "text", "=", "text", ".", "replace", "(", "zero_or_more_wildcard", "+", "zero_or_more_wildcard", ",", "zero_or_more_wildcard", ")", "return", "text", "# Basic processing", "working", "=", "regex_text", "# strings are immutable", "results", "=", "[", "zero_or_more_wildcard", "]", "# start with a wildcard", "while", "working", ":", "if", "working", ".", "startswith", "(", "\".*\"", ")", ":", "# e.g. \".*ozapi\"", "append_to_all", "(", "zero_or_more_wildcard", ")", "working", "=", "working", "[", "2", ":", "]", "elif", "working", ".", "startswith", "(", "\"[\"", ")", ":", "# e.g. \"[io]peridol\"", "close_bracket", "=", "working", ".", "index", "(", "\"]\"", ")", "# may raise", "bracketed", "=", "working", "[", "1", ":", "close_bracket", "]", "option_groups", "=", "bracketed", ".", "split", "(", "\"|\"", ")", "options", "=", "[", "c", "for", "group", "in", "option_groups", "for", "c", "in", "group", "]", "split_and_append", "(", "options", ")", "working", "=", "working", "[", "close_bracket", "+", "1", ":", "]", "elif", "len", "(", "working", ")", ">", "1", "and", "working", "[", "1", "]", "==", "\"?\"", ":", "# e.g. \"r?azole\"", "split_and_append", "(", "[", "\"\"", ",", "working", "[", "0", "]", "]", ")", "# ... regex \"optional character\"", "# ... SQL: some results with a single wildcard, some without", "working", "=", "working", "[", "2", ":", "]", "elif", "working", ".", "startswith", "(", "\".\"", ")", ":", "# single character wildcard", "append_to_all", "(", "single_wildcard", ")", "working", "=", "working", "[", "1", ":", "]", "else", ":", "append_to_all", "(", "working", "[", "0", "]", ")", "working", "=", "working", "[", "1", ":", "]", "append_to_all", "(", "zero_or_more_wildcard", ")", "# end with a wildcard", "# Remove any duplicate (consecutive) % wildcards:", "results", "=", "[", "deduplicate_wildcards", "(", "r", ")", "for", "r", "in", "results", "]", "# Done", "return", "results" ]
avg_line_len: 39.103896
score: 17.883117
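Tracing the branches above gives these conversions (outputs derived by hand from the code, not quoted from the source's tests):

regex_to_sql_like('.*ozapi')      # ['%ozapi%']
regex_to_sql_like('[io]peridol')  # ['%iperidol%', '%operidol%']
regex_to_sql_like('r?azole')      # ['%azole%', '%razole%']
regex_to_sql_like('a.c')          # ['%a_c%']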
def _compute_distance(self, rup, dists, C):
    """
    Compute the distance function, equation (9).
    """
    mref = 3.6
    rref = 1.0
    rval = np.sqrt(dists.rhypo ** 2 + C['h'] ** 2)
    return (C['c1'] + C['c2'] * (rup.mag - mref)) *\
        np.log10(rval / rref) + C['c3'] * (rval - rref)
[ "def", "_compute_distance", "(", "self", ",", "rup", ",", "dists", ",", "C", ")", ":", "mref", "=", "3.6", "rref", "=", "1.0", "rval", "=", "np", ".", "sqrt", "(", "dists", ".", "rhypo", "**", "2", "+", "C", "[", "'h'", "]", "**", "2", ")", "return", "(", "C", "[", "'c1'", "]", "+", "C", "[", "'c2'", "]", "*", "(", "rup", ".", "mag", "-", "mref", ")", ")", "*", "np", ".", "log10", "(", "rval", "/", "rref", ")", "+", "C", "[", "'c3'", "]", "*", "(", "rval", "-", "rref", ")" ]
avg_line_len: 35.777778
score: 12
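Written out, the return statement computes the distance term below; this is a direct transcription of the code, with the coefficients c1, c2, c3 and h supplied by the C table:

\left[c_1 + c_2\,(M - M_{\mathrm{ref}})\right]\log_{10}\!\left(\frac{\sqrt{R_{\mathrm{hypo}}^2 + h^2}}{R_{\mathrm{ref}}}\right) + c_3\left(\sqrt{R_{\mathrm{hypo}}^2 + h^2} - R_{\mathrm{ref}}\right),\qquad M_{\mathrm{ref}} = 3.6,\ R_{\mathrm{ref}} = 1.0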
def resolve_schema_instance(schema):
    """Return schema instance for given schema (instance or class)

    :param type|Schema|str schema: instance, class or class name of
        marshmallow.Schema
    :return: schema instance of given schema (instance or class)
    """
    if isinstance(schema, type) and issubclass(schema, marshmallow.Schema):
        return schema()
    if isinstance(schema, marshmallow.Schema):
        return schema
    try:
        return marshmallow.class_registry.get_class(schema)()
    except marshmallow.exceptions.RegistryError:
        raise ValueError(
            "{!r} is not a marshmallow.Schema subclass or instance and has not"
            " been registered in the Marshmallow class registry.".format(schema)
        )
[ "def", "resolve_schema_instance", "(", "schema", ")", ":", "if", "isinstance", "(", "schema", ",", "type", ")", "and", "issubclass", "(", "schema", ",", "marshmallow", ".", "Schema", ")", ":", "return", "schema", "(", ")", "if", "isinstance", "(", "schema", ",", "marshmallow", ".", "Schema", ")", ":", "return", "schema", "try", ":", "return", "marshmallow", ".", "class_registry", ".", "get_class", "(", "schema", ")", "(", ")", "except", "marshmallow", ".", "exceptions", ".", "RegistryError", ":", "raise", "ValueError", "(", "\"{!r} is not a marshmallow.Schema subclass or instance and has not\"", "\" been registered in the Marshmallow class registry.\"", ".", "format", "(", "schema", ")", ")" ]
avg_line_len: 43.176471
score: 22.176471
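A minimal sketch of the three accepted inputs; PetSchema is an illustrative schema (marshmallow registers Schema subclasses by name when they are defined, which is what the string lookup relies on):

import marshmallow

class PetSchema(marshmallow.Schema):
    pass  # fields omitted; illustrative only

resolve_schema_instance(PetSchema)    # class -> new instance
resolve_schema_instance(PetSchema())  # instance -> returned unchanged
resolve_schema_instance('PetSchema')  # registered name -> new instance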
def delete_csi_node(self, name, **kwargs):
    """
    delete a CSINode

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_csi_node(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the CSINode (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param V1DeleteOptions body:
    :param str dry_run: When present, indicates that modifications should not be
        persisted. An invalid or unrecognized dryRun directive will result in an
        error response and no further processing of the request. Valid values
        are: - All: all dry run stages will be processed
    :param int grace_period_seconds: The duration in seconds before the object
        should be deleted. Value must be non-negative integer. The value zero
        indicates delete immediately. If this value is nil, the default grace
        period for the specified type will be used. Defaults to a per object
        value if not specified. zero means delete immediately.
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy,
        this field will be deprecated in 1.7. Should the dependent objects be
        orphaned. If true/false, the \"orphan\" finalizer will be added to/removed
        from the object's finalizers list. Either this field or PropagationPolicy
        may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be
        performed. Either this field or OrphanDependents may be set, but not
        both. The default policy is decided by the existing finalizer set in the
        metadata.finalizers and the resource-specific default policy. Acceptable
        values are: 'Orphan' - orphan the dependents; 'Background' - allow the
        garbage collector to delete the dependents in the background;
        'Foreground' - a cascading policy that deletes all dependents in the
        foreground.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_csi_node_with_http_info(name, **kwargs)
    else:
        (data) = self.delete_csi_node_with_http_info(name, **kwargs)
        return data
[ "def", "delete_csi_node", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "delete_csi_node_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "delete_csi_node_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "return", "data" ]
avg_line_len: 90.653846
score: 63.807692
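A usage sketch of the two calling modes documented above; `api` and the node name are placeholders:

status = api.delete_csi_node('my-csi-node')            # synchronous, returns V1Status

thread = api.delete_csi_node('my-csi-node', async_req=True)
status = thread.get()                                  # asynchronous, join for the result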
def bounds(self) -> Tuple[float, float, float, float]:
    """Returns the bounds of the shape.

    Bounds are given in the following order in the origin crs:
    west, south, east, north
    """
    return self.shape.bounds
[ "def", "bounds", "(", "self", ")", "->", "Tuple", "[", "float", ",", "float", ",", "float", ",", "float", "]", ":", "return", "self", ".", "shape", ".", "bounds" ]
avg_line_len: 39.666667
score: 9.333333
def histogram_intersection(h1, h2):  # 6 us @array, 30 us @list \w 100 bins
    r"""
    Calculate the common part of two histograms.

    The histogram intersection between two histograms :math:`H` and
    :math:`H'` of size :math:`m` is defined as:

    .. math::

        d_{\cap}(H, H') = \sum_{m=1}^M\min(H_m, H'_m)

    *Attributes:*

    - a real metric

    *Attributes for normalized histograms:*

    - :math:`d(H, H')\in[0, 1]`
    - :math:`d(H, H) = 1`
    - :math:`d(H, H') = d(H', H)`

    *Attributes for not-normalized histograms:*

    - not applicable

    *Attributes for not-equal histograms:*

    - not applicable

    Parameters
    ----------
    h1 : sequence
        The first histogram, normalized.
    h2 : sequence
        The second histogram, normalized, same bins as ``h1``.

    Returns
    -------
    histogram_intersection : float
        Intersection between the two histograms.
    """
    h1, h2 = __prepare_histogram(h1, h2)
    return scipy.sum(scipy.minimum(h1, h2))
[ "def", "histogram_intersection", "(", "h1", ",", "h2", ")", ":", "# 6 us @array, 30 us @list \\w 100 bins", "h1", ",", "h2", "=", "__prepare_histogram", "(", "h1", ",", "h2", ")", "return", "scipy", ".", "sum", "(", "scipy", ".", "minimum", "(", "h1", ",", "h2", ")", ")" ]
avg_line_len: 23.27907
score: 22.604651
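A quick numeric check against the definition, assuming __prepare_histogram accepts plain normalized lists:

h1 = [0.2, 0.5, 0.3]
h2 = [0.3, 0.4, 0.3]
# element-wise minima: 0.2 + 0.4 + 0.3 = 0.9
assert abs(histogram_intersection(h1, h2) - 0.9) < 1e-12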
def comma_join(fields, oxford=True):
    """
    Join together words.
    """
    def fmt(field):
        return "'%s'" % field

    if not fields:
        return "nothing"
    elif len(fields) == 1:
        return fmt(fields[0])
    elif len(fields) == 2:
        return " and ".join([fmt(f) for f in fields])
    else:
        result = ", ".join([fmt(f) for f in fields[:-1]])
        if oxford:
            result += ","
        result += " and %s" % fmt(fields[-1])
        return result
[ "def", "comma_join", "(", "fields", ",", "oxford", "=", "True", ")", ":", "def", "fmt", "(", "field", ")", ":", "return", "\"'%s'\"", "%", "field", "if", "not", "fields", ":", "return", "\"nothing\"", "elif", "len", "(", "fields", ")", "==", "1", ":", "return", "fmt", "(", "fields", "[", "0", "]", ")", "elif", "len", "(", "fields", ")", "==", "2", ":", "return", "\" and \"", ".", "join", "(", "[", "fmt", "(", "f", ")", "for", "f", "in", "fields", "]", ")", "else", ":", "result", "=", "\", \"", ".", "join", "(", "[", "fmt", "(", "f", ")", "for", "f", "in", "fields", "[", ":", "-", "1", "]", "]", ")", "if", "oxford", ":", "result", "+=", "\",\"", "result", "+=", "\" and %s\"", "%", "fmt", "(", "fields", "[", "-", "1", "]", ")", "return", "result" ]
avg_line_len: 25.944444
score: 17.5
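Each branch above produces, by direct trace:

comma_join([])                             # "nothing"
comma_join(['a'])                          # "'a'"
comma_join(['a', 'b'])                     # "'a' and 'b'"
comma_join(['a', 'b', 'c'])                # "'a', 'b', and 'c'"
comma_join(['a', 'b', 'c'], oxford=False)  # "'a', 'b' and 'c'"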
def search_form(context, search_model_names=None):
    """
    Includes the search form with a list of models to use as choices
    for filtering the search by. Models should be a string with models
    in the format ``app_label.model_name`` separated by spaces. The
    string ``all`` can also be used, in which case the models defined
    by the ``SEARCH_MODEL_CHOICES`` setting will be used.
    """
    template_vars = {
        "request": context["request"],
    }
    if not search_model_names or not settings.SEARCH_MODEL_CHOICES:
        search_model_names = []
    elif search_model_names == "all":
        search_model_names = list(settings.SEARCH_MODEL_CHOICES)
    else:
        search_model_names = search_model_names.split(" ")
    search_model_choices = []
    for model_name in search_model_names:
        try:
            model = apps.get_model(*model_name.split(".", 1))
        except LookupError:
            pass
        else:
            verbose_name = model._meta.verbose_name_plural.capitalize()
            search_model_choices.append((verbose_name, model_name))
    template_vars["search_model_choices"] = sorted(search_model_choices)
    return template_vars
[ "def", "search_form", "(", "context", ",", "search_model_names", "=", "None", ")", ":", "template_vars", "=", "{", "\"request\"", ":", "context", "[", "\"request\"", "]", ",", "}", "if", "not", "search_model_names", "or", "not", "settings", ".", "SEARCH_MODEL_CHOICES", ":", "search_model_names", "=", "[", "]", "elif", "search_model_names", "==", "\"all\"", ":", "search_model_names", "=", "list", "(", "settings", ".", "SEARCH_MODEL_CHOICES", ")", "else", ":", "search_model_names", "=", "search_model_names", ".", "split", "(", "\" \"", ")", "search_model_choices", "=", "[", "]", "for", "model_name", "in", "search_model_names", ":", "try", ":", "model", "=", "apps", ".", "get_model", "(", "*", "model_name", ".", "split", "(", "\".\"", ",", "1", ")", ")", "except", "LookupError", ":", "pass", "else", ":", "verbose_name", "=", "model", ".", "_meta", ".", "verbose_name_plural", ".", "capitalize", "(", ")", "search_model_choices", ".", "append", "(", "(", "verbose_name", ",", "model_name", ")", ")", "template_vars", "[", "\"search_model_choices\"", "]", "=", "sorted", "(", "search_model_choices", ")", "return", "template_vars" ]
avg_line_len: 41.357143
score: 19.285714
def load_model(model_cls_path, model_cls_name, model_load_args):
    """Get an instance of the described model.

    Args:
        model_cls_path: Path to the module in which the model class
            is defined.
        model_cls_name: Name of the model class.
        model_load_args: Dictionary of args to pass to the `load` method
            of the model instance.

    Returns:
        An instance of :class:`.models.model.BaseModel` or subclass
    """
    spec = importlib.util.spec_from_file_location('active_model',
                                                  model_cls_path)
    model_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(model_module)
    model_cls = getattr(model_module, model_cls_name)
    model = model_cls()
    if not isinstance(model, BaseModel):
        warnings.warn("Loaded model '%s' at '%s' is not an instance of %r"
                      % (model_cls_name, model_cls_path, BaseModel))
    model.load(**model_load_args)
    return model
[ "def", "load_model", "(", "model_cls_path", ",", "model_cls_name", ",", "model_load_args", ")", ":", "spec", "=", "importlib", ".", "util", ".", "spec_from_file_location", "(", "'active_model'", ",", "model_cls_path", ")", "model_module", "=", "importlib", ".", "util", ".", "module_from_spec", "(", "spec", ")", "spec", ".", "loader", ".", "exec_module", "(", "model_module", ")", "model_cls", "=", "getattr", "(", "model_module", ",", "model_cls_name", ")", "model", "=", "model_cls", "(", ")", "if", "not", "isinstance", "(", "model", ",", "BaseModel", ")", ":", "warnings", ".", "warn", "(", "\"Loaded model '%s' at '%s' is not an instance of %r\"", "%", "(", "model_cls_name", ",", "model_cls_path", ",", "BaseModel", ")", ")", "model", ".", "load", "(", "*", "*", "model_load_args", ")", "return", "model" ]
avg_line_len: 39.32
score: 20.4
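A hypothetical invocation; the path, class name, and load kwargs are placeholders for whatever model file is being wrapped:

model = load_model(
    model_cls_path='/path/to/my_model.py',         # hypothetical module path
    model_cls_name='MyModel',                      # hypothetical BaseModel subclass
    model_load_args={'weights': '/path/to/w.h5'},  # forwarded to MyModel.load()
)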
def safe_unit_norm(a):
    """
    Ensure that the vector or vectors have unit norm
    """
    if 1 == len(a.shape):
        n = np.linalg.norm(a)
        if n:
            return a / n
        return a

    norm = np.sum(np.abs(a) ** 2, axis=-1) ** (1. / 2)
    # Dividing by a norm of zero will cause a warning to be issued. Set those
    # values to another number. It doesn't matter what, since we'll be dividing
    # a vector of zeros by the number, and 0 / N always equals 0.
    norm[norm == 0] = -1e12
    return a / norm[:, np.newaxis]
[ "def", "safe_unit_norm", "(", "a", ")", ":", "if", "1", "==", "len", "(", "a", ".", "shape", ")", ":", "n", "=", "np", ".", "linalg", ".", "norm", "(", "a", ")", "if", "n", ":", "return", "a", "/", "n", "return", "a", "norm", "=", "np", ".", "sum", "(", "np", ".", "abs", "(", "a", ")", "**", "2", ",", "axis", "=", "-", "1", ")", "**", "(", "1.", "/", "2", ")", "# Dividing by a norm of zero will cause a warning to be issued. Set those", "# values to another number. It doesn't matter what, since we'll be dividing", "# a vector of zeros by the number, and 0 / N always equals 0.", "norm", "[", "norm", "==", "0", "]", "=", "-", "1e12", "return", "a", "/", "norm", "[", ":", ",", "np", ".", "newaxis", "]" ]
avg_line_len: 31.235294
score: 19.823529
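A sanity check of both branches (inputs chosen here, not from the source):

import numpy as np

v = np.array([3.0, 4.0])
print(safe_unit_norm(v))   # [0.6 0.8]: single vector divided by its L2 norm 5

m = np.array([[3.0, 4.0], [0.0, 0.0]])
print(safe_unit_norm(m))   # first row -> [0.6 0.8]; the all-zero row stays ~0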
def append_multi(self, keys, format=None, persist_to=0, replicate_to=0):
    """Append to multiple keys. Multi variant of :meth:`append`.

    .. warning::

        If using the `Item` interface, use the :meth:`append_items`
        and :meth:`prepend_items` instead, as those will automatically
        update the :attr:`.Item.value` property upon successful
        completion.

    .. seealso:: :meth:`append`, :meth:`upsert_multi`, :meth:`upsert`
    """
    return _Base.append_multi(self, keys, format=format,
                              persist_to=persist_to,
                              replicate_to=replicate_to)
[ "def", "append_multi", "(", "self", ",", "keys", ",", "format", "=", "None", ",", "persist_to", "=", "0", ",", "replicate_to", "=", "0", ")", ":", "return", "_Base", ".", "append_multi", "(", "self", ",", "keys", ",", "format", "=", "format", ",", "persist_to", "=", "persist_to", ",", "replicate_to", "=", "replicate_to", ")" ]
avg_line_len: 43.666667
score: 22.4
def __dp(self):
    """
    Get the I{default} port if defined in the I{options}.

    @return: A L{MethodSelector} for the I{default} port.
    @rtype: L{MethodSelector}.
    """
    dp = self.__client.options.port
    if dp is None:
        return None
    else:
        return self.__find(dp)
[ "def", "__dp", "(", "self", ")", ":", "dp", "=", "self", ".", "__client", ".", "options", ".", "port", "if", "dp", "is", "None", ":", "return", "None", "else", ":", "return", "self", ".", "__find", "(", "dp", ")" ]
avg_line_len: 29.454545
score: 12.909091
def get_publish_profile(self, webspace_name, website_name):
    '''
    Get a site's publish profile as an object

    webspace_name:
        The name of the webspace.
    website_name:
        The name of the website.
    '''
    return self._perform_get(
        self._get_publishxml_path(webspace_name, website_name),
        PublishData)
[ "def", "get_publish_profile", "(", "self", ",", "webspace_name", ",", "website_name", ")", ":", "return", "self", ".", "_perform_get", "(", "self", ".", "_get_publishxml_path", "(", "webspace_name", ",", "website_name", ")", ",", "PublishData", ")" ]
avg_line_len: 34.454545
score: 20.272727
def plot(self):
    """Plot the empirical histogram versus best-fit distribution's PDF."""
    plt.plot(self.bin_edges, self.hist, self.bin_edges, self.best_pdf)
[ "def", "plot", "(", "self", ")", ":", "plt", ".", "plot", "(", "self", ".", "bin_edges", ",", "self", ".", "hist", ",", "self", ".", "bin_edges", ",", "self", ".", "best_pdf", ")" ]
avg_line_len: 56.333333
score: 19.333333
def run_transgene(job, snpeffed_file, rna_bam, univ_options, transgene_options,
                  tumor_dna_bam=None, fusion_calls=None):
    """
    Run transgene on an input snpeffed vcf file and return the peptides for
    MHC prediction.

    :param toil.fileStore.FileID snpeffed_file: fsID for snpeffed vcf
    :param dict rna_bam: The dict of bams returned by running star
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict transgene_options: Options specific to Transgene
    :param dict tumor_dna_bam: The dict of bams returned by running bwa
    :return: A dictionary of 9 files (9-, 10-, and 15-mer peptides each for
             Tumor and Normal and the corresponding .map files for the 3 Tumor
             fastas)
             output_files:
                 |- 'transgened_normal_10_mer_peptides.faa': fsID
                 |- 'transgened_normal_15_mer_peptides.faa': fsID
                 |- 'transgened_normal_9_mer_peptides.faa': fsID
                 |- 'transgened_tumor_10_mer_peptides.faa': fsID
                 |- 'transgened_tumor_10_mer_peptides.faa.map': fsID
                 |- 'transgened_tumor_15_mer_peptides.faa': fsID
                 |- 'transgened_tumor_15_mer_peptides.faa.map': fsID
                 |- 'transgened_tumor_9_mer_peptides.faa': fsID
                 +- 'transgened_tumor_9_mer_peptides.faa.map': fsID
    :rtype: dict
    """
    assert snpeffed_file or fusion_calls
    work_dir = os.getcwd()
    input_files = {
        'pepts.fa.tar.gz': transgene_options['gencode_peptide_fasta'],
        'annotation.gtf.tar.gz': transgene_options['gencode_annotation_gtf'],
        'genome.fa.tar.gz': transgene_options['genome_fasta']
    }
    if snpeffed_file is not None:
        input_files.update({
            'snpeffed_muts.vcf': snpeffed_file})
    if rna_bam:
        input_files.update({
            'rna.bam': rna_bam['rna_genome']['rna_genome_sorted.bam'],
            'rna.bam.bai': rna_bam['rna_genome']['rna_genome_sorted.bam.bai'],
        })
    if tumor_dna_bam is not None:
        input_files.update({
            'tumor_dna.bam': tumor_dna_bam['tumor_dna_fix_pg_sorted.bam'],
            'tumor_dna.bam.bai': tumor_dna_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        })
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    input_files['pepts.fa'] = untargz(input_files['pepts.fa.tar.gz'], work_dir)
    input_files['genome.fa'] = untargz(input_files['genome.fa.tar.gz'], work_dir)
    input_files['annotation.gtf'] = untargz(input_files['annotation.gtf.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = ['--peptides', input_files['pepts.fa'],
                  '--prefix', 'transgened',
                  '--pep_lens', '9,10,15',
                  '--cores', str(transgene_options['n']),
                  '--genome', input_files['genome.fa'],
                  '--annotation', input_files['annotation.gtf']]

    if snpeffed_file is not None:
        parameters.extend(['--snpeff', input_files['snpeffed_muts.vcf']])
    if rna_bam:
        parameters.extend(['--rna_file', input_files['rna.bam']])
    if tumor_dna_bam is not None:
        parameters.extend(['--dna_file', input_files['tumor_dna.bam']])

    if fusion_calls:
        fusion_files = {'fusion_calls': fusion_calls,
                        'transcripts.fa.tar.gz': transgene_options['gencode_transcript_fasta']
                        }
        fusion_files = get_files_from_filestore(job, fusion_files, work_dir, docker=False)
        fusion_files['transcripts.fa'] = untargz(fusion_files['transcripts.fa.tar.gz'], work_dir)
        fusion_files = {key: docker_path(path) for key, path in fusion_files.items()}

        parameters += ['--transcripts', fusion_files['transcripts.fa'],
                       '--fusions', fusion_files['fusion_calls']]

    docker_call(tool='transgene',
                tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'],
                tool_version=transgene_options['version'])

    output_files = defaultdict()
    for peplen in ['9', '10', '15']:
        for tissue_type in ['tumor', 'normal']:
            pepfile = '_'.join(['transgened', tissue_type, peplen, 'mer_peptides.faa'])
            # Backwards compatibility for old transgene output
            old_pepfile = '_'.join(['transgened', tissue_type, peplen, 'mer_snpeffed.faa'])
            if os.path.exists(os.path.join(work_dir, old_pepfile)):
                os.rename(os.path.join(work_dir, old_pepfile),
                          os.path.join(work_dir, pepfile))
                if tissue_type == 'tumor':
                    os.rename(os.path.join(work_dir, old_pepfile + '.map'),
                              os.path.join(work_dir, pepfile + '.map'))

            output_files[pepfile] = job.fileStore.writeGlobalFile(
                os.path.join(work_dir, pepfile))
            export_results(job, output_files[pepfile], pepfile, univ_options,
                           subfolder='peptides')
        mapfile = '_'.join(['transgened_tumor', peplen, 'mer_peptides.faa.map'])
        output_files[mapfile] = job.fileStore.writeGlobalFile(
            os.path.join(work_dir, mapfile))
        export_results(job, output_files[mapfile], mapfile, univ_options,
                       subfolder='peptides')

    if snpeffed_file:
        # There won't be an output vcf if there's no input
        os.rename('transgened_transgened.vcf', 'mutations.vcf')
        export_results(job, job.fileStore.writeGlobalFile('mutations.vcf'),
                       'mutations.vcf', univ_options,
                       subfolder='mutations/transgened')
    if fusion_calls:
        # There won't be an output bedpe if there's no input
        os.rename('transgened_transgened.bedpe', 'fusions.bedpe')
        export_results(job, job.fileStore.writeGlobalFile('fusions.bedpe'),
                       'fusions.bedpe', univ_options,
                       subfolder='mutations/transgened')

    job.fileStore.logToMaster('Ran transgene on %s successfully'
                              % univ_options['patient'])
    return output_files
[ "def", "run_transgene", "(", "job", ",", "snpeffed_file", ",", "rna_bam", ",", "univ_options", ",", "transgene_options", ",", "tumor_dna_bam", "=", "None", ",", "fusion_calls", "=", "None", ")", ":", "assert", "snpeffed_file", "or", "fusion_calls", "work_dir", "=", "os", ".", "getcwd", "(", ")", "input_files", "=", "{", "'pepts.fa.tar.gz'", ":", "transgene_options", "[", "'gencode_peptide_fasta'", "]", ",", "'annotation.gtf.tar.gz'", ":", "transgene_options", "[", "'gencode_annotation_gtf'", "]", ",", "'genome.fa.tar.gz'", ":", "transgene_options", "[", "'genome_fasta'", "]", "}", "if", "snpeffed_file", "is", "not", "None", ":", "input_files", ".", "update", "(", "{", "'snpeffed_muts.vcf'", ":", "snpeffed_file", "}", ")", "if", "rna_bam", ":", "input_files", ".", "update", "(", "{", "'rna.bam'", ":", "rna_bam", "[", "'rna_genome'", "]", "[", "'rna_genome_sorted.bam'", "]", ",", "'rna.bam.bai'", ":", "rna_bam", "[", "'rna_genome'", "]", "[", "'rna_genome_sorted.bam.bai'", "]", ",", "}", ")", "if", "tumor_dna_bam", "is", "not", "None", ":", "input_files", ".", "update", "(", "{", "'tumor_dna.bam'", ":", "tumor_dna_bam", "[", "'tumor_dna_fix_pg_sorted.bam'", "]", ",", "'tumor_dna.bam.bai'", ":", "tumor_dna_bam", "[", "'tumor_dna_fix_pg_sorted.bam.bai'", "]", ",", "}", ")", "input_files", "=", "get_files_from_filestore", "(", "job", ",", "input_files", ",", "work_dir", ",", "docker", "=", "False", ")", "input_files", "[", "'pepts.fa'", "]", "=", "untargz", "(", "input_files", "[", "'pepts.fa.tar.gz'", "]", ",", "work_dir", ")", "input_files", "[", "'genome.fa'", "]", "=", "untargz", "(", "input_files", "[", "'genome.fa.tar.gz'", "]", ",", "work_dir", ")", "input_files", "[", "'annotation.gtf'", "]", "=", "untargz", "(", "input_files", "[", "'annotation.gtf.tar.gz'", "]", ",", "work_dir", ")", "input_files", "=", "{", "key", ":", "docker_path", "(", "path", ")", "for", "key", ",", "path", "in", "input_files", ".", "items", "(", ")", "}", "parameters", "=", "[", "'--peptides'", ",", "input_files", "[", "'pepts.fa'", "]", ",", "'--prefix'", ",", "'transgened'", ",", "'--pep_lens'", ",", "'9,10,15'", ",", "'--cores'", ",", "str", "(", "transgene_options", "[", "'n'", "]", ")", ",", "'--genome'", ",", "input_files", "[", "'genome.fa'", "]", ",", "'--annotation'", ",", "input_files", "[", "'annotation.gtf'", "]", "]", "if", "snpeffed_file", "is", "not", "None", ":", "parameters", ".", "extend", "(", "[", "'--snpeff'", ",", "input_files", "[", "'snpeffed_muts.vcf'", "]", "]", ")", "if", "rna_bam", ":", "parameters", ".", "extend", "(", "[", "'--rna_file'", ",", "input_files", "[", "'rna.bam'", "]", "]", ")", "if", "tumor_dna_bam", "is", "not", "None", ":", "parameters", ".", "extend", "(", "[", "'--dna_file'", ",", "input_files", "[", "'tumor_dna.bam'", "]", "]", ")", "if", "fusion_calls", ":", "fusion_files", "=", "{", "'fusion_calls'", ":", "fusion_calls", ",", "'transcripts.fa.tar.gz'", ":", "transgene_options", "[", "'gencode_transcript_fasta'", "]", "}", "fusion_files", "=", "get_files_from_filestore", "(", "job", ",", "fusion_files", ",", "work_dir", ",", "docker", "=", "False", ")", "fusion_files", "[", "'transcripts.fa'", "]", "=", "untargz", "(", "fusion_files", "[", "'transcripts.fa.tar.gz'", "]", ",", "work_dir", ")", "fusion_files", "=", "{", "key", ":", "docker_path", "(", "path", ")", "for", "key", ",", "path", "in", "fusion_files", ".", "items", "(", ")", "}", "parameters", "+=", "[", "'--transcripts'", ",", "fusion_files", "[", "'transcripts.fa'", "]", ",", "'--fusions'", ",", "fusion_files", "[", "'fusion_calls'", "]", "]", "docker_call", "(", "tool", "=", "'transgene'", ",", "tool_parameters", "=", "parameters", ",", "work_dir", "=", "work_dir", ",", "dockerhub", "=", "univ_options", "[", "'dockerhub'", "]", ",", "tool_version", "=", "transgene_options", "[", "'version'", "]", ")", "output_files", "=", "defaultdict", "(", ")", "for", "peplen", "in", "[", "'9'", ",", "'10'", ",", "'15'", "]", ":", "for", "tissue_type", "in", "[", "'tumor'", ",", "'normal'", "]", ":", "pepfile", "=", "'_'", ".", "join", "(", "[", "'transgened'", ",", "tissue_type", ",", "peplen", ",", "'mer_peptides.faa'", "]", ")", "# Backwards compatibility for old transgene output", "old_pepfile", "=", "'_'", ".", "join", "(", "[", "'transgened'", ",", "tissue_type", ",", "peplen", ",", "'mer_snpeffed.faa'", "]", ")", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "old_pepfile", ")", ")", ":", "os", ".", "rename", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "old_pepfile", ")", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "pepfile", ")", ")", "if", "tissue_type", "==", "'tumor'", ":", "os", ".", "rename", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "old_pepfile", "+", "'.map'", ")", ",", "os", ".", "path", ".", "join", "(", "work_dir", ",", "pepfile", "+", "'.map'", ")", ")", "output_files", "[", "pepfile", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "pepfile", ")", ")", "export_results", "(", "job", ",", "output_files", "[", "pepfile", "]", ",", "pepfile", ",", "univ_options", ",", "subfolder", "=", "'peptides'", ")", "mapfile", "=", "'_'", ".", "join", "(", "[", "'transgened_tumor'", ",", "peplen", ",", "'mer_peptides.faa.map'", "]", ")", "output_files", "[", "mapfile", "]", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "os", ".", "path", ".", "join", "(", "work_dir", ",", "mapfile", ")", ")", "export_results", "(", "job", ",", "output_files", "[", "mapfile", "]", ",", "mapfile", ",", "univ_options", ",", "subfolder", "=", "'peptides'", ")", "if", "snpeffed_file", ":", "# There won't be an output vcf if there's no input", "os", ".", "rename", "(", "'transgened_transgened.vcf'", ",", "'mutations.vcf'", ")", "export_results", "(", "job", ",", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "'mutations.vcf'", ")", ",", "'mutations.vcf'", ",", "univ_options", ",", "subfolder", "=", "'mutations/transgened'", ")", "if", "fusion_calls", ":", "# There won't be an output bedpe if there's no input", "os", ".", "rename", "(", "'transgened_transgened.bedpe'", ",", "'fusions.bedpe'", ")", "export_results", "(", "job", ",", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "'fusions.bedpe'", ")", ",", "'fusions.bedpe'", ",", "univ_options", ",", "subfolder", "=", "'mutations/transgened'", ")", "job", ".", "fileStore", ".", "logToMaster", "(", "'Ran transgene on %s successfully'", "%", "univ_options", "[", "'patient'", "]", ")", "return", "output_files" ]
avg_line_len: 52.22807
score: 28.263158
def get(self, block=True, timeout=None):
    """Remove and return an item from the queue.

    If optional args ``block`` is ``True`` and ``timeout`` is ``None``
    (the default), block if necessary until an item is available. If
    ``timeout`` is a positive number, it blocks at most ``timeout``
    seconds and raises the :exc:`Queue.Empty` exception if no item was
    available within that time. Otherwise (``block`` is ``False``),
    return an item if one is immediately available, else raise the
    :exc:`Queue.Empty` exception (``timeout`` is ignored in that case).
    """
    if not block:
        return self.get_nowait()

    item = self._bpop([self.name], timeout=timeout)
    if item is not None:
        return item
    raise Empty
[ "def", "get", "(", "self", ",", "block", "=", "True", ",", "timeout", "=", "None", ")", ":", "if", "not", "block", ":", "return", "self", ".", "get_nowait", "(", ")", "item", "=", "self", ".", "_bpop", "(", "[", "self", ".", "name", "]", ",", "timeout", "=", "timeout", ")", "if", "item", "is", "not", "None", ":", "return", "item", "raise", "Empty" ]
avg_line_len: 42.105263
score: 21
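The three retrieval modes in the docstring, sketched with `q` standing for an instance of this queue class:

item = q.get()             # block until an item is available
item = q.get(timeout=5)    # block at most 5 seconds, then raise Queue.Empty
item = q.get(block=False)  # non-blocking; raise Queue.Empty if nothing is ready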
def add_optionals(self, optionals_in, optionals_out):
    """
    Add optional inputs and outputs to the model spec.

    Parameters
    ----------
    optionals_in: [str]
        List of inputs that are optionals.

    optionals_out: [str]
        List of outputs that are optionals.

    See Also
    --------
    set_input, set_output
    """
    spec = self.spec
    if (not optionals_in) and (not optionals_out):
        return

    # assuming single sizes here
    input_types = [datatypes.Array(dim) for (name, dim) in optionals_in]
    output_types = [datatypes.Array(dim) for (name, dim) in optionals_out]

    input_names = [str(name) for (name, dim) in optionals_in]
    output_names = [str(name) for (name, dim) in optionals_out]

    input_features = list(zip(input_names, input_types))
    output_features = list(zip(output_names, output_types))

    len_before_in = len(spec.description.input)
    len_before_out = len(spec.description.output)

    # this appends to the existing model interface
    set_transform_interface_params(spec, input_features, output_features, True)

    # add types for any extra hidden inputs
    for idx in range(len_before_in, len(spec.description.input)):
        spec.description.input[idx].type.multiArrayType.dataType = \
            _Model_pb2.ArrayFeatureType.DOUBLE
    for idx in range(len_before_out, len(spec.description.output)):
        spec.description.output[idx].type.multiArrayType.dataType = \
            _Model_pb2.ArrayFeatureType.DOUBLE
[ "def", "add_optionals", "(", "self", ",", "optionals_in", ",", "optionals_out", ")", ":", "spec", "=", "self", ".", "spec", "if", "(", "not", "optionals_in", ")", "and", "(", "not", "optionals_out", ")", ":", "return", "# assuming single sizes here", "input_types", "=", "[", "datatypes", ".", "Array", "(", "dim", ")", "for", "(", "name", ",", "dim", ")", "in", "optionals_in", "]", "output_types", "=", "[", "datatypes", ".", "Array", "(", "dim", ")", "for", "(", "name", ",", "dim", ")", "in", "optionals_out", "]", "input_names", "=", "[", "str", "(", "name", ")", "for", "(", "name", ",", "dim", ")", "in", "optionals_in", "]", "output_names", "=", "[", "str", "(", "name", ")", "for", "(", "name", ",", "dim", ")", "in", "optionals_out", "]", "input_features", "=", "list", "(", "zip", "(", "input_names", ",", "input_types", ")", ")", "output_features", "=", "list", "(", "zip", "(", "output_names", ",", "output_types", ")", ")", "len_before_in", "=", "len", "(", "spec", ".", "description", ".", "input", ")", "len_before_out", "=", "len", "(", "spec", ".", "description", ".", "output", ")", "# this appends to the existing model interface", "set_transform_interface_params", "(", "spec", ",", "input_features", ",", "output_features", ",", "True", ")", "# add types for any extra hidden inputs", "for", "idx", "in", "range", "(", "len_before_in", ",", "len", "(", "spec", ".", "description", ".", "input", ")", ")", ":", "spec", ".", "description", ".", "input", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "dataType", "=", "_Model_pb2", ".", "ArrayFeatureType", ".", "DOUBLE", "for", "idx", "in", "range", "(", "len_before_out", ",", "len", "(", "spec", ".", "description", ".", "output", ")", ")", ":", "spec", ".", "description", ".", "output", "[", "idx", "]", ".", "type", ".", "multiArrayType", ".", "dataType", "=", "_Model_pb2", ".", "ArrayFeatureType", ".", "DOUBLE" ]
avg_line_len: 37.095238
score: 25.619048
def make_const_handler(data):
    """
    Create a handler for a data opcode that returns a constant.
    """
    data = bytes_as_hex(data)

    def constant_data_opcode_handler(script, pc, verify_minimal_data=False):
        return pc + 1, data
    return constant_data_opcode_handler
[ "def", "make_const_handler", "(", "data", ")", ":", "data", "=", "bytes_as_hex", "(", "data", ")", "def", "constant_data_opcode_handler", "(", "script", ",", "pc", ",", "verify_minimal_data", "=", "False", ")", ":", "return", "pc", "+", "1", ",", "data", "return", "constant_data_opcode_handler" ]
avg_line_len: 30.555556
score: 15.222222
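A sketch of the returned closure's behavior; the opcode payload is an illustrative byte string:

handler = make_const_handler(b'\x05')  # the constant is fixed at creation time
new_pc, data = handler(script=b'', pc=0)
# new_pc == 1; data is whatever bytes_as_hex(b'\x05') produced above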
def _verify_type(self, spec, path):
    """Verify that the 'type' in the spec is valid"""
    field_type = spec['type']

    if isinstance(field_type, Schema):
        # Nested documents cannot have validation
        if not set(spec.keys()).issubset(set(['type', 'required', 'nullable', 'default'])):
            raise SchemaFormatException(
                "Unsupported field spec item at {}. Items: " + repr(spec.keys()),
                path)
        return

    elif isinstance(field_type, Array):
        if not isinstance(field_type.contained_type,
                          (type, Schema, Array, types.FunctionType)):
            raise SchemaFormatException(
                "Unsupported field type contained by Array at {}.", path)

    elif not isinstance(field_type, type) and not isinstance(field_type, types.FunctionType):
        raise SchemaFormatException(
            "Unsupported field type at {}. Type must be a type, a function, an Array or another Schema",
            path)
[ "def", "_verify_type", "(", "self", ",", "spec", ",", "path", ")", ":", "field_type", "=", "spec", "[", "'type'", "]", "if", "isinstance", "(", "field_type", ",", "Schema", ")", ":", "# Nested documents cannot have validation", "if", "not", "set", "(", "spec", ".", "keys", "(", ")", ")", ".", "issubset", "(", "set", "(", "[", "'type'", ",", "'required'", ",", "'nullable'", ",", "'default'", "]", ")", ")", ":", "raise", "SchemaFormatException", "(", "\"Unsupported field spec item at {}. Items: \"", "+", "repr", "(", "spec", ".", "keys", "(", ")", ")", ",", "path", ")", "return", "elif", "isinstance", "(", "field_type", ",", "Array", ")", ":", "if", "not", "isinstance", "(", "field_type", ".", "contained_type", ",", "(", "type", ",", "Schema", ",", "Array", ",", "types", ".", "FunctionType", ")", ")", ":", "raise", "SchemaFormatException", "(", "\"Unsupported field type contained by Array at {}.\"", ",", "path", ")", "elif", "not", "isinstance", "(", "field_type", ",", "type", ")", "and", "not", "isinstance", "(", "field_type", ",", "types", ".", "FunctionType", ")", ":", "raise", "SchemaFormatException", "(", "\"Unsupported field type at {}. Type must be a type, a function, an Array or another Schema\"", ",", "path", ")" ]
avg_line_len: 57.8125
score: 36
def _exec(self, binary, stdin='', args=(), env={}):
    """
    Executes the binary using stdin and args with environment
    variables.

    Returns a tuple of stdout, stderr. Format determined by the
    input text (either str or bytes), and the encoding of str will
    be determined by the locale this module was imported in.
    """
    call_kw = self._gen_call_kws(**env)
    call_args = [self._get_exec_binary(call_kw)]
    call_args.extend(args)
    return fork_exec(call_args, stdin, **call_kw)
[ "def", "_exec", "(", "self", ",", "binary", ",", "stdin", "=", "''", ",", "args", "=", "(", ")", ",", "env", "=", "{", "}", ")", ":", "call_kw", "=", "self", ".", "_gen_call_kws", "(", "*", "*", "env", ")", "call_args", "=", "[", "self", ".", "_get_exec_binary", "(", "call_kw", ")", "]", "call_args", ".", "extend", "(", "args", ")", "return", "fork_exec", "(", "call_args", ",", "stdin", ",", "*", "*", "call_kw", ")" ]
avg_line_len: 38.285714
score: 18.428571
def lithospheric_stress(step, trench, ridge, time):
    """calculate stress in the lithosphere"""
    timestep = step.isnap
    base_lith = step.geom.rcmb + 1 - 0.105

    stressfld = step.fields['sII'][0, :, :, 0]
    stressfld = np.ma.masked_where(step.geom.r_mesh[0] < base_lith, stressfld)

    # stress integration in the lithosphere
    dzm = (step.geom.r_coord[1:] - step.geom.r_coord[:-1])
    stress_lith = np.sum((stressfld[:, 1:] * dzm.T), axis=1)
    ph_coord = step.geom.p_coord  # probably doesn't need alias

    # plot stress in the lithosphere
    fig, axis, _, _ = field.plot_scalar(step, 'sII', stressfld, cmap='plasma_r',
                                        vmin=0, vmax=300)
    # Annotation with time and step
    axis.text(1., 0.9, str(round(time, 0)) + ' My', transform=axis.transAxes)
    axis.text(1., 0.1, str(timestep), transform=axis.transAxes)
    misc.saveplot(fig, 'lith', timestep)

    # velocity
    vphi = step.fields['v2'][0, :, :, 0]
    vph2 = 0.5 * (vphi + np.roll(vphi, 1, 0))  # interpolate to the same phi

    # position of continents
    concfld = step.fields['c'][0, :, :, 0]
    if step.sdat.par['boundaries']['air_layer']:
        # we are a bit below the surface; delete "-some number"
        # to be just below
        dsa = step.sdat.par['boundaries']['air_thickness']
        # depth to detect the continents
        indcont = np.argmin(abs((1 - dsa) - step.geom.r_coord)) - 10
    else:
        # depth to detect continents
        indcont = -1
    if step.sdat.par['boundaries']['air_layer'] and\
            not step.sdat.par['continents']['proterozoic_belts']:
        continents = np.ma.masked_where(
            np.logical_or(concfld[:-1, indcont] < 3,
                          concfld[:-1, indcont] > 4),
            concfld[:-1, indcont])
    elif step.sdat.par['boundaries']['air_layer'] and\
            step.sdat.par['continents']['proterozoic_belts']:
        continents = np.ma.masked_where(
            np.logical_or(concfld[:-1, indcont] < 3,
                          concfld[:-1, indcont] > 5),
            concfld[:-1, indcont])
    elif step.sdat.par['tracersin']['tracers_weakcrust']:
        continents = np.ma.masked_where(
            concfld[:-1, indcont] < 3, concfld[:-1, indcont])
    else:
        continents = np.ma.masked_where(
            concfld[:-1, indcont] < 2, concfld[:-1, indcont])

    # masked array, only continents are true
    continentsall = continents / continents

    # plot integrated stress in the lithosphere
    fig0, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
    ax1.plot(ph_coord[:-1], vph2[:-1, -1], label='Vel')
    ax1.axhline(y=0, xmin=0, xmax=2 * np.pi,
                color='black', ls='solid', alpha=0.2)
    ax1.set_ylabel("Velocity")
    ax1.text(0.95, 1.07, str(round(time, 0)) + ' My',
             transform=ax1.transAxes)
    ax1.text(0.01, 1.07, str(round(step.geom.ti_ad, 8)),
             transform=ax1.transAxes)

    intstr_scale = step.sdat.scales.stress * step.sdat.scales.length / 1.e12
    ax2.plot(ph_coord, stress_lith * intstr_scale, color='k', label='Stress')
    ax2.set_ylabel(r"Integrated stress [$TN\,m^{-1}$]")

    plot_plate_limits(ax1, ridge, trench, conf.plates.vmin, conf.plates.vmax)
    plot_plate_limits(ax2, ridge, trench, conf.plates.stressmin,
                      conf.plates.lstressmax)

    ax1.set_xlim(0, 2 * np.pi)
    ax1.set_title(timestep)

    ax1.fill_between(
        ph_coord[:-1], continentsall * conf.plates.vmin,
        conf.plates.vmax, facecolor='#8b6914', alpha=0.2)
    ax1.set_ylim(conf.plates.vmin, conf.plates.vmax)
    ax2.fill_between(
        ph_coord[:-1], continentsall * conf.plates.stressmin,
        conf.plates.lstressmax, facecolor='#8b6914', alpha=0.2)
    ax2.set_ylim(conf.plates.stressmin, conf.plates.lstressmax)
    misc.saveplot(fig0, 'svelslith', timestep)
[ "def", "lithospheric_stress", "(", "step", ",", "trench", ",", "ridge", ",", "time", ")", ":", "timestep", "=", "step", ".", "isnap", "base_lith", "=", "step", ".", "geom", ".", "rcmb", "+", "1", "-", "0.105", "stressfld", "=", "step", ".", "fields", "[", "'sII'", "]", "[", "0", ",", ":", ",", ":", ",", "0", "]", "stressfld", "=", "np", ".", "ma", ".", "masked_where", "(", "step", ".", "geom", ".", "r_mesh", "[", "0", "]", "<", "base_lith", ",", "stressfld", ")", "# stress integration in the lithosphere", "dzm", "=", "(", "step", ".", "geom", ".", "r_coord", "[", "1", ":", "]", "-", "step", ".", "geom", ".", "r_coord", "[", ":", "-", "1", "]", ")", "stress_lith", "=", "np", ".", "sum", "(", "(", "stressfld", "[", ":", ",", "1", ":", "]", "*", "dzm", ".", "T", ")", ",", "axis", "=", "1", ")", "ph_coord", "=", "step", ".", "geom", ".", "p_coord", "# probably doesn't need alias", "# plot stress in the lithosphere", "fig", ",", "axis", ",", "_", ",", "_", "=", "field", ".", "plot_scalar", "(", "step", ",", "'sII'", ",", "stressfld", ",", "cmap", "=", "'plasma_r'", ",", "vmin", "=", "0", ",", "vmax", "=", "300", ")", "# Annotation with time and step", "axis", ".", "text", "(", "1.", ",", "0.9", ",", "str", "(", "round", "(", "time", ",", "0", ")", ")", "+", "' My'", ",", "transform", "=", "axis", ".", "transAxes", ")", "axis", ".", "text", "(", "1.", ",", "0.1", ",", "str", "(", "timestep", ")", ",", "transform", "=", "axis", ".", "transAxes", ")", "misc", ".", "saveplot", "(", "fig", ",", "'lith'", ",", "timestep", ")", "# velocity", "vphi", "=", "step", ".", "fields", "[", "'v2'", "]", "[", "0", ",", ":", ",", ":", ",", "0", "]", "vph2", "=", "0.5", "*", "(", "vphi", "+", "np", ".", "roll", "(", "vphi", ",", "1", ",", "0", ")", ")", "# interpolate to the same phi", "# position of continents", "concfld", "=", "step", ".", "fields", "[", "'c'", "]", "[", "0", ",", ":", ",", ":", ",", "0", "]", "if", "step", ".", "sdat", ".", "par", "[", "'boundaries'", "]", "[", "'air_layer'", "]", ":", "# we are a bit below the surface; delete \"-some number\"", "# to be just below", "dsa", "=", "step", ".", "sdat", ".", "par", "[", "'boundaries'", "]", "[", "'air_thickness'", "]", "# depth to detect the continents", "indcont", "=", "np", ".", "argmin", "(", "abs", "(", "(", "1", "-", "dsa", ")", "-", "step", ".", "geom", ".", "r_coord", ")", ")", "-", "10", "else", ":", "# depth to detect continents", "indcont", "=", "-", "1", "if", "step", ".", "sdat", ".", "par", "[", "'boundaries'", "]", "[", "'air_layer'", "]", "and", "not", "step", ".", "sdat", ".", "par", "[", "'continents'", "]", "[", "'proterozoic_belts'", "]", ":", "continents", "=", "np", ".", "ma", ".", "masked_where", "(", "np", ".", "logical_or", "(", "concfld", "[", ":", "-", "1", ",", "indcont", "]", "<", "3", ",", "concfld", "[", ":", "-", "1", ",", "indcont", "]", ">", "4", ")", ",", "concfld", "[", ":", "-", "1", ",", "indcont", "]", ")", "elif", "step", ".", "sdat", ".", "par", "[", "'boundaries'", "]", "[", "'air_layer'", "]", "and", "step", ".", "sdat", ".", "par", "[", "'continents'", "]", "[", "'proterozoic_belts'", "]", ":", "continents", "=", "np", ".", "ma", ".", "masked_where", "(", "np", ".", "logical_or", "(", "concfld", "[", ":", "-", "1", ",", "indcont", "]", "<", "3", ",", "concfld", "[", ":", "-", "1", ",", "indcont", "]", ">", "5", ")", ",", "concfld", "[", ":", "-", "1", ",", "indcont", "]", ")", "elif", "step", ".", "sdat", ".", "par", "[", "'tracersin'", "]", "[", "'tracers_weakcrust'", "]", ":", "continents", "=", "np", ".", "ma", ".", "masked_where", "(", "concfld", "[", ":", "-", "1", ",", "indcont", "]", "<", "3", ",", "concfld", "[", ":", "-", "1", ",", "indcont", "]", ")", "else", ":", "continents", "=", "np", ".", "ma", ".", "masked_where", "(", "concfld", "[", ":", "-", "1", ",", "indcont", "]", "<", "2", ",", "concfld", "[", ":", "-", "1", ",", "indcont", "]", ")", "# masked array, only continents are true", "continentsall", "=", "continents", "/", "continents", "# plot integrated stress in the lithosphere", "fig0", ",", "(", "ax1", ",", "ax2", ")", "=", "plt", ".", "subplots", "(", "2", ",", "1", ",", "sharex", "=", "True", ",", "figsize", "=", "(", "12", ",", "8", ")", ")", "ax1", ".", "plot", "(", "ph_coord", "[", ":", "-", "1", "]", ",", "vph2", "[", ":", "-", "1", ",", "-", "1", "]", ",", "label", "=", "'Vel'", ")", "ax1", ".", "axhline", "(", "y", "=", "0", ",", "xmin", "=", "0", ",", "xmax", "=", "2", "*", "np", ".", "pi", ",", "color", "=", "'black'", ",", "ls", "=", "'solid'", ",", "alpha", "=", "0.2", ")", "ax1", ".", "set_ylabel", "(", "\"Velocity\"", ")", "ax1", ".", "text", "(", "0.95", ",", "1.07", ",", "str", "(", "round", "(", "time", ",", "0", ")", ")", "+", "' My'", ",", "transform", "=", "ax1", ".", "transAxes", ")", "ax1", ".", "text", "(", "0.01", ",", "1.07", ",", "str", "(", "round", "(", "step", ".", "geom", ".", "ti_ad", ",", "8", ")", ")", ",", "transform", "=", "ax1", ".", "transAxes", ")", "intstr_scale", "=", "step", ".", "sdat", ".", "scales", ".", "stress", "*", "step", ".", "sdat", ".", "scales", ".", "length", "/", "1.e12", "ax2", ".", "plot", "(", "ph_coord", ",", "stress_lith", "*", "intstr_scale", ",", "color", "=", "'k'", ",", "label", "=", "'Stress'", ")", "ax2", ".", "set_ylabel", "(", "r\"Integrated stress [$TN\\,m^{-1}$]\"", ")", "plot_plate_limits", "(", "ax1", ",", "ridge", ",", "trench", ",", "conf", ".", "plates", ".", "vmin", ",", "conf", ".", "plates", ".", "vmax", ")", "plot_plate_limits", "(", "ax2", ",", "ridge", ",", "trench", ",", "conf", ".", "plates", ".", "stressmin", ",", "conf", ".", "plates", ".", "lstressmax", ")", "ax1", ".", "set_xlim", "(", "0", ",", "2", "*", "np", ".", "pi", ")", "ax1", ".", "set_title", "(", "timestep", ")", "ax1", ".", "fill_between", "(", "ph_coord", "[", ":", "-", "1", "]", ",", "continentsall", "*", "conf", ".", "plates", ".", "vmin", ",", "conf", ".", "plates", ".", "vmax", ",", "facecolor", "=", "'#8b6914'", ",", "alpha", "=", "0.2", ")", "ax1", ".", "set_ylim", "(", "conf", ".", "plates", ".", "vmin", ",", "conf", ".", "plates", ".", "vmax", ")", "ax2", ".", "fill_between", "(", "ph_coord", "[", ":", "-", "1", "]", ",", "continentsall", "*", "conf", ".", "plates", ".", "stressmin", ",", "conf", ".", "plates", ".", "lstressmax", ",", "facecolor", "=", "'#8b6914'", ",", "alpha", "=", "0.2", ")", "ax2", ".", "set_ylim", "(", "conf", ".", "plates", ".", "stressmin", ",", "conf", ".", "plates", ".", "lstressmax", ")", "misc", ".", "saveplot", "(", "fig0", ",", "'svelslith'", ",", "timestep", ")" ]
42.3
17.622222
def _logs_options(p): """ Add options specific to logs subcommand. """ _default_options(p, blacklist=['cache', 'quiet']) # default time range is 0 to "now" (to include all log entries) p.add_argument( '--start', default='the beginning', # invalid, will result in 0 help='Start date and/or time', ) p.add_argument( '--end', default=datetime.now().strftime('%c'), help='End date and/or time', )
[ "def", "_logs_options", "(", "p", ")", ":", "_default_options", "(", "p", ",", "blacklist", "=", "[", "'cache'", ",", "'quiet'", "]", ")", "# default time range is 0 to \"now\" (to include all log entries)", "p", ".", "add_argument", "(", "'--start'", ",", "default", "=", "'the beginning'", ",", "# invalid, will result in 0", "help", "=", "'Start date and/or time'", ",", ")", "p", ".", "add_argument", "(", "'--end'", ",", "default", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%c'", ")", ",", "help", "=", "'End date and/or time'", ",", ")" ]
30.4
19.333333
def convert_reaction_entry(self, reaction): """Convert reaction entry to YAML dict.""" d = OrderedDict() d['id'] = reaction.id def is_equation_valid(equation): # If the equation is a Reaction object, it must have non-zero # number of compounds. return (equation is not None and ( not isinstance(equation, Reaction) or len(equation.compounds) > 0)) order = { key: i for i, key in enumerate( ['name', 'genes', 'equation', 'subsystem', 'ec'])} prop_keys = (set(reaction.properties) - {'lower_flux', 'upper_flux', 'reversible'}) for prop in sorted(prop_keys, key=lambda x: (order.get(x, 1000), x)): if reaction.properties[prop] is None: continue d[prop] = reaction.properties[prop] if prop == 'equation' and not is_equation_valid(d[prop]): del d[prop] return d
[ "def", "convert_reaction_entry", "(", "self", ",", "reaction", ")", ":", "d", "=", "OrderedDict", "(", ")", "d", "[", "'id'", "]", "=", "reaction", ".", "id", "def", "is_equation_valid", "(", "equation", ")", ":", "# If the equation is a Reaction object, it must have non-zero", "# number of compounds.", "return", "(", "equation", "is", "not", "None", "and", "(", "not", "isinstance", "(", "equation", ",", "Reaction", ")", "or", "len", "(", "equation", ".", "compounds", ")", ">", "0", ")", ")", "order", "=", "{", "key", ":", "i", "for", "i", ",", "key", "in", "enumerate", "(", "[", "'name'", ",", "'genes'", ",", "'equation'", ",", "'subsystem'", ",", "'ec'", "]", ")", "}", "prop_keys", "=", "(", "set", "(", "reaction", ".", "properties", ")", "-", "{", "'lower_flux'", ",", "'upper_flux'", ",", "'reversible'", "}", ")", "for", "prop", "in", "sorted", "(", "prop_keys", ",", "key", "=", "lambda", "x", ":", "(", "order", ".", "get", "(", "x", ",", "1000", ")", ",", "x", ")", ")", ":", "if", "reaction", ".", "properties", "[", "prop", "]", "is", "None", ":", "continue", "d", "[", "prop", "]", "=", "reaction", ".", "properties", "[", "prop", "]", "if", "prop", "==", "'equation'", "and", "not", "is_equation_valid", "(", "d", "[", "prop", "]", ")", ":", "del", "d", "[", "prop", "]", "return", "d" ]
39.68
17.52
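The sort key in the record above — a tuple of (explicit rank, name) with a large default rank — is a generally useful idiom for emitting known keys in a declared order and everything else alphabetically after. A minimal standalone sketch (the property names here are illustrative, not from any real model):

order = {key: i for i, key in enumerate(['name', 'genes', 'equation', 'subsystem', 'ec'])}
props = {'ec': '1.1.1.1', 'comment': 'x', 'name': 'PGI', 'kegg': 'R00771'}
for prop in sorted(props, key=lambda x: (order.get(x, 1000), x)):
    # known keys come out in declared order; unknown keys sort alphabetically last
    print(prop)
# -> name, ec, comment, kegg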
def start(self): """Union[Datetime, None]: Datetime when the stage started.""" if self._properties.get("startMs") is None: return None return _helpers._datetime_from_microseconds( int(self._properties.get("startMs")) * 1000.0 )
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "_properties", ".", "get", "(", "\"startMs\"", ")", "is", "None", ":", "return", "None", "return", "_helpers", ".", "_datetime_from_microseconds", "(", "int", "(", "self", ".", "_properties", ".", "get", "(", "\"startMs\"", ")", ")", "*", "1000.0", ")" ]
39.571429
16
def check_for_update(self, force=True, download=False):
        """ Returns a :class:`~plexapi.base.Release` object containing release info.

           Parameters:
               force (bool): Force server to check for new releases
               download (bool): Download if an update is available.
        """
        part = '/updater/check?download=%s' % (1 if download else 0)
        if force:
            self.query(part, method=self._session.put)

        releases = self.fetchItems('/updater/status')
        if len(releases):
            return releases[0]
[ "def", "check_for_update", "(", "self", ",", "force", "=", "True", ",", "download", "=", "False", ")", ":", "part", "=", "'/updater/check?download=%s'", "%", "(", "1", "if", "download", "else", "0", ")", "if", "force", ":", "self", ".", "query", "(", "part", ",", "method", "=", "self", ".", "_session", ".", "put", ")", "releases", "=", "self", ".", "fetchItems", "(", "'/updater/status'", ")", "if", "len", "(", "releases", ")", ":", "return", "releases", "[", "0", "]" ]
42.615385
17.769231
def cmd_param(self, args): '''control parameters''' self.pstate.handle_command(self.master, self.mpstate, args)
[ "def", "cmd_param", "(", "self", ",", "args", ")", ":", "self", ".", "pstate", ".", "handle_command", "(", "self", ".", "master", ",", "self", ".", "mpstate", ",", "args", ")" ]
41.666667
16.333333
def unpack(self, buff, offset=0):
        """Unpack *buff* into this object.

        Do nothing, since the _length is already defined and it is just a Pad.
        Keep buff and offset just for compatibility with other unpack methods.

        Args:
            buff (bytes): Binary buffer.
            offset (int): Where to begin unpacking.

        Raises:
            :exc:`~.exceptions.UnpackException`: If unpack fails.

        """
        super().unpack(buff, offset)
        self.wildcards = UBInt32(value=FlowWildCards.OFPFW_ALL,
                                 enum_ref=FlowWildCards)
        self.wildcards.unpack(buff, offset)
[ "def", "unpack", "(", "self", ",", "buff", ",", "offset", "=", "0", ")", ":", "super", "(", ")", ".", "unpack", "(", "buff", ",", "offset", ")", "self", ".", "wildcards", "=", "UBInt32", "(", "value", "=", "FlowWildCards", ".", "OFPFW_ALL", ",", "enum_ref", "=", "FlowWildCards", ")", "self", ".", "wildcards", ".", "unpack", "(", "buff", ",", "offset", ")" ]
34.555556
20.833333
def get_forecast(self): ''' If configured to do so, make an API request to retrieve the forecast data for the configured/queried weather station, and return the low and high temperatures. Otherwise, return two empty strings. ''' no_data = ('', '') if self.forecast: query_url = STATION_QUERY_URL % (self.api_key, 'forecast', self.station_id) try: response = self.api_request(query_url)['forecast'] response = response['simpleforecast']['forecastday'][0] except (KeyError, IndexError, TypeError): self.logger.error( 'No forecast data found for %s', self.station_id) self.data['update_error'] = self.update_error return no_data unit = 'celsius' if self.units == 'metric' else 'fahrenheit' low_temp = response.get('low', {}).get(unit, '') high_temp = response.get('high', {}).get(unit, '') return low_temp, high_temp else: return no_data
[ "def", "get_forecast", "(", "self", ")", ":", "no_data", "=", "(", "''", ",", "''", ")", "if", "self", ".", "forecast", ":", "query_url", "=", "STATION_QUERY_URL", "%", "(", "self", ".", "api_key", ",", "'forecast'", ",", "self", ".", "station_id", ")", "try", ":", "response", "=", "self", ".", "api_request", "(", "query_url", ")", "[", "'forecast'", "]", "response", "=", "response", "[", "'simpleforecast'", "]", "[", "'forecastday'", "]", "[", "0", "]", "except", "(", "KeyError", ",", "IndexError", ",", "TypeError", ")", ":", "self", ".", "logger", ".", "error", "(", "'No forecast data found for %s'", ",", "self", ".", "station_id", ")", "self", ".", "data", "[", "'update_error'", "]", "=", "self", ".", "update_error", "return", "no_data", "unit", "=", "'celsius'", "if", "self", ".", "units", "==", "'metric'", "else", "'fahrenheit'", "low_temp", "=", "response", ".", "get", "(", "'low'", ",", "{", "}", ")", ".", "get", "(", "unit", ",", "''", ")", "high_temp", "=", "response", ".", "get", "(", "'high'", ",", "{", "}", ")", ".", "get", "(", "unit", ",", "''", ")", "return", "low_temp", ",", "high_temp", "else", ":", "return", "no_data" ]
44.615385
22.076923
def write(self, filename, encoding='utf-8'):
        """Write the list of entries to a file.

        :param filename: path of the file to write to.
        :param encoding: text encoding to use (default: utf-8).
        :return: None
        """
        with io.open(str(filename), 'w', encoding=encoding) as fp:
            for entry in self:
                fp.write(entry.__unicode__())
                fp.write('\n\n')
[ "def", "write", "(", "self", ",", "filename", ",", "encoding", "=", "'utf-8'", ")", ":", "with", "io", ".", "open", "(", "str", "(", "filename", ")", ",", "'w'", ",", "encoding", "=", "encoding", ")", "as", "fp", ":", "for", "entry", "in", "self", ":", "fp", ".", "write", "(", "entry", ".", "__unicode__", "(", ")", ")", "fp", ".", "write", "(", "'\\n\\n'", ")" ]
30.818182
13.545455
def min_eta_for_em_bright(bh_spin_z, ns_g_mass, mNS_pts, sBH_pts, eta_mins):
    """
    Function that uses the end product of generate_em_constraint_data to sweep
    over a set of NS-BH binaries and determine the minimum symmetric mass ratio
    required by each binary to yield a remnant disk mass that exceeds a certain
    threshold.  Each binary passed to this function consists of a NS mass and a
    BH spin parameter component along the orbital angular momentum.  Unlike
    find_em_constraint_data_point, which solves the problem at a given point in
    the parameter space and is more generic, this function interpolates the
    results produced by generate_em_constraint_data at the desired locations:
    generate_em_constraint_data must be run once prior to calling
    min_eta_for_em_bright.

    Parameters
    ----------
    bh_spin_z: array
        desired values of the BH dimensionless spin parameter for the spin
        projection along the orbital angular momentum
    ns_g_mass: array
        desired values of the NS gravitational mass (in solar masses)
    mNS_pts: array
        NS mass values (in solar masses) from the output of
        generate_em_constraint_data
    sBH_pts: array
        BH dimensionless spin parameter values along the orbital angular
        momentum from the output of generate_em_constraint_data
    eta_mins: array
        minimum symmetric mass ratio values to exceed a given remnant disk mass
        threshold from the output of generate_em_constraint_data

    Returns
    -------
    eta_min: array
        the minimum symmetric mass ratio required by each binary in the input
        to yield a remnant disk mass that exceeds a certain threshold
    """
    f = scipy.interpolate.RectBivariateSpline(mNS_pts, sBH_pts, eta_mins, kx=1, ky=1)
    # If bh_spin_z is a numpy array (assuming ns_g_mass has the same size)
    if isinstance(bh_spin_z, np.ndarray):
        eta_min = np.empty(len(bh_spin_z))
        for i in range(len(bh_spin_z)):
            eta_min[i] = f(ns_g_mass[i], bh_spin_z[i])
    # Else (assuming ns_g_mass and bh_spin_z are single numbers)
    else:
        eta_min = f(ns_g_mass, bh_spin_z)

    return eta_min
[ "def", "min_eta_for_em_bright", "(", "bh_spin_z", ",", "ns_g_mass", ",", "mNS_pts", ",", "sBH_pts", ",", "eta_mins", ")", ":", "f", "=", "scipy", ".", "interpolate", ".", "RectBivariateSpline", "(", "mNS_pts", ",", "sBH_pts", ",", "eta_mins", ",", "kx", "=", "1", ",", "ky", "=", "1", ")", "# If bh_spin_z is a numpy array (assuming ns_g_mass has the same size)", "if", "isinstance", "(", "bh_spin_z", ",", "np", ".", "ndarray", ")", ":", "eta_min", "=", "np", ".", "empty", "(", "len", "(", "bh_spin_z", ")", ")", "for", "i", "in", "range", "(", "len", "(", "bh_spin_z", ")", ")", ":", "eta_min", "[", "i", "]", "=", "f", "(", "ns_g_mass", "[", "i", "]", ",", "bh_spin_z", "[", "i", "]", ")", "# Else (assuming ns_g_mass and bh_spin_z are single numbers)", "else", ":", "eta_min", "=", "f", "(", "ns_g_mass", ",", "bh_spin_z", ")", "return", "eta_min" ]
43.24
22.52
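A minimal sketch of the bilinear-interpolation step from the record above in isolation, on a made-up grid (the values are illustrative, not physical):

import numpy as np
import scipy.interpolate

# toy grid: NS masses (solar masses) x BH spins, with placeholder eta_min values
mNS_pts = np.linspace(1.0, 3.0, 5)
sBH_pts = np.linspace(-1.0, 1.0, 9)
eta_mins = np.outer(mNS_pts, np.abs(sBH_pts) + 0.1)  # shape (len(x), len(y))

# kx=ky=1 gives piecewise-bilinear interpolation over the rectangular grid
f = scipy.interpolate.RectBivariateSpline(mNS_pts, sBH_pts, eta_mins, kx=1, ky=1)

ns_g_mass = np.array([1.4, 2.0])
bh_spin_z = np.array([0.3, -0.5])
eta_min = np.array([f(m, s)[0, 0] for m, s in zip(ns_g_mass, bh_spin_z)])
print(eta_min)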
def integrate_ivp(u0=1.0, v0=0.0, mu=1.0, tend=10.0, dt0=1e-8, nt=0, nsteps=600, t0=0.0, atol=1e-8, rtol=1e-8, plot=False, savefig='None', method='bdf', dpi=100, verbose=False): """ Example program integrating an IVP problem of van der Pol oscillator """ f, j = get_f_and_j(mu) if nt > 1: tout = np.linspace(t0, tend, nt) yout, nfo = integrate_predefined( f, j, [u0, v0], tout, dt0, atol, rtol, nsteps=nsteps, check_indexing=False, method=method) else: tout, yout, nfo = integrate_adaptive( f, j, [u0, v0], t0, tend, dt0, atol, rtol, nsteps=nsteps, check_indexing=False, method=method) # dfdt[:] also for len == 1 if verbose: print(nfo) if plot: import matplotlib.pyplot as plt plt.plot(tout, yout[:, 1], 'g--') plt.plot(tout, yout[:, 0], 'k-', linewidth=2) if savefig == 'None': plt.show() else: plt.savefig(savefig, dpi=dpi)
[ "def", "integrate_ivp", "(", "u0", "=", "1.0", ",", "v0", "=", "0.0", ",", "mu", "=", "1.0", ",", "tend", "=", "10.0", ",", "dt0", "=", "1e-8", ",", "nt", "=", "0", ",", "nsteps", "=", "600", ",", "t0", "=", "0.0", ",", "atol", "=", "1e-8", ",", "rtol", "=", "1e-8", ",", "plot", "=", "False", ",", "savefig", "=", "'None'", ",", "method", "=", "'bdf'", ",", "dpi", "=", "100", ",", "verbose", "=", "False", ")", ":", "f", ",", "j", "=", "get_f_and_j", "(", "mu", ")", "if", "nt", ">", "1", ":", "tout", "=", "np", ".", "linspace", "(", "t0", ",", "tend", ",", "nt", ")", "yout", ",", "nfo", "=", "integrate_predefined", "(", "f", ",", "j", ",", "[", "u0", ",", "v0", "]", ",", "tout", ",", "dt0", ",", "atol", ",", "rtol", ",", "nsteps", "=", "nsteps", ",", "check_indexing", "=", "False", ",", "method", "=", "method", ")", "else", ":", "tout", ",", "yout", ",", "nfo", "=", "integrate_adaptive", "(", "f", ",", "j", ",", "[", "u0", ",", "v0", "]", ",", "t0", ",", "tend", ",", "dt0", ",", "atol", ",", "rtol", ",", "nsteps", "=", "nsteps", ",", "check_indexing", "=", "False", ",", "method", "=", "method", ")", "# dfdt[:] also for len == 1", "if", "verbose", ":", "print", "(", "nfo", ")", "if", "plot", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "plt", ".", "plot", "(", "tout", ",", "yout", "[", ":", ",", "1", "]", ",", "'g--'", ")", "plt", ".", "plot", "(", "tout", ",", "yout", "[", ":", ",", "0", "]", ",", "'k-'", ",", "linewidth", "=", "2", ")", "if", "savefig", "==", "'None'", ":", "plt", ".", "show", "(", ")", "else", ":", "plt", ".", "savefig", "(", "savefig", ",", "dpi", "=", "dpi", ")" ]
39
17.153846
def disconnect(self):
        """
        Disconnects from Telegram.

        If the event loop is already running, this method returns a
        coroutine that you should await on your own code; otherwise
        the loop is run until said coroutine completes.
        """
        if self._loop.is_running():
            return self._disconnect_coro()
        else:
            self._loop.run_until_complete(self._disconnect_coro())
[ "def", "disconnect", "(", "self", ")", ":", "if", "self", ".", "_loop", ".", "is_running", "(", ")", ":", "return", "self", ".", "_disconnect_coro", "(", ")", "else", ":", "self", ".", "_loop", ".", "run_until_complete", "(", "self", ".", "_disconnect_coro", "(", ")", ")" ]
35.166667
16.166667
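The sync-or-async dispatch in the record above is a common pattern for libraries that support both calling styles. A stripped-down sketch of the same idea, assuming nothing beyond the standard library:

import asyncio

class Connection:
    def __init__(self):
        self._loop = asyncio.new_event_loop()

    async def _disconnect_coro(self):
        await asyncio.sleep(0)  # stand-in for real teardown work
        print('disconnected')

    def disconnect(self):
        # inside a running loop: hand back the coroutine for the caller to await;
        # outside a loop: drive the loop to completion ourselves
        if self._loop.is_running():
            return self._disconnect_coro()
        return self._loop.run_until_complete(self._disconnect_coro())

Connection().disconnect()  # blocking call when no loop is running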
def _cursor_position_changed(self): """ Updates the tip based on user cursor movement. """ cursor = self._text_edit.textCursor() if cursor.position() <= self._start_position: self.hide() else: position, commas = self._find_parenthesis(self._start_position + 1) if position != -1: self.hide()
[ "def", "_cursor_position_changed", "(", "self", ")", ":", "cursor", "=", "self", ".", "_text_edit", ".", "textCursor", "(", ")", "if", "cursor", ".", "position", "(", ")", "<=", "self", ".", "_start_position", ":", "self", ".", "hide", "(", ")", "else", ":", "position", ",", "commas", "=", "self", ".", "_find_parenthesis", "(", "self", ".", "_start_position", "+", "1", ")", "if", "position", "!=", "-", "1", ":", "self", ".", "hide", "(", ")" ]
37.4
12.9
def del_widget(self, ref):
        """ Delete/remove a widget """
        self.server.request("widget_del %s %s" % (self.name, ref))
        del(self.widgets[ref])
[ "def", "del_widget", "(", "self", ",", "ref", ")", ":", "self", ".", "server", ".", "request", "(", "\"widget_del %s %s\"", "%", "(", "self", ".", "name", ",", "ref", ")", ")", "del", "(", "self", ".", "widgets", "[", "ref", "]", ")" ]
40
12.5
def _as_array(self, fmt): '''Unpack the raw bytes of this param using the given data format.''' assert self.dimensions, \ '{}: cannot get value as {} array!'.format(self.name, fmt) elems = array.array(fmt) elems.fromstring(self.bytes) return np.array(elems).reshape(self.dimensions)
[ "def", "_as_array", "(", "self", ",", "fmt", ")", ":", "assert", "self", ".", "dimensions", ",", "'{}: cannot get value as {} array!'", ".", "format", "(", "self", ".", "name", ",", "fmt", ")", "elems", "=", "array", ".", "array", "(", "fmt", ")", "elems", ".", "fromstring", "(", "self", ".", "bytes", ")", "return", "np", ".", "array", "(", "elems", ")", ".", "reshape", "(", "self", ".", "dimensions", ")" ]
46.857143
16.571429
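The same bytes-to-array round trip as in the record above can be done directly with numpy; note that array.array.fromstring, used there, was renamed frombytes and removed in Python 3.9. A small standalone sketch:

import numpy as np

raw = np.arange(6, dtype=np.float32).tobytes()  # pretend this came off disk
dims = (2, 3)

# the array-module format 'f' corresponds to np.float32 here
values = np.frombuffer(raw, dtype=np.float32).reshape(dims)
print(values)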
def initialize(): """Initialize all the uninitialized variables in the global scope.""" new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED get_session().run(tf.variables_initializer(new_variables)) ALREADY_INITIALIZED.update(new_variables)
[ "def", "initialize", "(", ")", ":", "new_variables", "=", "set", "(", "tf", ".", "global_variables", "(", ")", ")", "-", "ALREADY_INITIALIZED", "get_session", "(", ")", ".", "run", "(", "tf", ".", "variables_initializer", "(", "new_variables", ")", ")", "ALREADY_INITIALIZED", ".", "update", "(", "new_variables", ")" ]
53
15.6
def draw_graph(self): """ The central logic for drawing the graph. Sets self.graph (the 'g' element in the SVG root) """ transform = 'translate (%s %s)' % (self.border_left, self.border_top) self.graph = etree.SubElement(self.root, 'g', transform=transform) etree.SubElement(self.graph, 'rect', { 'x': '0', 'y': '0', 'width': str(self.graph_width), 'height': str(self.graph_height), 'class': 'graphBackground' }) # Axis etree.SubElement(self.graph, 'path', { 'd': 'M 0 0 v%s' % self.graph_height, 'class': 'axis', 'id': 'xAxis' }) etree.SubElement(self.graph, 'path', { 'd': 'M 0 %s h%s' % (self.graph_height, self.graph_width), 'class': 'axis', 'id': 'yAxis' }) self.draw_x_labels() self.draw_y_labels()
[ "def", "draw_graph", "(", "self", ")", ":", "transform", "=", "'translate (%s %s)'", "%", "(", "self", ".", "border_left", ",", "self", ".", "border_top", ")", "self", ".", "graph", "=", "etree", ".", "SubElement", "(", "self", ".", "root", ",", "'g'", ",", "transform", "=", "transform", ")", "etree", ".", "SubElement", "(", "self", ".", "graph", ",", "'rect'", ",", "{", "'x'", ":", "'0'", ",", "'y'", ":", "'0'", ",", "'width'", ":", "str", "(", "self", ".", "graph_width", ")", ",", "'height'", ":", "str", "(", "self", ".", "graph_height", ")", ",", "'class'", ":", "'graphBackground'", "}", ")", "# Axis", "etree", ".", "SubElement", "(", "self", ".", "graph", ",", "'path'", ",", "{", "'d'", ":", "'M 0 0 v%s'", "%", "self", ".", "graph_height", ",", "'class'", ":", "'axis'", ",", "'id'", ":", "'xAxis'", "}", ")", "etree", ".", "SubElement", "(", "self", ".", "graph", ",", "'path'", ",", "{", "'d'", ":", "'M 0 %s h%s'", "%", "(", "self", ".", "graph_height", ",", "self", ".", "graph_width", ")", ",", "'class'", ":", "'axis'", ",", "'id'", ":", "'yAxis'", "}", ")", "self", ".", "draw_x_labels", "(", ")", "self", ".", "draw_y_labels", "(", ")" ]
23.903226
19.83871
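A minimal sketch of the same SubElement pattern from draw_graph above, using the standard library's ElementTree (the original presumably imports lxml.etree, which has the same API for these calls); the sizes are illustrative:

import xml.etree.ElementTree as etree

root = etree.Element('svg', xmlns='http://www.w3.org/2000/svg')
graph = etree.SubElement(root, 'g', transform='translate (20 20)')
etree.SubElement(graph, 'rect', {
    'x': '0', 'y': '0',
    'width': '300', 'height': '200',
    'class': 'graphBackground'})
# vertical x axis and horizontal y axis, as in draw_graph above
etree.SubElement(graph, 'path', {'d': 'M 0 0 v200', 'class': 'axis', 'id': 'xAxis'})
etree.SubElement(graph, 'path', {'d': 'M 0 200 h300', 'class': 'axis', 'id': 'yAxis'})
print(etree.tostring(root).decode())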
def get_season(self, season_key, card_type="micro_card"):
        """
        Call the Season API.

        Args:
           season_key: key of the season
           card_type: optional, defaults to micro_card. Accepted values are
           micro_card & summary_card

        Returns:
           json data
        """
        season_url = self.api_path + "season/" + season_key + "/"
        params = {}
        params["card_type"] = card_type
        response = self.get_response(season_url, params)
        return response
[ "def", "get_season", "(", "self", ",", "season_key", ",", "card_type", "=", "\"micro_card\"", ")", ":", "season_url", "=", "self", ".", "api_path", "+", "\"season/\"", "+", "season_key", "+", "\"/\"", "params", "=", "{", "}", "params", "[", "\"card_type\"", "]", "=", "card_type", "response", "=", "self", ".", "get_response", "(", "season_url", ",", "params", ")", "return", "response" ]
29.823529
17.705882
def spawn(self, context=None): """ context may be a callable or a dict. """ if context is None: context = self.default_context if isinstance(context, collections.Callable): context = context() if not isinstance(context, collections.Mapping): raise PatchboardError('Cannot determine a valid context') return Client(self, context, self.api, self.endpoint_classes)
[ "def", "spawn", "(", "self", ",", "context", "=", "None", ")", ":", "if", "context", "is", "None", ":", "context", "=", "self", ".", "default_context", "if", "isinstance", "(", "context", ",", "collections", ".", "Callable", ")", ":", "context", "=", "context", "(", ")", "if", "not", "isinstance", "(", "context", ",", "collections", ".", "Mapping", ")", ":", "raise", "PatchboardError", "(", "'Cannot determine a valid context'", ")", "return", "Client", "(", "self", ",", "context", ",", "self", ".", "api", ",", "self", ".", "endpoint_classes", ")" ]
31.642857
17.5
def set_resize_parameters(
            self,
            degrad=6,
            labels=None,
            resize_mm=None,
            resize_voxel_number=None,
    ):
        """
        set_input_data() should be called before this method.

        :param degrad: degradation (coarsening) factor
        :param labels: labels selecting the part of the segmentation to use
        :param resize_mm: target voxel size in mm
        :param resize_voxel_number: target number of voxels; if given, the
            target voxel size is derived from it
        :return: None
        """
        # from . import show_segmentation
        logger.debug("set_resize_parameters(\ndegrad={}, \nlabels={}\nresize_mm={}\nresize_voxel_number={}".format(
            degrad, labels, resize_mm, resize_voxel_number
        ))
        degrad = int(degrad)

        # import ipdb; ipdb.set_trace()
        # return voxelsize_mm, degrad
        self.degrad = degrad
        self.labels = labels

        segmentation = self._select_labels(self.segmentation, labels)

        if resize_voxel_number is not None:
            nvoxels = np.sum(segmentation > 0)
            volume = nvoxels * np.prod(self.voxelsize_mm)
            voxel_volume = volume / float(resize_voxel_number)
            resize_mm = voxel_volume ** (1.0 / 3.0)
        else:
            resize_mm = np.mean(self.voxelsize_mm)
        # self.working_voxelsize_mm = voxelsize_mm
        # self.working_segmentation = segmentation
        if np.sum(np.abs(self.resize_mm_1d - resize_mm)) != 0:
            # resize parameter changed
            self.resized_segmentation = None
            self.resized_binar_segmentation = None

        self.resize_mm_1d = resize_mm
[ "def", "set_resize_parameters", "(", "self", ",", "degrad", "=", "6", ",", "labels", "=", "None", ",", "resize_mm", "=", "None", ",", "resize_voxel_number", "=", "None", ",", ")", ":", "# from . import show_segmentation", "logger", ".", "debug", "(", "\"set_resize_parameters(\\ndegrad={}, \\nlabels={}\\nresize_mm={}\\nresize_voxel_number={}\"", ".", "format", "(", "degrad", ",", "labels", ",", "resize_mm", ",", "resize_voxel_number", ")", ")", "degrad", "=", "int", "(", "degrad", ")", "# import ipdb; ipdb.set_trace()", "# return voxelsize_mm, degrad", "self", ".", "degrad", "=", "degrad", "self", ".", "labels", "=", "labels", "segmentation", "=", "self", ".", "_select_labels", "(", "self", ".", "segmentation", ",", "labels", ")", "if", "resize_voxel_number", "is", "not", "None", ":", "nvoxels", "=", "np", ".", "sum", "(", "segmentation", ">", "0", ")", "volume", "=", "nvoxels", "*", "np", ".", "prod", "(", "self", ".", "voxelsize_mm", ")", "voxel_volume", "=", "volume", "/", "float", "(", "resize_voxel_number", ")", "resize_mm", "=", "voxel_volume", "**", "(", "1.0", "/", "3.0", ")", "else", ":", "resize_mm", "=", "np", ".", "mean", "(", "self", ".", "voxelsize_mm", ")", "# self.working_voxelsize_mm = voxelsize_mm", "# self.working_segmentation = segmentation", "if", "np", ".", "sum", "(", "np", ".", "abs", "(", "self", ".", "resize_mm_1d", "-", "resize_mm", ")", ")", "!=", "0", ":", "# resize parameter changed", "self", ".", "resized_segmentation", "=", "None", "self", ".", "resized_binar_segmentation", "=", "None", "self", ".", "resize_mm_1d", "=", "resize_mm" ]
33.045455
17.272727
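The resize_voxel_number branch above just solves volume = N * dx^3 for dx. A worked numeric sketch (the numbers are made up):

import numpy as np

voxelsize_mm = np.array([0.5, 0.5, 1.0])
nvoxels = 80000                        # voxels inside the selected labels
resize_voxel_number = 10000            # target voxel count after resize

volume = nvoxels * np.prod(voxelsize_mm)            # 20000 mm^3 of tissue
voxel_volume = volume / float(resize_voxel_number)  # 2 mm^3 per new voxel
resize_mm = voxel_volume ** (1.0 / 3.0)             # ~1.26 mm isotropic edge
print(resize_mm)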
def run( self, *, # Force keyword args. program: Union[circuits.Circuit, Schedule], job_config: Optional[JobConfig] = None, param_resolver: ParamResolver = ParamResolver({}), repetitions: int = 1, priority: int = 50, processor_ids: Sequence[str] = ('xmonsim',)) -> TrialResult: """Runs the supplied Circuit or Schedule via Quantum Engine. Args: program: The Circuit or Schedule to execute. If a circuit is provided, a moment by moment schedule will be used. job_config: Configures the names of programs and jobs. param_resolver: Parameters to run with the program. repetitions: The number of repetitions to simulate. priority: The priority to run at, 0-100. processor_ids: The engine processors to run against. Returns: A single TrialResult for this run. """ return list( self.run_sweep(program=program, job_config=job_config, params=[param_resolver], repetitions=repetitions, priority=priority, processor_ids=processor_ids))[0]
[ "def", "run", "(", "self", ",", "*", ",", "# Force keyword args.", "program", ":", "Union", "[", "circuits", ".", "Circuit", ",", "Schedule", "]", ",", "job_config", ":", "Optional", "[", "JobConfig", "]", "=", "None", ",", "param_resolver", ":", "ParamResolver", "=", "ParamResolver", "(", "{", "}", ")", ",", "repetitions", ":", "int", "=", "1", ",", "priority", ":", "int", "=", "50", ",", "processor_ids", ":", "Sequence", "[", "str", "]", "=", "(", "'xmonsim'", ",", ")", ")", "->", "TrialResult", ":", "return", "list", "(", "self", ".", "run_sweep", "(", "program", "=", "program", ",", "job_config", "=", "job_config", ",", "params", "=", "[", "param_resolver", "]", ",", "repetitions", "=", "repetitions", ",", "priority", "=", "priority", ",", "processor_ids", "=", "processor_ids", ")", ")", "[", "0", "]" ]
42.833333
17.866667
def from_dict(cls, d):
        """
        Create from dict.

        Args:
            d: A dict with all data for a band structure object.

        Returns:
            A BandStructure object
        """
        labels_dict = d['labels_dict']
        projections = {}
        structure = None
        if isinstance(list(d['bands'].values())[0], dict):
            eigenvals = {Spin(int(k)): np.array(d['bands'][k]['data'])
                         for k in d['bands']}
        else:
            eigenvals = {Spin(int(k)): d['bands'][k] for k in d['bands']}

        if 'structure' in d:
            structure = Structure.from_dict(d['structure'])

        if d.get('projections'):
            projections = {Spin(int(spin)): np.array(v)
                           for spin, v in d["projections"].items()}

        return BandStructure(
            d['kpoints'], eigenvals,
            Lattice(d['lattice_rec']['matrix']), d['efermi'],
            labels_dict, structure=structure, projections=projections)
[ "def", "from_dict", "(", "cls", ",", "d", ")", ":", "labels_dict", "=", "d", "[", "'labels_dict'", "]", "projections", "=", "{", "}", "structure", "=", "None", "if", "isinstance", "(", "list", "(", "d", "[", "'bands'", "]", ".", "values", "(", ")", ")", "[", "0", "]", ",", "dict", ")", ":", "eigenvals", "=", "{", "Spin", "(", "int", "(", "k", ")", ")", ":", "np", ".", "array", "(", "d", "[", "'bands'", "]", "[", "k", "]", "[", "'data'", "]", ")", "for", "k", "in", "d", "[", "'bands'", "]", "}", "else", ":", "eigenvals", "=", "{", "Spin", "(", "int", "(", "k", ")", ")", ":", "d", "[", "'bands'", "]", "[", "k", "]", "for", "k", "in", "d", "[", "'bands'", "]", "}", "if", "'structure'", "in", "d", ":", "structure", "=", "Structure", ".", "from_dict", "(", "d", "[", "'structure'", "]", ")", "if", "d", ".", "get", "(", "'projections'", ")", ":", "projections", "=", "{", "Spin", "(", "int", "(", "spin", ")", ")", ":", "np", ".", "array", "(", "v", ")", "for", "spin", ",", "v", "in", "d", "[", "\"projections\"", "]", ".", "items", "(", ")", "}", "return", "BandStructure", "(", "d", "[", "'kpoints'", "]", ",", "eigenvals", ",", "Lattice", "(", "d", "[", "'lattice_rec'", "]", "[", "'matrix'", "]", ")", ",", "d", "[", "'efermi'", "]", ",", "labels_dict", ",", "structure", "=", "structure", ",", "projections", "=", "projections", ")" ]
34.821429
18.75
def _current_color(self, which=0):
        """Returns a color for the queue.

        Parameters
        ----------
        which : int (optional, default: ``0``)
            Specifies the type of color to return.

        Returns
        -------
        color : list
            Returns an RGBA color that is represented as a list with 4
            entries where each entry can be any floating point number
            between 0 and 1.

            * If ``which`` is 1 then it returns the color of the edge
              as if it were a self loop. This is specified in
              ``colors['edge_loop_color']``.
            * If ``which`` is 2 then it returns the color of the vertex
              pen color (defined as color/vertex_color in
              :meth:`.QueueNetworkDiGraph.graph_draw`). This is
              specified in ``colors['vertex_color']``.
            * If ``which`` is anything else, then it returns a shade
              of the edge that is proportional to the number of agents
              in the system -- which includes those being served and
              those waiting to be served. More agents correspond to
              darker edge colors. Uses ``colors['vertex_fill_color']``
              if the queue sits on a loop, and ``colors['edge_color']``
              otherwise.
        """
        if which == 1:
            color = self.colors['edge_loop_color']

        elif which == 2:
            color = self.colors['vertex_color']

        else:
            div = self.coloring_sensitivity * self.num_servers + 1.
            tmp = 1. - min(self.num_system / div, 1)

            if self.edge[0] == self.edge[1]:
                color = [i * tmp for i in self.colors['vertex_fill_color']]
                color[3] = 1.0
            else:
                color = [i * tmp for i in self.colors['edge_color']]
                color[3] = 1 / 2.

        return color
[ "def", "_current_color", "(", "self", ",", "which", "=", "0", ")", ":", "if", "which", "==", "1", ":", "color", "=", "self", ".", "colors", "[", "'edge_loop_color'", "]", "elif", "which", "==", "2", ":", "color", "=", "self", ".", "colors", "[", "'vertex_color'", "]", "else", ":", "div", "=", "self", ".", "coloring_sensitivity", "*", "self", ".", "num_servers", "+", "1.", "tmp", "=", "1.", "-", "min", "(", "self", ".", "num_system", "/", "div", ",", "1", ")", "if", "self", ".", "edge", "[", "0", "]", "==", "self", ".", "edge", "[", "1", "]", ":", "color", "=", "[", "i", "*", "tmp", "for", "i", "in", "self", ".", "colors", "[", "'vertex_fill_color'", "]", "]", "color", "[", "3", "]", "=", "1.0", "else", ":", "color", "=", "[", "i", "*", "tmp", "for", "i", "in", "self", ".", "colors", "[", "'edge_color'", "]", "]", "color", "[", "3", "]", "=", "1", "/", "2.", "return", "color" ]
38.729167
21.520833
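The occupancy shading above reduces to scaling the RGB channels by a factor in [0, 1]. A standalone sketch of just that computation (the constants are illustrative):

coloring_sensitivity = 2
num_servers = 1
edge_color = [0.7, 0.7, 0.7, 0.5]  # RGBA

def shade(num_system):
    # more agents in the system -> smaller tmp -> darker edge
    div = coloring_sensitivity * num_servers + 1.
    tmp = 1. - min(num_system / div, 1)
    color = [c * tmp for c in edge_color]
    color[3] = 1 / 2.
    return color

print(shade(0))   # lightest: [0.7, 0.7, 0.7, 0.5]
print(shade(3))   # saturated dark: [0.0, 0.0, 0.0, 0.5]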
def run_muse_sump_perchrom(job, muse_output, univ_options, muse_options, chrom):
    """
    Run MuSE sump on the MuSE call generated vcf.

    :param toil.fileStore.FileID muse_output: vcf generated by MuSE call
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict muse_options: Options specific to MuSE
    :param str chrom: Chromosome to process
    :return: fsID for the chromosome vcf
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {
        'MuSE.txt': muse_output,
        'dbsnp_coding.vcf.gz': muse_options['dbsnp_vcf'],
        'dbsnp_coding.vcf.gz.tbi.tmp': muse_options['dbsnp_tbi']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    tbi = os.path.splitext(input_files['dbsnp_coding.vcf.gz.tbi.tmp'])[0]
    time.sleep(2)
    shutil.copy(input_files['dbsnp_coding.vcf.gz.tbi.tmp'], tbi)
    os.chmod(tbi, 0777)
    open(tbi, 'a').close()
    input_files = {key: docker_path(path) for key, path in input_files.items()}
    output_file = ''.join([work_dir, '/', chrom, '.vcf'])
    parameters = ['sump',
                  '-I', input_files['MuSE.txt'],
                  '-O', docker_path(output_file),
                  '-D', input_files['dbsnp_coding.vcf.gz'],
                  '-E']
    docker_call(tool='muse', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'], tool_version=muse_options['version'])
    outfile = job.fileStore.writeGlobalFile(output_file)
    export_results(job, outfile, output_file, univ_options, subfolder='mutations/muse')
    job.fileStore.logToMaster('Ran MuSE sump on %s:%s successfully'
                              % (univ_options['patient'], chrom))
    return outfile
[ "def", "run_muse_sump_perchrom", "(", "job", ",", "muse_output", ",", "univ_options", ",", "muse_options", ",", "chrom", ")", ":", "work_dir", "=", "os", ".", "getcwd", "(", ")", "input_files", "=", "{", "'MuSE.txt'", ":", "muse_output", ",", "'dbsnp_coding.vcf.gz'", ":", "muse_options", "[", "'dbsnp_vcf'", "]", ",", "'dbsnp_coding.vcf.gz.tbi.tmp'", ":", "muse_options", "[", "'dbsnp_tbi'", "]", "}", "input_files", "=", "get_files_from_filestore", "(", "job", ",", "input_files", ",", "work_dir", ",", "docker", "=", "False", ")", "tbi", "=", "os", ".", "path", ".", "splitext", "(", "input_files", "[", "'dbsnp_coding.vcf.gz.tbi.tmp'", "]", ")", "[", "0", "]", "time", ".", "sleep", "(", "2", ")", "shutil", ".", "copy", "(", "input_files", "[", "'dbsnp_coding.vcf.gz.tbi.tmp'", "]", ",", "tbi", ")", "os", ".", "chmod", "(", "tbi", ",", "0777", ")", "open", "(", "tbi", ",", "'a'", ")", ".", "close", "(", ")", "input_files", "=", "{", "key", ":", "docker_path", "(", "path", ")", "for", "key", ",", "path", "in", "input_files", ".", "items", "(", ")", "}", "output_file", "=", "''", ".", "join", "(", "[", "work_dir", ",", "'/'", ",", "chrom", ",", "'.vcf'", "]", ")", "parameters", "=", "[", "'sump'", ",", "'-I'", ",", "input_files", "[", "'MuSE.txt'", "]", ",", "'-O'", ",", "docker_path", "(", "output_file", ")", ",", "'-D'", ",", "input_files", "[", "'dbsnp_coding.vcf.gz'", "]", ",", "'-E'", "]", "docker_call", "(", "tool", "=", "'muse'", ",", "tool_parameters", "=", "parameters", ",", "work_dir", "=", "work_dir", ",", "dockerhub", "=", "univ_options", "[", "'dockerhub'", "]", ",", "tool_version", "=", "muse_options", "[", "'version'", "]", ")", "outfile", "=", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "output_file", ")", "export_results", "(", "job", ",", "outfile", ",", "output_file", ",", "univ_options", ",", "subfolder", "=", "'mutations/muse'", ")", "job", ".", "fileStore", ".", "logToMaster", "(", "'Ran MuSE sump on %s:%s successfully'", "%", "(", "univ_options", "[", "'patient'", "]", ",", "chrom", ")", ")", "return", "outfile" ]
46
22.421053
def async_comp_check(self, original, loc, tokens): """Check for Python 3.6 async comprehension.""" return self.check_py("36", "async comprehension", original, loc, tokens)
[ "def", "async_comp_check", "(", "self", ",", "original", ",", "loc", ",", "tokens", ")", ":", "return", "self", ".", "check_py", "(", "\"36\"", ",", "\"async comprehension\"", ",", "original", ",", "loc", ",", "tokens", ")" ]
61.666667
16.666667
def smart_search_pool(self, auth, query_str, search_options=None, extra_query=None):
        """ Perform a smart search on pool list.

            * `auth` [BaseAuth]
                AAA options.
            * `query_str` [string]
                Search string
            * `search_options` [options_dict]
                Search options. See :func:`search_pool`.
            * `extra_query` [dict_to_sql]
                Extra search terms, will be ANDed together with what is
                extracted from the query string.

            Return a dict with three elements:
                * :attr:`interpretation` - How the query string was interpreted.
                * :attr:`search_options` - Various search_options.
                * :attr:`result` - The search result.

                The :attr:`interpretation` is given as a list of dicts, each
                explaining how a part of the search key was interpreted (i.e. what
                pool attribute the search operation was performed on).

                The :attr:`result` is a list of dicts containing the search result.

            The smart search function tries to convert the query from a text
            string to a `query` dict which is passed to the
            :func:`search_pool` function.  If multiple search keys are
            detected, they are combined with a logical AND.

            It will basically just take each search term and try to match it
            against the name or description column with regex match.

            See the :func:`search_pool` function for an explanation of the
            `search_options` argument.

            This is the documentation of the internal backend function. It's
            exposed over XML-RPC, please also see the XML-RPC documentation for
            :py:func:`nipap.xmlrpc.NipapXMLRPC.smart_search_pool` for full
            understanding.
        """

        if search_options is None:
            search_options = {}

        self._logger.debug("smart_search_pool query string: %s" % query_str)

        success, query = self._parse_pool_query(query_str)
        if not success:
            return {
                'interpretation': query,
                'search_options': search_options,
                'result': [],
                'error': True,
                'error_message': 'query interpretation failed'
            }

        if extra_query is not None:
            query = {
                'operator': 'and',
                'val1': query,
                'val2': extra_query
            }

        self._logger.debug("smart_search_pool; query expanded to: %s" % unicode(query))

        search_result = self.search_pool(auth, query, search_options)
        search_result['interpretation'] = query
        search_result['error'] = False

        return search_result
[ "def", "smart_search_pool", "(", "self", ",", "auth", ",", "query_str", ",", "search_options", "=", "None", ",", "extra_query", "=", "None", ")", ":", "if", "search_options", "is", "None", ":", "search_options", "=", "{", "}", "self", ".", "_logger", ".", "debug", "(", "\"smart_search_pool query string: %s\"", "%", "query_str", ")", "success", ",", "query", "=", "self", ".", "_parse_pool_query", "(", "query_str", ")", "if", "not", "success", ":", "return", "{", "'interpretation'", ":", "query", ",", "'search_options'", ":", "search_options", ",", "'result'", ":", "[", "]", ",", "'error'", ":", "True", ",", "'error_message'", ":", "'query interpretation failed'", "}", "if", "extra_query", "is", "not", "None", ":", "query", "=", "{", "'operator'", ":", "'and'", ",", "'val1'", ":", "query", ",", "'val2'", ":", "extra_query", "}", "self", ".", "_logger", ".", "debug", "(", "\"smart_search_pool; query expanded to: %s\"", "%", "unicode", "(", "query", ")", ")", "search_result", "=", "self", ".", "search_pool", "(", "auth", ",", "query", ",", "search_options", ")", "search_result", "[", "'interpretation'", "]", "=", "query", "search_result", "[", "'error'", "]", "=", "False", "return", "search_result" ]
39.357143
23.771429
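The extra_query wrapping above simply nests the parsed query under a logical AND node. A sketch of what the combined structure looks like; the parsed query shown is hypothetical, since the actual shape depends on _parse_pool_query:

# what _parse_pool_query might produce for the string "web" (hypothetical)
parsed = {'operator': 'regex_match', 'val1': 'name', 'val2': 'web'}
extra_query = {'operator': 'equals', 'val1': 'vrf_id', 'val2': 3}

# the combined query ANDs the two subtrees, exactly as in the code above
query = {'operator': 'and', 'val1': parsed, 'val2': extra_query}
print(query)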
def get_seed(self):
        """
        Collects the required information to generate a data structure
        that can be used to recreate exactly the same geometry object
        via *\*\*kwargs*.

        :returns: Object's sufficient info to initialize it.
        :rtype: dict

        .. seealso::
            * :func:`get_map`
            * :func:`map2pyny`
            * :func:`map2seed`
            * :func:`explode_map`
        """
        self.seed = {'places': [place.get_seed() for place in self]}
        return self.seed
[ "def", "get_seed", "(", "self", ")", ":", "self", ".", "seed", "=", "{", "'places'", ":", "[", "place", ".", "get_seed", "(", ")", "for", "place", "in", "self", "]", "}", "return", "self", ".", "seed" ]
29.15
19.35
def attach_file(self, path, mimetype=None):
        """Attach a file from the filesystem."""
        filename = os.path.basename(path)
        content = open(path, "rb").read()
        self.attach(filename, content, mimetype)
[ "def", "attach_file", "(", "self", ",", "path", ",", "mimetype", "=", "None", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "content", "=", "open", "(", "path", ",", "\"rb\"", ")", ".", "read", "(", ")", "self", ".", "attach", "(", "filename", ",", "content", ",", "mimetype", ")" ]
45.2
3.2
def chung_dense(T, MW, Tc, Vc, omega, Cvm, Vm, mu, dipole, association=0):
    r'''Estimates the thermal conductivity of a gas at high pressure as a
    function of temperature using the reference fluid method of
    Chung [1]_ as shown in [2]_.

    .. math::
        \lambda = \frac{31.2 \eta^\circ \Psi}{M'}(G_2^{-1} + B_6 y)+qB_7y^2T_r^{1/2}G_2

        \Psi = 1 + \alpha \left\{[0.215+0.28288\alpha-1.061\beta+0.26665Z]/
        [0.6366+\beta Z + 1.061 \alpha \beta]\right\}

        \alpha = \frac{C_v}{R}-1.5

        \beta = 0.7862-0.7109\omega + 1.3168\omega^2

        Z=2+10.5T_r^2

        q = 3.586\times 10^{-3} (T_c/M')^{1/2}/V_c^{2/3}

        y = \frac{V_c}{6V}

        G_1 = \frac{1-0.5y}{(1-y)^3}

        G_2 = \frac{(B_1/y)[1-\exp(-B_4y)]+ B_2G_1\exp(B_5y) + B_3G_1}
        {B_1B_4 + B_2 + B_3}

        B_i = a_i + b_i \omega + c_i \mu_r^4 + d_i \kappa

    Parameters
    ----------
    T : float
        Temperature of the gas [K]
    MW : float
        Molecular weight of the gas [g/mol]
    Tc : float
        Critical temperature of the gas [K]
    Vc : float
        Critical volume of the gas [m^3/mol]
    omega : float
        Acentric factor of the gas [-]
    Cvm : float
        Molar constant volume heat capacity of the gas [J/mol/K]
    Vm : float
        Molar volume of the gas at T and P [m^3/mol]
    mu : float
        Low-pressure gas viscosity [Pa*s]
    dipole : float
        Dipole moment [debye]
    association : float, optional
        Association factor [-]

    Returns
    -------
    kg : float
        Estimated dense gas thermal conductivity [W/m/K]

    Notes
    -----
    MW internally converted to kg/g-mol. Vm internally converted to mL/mol.
    The form in [2]_ is not the latest form as presented in [1]_.
    Association factor is assumed 0. Relates to the polarity of the gas.

    Coefficients as follows:

    ais = [2.4166E+0, -5.0924E-1, 6.6107E+0, 1.4543E+1, 7.9274E-1, -5.8634E+0,
           9.1089E+1]

    bis = [7.4824E-1, -1.5094E+0, 5.6207E+0, -8.9139E+0, 8.2019E-1, 1.2801E+1,
           1.2811E+2]

    cis = [-9.1858E-1, -4.9991E+1, 6.4760E+1, -5.6379E+0, -6.9369E-1,
           9.5893E+0, -5.4217E+1]

    dis = [1.2172E+2, 6.9983E+1, 2.7039E+1, 7.4344E+1, 6.3173E+0, 6.5529E+1,
           5.2381E+2]

    Examples
    --------
    >>> chung_dense(T=473., MW=42.081, Tc=364.9, Vc=184.6E-6, omega=0.142,
    ... Cvm=82.67, Vm=172.1E-6, mu=134E-7, dipole=0.4)
    0.06160570379787278

    References
    ----------
    .. [1] Chung, Ting Horng, Mohammad Ajlan, Lloyd L. Lee, and Kenneth E.
       Starling. "Generalized Multiparameter Correlation for Nonpolar and Polar
       Fluid Transport Properties." Industrial & Engineering Chemistry Research
       27, no. 4 (April 1, 1988): 671-79. doi:10.1021/ie00076a024.
    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
       New York: McGraw-Hill Professional, 2000.
    '''
    ais = [2.4166E+0, -5.0924E-1, 6.6107E+0, 1.4543E+1, 7.9274E-1, -5.8634E+0,
           9.1089E+1]
    bis = [7.4824E-1, -1.5094E+0, 5.6207E+0, -8.9139E+0, 8.2019E-1, 1.2801E+1,
           1.2811E+2]
    cis = [-9.1858E-1, -4.9991E+1, 6.4760E+1, -5.6379E+0, -6.9369E-1,
           9.5893E+0, -5.4217E+1]
    dis = [1.2172E+2, 6.9983E+1, 2.7039E+1, 7.4344E+1, 6.3173E+0, 6.5529E+1,
           5.2381E+2]
    Tr = T/Tc
    mur = 131.3*dipole/(Vc*1E6*Tc)**0.5

    # From Chung Method
    alpha = Cvm/R - 1.5
    beta = 0.7862 - 0.7109*omega + 1.3168*omega**2
    Z = 2 + 10.5*(T/Tc)**2
    psi = 1 + alpha*((0.215 + 0.28288*alpha - 1.061*beta + 0.26665*Z)/(0.6366 + beta*Z + 1.061*alpha*beta))

    y = Vc/(6*Vm)
    B1, B2, B3, B4, B5, B6, B7 = [ais[i] + bis[i]*omega + cis[i]*mur**4 + dis[i]*association for i in range(7)]
    G1 = (1 - 0.5*y)/(1. - y)**3
    G2 = (B1/y*(1 - exp(-B4*y)) + B2*G1*exp(B5*y) + B3*G1)/(B1*B4 + B2 + B3)

    q = 3.586E-3*(Tc/(MW/1000.))**0.5/(Vc*1E6)**(2/3.)
    return 31.2*mu*psi/(MW/1000.)*(G2**-1 + B6*y) + q*B7*y**2*Tr**0.5*G2
[ "def", "chung_dense", "(", "T", ",", "MW", ",", "Tc", ",", "Vc", ",", "omega", ",", "Cvm", ",", "Vm", ",", "mu", ",", "dipole", ",", "association", "=", "0", ")", ":", "ais", "=", "[", "2.4166E+0", ",", "-", "5.0924E-1", ",", "6.6107E+0", ",", "1.4543E+1", ",", "7.9274E-1", ",", "-", "5.8634E+0", ",", "9.1089E+1", "]", "bis", "=", "[", "7.4824E-1", ",", "-", "1.5094E+0", ",", "5.6207E+0", ",", "-", "8.9139E+0", ",", "8.2019E-1", ",", "1.2801E+1", ",", "1.2811E+2", "]", "cis", "=", "[", "-", "9.1858E-1", ",", "-", "4.9991E+1", ",", "6.4760E+1", ",", "-", "5.6379E+0", ",", "-", "6.9369E-1", ",", "9.5893E+0", ",", "-", "5.4217E+1", "]", "dis", "=", "[", "1.2172E+2", ",", "6.9983E+1", ",", "2.7039E+1", ",", "7.4344E+1", ",", "6.3173E+0", ",", "6.5529E+1", ",", "5.2381E+2", "]", "Tr", "=", "T", "/", "Tc", "mur", "=", "131.3", "*", "dipole", "/", "(", "Vc", "*", "1E6", "*", "Tc", ")", "**", "0.5", "# From Chung Method", "alpha", "=", "Cvm", "/", "R", "-", "1.5", "beta", "=", "0.7862", "-", "0.7109", "*", "omega", "+", "1.3168", "*", "omega", "**", "2", "Z", "=", "2", "+", "10.5", "*", "(", "T", "/", "Tc", ")", "**", "2", "psi", "=", "1", "+", "alpha", "*", "(", "(", "0.215", "+", "0.28288", "*", "alpha", "-", "1.061", "*", "beta", "+", "0.26665", "*", "Z", ")", "/", "(", "0.6366", "+", "beta", "*", "Z", "+", "1.061", "*", "alpha", "*", "beta", ")", ")", "y", "=", "Vc", "/", "(", "6", "*", "Vm", ")", "B1", ",", "B2", ",", "B3", ",", "B4", ",", "B5", ",", "B6", ",", "B7", "=", "[", "ais", "[", "i", "]", "+", "bis", "[", "i", "]", "*", "omega", "+", "cis", "[", "i", "]", "*", "mur", "**", "4", "+", "dis", "[", "i", "]", "*", "association", "for", "i", "in", "range", "(", "7", ")", "]", "G1", "=", "(", "1", "-", "0.5", "*", "y", ")", "/", "(", "1.", "-", "y", ")", "**", "3", "G2", "=", "(", "B1", "/", "y", "*", "(", "1", "-", "exp", "(", "-", "B4", "*", "y", ")", ")", "+", "B2", "*", "G1", "*", "exp", "(", "B5", "*", "y", ")", "+", "B3", "*", "G1", ")", "/", "(", "B1", "*", "B4", "+", "B2", "+", "B3", ")", "q", "=", "3.586E-3", "*", "(", "Tc", "/", "(", "MW", "/", "1000.", ")", ")", "**", "0.5", "/", "(", "Vc", "*", "1E6", ")", "**", "(", "2", "/", "3.", ")", "return", "31.2", "*", "mu", "*", "psi", "/", "(", "MW", "/", "1000.", ")", "*", "(", "G2", "**", "-", "1", "+", "B6", "*", "y", ")", "+", "q", "*", "B7", "*", "y", "**", "2", "*", "Tr", "**", "0.5", "*", "G2" ]
35.111111
27.518519
def __load_project_from_file_path(file_path):
    '''
    Load a docker-compose project from a file path

    :param file_path: path to the docker-compose file
    :return: the loaded project, or the result of __handle_except on failure
    '''
    try:
        project = get_project(project_dir=os.path.dirname(file_path),
                              config_path=[os.path.basename(file_path)])
    except Exception as inst:
        return __handle_except(inst)

    return project
[ "def", "__load_project_from_file_path", "(", "file_path", ")", ":", "try", ":", "project", "=", "get_project", "(", "project_dir", "=", "os", ".", "path", ".", "dirname", "(", "file_path", ")", ",", "config_path", "=", "[", "os", ".", "path", ".", "basename", "(", "file_path", ")", "]", ")", "except", "Exception", "as", "inst", ":", "return", "__handle_except", "(", "inst", ")", "return", "project" ]
28.230769
23.153846
def get_public_domain_metadata(self): """Gets the metadata for the public domain flag. return: (osid.Metadata) - metadata for the public domain *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['public_domain']) metadata.update({'existing_boolean_values': self._my_map['publicDomain']}) return Metadata(**metadata)
[ "def", "get_public_domain_metadata", "(", "self", ")", ":", "# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template", "metadata", "=", "dict", "(", "self", ".", "_mdata", "[", "'public_domain'", "]", ")", "metadata", ".", "update", "(", "{", "'existing_boolean_values'", ":", "self", ".", "_my_map", "[", "'publicDomain'", "]", "}", ")", "return", "Metadata", "(", "*", "*", "metadata", ")" ]
45.363636
22.545455
def fit_freq_min_max(self, training_signal): """Defines a spectral mask based on training data using min and max values of each frequency component Args: training_signal: Training data """ window_length = len(self.window) window_weight = sum(self.window) max_mask = np.zeros(int(window_length / 2) + 1) min_mask = np.zeros(int(window_length / 2) + 1) for i in range(0, len(training_signal) - window_length - 1): rfft = np.fft.rfft(training_signal[i:i + window_length] * self.window) temp = np.abs(rfft) / window_weight max_mask = np.maximum(max_mask, temp) min_mask = np.minimum(min_mask, temp) self.mask_top = self.gain * max_mask self.mask_bottom = min_mask / self.gain
[ "def", "fit_freq_min_max", "(", "self", ",", "training_signal", ")", ":", "window_length", "=", "len", "(", "self", ".", "window", ")", "window_weight", "=", "sum", "(", "self", ".", "window", ")", "max_mask", "=", "np", ".", "zeros", "(", "int", "(", "window_length", "/", "2", ")", "+", "1", ")", "min_mask", "=", "np", ".", "zeros", "(", "int", "(", "window_length", "/", "2", ")", "+", "1", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "training_signal", ")", "-", "window_length", "-", "1", ")", ":", "rfft", "=", "np", ".", "fft", ".", "rfft", "(", "training_signal", "[", "i", ":", "i", "+", "window_length", "]", "*", "self", ".", "window", ")", "temp", "=", "np", ".", "abs", "(", "rfft", ")", "/", "window_weight", "max_mask", "=", "np", ".", "maximum", "(", "max_mask", ",", "temp", ")", "min_mask", "=", "np", ".", "minimum", "(", "min_mask", ",", "temp", ")", "self", ".", "mask_top", "=", "self", ".", "gain", "*", "max_mask", "self", ".", "mask_bottom", "=", "min_mask", "/", "self", ".", "gain" ]
37.681818
17.363636
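A self-contained sketch of the sliding-window spectral envelope computed in fit_freq_min_max above; note that a running minimum has to start from +inf rather than zeros to ever record anything above zero:

import numpy as np

window = np.hanning(64)
gain = 1.5
signal = np.random.default_rng(0).normal(size=1024)  # stand-in training data

n = len(window)
weight = window.sum()
max_mask = np.zeros(n // 2 + 1)
min_mask = np.full(n // 2 + 1, np.inf)  # start high so np.minimum can pull it down
for i in range(len(signal) - n - 1):
    # normalized magnitude spectrum of each windowed frame
    spectrum = np.abs(np.fft.rfft(signal[i:i + n] * window)) / weight
    max_mask = np.maximum(max_mask, spectrum)
    min_mask = np.minimum(min_mask, spectrum)

mask_top = gain * max_mask
mask_bottom = min_mask / gain
print(mask_top[:4], mask_bottom[:4])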
def process_iter(): """Return a generator yielding a Process class instance for all running processes on the local machine. Every new Process instance is only created once and then cached into an internal table which is updated every time this is used. The sorting order in which processes are yielded is based on their PIDs. """ def add(pid): proc = Process(pid) _pmap[proc.pid] = proc return proc def remove(pid): _pmap.pop(pid, None) a = set(get_pid_list()) b = set(_pmap.keys()) new_pids = a - b gone_pids = b - a for pid in gone_pids: remove(pid) for pid, proc in sorted(list(_pmap.items()) + \ list(dict.fromkeys(new_pids).items())): try: if proc is None: # new process yield add(pid) else: # use is_running() to check whether PID has been reused by # another process in which case yield a new Process instance if proc.is_running(): yield proc else: yield add(pid) except NoSuchProcess: remove(pid) except AccessDenied: # Process creation time can't be determined hence there's # no way to tell whether the pid of the cached process # has been reused. Just return the cached version. yield proc
[ "def", "process_iter", "(", ")", ":", "def", "add", "(", "pid", ")", ":", "proc", "=", "Process", "(", "pid", ")", "_pmap", "[", "proc", ".", "pid", "]", "=", "proc", "return", "proc", "def", "remove", "(", "pid", ")", ":", "_pmap", ".", "pop", "(", "pid", ",", "None", ")", "a", "=", "set", "(", "get_pid_list", "(", ")", ")", "b", "=", "set", "(", "_pmap", ".", "keys", "(", ")", ")", "new_pids", "=", "a", "-", "b", "gone_pids", "=", "b", "-", "a", "for", "pid", "in", "gone_pids", ":", "remove", "(", "pid", ")", "for", "pid", ",", "proc", "in", "sorted", "(", "list", "(", "_pmap", ".", "items", "(", ")", ")", "+", "list", "(", "dict", ".", "fromkeys", "(", "new_pids", ")", ".", "items", "(", ")", ")", ")", ":", "try", ":", "if", "proc", "is", "None", ":", "# new process", "yield", "add", "(", "pid", ")", "else", ":", "# use is_running() to check whether PID has been reused by", "# another process in which case yield a new Process instance", "if", "proc", ".", "is_running", "(", ")", ":", "yield", "proc", "else", ":", "yield", "add", "(", "pid", ")", "except", "NoSuchProcess", ":", "remove", "(", "pid", ")", "except", "AccessDenied", ":", "# Process creation time can't be determined hence there's", "# no way to tell whether the pid of the cached process", "# has been reused. Just return the cached version.", "yield", "proc" ]
32.25
19.886364
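The cache-maintenance idea in process_iter above — diff the live key set against a cached map, evict the gone, lazily add the new — works for any keyed registry. A generic sketch:

_cache = {}

def make_handle(key):
    return {'key': key}  # stand-in for an expensive-to-build object

def iter_handles(live_keys):
    live = set(live_keys)
    for gone in set(_cache) - live:   # evict entries that disappeared
        _cache.pop(gone, None)
    for key in sorted(live):
        if key not in _cache:         # build handles only for new keys
            _cache[key] = make_handle(key)
        yield _cache[key]

print([h['key'] for h in iter_handles([3, 1, 2])])
print([h['key'] for h in iter_handles([2, 4])])  # 1 and 3 evicted, 4 added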
def main(args): """ %prog newicktree Plot Newick formatted tree. The gene structure can be plotted along if --gffdir is given. The gff file needs to be `genename.gff`. If --sizes is on, also show the number of amino acids. With --barcode a mapping file can be provided to convert seq names to eg. species names, useful in unified tree display. This file should have distinctive barcodes in column1 and new names in column2, tab delimited. """ p = OptionParser(main.__doc__) p.add_option("--outgroup", help="Outgroup for rerooting the tree. " + "Use comma to separate multiple taxa.") p.add_option("--noreroot", default=False, action="store_true", help="Don't reroot the input tree [default: %default]") p.add_option("--rmargin", default=.3, type="float", help="Set blank rmargin to the right [default: %default]") p.add_option("--gffdir", default=None, help="The directory that contain GFF files [default: %default]") p.add_option("--sizes", default=None, help="The FASTA file or the sizes file [default: %default]") p.add_option("--SH", default=None, type="string", help="SH test p-value [default: %default]") p.add_option("--scutoff", default=0, type="int", help="cutoff for displaying node support, 0-100 [default: %default]") p.add_option("--barcode", default=None, help="path to seq names barcode mapping file: " "barcode<tab>new_name [default: %default]") p.add_option("--leafcolor", default="k", help="Font color for the OTUs, or path to a file " "containing color mappings: leafname<tab>color [default: %default]") p.add_option("--leaffont", default=12, help="Font size for the OTUs") p.add_option("--geoscale", default=False, action="store_true", help="Plot geological scale") opts, args, iopts = p.set_image_options(args, figsize="8x6") if len(args) != 1: sys.exit(not p.print_help()) datafile, = args outgroup = None reroot = not opts.noreroot if opts.outgroup: outgroup = opts.outgroup.split(",") if datafile == "demo": tx = """(((Os02g0681100:0.1151,Sb04g031800:0.11220)1.0:0.0537, (Os04g0578800:0.04318,Sb06g026210:0.04798)-1.0:0.08870)1.0:0.06985, ((Os03g0124100:0.08845,Sb01g048930:0.09055)1.0:0.05332, (Os10g0534700:0.06592,Sb01g030630:0.04824)-1.0:0.07886):0.09389);""" else: logging.debug("Load tree file `{0}`.".format(datafile)) tx = open(datafile).read() pf = datafile.rsplit(".", 1)[0] fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) if opts.geoscale: draw_geoscale(root) else: if op.isfile(opts.leafcolor): leafcolor = "k" leafcolorfile = opts.leafcolor else: leafcolor = opts.leafcolor leafcolorfile = None draw_tree(root, tx, rmargin=opts.rmargin, leafcolor=leafcolor, outgroup=outgroup, reroot=reroot, gffdir=opts.gffdir, sizes=opts.sizes, SH=opts.SH, scutoff=opts.scutoff, barcodefile=opts.barcode, leafcolorfile=leafcolorfile, leaffont=opts.leaffont) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
[ "def", "main", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "main", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--outgroup\"", ",", "help", "=", "\"Outgroup for rerooting the tree. \"", "+", "\"Use comma to separate multiple taxa.\"", ")", "p", ".", "add_option", "(", "\"--noreroot\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Don't reroot the input tree [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--rmargin\"", ",", "default", "=", ".3", ",", "type", "=", "\"float\"", ",", "help", "=", "\"Set blank rmargin to the right [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--gffdir\"", ",", "default", "=", "None", ",", "help", "=", "\"The directory that contain GFF files [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--sizes\"", ",", "default", "=", "None", ",", "help", "=", "\"The FASTA file or the sizes file [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--SH\"", ",", "default", "=", "None", ",", "type", "=", "\"string\"", ",", "help", "=", "\"SH test p-value [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--scutoff\"", ",", "default", "=", "0", ",", "type", "=", "\"int\"", ",", "help", "=", "\"cutoff for displaying node support, 0-100 [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--barcode\"", ",", "default", "=", "None", ",", "help", "=", "\"path to seq names barcode mapping file: \"", "\"barcode<tab>new_name [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--leafcolor\"", ",", "default", "=", "\"k\"", ",", "help", "=", "\"Font color for the OTUs, or path to a file \"", "\"containing color mappings: leafname<tab>color [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--leaffont\"", ",", "default", "=", "12", ",", "help", "=", "\"Font size for the OTUs\"", ")", "p", ".", "add_option", "(", "\"--geoscale\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Plot geological scale\"", ")", "opts", ",", "args", ",", "iopts", "=", "p", ".", "set_image_options", "(", "args", ",", "figsize", "=", "\"8x6\"", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "datafile", ",", "=", "args", "outgroup", "=", "None", "reroot", "=", "not", "opts", ".", "noreroot", "if", "opts", ".", "outgroup", ":", "outgroup", "=", "opts", ".", "outgroup", ".", "split", "(", "\",\"", ")", "if", "datafile", "==", "\"demo\"", ":", "tx", "=", "\"\"\"(((Os02g0681100:0.1151,Sb04g031800:0.11220)1.0:0.0537,\n (Os04g0578800:0.04318,Sb06g026210:0.04798)-1.0:0.08870)1.0:0.06985,\n ((Os03g0124100:0.08845,Sb01g048930:0.09055)1.0:0.05332,\n (Os10g0534700:0.06592,Sb01g030630:0.04824)-1.0:0.07886):0.09389);\"\"\"", "else", ":", "logging", ".", "debug", "(", "\"Load tree file `{0}`.\"", ".", "format", "(", "datafile", ")", ")", "tx", "=", "open", "(", "datafile", ")", ".", "read", "(", ")", "pf", "=", "datafile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "fig", "=", "plt", ".", "figure", "(", "1", ",", "(", "iopts", ".", "w", ",", "iopts", ".", "h", ")", ")", "root", "=", "fig", ".", "add_axes", "(", "[", "0", ",", "0", ",", "1", ",", "1", "]", ")", "if", "opts", ".", "geoscale", ":", "draw_geoscale", "(", "root", ")", "else", ":", "if", "op", ".", "isfile", "(", "opts", ".", "leafcolor", ")", ":", "leafcolor", "=", "\"k\"", "leafcolorfile", "=", "opts", ".", "leafcolor", "else", ":", "leafcolor", "=", "opts", ".", "leafcolor", 
"leafcolorfile", "=", "None", "draw_tree", "(", "root", ",", "tx", ",", "rmargin", "=", "opts", ".", "rmargin", ",", "leafcolor", "=", "leafcolor", ",", "outgroup", "=", "outgroup", ",", "reroot", "=", "reroot", ",", "gffdir", "=", "opts", ".", "gffdir", ",", "sizes", "=", "opts", ".", "sizes", ",", "SH", "=", "opts", ".", "SH", ",", "scutoff", "=", "opts", ".", "scutoff", ",", "barcodefile", "=", "opts", ".", "barcode", ",", "leafcolorfile", "=", "leafcolorfile", ",", "leaffont", "=", "opts", ".", "leaffont", ")", "root", ".", "set_xlim", "(", "0", ",", "1", ")", "root", ".", "set_ylim", "(", "0", ",", "1", ")", "root", ".", "set_axis_off", "(", ")", "image_name", "=", "pf", "+", "\".\"", "+", "iopts", ".", "format", "savefig", "(", "image_name", ",", "dpi", "=", "iopts", ".", "dpi", ",", "iopts", "=", "iopts", ")" ]
40.647059
21.564706
def before_request():
    """Checks to ensure that the session is valid and validates the user's CSRF token is present

    Returns:
        `None`
    """
    if not request.path.startswith('/saml') and not request.path.startswith('/auth'):
        # Validate the session has the items we need
        if 'accounts' not in session:
            logger.debug('Missing \'accounts\' from session object, sending user to login page')
            return BaseView.make_unauth_response()

        # Require the CSRF token to be present if we are performing a change action (add, delete or modify objects)
        # but exclude the SAML endpoints from the CSRF check
        if request.method in ('POST', 'PUT', 'DELETE',):
            if session['csrf_token'] != request.headers.get('X-Csrf-Token'):
                logger.info('CSRF Token is missing or incorrect, sending user to login page')
                abort(403)
[ "def", "before_request", "(", ")", ":", "if", "not", "request", ".", "path", ".", "startswith", "(", "'/saml'", ")", "and", "not", "request", ".", "path", ".", "startswith", "(", "'/auth'", ")", ":", "# Validate the session has the items we need", "if", "'accounts'", "not", "in", "session", ":", "logger", ".", "debug", "(", "'Missing \\'accounts\\' from session object, sending user to login page'", ")", "return", "BaseView", ".", "make_unauth_response", "(", ")", "# Require the CSRF token to be present if we are performing a change action (add, delete or modify objects)", "# but exclude the SAML endpoints from the CSRF check", "if", "request", ".", "method", "in", "(", "'POST'", ",", "'PUT'", ",", "'DELETE'", ",", ")", ":", "if", "session", "[", "'csrf_token'", "]", "!=", "request", ".", "headers", ".", "get", "(", "'X-Csrf-Token'", ")", ":", "logger", ".", "info", "(", "'CSRF Token is missing or incorrect, sending user to login page'", ")", "abort", "(", "403", ")" ]
49.722222
27.388889
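A hook like this is only active once it is registered with the Flask application; a minimal sketch of the wiring, assuming a standard `Flask` app object (the app name is illustrative):

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)

    # Run the check before every request; returning a response from the
    # hook (as make_unauth_response() does) short-circuits view dispatch.
    app.before_request(before_request)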
def add_labels(self, objects, count=1): """Add multiple labels to the sheet. Parameters ---------- objects: iterable An iterable of the objects to add. Each of these will be passed to the add_label method. Note that if this is a generator it will be consumed. count: positive integer or iterable of positive integers, default 1 The number of copies of each label to add. If a single integer, that many copies of every label are added. If an iterable, then each value specifies how many copies of the corresponding label to add. The iterables are advanced in parallel until one is exhausted; extra values in the other one are ignored. This means that if there are fewer count entries than objects, the objects corresponding to the missing counts will not be added to the sheet. Note that if this is a generator it will be consumed. Also note that the drawing function will only be called once for each label and the results copied for the repeats. If the drawing function maintains any state internally then using this parameter may break it. """ # If we can convert it to an int, do so and use the itertools.repeat() # method to create an infinite iterator from it. Otherwise, assume it # is an iterable or sequence. try: count = int(count) except TypeError: pass else: count = repeat(count) # If it is not an iterable (e.g., a list or range object), # create an iterator over it. if not hasattr(count, 'next') and not hasattr(count, '__next__'): count = iter(count) # Go through the objects. for obj in objects: # Check we have a count for this one. try: thiscount = next(count) except StopIteration: break # Draw it. self._draw_label(obj, thiscount)
[ "def", "add_labels", "(", "self", ",", "objects", ",", "count", "=", "1", ")", ":", "# If we can convert it to an int, do so and use the itertools.repeat()", "# method to create an infinite iterator from it. Otherwise, assume it", "# is an iterable or sequence.", "try", ":", "count", "=", "int", "(", "count", ")", "except", "TypeError", ":", "pass", "else", ":", "count", "=", "repeat", "(", "count", ")", "# If it is not an iterable (e.g., a list or range object),", "# create an iterator over it.", "if", "not", "hasattr", "(", "count", ",", "'next'", ")", "and", "not", "hasattr", "(", "count", ",", "'__next__'", ")", ":", "count", "=", "iter", "(", "count", ")", "# Go through the objects.", "for", "obj", "in", "objects", ":", "# Check we have a count for this one.", "try", ":", "thiscount", "=", "next", "(", "count", ")", "except", "StopIteration", ":", "break", "# Draw it.", "self", ".", "_draw_label", "(", "obj", ",", "thiscount", ")" ]
41.28
24.14
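The count-handling idiom above (an int becomes an infinite stream via itertools.repeat, anything else is treated as a per-item iterable) is reusable on its own; a minimal standalone sketch, with invented names:

.. code-block:: python

    from itertools import repeat

    def normalize_counts(count):
        # An integer means "this many copies of every item": turn it into
        # an endless iterator so parallel consumption never exhausts it.
        try:
            return repeat(int(count))
        except TypeError:
            # Otherwise assume it is already an iterable of per-item counts.
            return iter(count)

    counts = normalize_counts(3)          # 3, 3, 3, ...
    per_item = normalize_counts([1, 2])   # 1, then 2, then StopIteration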
def setKeySequenceCounter(self, iKeySequenceValue): """ set the Key sequence counter corresponding to Thread Network master key Args: iKeySequenceValue: key sequence value Returns: True: successful to set the key sequence False: fail to set the key sequence """ print '%s call setKeySequenceCounter' % self.port print iKeySequenceValue try: cmd = WPANCTL_CMD + 'setprop Network:KeyIndex %s' % str(iKeySequenceValue) if self.__sendCommand(cmd)[0] != 'Fail': time.sleep(1) return True else: return False except Exception, e: ModuleHelper.WriteIntoDebugLogger('setKeySequenceCounter() Error: ' + str(e))
[ "def", "setKeySequenceCounter", "(", "self", ",", "iKeySequenceValue", ")", ":", "print", "'%s call setKeySequenceCounter'", "%", "self", ".", "port", "print", "iKeySequenceValue", "try", ":", "cmd", "=", "WPANCTL_CMD", "+", "'setprop Network:KeyIndex %s'", "%", "str", "(", "iKeySequenceValue", ")", "if", "self", ".", "__sendCommand", "(", "cmd", ")", "[", "0", "]", "!=", "'Fail'", ":", "time", ".", "sleep", "(", "1", ")", "return", "True", "else", ":", "return", "False", "except", "Exception", ",", "e", ":", "ModuleHelper", ".", "WriteIntoDebugLogger", "(", "'setKeySequenceCounter() Error: '", "+", "str", "(", "e", ")", ")" ]
37.047619
19.142857
def unset_values(self): """ Resets the user values of all symbols, as if Kconfig.load_config() or Symbol.set_value() had never been called. """ self._warn_for_no_prompt = False try: # set_value() already rejects undefined symbols, and they don't # need to be invalidated (because their value never changes), so we # can just iterate over defined symbols for sym in self.unique_defined_syms: sym.unset_value() for choice in self.unique_choices: choice.unset_value() finally: self._warn_for_no_prompt = True
[ "def", "unset_values", "(", "self", ")", ":", "self", ".", "_warn_for_no_prompt", "=", "False", "try", ":", "# set_value() already rejects undefined symbols, and they don't", "# need to be invalidated (because their value never changes), so we", "# can just iterate over defined symbols", "for", "sym", "in", "self", ".", "unique_defined_syms", ":", "sym", ".", "unset_value", "(", ")", "for", "choice", "in", "self", ".", "unique_choices", ":", "choice", ".", "unset_value", "(", ")", "finally", ":", "self", ".", "_warn_for_no_prompt", "=", "True" ]
38.235294
15.764706
def loop_through_agency(self): """Loop through an agency to grab the definitions for its tables.""" agency = self.agency with open(agency + '.txt') as f: data = eval(f.read()) for table in data: for column in data[table]: value_link = data[table][column] data[table][column] = self.grab_definition(value_link) data = json.dumps(data) with open(agency + '_values.json', 'w') as f: f.write(str(data))
[ "def", "loop_through_agency", "(", "self", ")", ":", "agency", "=", "self", ".", "agency", "with", "open", "(", "agency", "+", "'.txt'", ")", "as", "f", ":", "data", "=", "eval", "(", "f", ".", "read", "(", ")", ")", "for", "table", "in", "data", ":", "for", "column", "in", "data", "[", "table", "]", ":", "value_link", "=", "data", "[", "table", "]", "[", "column", "]", "data", "[", "table", "]", "[", "column", "]", "=", "self", ".", "grab_definition", "(", "value_link", ")", "data", "=", "json", ".", "dumps", "(", "data", ")", "with", "open", "(", "agency", "+", "'_values.json'", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "str", "(", "data", ")", ")" ]
41.916667
9.583333
def reset_mysql(self): """ Resets the MySQL database. """ confirm = self.no_confirm print(""" Cleanup MySQL database: This will truncate all tables and reset the whole database. """) if not confirm: confirm = 'yes' in builtins.input( """ Do you really want to do this? Write 'yes' to confirm: {yes}""" .format(yes='yes' if confirm else '')) if not confirm: print("Did not type yes. Thus aborting.") return print("Resetting database...") try: # initialize DB connection self.conn = pymysql.connect(host=self.mysql["host"], port=self.mysql["port"], db=self.mysql["db"], user=self.mysql["username"], passwd=self.mysql["password"]) self.cursor = self.conn.cursor() self.cursor.execute("TRUNCATE TABLE CurrentVersions") self.cursor.execute("TRUNCATE TABLE ArchiveVersions") self.conn.close() except (pymysql.err.OperationalError, pymysql.ProgrammingError, pymysql.InternalError, pymysql.IntegrityError, TypeError) as error: self.log.error("Database reset error: %s", error)
[ "def", "reset_mysql", "(", "self", ")", ":", "confirm", "=", "self", ".", "no_confirm", "print", "(", "\"\"\"\nCleanup MySQL database:\n This will truncate all tables and reset the whole database.\n\"\"\"", ")", "if", "not", "confirm", ":", "confirm", "=", "'yes'", "in", "builtins", ".", "input", "(", "\"\"\"\n Do you really want to do this? Write 'yes' to confirm: {yes}\"\"\"", ".", "format", "(", "yes", "=", "'yes'", "if", "confirm", "else", "''", ")", ")", "if", "not", "confirm", ":", "print", "(", "\"Did not type yes. Thus aborting.\"", ")", "return", "print", "(", "\"Resetting database...\"", ")", "try", ":", "# initialize DB connection", "self", ".", "conn", "=", "pymysql", ".", "connect", "(", "host", "=", "self", ".", "mysql", "[", "\"host\"", "]", ",", "port", "=", "self", ".", "mysql", "[", "\"port\"", "]", ",", "db", "=", "self", ".", "mysql", "[", "\"db\"", "]", ",", "user", "=", "self", ".", "mysql", "[", "\"username\"", "]", ",", "passwd", "=", "self", ".", "mysql", "[", "\"password\"", "]", ")", "self", ".", "cursor", "=", "self", ".", "conn", ".", "cursor", "(", ")", "self", ".", "cursor", ".", "execute", "(", "\"TRUNCATE TABLE CurrentVersions\"", ")", "self", ".", "cursor", ".", "execute", "(", "\"TRUNCATE TABLE ArchiveVersions\"", ")", "self", ".", "conn", ".", "close", "(", ")", "except", "(", "pymysql", ".", "err", ".", "OperationalError", ",", "pymysql", ".", "ProgrammingError", ",", "pymysql", ".", "InternalError", ",", "pymysql", ".", "IntegrityError", ",", "TypeError", ")", "as", "error", ":", "self", ".", "log", ".", "error", "(", "\"Database reset error: %s\"", ",", "error", ")" ]
34.794872
19.538462
def unassign_authorization_from_vault(self, authorization_id, vault_id): """Removes an ``Authorization`` from a ``Vault``. arg: authorization_id (osid.id.Id): the ``Id`` of the ``Authorization`` arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault`` raise: NotFound - ``authorization_id`` or ``vault_id`` not found or ``authorization_id`` not assigned to ``vault_id`` raise: NullArgument - ``authorization_id`` or ``vault_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin mgr = self._get_provider_manager('AUTHORIZATION', local=True) lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy) lookup_session.get_vault(vault_id) # to raise NotFound self._unassign_object_from_catalog(authorization_id, vault_id)
[ "def", "unassign_authorization_from_vault", "(", "self", ",", "authorization_id", ",", "vault_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'AUTHORIZATION'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_vault_lookup_session", "(", "proxy", "=", "self", ".", "_proxy", ")", "lookup_session", ".", "get_vault", "(", "vault_id", ")", "# to raise NotFound", "self", ".", "_unassign_object_from_catalog", "(", "authorization_id", ",", "vault_id", ")" ]
51.272727
22.363636
def get_data(datastore, path):
    '''
    Get the configuration of the device tree at the given path

    :param datastore: The datastore, e.g. running, operational.
        One of the NETCONF store IETF types
    :type  datastore: :class:`DatastoreType` (``str`` enum).

    :param path: The device path to get the value from,
        a list of element names in order, / separated
    :type  path: ``list``, ``str`` OR ``tuple``

    :return: The network configuration at that tree
    :rtype: ``dict``

    .. code-block:: bash

        salt cisco-nso cisconso.get_data running 'devices/ex0'
    '''
    if isinstance(path, six.string_types):
        path = path.split('/')
    return _proxy_cmd('get_data', datastore, path)
[ "def", "get_data", "(", "datastore", ",", "path", ")", ":", "if", "isinstance", "(", "path", ",", "six", ".", "string_types", ")", ":", "path", "=", "'/'", ".", "split", "(", "path", ")", "return", "_proxy_cmd", "(", "'get_data'", ",", "datastore", ",", "path", ")" ]
32
21.272727
def start_time(self): """The start time of this experiment.""" t = self.redis.hget('experiment_start_times', self.name) if t: return datetime.strptime(t, '%Y-%m-%dT%H:%M:%S')
[ "def", "start_time", "(", "self", ")", ":", "t", "=", "self", ".", "redis", ".", "hget", "(", "'experiment_start_times'", ",", "self", ".", "name", ")", "if", "t", ":", "return", "datetime", ".", "strptime", "(", "t", ",", "'%Y-%m-%dT%H:%M:%S'", ")" ]
41.2
18
def pusher_connected(self, data): """Called when the pusherclient is connected """ # Inform user that pusher is done connecting self.logger.info("Pusherclient connected") # Bind the events we want to listen to self.callback_client.bind("payment_authorized", self.payment_authorized) self.callback_client.bind("shortlink_scanned", self.shortlink_scanned)
[ "def", "pusher_connected", "(", "self", ",", "data", ")", ":", "# Inform user that pusher is done connecting", "self", ".", "logger", ".", "info", "(", "\"Pusherclient connected\"", ")", "# Bind the events we want to listen to", "self", ".", "callback_client", ".", "bind", "(", "\"payment_authorized\"", ",", "self", ".", "payment_authorized", ")", "self", ".", "callback_client", ".", "bind", "(", "\"shortlink_scanned\"", ",", "self", ".", "shortlink_scanned", ")" ]
42.545455
12.636364
async def declareWorkerType(self, *args, **kwargs): """ Update a worker-type Declare a workerType, supplying some details about it. `declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are possessed. For example, a request to update the `gecko-b-1-w2008` worker-type within the `aws-provisioner-v1` provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope `queue:declare-worker-type:aws-provisioner-v1/gecko-b-1-w2008#description`. This method takes input: ``v1/update-workertype-request.json#`` This method gives output: ``v1/workertype-response.json#`` This method is ``experimental`` """ return await self._makeApiCall(self.funcinfo["declareWorkerType"], *args, **kwargs)
[ "async", "def", "declareWorkerType", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "await", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"declareWorkerType\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
45.263158
34.947368
def get_config_applied_machine_groups(self, project_name, config_name):
        """ get the machine group names to which the logtail config applies.
        An unsuccessful operation will raise a LogException.

        :type project_name: string
        :param project_name: the Project name

        :type config_name: string
        :param config_name: the logtail config name used to apply

        :return: GetConfigAppliedMachineGroupsResponse

        :raise: LogException
        """
        headers = {}
        params = {}
        resource = "/configs/" + config_name + "/machinegroups"
        (resp, header) = self._send("GET", project_name, None, resource, params, headers)
        return GetConfigAppliedMachineGroupsResponse(resp, header)
[ "def", "get_config_applied_machine_groups", "(", "self", ",", "project_name", ",", "config_name", ")", ":", "headers", "=", "{", "}", "params", "=", "{", "}", "resource", "=", "\"/configs/\"", "+", "config_name", "+", "\"/machinegroups\"", "(", "resp", ",", "header", ")", "=", "self", ".", "_send", "(", "\"GET\"", ",", "project_name", ",", "None", ",", "resource", ",", "params", ",", "headers", ")", "return", "GetConfigAppliedMachineGroupsResponse", "(", "resp", ",", "header", ")" ]
37.75
22.35
def cnfwAlpha(self, R, Rs, rho0, r_core, ax_x, ax_y):
        """
        deflection angle of a cored NFW profile along the projection to the coordinate axes

        :param R: radius of interest
        :type R: float/numpy array
        :param Rs: scale radius
        :type Rs: float
        :param rho0: density normalization (characteristic density)
        :type rho0: float
        :param r_core: core radius of the (sub)halo
        :type r_core: float>0
        :param ax_x: projection of R onto the x-axis
        :type ax_x: same as R
        :param ax_y: projection of R onto the y-axis
        :type ax_y: same as R
        :return: deflection angle components along the x- and y-axis at radius R
        """
        if isinstance(R, int) or isinstance(R, float):
            R = max(R, 0.00001)
        else:
            R[R <= 0.00001] = 0.00001
        x = R / Rs
        b = r_core * Rs ** -1
        b = max(b, 0.000001)
        gx = self._G(x, b)
        a = 4*rho0*Rs*gx/x**2
        return a * ax_x, a * ax_y
[ "def", "cnfwAlpha", "(", "self", ",", "R", ",", "Rs", ",", "rho0", ",", "r_core", ",", "ax_x", ",", "ax_y", ")", ":", "if", "isinstance", "(", "R", ",", "int", ")", "or", "isinstance", "(", "R", ",", "float", ")", ":", "R", "=", "max", "(", "R", ",", "0.00001", ")", "else", ":", "R", "[", "R", "<=", "0.00001", "]", "=", "0.00001", "x", "=", "R", "/", "Rs", "b", "=", "r_core", "*", "Rs", "**", "-", "1", "b", "=", "max", "(", "b", ",", "0.000001", ")", "gx", "=", "self", ".", "_G", "(", "x", ",", "b", ")", "a", "=", "4", "*", "rho0", "*", "Rs", "*", "gx", "/", "x", "**", "2", "return", "a", "*", "ax_x", ",", "a", "*", "ax_y" ]
31.25
15.535714
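In formulas, the return statement above evaluates the deflection components, with g(x) standing for whatever profile integral `self._G` computes; a LaTeX rendering for reference:

.. code-block:: latex

    \alpha_{x}(R) = \frac{4\,\rho_0\,R_s\,g(x)}{x^{2}}\,\mathrm{ax}_x,
    \qquad
    \alpha_{y}(R) = \frac{4\,\rho_0\,R_s\,g(x)}{x^{2}}\,\mathrm{ax}_y,
    \qquad
    x = \frac{R}{R_s},\quad b = \frac{r_{\mathrm{core}}}{R_s}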
def _push_property_schema(self, prop): """Construct a sub-schema from a property of the current schema.""" schema = Schema(self._schema.properties[prop]) self._push_schema(schema, ".properties." + prop)
[ "def", "_push_property_schema", "(", "self", ",", "prop", ")", ":", "schema", "=", "Schema", "(", "self", ".", "_schema", ".", "properties", "[", "prop", "]", ")", "self", ".", "_push_schema", "(", "schema", ",", "\".properties.\"", "+", "prop", ")" ]
55.75
8
def send(self, data):
        """
        Send text to the client.
        """
        assert isinstance(data, text_type)

        # When data is sent back to the client, we should replace the line
        # endings. (We didn't allocate a real pseudo terminal, and the telnet
        # connection is raw, so we are responsible for inserting \r.)
        self.stdout.write(data.replace('\n', '\r\n'))
        self.stdout.flush()
[ "def", "send", "(", "self", ",", "data", ")", ":", "assert", "isinstance", "(", "data", ",", "text_type", ")", "# When data is send back to the client, we should replace the line", "# endings. (We didn't allocate a real pseudo terminal, and the telnet", "# connection is raw, so we are responsible for inserting \\r.)", "self", ".", "stdout", ".", "write", "(", "data", ".", "replace", "(", "'\\n'", ",", "'\\r\\n'", ")", ")", "self", ".", "stdout", ".", "flush", "(", ")" ]
37.909091
17.727273
def lookupmodule(name):
    """lookupmodule()->(module, file) translates a possibly incomplete
    file or module name into an absolute file name. None can be
    returned for either value when no module or file is found.
    """
    if sys.modules.get(name):
        return (sys.modules[name], sys.modules[name].__file__)
    if os.path.isabs(name) and readable(name):
        return (None, name)
    f = os.path.join(sys.path[0], name)
    if readable(f):
        return (None, f)
    root, ext = os.path.splitext(name)
    if ext == '':
        name = name + '.py'
        pass
    if os.path.isabs(name):
        return (None, name)
    for dirname in sys.path:
        while os.path.islink(dirname):
            dirname = os.readlink(dirname)
            pass
        fullname = os.path.join(dirname, name)
        if readable(fullname):
            return (None, fullname)
        pass
    return (None, None)
[ "def", "lookupmodule", "(", "name", ")", ":", "if", "sys", ".", "modules", ".", "get", "(", "name", ")", ":", "return", "(", "sys", ".", "modules", "[", "name", "]", ",", "sys", ".", "modules", "[", "name", "]", ".", "__file__", ")", "if", "os", ".", "path", ".", "isabs", "(", "name", ")", "and", "readable", "(", "name", ")", ":", "return", "(", "None", ",", "name", ")", "f", "=", "os", ".", "path", ".", "join", "(", "sys", ".", "path", "[", "0", "]", ",", "name", ")", "if", "readable", "(", "f", ")", ":", "return", "(", "None", ",", "f", ")", "root", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "name", ")", "if", "ext", "==", "''", ":", "name", "=", "name", "+", "'.py'", "pass", "if", "os", ".", "path", ".", "isabs", "(", "name", ")", ":", "return", "(", "None", ",", "name", ")", "for", "dirname", "in", "sys", ".", "path", ":", "while", "os", ".", "path", ".", "islink", "(", "dirname", ")", ":", "dirname", "=", "os", ".", "readlink", "(", "dirname", ")", "pass", "fullname", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "name", ")", "if", "readable", "(", "fullname", ")", ":", "return", "(", "None", ",", "fullname", ")", "pass", "return", "(", "None", ",", "None", ")" ]
33.25
13
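A quick illustration of the lookup order (the sys.modules cache first, then absolute paths, then a sys.path scan with '.py' appended); the concrete paths returned depend entirely on the local environment, and 'mymodule' is invented:

.. code-block:: python

    # Already-imported modules resolve from sys.modules first.
    mod, filename = lookupmodule('os')        # (<module 'os'>, '.../os.py')

    # Bare names fall back to the sys.path search, '.py' appended if needed.
    mod, filename = lookupmodule('mymodule')  # (None, '/some/dir/mymodule.py')
                                              # or (None, None) if not found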
def do_fileplaceholder(parser, token):
    """
    Method that parses the fileplaceholder template tag.
    """
    name, params = parse_placeholder(parser, token)
    return FilePlaceholderNode(name, **params)
[ "def", "do_fileplaceholder", "(", "parser", ",", "token", ")", ":", "name", ",", "params", "=", "parse_placeholder", "(", "parser", ",", "token", ")", "return", "FilePlaceholderNode", "(", "name", ",", "*", "*", "params", ")" ]
34
5.666667
def Run(self, unused_arg): """Run the kill.""" # Send a message back to the service to say that we are about to shutdown. reply = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK) # Queue up the response message, jump the queue. self.SendReply(reply, message_type=rdf_flows.GrrMessage.Type.STATUS) # Give the http thread some time to send the reply. self.grr_worker.Sleep(10) # Die ourselves. logging.info("Dying on request.") os._exit(242)
[ "def", "Run", "(", "self", ",", "unused_arg", ")", ":", "# Send a message back to the service to say that we are about to shutdown.", "reply", "=", "rdf_flows", ".", "GrrStatus", "(", "status", "=", "rdf_flows", ".", "GrrStatus", ".", "ReturnedStatus", ".", "OK", ")", "# Queue up the response message, jump the queue.", "self", ".", "SendReply", "(", "reply", ",", "message_type", "=", "rdf_flows", ".", "GrrMessage", ".", "Type", ".", "STATUS", ")", "# Give the http thread some time to send the reply.", "self", ".", "grr_worker", ".", "Sleep", "(", "10", ")", "# Die ourselves.", "logging", ".", "info", "(", "\"Dying on request.\"", ")", "os", ".", "_exit", "(", "242", ")" ]
37.384615
21.923077
def import_code(mod_code, mod_name): """Create a module object by code. @param mod_code: the code that the module contains. @param mod_name: module name. """ mod_obj = imp.new_module(mod_name) mod_obj.__file__ = None exec_(mod_code, mod_obj.__dict__, mod_obj.__dict__) add_to_sys_modules(mod_name=mod_name, mod_obj=mod_obj) return mod_obj
[ "def", "import_code", "(", "mod_code", ",", "mod_name", ")", ":", "mod_obj", "=", "imp", ".", "new_module", "(", "mod_name", ")", "mod_obj", ".", "__file__", "=", "None", "exec_", "(", "mod_code", ",", "mod_obj", ".", "__dict__", ",", "mod_obj", ".", "__dict__", ")", "add_to_sys_modules", "(", "mod_name", "=", "mod_name", ",", "mod_obj", "=", "mod_obj", ")", "return", "mod_obj" ]
24.333333
19.733333
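Usage is a one-liner: compile a source string into a throwaway module object; a sketch with an invented module name and body, assuming add_to_sys_modules registers the module under mod_name (as its signature suggests):

.. code-block:: python

    source = "GREETING = 'hello'\ndef shout():\n    return GREETING.upper()\n"

    mod = import_code(source, 'dyn_example')
    print(mod.shout())   # -> 'HELLO'

    import sys
    assert sys.modules['dyn_example'] is mod   # visible to later imports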
def convert(kml_path, output_dir, separate_folders=False, style_type=None, style_filename='style.json'): """ Given a path to a KML file, convert it to one or several GeoJSON FeatureCollection files and save the result(s) to the given output directory. If not ``separate_folders`` (the default), then create one GeoJSON file. Otherwise, create several GeoJSON files, one for each folder in the KML file that contains geodata or that has a descendant node that contains geodata. Warning: this can produce GeoJSON files with the same geodata in case the KML file has nested folders with geodata. If a ``style_type`` is given, then also build a JSON style file of the given style type and save it to the output directory under the name given by ``style_filename``. """ # Create absolute paths kml_path = Path(kml_path).resolve() output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() output_dir = output_dir.resolve() # Parse KML with kml_path.open(encoding='utf-8', errors='ignore') as src: kml_str = src.read() root = md.parseString(kml_str) # Build GeoJSON layers if separate_folders: layers = build_layers(root) else: layers = [build_feature_collection(root, name=kml_path.stem)] # Create filenames for layers filenames = disambiguate( [to_filename(layer['name']) for layer in layers]) filenames = [name + '.geojson' for name in filenames] # Write layers to files for i in range(len(layers)): path = output_dir/filenames[i] with path.open('w') as tgt: json.dump(layers[i], tgt) # Build and export style file if desired if style_type is not None: if style_type not in STYLE_TYPES: raise ValueError('style type must be one of {!s}'.format( STYLE_TYPES)) builder_name = 'build_{!s}_style'.format(style_type) style_dict = globals()[builder_name](root) path = output_dir/style_filename with path.open('w') as tgt: json.dump(style_dict, tgt)
[ "def", "convert", "(", "kml_path", ",", "output_dir", ",", "separate_folders", "=", "False", ",", "style_type", "=", "None", ",", "style_filename", "=", "'style.json'", ")", ":", "# Create absolute paths", "kml_path", "=", "Path", "(", "kml_path", ")", ".", "resolve", "(", ")", "output_dir", "=", "Path", "(", "output_dir", ")", "if", "not", "output_dir", ".", "exists", "(", ")", ":", "output_dir", ".", "mkdir", "(", ")", "output_dir", "=", "output_dir", ".", "resolve", "(", ")", "# Parse KML", "with", "kml_path", ".", "open", "(", "encoding", "=", "'utf-8'", ",", "errors", "=", "'ignore'", ")", "as", "src", ":", "kml_str", "=", "src", ".", "read", "(", ")", "root", "=", "md", ".", "parseString", "(", "kml_str", ")", "# Build GeoJSON layers", "if", "separate_folders", ":", "layers", "=", "build_layers", "(", "root", ")", "else", ":", "layers", "=", "[", "build_feature_collection", "(", "root", ",", "name", "=", "kml_path", ".", "stem", ")", "]", "# Create filenames for layers", "filenames", "=", "disambiguate", "(", "[", "to_filename", "(", "layer", "[", "'name'", "]", ")", "for", "layer", "in", "layers", "]", ")", "filenames", "=", "[", "name", "+", "'.geojson'", "for", "name", "in", "filenames", "]", "# Write layers to files", "for", "i", "in", "range", "(", "len", "(", "layers", ")", ")", ":", "path", "=", "output_dir", "/", "filenames", "[", "i", "]", "with", "path", ".", "open", "(", "'w'", ")", "as", "tgt", ":", "json", ".", "dump", "(", "layers", "[", "i", "]", ",", "tgt", ")", "# Build and export style file if desired", "if", "style_type", "is", "not", "None", ":", "if", "style_type", "not", "in", "STYLE_TYPES", ":", "raise", "ValueError", "(", "'style type must be one of {!s}'", ".", "format", "(", "STYLE_TYPES", ")", ")", "builder_name", "=", "'build_{!s}_style'", ".", "format", "(", "style_type", ")", "style_dict", "=", "globals", "(", ")", "[", "builder_name", "]", "(", "root", ")", "path", "=", "output_dir", "/", "style_filename", "with", "path", ".", "open", "(", "'w'", ")", "as", "tgt", ":", "json", ".", "dump", "(", "style_dict", ",", "tgt", ")" ]
40.509804
22.784314
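A typical invocation, with hypothetical paths, writes one GeoJSON file per KML folder plus a style file; the style_type value must be one of the module's STYLE_TYPES keys, and 'leaflet' here is only a guess at one such key:

.. code-block:: python

    convert(
        'tracks.kml',           # hypothetical input file
        'out/',                 # created if it does not exist
        separate_folders=True,  # one .geojson per KML folder with geodata
        style_type='leaflet',   # must be a member of STYLE_TYPES
    )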
def accounts(self): """ Access the Accounts Twilio Domain :returns: Accounts Twilio Domain :rtype: twilio.rest.accounts.Accounts """ if self._accounts is None: from twilio.rest.accounts import Accounts self._accounts = Accounts(self) return self._accounts
[ "def", "accounts", "(", "self", ")", ":", "if", "self", ".", "_accounts", "is", "None", ":", "from", "twilio", ".", "rest", ".", "accounts", "import", "Accounts", "self", ".", "_accounts", "=", "Accounts", "(", "self", ")", "return", "self", ".", "_accounts" ]
29.636364
9.090909
def fasta(self): """Generates sequence data for the protein in FASTA format.""" max_line_length = 79 fasta_str = '>{0}:{1}|PDBID|CHAIN|SEQUENCE\n'.format( self.parent.id.upper(), self.id) seq = self.sequence split_seq = [seq[i: i + max_line_length] for i in range(0, len(seq), max_line_length)] for seq_part in split_seq: fasta_str += '{0}\n'.format(seq_part) return fasta_str
[ "def", "fasta", "(", "self", ")", ":", "max_line_length", "=", "79", "fasta_str", "=", "'>{0}:{1}|PDBID|CHAIN|SEQUENCE\\n'", ".", "format", "(", "self", ".", "parent", ".", "id", ".", "upper", "(", ")", ",", "self", ".", "id", ")", "seq", "=", "self", ".", "sequence", "split_seq", "=", "[", "seq", "[", "i", ":", "i", "+", "max_line_length", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "seq", ")", ",", "max_line_length", ")", "]", "for", "seq_part", "in", "split_seq", ":", "fasta_str", "+=", "'{0}\\n'", ".", "format", "(", "seq_part", ")", "return", "fasta_str" ]
42.454545
12.636364
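The slicing idiom used for line wrapping deserves a note of its own: stepping through the string in width-sized strides chunks it with no padding logic and no dependencies; a minimal standalone sketch:

.. code-block:: python

    def wrap(seq, width=79):
        # The final slice is simply shorter, so no special-casing is needed.
        return [seq[i:i + width] for i in range(0, len(seq), width)]

    assert wrap('ABCDEFG', 3) == ['ABC', 'DEF', 'G']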
def remove_extension(self, ext=None): """ Remove extension from IOSystem For single Extensions the same can be achieved with del IOSystem_name.Extension_name Parameters ---------- ext : string or list, optional The extension to remove, this can be given as the name of the instance or of Extension.name (the latter will be checked if no instance was found) If ext is None (default) all Extensions will be removed """ if ext is None: ext = list(self.get_extensions()) if type(ext) is str: ext = [ext] for ee in ext: try: del self.__dict__[ee] except KeyError: for exinstancename, exdata in zip( self.get_extensions(data=False), self.get_extensions(data=True)): if exdata.name == ee: del self.__dict__[exinstancename] finally: self.meta._add_modify("Removed extension {}".format(ee)) return self
[ "def", "remove_extension", "(", "self", ",", "ext", "=", "None", ")", ":", "if", "ext", "is", "None", ":", "ext", "=", "list", "(", "self", ".", "get_extensions", "(", ")", ")", "if", "type", "(", "ext", ")", "is", "str", ":", "ext", "=", "[", "ext", "]", "for", "ee", "in", "ext", ":", "try", ":", "del", "self", ".", "__dict__", "[", "ee", "]", "except", "KeyError", ":", "for", "exinstancename", ",", "exdata", "in", "zip", "(", "self", ".", "get_extensions", "(", "data", "=", "False", ")", ",", "self", ".", "get_extensions", "(", "data", "=", "True", ")", ")", ":", "if", "exdata", ".", "name", "==", "ee", ":", "del", "self", ".", "__dict__", "[", "exinstancename", "]", "finally", ":", "self", ".", "meta", ".", "_add_modify", "(", "\"Removed extension {}\"", ".", "format", "(", "ee", ")", ")", "return", "self" ]
34.4375
18.15625
def show_clusters(clusters, sample, covariances, means, figure = None, display = True):
        """!
        @brief Draws clusters and, in the case of a two-dimensional dataset, draws their ellipses.

        @param[in] clusters (list): Clusters that were allocated by the algorithm.
        @param[in] sample (list): Dataset that was used for clustering.
        @param[in] covariances (list): Covariances of the clusters.
        @param[in] means (list): Means of the clusters.
        @param[in] figure (figure): If 'None' then a new figure is created, otherwise the specified figure is used for visualization.
        @param[in] display (bool): If 'True' then the figure will be shown by the method, otherwise it should be shown manually using the matplotlib function 'plt.show()'.

        @return (figure) Figure where clusters were drawn.

        """
        visualizer = cluster_visualizer()
        visualizer.append_clusters(clusters, sample)

        if figure is None:
            figure = visualizer.show(display = False)
        else:
            visualizer.show(figure = figure, display = False)

        if len(sample[0]) == 2:
            ema_visualizer.__draw_ellipses(figure, visualizer, clusters, covariances, means)

        if display is True:
            plt.show()

        return figure
[ "def", "show_clusters", "(", "clusters", ",", "sample", ",", "covariances", ",", "means", ",", "figure", "=", "None", ",", "display", "=", "True", ")", ":", "visualizer", "=", "cluster_visualizer", "(", ")", "visualizer", ".", "append_clusters", "(", "clusters", ",", "sample", ")", "if", "figure", "is", "None", ":", "figure", "=", "visualizer", ".", "show", "(", "display", "=", "False", ")", "else", ":", "visualizer", ".", "show", "(", "figure", "=", "figure", ",", "display", "=", "False", ")", "if", "len", "(", "sample", "[", "0", "]", ")", "==", "2", ":", "ema_visualizer", ".", "__draw_ellipses", "(", "figure", ",", "visualizer", ",", "clusters", ",", "covariances", ",", "means", ")", "if", "display", "is", "True", ":", "plt", ".", "show", "(", ")", "return", "figure" ]
43.21875
27.4375
def flags(self, index): """ Reimplements the :meth:`QAbstractItemModel.flags` method. :param index: Index. :type index: QModelIndex :return: Flags. ( Qt.ItemFlags ) """ if not index.isValid(): return Qt.NoItemFlags node = self.get_node(index) if index.column() == 0: return hasattr(node, "flags") and Qt.ItemFlags(node.flags) or Qt.NoItemFlags else: attribute = self.get_attribute(node, index.column()) return attribute and hasattr(attribute, "flags") and Qt.ItemFlags(attribute.flags) or Qt.NoItemFlags
[ "def", "flags", "(", "self", ",", "index", ")", ":", "if", "not", "index", ".", "isValid", "(", ")", ":", "return", "Qt", ".", "NoItemFlags", "node", "=", "self", ".", "get_node", "(", "index", ")", "if", "index", ".", "column", "(", ")", "==", "0", ":", "return", "hasattr", "(", "node", ",", "\"flags\"", ")", "and", "Qt", ".", "ItemFlags", "(", "node", ".", "flags", ")", "or", "Qt", ".", "NoItemFlags", "else", ":", "attribute", "=", "self", ".", "get_attribute", "(", "node", ",", "index", ".", "column", "(", ")", ")", "return", "attribute", "and", "hasattr", "(", "attribute", ",", "\"flags\"", ")", "and", "Qt", ".", "ItemFlags", "(", "attribute", ".", "flags", ")", "or", "Qt", ".", "NoItemFlags" ]
34.277778
21.277778
def maintained_selection(): """Maintain selection during context Example: >>> with maintained_selection(): ... # Modify selection ... node.setSelected(on=False, clear_all_selected=True) >>> # Selection restored """ previous_selection = hou.selectedNodes() try: yield finally: if previous_selection: for node in previous_selection: node.setSelected(on=True) else: for node in previous_selection: node.setSelected(on=False)
[ "def", "maintained_selection", "(", ")", ":", "previous_selection", "=", "hou", ".", "selectedNodes", "(", ")", "try", ":", "yield", "finally", ":", "if", "previous_selection", ":", "for", "node", "in", "previous_selection", ":", "node", ".", "setSelected", "(", "on", "=", "True", ")", "else", ":", "for", "node", "in", "previous_selection", ":", "node", ".", "setSelected", "(", "on", "=", "False", ")" ]
26.095238
16.142857
def num_or_str(x): """The argument is a string; convert to a number if possible, or strip it. >>> num_or_str('42') 42 >>> num_or_str(' 42x ') '42x' """ if isnumber(x): return x try: return int(x) except ValueError: try: return float(x) except ValueError: return str(x).strip()
[ "def", "num_or_str", "(", "x", ")", ":", "if", "isnumber", "(", "x", ")", ":", "return", "x", "try", ":", "return", "int", "(", "x", ")", "except", "ValueError", ":", "try", ":", "return", "float", "(", "x", ")", "except", "ValueError", ":", "return", "str", "(", "x", ")", ".", "strip", "(", ")" ]
23.066667
17.266667
def value(self): """ The current value of the field. If no value is set when this attribute is accessed for reading, the :meth:`default` of the field is invoked and the result is set and returned as value. Only values contained in the :attr:`~.BoundOptionsField.options` can be set, other values are rejected with a :class:`ValueError`. To revert the value to the default value specified in the descriptor, use the ``del`` operator. """ try: return self._value except AttributeError: self._value = self.field.default() return self._value
[ "def", "value", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_value", "except", "AttributeError", ":", "self", ".", "_value", "=", "self", ".", "field", ".", "default", "(", ")", "return", "self", ".", "_value" ]
40.4375
21.1875
def column_map_expectation(cls, func): """For SqlAlchemy, this decorator allows individual column_map_expectations to simply return the filter that describes the expected condition on their data. The decorator will then use that filter to obtain unexpected elements, relevant counts, and return the formatted object. """ if PY3: argspec = inspect.getfullargspec(func)[0][1:] else: argspec = inspect.getargspec(func)[0][1:] @cls.expectation(argspec) @wraps(func) def inner_wrapper(self, column, mostly=None, result_format=None, *args, **kwargs): if result_format is None: result_format = self.default_expectation_args["result_format"] result_format = parse_result_format(result_format) if result_format['result_format'] == 'COMPLETE': warnings.warn("Setting result format to COMPLETE for a SqlAlchemyDataset can be dangerous because it will not limit the number of returned results.") unexpected_count_limit = None else: unexpected_count_limit = result_format['partial_unexpected_count'] expected_condition = func(self, column, *args, **kwargs) # FIXME Temporary Fix for counting missing values # Added to compensate when an ignore_values argument is added to the expectation ignore_values = [None] if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']: ignore_values = [] result_format['partial_unexpected_count'] = 0 # Optimization to avoid meaningless computation for these expectations count_query = sa.select([ sa.func.count().label('element_count'), sa.func.sum( sa.case([(sa.or_( sa.column(column).in_(ignore_values), # Below is necessary b/c sa.in_() uses `==` but None != None # But we only consider this if None is actually in the list of ignore values sa.column(column).is_(None) if None in ignore_values else False), 1)], else_=0) ).label('null_count'), sa.func.sum( sa.case([ ( sa.and_( sa.not_(expected_condition), sa.case([ ( sa.column(column).is_(None), False ) ], else_=True) if None in ignore_values else True ), 1 ) ], else_=0) ).label('unexpected_count') ]).select_from(self._table) count_results = dict(self.engine.execute(count_query).fetchone()) # Handle case of empty table gracefully: if "element_count" not in count_results or count_results["element_count"] is None: count_results["element_count"] = 0 if "null_count" not in count_results or count_results["null_count"] is None: count_results["null_count"] = 0 if "unexpected_count" not in count_results or count_results["unexpected_count"] is None: count_results["unexpected_count"] = 0 # Retrieve unexpected values unexpected_query_results = self.engine.execute( sa.select([sa.column(column)]).select_from(self._table).where( sa.and_(sa.not_(expected_condition), sa.or_( # SA normally evaluates `== None` as `IS NONE`. 
However `sa.in_()` # replaces `None` as `NULL` in the list and incorrectly uses `== NULL` sa.case([ ( sa.column(column).is_(None), False ) ], else_=True) if None in ignore_values else False, # Ignore any other values that are in the ignore list sa.column(column).in_(ignore_values) == False)) ).limit(unexpected_count_limit) ) nonnull_count = count_results['element_count'] - \ count_results['null_count'] if "output_strftime_format" in kwargs: output_strftime_format = kwargs["output_strftime_format"] maybe_limited_unexpected_list = [] for x in unexpected_query_results.fetchall(): if isinstance(x[column], string_types): col = parse(x[column]) else: col = x[column] maybe_limited_unexpected_list.append(datetime.strftime(col, output_strftime_format)) else: maybe_limited_unexpected_list = [x[column] for x in unexpected_query_results.fetchall()] success_count = nonnull_count - count_results['unexpected_count'] success, percent_success = self._calc_map_expectation_success( success_count, nonnull_count, mostly) return_obj = self._format_map_output( result_format, success, count_results['element_count'], nonnull_count, count_results['unexpected_count'], maybe_limited_unexpected_list, None ) if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']: # These results are unnecessary for the above expectations del return_obj['result']['unexpected_percent_nonmissing'] try: del return_obj['result']['partial_unexpected_counts'] except KeyError: pass return return_obj inner_wrapper.__name__ = func.__name__ inner_wrapper.__doc__ = func.__doc__ return inner_wrapper
[ "def", "column_map_expectation", "(", "cls", ",", "func", ")", ":", "if", "PY3", ":", "argspec", "=", "inspect", ".", "getfullargspec", "(", "func", ")", "[", "0", "]", "[", "1", ":", "]", "else", ":", "argspec", "=", "inspect", ".", "getargspec", "(", "func", ")", "[", "0", "]", "[", "1", ":", "]", "@", "cls", ".", "expectation", "(", "argspec", ")", "@", "wraps", "(", "func", ")", "def", "inner_wrapper", "(", "self", ",", "column", ",", "mostly", "=", "None", ",", "result_format", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "result_format", "is", "None", ":", "result_format", "=", "self", ".", "default_expectation_args", "[", "\"result_format\"", "]", "result_format", "=", "parse_result_format", "(", "result_format", ")", "if", "result_format", "[", "'result_format'", "]", "==", "'COMPLETE'", ":", "warnings", ".", "warn", "(", "\"Setting result format to COMPLETE for a SqlAlchemyDataset can be dangerous because it will not limit the number of returned results.\"", ")", "unexpected_count_limit", "=", "None", "else", ":", "unexpected_count_limit", "=", "result_format", "[", "'partial_unexpected_count'", "]", "expected_condition", "=", "func", "(", "self", ",", "column", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# FIXME Temporary Fix for counting missing values", "# Added to compensate when an ignore_values argument is added to the expectation", "ignore_values", "=", "[", "None", "]", "if", "func", ".", "__name__", "in", "[", "'expect_column_values_to_not_be_null'", ",", "'expect_column_values_to_be_null'", "]", ":", "ignore_values", "=", "[", "]", "result_format", "[", "'partial_unexpected_count'", "]", "=", "0", "# Optimization to avoid meaningless computation for these expectations", "count_query", "=", "sa", ".", "select", "(", "[", "sa", ".", "func", ".", "count", "(", ")", ".", "label", "(", "'element_count'", ")", ",", "sa", ".", "func", ".", "sum", "(", "sa", ".", "case", "(", "[", "(", "sa", ".", "or_", "(", "sa", ".", "column", "(", "column", ")", ".", "in_", "(", "ignore_values", ")", ",", "# Below is necessary b/c sa.in_() uses `==` but None != None", "# But we only consider this if None is actually in the list of ignore values", "sa", ".", "column", "(", "column", ")", ".", "is_", "(", "None", ")", "if", "None", "in", "ignore_values", "else", "False", ")", ",", "1", ")", "]", ",", "else_", "=", "0", ")", ")", ".", "label", "(", "'null_count'", ")", ",", "sa", ".", "func", ".", "sum", "(", "sa", ".", "case", "(", "[", "(", "sa", ".", "and_", "(", "sa", ".", "not_", "(", "expected_condition", ")", ",", "sa", ".", "case", "(", "[", "(", "sa", ".", "column", "(", "column", ")", ".", "is_", "(", "None", ")", ",", "False", ")", "]", ",", "else_", "=", "True", ")", "if", "None", "in", "ignore_values", "else", "True", ")", ",", "1", ")", "]", ",", "else_", "=", "0", ")", ")", ".", "label", "(", "'unexpected_count'", ")", "]", ")", ".", "select_from", "(", "self", ".", "_table", ")", "count_results", "=", "dict", "(", "self", ".", "engine", ".", "execute", "(", "count_query", ")", ".", "fetchone", "(", ")", ")", "# Handle case of empty table gracefully:", "if", "\"element_count\"", "not", "in", "count_results", "or", "count_results", "[", "\"element_count\"", "]", "is", "None", ":", "count_results", "[", "\"element_count\"", "]", "=", "0", "if", "\"null_count\"", "not", "in", "count_results", "or", "count_results", "[", "\"null_count\"", "]", "is", "None", ":", "count_results", "[", "\"null_count\"", "]", "=", "0", "if", "\"unexpected_count\"", 
"not", "in", "count_results", "or", "count_results", "[", "\"unexpected_count\"", "]", "is", "None", ":", "count_results", "[", "\"unexpected_count\"", "]", "=", "0", "# Retrieve unexpected values", "unexpected_query_results", "=", "self", ".", "engine", ".", "execute", "(", "sa", ".", "select", "(", "[", "sa", ".", "column", "(", "column", ")", "]", ")", ".", "select_from", "(", "self", ".", "_table", ")", ".", "where", "(", "sa", ".", "and_", "(", "sa", ".", "not_", "(", "expected_condition", ")", ",", "sa", ".", "or_", "(", "# SA normally evaluates `== None` as `IS NONE`. However `sa.in_()`", "# replaces `None` as `NULL` in the list and incorrectly uses `== NULL`", "sa", ".", "case", "(", "[", "(", "sa", ".", "column", "(", "column", ")", ".", "is_", "(", "None", ")", ",", "False", ")", "]", ",", "else_", "=", "True", ")", "if", "None", "in", "ignore_values", "else", "False", ",", "# Ignore any other values that are in the ignore list", "sa", ".", "column", "(", "column", ")", ".", "in_", "(", "ignore_values", ")", "==", "False", ")", ")", ")", ".", "limit", "(", "unexpected_count_limit", ")", ")", "nonnull_count", "=", "count_results", "[", "'element_count'", "]", "-", "count_results", "[", "'null_count'", "]", "if", "\"output_strftime_format\"", "in", "kwargs", ":", "output_strftime_format", "=", "kwargs", "[", "\"output_strftime_format\"", "]", "maybe_limited_unexpected_list", "=", "[", "]", "for", "x", "in", "unexpected_query_results", ".", "fetchall", "(", ")", ":", "if", "isinstance", "(", "x", "[", "column", "]", ",", "string_types", ")", ":", "col", "=", "parse", "(", "x", "[", "column", "]", ")", "else", ":", "col", "=", "x", "[", "column", "]", "maybe_limited_unexpected_list", ".", "append", "(", "datetime", ".", "strftime", "(", "col", ",", "output_strftime_format", ")", ")", "else", ":", "maybe_limited_unexpected_list", "=", "[", "x", "[", "column", "]", "for", "x", "in", "unexpected_query_results", ".", "fetchall", "(", ")", "]", "success_count", "=", "nonnull_count", "-", "count_results", "[", "'unexpected_count'", "]", "success", ",", "percent_success", "=", "self", ".", "_calc_map_expectation_success", "(", "success_count", ",", "nonnull_count", ",", "mostly", ")", "return_obj", "=", "self", ".", "_format_map_output", "(", "result_format", ",", "success", ",", "count_results", "[", "'element_count'", "]", ",", "nonnull_count", ",", "count_results", "[", "'unexpected_count'", "]", ",", "maybe_limited_unexpected_list", ",", "None", ")", "if", "func", ".", "__name__", "in", "[", "'expect_column_values_to_not_be_null'", ",", "'expect_column_values_to_be_null'", "]", ":", "# These results are unnecessary for the above expectations", "del", "return_obj", "[", "'result'", "]", "[", "'unexpected_percent_nonmissing'", "]", "try", ":", "del", "return_obj", "[", "'result'", "]", "[", "'partial_unexpected_counts'", "]", "except", "KeyError", ":", "pass", "return", "return_obj", "inner_wrapper", ".", "__name__", "=", "func", ".", "__name__", "inner_wrapper", ".", "__doc__", "=", "func", ".", "__doc__", "return", "inner_wrapper" ]
48.068702
25.305344
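The payoff of the decorator is that an individual expectation only has to return the per-row SQLAlchemy condition; a hypothetical example (the expectation name, the rule, and how the host class exposes the decorator are all invented to show the shape the wrapper consumes):

.. code-block:: python

    import sqlalchemy as sa

    class MyDataset(SqlAlchemyDatasetLike):  # hypothetical host class
        @column_map_expectation
        def expect_column_values_to_be_positive(self, column):
            # The decorator turns this row-level condition into element
            # counts, unexpected-value samples, and a formatted result.
            return sa.column(column) > 0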
def handle_stderr(stderr_pipe): """ Takes stderr from the command's output and displays it AFTER the stdout is printed by run_command(). """ stderr_output = stderr_pipe.read() if len(stderr_output) > 0: click.secho("\n__ Error Output {0}".format('_'*62), fg='white', bold=True) click.echo(stderr_output) return True
[ "def", "handle_stderr", "(", "stderr_pipe", ")", ":", "stderr_output", "=", "stderr_pipe", ".", "read", "(", ")", "if", "len", "(", "stderr_output", ")", ">", "0", ":", "click", ".", "secho", "(", "\"\\n__ Error Output {0}\"", ".", "format", "(", "'_'", "*", "62", ")", ",", "fg", "=", "'white'", ",", "bold", "=", "True", ")", "click", ".", "echo", "(", "stderr_output", ")", "return", "True" ]
28.384615
16.692308
def get_events_with_error_code(event_number, event_status, select_mask=0b1111111111111111, condition=0b0000000000000000): '''Selects the events with a certain error code. Parameters ---------- event_number : numpy.array event_status : numpy.array select_mask : int The mask that selects the event error code to check. condition : int The value the selected event error code should have. Returns ------- numpy.array ''' logging.debug("Calculate events with certain error code") return np.unique(event_number[event_status & select_mask == condition])
[ "def", "get_events_with_error_code", "(", "event_number", ",", "event_status", ",", "select_mask", "=", "0b1111111111111111", ",", "condition", "=", "0b0000000000000000", ")", ":", "logging", ".", "debug", "(", "\"Calculate events with certain error code\"", ")", "return", "np", ".", "unique", "(", "event_number", "[", "event_status", "&", "select_mask", "==", "condition", "]", ")" ]
31.631579
28.263158
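The mask-and-compare reads as "keep events whose selected status bits equal the given pattern"; a tiny worked example with made-up data:

.. code-block:: python

    import numpy as np

    event_number = np.array([1, 1, 2, 3, 3])
    event_status = np.array([0b0000, 0b0010, 0b0000, 0b0100, 0b0000])

    # Events where bit 1 (mask 0b0010) is set:
    hits = get_events_with_error_code(event_number, event_status,
                                      select_mask=0b0010, condition=0b0010)
    # hits -> array([1])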
def get_content_models(self): """ Return all subclasses that are admin registered. """ models = [] for model in self.concrete_model.get_content_models(): try: admin_url(model, "add") except NoReverseMatch: continue else: setattr(model, "meta_verbose_name", model._meta.verbose_name) setattr(model, "add_url", admin_url(model, "add")) models.append(model) return models
[ "def", "get_content_models", "(", "self", ")", ":", "models", "=", "[", "]", "for", "model", "in", "self", ".", "concrete_model", ".", "get_content_models", "(", ")", ":", "try", ":", "admin_url", "(", "model", ",", "\"add\"", ")", "except", "NoReverseMatch", ":", "continue", "else", ":", "setattr", "(", "model", ",", "\"meta_verbose_name\"", ",", "model", ".", "_meta", ".", "verbose_name", ")", "setattr", "(", "model", ",", "\"add_url\"", ",", "admin_url", "(", "model", ",", "\"add\"", ")", ")", "models", ".", "append", "(", "model", ")", "return", "models" ]
33.6
19.333333
def parse_meta(self, selected_meta_data):
        """
        Parses all of the metadata files

        :param selected_meta_data: if specified, only the columns listed here are parsed
        :return: a DataFrame of the parsed metadata, indexed by sample
        """
        # reads all meta data files
        files = self._get_files("meta", self.path)
        df = pd.DataFrame()
        print("Parsing the metadata files...")
        for f in tqdm(files):
            data = self.parse_single_meta(f, selected_meta_data)
            if data is not None:
                df = pd.concat([df, data], axis=0)
        df.index = df['sample']
        # # df = df.drop('sample', 1)  # 1 for the columns
        return df
[ "def", "parse_meta", "(", "self", ",", "selected_meta_data", ")", ":", "# reads all meta data files", "files", "=", "self", ".", "_get_files", "(", "\"meta\"", ",", "self", ".", "path", ")", "df", "=", "pd", ".", "DataFrame", "(", ")", "print", "(", "\"Parsing the metadata files...\"", ")", "for", "f", "in", "tqdm", "(", "files", ")", ":", "data", "=", "self", ".", "parse_single_meta", "(", "f", ",", "selected_meta_data", ")", "if", "data", "is", "not", "None", ":", "df", "=", "pd", ".", "concat", "(", "[", "df", ",", "data", "]", ",", "axis", "=", "0", ")", "df", ".", "index", "=", "df", "[", "'sample'", "]", "#", "# df = df.drop('sample', 1) # 1 for the columns", "return", "df" ]
37.833333
14.833333
def recursive_find_search(folder_path, regex=''): """ Returns absolute paths of files that match the regex within file_dir and all its children folders. Note: The regex matching is done using the search function of the re module. Parameters ---------- folder_path: string regex: string Returns ------- A list of strings. """ outlist = [] for root, dirs, files in os.walk(folder_path): outlist.extend([op.join(root, f) for f in files if re.search(regex, f)]) return outlist
[ "def", "recursive_find_search", "(", "folder_path", ",", "regex", "=", "''", ")", ":", "outlist", "=", "[", "]", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "folder_path", ")", ":", "outlist", ".", "extend", "(", "[", "op", ".", "join", "(", "root", ",", "f", ")", "for", "f", "in", "files", "if", "re", ".", "search", "(", "regex", ",", "f", ")", "]", ")", "return", "outlist" ]
22
23.36
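For example, collecting every Python file under a (hypothetical) project root:

.. code-block:: python

    py_files = recursive_find_search('/home/user/project', regex=r'\.py$')
    # -> ['/home/user/project/setup.py', '/home/user/project/pkg/core.py', ...]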
def hash_(self, keys: Index, salt: int = 0) -> pd.Series:
        """Hashes the given index into an integer index in the range [0, self.map_size)

        Parameters
        ----------
        keys :
            The new index to hash.
        salt :
            An integer used to perturb the hash in a deterministic way. Useful
            in dealing with collisions.

        Returns
        -------
        pd.Series
            A pandas series indexed by the given keys and whose values take on integers in
            the range [0, self.map_size).  Duplicates may appear and should be dealt
            with by the calling code.
        """
        key_frame = keys.to_frame()
        new_map = pd.Series(0, index=keys)
        salt = self.convert_to_ten_digit_int(pd.Series(salt, index=keys))

        for i, column_name in enumerate(key_frame.columns):
            column = self.convert_to_ten_digit_int(key_frame[column_name])

            primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 27]
            out = pd.Series(1, index=column.index)
            for idx, p in enumerate(primes):
                # numpy will almost always overflow here, but it is equivalent to modding
                # out by 2**64.  Since it's much much larger than our map size
                # the amount of additional periodicity this introduces is pretty trivial.
                out *= np.power(p, self.digit(column, idx))
            new_map += out + salt

        return new_map % self.map_size
[ "def", "hash_", "(", "self", ",", "keys", ":", "Index", ",", "salt", ":", "int", "=", "0", ")", "->", "pd", ".", "Series", ":", "key_frame", "=", "keys", ".", "to_frame", "(", ")", "new_map", "=", "pd", ".", "Series", "(", "0", ",", "index", "=", "keys", ")", "salt", "=", "self", ".", "convert_to_ten_digit_int", "(", "pd", ".", "Series", "(", "salt", ",", "index", "=", "keys", ")", ")", "for", "i", ",", "column_name", "in", "enumerate", "(", "key_frame", ".", "columns", ")", ":", "column", "=", "self", ".", "convert_to_ten_digit_int", "(", "key_frame", "[", "column_name", "]", ")", "primes", "=", "[", "2", ",", "3", ",", "5", ",", "7", ",", "11", ",", "13", ",", "17", ",", "19", ",", "23", ",", "27", "]", "out", "=", "pd", ".", "Series", "(", "1", ",", "index", "=", "column", ".", "index", ")", "for", "idx", ",", "p", "in", "enumerate", "(", "primes", ")", ":", "# numpy will almost always overflow here, but it is equivalent to modding", "# out by 2**64. Since it's much much larger than our map size", "# the amount of additional periodicity this introduces is pretty trivial.", "out", "*=", "np", ".", "power", "(", "p", ",", "self", ".", "digit", "(", "column", ",", "idx", ")", ")", "new_map", "+=", "out", "+", "salt", "return", "new_map", "%", "self", ".", "map_size" ]
41.285714
23.571429
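The core trick is encoding each key as a product of "prime" powers whose exponents are the key's decimal digits (the original list ends with 27, which is not actually prime; it is kept verbatim here). A self-contained sketch of that step, with overflow deliberately left to wrap modulo 2**64 as the original's comment describes:

.. code-block:: python

    import numpy as np

    BASES = np.array([2, 3, 5, 7, 11, 13, 17, 19, 23, 27], dtype=np.uint64)

    def prime_power_hash(keys, map_size):
        # keys: 1-D array of ten-digit non-negative integers.
        keys = np.asarray(keys, dtype=np.uint64)
        out = np.ones_like(keys)
        for idx, p in enumerate(BASES):
            digit = (keys // np.uint64(10) ** np.uint64(idx)) % np.uint64(10)
            out = out * p ** digit  # wraps mod 2**64, like the original
        return out % np.uint64(map_size)

    print(prime_power_hash([1234567890, 987654321], 1000))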
def languages(macrolanguage):
    """
    Get a list of :class:`language_tags.Subtag.Subtag` objects given the string macrolanguage.

    :param string macrolanguage: subtag macrolanguage.
    :return: a list of the macrolanguage :class:`language_tags.Subtag.Subtag` objects.
    :raise Exception: if the macrolanguage does not exist.
    """
    results = []

    macrolanguage = macrolanguage.lower()
    macrolanguage_data = data.get('macrolanguage')
    if macrolanguage not in macrolanguage_data:
        raise Exception('\'' + macrolanguage + '\' is not a macrolanguage.')
    for registry_item in registry:
        record = registry_item
        if 'Macrolanguage' in record:
            if record['Macrolanguage'] == macrolanguage:
                results.append(Subtag(record['Subtag'], record['Type']))

    return results
[ "def", "languages", "(", "macrolanguage", ")", ":", "results", "=", "[", "]", "macrolanguage", "=", "macrolanguage", ".", "lower", "(", ")", "macrolanguage_data", "=", "data", ".", "get", "(", "'macrolanguage'", ")", "if", "macrolanguage", "not", "in", "macrolanguage_data", ":", "raise", "Exception", "(", "'\\''", "+", "macrolanguage", "+", "'\\' is not a macrolanguage.'", ")", "for", "registry_item", "in", "registry", ":", "record", "=", "registry_item", "if", "'Macrolanguage'", "in", "record", ":", "if", "record", "[", "'Macrolanguage'", "]", "==", "macrolanguage", ":", "results", ".", "append", "(", "Subtag", "(", "record", "[", "'Subtag'", "]", ",", "record", "[", "'Type'", "]", ")", ")", "return", "results" ]
41.952381
21.571429
def get_object( self, object_t, object_id=None, relation=None, parent=None, **kwargs ): """ Actually query the Deezer API to retrieve the object :returns: json dictionary """ url = self.object_url(object_t, object_id, relation, **kwargs) response = self.session.get(url) return self._process_json(response.json(), parent)
[ "def", "get_object", "(", "self", ",", "object_t", ",", "object_id", "=", "None", ",", "relation", "=", "None", ",", "parent", "=", "None", ",", "*", "*", "kwargs", ")", ":", "url", "=", "self", ".", "object_url", "(", "object_t", ",", "object_id", ",", "relation", ",", "*", "*", "kwargs", ")", "response", "=", "self", ".", "session", ".", "get", "(", "url", ")", "return", "self", ".", "_process_json", "(", "response", ".", "json", "(", ")", ",", "parent", ")" ]
34.545455
19.090909
def fetchUserInfo(self, *user_ids): """ Get users' info from IDs, unordered .. warning:: Sends two requests, to fetch all available info! :param user_ids: One or more user ID(s) to query :return: :class:`models.User` objects, labeled by their ID :rtype: dict :raises: FBchatException if request failed """ threads = self.fetchThreadInfo(*user_ids) users = {} for id_, thread in threads.items(): if thread.type == ThreadType.USER: users[id_] = thread else: raise FBchatUserError("Thread {} was not a user".format(thread)) return users
[ "def", "fetchUserInfo", "(", "self", ",", "*", "user_ids", ")", ":", "threads", "=", "self", ".", "fetchThreadInfo", "(", "*", "user_ids", ")", "users", "=", "{", "}", "for", "id_", ",", "thread", "in", "threads", ".", "items", "(", ")", ":", "if", "thread", ".", "type", "==", "ThreadType", ".", "USER", ":", "users", "[", "id_", "]", "=", "thread", "else", ":", "raise", "FBchatUserError", "(", "\"Thread {} was not a user\"", ".", "format", "(", "thread", ")", ")", "return", "users" ]
32.380952
17.52381
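A hypothetical usage sketch for `fetchUserInfo`, assuming `client` is a logged-in fbchat `Client`; the IDs below are placeholders, and a non-user thread ID raises FBchatUserError as the code above shows:

users = client.fetchUserInfo('1234567890', '2345678901')   # placeholder IDs
for uid, user in users.items():
    print(uid, user.name)   # User objects labeled by their ID, unordered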
def __get_query_basic(cls, date_field=None, start=None, end=None, filters={}):
    """
    Create an es_dsl query object with the date range and filters.

    :param date_field: field with the date value
    :param start: date with the from value, should be a datetime.datetime object
    :param end: date with the to value, should be a datetime.datetime object
    :param filters: dict with the filters to be applied
    :return: a DSL query containing the required parameters

    Ex: {'query': {'bool': {'filter': [{'range': {'DATE_FIELD':
                                        {'gte': '2015-05-19T00:00:00',
                                         'lte': '2018-05-18T00:00:00'}}}],
                            'must': [{'match_phrase': {'first_name': 'Jhon'}},
                                     {'match_phrase': {'last_name': 'Doe'}},
                                     {'match_phrase': {'Phone': 2222222}}
                                     ]}}}
    """
    query_basic = Search()

    query_filters = cls.__get_query_filters(filters)
    for f in query_filters:
        query_basic = query_basic.query(f)

    query_filters_inverse = cls.__get_query_filters(filters, inverse=True)
    # Here, don't forget the '~'. That is what makes this an inverse filter.
    for f in query_filters_inverse:
        query_basic = query_basic.query(~f)

    if not date_field:
        query_range = {}
    else:
        query_range = cls.__get_query_range(date_field, start, end)

    # Applying the range filter
    query_basic = query_basic.filter('range', **query_range)

    return query_basic
[ "def", "__get_query_basic", "(", "cls", ",", "date_field", "=", "None", ",", "start", "=", "None", ",", "end", "=", "None", ",", "filters", "=", "{", "}", ")", ":", "query_basic", "=", "Search", "(", ")", "query_filters", "=", "cls", ".", "__get_query_filters", "(", "filters", ")", "for", "f", "in", "query_filters", ":", "query_basic", "=", "query_basic", ".", "query", "(", "f", ")", "query_filters_inverse", "=", "cls", ".", "__get_query_filters", "(", "filters", ",", "inverse", "=", "True", ")", "# Here, don't forget the '~'. That is what makes this an inverse filter.", "for", "f", "in", "query_filters_inverse", ":", "query_basic", "=", "query_basic", ".", "query", "(", "~", "f", ")", "if", "not", "date_field", ":", "query_range", "=", "{", "}", "else", ":", "query_range", "=", "cls", ".", "__get_query_range", "(", "date_field", ",", "start", ",", "end", ")", "# Applying the range filter", "query_basic", "=", "query_basic", ".", "filter", "(", "'range'", ",", "*", "*", "query_range", ")", "return", "query_basic" ]
44.473684
23.947368
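A standalone sketch of the same query-building pattern with elasticsearch_dsl. The inverse-filter convention here (a leading '*' on the field name) is an assumption standing in for whatever `__get_query_filters(filters, inverse=True)` keys on, which is not shown above; unlike the method above, which applies the range filter even when `query_range` is empty, this sketch guards that call:

from elasticsearch_dsl import Q, Search

def basic_query(date_field=None, start=None, end=None, filters=None):
    s = Search()
    for field, value in (filters or {}).items():
        q = Q('match_phrase', **{field.lstrip('*'): value})
        # Assumed convention: '*'-prefixed fields are inverse (negated) filters.
        s = s.query(~q) if field.startswith('*') else s.query(q)
    if date_field:
        rng = {}
        if start:
            rng['gte'] = start.isoformat()
        if end:
            rng['lte'] = end.isoformat()
        s = s.filter('range', **{date_field: rng})
    return s

# basic_query('DATE_FIELD', start, end, {'first_name': 'Jhon'}) yields the
# bool query shown in the docstring example above.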
def msg_curse(self, args=None, max_width=None):
    """Return the list of lines to display in the curses interface."""
    # Init the return message
    ret = []

    # Only process if stats exist...
    if not self.stats:
        return ret

    # Max size for the interface name
    name_max_width = max_width - 12

    # Header
    msg = '{:{width}}'.format('RAID disks', width=name_max_width)
    ret.append(self.curse_add_line(msg, "TITLE"))
    msg = '{:>7}'.format('Used')
    ret.append(self.curse_add_line(msg))
    msg = '{:>7}'.format('Avail')
    ret.append(self.curse_add_line(msg))
    # Data
    arrays = sorted(iterkeys(self.stats))
    for array in arrays:
        # New line
        ret.append(self.curse_new_line())
        # Display the current status
        status = self.raid_alert(self.stats[array]['status'],
                                 self.stats[array]['used'],
                                 self.stats[array]['available'],
                                 self.stats[array]['type'])
        # Data: RAID type name | disk used | disk available
        array_type = self.stats[array]['type'].upper() if self.stats[array]['type'] is not None else 'UNKNOWN'
        # Build the full name = array type + array name
        full_name = '{} {}'.format(array_type, array)
        msg = '{:{width}}'.format(full_name, width=name_max_width)
        ret.append(self.curse_add_line(msg))
        if self.stats[array]['type'] == 'raid0' and self.stats[array]['status'] == 'active':
            msg = '{:>7}'.format(len(self.stats[array]['components']))
            ret.append(self.curse_add_line(msg, status))
            msg = '{:>7}'.format('-')
            ret.append(self.curse_add_line(msg, status))
        elif self.stats[array]['status'] == 'active':
            msg = '{:>7}'.format(self.stats[array]['used'])
            ret.append(self.curse_add_line(msg, status))
            msg = '{:>7}'.format(self.stats[array]['available'])
            ret.append(self.curse_add_line(msg, status))
        elif self.stats[array]['status'] == 'inactive':
            ret.append(self.curse_new_line())
            msg = '└─ Status {}'.format(self.stats[array]['status'])
            ret.append(self.curse_add_line(msg, status))
            components = sorted(iterkeys(self.stats[array]['components']))
            for i, component in enumerate(components):
                if i == len(components) - 1:
                    tree_char = '└─'
                else:
                    tree_char = '├─'
                ret.append(self.curse_new_line())
                msg = ' {} disk {}: '.format(tree_char, self.stats[array]['components'][component])
                ret.append(self.curse_add_line(msg))
                msg = '{}'.format(component)
                ret.append(self.curse_add_line(msg))
        if self.stats[array]['type'] != 'raid0' and (self.stats[array]['used'] < self.stats[array]['available']):
            # Display current array configuration
            ret.append(self.curse_new_line())
            msg = '└─ Degraded mode'
            ret.append(self.curse_add_line(msg, status))
            if len(self.stats[array]['config']) < 17:
                ret.append(self.curse_new_line())
                msg = ' └─ {}'.format(self.stats[array]['config'].replace('_', 'A'))
                ret.append(self.curse_add_line(msg))

    return ret
[ "def", "msg_curse", "(", "self", ",", "args", "=", "None", ",", "max_width", "=", "None", ")", ":", "# Init the return message", "ret", "=", "[", "]", "# Only process if stats exist...", "if", "not", "self", ".", "stats", ":", "return", "ret", "# Max size for the interface name", "name_max_width", "=", "max_width", "-", "12", "# Header", "msg", "=", "'{:{width}}'", ".", "format", "(", "'RAID disks'", ",", "width", "=", "name_max_width", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "\"TITLE\"", ")", ")", "msg", "=", "'{:>7}'", ".", "format", "(", "'Used'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "msg", "=", "'{:>7}'", ".", "format", "(", "'Avail'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "# Data", "arrays", "=", "sorted", "(", "iterkeys", "(", "self", ".", "stats", ")", ")", "for", "array", "in", "arrays", ":", "# New line", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "# Display the current status", "status", "=", "self", ".", "raid_alert", "(", "self", ".", "stats", "[", "array", "]", "[", "'status'", "]", ",", "self", ".", "stats", "[", "array", "]", "[", "'used'", "]", ",", "self", ".", "stats", "[", "array", "]", "[", "'available'", "]", ",", "self", ".", "stats", "[", "array", "]", "[", "'type'", "]", ")", "# Data: RAID type name | disk used | disk available", "array_type", "=", "self", ".", "stats", "[", "array", "]", "[", "'type'", "]", ".", "upper", "(", ")", "if", "self", ".", "stats", "[", "array", "]", "[", "'type'", "]", "is", "not", "None", "else", "'UNKNOWN'", "# Build the full name = array type + array name", "full_name", "=", "'{} {}'", ".", "format", "(", "array_type", ",", "array", ")", "msg", "=", "'{:{width}}'", ".", "format", "(", "full_name", ",", "width", "=", "name_max_width", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "if", "self", ".", "stats", "[", "array", "]", "[", "'type'", "]", "==", "'raid0'", "and", "self", ".", "stats", "[", "array", "]", "[", "'status'", "]", "==", "'active'", ":", "msg", "=", "'{:>7}'", ".", "format", "(", "len", "(", "self", ".", "stats", "[", "array", "]", "[", "'components'", "]", ")", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "status", ")", ")", "msg", "=", "'{:>7}'", ".", "format", "(", "'-'", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "status", ")", ")", "elif", "self", ".", "stats", "[", "array", "]", "[", "'status'", "]", "==", "'active'", ":", "msg", "=", "'{:>7}'", ".", "format", "(", "self", ".", "stats", "[", "array", "]", "[", "'used'", "]", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "status", ")", ")", "msg", "=", "'{:>7}'", ".", "format", "(", "self", ".", "stats", "[", "array", "]", "[", "'available'", "]", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "status", ")", ")", "elif", "self", ".", "stats", "[", "array", "]", "[", "'status'", "]", "==", "'inactive'", ":", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "msg", "=", "'└─ Status {}'.for", "m", "at(sel", "f", ".sta", "t", "s[arr", "a", "y]['s", "t", "a", "tus'])", "", "", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "status", ")", ")", "components", "=", "sorted", "(", "iterkeys", "(", "self", ".", "stats", "[", "array", "]", "[", "'components'", "]", ")", ")", "for", "i", 
",", "component", "in", "enumerate", "(", "components", ")", ":", "if", "i", "==", "len", "(", "components", ")", "-", "1", ":", "tree_char", "=", "'└─'", "else", ":", "tree_char", "=", "'├─'", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "msg", "=", "' {} disk {}: '", ".", "format", "(", "tree_char", ",", "self", ".", "stats", "[", "array", "]", "[", "'components'", "]", "[", "component", "]", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "msg", "=", "'{}'", ".", "format", "(", "component", ")", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "if", "self", ".", "stats", "[", "array", "]", "[", "'type'", "]", "!=", "'raid0'", "and", "(", "self", ".", "stats", "[", "array", "]", "[", "'used'", "]", "<", "self", ".", "stats", "[", "array", "]", "[", "'available'", "]", ")", ":", "# Display current array configuration", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "msg", "=", "'└─ Degraded mode'", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ",", "status", ")", ")", "if", "len", "(", "self", ".", "stats", "[", "array", "]", "[", "'config'", "]", ")", "<", "17", ":", "ret", ".", "append", "(", "self", ".", "curse_new_line", "(", ")", ")", "msg", "=", "' └─ {}'.for", "m", "at(sel", "f", ".sta", "t", "s[arr", "a", "y]['c", "o", "n", "fig'].re", "p", "l", "ace('_'", ",", " 'A", "'", ")", "", "", "ret", ".", "append", "(", "self", ".", "curse_add_line", "(", "msg", ")", ")", "return", "ret" ]
49.863014
19.068493
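For reference, a hypothetical `self.stats` shape that `msg_curse` consumes (keys inferred from the accesses above) and a sketch of the lines it would render for a degraded RAID1 array; exact column widths depend on max_width and the curse_add_line semantics:

stats = {
    'md0': {
        'type': 'raid1',
        'status': 'active',
        'used': 1,           # members currently active
        'available': 2,      # members the array is configured for
        'config': 'U_',      # rendered as 'UA' after the '_' -> 'A' substitution
        'components': {'sda1': 0, 'sdb1': 1},
    },
}

# Approximate rendered layout (components are only listed for inactive arrays):
#   RAID disks        Used   Avail
#   RAID1 md0            1       2
#   └─ Degraded mode
#    └─ UA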