Dataset columns:
text: string (lengths 89 to 104k)
code_tokens: list
avg_line_len: float64 (7.91 to 980)
score: float64 (0 to 630)
def build_registered_span(self, span):
    """ Takes a BasicSpan and converts it into a registered JsonSpan """
    data = Data(baggage=span.context.baggage)

    kind = 1  # entry
    if span.operation_name in self.exit_spans:
        kind = 2  # exit

    # log is a special case as it is not entry nor exit
    if span.operation_name == "log":
        kind = 3  # intermediate span

    logs = self.collect_logs(span)
    if len(logs) > 0:
        if data.custom is None:
            data.custom = CustomData()
        data.custom.logs = logs

    if span.operation_name in self.http_spans:
        data.http = HttpData(host=span.tags.pop("http.host", None),
                             url=span.tags.pop(ext.HTTP_URL, None),
                             params=span.tags.pop('http.params', None),
                             method=span.tags.pop(ext.HTTP_METHOD, None),
                             status=span.tags.pop(ext.HTTP_STATUS_CODE, None),
                             path_tpl=span.tags.pop("http.path_tpl", None),
                             error=span.tags.pop('http.error', None))

    if span.operation_name == "rabbitmq":
        data.rabbitmq = RabbitmqData(exchange=span.tags.pop('exchange', None),
                                     queue=span.tags.pop('queue', None),
                                     sort=span.tags.pop('sort', None),
                                     address=span.tags.pop('address', None),
                                     key=span.tags.pop('key', None))
        if data.rabbitmq.sort == 'consume':
            kind = 1  # entry

    if span.operation_name == "redis":
        data.redis = RedisData(connection=span.tags.pop('connection', None),
                               driver=span.tags.pop('driver', None),
                               command=span.tags.pop('command', None),
                               error=span.tags.pop('redis.error', None),
                               subCommands=span.tags.pop('subCommands', None))

    if span.operation_name == "rpc-client" or span.operation_name == "rpc-server":
        data.rpc = RPCData(flavor=span.tags.pop('rpc.flavor', None),
                           host=span.tags.pop('rpc.host', None),
                           port=span.tags.pop('rpc.port', None),
                           call=span.tags.pop('rpc.call', None),
                           call_type=span.tags.pop('rpc.call_type', None),
                           params=span.tags.pop('rpc.params', None),
                           baggage=span.tags.pop('rpc.baggage', None),
                           error=span.tags.pop('rpc.error', None))

    if span.operation_name == "sqlalchemy":
        data.sqlalchemy = SQLAlchemyData(sql=span.tags.pop('sqlalchemy.sql', None),
                                         eng=span.tags.pop('sqlalchemy.eng', None),
                                         url=span.tags.pop('sqlalchemy.url', None),
                                         err=span.tags.pop('sqlalchemy.err', None))

    if span.operation_name == "soap":
        data.soap = SoapData(action=span.tags.pop('soap.action', None))

    if span.operation_name == "mysql":
        data.mysql = MySQLData(host=span.tags.pop('host', None),
                               db=span.tags.pop(ext.DATABASE_INSTANCE, None),
                               user=span.tags.pop(ext.DATABASE_USER, None),
                               stmt=span.tags.pop(ext.DATABASE_STATEMENT, None))
        if (data.custom is not None) and (data.custom.logs is not None) and len(data.custom.logs):
            tskey = list(data.custom.logs.keys())[0]
            data.mysql.error = data.custom.logs[tskey]['message']

    if span.operation_name == "log":
        data.log = {}
        # use last special key values
        # TODO - logic might need a tweak here
        for l in span.logs:
            if "message" in l.key_values:
                data.log["message"] = l.key_values.pop("message", None)
            if "parameters" in l.key_values:
                data.log["parameters"] = l.key_values.pop("parameters", None)

    entity_from = {'e': instana.singletons.agent.from_.pid,
                   'h': instana.singletons.agent.from_.agentUuid}

    json_span = JsonSpan(n=span.operation_name, k=kind,
                         t=span.context.trace_id, p=span.parent_id,
                         s=span.context.span_id,
                         ts=int(round(span.start_time * 1000)),
                         d=int(round(span.duration * 1000)),
                         f=entity_from, data=data)

    if span.stack:
        json_span.stack = span.stack

    error = span.tags.pop("error", False)
    ec = span.tags.pop("ec", None)
    if error and ec:
        json_span.error = error
        json_span.ec = ec

    if len(span.tags) > 0:
        if data.custom is None:
            data.custom = CustomData()
        data.custom.tags = span.tags

    return json_span
avg_line_len: 48.256881
score: 23.669725
def getClassAllSupers(self, aURI):
    """
    note: requires SPARQL 1.1
    2015-06-04: currently not used, inferred from above
    """
    try:
        qres = self.rdfgraph.query(
            """SELECT DISTINCT ?x
               WHERE {
                   { <%s> rdfs:subClassOf+ ?x }
                   FILTER (!isBlank(?x))
               }
               """ % (aURI))
    except Exception:
        printDebug("... warning: the 'getClassAllSupers' query failed (maybe missing SPARQL 1.1 support?)")
        qres = []
    return list(qres)
avg_line_len: 34
score: 14.5
def get_status(self, device_id):
    """Return the status of the MyQ garage door device with the given ID."""
    devices = self.get_devices()
    if devices is not False:
        for device in devices:
            if device['door'] == device_id:
                return device['status']
    return False
avg_line_len: 28.8
score: 14
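A minimal, self-contained sketch of how get_status would be exercised; FakeMyQ and its device dicts are hypothetical stand-ins for the real MyQ client, assuming get_devices() returns dicts with 'door' and 'status' keys (or False on failure), as the lookup above implies.

class FakeMyQ:
    """Hypothetical stub standing in for the real MyQ client."""

    def get_devices(self):
        # device shape assumed from the lookup in get_status
        return [{'door': 1, 'status': 'closed'},
                {'door': 2, 'status': 'open'}]

    def get_status(self, device_id):
        # same lookup logic as the function above
        devices = self.get_devices()
        if devices is not False:
            for device in devices:
                if device['door'] == device_id:
                    return device['status']
        return False

print(FakeMyQ().get_status(2))  # open
print(FakeMyQ().get_status(9))  # False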
def normalized(self):
    """Return a normalized version of the histogram where the values sum
    to one.
    """
    total = self.total()
    result = Histogram()
    for value, count in iteritems(self):
        try:
            result[value] = count / float(total)
        except UnorderableElements:
            result = Histogram.from_dict(dict(result), key=hash)
            result[value] = count / float(total)
    return result
avg_line_len: 34
score: 15.071429
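The same normalization idea in miniature, with a plain collections.Counter standing in for the library's Histogram class (Histogram, iteritems and UnorderableElements are not available here):

from collections import Counter

h = Counter({'a': 2, 'b': 1, 'c': 1})
total = float(sum(h.values()))
print({value: count / total for value, count in h.items()})
# {'a': 0.5, 'b': 0.25, 'c': 0.25}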
def parse_str(self, s):
    """
    Parse an entire file, supplied as a string, and return a
    :class:`Catchment` object.

    :param s: XML content to parse
    :type s: str
    :return: Parsed object
    :rtype: :class:`Catchment`
    """
    root = ET.fromstring(s)
    return self._parse(root)
avg_line_len: 27.272727
score: 12
def merge_all(dcts):
    """
    Shallow merge all the dcts
    :param dcts:
    :return:
    """
    # reduce takes (function, iterable, initializer); the dicts to fold
    # must come before the empty-dict initializer
    return reduce(
        lambda accum, dct: merge(accum, dct),
        dcts,
        dict()
    )
avg_line_len: 17.363636
score: 17.545455
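A quick check of the corrected fold; merge here is a hypothetical shallow, right-biased stand-in for the module's own merge helper:

from functools import reduce

def merge(a, b):
    # hypothetical stand-in: shallow merge, later dicts win
    out = dict(a)
    out.update(b)
    return out

print(reduce(lambda accum, dct: merge(accum, dct),
             [{'a': 1}, {'b': 2}, {'a': 3}], dict()))
# {'a': 3, 'b': 2}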
def fixations(self):
    """
    Returns all fixations that are on this image.
    A precondition for this to work is that a fixmat
    is associated with this Image object.
    """
    if not self._fixations:
        raise RuntimeError('This Images object does not have'
                           + ' an associated fixmat')
    return self._fixations[(self._fixations.category == self.category) &
                           (self._fixations.filenumber == self.image)]
avg_line_len: 44
score: 14.545455
def init_states(batch_size, num_lstm_layer, num_hidden):
    """ Returns name and shape of init states of LSTM network

    Parameters
    ----------
    batch_size: int
    num_lstm_layer: int
    num_hidden: int

    Returns
    -------
    list of tuple of str and tuple of int and int
    """
    init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
    init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
    return init_c + init_h
avg_line_len: 31.470588
score: 23.823529
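For example, two LSTM layers with batch size 4 and 8 hidden units produce the cell states followed by the hidden states:

print(init_states(4, 2, 8))
# [('l0_init_c', (4, 8)), ('l1_init_c', (4, 8)),
#  ('l0_init_h', (4, 8)), ('l1_init_h', (4, 8))]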
def run(self):
    """Build package and fix order list per checksum
    """
    self.files_exist()
    self.info_file()
    sources = self.sources
    if len(sources) > 1 and self.sbo_sources != sources:
        sources = self.sbo_sources
        # If the list does not have the same order, use the .info
        # order.
    BuildPackage(self.script, sources, self.path, auto=True).build()
    raise SystemExit()
avg_line_len: 36.333333
score: 15.25
def get_post_reference_section_keyword_patterns():
    """Return a list of compiled regex patterns used to search for various
       keywords that can often be found after, and therefore suggest the end of,
       a reference section in a full-text document.
       @return: (list) of compiled regex patterns.
    """
    compiled_patterns = []
    patterns = [u'(' + _create_regex_pattern_add_optional_spaces_to_word_characters(u'prepared') +
                ur'|' + _create_regex_pattern_add_optional_spaces_to_word_characters(u'created') +
                ur').*(AAS\s*)?\sLATEX',
                ur'AAS\s+?LATEX\s+?' +
                _create_regex_pattern_add_optional_spaces_to_word_characters(u'macros') + u'v',
                ur'^\s*' +
                _create_regex_pattern_add_optional_spaces_to_word_characters(u'This paper has been produced using'),
                ur'^\s*' +
                _create_regex_pattern_add_optional_spaces_to_word_characters(u'This article was processed by the author using Springer-Verlag') +
                u' LATEX']
    for p in patterns:
        compiled_patterns.append(re.compile(p, re.I | re.UNICODE))
    return compiled_patterns
avg_line_len: 56.52381
score: 23.714286
def add_objects(self, bundle, wait_for_completion=True, poll_interval=1,
                timeout=60, accept=MEDIA_TYPE_TAXII_V20,
                content_type=MEDIA_TYPE_STIX_V20):
    """Implement the ``Add Objects`` endpoint (section 5.4)

    Add objects to the collection. This may be performed either
    synchronously or asynchronously. To add asynchronously, set
    wait_for_completion to False. If False, the latter two args are
    unused. If the caller wishes to monitor the status of the addition,
    it may do so in its own way. To add synchronously, set
    wait_for_completion to True, and optionally set the poll and timeout
    intervals. After initiating the addition, the caller will block,
    and the TAXII "status" service will be polled until the timeout
    expires, or the operation completes.

    Args:
        bundle: A STIX bundle with the objects to add (string, dict, binary)
        wait_for_completion (bool): Whether to wait for the add operation
            to complete before returning
        poll_interval (int): If waiting for completion, how often to poll
            the status service (seconds)
        timeout (int): If waiting for completion, how long to poll until
            giving up (seconds). Use <= 0 to wait forever
        accept (str): media type to include in the ``Accept:`` header.
        content_type (str): media type to include in the ``Content-Type:``
            header.

    Returns:
        If ``wait_for_completion`` is False, a Status object corresponding
        to the initial status data returned from the service, is returned.
        The status may not yet be complete at this point.

        If ``wait_for_completion`` is True, a Status object corresponding
        to the completed operation is returned if it didn't time out;
        otherwise a Status object corresponding to the most recent data
        obtained before the timeout, is returned.
    """
    self._verify_can_write()

    headers = {
        "Accept": accept,
        "Content-Type": content_type,
    }

    if isinstance(bundle, dict):
        json_text = json.dumps(bundle, ensure_ascii=False)
        data = json_text.encode("utf-8")
    elif isinstance(bundle, six.text_type):
        data = bundle.encode("utf-8")
    elif isinstance(bundle, six.binary_type):
        data = bundle
    else:
        raise TypeError("Don't know how to handle type '{}'".format(
            type(bundle).__name__))

    status_json = self._conn.post(self.objects_url, headers=headers,
                                  data=data)

    status_url = urlparse.urljoin(
        self.url,
        "../../status/{}".format(status_json["id"])
    )

    status = Status(url=status_url, conn=self._conn,
                    status_info=status_json)

    if not wait_for_completion or status.status == "complete":
        return status

    status.wait_until_final(poll_interval, timeout)

    return status
avg_line_len: 40.815789
score: 24.894737
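A hedged usage sketch, assuming a writable Collection instance from this client; the collection object and the bundle contents are placeholders, and nothing here runs without a reachable TAXII 2.0 server:

# placeholder bundle; a real one carries STIX objects
bundle = {"type": "bundle", "id": "bundle--0", "objects": []}
status = collection.add_objects(bundle, wait_for_completion=True,
                                poll_interval=2, timeout=120)
print(status.status)  # e.g. "complete" once the server finishes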
def sample_cleanup(data, sample):
    """ stats, cleanup, and link to samples """

    ## get maxlen and depths array from clusters
    maxlens, depths = get_quick_depths(data, sample)

    try:
        depths.max()
    except ValueError:
        ## If depths is an empty array max() will raise
        print(" no clusters found for {}".format(sample.name))
        return

    ## Test if depths is non-empty, but just full of zeros.
    if depths.max():
        ## store which min was used to calculate hidepth here
        sample.stats_dfs.s3["hidepth_min"] = data.paramsdict["mindepth_majrule"]

        ## If our longest sequence is longer than the current max_fragment_length
        ## then update max_fragment_length. For assurance we require that
        ## max len is 4 greater than maxlen, to allow for pair separators.
        hidepths = depths >= data.paramsdict["mindepth_majrule"]
        maxlens = maxlens[hidepths]

        ## Handle the case where there are no hidepth clusters
        if maxlens.any():
            maxlen = int(maxlens.mean() + (2. * maxlens.std()))
        else:
            maxlen = 0
        if maxlen > data._hackersonly["max_fragment_length"]:
            data._hackersonly["max_fragment_length"] = maxlen + 4

        ## make sense of stats
        keepmj = depths[depths >= data.paramsdict["mindepth_majrule"]]
        keepstat = depths[depths >= data.paramsdict["mindepth_statistical"]]

        ## sample summary stat assignments
        sample.stats["state"] = 3
        sample.stats["clusters_total"] = depths.shape[0]
        sample.stats["clusters_hidepth"] = keepmj.shape[0]

        ## store depths histogram as a dict. Limit to first 25 bins
        bars, bins = np.histogram(depths, bins=range(1, 26))
        sample.depths = {int(i): v for i, v in zip(bins, bars) if v}

        ## sample stat assignments
        ## Trap numpy warnings ("mean of empty slice") printed by samples
        ## with few reads.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            sample.stats_dfs.s3["merged_pairs"] = sample.stats.reads_merged
            sample.stats_dfs.s3["clusters_total"] = depths.shape[0]
            try:
                sample.stats_dfs.s3["clusters_hidepth"] = int(sample.stats["clusters_hidepth"])
            except ValueError:
                ## Handle clusters_hidepth == NaN
                sample.stats_dfs.s3["clusters_hidepth"] = 0
            sample.stats_dfs.s3["avg_depth_total"] = depths.mean()
            LOGGER.debug("total depth {}".format(sample.stats_dfs.s3["avg_depth_total"]))
            sample.stats_dfs.s3["avg_depth_mj"] = keepmj.mean()
            LOGGER.debug("mj depth {}".format(sample.stats_dfs.s3["avg_depth_mj"]))
            sample.stats_dfs.s3["avg_depth_stat"] = keepstat.mean()
            sample.stats_dfs.s3["sd_depth_total"] = depths.std()
            sample.stats_dfs.s3["sd_depth_mj"] = keepmj.std()
            sample.stats_dfs.s3["sd_depth_stat"] = keepstat.std()
    else:
        print(" no clusters found for {}".format(sample.name))

    ## Get some stats from the bam files
    ## This is moderately hackish. samtools flagstat returns
    ## the number of reads in the bam file as the first element
    ## of the first line, this call makes this assumption.
    if not data.paramsdict["assembly_method"] == "denovo":
        refmap_stats(data, sample)

    log_level = logging.getLevelName(LOGGER.getEffectiveLevel())
    if not log_level == "DEBUG":
        ## Clean up loose files only if not in DEBUG
        ##- edits/*derep, utemp, *utemp.sort, *htemp, *clust.gz
        derepfile = os.path.join(data.dirs.edits, sample.name + "_derep.fastq")
        mergefile = os.path.join(data.dirs.edits, sample.name + "_merged_.fastq")
        uhandle = os.path.join(data.dirs.clusts, sample.name + ".utemp")
        usort = os.path.join(data.dirs.clusts, sample.name + ".utemp.sort")
        hhandle = os.path.join(data.dirs.clusts, sample.name + ".htemp")
        clusters = os.path.join(data.dirs.clusts, sample.name + ".clust.gz")
        for f in [derepfile, mergefile, uhandle, usort, hhandle, clusters]:
            try:
                os.remove(f)
            except OSError:
                pass
avg_line_len: 44.806452
score: 24.989247
def autocommand(func):
    """ A simplified decorator for making a single function a Command
    instance.  In the future this will leverage PEP0484 to do really smart
    function parsing and conversion to argparse actions. """
    name = func.__name__
    title, desc = command.parse_docstring(func)
    if not title:
        title = 'Auto command for: %s' % name
    if not desc:
        # Prevent Command from using docstring of AutoCommand
        desc = ' '
    return AutoCommand(title=title, desc=desc, name=name, func=func)
avg_line_len: 43.416667
score: 16.5
def ConvCnstrMOD(*args, **kwargs):
    """A wrapper function that dynamically defines a class derived from
    one of the implementations of the Convolutional Constrained MOD
    problems, and returns an object instantiated with the provided
    parameters. The wrapper is designed to allow the appropriate object
    to be created by calling this function using the same syntax as would
    be used if it were a class. The specific implementation is selected
    by use of an additional keyword argument 'method'. Valid values are:

    - ``'ism'`` : Use the implementation defined in
      :class:`.ConvCnstrMOD_IterSM`. This method works well for a small
      number of training images, but is very slow for larger training sets.
    - ``'cg'`` : Use the implementation defined in
      :class:`.ConvCnstrMOD_CG`. This method is slower than ``'ism'`` for
      small training sets, but has better run time scaling as the training
      set grows.
    - ``'cns'`` : Use the implementation defined in
      :class:`.ConvCnstrMOD_Consensus`. This method is the best choice for
      large training sets.

    The default value is ``'cns'``.
    """

    # Extract method selection argument or set default
    if 'method' in kwargs:
        method = kwargs['method']
        del kwargs['method']
    else:
        method = 'cns'

    # Assign base class depending on method selection argument
    if method == 'ism':
        base = ConvCnstrMOD_IterSM
    elif method == 'cg':
        base = ConvCnstrMOD_CG
    elif method == 'cns':
        base = ConvCnstrMOD_Consensus
    else:
        raise ValueError('Unknown ConvCnstrMOD solver method %s' % method)

    # Nested class with dynamically determined inheritance
    class ConvCnstrMOD(base):
        def __init__(self, *args, **kwargs):
            super(ConvCnstrMOD, self).__init__(*args, **kwargs)

    # Allow pickling of objects of type ConvCnstrMOD
    _fix_dynamic_class_lookup(ConvCnstrMOD, method)

    # Return object of the nested class type
    return ConvCnstrMOD(*args, **kwargs)
avg_line_len: 38.596154
score: 20.019231
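The wrapper's core trick, reduced to a self-contained sketch: choose a base class from a keyword argument, define a class deriving from it, and return an instance (the toy bases below are invented for illustration):

def make_solver(*args, **kwargs):
    # toy stand-ins for ConvCnstrMOD_IterSM / ConvCnstrMOD_Consensus
    class IterSM(object):
        name = 'ism'

    class Consensus(object):
        name = 'cns'

    method = kwargs.pop('method', 'cns')
    base = {'ism': IterSM, 'cns': Consensus}[method]

    # inheritance is decided at call time
    class Solver(base):
        pass

    return Solver(*args, **kwargs)

print(make_solver(method='ism').name)  # ism
print(make_solver().name)              # cns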
def overlay(overlay_name, dataset_uri, item_identifier):
    """Print the value stored in the overlay for the given dataset item."""
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    if overlay_name not in dataset.list_overlay_names():
        click.secho(
            "No such overlay in dataset: {}".format(overlay_name),
            fg="red",
            err=True
        )
        sys.exit(4)
    overlay = dataset.get_overlay(overlay_name)
    try:
        click.secho(str(overlay[item_identifier]))
    except KeyError:
        click.secho(
            "No such identifier in overlay: {}".format(item_identifier),
            fg="red",
            err=True
        )
        sys.exit(5)
avg_line_len: 29.75
score: 20.291667
def populate_native_libraries(version):
    """Populates ``binary-extension.rst`` with release-specific data.

    Args:
        version (str): The current version.
    """
    with open(BINARY_EXT_TEMPLATE, "r") as file_obj:
        template = file_obj.read()
    contents = template.format(revision=version)
    with open(BINARY_EXT_FILE, "w") as file_obj:
        file_obj.write(contents)
avg_line_len: 34.636364
score: 10.636364
def parse_issuer_cred(issuer_cred):
    """
    Given an X509 PEM file in the form of a string, parses it into sections
    by the PEM delimiters of: -----BEGIN <label>----- and -----END <label>-----
    Confirms the sections can be decoded in the proxy credential order of:
    issuer cert, issuer private key, proxy chain of 0 or more certs.

    Returns the issuer cert and private key as loaded cryptography objects
    and the proxy chain as a potentially empty string.
    """
    # get each section of the PEM file
    sections = re.findall(
        "-----BEGIN.*?-----.*?-----END.*?-----", issuer_cred, flags=re.DOTALL
    )
    try:
        issuer_cert = sections[0]
        issuer_private_key = sections[1]
        issuer_chain_certs = sections[2:]
    except IndexError:
        raise ValueError(
            "Unable to parse PEM data in credentials, "
            "make sure the X.509 file is in PEM format and "
            "consists of the issuer cert, issuer private key, "
            "and proxy chain (if any) in that order."
        )

    # then validate that each section of data can be decoded as expected
    try:
        loaded_cert = x509.load_pem_x509_certificate(
            six.b(issuer_cert), default_backend()
        )
        loaded_private_key = serialization.load_pem_private_key(
            six.b(issuer_private_key), password=None, backend=default_backend()
        )
        for chain_cert in issuer_chain_certs:
            x509.load_pem_x509_certificate(six.b(chain_cert), default_backend())
        issuer_chain = "".join(issuer_chain_certs)
    except ValueError:
        raise ValueError(
            "Failed to decode PEM data in credentials. Make sure "
            "the X.509 file consists of the issuer cert, "
            "issuer private key, and proxy chain (if any) "
            "in that order."
        )

    # return loaded cryptography objects and the issuer chain
    return loaded_cert, loaded_private_key, issuer_chain
avg_line_len: 41.956522
score: 21.173913
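The section-splitting regex in isolation, applied to a toy PEM-shaped string (fake labels and bodies, no real key material):

import re

fake_pem = (
    "-----BEGIN CERTIFICATE-----\nAAA\n-----END CERTIFICATE-----\n"
    "-----BEGIN RSA PRIVATE KEY-----\nBBB\n-----END RSA PRIVATE KEY-----\n"
)
sections = re.findall(
    "-----BEGIN.*?-----.*?-----END.*?-----", fake_pem, flags=re.DOTALL
)
print(len(sections))  # 2: issuer cert first, private key second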
def append_this_package_path(depth=1):
    """ Used in this_package.py
    import snipy.this_package
    """
    from .caller import caller
    logg.debug('caller module %s', caller.modulename(depth + 1))
    c = caller.abspath(depth + 1)
    logg.debug('caller path %s', c)
    p = guess_package_path(dirname(c))
    if p:
        logg.debug('appending sys path %s', p)
        append_sys_path(p)
    else:
        # do some logging
        logg.debug('failed to guess package path for: %s', c)
avg_line_len: 26.777778
score: 15.222222
def remove_tags(self, tags):
    """
    Remove tags from a server. Accepts tags as strings or Tag objects.
    """
    if self.cloud_manager.remove_tags(self, tags):
        new_tags = [tag for tag in self.tags if tag not in tags]
        object.__setattr__(self, 'tags', new_tags)
avg_line_len: 42.142857
score: 13.857143
def get_pdffilepath(pdffilename):
    """
    Returns the path for the pdf file

    args: pdffilename: string
    returns: path for the plots folder / pdffilename.pdf
    """
    return FILEPATHSTR.format(
        root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep,
        name=pdffilename,
        folder=PURPOSE.get("plots").get("folder", "plots"),
        ext=PURPOSE.get("plots").get("extension", "pdf")
    )
avg_line_len: 33.428571
score: 16.285714
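A minimal sketch of how such a template could be laid out; this FILEPATHSTR value is an assumption for illustration, not the module's actual constant:

import os

# assumed template shape, for illustration only
FILEPATHSTR = "{root_dir}{os_sep}{folder}{os_sep}{name}{os_extsep}{ext}"
print(FILEPATHSTR.format(root_dir="/project", os_sep=os.sep,
                         os_extsep=os.extsep, name="report",
                         folder="plots", ext="pdf"))
# /project/plots/report.pdf (on POSIX)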
def is_in_data_type_range(self, raise_exception=True):
    """Check if collection values are in physically possible ranges for the data_type.

    If this method returns False, the Data Collection's data is physically or
    mathematically impossible for the data_type."""
    return self._header.data_type.is_in_range(
        self._values, self._header.unit, raise_exception)
avg_line_len: 55.857143
score: 16
def propagate(p0, angle, d, deg=True, bearing=False, r=r_earth_mean):
    """
    Given an initial point and angle, move distance d along the surface

    Parameters
    ----------
    p0 : point-like (or array of point-like) [lon, lat] objects
    angle : float (or array of float)
        bearing. Note that by default, 0 degrees is due East increasing
        clockwise so that 90 degrees is due North. See the bearing flag
        to change the meaning of this angle
    d : float (or array of float)
        distance to move. The units of d should be consistent with input r
    deg : bool, optional (default True)
        Whether both p0 and angle are specified in degrees. The output
        points will also match the value of this flag.
    bearing : bool, optional (default False)
        Indicates whether to interpret the input angle as the classical
        definition of bearing.
    r : float, optional (default r_earth_mean)
        radius of the sphere

    Reference
    ---------
    http://www.movable-type.co.uk/scripts/latlong.html - Destination

    Note: Spherical earth model. By default uses radius of 6371.0 km.
    """
    single, (p0, angle, d) = _to_arrays((p0, 2), (angle, 1), (d, 1))
    if deg:
        p0 = np.radians(p0)
        angle = np.radians(angle)

    if not bearing:
        angle = np.pi / 2.0 - angle

    lon0, lat0 = p0[:, 0], p0[:, 1]
    angd = d / r

    lat1 = arcsin(sin(lat0) * cos(angd) + cos(lat0) * sin(angd) * cos(angle))
    a = sin(angle) * sin(angd) * cos(lat0)
    b = cos(angd) - sin(lat0) * sin(lat1)
    lon1 = lon0 + arctan2(a, b)

    p1 = np.column_stack([lon1, lat1])
    if deg:
        p1 = np.degrees(p1)

    if single:
        p1 = p1[0]
    return p1
avg_line_len: 29.892857
score: 22.964286
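A sanity check of the spherical model: with the default angle convention, 90 degrees is due North, so moving one degree of arc (about 111.195 km on a 6371 km sphere) from the origin should land near lon 0, lat 1:

import numpy as np

p1 = propagate([0.0, 0.0], 90.0, 6371.0 * np.pi / 180.0, r=6371.0)
print(p1)  # approximately [0.0, 1.0]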
def fts_count(self, fts, inv):
    """Return the count of segments in an inventory matching a given
    feature mask.

    Args:
        fts (set): feature mask given as a set of (value, feature) tuples
        inv (set): inventory of segments (as Unicode IPA strings)

    Returns:
        int: number of segments in `inv` that match feature mask `fts`
    """
    return len(list(filter(lambda s: self.fts_match(fts, s), inv)))
avg_line_len: 37.833333
score: 24.25
def remove_dhcp_server(self, server):
    """Removes the DHCP server settings.

    in server of type :class:`IDHCPServer`
        DHCP server settings to be removed

    raises :class:`OleErrorInvalidarg`
        Host network interface @a name already exists.
    """
    if not isinstance(server, IDHCPServer):
        raise TypeError("server can only be an instance of type IDHCPServer")
    self._call("removeDHCPServer",
               in_p=[server])
avg_line_len: 35.142857
score: 14.428571
def create_arj(archive, compression, cmd, verbosity, interactive, filenames):
    """Create an ARJ archive."""
    cmdlist = [cmd, 'a', '-r']
    if not interactive:
        cmdlist.append('-y')
    cmdlist.append(archive)
    cmdlist.extend(filenames)
    return cmdlist
avg_line_len: 33.125
score: 15.375
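The command list is fully deterministic, so a spot-check is easy:

print(create_arj('backup.arj', None, 'arj', 0, False, ['a.txt', 'b.txt']))
# ['arj', 'a', '-r', '-y', 'backup.arj', 'a.txt', 'b.txt']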
def contains_pyversion(marker):
    """Check whether a marker contains a python_version operand.
    """
    if not marker:
        return False
    marker = _ensure_marker(marker)
    return _markers_contains_pyversion(marker._markers)
avg_line_len: 28.75
score: 13.875
def parse_glyphs_filter(filter_str, is_pre=False):
    """Parses a glyphs custom filter string into a dict object that
       ufo2ft can consume.

        Reference:
            ufo2ft: https://github.com/googlei18n/ufo2ft
            Glyphs 2.3 Handbook July 2016, p184

        Args:
            filter_str - a string of glyphs app filter

        Return:
            A dictionary containing the structured filter.
            Returns None if parsing failed.
    """
    elements = filter_str.split(";")

    if elements[0] == "":
        logger.error(
            "Failed to parse glyphs filter, expecting a filter name: %s",
            filter_str,
        )
        return None

    result = {"name": elements[0]}
    for idx, elem in enumerate(elements[1:]):
        if not elem:
            # skip empty arguments
            continue
        if ":" in elem:
            # Key value pair
            key, value = elem.split(":", 1)
            if key.lower() in ["include", "exclude"]:
                if idx != len(elements[1:]) - 1:
                    logger.error(
                        "{} can only be present as the last argument in the filter. "
                        "{} is ignored.".format(key, elem)
                    )
                    continue
                result[key.lower()] = re.split("[ ,]+", value)
            else:
                if "kwargs" not in result:
                    result["kwargs"] = {}
                result["kwargs"][key] = cast_to_number_or_bool(value)
        else:
            if "args" not in result:
                result["args"] = []
            result["args"].append(cast_to_number_or_bool(elem))
    if is_pre:
        result["pre"] = True
    return result
avg_line_len: 32.192308
score: 17.615385
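For instance, a filter with two keyword arguments and a trailing include list parses as below, assuming cast_to_number_or_bool maps '40' to the integer 40:

print(parse_glyphs_filter("Transformations;OffsetX:40;OffsetY:60;include:a,b"))
# {'name': 'Transformations',
#  'kwargs': {'OffsetX': 40, 'OffsetY': 60},
#  'include': ['a', 'b']}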
def _get_embedded(self, name):
    '''
    Return an embedded struct object to calculate the size or use
    _tobytes(True) to convert just the embedded parts.

    :param name: either the original type, or the name of the original
                 type. It is always the type used in type definitions,
                 even if it is already replaced once or more.

    :returns: an embedded struct
    '''
    if hasattr(name, 'readablename'):
        name = name.readablename
    t, i = self._target._embedded_indices[name]
    return t._seqs[i]
avg_line_len: 41.142857
score: 25.714286
def _do_parse(inp, fmt, encoding, force_types):
    """Actually parse input.

    Args:
        inp: bytes yielding file-like object
        fmt: format to use for parsing
        encoding: encoding of `inp`
        force_types:
            if `True`, integers, floats, booleans and none/null
                are recognized and returned as proper types instead of strings;
            if `False`, everything is converted to strings
            if `None`, backend return value is used

    Returns:
        parsed `inp` (dict or list) containing unicode values

    Raises:
        various sorts of errors raised by used libraries while parsing
    """
    res = {}
    _check_lib_installed(fmt, 'parse')

    if fmt == 'ini':
        cfg = configobj.ConfigObj(inp, encoding=encoding)
        res = cfg.dict()
    elif fmt == 'json':
        if six.PY3:
            # python 3 json only reads from unicode objects
            inp = io.TextIOWrapper(inp, encoding=encoding)
        res = json.load(inp, encoding=encoding)
    elif fmt == 'json5':
        if six.PY3:
            inp = io.TextIOWrapper(inp, encoding=encoding)
        res = json5.load(inp, encoding=encoding)
    elif fmt == 'toml':
        if not _is_utf8(encoding):
            raise AnyMarkupError('toml is always utf-8 encoded according to specification')
        if six.PY3:
            # python 3 toml prefers unicode objects
            inp = io.TextIOWrapper(inp, encoding=encoding)
        res = toml.load(inp)
    elif fmt == 'xml':
        res = xmltodict.parse(inp, encoding=encoding)
    elif fmt == 'yaml':
        # guesses encoding by its own, there seems to be no way to pass
        # it explicitly
        res = yaml.safe_load(inp)
    else:
        raise  # unknown format

    # make sure it's all unicode and all int/float values were parsed correctly
    # the unicode part is here because of yaml on PY2 and also as workaround for
    # https://github.com/DiffSK/configobj/issues/18#issuecomment-76391689
    return _ensure_proper_types(res, encoding, force_types)
38.538462
19.788462
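For reference, a minimal runnable sketch of the PY3 byte-stream handling the `json` branch above relies on; the sample payload is an assumption for illustration.

import io
import json

# A bytes-yielding file-like object, as _do_parse expects.
raw = io.BytesIO(b'{"answer": 42}')

# Same pattern as the 'json' branch on Python 3: wrap the byte stream
# so the json module reads decoded text in the given encoding.
text_stream = io.TextIOWrapper(raw, encoding='utf-8')
print(json.load(text_stream))  # {'answer': 42}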
def transitive_closure(m, orig, rel): ''' Generate the closure over a transitive relationship in depth-first fashion ''' # NOTE: assumes the relation graph is acyclic; a cycle would recurse forever links = list(m.match(orig, rel)) for link in links: target = link[0][TARGET] yield target yield from transitive_closure(m, target, rel)
[ "def", "transitive_closure", "(", "m", ",", "orig", ",", "rel", ")", ":", "#FIXME: Broken for now", "links", "=", "list", "(", "m", ".", "match", "(", "orig", ",", "rel", ")", ")", "for", "link", "in", "links", ":", "yield", "link", "[", "0", "]", "[", "TARGET", "]", "yield", "from", "transitive_closure", "(", "m", ",", "target", ",", "rel", ")" ]
32.777778
18.555556
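A self-contained sketch of the same depth-first recursion, using a plain dict of (origin, relation) -> targets as a hypothetical stand-in for the model's `match` API.

# Hypothetical in-memory edge store standing in for m.match(orig, rel).
edges = {
    ('a', 'knows'): ['b', 'c'],
    ('b', 'knows'): ['d'],
}

def closure(edges, orig, rel):
    # Yield each direct target, then recurse into it depth-first.
    for target in edges.get((orig, rel), []):
        yield target
        yield from closure(edges, target, rel)

print(list(closure(edges, 'a', 'knows')))  # ['b', 'd', 'c']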
def version_parts(version): """ Split a version string into numeric X.Y.Z part and the rest (milestone). """ m = re.match(r'(\d+(?:\.\d+)*)([.%]|$)(.*)', version) if m: numver = m.group(1) rest = m.group(2) + m.group(3) return numver, rest else: return version, ''
[ "def", "version_parts", "(", "version", ")", ":", "m", "=", "re", ".", "match", "(", "r'(\\d+(?:\\.\\d+)*)([.%]|$)(.*)'", ",", "version", ")", "if", "m", ":", "numver", "=", "m", ".", "group", "(", "1", ")", "rest", "=", "m", ".", "group", "(", "2", ")", "+", "m", ".", "group", "(", "3", ")", "return", "numver", ",", "rest", "else", ":", "return", "version", ",", "''" ]
28.181818
15.454545
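A few illustrative calls (assuming `version_parts` and `re` from above are in scope); the version strings are made up.

print(version_parts('1.2.3'))        # ('1.2.3', '')
print(version_parts('1.2.3.beta1'))  # ('1.2.3', '.beta1')
print(version_parts('1.2%rc1'))      # ('1.2', '%rc1')
print(version_parts('trunk'))        # ('trunk', '') -- no numeric prefix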
def label( self, node ): """Return textual description of this node""" result = [] if node.get('type'): result.append( node['type'] ) if node.get('name' ): result.append( node['name'] ) elif node.get('value') is not None: result.append( unicode(node['value'])[:32]) if 'module' in node and not node['module'] in result: result.append( ' in %s'%( node['module'] )) if node.get( 'size' ): result.append( '%s'%( mb( node['size'] ))) if node.get( 'totsize' ): result.append( '(%s)'%( mb( node['totsize'] ))) parent_count = len( node.get('parents',())) if parent_count > 1: result.append( '/%s refs'%( parent_count )) return " ".join(result)
[ "def", "label", "(", "self", ",", "node", ")", ":", "result", "=", "[", "]", "if", "node", ".", "get", "(", "'type'", ")", ":", "result", ".", "append", "(", "node", "[", "'type'", "]", ")", "if", "node", ".", "get", "(", "'name'", ")", ":", "result", ".", "append", "(", "node", "[", "'name'", "]", ")", "elif", "node", ".", "get", "(", "'value'", ")", "is", "not", "None", ":", "result", ".", "append", "(", "unicode", "(", "node", "[", "'value'", "]", ")", "[", ":", "32", "]", ")", "if", "'module'", "in", "node", "and", "not", "node", "[", "'module'", "]", "in", "result", ":", "result", ".", "append", "(", "' in %s'", "%", "(", "node", "[", "'module'", "]", ")", ")", "if", "node", ".", "get", "(", "'size'", ")", ":", "result", ".", "append", "(", "'%s'", "%", "(", "mb", "(", "node", "[", "'size'", "]", ")", ")", ")", "if", "node", ".", "get", "(", "'totsize'", ")", ":", "result", ".", "append", "(", "'(%s)'", "%", "(", "mb", "(", "node", "[", "'totsize'", "]", ")", ")", ")", "parent_count", "=", "len", "(", "node", ".", "get", "(", "'parents'", ",", "(", ")", ")", ")", "if", "parent_count", ">", "1", ":", "result", ".", "append", "(", "'/%s refs'", "%", "(", "parent_count", ")", ")", "return", "\" \"", ".", "join", "(", "result", ")" ]
41.578947
11.210526
def get_assignee(self, login): """ given the user login, looks for a user in assignee list of the repo and return it if was found. """ if not login: return GithubObject.NotSet if not hasattr(self, '_assignees'): self._assignees = {c.login: c for c in self.repo.get_assignees()} if login not in self._assignees: # warning print("{} doesn't belong to this repo. This issue won't be assigned.".format(login)) return self._assignees.get(login)
[ "def", "get_assignee", "(", "self", ",", "login", ")", ":", "if", "not", "login", ":", "return", "GithubObject", ".", "NotSet", "if", "not", "hasattr", "(", "self", ",", "'_assignees'", ")", ":", "self", ".", "_assignees", "=", "{", "c", ".", "login", ":", "c", "for", "c", "in", "self", ".", "repo", ".", "get_assignees", "(", ")", "}", "if", "login", "not", "in", "self", ".", "_assignees", ":", "# warning", "print", "(", "\"{} doesn't belong to this repo. This issue won't be assigned.\"", ".", "format", "(", "login", ")", ")", "return", "self", ".", "_assignees", ".", "get", "(", "login", ")" ]
41.461538
14.384615
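The lazy one-shot cache in `get_assignee` generalizes; a minimal sketch with a hypothetical fetch in place of `repo.get_assignees()`.

class AssigneeCache:
    def _fetch_logins(self):
        # Hypothetical stand-in for the expensive repo.get_assignees() call.
        return ['alice', 'bob']

    def get(self, login):
        # Build the lookup table only once, on first use.
        if not hasattr(self, '_assignees'):
            self._assignees = {l: l for l in self._fetch_logins()}
        return self._assignees.get(login)

cache = AssigneeCache()
print(cache.get('alice'))  # 'alice'
print(cache.get('eve'))    # None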
def add_scheduling_block(config): """Adds a scheduling block to the database, returning a response object""" try: DB.add_sbi(config) except jsonschema.ValidationError as error: error_dict = error.__dict__ for key in error_dict: error_dict[key] = error_dict[key].__str__() error_response = dict(message="Failed to add scheduling block", reason="JSON validation error", details=error_dict) return error_response, HTTPStatus.BAD_REQUEST response = dict(config=config, message='Successfully registered scheduling block ' 'instance with ID: {}'.format(config['id'])) response['links'] = { 'self': '{}scheduling-block/{}'.format(request.url_root, config['id']), 'list': '{}'.format(request.url), 'home': '{}'.format(request.url_root) } return response, HTTPStatus.ACCEPTED
[ "def", "add_scheduling_block", "(", "config", ")", ":", "try", ":", "DB", ".", "add_sbi", "(", "config", ")", "except", "jsonschema", ".", "ValidationError", "as", "error", ":", "error_dict", "=", "error", ".", "__dict__", "for", "key", "in", "error_dict", ":", "error_dict", "[", "key", "]", "=", "error_dict", "[", "key", "]", ".", "__str__", "(", ")", "error_response", "=", "dict", "(", "message", "=", "\"Failed to add scheduling block\"", ",", "reason", "=", "\"JSON validation error\"", ",", "details", "=", "error_dict", ")", "return", "error_response", ",", "HTTPStatus", ".", "BAD_REQUEST", "response", "=", "dict", "(", "config", "=", "config", ",", "message", "=", "'Successfully registered scheduling block '", "'instance with ID: {}'", ".", "format", "(", "config", "[", "'id'", "]", ")", ")", "response", "[", "'links'", "]", "=", "{", "'self'", ":", "'{}scheduling-block/{}'", ".", "format", "(", "request", ".", "url_root", ",", "config", "[", "'id'", "]", ")", ",", "'list'", ":", "'{}'", ".", "format", "(", "request", ".", "url", ")", ",", "'home'", ":", "'{}'", ".", "format", "(", "request", ".", "url_root", ")", "}", "return", "response", ",", "HTTPStatus", ".", "ACCEPTED" ]
45.636364
15.181818
def handle_pkg_lic(self, p_term, predicate, builder_func): """Handles package lics concluded or declared.""" try: for _, _, licenses in self.graph.triples((p_term, predicate, None)): if (licenses, RDF.type, self.spdx_namespace['ConjunctiveLicenseSet']) in self.graph: lics = self.handle_conjunctive_list(licenses) builder_func(self.doc, lics) elif (licenses, RDF.type, self.spdx_namespace['DisjunctiveLicenseSet']) in self.graph: lics = self.handle_disjunctive_list(licenses) builder_func(self.doc, lics) else: try: lics = self.handle_lics(licenses) builder_func(self.doc, lics) except SPDXValueError: self.value_error('PKG_SINGLE_LICS', licenses) except CardinalityError: self.more_than_one_error('package {0}'.format(predicate))
[ "def", "handle_pkg_lic", "(", "self", ",", "p_term", ",", "predicate", ",", "builder_func", ")", ":", "try", ":", "for", "_", ",", "_", ",", "licenses", "in", "self", ".", "graph", ".", "triples", "(", "(", "p_term", ",", "predicate", ",", "None", ")", ")", ":", "if", "(", "licenses", ",", "RDF", ".", "type", ",", "self", ".", "spdx_namespace", "[", "'ConjunctiveLicenseSet'", "]", ")", "in", "self", ".", "graph", ":", "lics", "=", "self", ".", "handle_conjunctive_list", "(", "licenses", ")", "builder_func", "(", "self", ".", "doc", ",", "lics", ")", "elif", "(", "licenses", ",", "RDF", ".", "type", ",", "self", ".", "spdx_namespace", "[", "'DisjunctiveLicenseSet'", "]", ")", "in", "self", ".", "graph", ":", "lics", "=", "self", ".", "handle_disjunctive_list", "(", "licenses", ")", "builder_func", "(", "self", ".", "doc", ",", "lics", ")", "else", ":", "try", ":", "lics", "=", "self", ".", "handle_lics", "(", "licenses", ")", "builder_func", "(", "self", ".", "doc", ",", "lics", ")", "except", "SPDXValueError", ":", "self", ".", "value_error", "(", "'PKG_SINGLE_LICS'", ",", "licenses", ")", "except", "CardinalityError", ":", "self", ".", "more_than_one_error", "(", "'package {0}'", ".", "format", "(", "predicate", ")", ")" ]
50.05
24.3
def install_translations(config): """Add check translations according to ``config`` as a fallback to existing translations""" if not config: return from . import _translation checks_translation = gettext.translation(domain=config["domain"], localedir=internal.check_dir / config["localedir"], fallback=True) _translation.add_fallback(checks_translation)
[ "def", "install_translations", "(", "config", ")", ":", "if", "not", "config", ":", "return", "from", ".", "import", "_translation", "checks_translation", "=", "gettext", ".", "translation", "(", "domain", "=", "config", "[", "\"domain\"", "]", ",", "localedir", "=", "internal", ".", "check_dir", "/", "config", "[", "\"localedir\"", "]", ",", "fallback", "=", "True", ")", "_translation", ".", "add_fallback", "(", "checks_translation", ")" ]
42.090909
23.454545
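A minimal sketch of the gettext fallback chaining used above; the domain names and localedir are illustrative assumptions, and `fallback=True` keeps this runnable even when no compiled catalogs exist.

import gettext

# With fallback=True, missing catalogs yield NullTranslations objects,
# so this runs even without .mo files on disk.
base = gettext.translation('messages', localedir='locale', fallback=True)
checks = gettext.translation('checks', localedir='locale', fallback=True)

# Chain the check translations behind the existing ones, as above.
base.add_fallback(checks)
print(base.gettext('hello'))  # falls back through the chain to 'hello'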
async def Claim(self, claims): ''' claims : typing.Sequence[~SingularClaim] Returns -> typing.Sequence[~ErrorResult] ''' # map input types to rpc msg _params = dict() msg = dict(type='Singular', request='Claim', version=2, params=_params) _params['claims'] = claims reply = await self.rpc(msg) return reply
[ "async", "def", "Claim", "(", "self", ",", "claims", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'Singular'", ",", "request", "=", "'Claim'", ",", "version", "=", "2", ",", "params", "=", "_params", ")", "_params", "[", "'claims'", "]", "=", "claims", "reply", "=", "await", "self", ".", "rpc", "(", "msg", ")", "return", "reply" ]
30.714286
11.571429
def inter_data_operation(self, axis, func, other): """Apply a function that requires two BaseFrameManager objects. Args: axis: The axis to apply the function over (0 - rows, 1 - columns) func: The function to apply other: The other BaseFrameManager object to apply func to. Returns: A new BaseFrameManager object, the type of object that called this. """ if axis: partitions = self.row_partitions other_partitions = other.row_partitions else: partitions = self.column_partitions other_partitions = other.column_partitions func = self.preprocess_func(func) result = np.array( [ partitions[i].apply( func, num_splits=self._compute_num_partitions(), other_axis_partition=other_partitions[i], ) for i in range(len(partitions)) ] ) return self.__constructor__(result) if axis else self.__constructor__(result.T)
[ "def", "inter_data_operation", "(", "self", ",", "axis", ",", "func", ",", "other", ")", ":", "if", "axis", ":", "partitions", "=", "self", ".", "row_partitions", "other_partitions", "=", "other", ".", "row_partitions", "else", ":", "partitions", "=", "self", ".", "column_partitions", "other_partitions", "=", "other", ".", "column_partitions", "func", "=", "self", ".", "preprocess_func", "(", "func", ")", "result", "=", "np", ".", "array", "(", "[", "partitions", "[", "i", "]", ".", "apply", "(", "func", ",", "num_splits", "=", "self", ".", "_compute_num_partitions", "(", ")", ",", "other_axis_partition", "=", "other_partitions", "[", "i", "]", ",", ")", "for", "i", "in", "range", "(", "len", "(", "partitions", ")", ")", "]", ")", "return", "self", ".", "__constructor__", "(", "result", ")", "if", "axis", "else", "self", ".", "__constructor__", "(", "result", ".", "T", ")" ]
37.517241
19.793103
def video_category(self): """doc: http://open.youku.com/docs/doc?id=90 """ url = 'https://openapi.youku.com/v2/schemas/video/category.json' r = requests.get(url) check_error(r) return r.json()
[ "def", "video_category", "(", "self", ")", ":", "url", "=", "'https://openapi.youku.com/v2/schemas/video/category.json'", "r", "=", "requests", ".", "get", "(", "url", ")", "check_error", "(", "r", ")", "return", "r", ".", "json", "(", ")" ]
33.428571
13.285714
def run(self, batch=True, interruptible=None, inplace=True): """ Run task :param batch: If False, batching will be disabled. :param interruptible: If True, an interruptible instance will be used. :param inplace: Apply action on the current object or return a new one. :return: Task object. """ params = {} if not batch: params['batch'] = False if interruptible is not None: params['use_interruptible_instances'] = interruptible extra = { 'resource': self.__class__.__name__, 'query': {'id': self.id, 'batch': batch} } logger.info('Running task', extra=extra) task_data = self._api.post( url=self._URL['run'].format(id=self.id), params=params).json() return Task(api=self._api, **task_data)
[ "def", "run", "(", "self", ",", "batch", "=", "True", ",", "interruptible", "=", "None", ",", "inplace", "=", "True", ")", ":", "params", "=", "{", "}", "if", "not", "batch", ":", "params", "[", "'batch'", "]", "=", "False", "if", "interruptible", "is", "not", "None", ":", "params", "[", "'use_interruptible_instances'", "]", "=", "interruptible", "extra", "=", "{", "'resource'", ":", "self", ".", "__class__", ".", "__name__", ",", "'query'", ":", "{", "'id'", ":", "self", ".", "id", ",", "'batch'", ":", "batch", "}", "}", "logger", ".", "info", "(", "'Running task'", ",", "extra", "=", "extra", ")", "task_data", "=", "self", ".", "_api", ".", "post", "(", "url", "=", "self", ".", "_URL", "[", "'run'", "]", ".", "format", "(", "id", "=", "self", ".", "id", ")", ",", "params", "=", "params", ")", ".", "json", "(", ")", "return", "Task", "(", "api", "=", "self", ".", "_api", ",", "*", "*", "task_data", ")" ]
38.590909
15.863636
def pop(self, key, default=None): """ Remove the key and return the associated value or default if not found Args: key (str): The key to remove default (obj): The value to return if key is not present """ return self._dictionary.pop(key.lower(), default)
[ "def", "pop", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "return", "self", ".", "_dictionary", ".", "pop", "(", "key", ".", "lower", "(", ")", ",", "default", ")" ]
40
14.25
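The lower-casing trick above only works if keys were lower-cased on insertion too; a tiny sketch with a plain dict as a stand-in for `self._dictionary`.

store = {}

def put(key, value):
    store[key.lower()] = value  # normalize on write...

def pop(key, default=None):
    return store.pop(key.lower(), default)  # ...and on removal

put('Content-Type', 'text/html')
print(pop('CONTENT-TYPE'))  # 'text/html'
print(pop('Content-Type'))  # None (already removed)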
def create_translation_field(model, field_name, lang, empty_value): """ Translation field factory. Returns a ``TranslationField`` based on a fieldname and a language. The list of supported fields can be extended by defining a tuple of field names in the projects settings.py like this:: MODELTRANSLATION_CUSTOM_FIELDS = ('MyField', 'MyOtherField',) If the class is neither a subclass of fields in ``SUPPORTED_FIELDS``, nor in ``CUSTOM_FIELDS`` an ``ImproperlyConfigured`` exception will be raised. """ if empty_value not in ('', 'both', None, NONE): raise ImproperlyConfigured('%s is not a valid empty_value.' % empty_value) field = model._meta.get_field(field_name) cls_name = field.__class__.__name__ if not (isinstance(field, SUPPORTED_FIELDS) or cls_name in mt_settings.CUSTOM_FIELDS): raise ImproperlyConfigured( '%s is not supported by modeltranslation.' % cls_name) translation_class = field_factory(field.__class__) return translation_class(translated_field=field, language=lang, empty_value=empty_value)
[ "def", "create_translation_field", "(", "model", ",", "field_name", ",", "lang", ",", "empty_value", ")", ":", "if", "empty_value", "not", "in", "(", "''", ",", "'both'", ",", "None", ",", "NONE", ")", ":", "raise", "ImproperlyConfigured", "(", "'%s is not a valid empty_value.'", "%", "empty_value", ")", "field", "=", "model", ".", "_meta", ".", "get_field", "(", "field_name", ")", "cls_name", "=", "field", ".", "__class__", ".", "__name__", "if", "not", "(", "isinstance", "(", "field", ",", "SUPPORTED_FIELDS", ")", "or", "cls_name", "in", "mt_settings", ".", "CUSTOM_FIELDS", ")", ":", "raise", "ImproperlyConfigured", "(", "'%s is not supported by modeltranslation.'", "%", "cls_name", ")", "translation_class", "=", "field_factory", "(", "field", ".", "__class__", ")", "return", "translation_class", "(", "translated_field", "=", "field", ",", "language", "=", "lang", ",", "empty_value", "=", "empty_value", ")" ]
49.363636
24.818182
def _depth_event(self, msg): """Handle a depth event :param msg: depth event message received on the socket :return: None """ if 'e' in msg and msg['e'] == 'error': # close the socket self.close() # notify the user by returning a None value if self._callback: self._callback(None) if self._last_update_id is None: # Initial depth snapshot fetch not yet performed, buffer messages self._depth_message_buffer.append(msg) else: self._process_depth_message(msg)
[ "def", "_depth_event", "(", "self", ",", "msg", ")", ":", "if", "'e'", "in", "msg", "and", "msg", "[", "'e'", "]", "==", "'error'", ":", "# close the socket", "self", ".", "close", "(", ")", "# notify the user by returning a None value", "if", "self", ".", "_callback", ":", "self", ".", "_callback", "(", "None", ")", "if", "self", ".", "_last_update_id", "is", "None", ":", "# Initial depth snapshot fetch not yet performed, buffer messages", "self", ".", "_depth_message_buffer", ".", "append", "(", "msg", ")", "else", ":", "self", ".", "_process_depth_message", "(", "msg", ")" ]
26.190476
18.857143
def printConcordance(concordance, prefix): """Print the concordance. :param concordance: the concordance of each sample. :param prefix: the prefix of all the files. :type concordance: dict :type prefix: str :returns: the concordance percentage (dict) The concordance is the number of genotypes that are equal when comparing a duplicated samples with another one, divided by the total number of genotypes (excluding genotypes that are no call [*i.e.* ``0``]). If a duplicated sample has 100% of no calls, the concordance will be zero. The file ``prefix.concordance`` will contain :math:`N \\times N` matrices for each set of duplicated samples. """ outFile = None try: outFile = open(prefix + ".concordance", "w") except IOError: msg = "%s: can't write file" % prefix + ".concordance" raise ProgramError(msg) concordance_percentage = {} for key in concordance.iterkeys(): print >>outFile, "#%s\t%s" % key # Doing the division none_zero = concordance[key][1] != 0 true_concordance = np.zeros(np.multiply(*concordance[key][1].shape)) true_concordance[np.ravel(none_zero)] = np.true_divide( concordance[key][0][none_zero], concordance[key][1][none_zero], ) true_concordance.shape = concordance[key][1].shape true_concordance = np.asmatrix(true_concordance) concordance_percentage[key] = true_concordance output = StringIO.StringIO() np.savetxt(output, true_concordance, delimiter="\t", fmt="%.8f") print >>outFile, output.getvalue().rstrip("\r\n") outFile.close() return concordance_percentage
[ "def", "printConcordance", "(", "concordance", ",", "prefix", ")", ":", "outFile", "=", "None", "try", ":", "outFile", "=", "open", "(", "prefix", "+", "\".concordance\"", ",", "\"w\"", ")", "except", "IOError", ":", "msg", "=", "\"%s: can't write file\"", "%", "prefix", "+", "\".concordance\"", "raise", "ProgramError", "(", "msg", ")", "concordance_percentage", "=", "{", "}", "for", "key", "in", "concordance", ".", "iterkeys", "(", ")", ":", "print", ">>", "outFile", ",", "\"#%s\\t%s\"", "%", "key", "# Doing the division", "none_zero", "=", "concordance", "[", "key", "]", "[", "1", "]", "!=", "0", "true_concordance", "=", "np", ".", "zeros", "(", "np", ".", "multiply", "(", "*", "concordance", "[", "key", "]", "[", "1", "]", ".", "shape", ")", ")", "true_concordance", "[", "np", ".", "ravel", "(", "none_zero", ")", "]", "=", "np", ".", "true_divide", "(", "concordance", "[", "key", "]", "[", "0", "]", "[", "none_zero", "]", ",", "concordance", "[", "key", "]", "[", "1", "]", "[", "none_zero", "]", ",", ")", "true_concordance", ".", "shape", "=", "concordance", "[", "key", "]", "[", "1", "]", ".", "shape", "true_concordance", "=", "np", ".", "asmatrix", "(", "true_concordance", ")", "concordance_percentage", "[", "key", "]", "=", "true_concordance", "output", "=", "StringIO", ".", "StringIO", "(", ")", "np", ".", "savetxt", "(", "output", ",", "true_concordance", ",", "delimiter", "=", "\"\\t\"", ",", "fmt", "=", "\"%.8f\"", ")", "print", ">>", "outFile", ",", "output", ".", "getvalue", "(", ")", ".", "rstrip", "(", "\"\\r\\n\"", ")", "outFile", ".", "close", "(", ")", "return", "concordance_percentage" ]
34.204082
21.346939
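The core numerical step above is the masked division that avoids divide-by-zero; a self-contained NumPy sketch of that pattern with made-up counts.

import numpy as np

# Divide only where the denominator is non-zero, leaving zeros
# elsewhere -- the same guard used for the concordance matrices.
same = np.array([2.0, 0.0, 3.0])    # genotypes that agreed
total = np.array([4.0, 0.0, 6.0])   # comparable genotypes
out = np.zeros_like(same)
mask = total != 0
out[mask] = np.true_divide(same[mask], total[mask])
print(out)  # [0.5 0.  0.5]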
def _make_blocks(records): # @NoSelf ''' Organizes the physical records into blocks in a list by placing consecutive physical records into a single block, so lesser VXRs will be created. [[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...] Parameters: records: list A list of records that there is data for Returns: sparse_blocks: list of list A list of ranges we have physical values for. Example: Input: [1,2,3,4,10,11,12,13,50,51,52,53] Output: [[1,4],[10,13],[50,53]] ''' sparse_blocks = [] total = len(records) if (total == 0): return [] x = 0 while (x < total): recstart = records[x] y = x recnum = recstart # Find the location in the records before the next gap # Call this value "y" while ((y+1) < total): y = y + 1 nextnum = records[y] diff = nextnum - recnum if (diff == 1): recnum = nextnum else: y = y - 1 break # Put the values of the records into "ablock", append to sparse_blocks ablock = [] ablock.append(recstart) if ((y+1) == total): recend = records[total-1] else: recend = records[y] x = y + 1 ablock.append(recend) sparse_blocks.append(ablock) return sparse_blocks
[ "def", "_make_blocks", "(", "records", ")", ":", "# @NoSelf", "sparse_blocks", "=", "[", "]", "total", "=", "len", "(", "records", ")", "if", "(", "total", "==", "0", ")", ":", "return", "[", "]", "x", "=", "0", "while", "(", "x", "<", "total", ")", ":", "recstart", "=", "records", "[", "x", "]", "y", "=", "x", "recnum", "=", "recstart", "# Find the location in the records before the next gap", "# Call this value \"y\"", "while", "(", "(", "y", "+", "1", ")", "<", "total", ")", ":", "y", "=", "y", "+", "1", "nextnum", "=", "records", "[", "y", "]", "diff", "=", "nextnum", "-", "recnum", "if", "(", "diff", "==", "1", ")", ":", "recnum", "=", "nextnum", "else", ":", "y", "=", "y", "-", "1", "break", "# Put the values of the records into \"ablock\", append to sparse_blocks", "ablock", "=", "[", "]", "ablock", ".", "append", "(", "recstart", ")", "if", "(", "(", "y", "+", "1", ")", "==", "total", ")", ":", "recend", "=", "records", "[", "total", "-", "1", "]", "else", ":", "recend", "=", "records", "[", "y", "]", "x", "=", "y", "+", "1", "ablock", ".", "append", "(", "recend", ")", "sparse_blocks", ".", "append", "(", "ablock", ")", "return", "sparse_blocks" ]
29.218182
18.309091
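An equivalent, more compact formulation of the run-grouping above (not the original code), handy for checking behavior against the docstring example.

def runs(records):
    # Group consecutive integers into [start, end] blocks.
    blocks = []
    for r in records:
        if blocks and r == blocks[-1][1] + 1:
            blocks[-1][1] = r      # extend the current block
        else:
            blocks.append([r, r])  # start a new block
    return blocks

print(runs([1, 2, 3, 4, 10, 11, 12, 13, 50, 51, 52, 53]))
# [[1, 4], [10, 13], [50, 53]]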
def is_serving(self) -> bool: """ Tell whether the server is accepting new connections or shutting down. """ try: # Python ≥ 3.7 return self.server.is_serving() # type: ignore except AttributeError: # pragma: no cover # Python < 3.7 return self.server.sockets is not None
[ "def", "is_serving", "(", "self", ")", "->", "bool", ":", "try", ":", "# Python ≥ 3.7", "return", "self", ".", "server", ".", "is_serving", "(", ")", "# type: ignore", "except", "AttributeError", ":", "# pragma: no cover", "# Python < 3.7", "return", "self", ".", "server", ".", "sockets", "is", "not", "None" ]
32
16.727273
def Operate(self, values): """Takes a list of values and if at least one matches, returns True.""" for val in values: try: if self.Operation(val, self.right_operand): return True except (TypeError, ValueError): pass return False
[ "def", "Operate", "(", "self", ",", "values", ")", ":", "for", "val", "in", "values", ":", "try", ":", "if", "self", ".", "Operation", "(", "val", ",", "self", ".", "right_operand", ")", ":", "return", "True", "except", "(", "TypeError", ",", "ValueError", ")", ":", "pass", "return", "False" ]
27
18.7
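A concrete, hypothetical instantiation of the tolerant any-match pattern above, using `operator.gt` as the `Operation`.

import operator

def any_matches(values, right_operand, operation=operator.gt):
    # Try each value; skip ones the operation cannot handle.
    for val in values:
        try:
            if operation(val, right_operand):
                return True
        except (TypeError, ValueError):
            pass
    return False

print(any_matches(['x', 3, 42], 10))  # True: 'x' raises and is skipped, 42 > 10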
def get_assessment_offered_id(self): """Gets the ``Id`` of the ``AssessmentOffered``. return: (osid.id.Id) - the assessment offered ``Id`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.learning.Activity.get_objective_id if not bool(self._my_map['assessmentOfferedId']): raise errors.IllegalState('assessment_offered empty') return Id(self._my_map['assessmentOfferedId'])
[ "def", "get_assessment_offered_id", "(", "self", ")", ":", "# Implemented from template for osid.learning.Activity.get_objective_id", "if", "not", "bool", "(", "self", ".", "_my_map", "[", "'assessmentOfferedId'", "]", ")", ":", "raise", "errors", ".", "IllegalState", "(", "'assessment_offered empty'", ")", "return", "Id", "(", "self", ".", "_my_map", "[", "'assessmentOfferedId'", "]", ")" ]
44.090909
20.545455
def config(self, commands, **kwargs): """Configures the node with the specified commands This method is used to send configuration commands to the node. It will take either a string or a list and prepend the necessary commands to put the session into config mode. Args: commands (str, list): The commands to send to the node in config mode. If the commands argument is a string it will be cast to a list. The list of commands will also be prepended with the necessary commands to put the session in config mode. **kwargs: Additional keyword arguments for expanded eAPI functionality. Only supported eAPI params are used in building the request Returns: The config method will return a list of dictionaries with the output from each command. The function will strip the response from any commands it prepends. """ commands = make_iterable(commands) commands = list(commands) # push the configure command onto the command stack commands.insert(0, 'configure terminal') response = self.run_commands(commands, **kwargs) if self.autorefresh: self.refresh() # pop the configure command output off the stack response.pop(0) return response
[ "def", "config", "(", "self", ",", "commands", ",", "*", "*", "kwargs", ")", ":", "commands", "=", "make_iterable", "(", "commands", ")", "commands", "=", "list", "(", "commands", ")", "# push the configure command onto the command stack", "commands", ".", "insert", "(", "0", ",", "'configure terminal'", ")", "response", "=", "self", ".", "run_commands", "(", "commands", ",", "*", "*", "kwargs", ")", "if", "self", ".", "autorefresh", ":", "self", ".", "refresh", "(", ")", "# pop the configure command output off the stack", "response", ".", "pop", "(", "0", ")", "return", "response" ]
39.194444
23.444444
def fit(self, sequences, y=None): """Fit Preprocessing to X. Parameters ---------- sequences : list of array-like, each of shape [sequence_length, n_features] A list of multivariate timeseries. Each sequence may have a different length, but they all must have the same number of features. y : None Ignored Returns ------- self """ check_iter_of_sequences(sequences) for sequence in sequences: s = super(MultiSequencePreprocessingMixin, self) s.partial_fit(sequence) return self
[ "def", "fit", "(", "self", ",", "sequences", ",", "y", "=", "None", ")", ":", "check_iter_of_sequences", "(", "sequences", ")", "for", "sequence", "in", "sequences", ":", "s", "=", "super", "(", "MultiSequencePreprocessingMixin", ",", "self", ")", "s", ".", "partial_fit", "(", "sequence", ")", "return", "self" ]
31.772727
20.090909
def start(name, call=None): ''' start a machine by name :param name: name given to the machine :param call: call value in this case is 'action' :return: true if successful CLI Example: .. code-block:: bash salt-cloud -a start vm_name ''' datacenter_id = get_datacenter_id() conn = get_conn() node = get_node(conn, name) conn.start_server(datacenter_id=datacenter_id, server_id=node['id']) return True
[ "def", "start", "(", "name", ",", "call", "=", "None", ")", ":", "datacenter_id", "=", "get_datacenter_id", "(", ")", "conn", "=", "get_conn", "(", ")", "node", "=", "get_node", "(", "conn", ",", "name", ")", "conn", ".", "start_server", "(", "datacenter_id", "=", "datacenter_id", ",", "server_id", "=", "node", "[", "'id'", "]", ")", "return", "True" ]
21.238095
23.142857
def remove_users(self, user_ids, nid=None): """Remove users from a network `nid` :type user_ids: list of str :param user_ids: a list of user ids. These are the same ids that are returned by get_all_users. :type nid: str :param nid: This is the ID of the network to remove students from. This is optional and only to override the existing `network_id` entered when created the class :returns: Python object containing returned data, a list of dicts of user data of all of the users remaining in the network after users are removed. """ r = self.request( method="network.update", data={"remove_users": user_ids}, nid=nid, nid_key="id" ) return self._handle_error(r, "Could not remove users.")
[ "def", "remove_users", "(", "self", ",", "user_ids", ",", "nid", "=", "None", ")", ":", "r", "=", "self", ".", "request", "(", "method", "=", "\"network.update\"", ",", "data", "=", "{", "\"remove_users\"", ":", "user_ids", "}", ",", "nid", "=", "nid", ",", "nid_key", "=", "\"id\"", ")", "return", "self", ".", "_handle_error", "(", "r", ",", "\"Could not remove users.\"", ")" ]
41
16.190476
def abspath(path): """Return the absolute path to a file and canonicalize it Path is returned without a trailing slash and without redundant slashes. Caches the user's home directory. :param path: A string for the path. This should not have any wildcards. :returns: Absolute path to the file :raises IOError: If unsuccessful """ global _USER_HOME_DIR # FIXME(brandyn): User's home directory must exist # FIXME(brandyn): Requires something to be in home dir if path[0] == '/': return os.path.abspath(path) if _USER_HOME_DIR is None: try: _USER_HOME_DIR = _get_home_dir() except IOError, e: if not exists('.'): raise IOError("Home directory doesn't exist") raise e return os.path.abspath(os.path.join(_USER_HOME_DIR, path))
[ "def", "abspath", "(", "path", ")", ":", "global", "_USER_HOME_DIR", "# FIXME(brandyn): User's home directory must exist", "# FIXME(brandyn): Requires something to be in home dir", "if", "path", "[", "0", "]", "==", "'/'", ":", "return", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "_USER_HOME_DIR", "is", "None", ":", "try", ":", "_USER_HOME_DIR", "=", "_get_home_dir", "(", ")", "except", "IOError", ",", "e", ":", "if", "not", "exists", "(", "'.'", ")", ":", "raise", "IOError", "(", "\"Home directory doesn't exist\"", ")", "raise", "e", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "_USER_HOME_DIR", ",", "path", ")", ")" ]
36.086957
16.521739
def add_to_stmts_rules(stmts, rules): """Use by plugins to add extra rules to the existing rules for a statement.""" def is_rule_less_than(ra, rb): rka = ra[0] rkb = rb[0] if not util.is_prefixed(rkb): # old rule is non-prefixed; append new rule after return False if not util.is_prefixed(rka): # old rule prefixed, but new rule is not, insert return True # both are prefixed, compare modulename return rka[0] < rkb[0] for s in stmts: (arg, rules0) = stmt_map[s] for r in rules: i = 0 while i < len(rules0): if is_rule_less_than(r, rules0[i]): rules0.insert(i, r) break i += 1 if i == len(rules0): rules0.insert(i, r)
[ "def", "add_to_stmts_rules", "(", "stmts", ",", "rules", ")", ":", "def", "is_rule_less_than", "(", "ra", ",", "rb", ")", ":", "rka", "=", "ra", "[", "0", "]", "rkb", "=", "rb", "[", "0", "]", "if", "not", "util", ".", "is_prefixed", "(", "rkb", ")", ":", "# old rule is non-prefixed; append new rule after", "return", "False", "if", "not", "util", ".", "is_prefixed", "(", "rka", ")", ":", "# old rule prefixed, but new rule is not, insert", "return", "True", "# both are prefixed, compare modulename", "return", "rka", "[", "0", "]", "<", "rkb", "[", "0", "]", "for", "s", "in", "stmts", ":", "(", "arg", ",", "rules0", ")", "=", "stmt_map", "[", "s", "]", "for", "r", "in", "rules", ":", "i", "=", "0", "while", "i", "<", "len", "(", "rules0", ")", ":", "if", "is_rule_less_than", "(", "r", ",", "rules0", "[", "i", "]", ")", ":", "rules0", ".", "insert", "(", "i", ",", "r", ")", "break", "i", "+=", "1", "if", "i", "==", "len", "(", "rules0", ")", ":", "rules0", ".", "insert", "(", "i", ",", "r", ")" ]
33.8
11.12
def simulate_list(nwords=16, nrec=10, ncats=4): """A function to simulate a list""" # load wordpool wp = pd.read_csv('data/cut_wordpool.csv') # get one list wp = wp[wp['GROUP']==np.random.choice(list(range(16)), 1)[0]].sample(16) wp['COLOR'] = [[int(np.random.rand() * 255) for i in range(3)] for i in range(16)]
[ "def", "simulate_list", "(", "nwords", "=", "16", ",", "nrec", "=", "10", ",", "ncats", "=", "4", ")", ":", "# load wordpool", "wp", "=", "pd", ".", "read_csv", "(", "'data/cut_wordpool.csv'", ")", "# get one list", "wp", "=", "wp", "[", "wp", "[", "'GROUP'", "]", "==", "np", ".", "random", ".", "choice", "(", "list", "(", "range", "(", "16", ")", ")", ",", "1", ")", "[", "0", "]", "]", ".", "sample", "(", "16", ")", "wp", "[", "'COLOR'", "]", "=", "[", "[", "int", "(", "np", ".", "random", ".", "rand", "(", ")", "*", "255", ")", "for", "i", "in", "range", "(", "3", ")", "]", "for", "i", "in", "range", "(", "16", ")", "]" ]
33
25.7
def assure_snapshot(fnc): """ Converts a snapshot ID passed as the snapshot to a CloudBlockStorageSnapshot object. """ @wraps(fnc) def _wrapped(self, snapshot, *args, **kwargs): if not isinstance(snapshot, CloudBlockStorageSnapshot): # Must be the ID snapshot = self._snapshot_manager.get(snapshot) return fnc(self, snapshot, *args, **kwargs) return _wrapped
[ "def", "assure_snapshot", "(", "fnc", ")", ":", "@", "wraps", "(", "fnc", ")", "def", "_wrapped", "(", "self", ",", "snapshot", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "snapshot", ",", "CloudBlockStorageSnapshot", ")", ":", "# Must be the ID", "snapshot", "=", "self", ".", "_snapshot_manager", ".", "get", "(", "snapshot", ")", "return", "fnc", "(", "self", ",", "snapshot", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_wrapped" ]
34.583333
17.083333
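The same coerce-the-first-argument decorator pattern, sketched with a hypothetical integer lookup in place of the snapshot manager.

from functools import wraps

def assure_int(fnc):
    @wraps(fnc)
    def _wrapped(value, *args, **kwargs):
        if not isinstance(value, int):
            value = int(value)  # stand-in for self._snapshot_manager.get(...)
        return fnc(value, *args, **kwargs)
    return _wrapped

@assure_int
def double(value):
    return value * 2

print(double('21'))  # 42 -- the string ID was coerced before the call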
def get(self, typ, id, **kwargs): """ Load type by id """ return self._load(self._request(typ, id=id, params=kwargs))
[ "def", "get", "(", "self", ",", "typ", ",", "id", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_load", "(", "self", ".", "_request", "(", "typ", ",", "id", "=", "id", ",", "params", "=", "kwargs", ")", ")" ]
29
10.2
def select_renderer(self, request, renderers): """ Selects the appropriated parser which matches to the request's accept. :param request: The HTTP request. :param renderers: The lists of parsers. :return: The parser selected or none. """ if not len(request.accept_mimetypes): return renderers[0], renderers[0].mimetype for mimetype, quality in request.accept_mimetypes: accept_mimetype = MimeType.parse(mimetype) for renderer in renderers: if accept_mimetype.match(renderer.mimetype): return renderer, renderer.mimetype.replace(params=accept_mimetype.params) return None, None
[ "def", "select_renderer", "(", "self", ",", "request", ",", "renderers", ")", ":", "if", "not", "len", "(", "request", ".", "accept_mimetypes", ")", ":", "return", "renderers", "[", "0", "]", ",", "renderers", "[", "0", "]", ".", "mimetype", "for", "mimetype", ",", "quality", "in", "request", ".", "accept_mimetypes", ":", "accept_mimetype", "=", "MimeType", ".", "parse", "(", "mimetype", ")", "for", "renderer", "in", "renderers", ":", "if", "accept_mimetype", ".", "match", "(", "renderer", ".", "mimetype", ")", ":", "return", "renderer", ",", "renderer", ".", "mimetype", ".", "replace", "(", "params", "=", "accept_mimetype", ".", "params", ")", "return", "None", ",", "None" ]
41.529412
16.352941
def add_phase_interconnections(net, snow_partitioning_n, voxel_size=1, marching_cubes_area=False, alias=None): r""" This function connects networks of two or more phases together by interconnecting neighbouring nodes inside different phases. The resulting network can be used for the study of transport and kinetics at the interface of two phases. Parameters ---------- network : 2D or 3D network A dictionary containing structural information of two or more phases networks. The dictionary format must be the same as the porespy region_to_network function. snow_partitioning_n : tuple The output generated by the snow_partitioning_n function. The tuple should have phase_max_label and the original image of the material. voxel_size : scalar The resolution of the image, expressed as the length of one side of a voxel, so the volume of a voxel would be **voxel_size**-cubed. The default is 1, which is useful when overlaying the PNM on the original image since the scale of the image is always 1 unit length per voxel. marching_cubes_area : bool If ``True`` then the surface area and interfacial area between regions will be calculated using the marching cube algorithm. This is a more accurate representation of area in the extracted network, but is quite slow, so it is ``False`` by default. The default method simply counts voxels so does not correctly account for the voxelated nature of the images. alias : dict (Optional) A dictionary that assigns a unique image label to a specific phase. For example {1: 'Solid'} will show all structural properties associated with label 1 as Solid phase properties. If ``None`` then default labelling will be used i.e. {1: 'Phase1', ...}. Returns ------- A dictionary containing network information of individual and connected networks. The dictionary names use the OpenPNM convention so it may be converted directly to an OpenPNM network object using the ``update`` command.
""" # ------------------------------------------------------------------------- # Get alias if provided by user im = snow_partitioning_n.im al = _create_alias_map(im, alias=alias) # ------------------------------------------------------------------------- # Find interconnection and interfacial area between ith and jth phases conns1 = net['throat.conns'][:, 0] conns2 = net['throat.conns'][:, 1] label = net['pore.label'] - 1 num = snow_partitioning_n.phase_max_label num = [0, *num] phases_num = sp.unique(im * 1) phases_num = sp.trim_zeros(phases_num) for i in phases_num: loc1 = sp.logical_and(conns1 >= num[i - 1], conns1 < num[i]) loc2 = sp.logical_and(conns2 >= num[i - 1], conns2 < num[i]) loc3 = sp.logical_and(label >= num[i - 1], label < num[i]) net['throat.{}'.format(al[i])] = loc1 * loc2 net['pore.{}'.format(al[i])] = loc3 if i == phases_num[-1]: loc4 = sp.logical_and(conns1 < num[-1], conns2 >= num[-1]) loc5 = label >= num[-1] net['throat.boundary'] = loc4 net['pore.boundary'] = loc5 for j in phases_num: if j > i: pi_pj_sa = sp.zeros_like(label) loc6 = sp.logical_and(conns2 >= num[j - 1], conns2 < num[j]) pi_pj_conns = loc1 * loc6 net['throat.{}_{}'.format(al[i], al[j])] = pi_pj_conns if any(pi_pj_conns): # --------------------------------------------------------- # Calculates phase[i] interfacial area that connects with # phase[j] and vice versa p_conns = net['throat.conns'][:, 0][pi_pj_conns] s_conns = net['throat.conns'][:, 1][pi_pj_conns] ps = net['throat.area'][pi_pj_conns] p_sa = sp.bincount(p_conns, ps) # trim zeros at head/tail position to avoid extra bins p_sa = sp.trim_zeros(p_sa) i_index = sp.arange(min(p_conns), max(p_conns) + 1) j_index = sp.arange(min(s_conns), max(s_conns) + 1) s_pa = sp.bincount(s_conns, ps) s_pa = sp.trim_zeros(s_pa) pi_pj_sa[i_index] = p_sa pi_pj_sa[j_index] = s_pa # --------------------------------------------------------- # Calculates interfacial area using marching cube method if marching_cubes_area: ps_c = net['throat.area'][pi_pj_conns] p_sa_c = sp.bincount(p_conns, ps_c) p_sa_c = sp.trim_zeros(p_sa_c) s_pa_c = sp.bincount(s_conns, ps_c) s_pa_c = sp.trim_zeros(s_pa_c) pi_pj_sa[i_index] = p_sa_c pi_pj_sa[j_index] = s_pa_c net['pore.{}_{}_area'.format(al[i], al[j])] = (pi_pj_sa * voxel_size ** 2) return net
[ "def", "add_phase_interconnections", "(", "net", ",", "snow_partitioning_n", ",", "voxel_size", "=", "1", ",", "marching_cubes_area", "=", "False", ",", "alias", "=", "None", ")", ":", "# -------------------------------------------------------------------------", "# Get alias if provided by user", "im", "=", "snow_partitioning_n", ".", "im", "al", "=", "_create_alias_map", "(", "im", ",", "alias", "=", "alias", ")", "# -------------------------------------------------------------------------", "# Find interconnection and interfacial area between ith and jth phases", "conns1", "=", "net", "[", "'throat.conns'", "]", "[", ":", ",", "0", "]", "conns2", "=", "net", "[", "'throat.conns'", "]", "[", ":", ",", "1", "]", "label", "=", "net", "[", "'pore.label'", "]", "-", "1", "num", "=", "snow_partitioning_n", ".", "phase_max_label", "num", "=", "[", "0", ",", "*", "num", "]", "phases_num", "=", "sp", ".", "unique", "(", "im", "*", "1", ")", "phases_num", "=", "sp", ".", "trim_zeros", "(", "phases_num", ")", "for", "i", "in", "phases_num", ":", "loc1", "=", "sp", ".", "logical_and", "(", "conns1", ">=", "num", "[", "i", "-", "1", "]", ",", "conns1", "<", "num", "[", "i", "]", ")", "loc2", "=", "sp", ".", "logical_and", "(", "conns2", ">=", "num", "[", "i", "-", "1", "]", ",", "conns2", "<", "num", "[", "i", "]", ")", "loc3", "=", "sp", ".", "logical_and", "(", "label", ">=", "num", "[", "i", "-", "1", "]", ",", "label", "<", "num", "[", "i", "]", ")", "net", "[", "'throat.{}'", ".", "format", "(", "al", "[", "i", "]", ")", "]", "=", "loc1", "*", "loc2", "net", "[", "'pore.{}'", ".", "format", "(", "al", "[", "i", "]", ")", "]", "=", "loc3", "if", "i", "==", "phases_num", "[", "-", "1", "]", ":", "loc4", "=", "sp", ".", "logical_and", "(", "conns1", "<", "num", "[", "-", "1", "]", ",", "conns2", ">=", "num", "[", "-", "1", "]", ")", "loc5", "=", "label", ">=", "num", "[", "-", "1", "]", "net", "[", "'throat.boundary'", "]", "=", "loc4", "net", "[", "'pore.boundary'", "]", "=", "loc5", "for", "j", "in", "phases_num", ":", "if", "j", ">", "i", ":", "pi_pj_sa", "=", "sp", ".", "zeros_like", "(", "label", ")", "loc6", "=", "sp", ".", "logical_and", "(", "conns2", ">=", "num", "[", "j", "-", "1", "]", ",", "conns2", "<", "num", "[", "j", "]", ")", "pi_pj_conns", "=", "loc1", "*", "loc6", "net", "[", "'throat.{}_{}'", ".", "format", "(", "al", "[", "i", "]", ",", "al", "[", "j", "]", ")", "]", "=", "pi_pj_conns", "if", "any", "(", "pi_pj_conns", ")", ":", "# ---------------------------------------------------------", "# Calculates phase[i] interfacial area that connects with", "# phase[j] and vice versa", "p_conns", "=", "net", "[", "'throat.conns'", "]", "[", ":", ",", "0", "]", "[", "pi_pj_conns", "]", "s_conns", "=", "net", "[", "'throat.conns'", "]", "[", ":", ",", "1", "]", "[", "pi_pj_conns", "]", "ps", "=", "net", "[", "'throat.area'", "]", "[", "pi_pj_conns", "]", "p_sa", "=", "sp", ".", "bincount", "(", "p_conns", ",", "ps", ")", "# trim zeros at head/tail position to avoid extra bins", "p_sa", "=", "sp", ".", "trim_zeros", "(", "p_sa", ")", "i_index", "=", "sp", ".", "arange", "(", "min", "(", "p_conns", ")", ",", "max", "(", "p_conns", ")", "+", "1", ")", "j_index", "=", "sp", ".", "arange", "(", "min", "(", "s_conns", ")", ",", "max", "(", "s_conns", ")", "+", "1", ")", "s_pa", "=", "sp", ".", "bincount", "(", "s_conns", ",", "ps", ")", "s_pa", "=", "sp", ".", "trim_zeros", "(", "s_pa", ")", "pi_pj_sa", "[", "i_index", "]", "=", "p_sa", "pi_pj_sa", "[", "j_index", "]", "=", "s_pa", "# 
---------------------------------------------------------", "# Calculates interfacial area using marching cube method", "if", "marching_cubes_area", ":", "ps_c", "=", "net", "[", "'throat.area'", "]", "[", "pi_pj_conns", "]", "p_sa_c", "=", "sp", ".", "bincount", "(", "p_conns", ",", "ps_c", ")", "p_sa_c", "=", "sp", ".", "trim_zeros", "(", "p_sa_c", ")", "s_pa_c", "=", "sp", ".", "bincount", "(", "s_conns", ",", "ps_c", ")", "s_pa_c", "=", "sp", ".", "trim_zeros", "(", "s_pa_c", ")", "pi_pj_sa", "[", "i_index", "]", "=", "p_sa_c", "pi_pj_sa", "[", "j_index", "]", "=", "s_pa_c", "net", "[", "'pore.{}_{}_area'", ".", "format", "(", "al", "[", "i", "]", ",", "al", "[", "j", "]", ")", "]", "=", "(", "pi_pj_sa", "*", "voxel_size", "**", "2", ")", "return", "net" ]
48.266055
21.963303
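The per-pore interfacial areas above come from a weighted `bincount`; a self-contained NumPy sketch of that trick with made-up throat data.

import numpy as np

# Sum throat areas per pore index, the same sp.bincount usage as above.
pore_ids = np.array([0, 0, 2, 2, 2])           # pore each throat touches
areas = np.array([1.0, 2.0, 0.5, 0.5, 1.0])    # area of each throat
print(np.bincount(pore_ids, weights=areas))    # [3. 0. 2.]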
def db_value(self, value): """Convert the python value for storage in the database.""" value = self.transform_value(value) return self.hhash.encrypt(value, salt_size=self.salt_size, rounds=self.rounds)
[ "def", "db_value", "(", "self", ",", "value", ")", ":", "value", "=", "self", ".", "transform_value", "(", "value", ")", "return", "self", ".", "hhash", ".", "encrypt", "(", "value", ",", "salt_size", "=", "self", ".", "salt_size", ",", "rounds", "=", "self", ".", "rounds", ")" ]
46.8
7
def train(self, ftrain): '''Trains the polynomial expansion. :param numpy.ndarray/function ftrain: output values corresponding to the quadrature points given by the getQuadraturePoints method to which the expansion should be trained. Or a function that should be evaluated at the quadrature points to give these output values. *Sample Usage*:: >>> thePC = PolySurrogate(dimensions=2) >>> thePC.train(myFunc) >>> predicted_q = thePC.predict([0, 1]) >>> thePC = PolySurrogate(dimensions=2) >>> U = thePC.getQuadraturePoints() >>> Q = [myFunc(u) for u in U] >>> thePC.train(Q) >>> predicted_q = thePC.predict([0, 1]) ''' self.coeffs = 0*self.coeffs upoints, wpoints = self.getQuadraturePointsAndWeights() try: fpoints = [ftrain(u) for u in upoints] except TypeError: fpoints = ftrain for ipoly in np.arange(self.N_poly): inds = tuple(self.index_polys[ipoly]) coeff = 0.0 for (u, q, w) in zip(upoints, fpoints, wpoints): coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w) self.coeffs[inds] = coeff return None
[ "def", "train", "(", "self", ",", "ftrain", ")", ":", "self", ".", "coeffs", "=", "0", "*", "self", ".", "coeffs", "upoints", ",", "wpoints", "=", "self", ".", "getQuadraturePointsAndWeights", "(", ")", "try", ":", "fpoints", "=", "[", "ftrain", "(", "u", ")", "for", "u", "in", "upoints", "]", "except", "TypeError", ":", "fpoints", "=", "ftrain", "for", "ipoly", "in", "np", ".", "arange", "(", "self", ".", "N_poly", ")", ":", "inds", "=", "tuple", "(", "self", ".", "index_polys", "[", "ipoly", "]", ")", "coeff", "=", "0.0", "for", "(", "u", ",", "q", ",", "w", ")", "in", "zip", "(", "upoints", ",", "fpoints", ",", "wpoints", ")", ":", "coeff", "+=", "eval_poly", "(", "u", ",", "inds", ",", "self", ".", "J_list", ")", "*", "q", "*", "np", ".", "prod", "(", "w", ")", "self", ".", "coeffs", "[", "inds", "]", "=", "coeff", "return", "None" ]
32.820513
22.410256
def search_for_devices_by_serial_number(self, sn): """ Returns a list of device objects that match the serial number in param 'sn'. Partial serial numbers will match, but only from the start of the string, since ``re.match`` anchors at the beginning. """ import re sn_search = re.compile(sn) matches = [] for dev_o in self.get_all_devices_in_portal(): # print("Checking {0}".format(dev_o['sn'])) try: if sn_search.match(dev_o['sn']): matches.append(dev_o) except TypeError as err: print("Problem checking device {!r}: {!r}".format( dev_o['info']['description']['name'], str(err))) return matches
[ "def", "search_for_devices_by_serial_number", "(", "self", ",", "sn", ")", ":", "import", "re", "sn_search", "=", "re", ".", "compile", "(", "sn", ")", "matches", "=", "[", "]", "for", "dev_o", "in", "self", ".", "get_all_devices_in_portal", "(", ")", ":", "# print(\"Checking {0}\".format(dev_o['sn']))", "try", ":", "if", "sn_search", ".", "match", "(", "dev_o", "[", "'sn'", "]", ")", ":", "matches", ".", "append", "(", "dev_o", ")", "except", "TypeError", "as", "err", ":", "print", "(", "\"Problem checking device {!r}: {!r}\"", ".", "format", "(", "dev_o", "[", "'info'", "]", "[", "'description'", "]", "[", "'name'", "]", ",", "str", "(", "err", ")", ")", ")", "return", "matches" ]
32.608696
18.521739
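A quick runnable check of that anchoring behavior; the serial numbers are made up.

import re

sn_search = re.compile('12AB')
serials = ['12AB0001', 'XX12AB', '12AB']

# Only serials whose beginning matches the pattern are kept.
print([s for s in serials if sn_search.match(s)])  # ['12AB0001', '12AB']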
def most_hot(self): """ Returns the *Weather* object in the forecast having the highest max temperature. The temperature is retrieved using the ``get_temperature['temp_max']`` call; was 'temp_max' key missing for every *Weather* instance in the forecast, ``None`` would be returned. :returns: a *Weather* object or ``None`` if no item in the forecast is eligible """ maxtemp = -270.0 # No one would survive that... hottest = None for weather in self._forecast.get_weathers(): d = weather.get_temperature() if 'temp_max' in d: if d['temp_max'] > maxtemp: maxtemp = d['temp_max'] hottest = weather return hottest
[ "def", "most_hot", "(", "self", ")", ":", "maxtemp", "=", "-", "270.0", "# No one would survive that...", "hottest", "=", "None", "for", "weather", "in", "self", ".", "_forecast", ".", "get_weathers", "(", ")", ":", "d", "=", "weather", ".", "get_temperature", "(", ")", "if", "'temp_max'", "in", "d", ":", "if", "d", "[", "'temp_max'", "]", ">", "maxtemp", ":", "maxtemp", "=", "d", "[", "'temp_max'", "]", "hottest", "=", "weather", "return", "hottest" ]
40.736842
17.368421
def _preprocess(self, filehandle, metadata): "Runs all attached preprocessors on the provided filehandle." for process in self._preprocessors: filehandle = process(filehandle, metadata) return filehandle
[ "def", "_preprocess", "(", "self", ",", "filehandle", ",", "metadata", ")", ":", "for", "process", "in", "self", ".", "_preprocessors", ":", "filehandle", "=", "process", "(", "filehandle", ",", "metadata", ")", "return", "filehandle" ]
47
13
def _notify(self, title, message, expected_action=None): """ Notify user from external event """ if self.editor is None: return initial_value = self.editor.save_on_focus_out self.editor.save_on_focus_out = False self._flg_notify = True dlg_type = (QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) expected_action = ( lambda *x: None) if not expected_action else expected_action if (self._auto_reload or QtWidgets.QMessageBox.question( self.editor, title, message, dlg_type, QtWidgets.QMessageBox.Yes) == QtWidgets.QMessageBox.Yes): expected_action(self.editor.file.path) self._update_mtime() self.editor.save_on_focus_out = initial_value
[ "def", "_notify", "(", "self", ",", "title", ",", "message", ",", "expected_action", "=", "None", ")", ":", "if", "self", ".", "editor", "is", "None", ":", "return", "inital_value", "=", "self", ".", "editor", ".", "save_on_focus_out", "self", ".", "editor", ".", "save_on_focus_out", "=", "False", "self", ".", "_flg_notify", "=", "True", "dlg_type", "=", "(", "QtWidgets", ".", "QMessageBox", ".", "Yes", "|", "QtWidgets", ".", "QMessageBox", ".", "No", ")", "expected_action", "=", "(", "lambda", "*", "x", ":", "None", ")", "if", "not", "expected_action", "else", "expected_action", "if", "(", "self", ".", "_auto_reload", "or", "QtWidgets", ".", "QMessageBox", ".", "question", "(", "self", ".", "editor", ",", "title", ",", "message", ",", "dlg_type", ",", "QtWidgets", ".", "QMessageBox", ".", "Yes", ")", "==", "QtWidgets", ".", "QMessageBox", ".", "Yes", ")", ":", "expected_action", "(", "self", ".", "editor", ".", "file", ".", "path", ")", "self", ".", "_update_mtime", "(", ")", "self", ".", "editor", ".", "save_on_focus_out", "=", "inital_value" ]
43.722222
14.277778
def next_game(self): ''' Advances the series to the next game, if possible. Also updates each team's score with points from the most recently completed game. :return: the next game, if the previous game did not end the series; None otherwise :raises SeriesOverException: if the series has already ended :raises GameInProgressException: if the last game has not yet finished ''' if self.is_over(): raise dominoes.SeriesOverException( 'Cannot start a new game - series ended with a score of {} to {}'.format(*self.scores) ) result = self.games[-1].result if result is None: raise dominoes.GameInProgressException( 'Cannot start a new game - the latest one has not finished!' ) # update each team's score with the points from the previous game if result.points >= 0: self.scores[0] += result.points else: self.scores[1] -= result.points # return None if the series is now over if self.is_over(): return # determine the starting player for the next game if result.won or pow(-1, result.player) * result.points > 0: starting_player = result.player elif not result.points: starting_player = self.games[-1].starting_player else: # pow(-1, result.player) * result.points < 0 starting_player = dominoes.game.next_player(result.player) # start the next game self.games.append(dominoes.Game.new(starting_player=starting_player)) return self.games[-1]
[ "def", "next_game", "(", "self", ")", ":", "if", "self", ".", "is_over", "(", ")", ":", "raise", "dominoes", ".", "SeriesOverException", "(", "'Cannot start a new game - series ended with a score of {} to {}'", ".", "format", "(", "*", "self", ".", "scores", ")", ")", "result", "=", "self", ".", "games", "[", "-", "1", "]", ".", "result", "if", "result", "is", "None", ":", "raise", "dominoes", ".", "GameInProgressException", "(", "'Cannot start a new game - the latest one has not finished!'", ")", "# update each team's score with the points from the previous game", "if", "result", ".", "points", ">=", "0", ":", "self", ".", "scores", "[", "0", "]", "+=", "result", ".", "points", "else", ":", "self", ".", "scores", "[", "1", "]", "-=", "result", ".", "points", "# return None if the series is now over", "if", "self", ".", "is_over", "(", ")", ":", "return", "# determine the starting player for the next game", "if", "result", ".", "won", "or", "pow", "(", "-", "1", ",", "result", ".", "player", ")", "*", "result", ".", "points", ">", "0", ":", "starting_player", "=", "result", ".", "player", "elif", "not", "result", ".", "points", ":", "starting_player", "=", "self", ".", "games", "[", "-", "1", "]", ".", "starting_player", "else", ":", "# pow(-1, result.player) * result.points < 0", "starting_player", "=", "dominoes", ".", "game", ".", "next_player", "(", "result", ".", "player", ")", "# start the next game", "self", ".", "games", ".", "append", "(", "dominoes", ".", "Game", ".", "new", "(", "starting_player", "=", "starting_player", ")", ")", "return", "self", ".", "games", "[", "-", "1", "]" ]
38.348837
24.162791
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_migrate_time(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_brief_info = ET.Element("get_stp_brief_info") config = get_stp_brief_info output = ET.SubElement(get_stp_brief_info, "output") spanning_tree_info = ET.SubElement(output, "spanning-tree-info") spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode") rstp = ET.SubElement(spanning_tree_mode, "rstp") rstp = ET.SubElement(rstp, "rstp") migrate_time = ET.SubElement(rstp, "migrate-time") migrate_time.text = kwargs.pop('migrate_time') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_migrate_time", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_brief_info", "=", "ET", ".", "Element", "(", "\"get_stp_brief_info\"", ")", "config", "=", "get_stp_brief_info", "output", "=", "ET", ".", "SubElement", "(", "get_stp_brief_info", ",", "\"output\"", ")", "spanning_tree_info", "=", "ET", ".", "SubElement", "(", "output", ",", "\"spanning-tree-info\"", ")", "spanning_tree_mode", "=", "ET", ".", "SubElement", "(", "spanning_tree_info", ",", "\"spanning-tree-mode\"", ")", "rstp", "=", "ET", ".", "SubElement", "(", "spanning_tree_mode", ",", "\"rstp\"", ")", "rstp", "=", "ET", ".", "SubElement", "(", "rstp", ",", "\"rstp\"", ")", "migrate_time", "=", "ET", ".", "SubElement", "(", "rstp", ",", "\"migrate-time\"", ")", "migrate_time", ".", "text", "=", "kwargs", ".", "pop", "(", "'migrate_time'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
49.6875
19.25
def bundle(self, bundle_name): """Bundle multiple Job or Playbook Apps into a single zip file. Args: bundle_name (str): The output name of the bundle zip file. """ if self.args.bundle or self.tcex_json.get('package', {}).get('bundle', False): if self.tcex_json.get('package', {}).get('bundle_packages') is not None: for bundle in self.tcex_json.get('package', {}).get('bundle_packages') or []: bundle_name = bundle.get('name') bundle_patterns = bundle.get('patterns') bundle_apps = [] for app in self._app_packages: for app_pattern in bundle_patterns: p = re.compile(app_pattern, re.IGNORECASE) if p.match(app): bundle_apps.append(app) # bundle app in zip if bundle_apps: self.bundle_apps(bundle_name, bundle_apps) else: self.bundle_apps(bundle_name, self._app_packages)
[ "def", "bundle", "(", "self", ",", "bundle_name", ")", ":", "if", "self", ".", "args", ".", "bundle", "or", "self", ".", "tcex_json", ".", "get", "(", "'package'", ",", "{", "}", ")", ".", "get", "(", "'bundle'", ",", "False", ")", ":", "if", "self", ".", "tcex_json", ".", "get", "(", "'package'", ",", "{", "}", ")", ".", "get", "(", "'bundle_packages'", ")", "is", "not", "None", ":", "for", "bundle", "in", "self", ".", "tcex_json", ".", "get", "(", "'package'", ",", "{", "}", ")", ".", "get", "(", "'bundle_packages'", ")", "or", "[", "]", ":", "bundle_name", "=", "bundle", ".", "get", "(", "'name'", ")", "bundle_patterns", "=", "bundle", ".", "get", "(", "'patterns'", ")", "bundle_apps", "=", "[", "]", "for", "app", "in", "self", ".", "_app_packages", ":", "for", "app_pattern", "in", "bundle_patterns", ":", "p", "=", "re", ".", "compile", "(", "app_pattern", ",", "re", ".", "IGNORECASE", ")", "if", "p", ".", "match", "(", "app", ")", ":", "bundle_apps", ".", "append", "(", "app", ")", "# bundle app in zip", "if", "bundle_apps", ":", "self", ".", "bundle_apps", "(", "bundle_name", ",", "bundle_apps", ")", "else", ":", "self", ".", "bundle_apps", "(", "bundle_name", ",", "self", ".", "_app_packages", ")" ]
46.083333
21.833333
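A minimal sketch of the tcex.json section that the bundle() method above reads, expressed as the Python dict the code accesses via self.tcex_json; the bundle names and patterns here are hypothetical:

# Hypothetical tcex.json contents as a Python dict; each pattern is
# compiled case-insensitively and matched against packaged app names.
tcex_json = {
    'package': {
        'bundle': True,
        'bundle_packages': [
            {'name': 'ingress_bundle', 'patterns': [r'^TC_-_Ingress']},
            {'name': 'playbook_bundle', 'patterns': [r'^TCPB_-_']},
        ],
    }
}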
def set_batch(self, data): """ Store multiple documents Args data <dict> data to store, use document ids as keys Returns revs <dict> dictionary of new revisions indexed by document ids """ # fetch existing documents to get current revisions rows = self.bucket.view("_all_docs", keys=list(data.keys()), include_docs=True) for row in rows: key = row.id if key and "_rev" not in data[key]: data[key]["_rev"] = row.doc["_rev"] for doc_id in data: data[doc_id]["_id"] = doc_id revs = {} for success, docid, rev_or_exc in self.bucket.update(list(data.values())): if not success and self.logger: self.logger.error("Document update conflict (batch) '%s', %s" % (docid, rev_or_exc)) elif success: revs[docid] = rev_or_exc return revs
[ "def", "set_batch", "(", "self", ",", "data", ")", ":", "# fetch existing documents to get current revisions", "rows", "=", "self", ".", "bucket", ".", "view", "(", "\"_all_docs\"", ",", "keys", "=", "data", ".", "keys", "(", ")", ",", "include_docs", "=", "True", ")", "existing", "=", "{", "}", "for", "row", "in", "rows", ":", "key", "=", "row", ".", "id", "if", "key", "and", "not", "data", "[", "key", "]", ".", "has_key", "(", "\"_rev\"", ")", ":", "data", "[", "key", "]", "[", "\"_rev\"", "]", "=", "row", ".", "doc", "[", "\"_rev\"", "]", "for", "id", ",", "item", "in", "data", ".", "items", "(", ")", ":", "data", "[", "id", "]", "[", "\"_id\"", "]", "=", "id", "revs", "=", "{", "}", "for", "success", ",", "docid", ",", "rev_or_exc", "in", "self", ".", "bucket", ".", "update", "(", "data", ".", "values", "(", ")", ")", ":", "if", "not", "success", "and", "self", ".", "logger", ":", "self", ".", "logger", ".", "error", "(", "\"Document update conflict (batch) '%s', %s\"", "%", "(", "docid", ",", "rev_or_exc", ")", ")", "elif", "success", ":", "revs", "[", "docid", "]", "=", "rev_or_exc", "return", "revs" ]
25.212121
24.181818
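A hedged usage sketch for set_batch; `store` is a hypothetical instance of the class above, backed by a CouchDB-style bucket:

# `store` is hypothetical; keys become document ids, values the documents.
docs = {
    'user:1': {'name': 'alice'},
    'user:2': {'name': 'bob'},
}
revs = store.set_batch(docs)
# revs maps each id that saved cleanly to its new revision string,
# e.g. {'user:1': '2-9f3a...', 'user:2': '1-c41b...'} (illustrative values)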
def regulartype(prompt_template="default"): """Echo each character typed. Unlike magictype, this echos the characters the user is pressing. Returns: command_string | The command to be passed to the shell to run. This is | typed by the user. """ echo_prompt(prompt_template) command_string = "" cursor_position = 0 with raw_mode(): while True: in_char = getchar() if in_char in {ESC, CTRLC}: echo(carriage_return=True) raise click.Abort() elif in_char == TAB: echo("\r", nl=True) return in_char elif in_char == BACKSPACE: if cursor_position > 0: echo("\b \b", nl=False) command_string = command_string[:-1] cursor_position -= 1 elif in_char in RETURNS: echo("\r", nl=True) return command_string elif in_char == CTRLZ and hasattr(signal, "SIGTSTP"): # Background process os.kill(0, signal.SIGTSTP) # When doitlive is back in foreground, clear the terminal # and resume where we left off click.clear() echo_prompt(prompt_template) else: echo(in_char, nl=False) command_string += in_char cursor_position += 1
[ "def", "regulartype", "(", "prompt_template", "=", "\"default\"", ")", ":", "echo_prompt", "(", "prompt_template", ")", "command_string", "=", "\"\"", "cursor_position", "=", "0", "with", "raw_mode", "(", ")", ":", "while", "True", ":", "in_char", "=", "getchar", "(", ")", "if", "in_char", "in", "{", "ESC", ",", "CTRLC", "}", ":", "echo", "(", "carriage_return", "=", "True", ")", "raise", "click", ".", "Abort", "(", ")", "elif", "in_char", "==", "TAB", ":", "echo", "(", "\"\\r\"", ",", "nl", "=", "True", ")", "return", "in_char", "elif", "in_char", "==", "BACKSPACE", ":", "if", "cursor_position", ">", "0", ":", "echo", "(", "\"\\b \\b\"", ",", "nl", "=", "False", ")", "command_string", "=", "command_string", "[", ":", "-", "1", "]", "cursor_position", "-=", "1", "elif", "in_char", "in", "RETURNS", ":", "echo", "(", "\"\\r\"", ",", "nl", "=", "True", ")", "return", "command_string", "elif", "in_char", "==", "CTRLZ", "and", "hasattr", "(", "signal", ",", "\"SIGTSTP\"", ")", ":", "# Background process", "os", ".", "kill", "(", "0", ",", "signal", ".", "SIGTSTP", ")", "# When doitlive is back in foreground, clear the terminal", "# and resume where we left off", "click", ".", "clear", "(", ")", "echo_prompt", "(", "prompt_template", ")", "else", ":", "echo", "(", "in_char", ",", "nl", "=", "False", ")", "command_string", "+=", "in_char", "cursor_position", "+=", "1" ]
37.842105
10.157895
def clear_lowest_numeric_score(self): """Clears the lowest score. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score if (self.get_lowest_numeric_score_metadata().is_read_only() or self.get_lowest_numeric_score_metadata().is_required()): raise errors.NoAccess() self._my_map['lowestNumericScore'] = self._lowest_numeric_score_default
[ "def", "clear_lowest_numeric_score", "(", "self", ")", ":", "# Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score", "if", "(", "self", ".", "get_lowest_numeric_score_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", ".", "get_lowest_numeric_score_metadata", "(", ")", ".", "is_required", "(", ")", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "self", ".", "_my_map", "[", "'lowestNumericScore'", "]", "=", "self", ".", "_lowest_numeric_score_default" ]
46.846154
23
def get_original_field_value(self, name): """ Returns original field value or None """ name = self.get_real_name(name) try: value = self.__original_data__[name] except KeyError: return None try: return value.export_original_data() except AttributeError: return value
[ "def", "get_original_field_value", "(", "self", ",", "name", ")", ":", "name", "=", "self", ".", "get_real_name", "(", "name", ")", "try", ":", "value", "=", "self", ".", "__original_data__", "[", "name", "]", "except", "KeyError", ":", "return", "None", "try", ":", "return", "value", ".", "export_original_data", "(", ")", "except", "AttributeError", ":", "return", "value" ]
24.4
14.4
def plot_roc_curve(y_true, y_probas, title='ROC Curves', curves=('micro', 'macro', 'each_class'), ax=None, figsize=None, cmap='nipy_spectral', title_fontsize="large", text_fontsize="medium"): """Generates the ROC curves from labels and predicted scores/probabilities Args: y_true (array-like, shape (n_samples)): Ground truth (correct) target values. y_probas (array-like, shape (n_samples, n_classes)): Prediction probabilities for each class returned by a classifier. title (string, optional): Title of the generated plot. Defaults to "ROC Curves". curves (array-like): A listing of which curves should be plotted on the resulting plot. Defaults to `("micro", "macro", "each_class")` i.e. "micro" for micro-averaged curve, "macro" for macro-averaged curve ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. figsize (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``. cmap (string or :class:`matplotlib.colors.Colormap` instance, optional): Colormap used for plotting the projection. View Matplotlib Colormap documentation for available options. https://matplotlib.org/users/colormaps.html title_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "large". text_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium". Returns: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn. Example: >>> import scikitplot.plotters as skplt >>> nb = GaussianNB() >>> nb = nb.fit(X_train, y_train) >>> y_probas = nb.predict_proba(X_test) >>> skplt.plot_roc_curve(y_test, y_probas) <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490> >>> plt.show() .. image:: _static/examples/plot_roc_curve.png :align: center :alt: ROC Curves """ y_true = np.array(y_true) y_probas = np.array(y_probas) if 'micro' not in curves and 'macro' not in curves and \ 'each_class' not in curves: raise ValueError('Invalid argument for curves as it ' 'only takes "micro", "macro", or "each_class"') classes = np.unique(y_true) probas = y_probas fpr = dict() tpr = dict() roc_auc = dict() for i in range(len(classes)): fpr[i], tpr[i], _ = roc_curve(y_true, probas[:, i], pos_label=classes[i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area micro_key = 'micro' i = 0 while micro_key in fpr: i += 1 micro_key += str(i) y_true = label_binarize(y_true, classes=classes) if len(classes) == 2: y_true = np.hstack((1 - y_true, y_true)) fpr[micro_key], tpr[micro_key], _ = roc_curve(y_true.ravel(), probas.ravel()) roc_auc[micro_key] = auc(fpr[micro_key], tpr[micro_key]) # Compute macro-average ROC curve and ROC area # First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[x] for x in range(len(classes))])) # Then interpolate all ROC curves at these points mean_tpr = np.zeros_like(all_fpr) for i in range(len(classes)): mean_tpr += interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= len(classes) macro_key = 'macro' i = 0 while macro_key in fpr: i += 1 macro_key += str(i) fpr[macro_key] = all_fpr tpr[macro_key] = mean_tpr roc_auc[macro_key] = auc(fpr[macro_key], tpr[macro_key]) if ax is None: fig, ax = plt.subplots(1, 1, figsize=figsize) ax.set_title(title, fontsize=title_fontsize) if 'each_class' in curves: for i in range(len(classes)): color = plt.cm.get_cmap(cmap)(float(i) / len(classes)) ax.plot(fpr[i], tpr[i], lw=2, color=color, label='ROC curve of class {0} (area = {1:0.2f})' ''.format(classes[i], roc_auc[i])) if 'micro' in curves: ax.plot(fpr[micro_key], tpr[micro_key], label='micro-average ROC curve ' '(area = {0:0.2f})'.format(roc_auc[micro_key]), color='deeppink', linestyle=':', linewidth=4) if 'macro' in curves: ax.plot(fpr[macro_key], tpr[macro_key], label='macro-average ROC curve ' '(area = {0:0.2f})'.format(roc_auc[macro_key]), color='navy', linestyle=':', linewidth=4) ax.plot([0, 1], [0, 1], 'k--', lw=2) ax.set_xlim([0.0, 1.0]) ax.set_ylim([0.0, 1.05]) ax.set_xlabel('False Positive Rate', fontsize=text_fontsize) ax.set_ylabel('True Positive Rate', fontsize=text_fontsize) ax.tick_params(labelsize=text_fontsize) ax.legend(loc='lower right', fontsize=text_fontsize) return ax
[ "def", "plot_roc_curve", "(", "y_true", ",", "y_probas", ",", "title", "=", "'ROC Curves'", ",", "curves", "=", "(", "'micro'", ",", "'macro'", ",", "'each_class'", ")", ",", "ax", "=", "None", ",", "figsize", "=", "None", ",", "cmap", "=", "'nipy_spectral'", ",", "title_fontsize", "=", "\"large\"", ",", "text_fontsize", "=", "\"medium\"", ")", ":", "y_true", "=", "np", ".", "array", "(", "y_true", ")", "y_probas", "=", "np", ".", "array", "(", "y_probas", ")", "if", "'micro'", "not", "in", "curves", "and", "'macro'", "not", "in", "curves", "and", "'each_class'", "not", "in", "curves", ":", "raise", "ValueError", "(", "'Invalid argument for curves as it '", "'only takes \"micro\", \"macro\", or \"each_class\"'", ")", "classes", "=", "np", ".", "unique", "(", "y_true", ")", "probas", "=", "y_probas", "fpr", "=", "dict", "(", ")", "tpr", "=", "dict", "(", ")", "roc_auc", "=", "dict", "(", ")", "for", "i", "in", "range", "(", "len", "(", "classes", ")", ")", ":", "fpr", "[", "i", "]", ",", "tpr", "[", "i", "]", ",", "_", "=", "roc_curve", "(", "y_true", ",", "probas", "[", ":", ",", "i", "]", ",", "pos_label", "=", "classes", "[", "i", "]", ")", "roc_auc", "[", "i", "]", "=", "auc", "(", "fpr", "[", "i", "]", ",", "tpr", "[", "i", "]", ")", "# Compute micro-average ROC curve and ROC area", "micro_key", "=", "'micro'", "i", "=", "0", "while", "micro_key", "in", "fpr", ":", "i", "+=", "1", "micro_key", "+=", "str", "(", "i", ")", "y_true", "=", "label_binarize", "(", "y_true", ",", "classes", "=", "classes", ")", "if", "len", "(", "classes", ")", "==", "2", ":", "y_true", "=", "np", ".", "hstack", "(", "(", "1", "-", "y_true", ",", "y_true", ")", ")", "fpr", "[", "micro_key", "]", ",", "tpr", "[", "micro_key", "]", ",", "_", "=", "roc_curve", "(", "y_true", ".", "ravel", "(", ")", ",", "probas", ".", "ravel", "(", ")", ")", "roc_auc", "[", "micro_key", "]", "=", "auc", "(", "fpr", "[", "micro_key", "]", ",", "tpr", "[", "micro_key", "]", ")", "# Compute macro-average ROC curve and ROC area", "# First aggregate all false positive rates", "all_fpr", "=", "np", ".", "unique", "(", "np", ".", "concatenate", "(", "[", "fpr", "[", "x", "]", "for", "x", "in", "range", "(", "len", "(", "classes", ")", ")", "]", ")", ")", "# Then interpolate all ROC curves at this points", "mean_tpr", "=", "np", ".", "zeros_like", "(", "all_fpr", ")", "for", "i", "in", "range", "(", "len", "(", "classes", ")", ")", ":", "mean_tpr", "+=", "interp", "(", "all_fpr", ",", "fpr", "[", "i", "]", ",", "tpr", "[", "i", "]", ")", "# Finally average it and compute AUC", "mean_tpr", "/=", "len", "(", "classes", ")", "macro_key", "=", "'macro'", "i", "=", "0", "while", "macro_key", "in", "fpr", ":", "i", "+=", "1", "macro_key", "+=", "str", "(", "i", ")", "fpr", "[", "macro_key", "]", "=", "all_fpr", "tpr", "[", "macro_key", "]", "=", "mean_tpr", "roc_auc", "[", "macro_key", "]", "=", "auc", "(", "fpr", "[", "macro_key", "]", ",", "tpr", "[", "macro_key", "]", ")", "if", "ax", "is", "None", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ",", "1", ",", "figsize", "=", "figsize", ")", "ax", ".", "set_title", "(", "title", ",", "fontsize", "=", "title_fontsize", ")", "if", "'each_class'", "in", "curves", ":", "for", "i", "in", "range", "(", "len", "(", "classes", ")", ")", ":", "color", "=", "plt", ".", "cm", ".", "get_cmap", "(", "cmap", ")", "(", "float", "(", "i", ")", "/", "len", "(", "classes", ")", ")", "ax", ".", "plot", "(", "fpr", "[", "i", "]", ",", "tpr", "[", "i", "]", ",", "lw", "=", "2", ",", "color", 
"=", "color", ",", "label", "=", "'ROC curve of class {0} (area = {1:0.2f})'", "''", ".", "format", "(", "classes", "[", "i", "]", ",", "roc_auc", "[", "i", "]", ")", ")", "if", "'micro'", "in", "curves", ":", "ax", ".", "plot", "(", "fpr", "[", "micro_key", "]", ",", "tpr", "[", "micro_key", "]", ",", "label", "=", "'micro-average ROC curve '", "'(area = {0:0.2f})'", ".", "format", "(", "roc_auc", "[", "micro_key", "]", ")", ",", "color", "=", "'deeppink'", ",", "linestyle", "=", "':'", ",", "linewidth", "=", "4", ")", "if", "'macro'", "in", "curves", ":", "ax", ".", "plot", "(", "fpr", "[", "macro_key", "]", ",", "tpr", "[", "macro_key", "]", ",", "label", "=", "'macro-average ROC curve '", "'(area = {0:0.2f})'", ".", "format", "(", "roc_auc", "[", "macro_key", "]", ")", ",", "color", "=", "'navy'", ",", "linestyle", "=", "':'", ",", "linewidth", "=", "4", ")", "ax", ".", "plot", "(", "[", "0", ",", "1", "]", ",", "[", "0", ",", "1", "]", ",", "'k--'", ",", "lw", "=", "2", ")", "ax", ".", "set_xlim", "(", "[", "0.0", ",", "1.0", "]", ")", "ax", ".", "set_ylim", "(", "[", "0.0", ",", "1.05", "]", ")", "ax", ".", "set_xlabel", "(", "'False Positive Rate'", ",", "fontsize", "=", "text_fontsize", ")", "ax", ".", "set_ylabel", "(", "'True Positive Rate'", ",", "fontsize", "=", "text_fontsize", ")", "ax", ".", "tick_params", "(", "labelsize", "=", "text_fontsize", ")", "ax", ".", "legend", "(", "loc", "=", "'lower right'", ",", "fontsize", "=", "text_fontsize", ")", "return", "ax" ]
36.393103
22.593103
def is_valid_resource_name(rname, exception_type=None): """Validates the given resource name to ARM guidelines, individual services may be more restrictive. :param rname: The resource name being validated. :type rname: str :param exception_type: Raises this Exception if invalid. :type exception_type: :class:`Exception` :returns: A boolean describing whether the name is valid. :rtype: bool """ match = _ARMNAME_RE.match(rname) if match: return True if exception_type: raise exception_type() return False
[ "def", "is_valid_resource_name", "(", "rname", ",", "exception_type", "=", "None", ")", ":", "match", "=", "_ARMNAME_RE", ".", "match", "(", "rname", ")", "if", "match", ":", "return", "True", "if", "exception_type", ":", "raise", "exception_type", "(", ")", "return", "False" ]
30.833333
18.888889
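A short usage sketch for is_valid_resource_name; the exact names accepted depend on the module-level _ARMNAME_RE pattern, so the results shown are assumptions:

# Assuming _ARMNAME_RE accepts typical ARM-style resource names:
is_valid_resource_name('my-resource-01')            # -> True (assumed)
is_valid_resource_name('bad name!')                 # -> False (assumed)
is_valid_resource_name('bad name!', ValueError)     # raises ValueError (assumed)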
def _write_family(family, filename): """ Write a family to a csv file. :type family: :class:`eqcorrscan.core.match_filter.Family` :param family: Family to write to file :type filename: str :param filename: File to write to. """ with open(filename, 'w') as f: for detection in family.detections: det_str = '' for key in detection.__dict__.keys(): if key == 'event' and detection.__dict__[key] is not None: value = str(detection.event.resource_id) elif key in ['threshold', 'detect_val', 'threshold_input']: value = format(detection.__dict__[key], '.32f').rstrip('0') else: value = str(detection.__dict__[key]) det_str += key + ': ' + value + '; ' f.write(det_str + '\n') return
[ "def", "_write_family", "(", "family", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "for", "detection", "in", "family", ".", "detections", ":", "det_str", "=", "''", "for", "key", "in", "detection", ".", "__dict__", ".", "keys", "(", ")", ":", "if", "key", "==", "'event'", "and", "detection", ".", "__dict__", "[", "key", "]", "is", "not", "None", ":", "value", "=", "str", "(", "detection", ".", "event", ".", "resource_id", ")", "elif", "key", "in", "[", "'threshold'", ",", "'detect_val'", ",", "'threshold_input'", "]", ":", "value", "=", "format", "(", "detection", ".", "__dict__", "[", "key", "]", ",", "'.32f'", ")", ".", "rstrip", "(", "'0'", ")", "else", ":", "value", "=", "str", "(", "detection", ".", "__dict__", "[", "key", "]", ")", "det_str", "+=", "key", "+", "': '", "+", "value", "+", "'; '", "f", ".", "write", "(", "det_str", "+", "'\\n'", ")", "return" ]
39.090909
15.363636
def load_from_file(module_path): """ Load a python module from its absolute filesystem path Borrowed from django-cms """ from imp import load_module, PY_SOURCE imported = None if module_path: with open(module_path, 'r') as openfile: imported = load_module('mod', openfile, module_path, ('imported', 'r', PY_SOURCE)) return imported
[ "def", "load_from_file", "(", "module_path", ")", ":", "from", "imp", "import", "load_module", ",", "PY_SOURCE", "imported", "=", "None", "if", "module_path", ":", "with", "open", "(", "module_path", ",", "'r'", ")", "as", "openfile", ":", "imported", "=", "load_module", "(", "'mod'", ",", "openfile", ",", "module_path", ",", "(", "'imported'", ",", "'r'", ",", "PY_SOURCE", ")", ")", "return", "imported" ]
28.692308
18.846154
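Since the imp module used above is deprecated (and removed in Python 3.12), a rough importlib-based equivalent may be useful; this sketch assumes the caller only needs the executed module object back:

import importlib.util

def load_from_file_py3(module_path):
    # Build a module spec from the absolute file path, then execute it
    # into a fresh module object and return it.
    spec = importlib.util.spec_from_file_location('mod', module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module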
def quick_scan(zap_helper, url, **options): """ Run a quick scan of a site by opening a URL, optionally spidering the URL, running an Active Scan, and reporting any issues found. This command contains most scan options as parameters, so you can do everything in one go. If any alerts are found for the given alert level, this command will exit with a status code of 1. """ if options['self_contained']: console.info('Starting ZAP daemon') with helpers.zap_error_handler(): zap_helper.start(options['start_options']) console.info('Running a quick scan for {0}'.format(url)) with helpers.zap_error_handler(): if options['scanners']: zap_helper.set_enabled_scanners(options['scanners']) if options['exclude']: zap_helper.exclude_from_all(options['exclude']) zap_helper.open_url(url) if options['spider']: zap_helper.run_spider(url, options['context_name'], options['user_name']) if options['ajax_spider']: zap_helper.run_ajax_spider(url) zap_helper.run_active_scan(url, options['recursive'], options['context_name'], options['user_name']) alerts = zap_helper.alerts(options['alert_level']) helpers.report_alerts(alerts, options['output_format']) if options['self_contained']: console.info('Shutting down ZAP daemon') with helpers.zap_error_handler(): zap_helper.shutdown() exit_code = 1 if len(alerts) > 0 else 0 sys.exit(exit_code)
[ "def", "quick_scan", "(", "zap_helper", ",", "url", ",", "*", "*", "options", ")", ":", "if", "options", "[", "'self_contained'", "]", ":", "console", ".", "info", "(", "'Starting ZAP daemon'", ")", "with", "helpers", ".", "zap_error_handler", "(", ")", ":", "zap_helper", ".", "start", "(", "options", "[", "'start_options'", "]", ")", "console", ".", "info", "(", "'Running a quick scan for {0}'", ".", "format", "(", "url", ")", ")", "with", "helpers", ".", "zap_error_handler", "(", ")", ":", "if", "options", "[", "'scanners'", "]", ":", "zap_helper", ".", "set_enabled_scanners", "(", "options", "[", "'scanners'", "]", ")", "if", "options", "[", "'exclude'", "]", ":", "zap_helper", ".", "exclude_from_all", "(", "options", "[", "'exclude'", "]", ")", "zap_helper", ".", "open_url", "(", "url", ")", "if", "options", "[", "'spider'", "]", ":", "zap_helper", ".", "run_spider", "(", "url", ",", "options", "[", "'context_name'", "]", ",", "options", "[", "'user_name'", "]", ")", "if", "options", "[", "'ajax_spider'", "]", ":", "zap_helper", ".", "run_ajax_spider", "(", "url", ")", "zap_helper", ".", "run_active_scan", "(", "url", ",", "options", "[", "'recursive'", "]", ",", "options", "[", "'context_name'", "]", ",", "options", "[", "'user_name'", "]", ")", "alerts", "=", "zap_helper", ".", "alerts", "(", "options", "[", "'alert_level'", "]", ")", "helpers", ".", "report_alerts", "(", "alerts", ",", "options", "[", "'output_format'", "]", ")", "if", "options", "[", "'self_contained'", "]", ":", "console", ".", "info", "(", "'Shutting down ZAP daemon'", ")", "with", "helpers", ".", "zap_error_handler", "(", ")", ":", "zap_helper", ".", "shutdown", "(", ")", "exit_code", "=", "1", "if", "len", "(", "alerts", ")", ">", "0", "else", "0", "sys", ".", "exit", "(", "exit_code", ")" ]
32.891304
21.804348
def _get_auth_packet(self, username, password, client): """ Get the pyrad authentication packet for the username/password and the given pyrad client. """ pkt = client.CreateAuthPacket(code=AccessRequest, User_Name=username) pkt["User-Password"] = pkt.PwCrypt(password) pkt["NAS-Identifier"] = 'django-radius' for key, val in list(getattr(settings, 'RADIUS_ATTRIBUTES', {}).items()): pkt[key] = val return pkt
[ "def", "_get_auth_packet", "(", "self", ",", "username", ",", "password", ",", "client", ")", ":", "pkt", "=", "client", ".", "CreateAuthPacket", "(", "code", "=", "AccessRequest", ",", "User_Name", "=", "username", ")", "pkt", "[", "\"User-Password\"", "]", "=", "pkt", ".", "PwCrypt", "(", "password", ")", "pkt", "[", "\"NAS-Identifier\"", "]", "=", "'django-radius'", "for", "key", ",", "val", "in", "list", "(", "getattr", "(", "settings", ",", "'RADIUS_ATTRIBUTES'", ",", "{", "}", ")", ".", "items", "(", ")", ")", ":", "pkt", "[", "key", "]", "=", "val", "return", "pkt" ]
43.25
16.25
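A sketch of the optional Django setting that the final loop in _get_auth_packet copies into the packet; the attribute names below are illustrative RADIUS attributes, not requirements of this code:

# settings.py -- every key/value here is added verbatim to the auth packet.
RADIUS_ATTRIBUTES = {
    'NAS-Port': 0,
    'Service-Type': 'Login-User',
}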
def string_in_list(tmp_str, strlist): # type: (AnyStr, List[AnyStr]) -> bool """Is tmp_str in strlist, case insensitive.""" new_str_list = strlist[:] for i, str_in_list in enumerate(new_str_list): new_str_list[i] = str_in_list.lower() return tmp_str.lower() in new_str_list
[ "def", "string_in_list", "(", "tmp_str", ",", "strlist", ")", ":", "# type: (AnyStr, List[AnyStr]) -> bool", "new_str_list", "=", "strlist", "[", ":", "]", "for", "i", ",", "str_in_list", "in", "enumerate", "(", "new_str_list", ")", ":", "new_str_list", "[", "i", "]", "=", "str_in_list", ".", "lower", "(", ")", "return", "tmp_str", ".", "lower", "(", ")", "in", "new_str_list" ]
45.571429
6.428571
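A quick usage sketch of the case-insensitive membership test above:

string_in_list('UTM', ['utm', 'll', 'xy'])   # -> True
string_in_list('deg', ['utm', 'll', 'xy'])   # -> False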
def sendp(x, inter=0, loop=0, iface=None, iface_hint=None, count=None, verbose=None, realtime=None, *args, **kargs): """Send packets at layer 2 sendp(packets, [inter=0], [loop=0], [verbose=conf.verb]) -> None""" if iface is None and iface_hint is not None: iface = conf.route.route(iface_hint)[0] __gen_send(conf.L2socket(iface=iface, *args, **kargs), x, inter=inter, loop=loop, count=count, verbose=verbose, realtime=realtime)
[ "def", "sendp", "(", "x", ",", "inter", "=", "0", ",", "loop", "=", "0", ",", "iface", "=", "None", ",", "iface_hint", "=", "None", ",", "count", "=", "None", ",", "verbose", "=", "None", ",", "realtime", "=", "None", ",", "*", "args", ",", "*", "*", "kargs", ")", ":", "if", "iface", "is", "None", "and", "iface_hint", "is", "not", "None", ":", "iface", "=", "conf", ".", "route", ".", "route", "(", "iface_hint", ")", "[", "0", "]", "__gen_send", "(", "conf", ".", "L2socket", "(", "iface", "=", "iface", ",", "*", "args", ",", "*", "*", "kargs", ")", ",", "x", ",", "inter", "=", "inter", ",", "loop", "=", "loop", ",", "count", "=", "count", ",", "verbose", "=", "verbose", ",", "realtime", "=", "realtime", ")" ]
73.666667
30.833333
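A hedged Scapy usage sketch for sendp; the interface name and destination address are hypothetical:

from scapy.all import Ether, IP, ICMP

# Send three layer-2 frames out a hypothetical interface.
sendp(Ether() / IP(dst='192.0.2.1') / ICMP(), iface='eth0', count=3)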
def randrange(seq): """ Yields random values from @seq until @seq is empty """ seq = seq.copy() choose = rng().choice remove = seq.remove for x in range(len(seq)): y = choose(seq) remove(y) yield y
[ "def", "randrange", "(", "seq", ")", ":", "seq", "=", "seq", ".", "copy", "(", ")", "choose", "=", "rng", "(", ")", ".", "choice", "remove", "=", "seq", ".", "remove", "for", "x", "in", "range", "(", "len", "(", "seq", ")", ")", ":", "y", "=", "choose", "(", "seq", ")", "remove", "(", "y", ")", "yield", "y" ]
25.888889
16.555556
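A small usage sketch; the generator yields each element exactly once, in random order, without mutating the caller's list:

for value in randrange([1, 2, 3, 4]):
    print(value)   # 1..4, each exactly once, order randomized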
def bivconvolve (sx_a, sy_a, cxy_a, sx_b, sy_b, cxy_b): """Given two independent bivariate distributions, compute a bivariate distribution corresponding to their convolution. I'm sure this is worked out in a ton of places, but I got the equations from Pineau+ (2011A&A...527A.126P). Returns: (sx_c, sy_c, cxy_c), the parameters of the convolved distribution. """ _bivcheck (sx_a, sy_a, cxy_a) _bivcheck (sx_b, sy_b, cxy_b) sx_c = np.sqrt (sx_a**2 + sx_b**2) sy_c = np.sqrt (sy_a**2 + sy_b**2) cxy_c = cxy_a + cxy_b return _bivcheck (sx_c, sy_c, cxy_c)
[ "def", "bivconvolve", "(", "sx_a", ",", "sy_a", ",", "cxy_a", ",", "sx_b", ",", "sy_b", ",", "cxy_b", ")", ":", "_bivcheck", "(", "sx_a", ",", "sy_a", ",", "cxy_a", ")", "_bivcheck", "(", "sx_b", ",", "sy_b", ",", "cxy_b", ")", "sx_c", "=", "np", ".", "sqrt", "(", "sx_a", "**", "2", "+", "sx_b", "**", "2", ")", "sy_c", "=", "np", ".", "sqrt", "(", "sy_a", "**", "2", "+", "sy_b", "**", "2", ")", "cxy_c", "=", "cxy_a", "+", "cxy_b", "return", "_bivcheck", "(", "sx_c", ",", "sy_c", ",", "cxy_c", ")" ]
31.052632
18.105263
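A quick numeric check of the convolution formulas above: two unit-width, uncorrelated distributions combine to a width of sqrt(2) on each axis with zero covariance:

sx_c, sy_c, cxy_c = bivconvolve(1.0, 1.0, 0.0, 1.0, 1.0, 0.0)
# sx_c == sy_c == sqrt(2) ~= 1.41421, cxy_c == 0.0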
def _kmedoids_run(X, n_clusters, distance, max_iter, tol, rng): """ Run a single trial of k-medoids clustering on dataset X, and given number of clusters """ membs = np.empty(shape=X.shape[0], dtype=int) centers = kmeans._kmeans_init(X, n_clusters, method='', rng=rng) sse_last = 9999.9 n_iter = 0 for it in range(1,max_iter): membs = kmeans._assign_clusters(X, centers) centers,sse_arr = _update_centers(X, membs, n_clusters, distance) sse_total = np.sum(sse_arr) if np.abs(sse_total - sse_last) < tol: n_iter = it break sse_last = sse_total return(centers, membs, sse_total, sse_arr, n_iter)
[ "def", "_kmedoids_run", "(", "X", ",", "n_clusters", ",", "distance", ",", "max_iter", ",", "tol", ",", "rng", ")", ":", "membs", "=", "np", ".", "empty", "(", "shape", "=", "X", ".", "shape", "[", "0", "]", ",", "dtype", "=", "int", ")", "centers", "=", "kmeans", ".", "_kmeans_init", "(", "X", ",", "n_clusters", ",", "method", "=", "''", ",", "rng", "=", "rng", ")", "sse_last", "=", "9999.9", "n_iter", "=", "0", "for", "it", "in", "range", "(", "1", ",", "max_iter", ")", ":", "membs", "=", "kmeans", ".", "_assign_clusters", "(", "X", ",", "centers", ")", "centers", ",", "sse_arr", "=", "_update_centers", "(", "X", ",", "membs", ",", "n_clusters", ",", "distance", ")", "sse_total", "=", "np", ".", "sum", "(", "sse_arr", ")", "if", "np", ".", "abs", "(", "sse_total", "-", "sse_last", ")", "<", "tol", ":", "n_iter", "=", "it", "break", "sse_last", "=", "sse_total", "return", "(", "centers", ",", "membs", ",", "sse_total", ",", "sse_arr", ",", "n_iter", ")" ]
35.842105
17.052632
def update_history(self, it, j=0, M=None, **kwargs): """Add the current state for all kwargs to the history """ # Create a new entry in the history for new variables (if they don't exist) if not np.any([k in self.history[j] for k in kwargs]): for k in kwargs: if M is None or M == 0: self.history[j][k] = [[]] else: self.history[j][k] = [[] for m in range(M)] """ # Check that the variables have been updated once per iteration elif np.any([[len(h)!=it+self.offset for h in self.history[j][k]] for k in kwargs.keys()]): for k in kwargs.keys(): for n,h in enumerate(self.history[j][k]): if len(h) != it+self.offset: err_str = "At iteration {0}, {1}[{2}] already has {3} entries" raise Exception(err_str.format(it, k, n, len(h)-self.offset)) """ # Add the variables to the history for k,v in kwargs.items(): if M is None or M == 0: self._store_variable(j, k, 0, v) else: for m in range(M): self._store_variable(j, k, m, v[m])
[ "def", "update_history", "(", "self", ",", "it", ",", "j", "=", "0", ",", "M", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Create a new entry in the history for new variables (if they don't exist)", "if", "not", "np", ".", "any", "(", "[", "k", "in", "self", ".", "history", "[", "j", "]", "for", "k", "in", "kwargs", "]", ")", ":", "for", "k", "in", "kwargs", ":", "if", "M", "is", "None", "or", "M", "==", "0", ":", "self", ".", "history", "[", "j", "]", "[", "k", "]", "=", "[", "[", "]", "]", "else", ":", "self", ".", "history", "[", "j", "]", "[", "k", "]", "=", "[", "[", "]", "for", "m", "in", "range", "(", "M", ")", "]", "\"\"\"\n # Check that the variables have been updated once per iteration\n elif np.any([[len(h)!=it+self.offset for h in self.history[j][k]] for k in kwargs.keys()]):\n for k in kwargs.keys():\n for n,h in enumerate(self.history[j][k]):\n if len(h) != it+self.offset:\n err_str = \"At iteration {0}, {1}[{2}] already has {3} entries\"\n raise Exception(err_str.format(it, k, n, len(h)-self.offset))\n \"\"\"", "# Add the variables to the history", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "M", "is", "None", "or", "M", "==", "0", ":", "self", ".", "_store_variable", "(", "j", ",", "k", ",", "0", ",", "v", ")", "else", ":", "for", "m", "in", "range", "(", "M", ")", ":", "self", ".", "_store_variable", "(", "j", ",", "k", ",", "m", ",", "v", "[", "m", "]", ")" ]
47.461538
15.884615
def _process_for(self, node, **kwargs): """ Processes a for loop. e.g. {% for number in numbers %} {{ number }} {% endfor %} {% for key, value in somemap.items() %} {{ key }} -> {{ value }} {% endfor %} """ # since a for loop can introduce new names into the context # we need to remember the ones that existed outside the loop previous_stored_names = self.stored_names.copy() with self._execution(): self.output.write('__runtime.each(') if is_method_call(node.iter, dict.keys.__name__): self.output.write('Object.keys(') self._process_node(node.iter, **kwargs) if is_method_call(node.iter, dict.keys.__name__): self.output.write(')') self.output.write(',') self.output.write('function') self.output.write('(') # javascript iterations put the value first, then the key if isinstance(node.target, nodes.Tuple): if len(node.target.items) > 2: raise Exception('De-structuring more than 2 items is not supported.') for i, item in enumerate(reversed(node.target.items)): self._process_node(item, **kwargs) if i < len(node.target.items) - 1: self.output.write(',') else: self._process_node(node.target, **kwargs) self.output.write(')') self.output.write('{') if node.test: self.output.write('if (!(') self._process_node(node.test, **kwargs) self.output.write(')) { return; }') assigns = node.target.items if isinstance(node.target, nodes.Tuple) else [node.target] with self._scoped_variables(assigns, **kwargs): for n in node.body: self._process_node(n, **kwargs) with self._execution(): self.output.write('}') self.output.write(')') self.output.write(';') # restore the stored names self.stored_names = previous_stored_names
[ "def", "_process_for", "(", "self", ",", "node", ",", "*", "*", "kwargs", ")", ":", "# since a for loop can introduce new names into the context", "# we need to remember the ones that existed outside the loop", "previous_stored_names", "=", "self", ".", "stored_names", ".", "copy", "(", ")", "with", "self", ".", "_execution", "(", ")", ":", "self", ".", "output", ".", "write", "(", "'__runtime.each('", ")", "if", "is_method_call", "(", "node", ".", "iter", ",", "dict", ".", "keys", ".", "__name__", ")", ":", "self", ".", "output", ".", "write", "(", "'Object.keys('", ")", "self", ".", "_process_node", "(", "node", ".", "iter", ",", "*", "*", "kwargs", ")", "if", "is_method_call", "(", "node", ".", "iter", ",", "dict", ".", "keys", ".", "__name__", ")", ":", "self", ".", "output", ".", "write", "(", "')'", ")", "self", ".", "output", ".", "write", "(", "','", ")", "self", ".", "output", ".", "write", "(", "'function'", ")", "self", ".", "output", ".", "write", "(", "'('", ")", "# javascript iterations put the value first, then the key", "if", "isinstance", "(", "node", ".", "target", ",", "nodes", ".", "Tuple", ")", ":", "if", "len", "(", "node", ".", "target", ".", "items", ")", ">", "2", ":", "raise", "Exception", "(", "'De-structuring more than 2 items is not supported.'", ")", "for", "i", ",", "item", "in", "enumerate", "(", "reversed", "(", "node", ".", "target", ".", "items", ")", ")", ":", "self", ".", "_process_node", "(", "item", ",", "*", "*", "kwargs", ")", "if", "i", "<", "len", "(", "node", ".", "target", ".", "items", ")", "-", "1", ":", "self", ".", "output", ".", "write", "(", "','", ")", "else", ":", "self", ".", "_process_node", "(", "node", ".", "target", ",", "*", "*", "kwargs", ")", "self", ".", "output", ".", "write", "(", "')'", ")", "self", ".", "output", ".", "write", "(", "'{'", ")", "if", "node", ".", "test", ":", "self", ".", "output", ".", "write", "(", "'if (!('", ")", "self", ".", "_process_node", "(", "node", ".", "test", ",", "*", "*", "kwargs", ")", "self", ".", "output", ".", "write", "(", "')) { return; }'", ")", "assigns", "=", "node", ".", "target", ".", "items", "if", "isinstance", "(", "node", ".", "target", ",", "nodes", ".", "Tuple", ")", "else", "[", "node", ".", "target", "]", "with", "self", ".", "_scoped_variables", "(", "assigns", ",", "*", "*", "kwargs", ")", ":", "for", "n", "in", "node", ".", "body", ":", "self", ".", "_process_node", "(", "n", ",", "*", "*", "kwargs", ")", "with", "self", ".", "_execution", "(", ")", ":", "self", ".", "output", ".", "write", "(", "'}'", ")", "self", ".", "output", ".", "write", "(", "')'", ")", "self", ".", "output", ".", "write", "(", "';'", ")", "# restore the stored names", "self", ".", "stored_names", "=", "previous_stored_names" ]
34.47619
18.698413
def make_meta_dict_consistent(self): """ Remove the possibility of the main keys being undefined. """ if self.meta_dict is None: self.meta_dict = {} if "galaxy_info" not in self.meta_dict: self.meta_dict["galaxy_info"] = {} if "dependencies" not in self.meta_dict: self.meta_dict["dependencies"] = [] if "ansigenome_info" not in self.meta_dict: self.meta_dict["ansigenome_info"] = {}
[ "def", "make_meta_dict_consistent", "(", "self", ")", ":", "if", "self", ".", "meta_dict", "is", "None", ":", "self", ".", "meta_dict", "=", "{", "}", "if", "\"galaxy_info\"", "not", "in", "self", ".", "meta_dict", ":", "self", ".", "meta_dict", "[", "\"galaxy_info\"", "]", "=", "{", "}", "if", "\"dependencies\"", "not", "in", "self", ".", "meta_dict", ":", "self", ".", "meta_dict", "[", "\"dependencies\"", "]", "=", "[", "]", "if", "\"ansigenome_info\"", "not", "in", "self", ".", "meta_dict", ":", "self", ".", "meta_dict", "[", "\"ansigenome_info\"", "]", "=", "{", "}" ]
31.733333
14.133333
def two_param_shortcut(parser, token): """ Shortcut to transmogrify thumbnail """ bits = smart_split(token.contents) tagname = next(bits) try: imageurl = next(bits) param1 = next(bits) param2 = next(bits) param2 = param2.lstrip("#") except StopIteration: raise template.TemplateSyntaxError("%r tag requires at least the image url" % tagname) return MogrifyNode(imageurl, [(tagname, param1, param2), ])
[ "def", "two_param_shortcut", "(", "parser", ",", "token", ")", ":", "bits", "=", "smart_split", "(", "token", ".", "contents", ")", "tagname", "=", "bits", ".", "next", "(", ")", "try", ":", "imageurl", "=", "bits", ".", "next", "(", ")", "param1", "=", "bits", ".", "next", "(", ")", "param2", "=", "bits", ".", "next", "(", ")", "param2", "=", "param2", ".", "lstrip", "(", "\"#\"", ")", "except", "StopIteration", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"%r tag requires at least the image url\"", "%", "tagname", ")", "return", "MogrifyNode", "(", "imageurl", ",", "[", "(", "tagname", ",", "param1", ",", "param2", ")", ",", "]", ")" ]
30.933333
14.933333
def predict_percentile(self, X, p=0.5): """ Returns the median lifetimes for the individuals, by default. If the survival curve of an individual does not cross 0.5, then the result is infinity. http://stats.stackexchange.com/questions/102986/percentile-loss-functions Parameters ---------- X: numpy array or DataFrame a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. p: float, optional (default=0.5) the percentile, must be between 0 and 1. Returns ------- percentiles: DataFrame See Also -------- predict_median """ subjects = _get_index(X) return qth_survival_times(p, self.predict_survival_function(X)[subjects]).T
[ "def", "predict_percentile", "(", "self", ",", "X", ",", "p", "=", "0.5", ")", ":", "subjects", "=", "_get_index", "(", "X", ")", "return", "qth_survival_times", "(", "p", ",", "self", ".", "predict_survival_function", "(", "X", ")", "[", "subjects", "]", ")", ".", "T" ]
34.423077
23.038462
def randomize(self, device=None, percent=100, silent=False): """ Writes random data to the beginning of each 4MB block on a block device; this is useful when performance testing the backup process. (Without any optional arguments, this randomizes the first 32k of each 4MB block on 100 percent of the device.) """ volume = self.get_volume(device) # The number of blocks in the volume blocks = int(volume['size'] / BLOCK_SIZE) # How many writes should be made to the device # (based on the percentage requested) num_writes = int(blocks * percent * 0.01) # Build a list of offsets we write to offsets = sorted(random.sample(range(blocks), num_writes)) total = 0 if not silent: print('Writing urandom to %s bytes in %s' % (volume['size'], volume['path'])) # open in binary mode; os.urandom() returns bytes with open(volume['path'], 'wb') as output: for offset in offsets: if not silent: self.dot() output.seek(offset * BLOCK_SIZE) # Create a random string 32k long then duplicate # the randomized string 128 times (32768 * 128 = 4MB) data = os.urandom(32768) * 128 total += len(data) # write out the 4MB block of randomized data output.write(data) print("\nWrote: %s" % total)
[ "def", "randomize", "(", "self", ",", "device", "=", "None", ",", "percent", "=", "100", ",", "silent", "=", "False", ")", ":", "volume", "=", "self", ".", "get_volume", "(", "device", ")", "# The number of blocks in the volume", "blocks", "=", "int", "(", "volume", "[", "'size'", "]", "/", "BLOCK_SIZE", ")", "# How many writes should be to the device", "# (based on the percentage requested)", "num_writes", "=", "int", "(", "blocks", "*", "percent", "*", "0.01", ")", "# Build a list of offsets we write to", "offsets", "=", "sorted", "(", "random", ".", "sample", "(", "range", "(", "blocks", ")", ",", "num_writes", ")", ")", "total", "=", "0", "if", "not", "silent", ":", "print", "(", "'Writing urandom to %s bytes in %s'", "%", "(", "volume", "[", "'size'", "]", ",", "volume", "[", "'path'", "]", ")", ")", "with", "open", "(", "volume", "[", "'path'", "]", ",", "'w'", ")", "as", "file", ":", "for", "offset", "in", "offsets", ":", "if", "not", "silent", ":", "self", ".", "dot", "(", ")", "file", ".", "seek", "(", "offset", "*", "BLOCK_SIZE", ")", "# Create a random string 32k long then duplicate", "# the randomized string 128 times (32768 * 128 = 4MB)", "data", "=", "os", ".", "urandom", "(", "32768", ")", "*", "128", "total", "+=", "len", "(", "data", ")", "# write out the 4MB block of randomized data", "file", ".", "write", "(", "data", ")", "print", "(", "\"\\nWrote: %s\"", "%", "total", ")" ]
42.617647
16.382353
def one_phase_dP_gravitational(angle, rho, L=1.0, g=g): r'''This function handles calculation of one-phase liquid-gas pressure drop due to gravitation for flow inside channels. This is either a differential calculation for a segment with an infinitesimal difference in elevation (if `L`=1 or a discrete calculation. .. math:: -\left(\frac{dP}{dz} \right)_{grav} = \rho g \sin \theta .. math:: -\left(\Delta P \right)_{grav} = L \rho g \sin \theta Parameters ---------- angle : float The angle of the pipe with respect to the horizontal, [degrees] rho : float Fluid density, [kg/m^3] L : float, optional Length of pipe, [m] g : float, optional Acceleration due to gravity, [m/s^2] Returns ------- dP : float Gravitational component of pressure drop for one-phase flow, [Pa/m] or [Pa] Notes ----- Examples -------- >>> one_phase_dP_gravitational(angle=90, rho=2.6) 25.49729 >>> one_phase_dP_gravitational(angle=90, rho=2.6, L=4) 101.98916 ''' angle = radians(angle) return L*g*sin(angle)*rho
[ "def", "one_phase_dP_gravitational", "(", "angle", ",", "rho", ",", "L", "=", "1.0", ",", "g", "=", "g", ")", ":", "angle", "=", "radians", "(", "angle", ")", "return", "L", "*", "g", "*", "sin", "(", "angle", ")", "*", "rho" ]
28.243902
25.560976
def print_messages(domain, msg): """Debugging function to print all message language variants""" domain = Domain(domain) for lang in all_languages(): print(lang, ':', domain.get(lang, msg))
[ "def", "print_messages", "(", "domain", ",", "msg", ")", ":", "domain", "=", "Domain", "(", "domain", ")", "for", "lang", "in", "all_languages", "(", ")", ":", "print", "(", "lang", ",", "':'", ",", "domain", ".", "get", "(", "lang", ",", "msg", ")", ")" ]
34.166667
12.666667
def _dump_queue_stats(self): ''' Dumps basic info about the queue lengths for the spider types ''' extras = {} keys = self.redis_conn.keys('*:*:queue') total_backlog = 0 for key in keys: elements = key.split(":") spider = elements[0] domain = elements[1] spider = 'queue_' + spider if spider not in extras: extras[spider] = {} extras[spider]['spider_backlog'] = 0 extras[spider]['num_domains'] = 0 count = self.redis_conn.zcard(key) total_backlog += count extras[spider]['spider_backlog'] += count extras[spider]['num_domains'] += 1 extras['total_backlog'] = total_backlog if not self.logger.json: self.logger.info('Queue Stats Dump:\n{0}'.format( json.dumps(extras, indent=4, sort_keys=True))) else: self.logger.info('Queue Stats Dump', extra=extras)
[ "def", "_dump_queue_stats", "(", "self", ")", ":", "extras", "=", "{", "}", "keys", "=", "self", ".", "redis_conn", ".", "keys", "(", "'*:*:queue'", ")", "total_backlog", "=", "0", "for", "key", "in", "keys", ":", "elements", "=", "key", ".", "split", "(", "\":\"", ")", "spider", "=", "elements", "[", "0", "]", "domain", "=", "elements", "[", "1", "]", "spider", "=", "'queue_'", "+", "spider", "if", "spider", "not", "in", "extras", ":", "extras", "[", "spider", "]", "=", "{", "}", "extras", "[", "spider", "]", "[", "'spider_backlog'", "]", "=", "0", "extras", "[", "spider", "]", "[", "'num_domains'", "]", "=", "0", "count", "=", "self", ".", "redis_conn", ".", "zcard", "(", "key", ")", "total_backlog", "+=", "count", "extras", "[", "spider", "]", "[", "'spider_backlog'", "]", "+=", "count", "extras", "[", "spider", "]", "[", "'num_domains'", "]", "+=", "1", "extras", "[", "'total_backlog'", "]", "=", "total_backlog", "if", "not", "self", ".", "logger", ".", "json", ":", "self", ".", "logger", ".", "info", "(", "'Queue Stats Dump:\\n{0}'", ".", "format", "(", "json", ".", "dumps", "(", "extras", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", ")", ")", "else", ":", "self", ".", "logger", ".", "info", "(", "'Queue Stats Dump'", ",", "extra", "=", "extras", ")" ]
33.533333
17.066667
def _create_storage_profile(self): """ Create the storage profile for the instance. Image reference can be a custom image name or a published urn. """ if self.image_publisher: storage_profile = { 'image_reference': { 'publisher': self.image_publisher, 'offer': self.image_offer, 'sku': self.image_sku, 'version': self.image_version }, } else: for image in self.compute.images.list(): if image.name == self.image_id: image_id = image.id break else: raise AzureCloudException( 'Image with name {0} not found.'.format(self.image_id) ) storage_profile = { 'image_reference': { 'id': image_id } } return storage_profile
[ "def", "_create_storage_profile", "(", "self", ")", ":", "if", "self", ".", "image_publisher", ":", "storage_profile", "=", "{", "'image_reference'", ":", "{", "'publisher'", ":", "self", ".", "image_publisher", ",", "'offer'", ":", "self", ".", "image_offer", ",", "'sku'", ":", "self", ".", "image_sku", ",", "'version'", ":", "self", ".", "image_version", "}", ",", "}", "else", ":", "for", "image", "in", "self", ".", "compute", ".", "images", ".", "list", "(", ")", ":", "if", "image", ".", "name", "==", "self", ".", "image_id", ":", "image_id", "=", "image", ".", "id", "break", "else", ":", "raise", "AzureCloudException", "(", "'Image with name {0} not found.'", ".", "format", "(", "self", ".", "image_id", ")", ")", "storage_profile", "=", "{", "'image_reference'", ":", "{", "'id'", ":", "image_id", "}", "}", "return", "storage_profile" ]
30.8125
15.375
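A sketch of the storage profile that _create_storage_profile produces on the published-image path; the publisher/offer/sku values below are illustrative marketplace fields, not values this code requires:

# Illustrative values only -- a real marketplace image supplies its own.
storage_profile = {
    'image_reference': {
        'publisher': 'SUSE',
        'offer': 'sles-15-sp5',
        'sku': 'gen2',
        'version': 'latest',
    },
}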
def _create_models_for_relation_step(self, rel_model_name, rel_key, rel_value, model): """ Create a new model linked to the given model. Syntax: And `model` with `field` "`value`" has `new model` in the database: Example: .. code-block:: gherkin And project with name "Ball Project" has goals in the database: | description | | To have fun playing with balls of twine | """ model = get_model(model) lookup = {rel_key: rel_value} rel_model = get_model(rel_model_name).objects.get(**lookup) data = guess_types(self.hashes) for hash_ in data: hash_['%s' % rel_model_name] = rel_model try: func = _WRITE_MODEL[model] except KeyError: func = partial(write_models, model) func(data, None)
[ "def", "_create_models_for_relation_step", "(", "self", ",", "rel_model_name", ",", "rel_key", ",", "rel_value", ",", "model", ")", ":", "model", "=", "get_model", "(", "model", ")", "lookup", "=", "{", "rel_key", ":", "rel_value", "}", "rel_model", "=", "get_model", "(", "rel_model_name", ")", ".", "objects", ".", "get", "(", "*", "*", "lookup", ")", "data", "=", "guess_types", "(", "self", ".", "hashes", ")", "for", "hash_", "in", "data", ":", "hash_", "[", "'%s'", "%", "rel_model_name", "]", "=", "rel_model", "try", ":", "func", "=", "_WRITE_MODEL", "[", "model", "]", "except", "KeyError", ":", "func", "=", "partial", "(", "write_models", ",", "model", ")", "func", "(", "data", ",", "None", ")" ]
25.606061
23.363636
def track_job(job_id): """ Tracking is done by requesting each job and then searching for whether the job has one of the following states: - "RUN", - "PEND", - "SSUSP", - "EXIT" based on the LSF documentation """ cmd = "bjobs -noheader -o stat {}".format(job_id) track_job_proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True) # decode the bytes output before stripping the trailing newline status = track_job_proc.communicate()[0].decode('utf-8').strip('\n') return status
[ "def", "track_job", "(", "job_id", ")", ":", "cmd", "=", "\"bjobs -noheader -o stat {}\"", ".", "format", "(", "job_id", ")", "track_job_proc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "True", ")", "status", "=", "track_job_proc", ".", "communicate", "(", ")", "[", "0", "]", ".", "strip", "(", "'\\n'", ")", "return", "status" ]
30.066667
16.066667
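A hedged usage sketch for track_job; the job id is hypothetical and the returned string is one of the LSF states listed in the docstring:

status = track_job('123456')
if status == 'RUN':
    print('job is running')
elif status in ('EXIT', 'SSUSP'):
    print('job exited or was suspended')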
def _configure(self, **kwargs): """Configure authentication endpoint. Optional kwargs may include: - cloud_environment (msrestazure.azure_cloud.Cloud): A targeted cloud environment - china (bool): Configure auth for China-based service, default is 'False'. - tenant (str): Alternative tenant, default is 'common'. - resource (str): Alternative authentication resource, default is 'https://management.core.windows.net/'. - verify (bool): Verify secure connection, default is 'True'. - timeout (int): Timeout of the request in seconds. - proxies (dict): Dictionary mapping protocol or protocol and hostname to the URL of the proxy. - cache (adal.TokenCache): A adal.TokenCache, see ADAL configuration for details. This parameter is not used here and directly passed to ADAL. """ if kwargs.get('china'): err_msg = ("china parameter is deprecated, " "please use " "cloud_environment=msrestazure.azure_cloud.AZURE_CHINA_CLOUD") warnings.warn(err_msg, DeprecationWarning) self._cloud_environment = AZURE_CHINA_CLOUD else: self._cloud_environment = AZURE_PUBLIC_CLOUD self._cloud_environment = kwargs.get('cloud_environment', self._cloud_environment) auth_endpoint = self._cloud_environment.endpoints.active_directory resource = self._cloud_environment.endpoints.active_directory_resource_id self._tenant = kwargs.get('tenant', "common") self._verify = kwargs.get('verify') # 'None' will honor ADAL_PYTHON_SSL_NO_VERIFY self.resource = kwargs.get('resource', resource) self._proxies = kwargs.get('proxies') self._timeout = kwargs.get('timeout') self._cache = kwargs.get('cache') self.store_key = "{}_{}".format( auth_endpoint.strip('/'), self.store_key) self.secret = None self._context = None
[ "def", "_configure", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'china'", ")", ":", "err_msg", "=", "(", "\"china parameter is deprecated, \"", "\"please use \"", "\"cloud_environment=msrestazure.azure_cloud.AZURE_CHINA_CLOUD\"", ")", "warnings", ".", "warn", "(", "err_msg", ",", "DeprecationWarning", ")", "self", ".", "_cloud_environment", "=", "AZURE_CHINA_CLOUD", "else", ":", "self", ".", "_cloud_environment", "=", "AZURE_PUBLIC_CLOUD", "self", ".", "_cloud_environment", "=", "kwargs", ".", "get", "(", "'cloud_environment'", ",", "self", ".", "_cloud_environment", ")", "auth_endpoint", "=", "self", ".", "_cloud_environment", ".", "endpoints", ".", "active_directory", "resource", "=", "self", ".", "_cloud_environment", ".", "endpoints", ".", "active_directory_resource_id", "self", ".", "_tenant", "=", "kwargs", ".", "get", "(", "'tenant'", ",", "\"common\"", ")", "self", ".", "_verify", "=", "kwargs", ".", "get", "(", "'verify'", ")", "# 'None' will honor ADAL_PYTHON_SSL_NO_VERIFY", "self", ".", "resource", "=", "kwargs", ".", "get", "(", "'resource'", ",", "resource", ")", "self", ".", "_proxies", "=", "kwargs", ".", "get", "(", "'proxies'", ")", "self", ".", "_timeout", "=", "kwargs", ".", "get", "(", "'timeout'", ")", "self", ".", "_cache", "=", "kwargs", ".", "get", "(", "'cache'", ")", "self", ".", "store_key", "=", "\"{}_{}\"", ".", "format", "(", "auth_endpoint", ".", "strip", "(", "'/'", ")", ",", "self", ".", "store_key", ")", "self", ".", "secret", "=", "None", "self", ".", "_context", "=", "None" ]
49.878049
22.463415
def get_corresponding_chains(self, from_pdb_id, from_chain_id, to_pdb_id): '''Should be called after get_mutations.''' chains = self.chain_map.get(from_pdb_id, {}).get(from_chain_id, {}).get(to_pdb_id, []) return sorted(chains)
[ "def", "get_corresponding_chains", "(", "self", ",", "from_pdb_id", ",", "from_chain_id", ",", "to_pdb_id", ")", ":", "chains", "=", "self", ".", "chain_map", ".", "get", "(", "from_pdb_id", ",", "{", "}", ")", ".", "get", "(", "from_chain_id", ",", "{", "}", ")", ".", "get", "(", "to_pdb_id", ",", "[", "]", ")", "return", "sorted", "(", "chains", ")" ]
62
27.5
def publish(self, tag, message): """ Publish a message down the socket """ payload = self.build_payload(tag, message) self.socket.send(payload)
[ "def", "publish", "(", "self", ",", "tag", ",", "message", ")", ":", "payload", "=", "self", ".", "build_payload", "(", "tag", ",", "message", ")", "self", ".", "socket", ".", "send", "(", "payload", ")" ]
41
6.25
def plot_mag(fignum, datablock, s, num, units, norm): """ plots magnetization against (de)magnetizing temperature or field Parameters _________________ fignum : matplotlib figure number for plotting datablock : nested list of [step, 0, 0, magnetization, 1,quality] s : string for title num : matplotlib figure number, can set to 1 units : [T,K,U] for tesla, kelvin or arbitrary norm : [True,False] if True, normalize Effects ______ plots figure """ global globals, graphmenu Ints = [] for plotrec in datablock: Ints.append(plotrec[3]) Ints.sort() plt.figure(num=fignum) T, M, Tv, recnum = [], [], [], 0 Mex, Tex, Vdif = [], [], [] recbak = [] for rec in datablock: if rec[5] == 'g': if units == "T": T.append(rec[0] * 1e3) Tv.append(rec[0] * 1e3) if recnum > 0: Tv.append(rec[0] * 1e3) elif units == "U": T.append(rec[0]) Tv.append(rec[0]) if recnum > 0: Tv.append(rec[0]) elif units == "K": T.append(rec[0] - 273) Tv.append(rec[0] - 273) if recnum > 0: Tv.append(rec[0] - 273) elif "T" in units and "K" in units: if rec[0] < 1.: T.append(rec[0] * 1e3) Tv.append(rec[0] * 1e3) else: T.append(rec[0] - 273) Tv.append(rec[0] - 273) if recnum > 0: Tv.append(rec[0] - 273) else: T.append(rec[0]) Tv.append(rec[0]) if recnum > 0: Tv.append(rec[0]) if norm: M.append(old_div(rec[3], Ints[-1])) else: M.append(rec[3]) if recnum > 0 and len(rec) > 0 and len(recbak) > 0: v = [] if recbak[0] != rec[0]: V0 = pmag.dir2cart([recbak[1], recbak[2], recbak[3]]) V1 = pmag.dir2cart([rec[1], rec[2], rec[3]]) for el in range(3): v.append(abs(V1[el] - V0[el])) vdir = pmag.cart2dir(v) # append vector difference Vdif.append(old_div(vdir[2], Ints[-1])) Vdif.append(old_div(vdir[2], Ints[-1])) recbak = [] for el in rec: recbak.append(el) delta = .005 * M[0] if num == 1: if recnum % 2 == 0: plt.text(T[-1] + delta, M[-1], (' ' + str(recnum)), fontsize=9) recnum += 1 else: if rec[0] < 200: Tex.append(rec[0] * 1e3) if rec[0] >= 200: Tex.append(rec[0] - 273) Mex.append(old_div(rec[3], Ints[-1])) recnum += 1 if globals != 0: globals.MTlist = T globals.MTlisty = M if len(Mex) > 0 and len(Tex) > 0: plt.scatter(Tex, Mex, marker='d', color='k') if len(Vdif) > 0: Vdif.append(old_div(vdir[2], Ints[-1])) Vdif.append(0) if Tv: Tv.append(Tv[-1]) plt.plot(T, M) plt.plot(T, M, 'ro') if len(Tv) == len(Vdif) and norm: plt.plot(Tv, Vdif, 'g-') if units == "T": plt.xlabel("Step (mT)") elif units == "K": plt.xlabel("Step (C)") elif units == "J": plt.xlabel("Step (J)") else: plt.xlabel("Step [mT,C]") if norm == 1: plt.ylabel("Fractional Magnetization") if norm == 0: plt.ylabel("Magnetization") plt.axvline(0, color='k') plt.axhline(0, color='k') tstring = s plt.title(tstring) plt.draw()
[ "def", "plot_mag", "(", "fignum", ",", "datablock", ",", "s", ",", "num", ",", "units", ",", "norm", ")", ":", "global", "globals", ",", "graphmenu", "Ints", "=", "[", "]", "for", "plotrec", "in", "datablock", ":", "Ints", ".", "append", "(", "plotrec", "[", "3", "]", ")", "Ints", ".", "sort", "(", ")", "plt", ".", "figure", "(", "num", "=", "fignum", ")", "T", ",", "M", ",", "Tv", ",", "recnum", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "0", "Mex", ",", "Tex", ",", "Vdif", "=", "[", "]", ",", "[", "]", ",", "[", "]", "recbak", "=", "[", "]", "for", "rec", "in", "datablock", ":", "if", "rec", "[", "5", "]", "==", "'g'", ":", "if", "units", "==", "\"T\"", ":", "T", ".", "append", "(", "rec", "[", "0", "]", "*", "1e3", ")", "Tv", ".", "append", "(", "rec", "[", "0", "]", "*", "1e3", ")", "if", "recnum", ">", "0", ":", "Tv", ".", "append", "(", "rec", "[", "0", "]", "*", "1e3", ")", "elif", "units", "==", "\"U\"", ":", "T", ".", "append", "(", "rec", "[", "0", "]", ")", "Tv", ".", "append", "(", "rec", "[", "0", "]", ")", "if", "recnum", ">", "0", ":", "Tv", ".", "append", "(", "rec", "[", "0", "]", ")", "elif", "units", "==", "\"K\"", ":", "T", ".", "append", "(", "rec", "[", "0", "]", "-", "273", ")", "Tv", ".", "append", "(", "rec", "[", "0", "]", "-", "273", ")", "if", "recnum", ">", "0", ":", "Tv", ".", "append", "(", "rec", "[", "0", "]", "-", "273", ")", "elif", "\"T\"", "in", "units", "and", "\"K\"", "in", "units", ":", "if", "rec", "[", "0", "]", "<", "1.", ":", "T", ".", "append", "(", "rec", "[", "0", "]", "*", "1e3", ")", "Tv", ".", "append", "(", "rec", "[", "0", "]", "*", "1e3", ")", "else", ":", "T", ".", "append", "(", "rec", "[", "0", "]", "-", "273", ")", "Tv", ".", "append", "(", "rec", "[", "0", "]", "-", "273", ")", "if", "recnum", ">", "0", ":", "Tv", ".", "append", "(", "rec", "[", "0", "]", "-", "273", ")", "else", ":", "T", ".", "append", "(", "rec", "[", "0", "]", ")", "Tv", ".", "append", "(", "rec", "[", "0", "]", ")", "if", "recnum", ">", "0", ":", "Tv", ".", "append", "(", "rec", "[", "0", "]", ")", "if", "norm", ":", "M", ".", "append", "(", "old_div", "(", "rec", "[", "3", "]", ",", "Ints", "[", "-", "1", "]", ")", ")", "else", ":", "M", ".", "append", "(", "rec", "[", "3", "]", ")", "if", "recnum", ">", "0", "and", "len", "(", "rec", ")", ">", "0", "and", "len", "(", "recbak", ")", ">", "0", ":", "v", "=", "[", "]", "if", "recbak", "[", "0", "]", "!=", "rec", "[", "0", "]", ":", "V0", "=", "pmag", ".", "dir2cart", "(", "[", "recbak", "[", "1", "]", ",", "recbak", "[", "2", "]", ",", "recbak", "[", "3", "]", "]", ")", "V1", "=", "pmag", ".", "dir2cart", "(", "[", "rec", "[", "1", "]", ",", "rec", "[", "2", "]", ",", "rec", "[", "3", "]", "]", ")", "for", "el", "in", "range", "(", "3", ")", ":", "v", ".", "append", "(", "abs", "(", "V1", "[", "el", "]", "-", "V0", "[", "el", "]", ")", ")", "vdir", "=", "pmag", ".", "cart2dir", "(", "v", ")", "# append vector difference", "Vdif", ".", "append", "(", "old_div", "(", "vdir", "[", "2", "]", ",", "Ints", "[", "-", "1", "]", ")", ")", "Vdif", ".", "append", "(", "old_div", "(", "vdir", "[", "2", "]", ",", "Ints", "[", "-", "1", "]", ")", ")", "recbak", "=", "[", "]", "for", "el", "in", "rec", ":", "recbak", ".", "append", "(", "el", ")", "delta", "=", ".005", "*", "M", "[", "0", "]", "if", "num", "==", "1", ":", "if", "recnum", "%", "2", "==", "0", ":", "plt", ".", "text", "(", "T", "[", "-", "1", "]", "+", "delta", ",", "M", "[", "-", "1", "]", ",", "(", "' '", "+", "str", "(", "recnum", ")", ")", ",", 
"fontsize", "=", "9", ")", "recnum", "+=", "1", "else", ":", "if", "rec", "[", "0", "]", "<", "200", ":", "Tex", ".", "append", "(", "rec", "[", "0", "]", "*", "1e3", ")", "if", "rec", "[", "0", "]", ">=", "200", ":", "Tex", ".", "append", "(", "rec", "[", "0", "]", "-", "273", ")", "Mex", ".", "append", "(", "old_div", "(", "rec", "[", "3", "]", ",", "Ints", "[", "-", "1", "]", ")", ")", "recnum", "+=", "1", "if", "globals", "!=", "0", ":", "globals", ".", "MTlist", "=", "T", "globals", ".", "MTlisty", "=", "M", "if", "len", "(", "Mex", ")", ">", "0", "and", "len", "(", "Tex", ")", ">", "0", ":", "plt", ".", "scatter", "(", "Tex", ",", "Mex", ",", "marker", "=", "'d'", ",", "color", "=", "'k'", ")", "if", "len", "(", "Vdif", ")", ">", "0", ":", "Vdif", ".", "append", "(", "old_div", "(", "vdir", "[", "2", "]", ",", "Ints", "[", "-", "1", "]", ")", ")", "Vdif", ".", "append", "(", "0", ")", "if", "Tv", ":", "Tv", ".", "append", "(", "Tv", "[", "-", "1", "]", ")", "plt", ".", "plot", "(", "T", ",", "M", ")", "plt", ".", "plot", "(", "T", ",", "M", ",", "'ro'", ")", "if", "len", "(", "Tv", ")", "==", "len", "(", "Vdif", ")", "and", "norm", ":", "plt", ".", "plot", "(", "Tv", ",", "Vdif", ",", "'g-'", ")", "if", "units", "==", "\"T\"", ":", "plt", ".", "xlabel", "(", "\"Step (mT)\"", ")", "elif", "units", "==", "\"K\"", ":", "plt", ".", "xlabel", "(", "\"Step (C)\"", ")", "elif", "units", "==", "\"J\"", ":", "plt", ".", "xlabel", "(", "\"Step (J)\"", ")", "else", ":", "plt", ".", "xlabel", "(", "\"Step [mT,C]\"", ")", "if", "norm", "==", "1", ":", "plt", ".", "ylabel", "(", "\"Fractional Magnetization\"", ")", "if", "norm", "==", "0", ":", "plt", ".", "ylabel", "(", "\"Magnetization\"", ")", "plt", ".", "axvline", "(", "0", ",", "color", "=", "'k'", ")", "plt", ".", "axhline", "(", "0", ",", "color", "=", "'k'", ")", "tstring", "=", "s", "plt", ".", "title", "(", "tstring", ")", "plt", ".", "draw", "(", ")" ]
32.067227
13.210084
def fetch_coords(self, query): """Pull down coordinate data from the endpoint.""" q = query.add_query_parameter(req='coord') return self._parse_messages(self.get_query(q).content)
[ "def", "fetch_coords", "(", "self", ",", "query", ")", ":", "q", "=", "query", ".", "add_query_parameter", "(", "req", "=", "'coord'", ")", "return", "self", ".", "_parse_messages", "(", "self", ".", "get_query", "(", "q", ")", ".", "content", ")" ]
50
10.5