Dataset columns:

    text          string  (lengths 89 to 104k)
    code_tokens   list
    avg_line_len  float64 (7.91 to 980)
    score         float64 (0 to 630)
def __add_variables(self, *args, **kwargs):
    """
    Adds given variables to __variables attribute.

    :param \*args: Variables.
    :type \*args: \*
    :param \*\*kwargs: Variables : Values.
    :type \*\*kwargs: \*
    """

    for variable in args:
        self.__variables[variable] = None
    self.__variables.update(kwargs)
avg_line_len: 27.923077 | score: 11.923077
def raw_cmd(self, *args):
    '''Run an adb command and return the subprocess.Popen object.'''
    cmd_line = [self.adb()] + self.adb_host_port_options + list(args)
    if os.name != "nt":
        cmd_line = [" ".join(cmd_line)]
    return subprocess.Popen(cmd_line, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
avg_line_len: 55.166667 | score: 24.5
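A minimal usage sketch for the method above; `adb` stands for an instance of the (unnamed) owning class, and the queried property is only illustrative:

    proc = adb.raw_cmd("shell", "getprop", "ro.product.model")
    out, err = proc.communicate()    # adb writes the result to stdout
    print(out.decode().strip())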
def shh_newFilter(self, to=None, *, topics):
    """https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_newfilter

    DEPRECATED
    """
    obj = {
        'to': to,
        'topics': topics,
    }
    warnings.warn('deprecated', DeprecationWarning)
    return (yield from self.rpc_call('shh_newFilter', [obj]))
avg_line_len: 30.727273 | score: 17.454545
def connect(self, access_key_id=None, secret_access_key=None, **kwargs):
    """
    Opens a connection to appropriate provider, depending on provider
    portion of URI. Requires Credentials defined in boto config file (see
    boto/pyami/config.py).

    @type storage_uri: StorageUri
    @param storage_uri: StorageUri specifying a bucket or a bucket+object

    @rtype: L{AWSAuthConnection<boto.gs.connection.AWSAuthConnection>}
    @return: A connection to storage service provider of the given URI.
    """
    connection_args = dict(self.connection_args or ())
    # Use OrdinaryCallingFormat instead of boto-default
    # SubdomainCallingFormat because the latter changes the hostname
    # that's checked during cert validation for HTTPS connections,
    # which will fail cert validation (when cert validation is enabled).
    # Note: the following import can't be moved up to the start of
    # this file else it causes a config import failure when run from
    # the resumable upload/download tests.
    from boto.s3.connection import OrdinaryCallingFormat
    connection_args['calling_format'] = OrdinaryCallingFormat()
    if (hasattr(self, 'suppress_consec_slashes') and
            'suppress_consec_slashes' not in connection_args):
        connection_args['suppress_consec_slashes'] = (
            self.suppress_consec_slashes)
    connection_args.update(kwargs)
    if not self.connection:
        if self.scheme in self.provider_pool:
            self.connection = self.provider_pool[self.scheme]
        elif self.scheme == 's3':
            from boto.s3.connection import S3Connection
            self.connection = S3Connection(access_key_id,
                                           secret_access_key,
                                           **connection_args)
            self.provider_pool[self.scheme] = self.connection
        elif self.scheme == 'gs':
            from boto.gs.connection import GSConnection
            self.connection = GSConnection(access_key_id,
                                           secret_access_key,
                                           **connection_args)
            self.provider_pool[self.scheme] = self.connection
        elif self.scheme == 'file':
            from boto.file.connection import FileConnection
            self.connection = FileConnection(self)
        else:
            raise InvalidUriError('Unrecognized scheme "%s"' %
                                  self.scheme)
        self.connection.debug = self.debug
    return self.connection
avg_line_len: 55.333333 | score: 19.208333
def decode_params_utf8(params):
    """Ensures that all parameters in a list of 2-element tuples are decoded to
    unicode using UTF-8.
    """
    decoded = []
    for k, v in params:
        decoded.append((
            k.decode('utf-8') if isinstance(k, bytes) else k,
            v.decode('utf-8') if isinstance(v, bytes) else v))
    return decoded
avg_line_len: 34.5 | score: 14.7
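A quick self-contained check of the behavior (str keys, bytes values, and None all pass through correctly):

    params = [(b'grant_type', b'password'), ('scope', b'read write'), ('state', None)]
    print(decode_params_utf8(params))
    # [('grant_type', 'password'), ('scope', 'read write'), ('state', None)]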
def check_docker_command_works():
    """
    Verify that dockerd and docker binary works fine. This is performed by
    calling `docker version`, which also checks server API version.

    :return: bool, True if all is good, otherwise ConuException or
             CommandDoesNotExistException is thrown
    """
    try:
        out = subprocess.check_output(["docker", "version"],
                                      stderr=subprocess.STDOUT,
                                      universal_newlines=True)
    except OSError:
        logger.info("docker binary is not available")
        raise CommandDoesNotExistException(
            "docker command doesn't seem to be available on your system. "
            "Please install and configure docker."
        )
    except subprocess.CalledProcessError as ex:
        logger.error("exception: %s", ex)
        logger.error("rc: %s, output: %r", ex.returncode, ex.output)
        raise ConuException(
            "`docker version` call failed, it seems that your docker daemon is misconfigured or "
            "this user can't communicate with dockerd."
        )
    else:
        logger.info("docker environment info: %r", out)
    return True
avg_line_len: 41.821429 | score: 21.821429
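A hedged usage sketch for a pre-flight check; it assumes ConuException and CommandDoesNotExistException can be imported from the same library as the function:

    try:
        check_docker_command_works()    # raises when docker is absent or dockerd is unreachable
    except (CommandDoesNotExistException, ConuException) as e:
        print("Docker is not usable:", e)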
def version():
    '''
    Return imgadm version

    CLI Example:

    .. code-block:: bash

        salt '*' imgadm.version
    '''
    ret = {}
    cmd = 'imgadm --version'
    res = __salt__['cmd.run'](cmd).splitlines()
    ret = res[0].split()
    return ret[-1]
avg_line_len: 16.866667 | score: 24.066667
def instantiate_labels(instructions):
    """
    Takes an iterable of instructions which may contain label placeholders and
    assigns them all defined values.

    :return: list of instructions with all label placeholders assigned to real labels.
    """
    label_i = 1
    result = []
    label_mapping = dict()
    for instr in instructions:
        if isinstance(instr, Jump) and isinstance(instr.target, LabelPlaceholder):
            new_target, label_mapping, label_i = _get_label(instr.target, label_mapping, label_i)
            result.append(Jump(new_target))
        elif isinstance(instr, JumpConditional) and isinstance(instr.target, LabelPlaceholder):
            new_target, label_mapping, label_i = _get_label(instr.target, label_mapping, label_i)
            cls = instr.__class__  # Make the correct subclass
            result.append(cls(new_target, instr.condition))
        elif isinstance(instr, JumpTarget) and isinstance(instr.label, LabelPlaceholder):
            new_label, label_mapping, label_i = _get_label(instr.label, label_mapping, label_i)
            result.append(JumpTarget(new_label))
        else:
            result.append(instr)
    return result
avg_line_len: 46.64 | score: 27.44
def _setup_kafka(self):
    """
    Sets up kafka connections
    """
    # close older connections
    if self.consumer is not None:
        self.logger.debug("Closing existing kafka consumer")
        self.consumer.close()
        self.consumer = None
    if self.producer is not None:
        self.logger.debug("Closing existing kafka producer")
        self.producer.flush()
        self.producer.close(timeout=10)
        self.producer = None

    # create new connections
    self._consumer_thread = None
    self.logger.debug("Creating kafka connections")
    self.consumer = self._create_consumer()
    if not self.closed:
        self.logger.debug("Kafka Consumer created")

    self.producer = self._create_producer()
    if not self.closed:
        self.logger.debug("Kafka Producer created")

    if not self.closed:
        self.kafka_connected = True
        self.logger.info("Connected successfully to Kafka")
        self._spawn_kafka_consumer_thread()
avg_line_len: 35.931034 | score: 11.655172
def restore_original_method(self):
    """Replaces the proxy method on the target object with its original value."""

    if self._target.is_class_or_module():
        setattr(self._target.obj, self._method_name, self._original_method)
        if self._method_name == '__new__' and sys.version_info >= (3, 0):
            _restore__new__(self._target.obj, self._original_method)
        else:
            setattr(self._target.obj, self._method_name, self._original_method)
    elif self._attr.kind == 'property':
        setattr(self._target.obj.__class__, self._method_name, self._original_method)
        del self._target.obj.__dict__[double_name(self._method_name)]
    elif self._attr.kind == 'attribute':
        self._target.obj.__dict__[self._method_name] = self._original_method
    else:
        # TODO: Could there ever have been a value here that needs to be restored?
        del self._target.obj.__dict__[self._method_name]

    if self._method_name in ['__call__', '__enter__', '__exit__']:
        self._target.restore_attr(self._method_name)
avg_line_len: 55.3 | score: 26.65
def getRvaFromOffset(self, offset):
    """
    Converts an offset to an RVA.

    @type offset: int
    @param offset: The offset value to be converted to RVA.

    @rtype: int
    @return: The RVA obtained from the given offset.
    """
    rva = -1
    s = self.getSectionByOffset(offset)
    if s:
        rva = (offset - self.sectionHeaders[s].pointerToRawData.value) + self.sectionHeaders[s].virtualAddress.value
    return rva
avg_line_len: 29.529412 | score: 21.411765
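The conversion simply rebases a file offset into the containing section's virtual address space; a worked example with made-up section header values:

    # Hypothetical section header values:
    #   pointerToRawData = 0x400   (file offset where the section's raw data starts)
    #   virtualAddress   = 0x1000  (RVA at which the section is mapped)
    offset = 0x450
    rva = (offset - 0x400) + 0x1000    # 0x50 + 0x1000 = 0x1050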
def timeit_compare(stmt_list, setup='', iterations=100000, verbose=True,
                   strict=False, assertsame=True):
    """
    Compares several statments by timing them and also
    checks that they have the same return value

    Args:
        stmt_list (list) : list of statments to compare
        setup (str) :
        iterations (int) :
        verbose (bool) :
        strict (bool) :

    Returns:
        tuple (bool, list, list) : (passed, time_list, result_list)
            passed (bool): True if all results are the same
            time_list (list): list of times for each statment
            result_list (list): list of results values for each statment

    CommandLine:
        python -m utool.util_dev --exec-timeit_compare

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dev import *  # NOQA
        >>> import utool as ut
        >>> setup = ut.codeblock(
            '''
            import numpy as np
            rng = np.random.RandomState(0)
            invVR_mats = rng.rand(1000, 3, 3).astype(np.float64)
            ''')
        >>> stmt1 = 'invVR_mats[:, 0:2, 2].T'
        >>> stmt2 = 'invVR_mats.T[2, 0:2]'
        >>> iterations = 1000
        >>> verbose = True
        >>> stmt_list = [stmt1, stmt2]
        >>> ut.timeit_compare(stmt_list, setup=setup, iterations=iterations, verbose=verbose)
    """
    import timeit
    import utool as ut

    stmt_list = [s for s in stmt_list if not s.startswith('#')]

    for stmtx in range(len(stmt_list)):
        # Hacky way of removing assignment and just getting statement
        # We have to make sure it is ok when using it for kwargs
        stmt = stmt_list[stmtx]
        eqpos = stmt.find('=')
        lparen_pos = stmt.find('(')
        if eqpos > 0 and (lparen_pos == -1 or lparen_pos > eqpos):
            stmt = '='.join(stmt.split('=')[1:])
            stmt_list[stmtx] = stmt

    if verbose:
        print('+----------------')
        print('| TIMEIT COMPARE')
        print('+----------------')
        print('| iterations = %d' % (iterations,))
        print('| Input:')
        #print('|     +------------')
        print('|     | num | stmt')
        for count, stmt in enumerate(stmt_list):
            print('|     | %3d | %r' % (count, stmt))
        print('...')
        sys.stdout.flush()
        #print('+     L________________')

    if assertsame:
        result_list = [_testit(stmt, setup) for stmt in stmt_list]
    else:
        result_list = None
    time_list = [timeit.timeit(stmt, setup=setup, number=iterations)
                 for stmt in stmt_list]

    def numpy_diff_tests(result_list):
        print('Testing numpy arrays')
        shape_list = [a.shape for a in result_list]
        print('shape_list = %r' % (shape_list,))
        sum_list = [a.sum() for a in result_list]
        diff_list = [np.abs(a - b) for a, b in ut.itertwo(result_list)]
        print('diff stats')
        for diffs in diff_list:
            print(ut.repr4(ut.get_stats(diffs, precision=2, use_median=True)))
        print('diff_list = %r' % (diff_list,))
        print('sum_list = %r' % (sum_list,))
        print('passed_list = %r' % (passed_list,))

    if assertsame:
        if ut.list_type(result_list) is np.ndarray:
            passed_list = [np.allclose(*tup) for tup in ut.itertwo(result_list)]
            passed = all(passed_list)
            is_numpy = True
        else:
            passed = ut.allsame(result_list, strict=False)
            is_numpy = False
    else:
        passed = True
    if verbose:
        print('| Output:')
        if not passed and assertsame:
            print('|    * FAILED: results differ between some statements')
            if is_numpy:
                numpy_diff_tests(result_list)
            print('| Results:')
            for result in result_list:
                for count, result in enumerate(result_list):
                    print('<Result %d>' % count)
                    print(result)
                    #print(ut.truncate_str(repr(result)))
                    print('</Result %d>' % count)
            if strict:
                raise AssertionError('Results are not valid')
        else:
            if assertsame:
                print('|    * PASSED: each statement produced the same result')
            else:
                print('|    * PASSED: each statement did not error')
            passed = True
        #print('|    +-----------------------------------')
        print('|    | num | total time | per loop | stmt')
        for count, tup in enumerate(zip(stmt_list, time_list)):
            stmt, time = tup
            print('|    | %3d | %10s | %8s | %s' %
                  (count, ut.seconds_str(time),
                   ut.seconds_str(time / iterations), stmt))
        #print('|    L___________________________________')
    if verbose:
        print('L_________________')
    return (passed, time_list, result_list)
avg_line_len: 37.2 | score: 17.4
def _time_delta_from_info(info):
    """Format the elapsed time for the given TensorBoardInfo.

    Args:
      info: A TensorBoardInfo value.

    Returns:
      A human-readable string describing the time since the server
      described by `info` started: e.g., "2 days, 0:48:58".
    """
    delta_seconds = int(time.time()) - info.start_time
    return str(datetime.timedelta(seconds=delta_seconds))
avg_line_len: 31.25 | score: 18.75
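The human-readable form comes directly from str() on a datetime.timedelta; a self-contained check:

    import datetime
    print(datetime.timedelta(seconds=176938))    # prints: 2 days, 1:08:58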
def decode_embedded_strs(src):
    '''
    Convert embedded bytes to strings if possible.
    This is necessary because Python 3 makes a distinction
    between these types.

    This wouldn't be needed if we used "use_bin_type=True" when encoding
    and "encoding='utf-8'" when decoding. Unfortunately, this would break
    backwards compatibility due to a change in wire protocol, so this
    less than ideal solution is used instead.
    '''
    if not six.PY3:
        return src
    if isinstance(src, dict):
        return _decode_embedded_dict(src)
    elif isinstance(src, list):
        return _decode_embedded_list(src)
    elif isinstance(src, bytes):
        try:
            return src.decode()  # pylint: disable=redefined-variable-type
        except UnicodeError:
            return src
    else:
        return src
avg_line_len: 32.36 | score: 20.68
def align_chunk_with_ner(tmp_ner_path, i_chunk, tmp_done_path):
    '''
    iterate through the i_chunk and tmp_ner_path to generate a new Chunk
    with body.ner
    '''
    o_chunk = Chunk()
    input_iter = i_chunk.__iter__()
    ner = ''
    stream_id = None

    all_ner = xml.dom.minidom.parse(open(tmp_ner_path))

    for raw_ner in all_ner.getElementsByTagName('FILENAME'):
        stream_item = input_iter.next()
        ## get stream_id out of the XML
        stream_id = raw_ner.attributes.get('docid').value
        assert stream_id and stream_id == stream_item.stream_id, \
            '%s != %s\nner=%r' % (stream_id, stream_item.stream_id, ner)

        tagger_id = 'lingpipe'
        tagging = Tagging()
        tagging.tagger_id = tagger_id
        ## get this one file out of its FILENAME tags
        tagged_doc = list(lingpipe.files(raw_ner.toxml()))[0][1]
        tagging.raw_tagging = tagged_doc
        tagging.generation_time = streamcorpus.make_stream_time()
        stream_item.body.taggings[tagger_id] = tagging

        sentences = list(lingpipe.sentences(tagged_doc))

        ## make JS labels on individual tokens
        assert stream_item.ratings[0].mentions, stream_item.stream_id
        john_smith_label = Label()
        john_smith_label.annotator = stream_item.ratings[0].annotator
        john_smith_label.target_id = stream_item.ratings[0].target_id

        # first map all corefchains to their words
        equiv_ids = collections.defaultdict(lambda: set())
        for sent in sentences:
            for tok in sent.tokens:
                if tok.entity_type is not None:
                    equiv_ids[tok.equiv_id].add(cleanse(tok.token))

        ## find all the chains that are John Smith
        johnsmiths = set()
        for equiv_id, names in equiv_ids.items():
            ## detect 'smith' in 'smithye'
            _names = cleanse(' '.join(names))
            if 'john' in _names and 'smith' in _names:
                johnsmiths.add(equiv_id)

        print len(johnsmiths)
        ## now apply the label
        for sent in sentences:
            for tok in sent.tokens:
                if tok.equiv_id in johnsmiths:
                    tok.labels = [john_smith_label]

        stream_item.body.sentences[tagger_id] = sentences
        o_chunk.add(stream_item)

    ## put the o_chunk bytes into the specified file
    open(tmp_done_path, 'wb').write(str(o_chunk))

    ## replace this with log.info()
    print 'created %s' % tmp_done_path
avg_line_len: 36.776119 | score: 18.656716
def add_review_date(self, doc, reviewed):
    """Sets the review date. Raises CardinalityError if
    already set. OrderError if no reviewer defined before.
    Raises SPDXValueError if invalid reviewed value.
    """
    if len(doc.reviews) != 0:
        if not self.review_date_set:
            self.review_date_set = True
            date = utils.datetime_from_iso_format(reviewed)
            if date is not None:
                doc.reviews[-1].review_date = date
                return True
            else:
                raise SPDXValueError('Review::ReviewDate')
        else:
            raise CardinalityError('Review::ReviewDate')
    else:
        raise OrderError('Review::ReviewDate')
avg_line_len: 41.777778 | score: 12.222222
def modify_service(self, service_id, type):
    '''
    modify_service(self, service_id, type)

    | Modifies a service type (action, container, etc.)

    :Parameters:
    * *service_id* (`string`) -- Identifier of an existing service
    * *type* (`string`) -- service type

    :return: Service modification metadata (service id, type, modified date, versions)

    :Example:
    .. code-block:: python

       service_modification_metadata = opereto_client.modify_service('myService', 'container')
       if service_modification_metadata['type'] == 'container':
           print 'service type of {} changed to container'.format('myService')

    '''
    request_data = {'id': service_id, 'type': type}
    return self._call_rest_api('post', '/services', data=request_data,
                               error='Failed to modify service [%s]' % service_id)
avg_line_len: 39.272727 | score: 31.181818
def getIRData(self):
    '''
    Returns last IRData.

    @return last JdeRobotTypes IRData saved
    '''
    if self.hasproxy():
        self.lock.acquire()
        ir = self.ir
        self.lock.release()
        return ir
    return None
avg_line_len: 20.142857 | score: 21.285714
def list_(return_yaml=True, include_pillar=True, include_opts=True, **kwargs):
    '''
    List the beacons currently configured on the minion.

    Args:
        return_yaml (bool):
            Whether to return YAML formatted output, default ``True``.
        include_pillar (bool):
            Whether to include beacons that are configured in pillar,
            default is ``True``.
        include_opts (bool):
            Whether to include beacons that are configured in opts,
            default is ``True``.

    Returns:
        list: List of currently configured Beacons.

    CLI Example:

    .. code-block:: bash

        salt '*' beacons.list
    '''
    beacons = None

    try:
        eventer = salt.utils.event.get_event('minion', opts=__opts__)
        res = __salt__['event.fire']({'func': 'list',
                                      'include_pillar': include_pillar,
                                      'include_opts': include_opts},
                                     'manage_beacons')
        if res:
            event_ret = eventer.get_event(
                tag='/salt/minion/minion_beacons_list_complete',
                wait=kwargs.get('timeout', 30))
            log.debug('event_ret %s', event_ret)
            if event_ret and event_ret['complete']:
                beacons = event_ret['beacons']
    except KeyError:
        # Effectively a no-op, since we can't really return without an event
        # system
        ret = {'comment': 'Event module not available. Beacon list failed.',
               'result': False}
        return ret

    if beacons:
        if return_yaml:
            tmp = {'beacons': beacons}
            return salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
        else:
            return beacons
    else:
        return {'beacons': {}}
avg_line_len: 29.85 | score: 23.883333
def _fix_sitk_bug(self, path, metadata):
    """
    There is a bug in SimpleITK for the Z axis in 3D images. This is a fix.

    :param path:
    :param metadata:
    :return:
    """
    ds = dicom.read_file(path)
    ds.SpacingBetweenSlices = str(metadata["voxelsize_mm"][0])[:16]
    dicom.write_file(path, ds)
avg_line_len: 33.6 | score: 13.8
def dobingham(di_block):
    """
    Calculates the Bingham mean and associated statistical parameters from
    directions that are input as a di_block

    Parameters
    ----------
    di_block : a nested list of [dec,inc] or [dec,inc,intensity]

    Returns
    -------
    bpars : dictionary containing the Bingham mean and associated statistics
        dictionary keys
            dec : mean declination
            inc : mean inclination
            n : number of datapoints
            Eta : major ellipse
            Edec : declination of major ellipse axis
            Einc : inclination of major ellipse axis
            Zeta : minor ellipse
            Zdec : declination of minor ellipse axis
            Zinc : inclination of minor ellipse axis
    """
    control, X, bpars = [], [], {}
    N = len(di_block)
    if N < 2:
        return bpars
    #
    #  get cartesian coordinates
    #
    for rec in di_block:
        X.append(dir2cart([rec[0], rec[1], 1.]))
    #
    #   put in T matrix
    #
    T = np.array(Tmatrix(X))
    t, V = tauV(T)
    w1, w2, w3 = t[2], t[1], t[0]
    k1, k2 = binglookup(w1, w2)
    PDir = cart2dir(V[0])
    EDir = cart2dir(V[1])
    ZDir = cart2dir(V[2])
    if PDir[1] < 0:
        PDir[0] += 180.
        PDir[1] = -PDir[1]
    PDir[0] = PDir[0] % 360.
    bpars["dec"] = PDir[0]
    bpars["inc"] = PDir[1]
    bpars["Edec"] = EDir[0]
    bpars["Einc"] = EDir[1]
    bpars["Zdec"] = ZDir[0]
    bpars["Zinc"] = ZDir[1]
    bpars["n"] = N
    #
    #  now for Bingham ellipses.
    #
    fac1, fac2 = -2 * N * (k1) * (w3 - w1), -2 * N * (k2) * (w3 - w2)
    sig31, sig32 = np.sqrt(old_div(1., fac1)), np.sqrt(old_div(1., fac2))
    bpars["Zeta"], bpars["Eta"] = 2.45 * sig31 * \
        180. / np.pi, 2.45 * sig32 * 180. / np.pi
    return bpars
avg_line_len: 26.774194 | score: 19.193548
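A usage sketch with made-up [declination, inclination] pairs; the pmagpy import path is an assumption:

    from pmagpy import pmag    # assumed location of dobingham

    di_block = [[350.5, 40.2], [10.1, 45.3], [5.7, 38.9], [358.2, 42.0]]
    bpars = pmag.dobingham(di_block)
    print(bpars["dec"], bpars["inc"], bpars["n"])    # Bingham mean direction and N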
def push(self, line, frame, buffer_output=True):
    """Change built-in stdout and stderr methods by the new custom StdMessage.
    execute the InteractiveConsole.push.
    Change the stdout and stderr back be the original built-ins

    :param buffer_output: if False won't redirect the output.

    Return boolean (True if more input is required else False),
    output_messages and input_messages
    """
    self.__buffer_output = buffer_output
    more = False
    if buffer_output:
        original_stdout = sys.stdout
        original_stderr = sys.stderr
    try:
        try:
            self.frame = frame
            if buffer_output:
                out = sys.stdout = IOBuf()
                err = sys.stderr = IOBuf()
            more = self.add_exec(line)
        except Exception:
            exc = get_exception_traceback_str()
            if buffer_output:
                err.buflist.append("Internal Error: %s" % (exc,))
            else:
                sys.stderr.write("Internal Error: %s\n" % (exc,))
    finally:
        # Remove frame references.
        self.frame = None
        frame = None
        if buffer_output:
            sys.stdout = original_stdout
            sys.stderr = original_stderr
    if buffer_output:
        return more, out.buflist, err.buflist
    else:
        return more, [], []
avg_line_len: 35.219512 | score: 14.146341
def get_metadata_desc(self):
    """
    See super class satosa.backends.backend_base.BackendModule#get_metadata_desc
    :rtype: satosa.metadata_creation.description.MetadataDescription
    """
    entity_descriptions = []

    idp_entities = self.sp.metadata.with_descriptor("idpsso")
    for entity_id, entity in idp_entities.items():
        description = MetadataDescription(urlsafe_b64encode(entity_id.encode("utf-8")).decode("utf-8"))

        # Add organization info
        try:
            organization_info = entity["organization"]
        except KeyError:
            pass
        else:
            organization = OrganizationDesc()
            for name_info in organization_info.get("organization_name", []):
                organization.add_name(name_info["text"], name_info["lang"])
            for display_name_info in organization_info.get("organization_display_name", []):
                organization.add_display_name(display_name_info["text"], display_name_info["lang"])
            for url_info in organization_info.get("organization_url", []):
                organization.add_url(url_info["text"], url_info["lang"])
            description.organization = organization

        # Add contact person info
        try:
            contact_persons = entity["contact_person"]
        except KeyError:
            pass
        else:
            for person in contact_persons:
                person_desc = ContactPersonDesc()
                person_desc.contact_type = person.get("contact_type")
                for address in person.get('email_address', []):
                    person_desc.add_email_address(address["text"])
                if "given_name" in person:
                    person_desc.given_name = person["given_name"]["text"]
                if "sur_name" in person:
                    person_desc.sur_name = person["sur_name"]["text"]
                description.add_contact_person(person_desc)

        # Add UI info
        ui_info = self.sp.metadata.extension(entity_id, "idpsso_descriptor", "{}&UIInfo".format(UI_NAMESPACE))
        if ui_info:
            ui_info = ui_info[0]
            ui_info_desc = UIInfoDesc()
            for desc in ui_info.get("description", []):
                ui_info_desc.add_description(desc["text"], desc["lang"])
            for name in ui_info.get("display_name", []):
                ui_info_desc.add_display_name(name["text"], name["lang"])
            for logo in ui_info.get("logo", []):
                ui_info_desc.add_logo(logo["text"], logo["width"], logo["height"], logo.get("lang"))
            description.ui_info = ui_info_desc

        entity_descriptions.append(description)
    return entity_descriptions
avg_line_len: 48.237288 | score: 25.084746
def set_export(args):
    '''Return a list of lines in TSV form that would suffice to reconstitute a
    container (set) entity, if passed to entity_import. The first line in
    the list is the header, and subsequent lines are the container members.
    '''
    r = fapi.get_entity(args.project, args.workspace, args.entity_type,
                        args.entity)
    fapi._check_response_code(r, 200)
    set_type = args.entity_type
    set_name = args.entity
    member_type = set_type.split('_')[0]
    members = r.json()['attributes'][member_type + 's']['items']

    result = ["membership:{}_id\t{}_id".format(set_type, member_type)]
    result += ["%s\t%s" % (set_name, m['entityName']) for m in members]
    return result
avg_line_len: 43.75 | score: 26.375
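For a hypothetical sample_set named my_set with two members, the returned lines would render as the following TSV (entity names invented for illustration):

    membership:sample_set_id	sample_id
    my_set	sample_A
    my_set	sample_B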
def rm_rf(path):
    """
    Recursively (if needed) delete path.
    """
    if os.path.isdir(path) and not os.path.islink(path):
        shutil.rmtree(path)
    elif os.path.lexists(path):
        os.remove(path)
avg_line_len: 25.875 | score: 9.875
def p_list_or(self, p):
    'list : list OR list'
    p[0] = pd.concat(
        [p[1], p[3]], axis=1).fillna(0.0).apply(self.func, axis=1)
avg_line_len: 36.75 | score: 18.25
def do_transition_for(brain_or_object, transition):
    """Performs a workflow transition for the passed in object.

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: The object where the transition was performed
    """
    if not isinstance(transition, basestring):
        fail("Transition type needs to be string, got '%s'" % type(transition))
    obj = get_object(brain_or_object)
    try:
        ploneapi.content.transition(obj, transition)
    except ploneapi.exc.InvalidParameterError as e:
        fail("Failed to perform transition '{}' on {}: {}".format(
             transition, obj, str(e)))
    return obj
avg_line_len: 44.5 | score: 18
def run_at_subprocess(self, use_subprocess, foo, *args, **kwrags):
    """
    Run the given function in a subprocess.
    Very useful when you have a problem with memory leaks.
    """
    if use_subprocess is False:
        return foo(*args, **kwrags)

    child_pid = os.fork()
    if child_pid == 0:
        foo(*args, **kwrags)
        sys.exit(0)
    return os.waitpid(child_pid, 0)[1] == 0
avg_line_len: 33.615385 | score: 12.846154
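A minimal self-contained sketch of the same fork-and-wait pattern (POSIX only; the worker function is hypothetical):

    import os
    import sys

    def leaky_worker():
        data = [b"x" * 1024] * 10000    # allocations die with the child process
        print("worker done, allocated", len(data), "blocks")

    child_pid = os.fork()
    if child_pid == 0:                  # child: do the work, then exit without cleanup
        leaky_worker()
        sys.exit(0)
    status = os.waitpid(child_pid, 0)[1]    # parent: block until the child exits
    print("child succeeded:", status == 0)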
def search(i):
    """
    Input:  {
              (repo_uoa)           - repo UOA
              (module_uoa)         - module UOA
              (data_uoa)           - data UOA

              (repo_uoa_list)      - list of repos to search
              (module_uoa_list)    - list of module to search
              (data_uoa_list)      - list of data to search

              (add_if_date_before) - add only entries with date before this date
              (add_if_date_after)  - add only entries with date after this date
              (add_if_date)        - add only entries with this date

              (search_by_name)     - search by name

              (print_time)         - if 'yes', print elapsed time at the end

              (search_flat_dict)   - search if these flat keys/values exist in entries
              (search_dict)        - search if this dict is a part of the entry

              (tags)               - add tags to search in format tags=x,y,z
                 or
              (search_string)      - search with expressions *?

              (ignore_case)        - if 'yes', ignore case of letters

              (time_out)           - in secs, default=30

              (internal)           - if 'yes', use internal search even if indexing is on

              (limit_size)         - by default 5000 or -1 if no limit

              (print_full)         - if 'yes', show CID (repo_uoa:module_uoa:data_uoa)
              (print_uid)          - if 'yes', print UID in brackets
              (print_name)         - if 'yes', print name (and add info to the list)
              (add_info)           - if 'yes', add info about entry to the list
              (add_meta)           - if 'yes', add meta about entry to the list
            }

    Output: {
              return       - return code = 0, if successful
                                         > 0, if error
              (error)      - error text if return > 0

              lst          - [{'repo_uoa', 'repo_uid',
                               'module_uoa', 'module_uid',
                               'data_uoa', 'data_uid',
                               'path'}]

              elapsed_time - elapsed time in string

              (timed_out)  - if 'yes', timed out
            }
    """
    o = i.get('out', '')

    ss = i.get('search_string', '')
    ls = i.get('limit_size', '5000')

    rr = {'return': 0}

    sd = i.get('search_dict', {})

    tags = i.get('tags', '')
    if tags != '':
        xtags = tags.split(',')
        xtags1 = []
        for q in xtags:
            xtags1.append(q.strip())
        sd['tags'] = xtags1

    # Check if index
    if cfg.get('use_indexing', '') != 'yes' or i.get('internal', '') == 'yes':
        if ss != '':
            i['filter_func'] = 'search_string_filter'
        else:
            sfd = i.get('search_flat_dict', {})
            if len(sfd) > 0:
                r = restore_flattened_dict({'dict': sfd})
                if r['return'] > 0:
                    return r
                nd = r['dict']
                sd.update(nd)
                del (i['search_flat_dict'])
            i['filter_func'] = 'search_filter'

        i['search_dict'] = sd

        pf = i.get('print_full', '')
        if pf == '':
            pf = 'yes'
        i['print_full'] = pf

        rr = list_data(i)
    else:
        import time
        start_time = time.time()

        dss = {}

        ruoa = i.get('repo_uoa', '')
        muoa = i.get('module_uoa', '')
        duoa = i.get('data_uoa', '')

        lruoa = i.get('repo_uoa_list', [])
        lmuoa = i.get('module_uoa_list', [])
        lduoa = i.get('data_uoa_list', [])

        if ruoa != '':
            lruoa.append(ruoa)
        if muoa != '':
            lmuoa.append(muoa)
        if duoa != '':
            lduoa.append(duoa)

        if len(lruoa) > 0:
            if ss != '':
                ss += ' AND '
            ss += ' ('
            first = True
            for x in lruoa:
                if first:
                    first = False
                else:
                    ss += ' OR '
                ss += '(repo_uid:"' + x + '") OR (repo_uoa:"' + x + '")'
            ss += ')'

        if len(lmuoa) > 0:
            if ss != '':
                ss += ' AND '
            ss += '('
            first = True
            for x in lmuoa:
                if first:
                    first = False
                else:
                    ss += ' OR '
                ss += '(module_uid:"' + x + '") OR (module_uoa:"' + x + '")'
            ss += ')'

        if len(lduoa) > 0:
            if ss != '':
                ss += ' AND '
            ss += '('
            first = True
            for x in lduoa:
                if first:
                    first = False
                else:
                    ss += ' OR '
                ss += '(data_uid:"' + x + '") OR (data_uoa:"' + x + '")'
            ss += ')'

        # Check search keys
        first = True
        for u in sd:
            v = sd[u]

            if first:
                first = False
                if ss == '':
                    ss += '('
                else:
                    ss += ' AND ('
            else:
                ss += ' AND '

            if type(v) == list:
                first1 = True
                for lk in v:
                    if first1:
                        first1 = False
                    else:
                        ss += ' AND '
                    ss += u + ':"' + str(lk) + '"'
            else:
                ss += u + ':"' + v + '"'

        # Check special parameters
        aidb = i.get('add_if_date_before', '')
        aida = i.get('add_if_date_after', '')
        aid = i.get('add_if_date', '')

        # Support ISO and human readable time
        aidb = aidb.strip().replace(' ', 'T')
        aida = aida.strip().replace(' ', 'T')
        aid = aid.strip().replace(' ', 'T')

        sn = i.get('search_by_name', '')

        if sn != '':
            if first:
                first = False
                if ss == '':
                    ss += '('
                else:
                    ss += ' AND ('
            else:
                ss += ' AND '

            if sn.find('*') < 0 and sn.find('?') < 0:
                ss += 'data_name:"' + sn + '"'
            else:
                ss += 'data_name:' + sn + ''

        if aidb != '' or aida != '' or aid != '':
            if first:
                first = False
                if ss == '':
                    ss += '('
                else:
                    ss += ' AND ('
            else:
                ss += ' AND '

            ss += 'iso_datetime:'

            if aid != '':
                ss += '"' + aid + '"'
            else:
                ss += '['
                if aida != '':
                    ss += '"' + aida + '"'
                else:
                    ss += '*'
                if aidb != '':
                    ss += ' TO "' + aidb + '"'
                ss += '] '

        # Finish query
        if not first:
            ss += ')'

        # Prepare ElasticSearch query
        import urllib

        path = '/_search?'
        if ss != '':
            path += 'q=' + urllib.quote_plus(ss.encode('utf-8'))
        if ls != '':
            path += '&size=' + ls

        # dss={'query':{'filtered':{'filter':{'terms':sd}}}}
        dss = {}

        ri = access_index_server({'request': 'GET', 'path': path, 'dict': dss})
        if ri['return'] > 0:
            return ri

        dd = ri['dict'].get('hits', {}).get('hits', [])

        lst = []
        for qx in dd:
            q = qx.get('_source', {})
            ruoa = q.get('repo_uoa', '')
            ruid = q.get('repo_uid', '')
            muoa = q.get('module_uoa', '')
            muid = q.get('module_uid', '')
            duoa = q.get('data_uoa', '')
            duid = q.get('data_uid', '')
            path = q.get('path', '')

            lst.append({'repo_uoa': ruoa, 'repo_uid': ruid,
                        'module_uoa': muoa, 'module_uid': muid,
                        'data_uoa': duoa, 'data_uid': duid,
                        'path': path})

            if o == 'con':
                x = ruoa + ':' + muoa + ':'
                if sys.version_info[0] < 3:
                    y = duoa
                    try:
                        y = y.decode(sys.stdin.encoding)
                    except Exception as e:
                        try:
                            y = y.decode('utf8')
                        except Exception as e:
                            pass
                    x += y
                else:
                    x += duoa
                out(x)

        rr['lst'] = lst
        rr['elapsed_time'] = str(time.time() - start_time)

        if o == 'con' and i.get('print_time', '') == 'yes':
            out('Elapsed time: ' + rr['elapsed_time'] +
                ' sec., number of entries: ' + str(len(lst)))

    return rr
[ "def", "search", "(", "i", ")", ":", "o", "=", "i", ".", "get", "(", "'out'", ",", "''", ")", "ss", "=", "i", ".", "get", "(", "'search_string'", ",", "''", ")", "ls", "=", "i", ".", "get", "(", "'limit_size'", ",", "'5000'", ")", "rr", "=", "{", "'return'", ":", "0", "}", "sd", "=", "i", ".", "get", "(", "'search_dict'", ",", "{", "}", ")", "tags", "=", "i", ".", "get", "(", "'tags'", ",", "''", ")", "if", "tags", "!=", "''", ":", "xtags", "=", "tags", ".", "split", "(", "','", ")", "xtags1", "=", "[", "]", "for", "q", "in", "xtags", ":", "xtags1", ".", "append", "(", "q", ".", "strip", "(", ")", ")", "sd", "[", "'tags'", "]", "=", "xtags1", "# Check if index", "if", "cfg", ".", "get", "(", "'use_indexing'", ",", "''", ")", "!=", "'yes'", "or", "i", ".", "get", "(", "'internal'", ",", "''", ")", "==", "'yes'", ":", "if", "ss", "!=", "''", ":", "i", "[", "'filter_func'", "]", "=", "'search_string_filter'", "else", ":", "sfd", "=", "i", ".", "get", "(", "'search_flat_dict'", ",", "{", "}", ")", "if", "len", "(", "sfd", ")", ">", "0", ":", "r", "=", "restore_flattened_dict", "(", "{", "'dict'", ":", "sfd", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "return", "r", "nd", "=", "r", "[", "'dict'", "]", "sd", ".", "update", "(", "nd", ")", "del", "(", "i", "[", "'search_flat_dict'", "]", ")", "i", "[", "'filter_func'", "]", "=", "'search_filter'", "i", "[", "'search_dict'", "]", "=", "sd", "pf", "=", "i", ".", "get", "(", "'print_full'", ",", "''", ")", "if", "pf", "==", "''", ":", "pf", "=", "'yes'", "i", "[", "'print_full'", "]", "=", "pf", "rr", "=", "list_data", "(", "i", ")", "else", ":", "import", "time", "start_time", "=", "time", ".", "time", "(", ")", "dss", "=", "{", "}", "ruoa", "=", "i", ".", "get", "(", "'repo_uoa'", ",", "''", ")", "muoa", "=", "i", ".", "get", "(", "'module_uoa'", ",", "''", ")", "duoa", "=", "i", ".", "get", "(", "'data_uoa'", ",", "''", ")", "lruoa", "=", "i", ".", "get", "(", "'repo_uoa_list'", ",", "[", "]", ")", "lmuoa", "=", "i", ".", "get", "(", "'module_uoa_list'", ",", "[", "]", ")", "lduoa", "=", "i", ".", "get", "(", "'data_uoa_list'", ",", "[", "]", ")", "if", "ruoa", "!=", "''", ":", "lruoa", ".", "append", "(", "ruoa", ")", "if", "muoa", "!=", "''", ":", "lmuoa", ".", "append", "(", "muoa", ")", "if", "duoa", "!=", "''", ":", "lduoa", ".", "append", "(", "duoa", ")", "if", "len", "(", "lruoa", ")", ">", "0", ":", "if", "ss", "!=", "''", ":", "ss", "+=", "' AND '", "ss", "+=", "' ('", "first", "=", "True", "for", "x", "in", "lruoa", ":", "if", "first", ":", "first", "=", "False", "else", ":", "ss", "+=", "' OR '", "ss", "+=", "'(repo_uid:\"'", "+", "x", "+", "'\") OR (repo_uoa:\"'", "+", "x", "+", "'\")'", "ss", "+=", "')'", "if", "len", "(", "lmuoa", ")", ">", "0", ":", "if", "ss", "!=", "''", ":", "ss", "+=", "' AND '", "ss", "+=", "'('", "first", "=", "True", "for", "x", "in", "lmuoa", ":", "if", "first", ":", "first", "=", "False", "else", ":", "ss", "+=", "' OR '", "ss", "+=", "'(module_uid:\"'", "+", "x", "+", "'\") OR (module_uoa:\"'", "+", "x", "+", "'\")'", "ss", "+=", "')'", "if", "len", "(", "lduoa", ")", ">", "0", ":", "if", "ss", "!=", "''", ":", "ss", "+=", "' AND '", "ss", "+=", "'('", "first", "=", "True", "for", "x", "in", "lduoa", ":", "if", "first", ":", "first", "=", "False", "else", ":", "ss", "+=", "' OR '", "ss", "+=", "'(data_uid:\"'", "+", "x", "+", "'\") OR (data_uoa:\"'", "+", "x", "+", "'\")'", "ss", "+=", "')'", "# Check search keys", "first", "=", "True", "for", "u", "in", "sd", ":", "v", "=", "sd", "[", 
"u", "]", "if", "first", ":", "first", "=", "False", "if", "ss", "==", "''", ":", "ss", "+=", "'('", "else", ":", "ss", "+=", "' AND ('", "else", ":", "ss", "+=", "' AND '", "if", "type", "(", "v", ")", "==", "list", ":", "first1", "=", "True", "for", "lk", "in", "v", ":", "if", "first1", ":", "first1", "=", "False", "else", ":", "ss", "+=", "' AND '", "ss", "+=", "u", "+", "':\"'", "+", "str", "(", "lk", ")", "+", "'\"'", "else", ":", "ss", "+=", "u", "+", "':\"'", "+", "v", "+", "'\"'", "# Check special parameters", "aidb", "=", "i", ".", "get", "(", "'add_if_date_before'", ",", "''", ")", "aida", "=", "i", ".", "get", "(", "'add_if_date_after'", ",", "''", ")", "aid", "=", "i", ".", "get", "(", "'add_if_date'", ",", "''", ")", "# Support ISO and human readable time", "aidb", "=", "aidb", ".", "strip", "(", ")", ".", "replace", "(", "' '", ",", "'T'", ")", "aida", "=", "aida", ".", "strip", "(", ")", ".", "replace", "(", "' '", ",", "'T'", ")", "aid", "=", "aid", ".", "strip", "(", ")", ".", "replace", "(", "' '", ",", "'T'", ")", "sn", "=", "i", ".", "get", "(", "'search_by_name'", ",", "''", ")", "if", "sn", "!=", "''", ":", "if", "first", ":", "first", "=", "False", "if", "ss", "==", "''", ":", "ss", "+=", "'('", "else", ":", "ss", "+=", "' AND ('", "else", ":", "ss", "+=", "' AND '", "if", "sn", ".", "find", "(", "'*'", ")", "<", "0", "and", "sn", ".", "find", "(", "'?'", ")", "<", "0", ":", "ss", "+=", "'data_name:\"'", "+", "sn", "+", "'\"'", "else", ":", "ss", "+=", "'data_name:'", "+", "sn", "+", "''", "if", "aidb", "!=", "''", "or", "aida", "!=", "''", "or", "aid", "!=", "''", ":", "if", "first", ":", "first", "=", "False", "if", "ss", "==", "''", ":", "ss", "+=", "'('", "else", ":", "ss", "+=", "' AND ('", "else", ":", "ss", "+=", "' AND '", "ss", "+=", "'iso_datetime:'", "if", "aid", "!=", "''", ":", "ss", "+=", "'\"'", "+", "aid", "+", "'\"'", "else", ":", "ss", "+=", "'['", "if", "aida", "!=", "''", ":", "ss", "+=", "'\"'", "+", "aida", "+", "'\"'", "else", ":", "ss", "+=", "'*'", "if", "aidb", "!=", "''", ":", "ss", "+=", "' TO \"'", "+", "aidb", "+", "'\"'", "ss", "+=", "'] '", "# Finish query", "if", "not", "first", ":", "ss", "+=", "')'", "# Prepare ElasticSearch query", "import", "urllib", "path", "=", "'/_search?'", "if", "ss", "!=", "''", ":", "path", "+=", "'q='", "+", "urllib", ".", "quote_plus", "(", "ss", ".", "encode", "(", "'utf-8'", ")", ")", "if", "ls", "!=", "''", ":", "path", "+=", "'&size='", "+", "ls", "# dss={'query':{'filtered':{'filter':{'terms':sd}}}}", "dss", "=", "{", "}", "ri", "=", "access_index_server", "(", "{", "'request'", ":", "'GET'", ",", "'path'", ":", "path", ",", "'dict'", ":", "dss", "}", ")", "if", "ri", "[", "'return'", "]", ">", "0", ":", "return", "ri", "dd", "=", "ri", "[", "'dict'", "]", ".", "get", "(", "'hits'", ",", "{", "}", ")", ".", "get", "(", "'hits'", ",", "[", "]", ")", "lst", "=", "[", "]", "for", "qx", "in", "dd", ":", "q", "=", "qx", ".", "get", "(", "'_source'", ",", "{", "}", ")", "ruoa", "=", "q", ".", "get", "(", "'repo_uoa'", ",", "''", ")", "ruid", "=", "q", ".", "get", "(", "'repo_uid'", ",", "''", ")", "muoa", "=", "q", ".", "get", "(", "'module_uoa'", ",", "''", ")", "muid", "=", "q", ".", "get", "(", "'module_uid'", ",", "''", ")", "duoa", "=", "q", ".", "get", "(", "'data_uoa'", ",", "''", ")", "duid", "=", "q", ".", "get", "(", "'data_uid'", ",", "''", ")", "path", "=", "q", ".", "get", "(", "'path'", ",", "''", ")", "lst", ".", "append", "(", "{", "'repo_uoa'", ":", "ruoa", ",", "'repo_uid'", ":", "ruid", 
",", "'module_uoa'", ":", "muoa", ",", "'module_uid'", ":", "muid", ",", "'data_uoa'", ":", "duoa", ",", "'data_uid'", ":", "duid", ",", "'path'", ":", "path", "}", ")", "if", "o", "==", "'con'", ":", "x", "=", "ruoa", "+", "':'", "+", "muoa", "+", "':'", "if", "sys", ".", "version_info", "[", "0", "]", "<", "3", ":", "y", "=", "duoa", "try", ":", "y", "=", "y", ".", "decode", "(", "sys", ".", "stdin", ".", "encoding", ")", "except", "Exception", "as", "e", ":", "try", ":", "y", "=", "y", ".", "decode", "(", "'utf8'", ")", "except", "Exception", "as", "e", ":", "pass", "x", "+=", "y", "else", ":", "x", "+=", "duoa", "out", "(", "x", ")", "rr", "[", "'lst'", "]", "=", "lst", "rr", "[", "'elapsed_time'", "]", "=", "str", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "if", "o", "==", "'con'", "and", "i", ".", "get", "(", "'print_time'", ",", "''", ")", "==", "'yes'", ":", "out", "(", "'Elapsed time: '", "+", "rr", "[", "'elapsed_time'", "]", "+", "' sec., number of entries: '", "+", "str", "(", "len", "(", "lst", ")", ")", ")", "return", "rr" ]
28.191729
20.349624
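A usage sketch for the record above, calling the function directly with the input dict it documents; the module and tag values here are illustrative placeholders, not taken from the record:

r = search({'module_uoa': 'env',
            'tags': 'compiler,gcc',
            'limit_size': '100'})
if r['return'] > 0:
    print('search failed: ' + r.get('error', ''))
else:
    # each entry carries the CID components plus the entry path
    for e in r['lst']:
        print(e['repo_uoa'] + ':' + e['module_uoa'] + ':' + e['data_uoa'])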
def get_layer(self):
    """ retrieve layer from DB """
    if self.layer:
        return
    try:
        self.layer = Layer.objects.get(slug=self.kwargs['slug'])
    except Layer.DoesNotExist:
        raise Http404(_('Layer not found'))
[ "def", "get_layer", "(", "self", ")", ":", "if", "self", ".", "layer", ":", "return", "try", ":", "self", ".", "layer", "=", "Layer", ".", "objects", ".", "get", "(", "slug", "=", "self", ".", "kwargs", "[", "'slug'", "]", ")", "except", "Layer", ".", "DoesNotExist", ":", "raise", "Http404", "(", "_", "(", "'Layer not found'", ")", ")" ]
32.375
16.125
def create_or_update_secret(self, path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT):
    """Create a new version of a secret at the specified location.

    If the value does not yet exist, the calling token must have an ACL policy granting the create capability.
    If the value already exists, the calling token must have an ACL policy granting the update capability.

    Supported methods:
        POST: /{mount_point}/data/{path}. Produces: 200 application/json

    :param path: Path
    :type path: str | unicode
    :param cas: Set the "cas" value to use a Check-And-Set operation. If not set the write will be allowed. If set
        to 0 a write will only be allowed if the key doesn't exist. If the index is non-zero the write will only be
        allowed if the key's current version matches the version specified in the cas parameter.
    :type cas: int
    :param secret: The contents of the "secret" dict will be stored and returned on read.
    :type secret: dict
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    params = {
        'options': {},
        'data': secret,
    }

    if cas is not None:
        params['options']['cas'] = cas

    api_path = '/v1/{mount_point}/data/{path}'.format(mount_point=mount_point, path=path)
    response = self._adapter.post(
        url=api_path,
        json=params,
    )
    return response.json()
[ "def", "create_or_update_secret", "(", "self", ",", "path", ",", "secret", ",", "cas", "=", "None", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'options'", ":", "{", "}", ",", "'data'", ":", "secret", ",", "}", "if", "cas", "is", "not", "None", ":", "params", "[", "'options'", "]", "[", "'cas'", "]", "=", "cas", "api_path", "=", "'/v1/{mount_point}/data/{path}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "path", "=", "path", ")", "response", "=", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")", "return", "response", ".", "json", "(", ")" ]
44
29.25
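A minimal sketch of reaching this KV v2 method through an hvac client; the Vault address, token, and secret payload are assumptions for illustration:

import hvac

client = hvac.Client(url='http://localhost:8200', token='...')  # placeholder token
client.secrets.kv.v2.create_or_update_secret(
    path='myapp/config',
    secret={'username': 'alice', 'password': 's3cr3t'},
    cas=0,  # Check-And-Set: only write if the key does not exist yet
)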
def importPreflibFile(self, fileName):
    """
    Imports a preflib format file that contains all the information of a Profile. This function will
    completely override all members of the current Profile object. Currently, we assume that in an
    election where incomplete orderings are allowed, if a voter ranks only one candidate, then the
    voter did not prefer any candidates over another. This may lead to some discrepancies when
    importing and exporting a .toi preflib file or a .soi preflib file.

    :ivar str fileName: The name of the input file to be imported.
    """
    # Use the functionality found in io to read the file.
    elecFileObj = open(fileName, 'r')
    self.candMap, rankMaps, wmgMapsCounts, self.numVoters = prefpy_io.read_election_file(elecFileObj)
    elecFileObj.close()

    self.numCands = len(self.candMap.keys())

    # Go through the rankMaps and generate a wmgMap for each vote. Use the wmgMap to create a
    # Preference object.
    self.preferences = []
    for i in range(0, len(rankMaps)):
        wmgMap = self.genWmgMapFromRankMap(rankMaps[i])
        self.preferences.append(Preference(wmgMap, wmgMapsCounts[i]))
[ "def", "importPreflibFile", "(", "self", ",", "fileName", ")", ":", "# Use the functionality found in io to read the file.", "elecFileObj", "=", "open", "(", "fileName", ",", "'r'", ")", "self", ".", "candMap", ",", "rankMaps", ",", "wmgMapsCounts", ",", "self", ".", "numVoters", "=", "prefpy_io", ".", "read_election_file", "(", "elecFileObj", ")", "elecFileObj", ".", "close", "(", ")", "self", ".", "numCands", "=", "len", "(", "self", ".", "candMap", ".", "keys", "(", ")", ")", "# Go through the rankMaps and generate a wmgMap for each vote. Use the wmgMap to create a", "# Preference object.", "self", ".", "preferences", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "rankMaps", ")", ")", ":", "wmgMap", "=", "self", ".", "genWmgMapFromRankMap", "(", "rankMaps", "[", "i", "]", ")", "self", ".", "preferences", ".", "append", "(", "Preference", "(", "wmgMap", ",", "wmgMapsCounts", "[", "i", "]", ")", ")" ]
50.875
29.791667
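A hypothetical round trip, assuming a Profile class exposing the method above and a complete-order (.soc) PrefLib file on disk; the constructor arguments and filename are assumptions:

profile = Profile(candMap={}, preferences=[])  # assumed constructor signature
profile.importPreflibFile('example.soc')
print(profile.numCands, profile.numVoters)
for pref in profile.preferences:
    print(pref.wmgMap)  # weighted majority graph per (grouped) vote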
def species_string(self):
    """
    String representation of species on the site.
    """
    if self.is_ordered:
        return list(self.species.keys())[0].__str__()
    else:
        sorted_species = sorted(self.species.keys())
        return ", ".join(["{}:{:.3f}".format(sp, self.species[sp])
                          for sp in sorted_species])
[ "def", "species_string", "(", "self", ")", ":", "if", "self", ".", "is_ordered", ":", "return", "list", "(", "self", ".", "species", ".", "keys", "(", ")", ")", "[", "0", "]", ".", "__str__", "(", ")", "else", ":", "sorted_species", "=", "sorted", "(", "self", ".", "species", ".", "keys", "(", ")", ")", "return", "\", \"", ".", "join", "(", "[", "\"{}:{:.3f}\"", ".", "format", "(", "sp", ",", "self", ".", "species", "[", "sp", "]", ")", "for", "sp", "in", "sorted_species", "]", ")" ]
37.9
14.7
def parse_experiment_params(name_exp):
    '''Parse experiment parameters from the data directory name

    Args
    ----
    name_exp: str
        Name of data directory with experiment parameters

    Returns
    -------
    tag_params: dict of str
        Dictionary of parsed experiment parameters
    '''

    if ('/' in name_exp) or ('\\' in name_exp):
        raise ValueError("The value {} appears to be a path. Please pass "
                         "only the data directory's name (i.e. the "
                         "experiment name)".format(name_exp))

    tag_params = dict()

    tag_params['experiment'] = name_exp
    tag_params['tag_model'] = (name_exp.split('_')[1]).replace('-', '')
    tag_params['tag_id'] = name_exp.split('_')[2]
    tag_params['animal'] = name_exp.split('_')[3]
    tag_params['notes'] = name_exp.split('_')[4]

    return tag_params
[ "def", "parse_experiment_params", "(", "name_exp", ")", ":", "if", "(", "'/'", "in", "name_exp", ")", "or", "(", "'\\\\'", "in", "name_exp", ")", ":", "raise", "ValueError", "(", "\"The path {} appears to be a path. Please pass \"", "\"only the data directory's name (i.e. the \"", "\"experiment name)\"", ".", "format", "(", "name_exp", ")", ")", "tag_params", "=", "dict", "(", ")", "tag_params", "[", "'experiment'", "]", "=", "name_exp", "tag_params", "[", "'tag_model'", "]", "=", "(", "name_exp", ".", "split", "(", "'_'", ")", "[", "1", "]", ")", ".", "replace", "(", "'-'", ",", "''", ")", "tag_params", "[", "'tag_id'", "]", "=", "name_exp", ".", "split", "(", "'_'", ")", "[", "2", "]", "tag_params", "[", "'animal'", "]", "=", "name_exp", ".", "split", "(", "'_'", ")", "[", "3", "]", "tag_params", "[", "'notes'", "]", "=", "name_exp", ".", "split", "(", "'_'", ")", "[", "4", "]", "return", "tag_params" ]
32.5
22.5
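A worked example of the underscore-delimited naming scheme the parser expects (the directory name itself is invented for illustration); note the dash in the tag model is stripped by the replace('-', '') call:

params = parse_experiment_params('20160418_W190-PD3GT_34840_Skinny_2neutral')
# params == {'experiment': '20160418_W190-PD3GT_34840_Skinny_2neutral',
#            'tag_model': 'W190PD3GT',
#            'tag_id':    '34840',
#            'animal':    'Skinny',
#            'notes':     '2neutral'}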
def clone(self):
    """Create a complete copy of the stream.

    :returns: A new MaterialStream object.
    """
    result = copy.copy(self)
    result._compound_mfrs = copy.deepcopy(self._compound_mfrs)
    return result
[ "def", "clone", "(", "self", ")", ":", "result", "=", "copy", ".", "copy", "(", "self", ")", "result", ".", "_compound_mfrs", "=", "copy", ".", "deepcopy", "(", "self", ".", "_compound_mfrs", ")", "return", "result" ]
29
19.625
def get_last_name_first_name(self):
    """
    :rtype: str
    """
    last_names = []
    if self._get_last_names():
        last_names += self._get_last_names()

    first_and_additional_names = []
    if self._get_first_names():
        first_and_additional_names += self._get_first_names()
    if self._get_additional_names():
        first_and_additional_names += self._get_additional_names()

    if last_names and first_and_additional_names:
        return "{}, {}".format(
            helpers.list_to_string(last_names, " "),
            helpers.list_to_string(first_and_additional_names, " "))
    elif last_names:
        return helpers.list_to_string(last_names, " ")
    elif first_and_additional_names:
        return helpers.list_to_string(first_and_additional_names, " ")
    else:
        return self.get_full_name()
[ "def", "get_last_name_first_name", "(", "self", ")", ":", "last_names", "=", "[", "]", "if", "self", ".", "_get_last_names", "(", ")", ":", "last_names", "+=", "self", ".", "_get_last_names", "(", ")", "first_and_additional_names", "=", "[", "]", "if", "self", ".", "_get_first_names", "(", ")", ":", "first_and_additional_names", "+=", "self", ".", "_get_first_names", "(", ")", "if", "self", ".", "_get_additional_names", "(", ")", ":", "first_and_additional_names", "+=", "self", ".", "_get_additional_names", "(", ")", "if", "last_names", "and", "first_and_additional_names", ":", "return", "\"{}, {}\"", ".", "format", "(", "helpers", ".", "list_to_string", "(", "last_names", ",", "\" \"", ")", ",", "helpers", ".", "list_to_string", "(", "first_and_additional_names", ",", "\" \"", ")", ")", "elif", "last_names", ":", "return", "helpers", ".", "list_to_string", "(", "last_names", ",", "\" \"", ")", "elif", "first_and_additional_names", ":", "return", "helpers", ".", "list_to_string", "(", "first_and_additional_names", ",", "\" \"", ")", "else", ":", "return", "self", ".", "get_full_name", "(", ")" ]
40.636364
12.727273
def new_post(blog_id, username, password, post, publish):
    """
    metaWeblog.newPost(blog_id, username, password, post, publish)
    => post_id
    """
    user = authenticate(username, password, 'zinnia.add_entry')
    if post.get('dateCreated'):
        creation_date = datetime.strptime(
            post['dateCreated'].value[:18], '%Y-%m-%dT%H:%M:%S')
        if settings.USE_TZ:
            creation_date = timezone.make_aware(
                creation_date, timezone.utc)
    else:
        creation_date = timezone.now()
    entry_dict = {'title': post['title'],
                  'content': post['description'],
                  'excerpt': post.get('mt_excerpt', ''),
                  'publication_date': creation_date,
                  'creation_date': creation_date,
                  'last_update': creation_date,
                  'comment_enabled': post.get('mt_allow_comments', 1) == 1,
                  'pingback_enabled': post.get('mt_allow_pings', 1) == 1,
                  'trackback_enabled': post.get('mt_allow_pings', 1) == 1,
                  'featured': post.get('sticky', 0) == 1,
                  'tags': 'mt_keywords' in post and post['mt_keywords'] or '',
                  'slug': 'wp_slug' in post and post['wp_slug'] or slugify(
                      post['title']),
                  'password': post.get('wp_password', '')}
    if user.has_perm('zinnia.can_change_status'):
        entry_dict['status'] = publish and PUBLISHED or DRAFT
    entry = Entry.objects.create(**entry_dict)
    author = user
    if 'wp_author_id' in post and user.has_perm('zinnia.can_change_author'):
        if int(post['wp_author_id']) != user.pk:
            author = Author.objects.get(pk=post['wp_author_id'])
    entry.authors.add(author)
    entry.sites.add(Site.objects.get_current())
    if 'categories' in post:
        entry.categories.add(*[
            Category.objects.get_or_create(
                title=cat, slug=slugify(cat))[0]
            for cat in post['categories']])
    return entry.pk
[ "def", "new_post", "(", "blog_id", ",", "username", ",", "password", ",", "post", ",", "publish", ")", ":", "user", "=", "authenticate", "(", "username", ",", "password", ",", "'zinnia.add_entry'", ")", "if", "post", ".", "get", "(", "'dateCreated'", ")", ":", "creation_date", "=", "datetime", ".", "strptime", "(", "post", "[", "'dateCreated'", "]", ".", "value", "[", ":", "18", "]", ",", "'%Y-%m-%dT%H:%M:%S'", ")", "if", "settings", ".", "USE_TZ", ":", "creation_date", "=", "timezone", ".", "make_aware", "(", "creation_date", ",", "timezone", ".", "utc", ")", "else", ":", "creation_date", "=", "timezone", ".", "now", "(", ")", "entry_dict", "=", "{", "'title'", ":", "post", "[", "'title'", "]", ",", "'content'", ":", "post", "[", "'description'", "]", ",", "'excerpt'", ":", "post", ".", "get", "(", "'mt_excerpt'", ",", "''", ")", ",", "'publication_date'", ":", "creation_date", ",", "'creation_date'", ":", "creation_date", ",", "'last_update'", ":", "creation_date", ",", "'comment_enabled'", ":", "post", ".", "get", "(", "'mt_allow_comments'", ",", "1", ")", "==", "1", ",", "'pingback_enabled'", ":", "post", ".", "get", "(", "'mt_allow_pings'", ",", "1", ")", "==", "1", ",", "'trackback_enabled'", ":", "post", ".", "get", "(", "'mt_allow_pings'", ",", "1", ")", "==", "1", ",", "'featured'", ":", "post", ".", "get", "(", "'sticky'", ",", "0", ")", "==", "1", ",", "'tags'", ":", "'mt_keywords'", "in", "post", "and", "post", "[", "'mt_keywords'", "]", "or", "''", ",", "'slug'", ":", "'wp_slug'", "in", "post", "and", "post", "[", "'wp_slug'", "]", "or", "slugify", "(", "post", "[", "'title'", "]", ")", ",", "'password'", ":", "post", ".", "get", "(", "'wp_password'", ",", "''", ")", "}", "if", "user", ".", "has_perm", "(", "'zinnia.can_change_status'", ")", ":", "entry_dict", "[", "'status'", "]", "=", "publish", "and", "PUBLISHED", "or", "DRAFT", "entry", "=", "Entry", ".", "objects", ".", "create", "(", "*", "*", "entry_dict", ")", "author", "=", "user", "if", "'wp_author_id'", "in", "post", "and", "user", ".", "has_perm", "(", "'zinnia.can_change_author'", ")", ":", "if", "int", "(", "post", "[", "'wp_author_id'", "]", ")", "!=", "user", ".", "pk", ":", "author", "=", "Author", ".", "objects", ".", "get", "(", "pk", "=", "post", "[", "'wp_author_id'", "]", ")", "entry", ".", "authors", ".", "add", "(", "author", ")", "entry", ".", "sites", ".", "add", "(", "Site", ".", "objects", ".", "get_current", "(", ")", ")", "if", "'categories'", "in", "post", ":", "entry", ".", "categories", ".", "add", "(", "*", "[", "Category", ".", "objects", ".", "get_or_create", "(", "title", "=", "cat", ",", "slug", "=", "slugify", "(", "cat", ")", ")", "[", "0", "]", "for", "cat", "in", "post", "[", "'categories'", "]", "]", ")", "return", "entry", ".", "pk" ]
41.395833
17.770833
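A client-side sketch of exercising this metaWeblog handler over XML-RPC; the endpoint URL, blog id, and credentials are placeholders. Omitting dateCreated makes the handler fall back to timezone.now():

try:
    from xmlrpc.client import ServerProxy  # Python 3
except ImportError:
    from xmlrpclib import ServerProxy      # Python 2

server = ServerProxy('http://example.com/xmlrpc/')
post = {'title': 'Hello world',
        'description': '<p>First post.</p>',
        'mt_keywords': 'intro, meta',
        'wp_slug': 'hello-world'}
post_id = server.metaWeblog.newPost(1, 'admin', 'password', post, True)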
def _IOC(cls, dir, op, structure=None):
    """ Encode an ioctl id. """
    control = cls(dir, op, structure)

    def do(dev, **args):
        return control(dev, **args)

    return do
[ "def", "_IOC", "(", "cls", ",", "dir", ",", "op", ",", "structure", "=", "None", ")", ":", "control", "=", "cls", "(", "dir", ",", "op", ",", "structure", ")", "def", "do", "(", "dev", ",", "*", "*", "args", ")", ":", "return", "control", "(", "dev", ",", "*", "*", "args", ")", "return", "do" ]
28.428571
11.142857
def nonblock_read(stream, limit=None, forceMode=None):
    '''
    nonblock_read - Read any data available on the given stream (file, socket, etc) without blocking and regardless of newlines.

        @param stream <object> - A stream (like a file object or a socket)
        @param limit <None/int> - Max number of bytes to read. If None or 0, will read as much data as is available.
        @param forceMode <None/mode string> - Default None. Will be autodetected if None. If you want to explicitly force a mode, provide 'b' for binary (bytes) or 't' for text (str). This determines the return type.

        @return <str or bytes depending on stream's mode> - Any data available on the stream, or "None" if the stream was closed on the other side and all data has already been read.
    '''
    bytesRead = 0
    ret = []

    if forceMode:
        if 'b' in forceMode:
            streamMode = bytes
        elif 't' in forceMode:
            streamMode = str
        else:
            streamMode = detect_stream_mode(stream)
    else:
        streamMode = detect_stream_mode(stream)

    emptyStr = streamMode()

    # Determine if our function is "read" (file-like objects) or "recv" (socket-like objects)
    if hasattr(stream, 'read'):
        readByte = lambda: stream.read(1)
    elif hasattr(stream, 'recv'):
        readByte = lambda: stream.recv(1)
    else:
        raise ValueError('Cannot determine how to read from provided stream, %s.' % (repr(stream),))

    while True:
        # Check if data on stream is immediately available
        (readyToRead, junk1, junk2) = select.select([stream], [], [], .000001)

        if not readyToRead:
            break

        c = readByte()

        if c == emptyStr:
            # Stream has been closed
            if not ret:
                # All data already read, so return None
                return None
            # Otherwise, return data collected. Next call will return None.
            break

        bytesRead += 1
        ret.append(c)

        if limit and bytesRead >= limit:
            break

    return emptyStr.join(ret)
[ "def", "nonblock_read", "(", "stream", ",", "limit", "=", "None", ",", "forceMode", "=", "None", ")", ":", "bytesRead", "=", "0", "ret", "=", "[", "]", "if", "forceMode", ":", "if", "'b'", "in", "forceMode", ":", "streamMode", "=", "bytes", "elif", "'t'", "in", "forceMode", ":", "streamMode", "=", "str", "else", ":", "streamMode", "=", "detect_stream_mode", "(", "stream", ")", "else", ":", "streamMode", "=", "detect_stream_mode", "(", "stream", ")", "emptyStr", "=", "streamMode", "(", ")", "# Determine if our function is \"read\" (file-like objects) or \"recv\" (socket-like objects)", "if", "hasattr", "(", "stream", ",", "'read'", ")", ":", "readByte", "=", "lambda", ":", "stream", ".", "read", "(", "1", ")", "elif", "hasattr", "(", "stream", ",", "'recv'", ")", ":", "readByte", "=", "lambda", ":", "stream", ".", "recv", "(", "1", ")", "else", ":", "raise", "ValueError", "(", "'Cannot determine how to read from provided stream, %s.'", "%", "(", "repr", "(", "stream", ")", ",", ")", ")", "while", "True", ":", "# Check if data on stream is immediately available", "(", "readyToRead", ",", "junk1", ",", "junk2", ")", "=", "select", ".", "select", "(", "[", "stream", "]", ",", "[", "]", ",", "[", "]", ",", ".000001", ")", "if", "not", "readyToRead", ":", "break", "c", "=", "readByte", "(", ")", "if", "c", "==", "emptyStr", ":", "# Stream has been closed", "if", "not", "ret", ":", "# All data already read, so return None", "return", "None", "# Otherwise, return data collected. Next call will return None.", "break", "bytesRead", "+=", "1", "ret", ".", "append", "(", "c", ")", "if", "limit", "and", "bytesRead", ">=", "limit", ":", "break", "return", "emptyStr", ".", "join", "(", "ret", ")" ]
36.839286
31.267857
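A quick self-contained check of the function above using an OS pipe; both ends are unbuffered file objects, so the 'read' branch is exercised (this assumes the module's detect_stream_mode helper reports bytes for a binary stream):

import os

r_fd, w_fd = os.pipe()
reader = os.fdopen(r_fd, 'rb', 0)  # unbuffered, so select() reflects reality
writer = os.fdopen(w_fd, 'wb')

writer.write(b'partial data')
writer.flush()
print(nonblock_read(reader))  # b'partial data'
print(nonblock_read(reader))  # b'' -- nothing pending, stream still open
writer.close()
print(nonblock_read(reader))  # None -- writer side closed, nothing left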
def n_members(self):
    """
    Returns the number of members in the domain if it `is_finite`,
    otherwise, returns `np.inf`.

    :type: ``int`` or ``np.inf``
    """
    if self.is_finite:
        return reduce(mul, [domain.n_members for domain in self._domains], 1)
    else:
        return np.inf
[ "def", "n_members", "(", "self", ")", ":", "if", "self", ".", "is_finite", ":", "return", "reduce", "(", "mul", ",", "[", "domain", ".", "n_members", "for", "domain", "in", "self", ".", "_domains", "]", ",", "1", ")", "else", ":", "return", "np", ".", "inf" ]
29.909091
17
def sample(
    self,
    n=None,
    frac=None,
    replace=False,
    weights=None,
    random_state=None,
    axis=None,
):
    """Returns a random sample of items from an axis of object.

    Args:
        n: Number of items from axis to return. Cannot be used with frac.
            Default = 1 if frac = None.
        frac: Fraction of axis items to return. Cannot be used with n.
        replace: Sample with or without replacement. Default = False.
        weights: Default 'None' results in equal probability weighting.
            If passed a Series, will align with target object on index.
            Index values in weights not found in sampled object will be
            ignored and index values in sampled object not in weights will
            be assigned weights of zero. If called on a DataFrame, will
            accept the name of a column when axis = 0. Unless weights are
            a Series, weights must be same length as axis being sampled.
            If weights do not sum to 1, they will be normalized to sum to 1.
            Missing values in the weights column will be treated as zero.
            inf and -inf values not allowed.
        random_state: Seed for the random number generator (if int), or
            numpy RandomState object.
        axis: Axis to sample. Accepts axis number or name.

    Returns:
        A new Dataframe
    """
    axis = self._get_axis_number(axis) if axis is not None else 0
    if axis:
        axis_labels = self.columns
        axis_length = len(axis_labels)
    else:
        # Getting rows requires indices instead of labels. RangeIndex provides this.
        axis_labels = pandas.RangeIndex(len(self.index))
        axis_length = len(axis_labels)
    if weights is not None:
        # Index of the weights Series should correspond to the index of the
        # Dataframe in order to sample
        if isinstance(weights, BasePandasDataset):
            weights = weights.reindex(self.axes[axis])
        # If weights arg is a string, the weights used for sampling will
        # the be values in the column corresponding to that string
        if isinstance(weights, string_types):
            if axis == 0:
                try:
                    weights = self[weights]
                except KeyError:
                    raise KeyError("String passed to weights not a valid column")
            else:
                raise ValueError(
                    "Strings can only be passed to "
                    "weights when sampling from rows on "
                    "a DataFrame"
                )
        weights = pandas.Series(weights, dtype="float64")
        if len(weights) != axis_length:
            raise ValueError(
                "Weights and axis to be sampled must be of same length"
            )
        if (weights == np.inf).any() or (weights == -np.inf).any():
            raise ValueError("weight vector may not include `inf` values")
        if (weights < 0).any():
            raise ValueError("weight vector may not include negative values")
        # weights cannot be NaN when sampling, so we must set all nan
        # values to 0
        weights = weights.fillna(0)
        # If passed in weights are not equal to 1, renormalize them
        # otherwise numpy sampling function will error
        weights_sum = weights.sum()
        if weights_sum != 1:
            if weights_sum != 0:
                weights = weights / weights_sum
            else:
                raise ValueError("Invalid weights: weights sum to zero")
        weights = weights.values

    if n is None and frac is None:
        # default to n = 1 if n and frac are both None (in accordance with
        # Pandas specification)
        n = 1
    elif n is not None and frac is None and n % 1 != 0:
        # n must be an integer
        raise ValueError("Only integers accepted as `n` values")
    elif n is None and frac is not None:
        # compute the number of samples based on frac
        n = int(round(frac * axis_length))
    elif n is not None and frac is not None:
        # Pandas specification does not allow both n and frac to be passed
        # in
        raise ValueError("Please enter a value for `frac` OR `n`, not both")
    if n < 0:
        raise ValueError(
            "A negative number of rows requested. Please provide positive value."
        )
    if n == 0:
        # This returns an empty object, and since it is a weird edge case that
        # doesn't need to be distributed, we default to pandas for n=0.
        return self._default_to_pandas(
            "sample",
            n=n,
            frac=frac,
            replace=replace,
            weights=weights,
            random_state=random_state,
            axis=axis,
        )
    if random_state is not None:
        # Get a random number generator depending on the type of
        # random_state that is passed in
        if isinstance(random_state, int):
            random_num_gen = np.random.RandomState(random_state)
        elif isinstance(random_state, np.random.RandomState):
            random_num_gen = random_state
        else:
            # random_state must be an int or a numpy RandomState object
            raise ValueError(
                "Please enter an `int` OR a "
                "np.random.RandomState for random_state"
            )
        # choose random numbers and then get corresponding labels from
        # chosen axis
        sample_indices = random_num_gen.choice(
            np.arange(0, axis_length), size=n, replace=replace, p=weights
        )
        samples = axis_labels[sample_indices]
    else:
        # randomly select labels from chosen axis
        samples = np.random.choice(
            a=axis_labels, size=n, replace=replace, p=weights
        )
    if axis:
        query_compiler = self._query_compiler.getitem_column_array(samples)
        return self.__constructor__(query_compiler=query_compiler)
    else:
        query_compiler = self._query_compiler.getitem_row_array(samples)
        return self.__constructor__(query_compiler=query_compiler)
[ "def", "sample", "(", "self", ",", "n", "=", "None", ",", "frac", "=", "None", ",", "replace", "=", "False", ",", "weights", "=", "None", ",", "random_state", "=", "None", ",", "axis", "=", "None", ",", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "is", "not", "None", "else", "0", "if", "axis", ":", "axis_labels", "=", "self", ".", "columns", "axis_length", "=", "len", "(", "axis_labels", ")", "else", ":", "# Getting rows requires indices instead of labels. RangeIndex provides this.\r", "axis_labels", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "self", ".", "index", ")", ")", "axis_length", "=", "len", "(", "axis_labels", ")", "if", "weights", "is", "not", "None", ":", "# Index of the weights Series should correspond to the index of the\r", "# Dataframe in order to sample\r", "if", "isinstance", "(", "weights", ",", "BasePandasDataset", ")", ":", "weights", "=", "weights", ".", "reindex", "(", "self", ".", "axes", "[", "axis", "]", ")", "# If weights arg is a string, the weights used for sampling will\r", "# the be values in the column corresponding to that string\r", "if", "isinstance", "(", "weights", ",", "string_types", ")", ":", "if", "axis", "==", "0", ":", "try", ":", "weights", "=", "self", "[", "weights", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "\"String passed to weights not a valid column\"", ")", "else", ":", "raise", "ValueError", "(", "\"Strings can only be passed to \"", "\"weights when sampling from rows on \"", "\"a DataFrame\"", ")", "weights", "=", "pandas", ".", "Series", "(", "weights", ",", "dtype", "=", "\"float64\"", ")", "if", "len", "(", "weights", ")", "!=", "axis_length", ":", "raise", "ValueError", "(", "\"Weights and axis to be sampled must be of same length\"", ")", "if", "(", "weights", "==", "np", ".", "inf", ")", ".", "any", "(", ")", "or", "(", "weights", "==", "-", "np", ".", "inf", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"weight vector may not include `inf` values\"", ")", "if", "(", "weights", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"weight vector many not include negative values\"", ")", "# weights cannot be NaN when sampling, so we must set all nan\r", "# values to 0\r", "weights", "=", "weights", ".", "fillna", "(", "0", ")", "# If passed in weights are not equal to 1, renormalize them\r", "# otherwise numpy sampling function will error\r", "weights_sum", "=", "weights", ".", "sum", "(", ")", "if", "weights_sum", "!=", "1", ":", "if", "weights_sum", "!=", "0", ":", "weights", "=", "weights", "/", "weights_sum", "else", ":", "raise", "ValueError", "(", "\"Invalid weights: weights sum to zero\"", ")", "weights", "=", "weights", ".", "values", "if", "n", "is", "None", "and", "frac", "is", "None", ":", "# default to n = 1 if n and frac are both None (in accordance with\r", "# Pandas specification)\r", "n", "=", "1", "elif", "n", "is", "not", "None", "and", "frac", "is", "None", "and", "n", "%", "1", "!=", "0", ":", "# n must be an integer\r", "raise", "ValueError", "(", "\"Only integers accepted as `n` values\"", ")", "elif", "n", "is", "None", "and", "frac", "is", "not", "None", ":", "# compute the number of samples based on frac\r", "n", "=", "int", "(", "round", "(", "frac", "*", "axis_length", ")", ")", "elif", "n", "is", "not", "None", "and", "frac", "is", "not", "None", ":", "# Pandas specification does not allow both n and frac to be passed\r", "# in\r", "raise", "ValueError", "(", "\"Please enter a value for `frac` OR `n`, 
not both\"", ")", "if", "n", "<", "0", ":", "raise", "ValueError", "(", "\"A negative number of rows requested. Please provide positive value.\"", ")", "if", "n", "==", "0", ":", "# This returns an empty object, and since it is a weird edge case that\r", "# doesn't need to be distributed, we default to pandas for n=0.\r", "return", "self", ".", "_default_to_pandas", "(", "\"sample\"", ",", "n", "=", "n", ",", "frac", "=", "frac", ",", "replace", "=", "replace", ",", "weights", "=", "weights", ",", "random_state", "=", "random_state", ",", "axis", "=", "axis", ",", ")", "if", "random_state", "is", "not", "None", ":", "# Get a random number generator depending on the type of\r", "# random_state that is passed in\r", "if", "isinstance", "(", "random_state", ",", "int", ")", ":", "random_num_gen", "=", "np", ".", "random", ".", "RandomState", "(", "random_state", ")", "elif", "isinstance", "(", "random_state", ",", "np", ".", "random", ".", "randomState", ")", ":", "random_num_gen", "=", "random_state", "else", ":", "# random_state must be an int or a numpy RandomState object\r", "raise", "ValueError", "(", "\"Please enter an `int` OR a \"", "\"np.random.RandomState for random_state\"", ")", "# choose random numbers and then get corresponding labels from\r", "# chosen axis\r", "sample_indices", "=", "random_num_gen", ".", "choice", "(", "np", ".", "arange", "(", "0", ",", "axis_length", ")", ",", "size", "=", "n", ",", "replace", "=", "replace", ",", "p", "=", "weights", ")", "samples", "=", "axis_labels", "[", "sample_indices", "]", "else", ":", "# randomly select labels from chosen axis\r", "samples", "=", "np", ".", "random", ".", "choice", "(", "a", "=", "axis_labels", ",", "size", "=", "n", ",", "replace", "=", "replace", ",", "p", "=", "weights", ")", "if", "axis", ":", "query_compiler", "=", "self", ".", "_query_compiler", ".", "getitem_column_array", "(", "samples", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "query_compiler", ")", "else", ":", "query_compiler", "=", "self", ".", "_query_compiler", ".", "getitem_row_array", "(", "samples", ")", "return", "self", ".", "__constructor__", "(", "query_compiler", "=", "query_compiler", ")" ]
46.391608
21.048951
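A usage sketch against this pandas-compatible sample API; it assumes `pd` is the module providing the DataFrame above (e.g. a Modin-style drop-in for pandas):

import numpy as np

df = pd.DataFrame({'a': range(10), 'b': np.arange(10) * 2.0})

df.sample(n=3, random_state=42)    # three rows, reproducible
df.sample(frac=0.5, replace=True)  # half the rows, with replacement
df.sample(n=1, axis=1)             # one randomly chosen column
df.sample(n=5, weights='b')        # rows weighted by the values in column 'b'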
def make_config(self, instance_relative=False):
    """Used to create the config attribute by the Flask constructor.
    The `instance_relative` parameter is passed in from the constructor
    of Flask (there named `instance_relative_config`) and indicates if
    the config should be relative to the instance path or the root path
    of the application.

    .. versionadded:: 0.8
    """
    root_path = self.root_path
    if instance_relative:
        root_path = self.instance_path
    return Config(root_path, self.default_config)
[ "def", "make_config", "(", "self", ",", "instance_relative", "=", "False", ")", ":", "root_path", "=", "self", ".", "root_path", "if", "instance_relative", ":", "root_path", "=", "self", ".", "instance_path", "return", "Config", "(", "root_path", ",", "self", ".", "default_config", ")" ]
43.692308
15.923077
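A short illustration of what the instance_relative flag changes in practice; the config filename is a placeholder:

from flask import Flask

app = Flask(__name__, instance_relative_config=True)
# make_config ran inside the constructor; relative config files now
# resolve against app.instance_path instead of app.root_path
app.config.from_pyfile('settings.cfg', silent=True)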
def parse_mark_duplicate_metrics(fn):
    """
    Parse the output from Picard's MarkDuplicates and return as pandas
    Series.

    Parameters
    ----------
    fn : str of filename or file handle
        Filename of the Picard output you want to parse.

    Returns
    -------
    metrics : pandas.Series
        Duplicate metrics.

    hist : pandas.Series
        Duplicate histogram.
    """
    with open(fn) as f:
        lines = [x.strip().split('\t') for x in f.readlines()]
    metrics = pd.Series(lines[7], lines[6])
    m = pd.to_numeric(metrics[metrics.index[1:]])
    metrics[m.index] = m.values
    vals = np.array(lines[11:-1])
    hist = pd.Series(vals[:, 1], index=[int(float(x)) for x in vals[:, 0]])
    hist = pd.to_numeric(hist)
    return metrics, hist
[ "def", "parse_mark_duplicate_metrics", "(", "fn", ")", ":", "with", "open", "(", "fn", ")", "as", "f", ":", "lines", "=", "[", "x", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", "for", "x", "in", "f", ".", "readlines", "(", ")", "]", "metrics", "=", "pd", ".", "Series", "(", "lines", "[", "7", "]", ",", "lines", "[", "6", "]", ")", "m", "=", "pd", ".", "to_numeric", "(", "metrics", "[", "metrics", ".", "index", "[", "1", ":", "]", "]", ")", "metrics", "[", "m", ".", "index", "]", "=", "m", ".", "values", "vals", "=", "np", ".", "array", "(", "lines", "[", "11", ":", "-", "1", "]", ")", "hist", "=", "pd", ".", "Series", "(", "vals", "[", ":", ",", "1", "]", ",", "index", "=", "[", "int", "(", "float", "(", "x", ")", ")", "for", "x", "in", "vals", "[", ":", ",", "0", "]", "]", ")", "hist", "=", "pd", ".", "to_numeric", "(", "hist", ")", "return", "metrics", ",", "hist" ]
26.137931
19.862069
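A usage sketch assuming a MarkDuplicates metrics file on disk; the filename is a placeholder, and PERCENT_DUPLICATION is one of the column names Picard emits in its metrics block:

metrics, hist = parse_mark_duplicate_metrics('sample.dup_metrics.txt')
print(metrics['PERCENT_DUPLICATION'])  # fraction of duplicated reads
print(hist.head())                     # duplication-level histogram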
def transformToNative(obj):
    """
    Convert comma separated periods into tuples.
    """
    if obj.isNative:
        return obj
    obj.isNative = True
    if obj.value == '':
        obj.value = []
        return obj
    tzinfo = getTzid(getattr(obj, 'tzid_param', None))
    obj.value = [stringToPeriod(x, tzinfo) for x in obj.value.split(",")]
    return obj
[ "def", "transformToNative", "(", "obj", ")", ":", "if", "obj", ".", "isNative", ":", "return", "obj", "obj", ".", "isNative", "=", "True", "if", "obj", ".", "value", "==", "''", ":", "obj", ".", "value", "=", "[", "]", "return", "obj", "tzinfo", "=", "getTzid", "(", "getattr", "(", "obj", ",", "'tzid_param'", ",", "None", ")", ")", "obj", ".", "value", "=", "[", "stringToPeriod", "(", "x", ",", "tzinfo", ")", "for", "x", "in", "obj", ".", "value", ".", "split", "(", "\",\"", ")", "]", "return", "obj" ]
30.923077
14.923077
def morlet(freq, s_freq, ratio=5, sigma_f=None, dur_in_sd=4, dur_in_s=None,
           normalization='peak', zero_mean=False):
    """Create a Morlet wavelet.

    Parameters
    ----------
    freq : float
        central frequency of the wavelet
    s_freq : int
        sampling frequency
    ratio : float
        ratio for a wavelet family ( = freq / sigma_f)
    sigma_f : float
        standard deviation of the wavelet in frequency domain
    dur_in_sd : float
        duration of the wavelet, given as number of the standard deviation
        in the time domain, in one side.
    dur_in_s : float
        total duration of the wavelet, two-sided (i.e. from start to finish)
    normalization : str
        'area' means that energy is normalized to 1, 'peak' means that the
        peak is set at 1, 'max' is a normalization used by nitime which
        does not change max value of output when you change sigma_f.
    zero_mean : bool
        make sure that the wavelet has zero mean (only relevant if ratio < 5)

    Returns
    -------
    ndarray
        vector containing the complex Morlet wavelets

    Notes
    -----
    'ratio' and 'sigma_f' are mutually exclusive. If you use 'sigma_f', the
    standard deviation stays the same for all the frequency. It's more
    common to specify a constant ratio for the wavelet family, so that the
    frequency resolution changes with the frequency of interest.

    'dur_in_sd' and 'dur_in_s' are mutually exclusive. 'dur_in_s' specifies
    the total duration (from start to finish) of the window. 'dur_in_sd'
    calculates the total duration as the length in standard deviations in
    the time domain:

        dur_in_s = dur_in_sd * 2 * sigma_t, with sigma_t = 1 / (2 * pi * sigma_f)
    """
    if sigma_f is None:
        sigma_f = freq / ratio
    else:
        ratio = freq / sigma_f
    sigma_t = 1 / (2 * pi * sigma_f)

    if ratio < 5 and not zero_mean:
        lg.info('The wavelet won\'t have zero mean, set zero_mean=True to '
                'correct it')

    if dur_in_s is None:
        dur_in_s = sigma_t * dur_in_sd * 2

    t = arange(-dur_in_s / 2, dur_in_s / 2, 1 / s_freq)

    w = exp(1j * 2 * pi * freq * t)
    if zero_mean:
        w -= exp(-1 / 2 * ratio ** 2)

    w *= exp(-t ** 2 / (2 * sigma_t ** 2))

    if normalization == 'area':
        w /= sqrt(sqrt(pi) * sigma_t * s_freq)
    elif normalization == 'max':
        w /= 2 * sigma_t * sqrt(2 * pi) / s_freq
    elif normalization == 'peak':
        pass

    lg.info('At freq {0: 9.3f}Hz, sigma_f={1: 9.3f}Hz, sigma_t={2: 9.3f}s, '
            'total duration={3: 9.3f}s'.format(freq, sigma_f, sigma_t,
                                               dur_in_s))
    lg.debug('    Real peak={0: 9.3f}, Mean={1: 12.6f}, '
             'Energy={2: 9.3f}'.format(max(real(w)), mean(w), norm(w) ** 2))

    return w
[ "def", "morlet", "(", "freq", ",", "s_freq", ",", "ratio", "=", "5", ",", "sigma_f", "=", "None", ",", "dur_in_sd", "=", "4", ",", "dur_in_s", "=", "None", ",", "normalization", "=", "'peak'", ",", "zero_mean", "=", "False", ")", ":", "if", "sigma_f", "is", "None", ":", "sigma_f", "=", "freq", "/", "ratio", "else", ":", "ratio", "=", "freq", "/", "sigma_f", "sigma_t", "=", "1", "/", "(", "2", "*", "pi", "*", "sigma_f", ")", "if", "ratio", "<", "5", "and", "not", "zero_mean", ":", "lg", ".", "info", "(", "'The wavelet won\\'t have zero mean, set zero_mean=True to '", "'correct it'", ")", "if", "dur_in_s", "is", "None", ":", "dur_in_s", "=", "sigma_t", "*", "dur_in_sd", "*", "2", "t", "=", "arange", "(", "-", "dur_in_s", "/", "2", ",", "dur_in_s", "/", "2", ",", "1", "/", "s_freq", ")", "w", "=", "exp", "(", "1j", "*", "2", "*", "pi", "*", "freq", "*", "t", ")", "if", "zero_mean", ":", "w", "-=", "exp", "(", "-", "1", "/", "2", "*", "ratio", "**", "2", ")", "w", "*=", "exp", "(", "-", "t", "**", "2", "/", "(", "2", "*", "sigma_t", "**", "2", ")", ")", "if", "normalization", "==", "'area'", ":", "w", "/=", "sqrt", "(", "sqrt", "(", "pi", ")", "*", "sigma_t", "*", "s_freq", ")", "elif", "normalization", "==", "'max'", ":", "w", "/=", "2", "*", "sigma_t", "*", "sqrt", "(", "2", "*", "pi", ")", "/", "s_freq", "elif", "normalization", "==", "'peak'", ":", "pass", "lg", ".", "info", "(", "'At freq {0: 9.3f}Hz, sigma_f={1: 9.3f}Hz, sigma_t={2: 9.3f}s, '", "'total duration={3: 9.3f}s'", ".", "format", "(", "freq", ",", "sigma_f", ",", "sigma_t", ",", "dur_in_s", ")", ")", "lg", ".", "debug", "(", "' Real peak={0: 9.3f}, Mean={1: 12.6f}, '", "'Energy={2: 9.3f}'", ".", "format", "(", "max", "(", "real", "(", "w", ")", ")", ",", "mean", "(", "w", ")", ",", "norm", "(", "w", ")", "**", "2", ")", ")", "return", "w" ]
35.564103
23.820513
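A minimal sketch of using the wavelet above for a single-frequency power estimate by convolution; the test signal and sampling rate are illustrative:

from numpy import abs, arange, convolve, pi, sin

s_freq = 256
w = morlet(freq=10, s_freq=s_freq, ratio=5)

t = arange(0, 2, 1.0 / s_freq)
signal = sin(2 * pi * 10 * t)          # toy 10 Hz signal
power = abs(convolve(signal, w, mode='same')) ** 2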
def delete_genelist(list_id, case_id=None):
    """Delete a whole gene list with links to cases or a link."""
    if case_id:
        # unlink a case from a gene list
        case_obj = app.db.case(case_id)
        app.db.remove_genelist(list_id, case_obj=case_obj)
        return redirect(request.referrer)
    else:
        # remove the whole gene list
        app.db.remove_genelist(list_id)
        return redirect(url_for('.index'))
[ "def", "delete_genelist", "(", "list_id", ",", "case_id", "=", "None", ")", ":", "if", "case_id", ":", "# unlink a case from a gene list", "case_obj", "=", "app", ".", "db", ".", "case", "(", "case_id", ")", "app", ".", "db", ".", "remove_genelist", "(", "list_id", ",", "case_obj", "=", "case_obj", ")", "return", "redirect", "(", "request", ".", "referrer", ")", "else", ":", "# remove the whole gene list", "app", ".", "db", ".", "remove_genelist", "(", "list_id", ")", "return", "redirect", "(", "url_for", "(", "'.index'", ")", ")" ]
38.818182
7.818182
def get_all_items_of_confirmation(self, confirmation_id):
    """
    Get all items of confirmation
    This will iterate over all pages until it gets all elements.
    So if the rate limit is exceeded it will throw an Exception and you
    will get nothing

    :param confirmation_id: the confirmation id
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_items_of_confirmation_per_page,
        resource=CONFIRMATION_ITEMS,
        **{'confirmation_id': confirmation_id}
    )
[ "def", "get_all_items_of_confirmation", "(", "self", ",", "confirmation_id", ")", ":", "return", "self", ".", "_iterate_through_pages", "(", "get_function", "=", "self", ".", "get_items_of_confirmation_per_page", ",", "resource", "=", "CONFIRMATION_ITEMS", ",", "*", "*", "{", "'confirmation_id'", ":", "confirmation_id", "}", ")" ]
39.428571
16.857143
def draw(self):
    """
    Renders the class balance chart on the specified axes from support.
    """
    # Number of colors is either number of classes or 2
    colors = resolve_colors(len(self.support_))

    if self._mode == BALANCE:
        self.ax.bar(
            np.arange(len(self.support_)), self.support_,
            color=colors, align='center', width=0.5
        )
    # Compare mode
    else:
        bar_width = 0.35
        labels = ["train", "test"]

        for idx, support in enumerate(self.support_):
            index = np.arange(len(self.classes_))
            if idx > 0:
                index = index + bar_width

            self.ax.bar(
                index, support, bar_width,
                color=colors[idx], label=labels[idx]
            )

    return self.ax
[ "def", "draw", "(", "self", ")", ":", "# Number of colors is either number of classes or 2", "colors", "=", "resolve_colors", "(", "len", "(", "self", ".", "support_", ")", ")", "if", "self", ".", "_mode", "==", "BALANCE", ":", "self", ".", "ax", ".", "bar", "(", "np", ".", "arange", "(", "len", "(", "self", ".", "support_", ")", ")", ",", "self", ".", "support_", ",", "color", "=", "colors", ",", "align", "=", "'center'", ",", "width", "=", "0.5", ")", "# Compare mode", "else", ":", "bar_width", "=", "0.35", "labels", "=", "[", "\"train\"", ",", "\"test\"", "]", "for", "idx", ",", "support", "in", "enumerate", "(", "self", ".", "support_", ")", ":", "index", "=", "np", ".", "arange", "(", "len", "(", "self", ".", "classes_", ")", ")", "if", "idx", ">", "0", ":", "index", "=", "index", "+", "bar_width", "self", ".", "ax", ".", "bar", "(", "index", ",", "support", ",", "bar_width", ",", "color", "=", "colors", "[", "idx", "]", ",", "label", "=", "labels", "[", "idx", "]", ")", "return", "self", ".", "ax" ]
29.655172
19.241379
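The two-mode draw above matches Yellowbrick's ClassBalance visualizer; a typical call path, assuming a recent Yellowbrick layout where fit() computes support_ and triggers draw(), looks like:

from yellowbrick.target import ClassBalance

visualizer = ClassBalance()        # balance mode: one target vector
visualizer.fit(y_train)
visualizer.show()

visualizer = ClassBalance()        # compare mode: train vs. test supports
visualizer.fit(y_train, y_test)
visualizer.show()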
def get_subwords(self, word, on_unicode_error='strict'):
    """
    Given a word, get the subwords and their indices.
    """
    pair = self.f.getSubwords(word, on_unicode_error)
    return pair[0], np.array(pair[1])
[ "def", "get_subwords", "(", "self", ",", "word", ",", "on_unicode_error", "=", "'strict'", ")", ":", "pair", "=", "self", ".", "f", ".", "getSubwords", "(", "word", ",", "on_unicode_error", ")", "return", "pair", "[", "0", "]", ",", "np", ".", "array", "(", "pair", "[", "1", "]", ")" ]
39
8.666667
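A usage sketch with the fastText Python bindings; the model path is a placeholder for any trained model with subword information:

import fasttext

model = fasttext.load_model('cc.en.300.bin')  # placeholder model file
subwords, indices = model.get_subwords('hello')
print(subwords[:5], indices[:5])  # character n-grams and their bucket ids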
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    """
    item = super(ReferenceSamplesView, self).folderitem(obj, item, index)

    # ensure we have an object and not a brain
    obj = api.get_object(obj)
    url = api.get_url(obj)
    title = api.get_title(obj)

    item["Title"] = title
    item["replace"]["Title"] = get_link(url, value=title)
    item["allow_edit"] = self.get_editable_columns()

    # Supported Services
    supported_services_choices = self.make_supported_services_choices(obj)
    item["choices"]["SupportedServices"] = supported_services_choices

    # Position
    item["Position"] = "new"
    item["choices"]["Position"] = self.make_position_choices()

    return item
[ "def", "folderitem", "(", "self", ",", "obj", ",", "item", ",", "index", ")", ":", "item", "=", "super", "(", "ReferenceSamplesView", ",", "self", ")", ".", "folderitem", "(", "obj", ",", "item", ",", "index", ")", "# ensure we have an object and not a brain", "obj", "=", "api", ".", "get_object", "(", "obj", ")", "url", "=", "api", ".", "get_url", "(", "obj", ")", "title", "=", "api", ".", "get_title", "(", "obj", ")", "item", "[", "\"Title\"", "]", "=", "title", "item", "[", "\"replace\"", "]", "[", "\"Title\"", "]", "=", "get_link", "(", "url", ",", "value", "=", "title", ")", "item", "[", "\"allow_edit\"", "]", "=", "self", ".", "get_editable_columns", "(", ")", "# Supported Services", "supported_services_choices", "=", "self", ".", "make_supported_services_choices", "(", "obj", ")", "item", "[", "\"choices\"", "]", "[", "\"SupportedServices\"", "]", "=", "supported_services_choices", "# Position", "item", "[", "\"Position\"", "]", "=", "\"new\"", "item", "[", "\"choices\"", "]", "[", "\"Position\"", "]", "=", "self", ".", "make_position_choices", "(", ")", "return", "item" ]
35.8
21.9
def process(self, filename, encoding, **kwargs):
    """Process ``filename`` and encode byte-string with ``encoding``.

    This method is called by :func:`textract.parsers.process` and wraps
    the :meth:`.BaseParser.extract` method in `a delicious unicode
    sandwich <http://nedbatchelder.com/text/unipain.html>`_.
    """
    # make a "unicode sandwich" to handle dealing with unknown
    # input byte strings and converting them to a predictable
    # output encoding
    # http://nedbatchelder.com/text/unipain/unipain.html#35
    byte_string = self.extract(filename, **kwargs)
    unicode_string = self.decode(byte_string)
    return self.encode(unicode_string, encoding)
[ "def", "process", "(", "self", ",", "filename", ",", "encoding", ",", "*", "*", "kwargs", ")", ":", "# make a \"unicode sandwich\" to handle dealing with unknown", "# input byte strings and converting them to a predictable", "# output encoding", "# http://nedbatchelder.com/text/unipain/unipain.html#35", "byte_string", "=", "self", ".", "extract", "(", "filename", ",", "*", "*", "kwargs", ")", "unicode_string", "=", "self", ".", "decode", "(", "byte_string", ")", "return", "self", ".", "encode", "(", "unicode_string", ",", "encoding", ")" ]
51.071429
18.285714
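From the caller's side, the sandwich is invisible; textract's public entry point routes through this method, e.g. (the filename is a placeholder):

import textract

text = textract.process('report.pdf', encoding='utf_8')  # bytes, utf_8 encoded
print(text.decode('utf_8'))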
def reconstitute_path(drive, folders):
    """Reverts a tuple from `get_path_components` into a path.

    :param drive: A drive (eg 'c:'). Only applicable for NT systems
    :param folders: A list of folder names
    :return: A path comprising the drive and list of folder names. The
        path terminates with `os.path.sep` *only* if it is a root
        directory
    """
    reconstituted = os.path.join(drive, os.path.sep, *folders)
    return reconstituted
[ "def", "reconstitute_path", "(", "drive", ",", "folders", ")", ":", "reconstituted", "=", "os", ".", "path", ".", "join", "(", "drive", ",", "os", ".", "path", ".", "sep", ",", "*", "folders", ")", "return", "reconstituted" ]
45.3
18
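A worked round trip under the stated convention (drive is empty on POSIX systems):

reconstitute_path('c:', ['Users', 'guest', 'data'])
# -> 'c:\\Users\\guest\\data' on NT

reconstitute_path('', ['usr', 'local', 'bin'])
# -> '/usr/local/bin' on POSIX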
def lookup(id=None, artist_amg_id=None, upc=None, country='US', media='all',
           entity=None, attribute=None, limit=50):
    """
    Returns the result of the lookup of the specified id, artist_amg_id or upc in an array of result_item(s)

    :param id: String. iTunes ID of the artist, album, track, ebook or software
    :param artist_amg_id: String. All Music Guide ID of the artist
    :param upc: String. UPCs/EANs
    :param country: String. The two-letter country code for the store you want to search.
        For a full list of the codes: http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
    :param media: String. The media type you want to search for. Example: music
    :param entity: String. The type of results you want returned, relative to the specified media type.
        Example: musicArtist. Full list: musicArtist, musicTrack, album, musicVideo, mix, song
    :param attribute: String. The attribute you want to search for in the stores, relative to the specified media type.
    :param limit: Integer. The number of search results you want the iTunes Store to return.

    :return: An array of result_item(s)
    """
    # If none of the basic lookup arguments are provided, raise a ValueError
    if id is None and artist_amg_id is None and upc is None:
        raise ValueError(lookup_no_ids)
    lookup_url = _url_lookup_builder(id, artist_amg_id, upc, country, media,
                                     entity, attribute, limit)
    r = requests.get(lookup_url)
    try:
        json = r.json()['results']
        result_count = r.json()['resultCount']
    except Exception:
        raise ConnectionError(general_no_connection)
    if result_count == 0:
        raise LookupError(lookup_error)
    return _get_result_list(json)
[ "def", "lookup", "(", "id", "=", "None", ",", "artist_amg_id", "=", "None", ",", "upc", "=", "None", ",", "country", "=", "'US'", ",", "media", "=", "'all'", ",", "entity", "=", "None", ",", "attribute", "=", "None", ",", "limit", "=", "50", ")", ":", "# If none of the basic lookup arguments are provided, raise a ValueError", "if", "id", "is", "None", "and", "artist_amg_id", "is", "None", "and", "upc", "is", "None", ":", "raise", "ValueError", "(", "lookup_no_ids", ")", "lookup_url", "=", "_url_lookup_builder", "(", "id", ",", "artist_amg_id", ",", "upc", ",", "country", ",", "media", ",", "entity", ",", "attribute", ",", "limit", ")", "r", "=", "requests", ".", "get", "(", "lookup_url", ")", "try", ":", "json", "=", "r", ".", "json", "(", ")", "[", "'results'", "]", "result_count", "=", "r", ".", "json", "(", ")", "[", "'resultCount'", "]", "except", ":", "raise", "ConnectionError", "(", "general_no_connection", ")", "if", "result_count", "==", "0", ":", "raise", "LookupError", "(", "lookup_error", ")", "return", "_get_result_list", "(", "json", ")" ]
53.09375
31.84375
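A usage sketch for the iTunes lookup wrapper above; the ID and UPC values are real-looking placeholders, not verified store entries:

results = lookup(id='455832983', entity='album', limit=10)
for item in results:
    print(item)

# Or look up by barcode instead of iTunes ID:
results = lookup(upc='720642462928', entity='song')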
def check_surface_validity(cls, edges):
    """
    Check validity of the surface.

    Project edge points to vertical plane anchored to surface upper left
    edge and with strike equal to top edge strike. Check that resulting
    polygon is valid.

    This method doesn't have to be called by hand before creating the
    surface object, because it is called from :meth:`from_fault_data`.
    """
    # extract coordinates of surface boundary (as defined from edges)
    full_boundary = []
    left_boundary = []
    right_boundary = []

    for i in range(1, len(edges) - 1):
        left_boundary.append(edges[i].points[0])
        right_boundary.append(edges[i].points[-1])

    full_boundary.extend(edges[0].points)
    full_boundary.extend(right_boundary)
    full_boundary.extend(edges[-1].points[::-1])
    full_boundary.extend(left_boundary[::-1])

    lons = [p.longitude for p in full_boundary]
    lats = [p.latitude for p in full_boundary]
    depths = [p.depth for p in full_boundary]

    # define reference plane. Corner points are separated by an arbitrary
    # distance of 10 km. The mesh spacing is set to 2 km. Both corner
    # distance and mesh spacing values do not affect the algorithm results.
    ul = edges[0].points[0]
    strike = ul.azimuth(edges[0].points[-1])
    dist = 10.

    ur = ul.point_at(dist, 0, strike)
    bl = Point(ul.longitude, ul.latitude, ul.depth + dist)
    br = bl.point_at(dist, 0, strike)

    # project surface boundary to reference plane and check for
    # validity.
    ref_plane = PlanarSurface.from_corner_points(ul, ur, br, bl)
    _, xx, yy = ref_plane._project(
        spherical_to_cartesian(lons, lats, depths))
    coords = [(x, y) for x, y in zip(xx, yy)]
    p = shapely.geometry.Polygon(coords)
    if not p.is_valid:
        raise ValueError('Edges points are not in the right order')
[ "def", "check_surface_validity", "(", "cls", ",", "edges", ")", ":", "# extract coordinates of surface boundary (as defined from edges)", "full_boundary", "=", "[", "]", "left_boundary", "=", "[", "]", "right_boundary", "=", "[", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "edges", ")", "-", "1", ")", ":", "left_boundary", ".", "append", "(", "edges", "[", "i", "]", ".", "points", "[", "0", "]", ")", "right_boundary", ".", "append", "(", "edges", "[", "i", "]", ".", "points", "[", "-", "1", "]", ")", "full_boundary", ".", "extend", "(", "edges", "[", "0", "]", ".", "points", ")", "full_boundary", ".", "extend", "(", "right_boundary", ")", "full_boundary", ".", "extend", "(", "edges", "[", "-", "1", "]", ".", "points", "[", ":", ":", "-", "1", "]", ")", "full_boundary", ".", "extend", "(", "left_boundary", "[", ":", ":", "-", "1", "]", ")", "lons", "=", "[", "p", ".", "longitude", "for", "p", "in", "full_boundary", "]", "lats", "=", "[", "p", ".", "latitude", "for", "p", "in", "full_boundary", "]", "depths", "=", "[", "p", ".", "depth", "for", "p", "in", "full_boundary", "]", "# define reference plane. Corner points are separated by an arbitrary", "# distance of 10 km. The mesh spacing is set to 2 km. Both corner", "# distance and mesh spacing values do not affect the algorithm results.", "ul", "=", "edges", "[", "0", "]", ".", "points", "[", "0", "]", "strike", "=", "ul", ".", "azimuth", "(", "edges", "[", "0", "]", ".", "points", "[", "-", "1", "]", ")", "dist", "=", "10.", "ur", "=", "ul", ".", "point_at", "(", "dist", ",", "0", ",", "strike", ")", "bl", "=", "Point", "(", "ul", ".", "longitude", ",", "ul", ".", "latitude", ",", "ul", ".", "depth", "+", "dist", ")", "br", "=", "bl", ".", "point_at", "(", "dist", ",", "0", ",", "strike", ")", "# project surface boundary to reference plane and check for", "# validity.", "ref_plane", "=", "PlanarSurface", ".", "from_corner_points", "(", "ul", ",", "ur", ",", "br", ",", "bl", ")", "_", ",", "xx", ",", "yy", "=", "ref_plane", ".", "_project", "(", "spherical_to_cartesian", "(", "lons", ",", "lats", ",", "depths", ")", ")", "coords", "=", "[", "(", "x", ",", "y", ")", "for", "x", ",", "y", "in", "zip", "(", "xx", ",", "yy", ")", "]", "p", "=", "shapely", ".", "geometry", ".", "Polygon", "(", "coords", ")", "if", "not", "p", ".", "is_valid", ":", "raise", "ValueError", "(", "'Edges points are not in the right order'", ")" ]
40.22449
19.612245
def learn(self, initial_state_key, limit=1000, game_n=1):
        '''
        Multi-Agent Learning.

        Override.

        Args:
            initial_state_key:  Initial state.
            limit:              Upper bound on the number of time steps per game.
            game_n:             The number of games.
        '''
        end_flag_list = [False] * len(self.q_learning_list)
        for game in range(game_n):
            state_key = copy.copy(initial_state_key)
            self.t = 1
            while self.t <= limit:
                for i in range(len(self.q_learning_list)):
                    if game + 1 == game_n:
                        self.state_key_list.append((i, copy.copy(state_key)))

                    self.q_learning_list[i].t = self.t
                    next_action_list = self.q_learning_list[i].extract_possible_actions(state_key)
                    if len(next_action_list):
                        action_key = self.q_learning_list[i].select_action(
                            state_key=state_key,
                            next_action_list=next_action_list
                        )
                        reward_value = self.q_learning_list[i].observe_reward_value(state_key, action_key)

                        # Check.
                        if self.q_learning_list[i].check_the_end_flag(state_key) is True:
                            end_flag_list[i] = True

                        # Max-Q-Value in next action time.
                        next_state_key = self.q_learning_list[i].update_state(
                            state_key=state_key,
                            action_key=action_key
                        )

                        next_next_action_list = self.q_learning_list[i].extract_possible_actions(next_state_key)
                        if len(next_next_action_list):
                            next_action_key = self.q_learning_list[i].predict_next_action(
                                next_state_key,
                                next_next_action_list
                            )
                            next_max_q = self.q_learning_list[i].extract_q_df(next_state_key, next_action_key)

                            # Update Q-Value.
                            self.q_learning_list[i].update_q(
                                state_key=state_key,
                                action_key=action_key,
                                reward_value=reward_value,
                                next_max_q=next_max_q
                            )

                            # Update State.
                            state_key = next_state_key

                    # Episode.
                    self.t += 1
                    self.q_learning_list[i].t = self.t
                    if False not in end_flag_list:
                        break
[ "def", "learn", "(", "self", ",", "initial_state_key", ",", "limit", "=", "1000", ",", "game_n", "=", "1", ")", ":", "end_flag_list", "=", "[", "False", "]", "*", "len", "(", "self", ".", "q_learning_list", ")", "for", "game", "in", "range", "(", "game_n", ")", ":", "state_key", "=", "copy", ".", "copy", "(", "initial_state_key", ")", "self", ".", "t", "=", "1", "while", "self", ".", "t", "<=", "limit", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "q_learning_list", ")", ")", ":", "if", "game", "+", "1", "==", "game_n", ":", "self", ".", "state_key_list", ".", "append", "(", "(", "i", ",", "copy", ".", "copy", "(", "state_key", ")", ")", ")", "self", ".", "q_learning_list", "[", "i", "]", ".", "t", "=", "self", ".", "t", "next_action_list", "=", "self", ".", "q_learning_list", "[", "i", "]", ".", "extract_possible_actions", "(", "state_key", ")", "if", "len", "(", "next_action_list", ")", ":", "action_key", "=", "self", ".", "q_learning_list", "[", "i", "]", ".", "select_action", "(", "state_key", "=", "state_key", ",", "next_action_list", "=", "next_action_list", ")", "reward_value", "=", "self", ".", "q_learning_list", "[", "i", "]", ".", "observe_reward_value", "(", "state_key", ",", "action_key", ")", "# Check.", "if", "self", ".", "q_learning_list", "[", "i", "]", ".", "check_the_end_flag", "(", "state_key", ")", "is", "True", ":", "end_flag_list", "[", "i", "]", "=", "True", "# Max-Q-Value in next action time.", "next_state_key", "=", "self", ".", "q_learning_list", "[", "i", "]", ".", "update_state", "(", "state_key", "=", "state_key", ",", "action_key", "=", "action_key", ")", "next_next_action_list", "=", "self", ".", "q_learning_list", "[", "i", "]", ".", "extract_possible_actions", "(", "next_state_key", ")", "if", "len", "(", "next_next_action_list", ")", ":", "next_action_key", "=", "self", ".", "q_learning_list", "[", "i", "]", ".", "predict_next_action", "(", "next_state_key", ",", "next_next_action_list", ")", "next_max_q", "=", "self", ".", "q_learning_list", "[", "i", "]", ".", "extract_q_df", "(", "next_state_key", ",", "next_action_key", ")", "# Update Q-Value.", "self", ".", "q_learning_list", "[", "i", "]", ".", "update_q", "(", "state_key", "=", "state_key", ",", "action_key", "=", "action_key", ",", "reward_value", "=", "reward_value", ",", "next_max_q", "=", "next_max_q", ")", "# Update State.", "state_key", "=", "next_state_key", "# Epsode.", "self", ".", "t", "+=", "1", "self", ".", "q_learning_list", "[", "i", "]", ".", "t", "=", "self", ".", "t", "if", "False", "not", "in", "end_flag_list", ":", "break" ]
44.079365
22.269841
def render(self, name, value, attrs=None, renderer=None):
        """
        Returns this Widget rendered as HTML, as a Unicode string.

        The 'value' given is not guaranteed to be valid input, so subclass
        implementations should program defensively.
        """
        html = ''
        # Escape the displayed value as well as the hidden input, since the
        # whole string is passed through mark_safe() below.
        html += '%s' % escape(value)
        html += '<input type="hidden" name="%s" value="%s">' % (escape(name), escape(value))
        return mark_safe(html)
[ "def", "render", "(", "self", ",", "name", ",", "value", ",", "attrs", "=", "None", ",", "renderer", "=", "None", ")", ":", "html", "=", "''", "html", "+=", "'%s'", "%", "value", "html", "+=", "'<input type=\"hidden\" name=\"%s\" value=\"%s\">'", "%", "(", "escape", "(", "name", ")", ",", "escape", "(", "value", ")", ")", "return", "mark_safe", "(", "html", ")" ]
39.727273
20.454545
def apply_filter(objs, selector, mode): '''Apply selector to transform each object in objs. This operates in-place on objs. Empty objects are removed from the list. Args: mode: either KEEP (to keep selected items & their ancestors) or DELETE (to delete selected items and their children). ''' indices_to_delete = [] presumption = DELETE if mode == KEEP else KEEP for i, obj in enumerate(objs): timer.log('Applying selector: %s' % selector) marks = {k: mode for k in selector_to_ids(selector, obj, mode)} timer.log('done applying selector') timer.log('filtering object...') filter_object(obj, marks, presumption=presumption) timer.log('done filtering') if obj is None: indices_to_delete.append(i) for index in reversed(indices_to_delete): del objs[index]
[ "def", "apply_filter", "(", "objs", ",", "selector", ",", "mode", ")", ":", "indices_to_delete", "=", "[", "]", "presumption", "=", "DELETE", "if", "mode", "==", "KEEP", "else", "KEEP", "for", "i", ",", "obj", "in", "enumerate", "(", "objs", ")", ":", "timer", ".", "log", "(", "'Applying selector: %s'", "%", "selector", ")", "marks", "=", "{", "k", ":", "mode", "for", "k", "in", "selector_to_ids", "(", "selector", ",", "obj", ",", "mode", ")", "}", "timer", ".", "log", "(", "'done applying selector'", ")", "timer", ".", "log", "(", "'filtering object...'", ")", "filter_object", "(", "obj", ",", "marks", ",", "presumption", "=", "presumption", ")", "timer", ".", "log", "(", "'done filtering'", ")", "if", "obj", "is", "None", ":", "indices_to_delete", ".", "append", "(", "i", ")", "for", "index", "in", "reversed", "(", "indices_to_delete", ")", ":", "del", "objs", "[", "index", "]" ]
37.565217
18.869565
def from_flag(cls, flag): """ Find relation implementation in the current charm, based on the name of an active flag. You should not use this method directly. Use :func:`endpoint_from_flag` instead. """ value = _get_flag_value(flag) if value is None: return None relation_name = value['relation'] conversations = Conversation.load(value['conversations']) return cls.from_name(relation_name, conversations)
[ "def", "from_flag", "(", "cls", ",", "flag", ")", ":", "value", "=", "_get_flag_value", "(", "flag", ")", "if", "value", "is", "None", ":", "return", "None", "relation_name", "=", "value", "[", "'relation'", "]", "conversations", "=", "Conversation", ".", "load", "(", "value", "[", "'conversations'", "]", ")", "return", "cls", ".", "from_name", "(", "relation_name", ",", "conversations", ")" ]
35.214286
13.5
def query(self): """ Returns the full query for this widget. This will reflect the complete combined query for all containers within this widget. :return <orb.Query> """ if self._loadQuery is not None: return self._loadQuery container = self.widget(0) if container: query = container.query() else: query = Query() return query
[ "def", "query", "(", "self", ")", ":", "if", "self", ".", "_loadQuery", "is", "not", "None", ":", "return", "self", ".", "_loadQuery", "container", "=", "self", ".", "widget", "(", "0", ")", "if", "container", ":", "query", "=", "container", ".", "query", "(", ")", "else", ":", "query", "=", "Query", "(", ")", "return", "query" ]
27.882353
16.117647
def bind_socket(self, config): """ :meth:`.WNetworkNativeTransportProto.bind_socket` method implementation """ address = config[self.__bind_socket_config.section][self.__bind_socket_config.address_option] port = config.getint(self.__bind_socket_config.section, self.__bind_socket_config.port_option) return WIPV4SocketInfo(address, port)
[ "def", "bind_socket", "(", "self", ",", "config", ")", ":", "address", "=", "config", "[", "self", ".", "__bind_socket_config", ".", "section", "]", "[", "self", ".", "__bind_socket_config", ".", "address_option", "]", "port", "=", "config", ".", "getint", "(", "self", ".", "__bind_socket_config", ".", "section", ",", "self", ".", "__bind_socket_config", ".", "port_option", ")", "return", "WIPV4SocketInfo", "(", "address", ",", "port", ")" ]
57
20.333333
def fcsp_sa_fcsp_auth_proto_auth_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcsp_sa = ET.SubElement(config, "fcsp-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth") fcsp = ET.SubElement(fcsp_sa, "fcsp") auth = ET.SubElement(fcsp, "auth") proto = ET.SubElement(auth, "proto") auth_type = ET.SubElement(proto, "auth-type") auth_type.text = kwargs.pop('auth_type') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "fcsp_sa_fcsp_auth_proto_auth_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcsp_sa", "=", "ET", ".", "SubElement", "(", "config", ",", "\"fcsp-sa\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-fc-auth\"", ")", "fcsp", "=", "ET", ".", "SubElement", "(", "fcsp_sa", ",", "\"fcsp\"", ")", "auth", "=", "ET", ".", "SubElement", "(", "fcsp", ",", "\"auth\"", ")", "proto", "=", "ET", ".", "SubElement", "(", "auth", ",", "\"proto\"", ")", "auth_type", "=", "ET", ".", "SubElement", "(", "proto", ",", "\"auth-type\"", ")", "auth_type", ".", "text", "=", "kwargs", ".", "pop", "(", "'auth_type'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
42.153846
13.153846
def f(self): """ Time, in 12-hour hours and minutes, with minutes left off if they're zero. Examples: '1', '1:30', '2:05', '2' Proprietary extension. """ if self.data.minute == 0: return self.g() return u'%s:%s' % (self.g(), self.i())
[ "def", "f", "(", "self", ")", ":", "if", "self", ".", "data", ".", "minute", "==", "0", ":", "return", "self", ".", "g", "(", ")", "return", "u'%s:%s'", "%", "(", "self", ".", "g", "(", ")", ",", "self", ".", "i", "(", ")", ")" ]
30.1
12.9
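This record appears to match Django's `django.utils.dateformat.TimeFormat.f`; assuming that context, the docstring's examples can be reproduced directly:

from datetime import time
from django.utils.dateformat import TimeFormat  # assumed host class

for t in (time(1, 0), time(1, 30), time(14, 5)):
    print(TimeFormat(t).f())
# -> 1, 1:30, 2:05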
def parse_options(cls, options):
        """Extracts subdomain and endpoint values from the options dict and
        returns them along with a new dict without those values.
        """
        options = options.copy()
        subdomain = options.pop('subdomain', None)
        endpoint = options.pop('endpoint', None)
        return subdomain, endpoint, options
[ "def", "parse_options", "(", "cls", ",", "options", ")", ":", "options", "=", "options", ".", "copy", "(", ")", "subdomain", "=", "options", ".", "pop", "(", "'subdomain'", ",", "None", ")", "endpoint", "=", "options", ".", "pop", "(", "'endpoint'", ",", "None", ")", "return", "subdomain", ",", "endpoint", ",", "options", "," ]
44.875
7.125
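A quick behavioral sketch of `parse_options`. The host class name `Router` is hypothetical; note that the copy on the first line keeps the caller's dict intact.

opts = {'subdomain': 'api', 'endpoint': 'users', 'strict_slashes': False}
subdomain, endpoint, rest = Router.parse_options(opts)  # Router is hypothetical
assert (subdomain, endpoint) == ('api', 'users')
assert rest == {'strict_slashes': False}
assert 'subdomain' in opts  # the original dict is untouched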
def _add_initial_value(self, data_id, value, initial_dist=0.0, fringe=None,
                       check_cutoff=None, no_call=None):
        """
        Add initial values updating workflow, seen, and fringe.

        :param data_id: Data node id.
        :type data_id: str

        :param value: Data node value e.g., {'value': val}.
        :type value: dict[str, T]

        :param initial_dist:
            Data node initial distance in the ArciDispatch algorithm.
        :type initial_dist: float, int, optional

        :param fringe: Heapq of closest available nodes.
        :type fringe: list[(float | int, bool, (str, Dispatcher))]

        :param check_cutoff: Check the cutoff limit.
        :type check_cutoff: (int | float) -> bool

        :param no_call: If True data node estimation function is not used.
        :type no_call: bool

        :return: True if the data has been visited, otherwise False.
        :rtype: bool
        """

        # Namespace shortcuts for speed.
        nodes, seen, edge_weight = self.nodes, self.seen, self._edge_length
        wf_remove_edge, check_wait_in = self._wf_remove_edge, self.check_wait_in
        wf_add_edge, dsp_in = self._wf_add_edge, self._set_sub_dsp_node_input
        update_view = self._update_meeting

        if fringe is None:
            fringe = self.fringe

        if no_call is None:
            no_call = self.no_call

        check_cutoff = check_cutoff or self.check_cutoff

        if data_id not in nodes:  # Data node is not in the dmap.
            return False

        wait_in = nodes[data_id]['wait_inputs']  # Store wait inputs flag.

        index = nodes[data_id]['index']  # Store node index.

        wf_add_edge(START, data_id, **value)  # Add edge.

        if data_id in self._wildcards:  # Check if the data node has wildcard.
            self._visited.add(data_id)  # Update visited nodes.

            self.workflow.add_node(data_id)  # Add node to workflow.

            for w, edge_data in self.dmap[data_id].items():  # Visit successors.
                wf_add_edge(data_id, w, **value)  # Set workflow.

                node = nodes[w]  # Node attributes.

                # Evaluate distance.
                vw_dist = initial_dist + edge_weight(edge_data, node)

                update_view(w, vw_dist)  # Update view distance.

                # Check the cutoff limit and if all inputs are satisfied.
                if check_cutoff(vw_dist):
                    wf_remove_edge(data_id, w)  # Remove workflow edge.
                    continue  # Pass the node.
                elif node['type'] == 'dispatcher':
                    dsp_in(data_id, w, fringe, check_cutoff, no_call, vw_dist)
                elif check_wait_in(True, w):
                    continue  # Pass the node.

                seen[w] = vw_dist  # Update distance.

                vd = (True, w, self.index + node['index'])  # Virtual distance.

                heapq.heappush(fringe, (vw_dist, vd, (w, self)))  # Add to heapq.

            return True

        update_view(data_id, initial_dist)  # Update view distance.

        if check_cutoff(initial_dist):  # Check the cutoff limit.
            wf_remove_edge(START, data_id)  # Remove workflow edge.

        elif not check_wait_in(wait_in, data_id):  # Check inputs.
            seen[data_id] = initial_dist  # Update distance.

            vd = (wait_in, data_id, self.index + index)  # Virtual distance.

            # Add node to heapq.
            heapq.heappush(fringe, (initial_dist, vd, (data_id, self)))

            return True
        return False
[ "def", "_add_initial_value", "(", "self", ",", "data_id", ",", "value", ",", "initial_dist", "=", "0.0", ",", "fringe", "=", "None", ",", "check_cutoff", "=", "None", ",", "no_call", "=", "None", ")", ":", "# Namespace shortcuts for speed.", "nodes", ",", "seen", ",", "edge_weight", "=", "self", ".", "nodes", ",", "self", ".", "seen", ",", "self", ".", "_edge_length", "wf_remove_edge", ",", "check_wait_in", "=", "self", ".", "_wf_remove_edge", ",", "self", ".", "check_wait_in", "wf_add_edge", ",", "dsp_in", "=", "self", ".", "_wf_add_edge", ",", "self", ".", "_set_sub_dsp_node_input", "update_view", "=", "self", ".", "_update_meeting", "if", "fringe", "is", "None", ":", "fringe", "=", "self", ".", "fringe", "if", "no_call", "is", "None", ":", "no_call", "=", "self", ".", "no_call", "check_cutoff", "=", "check_cutoff", "or", "self", ".", "check_cutoff", "if", "data_id", "not", "in", "nodes", ":", "# Data node is not in the dmap.", "return", "False", "wait_in", "=", "nodes", "[", "data_id", "]", "[", "'wait_inputs'", "]", "# Store wait inputs flag.", "index", "=", "nodes", "[", "data_id", "]", "[", "'index'", "]", "# Store node index.", "wf_add_edge", "(", "START", ",", "data_id", ",", "*", "*", "value", ")", "# Add edge.", "if", "data_id", "in", "self", ".", "_wildcards", ":", "# Check if the data node has wildcard.", "self", ".", "_visited", ".", "add", "(", "data_id", ")", "# Update visited nodes.", "self", ".", "workflow", ".", "add_node", "(", "data_id", ")", "# Add node to workflow.", "for", "w", ",", "edge_data", "in", "self", ".", "dmap", "[", "data_id", "]", ".", "items", "(", ")", ":", "# See func node.", "wf_add_edge", "(", "data_id", ",", "w", ",", "*", "*", "value", ")", "# Set workflow.", "node", "=", "nodes", "[", "w", "]", "# Node attributes.", "# Evaluate distance.", "vw_dist", "=", "initial_dist", "+", "edge_weight", "(", "edge_data", ",", "node", ")", "update_view", "(", "w", ",", "vw_dist", ")", "# Update view distance.", "# Check the cutoff limit and if all inputs are satisfied.", "if", "check_cutoff", "(", "vw_dist", ")", ":", "wf_remove_edge", "(", "data_id", ",", "w", ")", "# Remove workflow edge.", "continue", "# Pass the node.", "elif", "node", "[", "'type'", "]", "==", "'dispatcher'", ":", "dsp_in", "(", "data_id", ",", "w", ",", "fringe", ",", "check_cutoff", ",", "no_call", ",", "vw_dist", ")", "elif", "check_wait_in", "(", "True", ",", "w", ")", ":", "continue", "# Pass the node.", "seen", "[", "w", "]", "=", "vw_dist", "# Update distance.", "vd", "=", "(", "True", ",", "w", ",", "self", ".", "index", "+", "node", "[", "'index'", "]", ")", "# Virtual distance.", "heapq", ".", "heappush", "(", "fringe", ",", "(", "vw_dist", ",", "vd", ",", "(", "w", ",", "self", ")", ")", ")", "# Add 2 heapq.", "return", "True", "update_view", "(", "data_id", ",", "initial_dist", ")", "# Update view distance.", "if", "check_cutoff", "(", "initial_dist", ")", ":", "# Check the cutoff limit.", "wf_remove_edge", "(", "START", ",", "data_id", ")", "# Remove workflow edge.", "elif", "not", "check_wait_in", "(", "wait_in", ",", "data_id", ")", ":", "# Check inputs.", "seen", "[", "data_id", "]", "=", "initial_dist", "# Update distance.", "vd", "=", "(", "wait_in", ",", "data_id", ",", "self", ".", "index", "+", "index", ")", "# Virtual distance.", "# Add node to heapq.", "heapq", ".", "heappush", "(", "fringe", ",", "(", "initial_dist", ",", "vd", ",", "(", "data_id", ",", "self", ")", ")", ")", "return", "True", "return", "False" ]
34.307692
25.557692
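The `(distance, virtual_distance, payload)` triples pushed onto the fringe above rely on Python's lexicographic tuple comparison: when two entries share a distance, the virtual distance decides the order, and the payload is never compared. A standalone sketch:

import heapq

fringe = []
heapq.heappush(fringe, (1.0, (True, 'b', 2), 'node-b'))
heapq.heappush(fringe, (1.0, (False, 'a', 1), 'node-a'))
# Equal distances fall back to the virtual-distance tuple, so 'node-a'
# (wait_inputs=False sorts before True) is popped first.
print(heapq.heappop(fringe)[2])  # -> 'node-a'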
def should_try_kafka_again(error): """Determine if the error means to retry or fail, True to retry.""" msg = 'Unable to retrieve' return isinstance(error, KafkaException) and str(error).startswith(msg)
[ "def", "should_try_kafka_again", "(", "error", ")", ":", "msg", "=", "'Unable to retrieve'", "return", "isinstance", "(", "error", ",", "KafkaException", ")", "and", "str", "(", "error", ")", ".", "startswith", "(", "msg", ")" ]
52.5
12.75
def update_grade(self, grade_form):
        """Updates an existing grade.

        arg:    grade_form (osid.grading.GradeForm): the form containing
                the elements to be updated
        raise:  IllegalState - ``grade_form`` already used in an update
                transaction
        raise:  InvalidArgument - the form contains an invalid value
        raise:  NullArgument - ``grade_id`` or ``grade_form`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure
        raise:  Unsupported - ``grade_form`` did not originate from
                ``get_grade_form_for_update()``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.repository.AssetAdminSession.update_asset_content_template
        from dlkit.abstract_osid.grading.objects import GradeForm as ABCGradeForm
        collection = JSONClientValidated('grading',
                                         collection='GradeSystem',
                                         runtime=self._runtime)
        if not isinstance(grade_form, ABCGradeForm):
            raise errors.InvalidArgument('argument type is not a GradeForm')
        if not grade_form.is_for_update():
            raise errors.InvalidArgument('the GradeForm is for update only, not create')
        try:
            if self._forms[grade_form.get_id().get_identifier()] == UPDATED:
                raise errors.IllegalState('grade_form already used in an update transaction')
        except KeyError:
            raise errors.Unsupported('grade_form did not originate from this session')
        if not grade_form.is_valid():
            raise errors.InvalidArgument('one or more of the form elements is invalid')

        grade_system_id = Id(grade_form._my_map['gradeSystemId']).get_identifier()
        grade_system = collection.find_one(
            {'$and': [{'_id': ObjectId(grade_system_id)},
                      {'assigned' + self._catalog_name + 'Ids': {'$in': [str(self._catalog_id)]}}]})

        index = 0
        found = False
        for i in grade_system['grades']:
            if i['_id'] == ObjectId(grade_form._my_map['_id']):
                grade_system['grades'].pop(index)
                grade_system['grades'].insert(index, grade_form._my_map)
                found = True
                break
            index += 1
        if not found:
            raise errors.NotFound()
        try:
            collection.save(grade_system)
        except Exception:  # what exceptions does mongodb save raise?
            raise errors.OperationFailed()

        self._forms[grade_form.get_id().get_identifier()] = UPDATED

        # Note: this is out of spec. The OSIDs don't require an object to be returned:
        from .objects import Grade
        return Grade(
            osid_object_map=grade_form._my_map,
            runtime=self._runtime,
            proxy=self._proxy)
[ "def", "update_grade", "(", "self", ",", "grade_form", ")", ":", "# Implemented from template for", "# osid.repository.AssetAdminSession.update_asset_content_template", "from", "dlkit", ".", "abstract_osid", ".", "grading", ".", "objects", "import", "GradeForm", "as", "ABCGradeForm", "collection", "=", "JSONClientValidated", "(", "'grading'", ",", "collection", "=", "'GradeSystem'", ",", "runtime", "=", "self", ".", "_runtime", ")", "if", "not", "isinstance", "(", "grade_form", ",", "ABCGradeForm", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'argument type is not an GradeForm'", ")", "if", "not", "grade_form", ".", "is_for_update", "(", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'the GradeForm is for update only, not create'", ")", "try", ":", "if", "self", ".", "_forms", "[", "grade_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "==", "UPDATED", ":", "raise", "errors", ".", "IllegalState", "(", "'grade_form already used in an update transaction'", ")", "except", "KeyError", ":", "raise", "errors", ".", "Unsupported", "(", "'grade_form did not originate from this session'", ")", "if", "not", "grade_form", ".", "is_valid", "(", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'one or more of the form elements is invalid'", ")", "grade_system_id", "=", "Id", "(", "grade_form", ".", "_my_map", "[", "'gradeSystemId'", "]", ")", ".", "get_identifier", "(", ")", "grade_system", "=", "collection", ".", "find_one", "(", "{", "'$and'", ":", "[", "{", "'_id'", ":", "ObjectId", "(", "grade_system_id", ")", "}", ",", "{", "'assigned'", "+", "self", ".", "_catalog_name", "+", "'Ids'", ":", "{", "'$in'", ":", "[", "str", "(", "self", ".", "_catalog_id", ")", "]", "}", "}", "]", "}", ")", "index", "=", "0", "found", "=", "False", "for", "i", "in", "grade_system", "[", "'grades'", "]", ":", "if", "i", "[", "'_id'", "]", "==", "ObjectId", "(", "grade_form", ".", "_my_map", "[", "'_id'", "]", ")", ":", "grade_system", "[", "'grades'", "]", ".", "pop", "(", "index", ")", "grade_system", "[", "'grades'", "]", ".", "insert", "(", "index", ",", "grade_form", ".", "_my_map", ")", "found", "=", "True", "break", "index", "+=", "1", "if", "not", "found", ":", "raise", "errors", ".", "NotFound", "(", ")", "try", ":", "collection", ".", "save", "(", "grade_system", ")", "except", ":", "# what exceptions does mongodb save raise?", "raise", "errors", ".", "OperationFailed", "(", ")", "self", ".", "_forms", "[", "grade_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "UPDATED", "# Note: this is out of spec. The OSIDs don't require an object to be returned:", "from", ".", "objects", "import", "Grade", "return", "Grade", "(", "osid_object_map", "=", "grade_form", ".", "_my_map", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
48.180328
21.360656
def mtf_unitransformer_base(): """Hyperparameters for single-stack Transformer.""" hparams = mtf_transformer2_base() hparams.add_hparam("autoregressive", True) # HYPERPARAMETERS FOR THE SINGLE LAYER STACK hparams.add_hparam("layers", ["self_att", "drd"] * 6) # number of heads in multihead attention hparams.add_hparam("num_heads", 8) # default of 0 for standard transformer behavior # 1 means a single set of keys and values that are read by all query heads hparams.add_hparam("num_memory_heads", 0) # share attention keys and values hparams.add_hparam("shared_kv", False) # if nonzero then use local attention hparams.add_hparam("local_attention_radius", 128) return hparams
[ "def", "mtf_unitransformer_base", "(", ")", ":", "hparams", "=", "mtf_transformer2_base", "(", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive\"", ",", "True", ")", "# HYPERPARAMETERS FOR THE SINGLE LAYER STACK", "hparams", ".", "add_hparam", "(", "\"layers\"", ",", "[", "\"self_att\"", ",", "\"drd\"", "]", "*", "6", ")", "# number of heads in multihead attention", "hparams", ".", "add_hparam", "(", "\"num_heads\"", ",", "8", ")", "# default of 0 for standard transformer behavior", "# 1 means a single set of keys and values that are read by all query heads", "hparams", ".", "add_hparam", "(", "\"num_memory_heads\"", ",", "0", ")", "# share attention keys and values", "hparams", ".", "add_hparam", "(", "\"shared_kv\"", ",", "False", ")", "# if nonzero then use local attention", "hparams", ".", "add_hparam", "(", "\"local_attention_radius\"", ",", "128", ")", "return", "hparams" ]
43.1875
8.5
def start(self):
        """Start agents.

        Execute popen of agent.py on each target and start the output
        reader threads.
        """
        for agent in self.agents:
            agent.start()
        for agent in self.agents:
            agent.reader_thread.start()
[ "def", "start", "(", "self", ")", ":", "[", "agent", ".", "start", "(", ")", "for", "agent", "in", "self", ".", "agents", "]", "[", "agent", ".", "reader_thread", ".", "start", "(", ")", "for", "agent", "in", "self", ".", "agents", "]" ]
33.714286
18.428571
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'languages') and self.languages is not None: _dict['languages'] = [x._to_dict() for x in self.languages] return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'languages'", ")", "and", "self", ".", "languages", "is", "not", "None", ":", "_dict", "[", "'languages'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "languages", "]", "return", "_dict" ]
43.333333
20.5
def from_xml(cls, input_xml: str) -> 'NistBeaconValue': """ Convert a string of XML which represents a NIST Randomness Beacon value into a 'NistBeaconValue' object. :param input_xml: XML to build a 'NistBeaconValue' from :return: A 'NistBeaconValue' object, 'None' otherwise """ invalid_result = None understood_namespaces = { 'nist-0.1': 'http://beacon.nist.gov/record/0.1/', } # Our required values are "must haves". This makes it simple # to verify we loaded everything out of XML correctly. required_values = { cls._KEY_FREQUENCY: None, cls._KEY_OUTPUT_VALUE: None, cls._KEY_PREVIOUS_OUTPUT_VALUE: None, cls._KEY_SEED_VALUE: None, cls._KEY_SIGNATURE_VALUE: None, cls._KEY_STATUS_CODE: None, cls._KEY_TIMESTAMP: None, cls._KEY_VERSION: None, } # First attempt to load the xml, return 'None' on ParseError try: tree = ElementTree.ElementTree(ElementTree.fromstring(input_xml)) except ElementTree.ParseError: return invalid_result # Using the required values, let's load the xml values in for key in required_values: discovered_element = tree.find( "{0}:{1}".format('nist-0.1', key), namespaces=understood_namespaces, ) if not isinstance(discovered_element, ElementTree.Element): continue # Bad pylint message - https://github.com/PyCQA/pylint/issues/476 # pylint: disable=no-member required_values[key] = discovered_element.text # Confirm that the required values are set, and not 'None' if None in required_values.values(): return invalid_result # We have all the required values, return a node object return cls( version=required_values[cls._KEY_VERSION], frequency=int(required_values[cls._KEY_FREQUENCY]), timestamp=int(required_values[cls._KEY_TIMESTAMP]), seed_value=required_values[cls._KEY_SEED_VALUE], previous_output_value=required_values[ cls._KEY_PREVIOUS_OUTPUT_VALUE ], signature_value=required_values[cls._KEY_SIGNATURE_VALUE], output_value=required_values[cls._KEY_OUTPUT_VALUE], status_code=required_values[cls._KEY_STATUS_CODE], )
[ "def", "from_xml", "(", "cls", ",", "input_xml", ":", "str", ")", "->", "'NistBeaconValue'", ":", "invalid_result", "=", "None", "understood_namespaces", "=", "{", "'nist-0.1'", ":", "'http://beacon.nist.gov/record/0.1/'", ",", "}", "# Our required values are \"must haves\". This makes it simple", "# to verify we loaded everything out of XML correctly.", "required_values", "=", "{", "cls", ".", "_KEY_FREQUENCY", ":", "None", ",", "cls", ".", "_KEY_OUTPUT_VALUE", ":", "None", ",", "cls", ".", "_KEY_PREVIOUS_OUTPUT_VALUE", ":", "None", ",", "cls", ".", "_KEY_SEED_VALUE", ":", "None", ",", "cls", ".", "_KEY_SIGNATURE_VALUE", ":", "None", ",", "cls", ".", "_KEY_STATUS_CODE", ":", "None", ",", "cls", ".", "_KEY_TIMESTAMP", ":", "None", ",", "cls", ".", "_KEY_VERSION", ":", "None", ",", "}", "# First attempt to load the xml, return 'None' on ParseError", "try", ":", "tree", "=", "ElementTree", ".", "ElementTree", "(", "ElementTree", ".", "fromstring", "(", "input_xml", ")", ")", "except", "ElementTree", ".", "ParseError", ":", "return", "invalid_result", "# Using the required values, let's load the xml values in", "for", "key", "in", "required_values", ":", "discovered_element", "=", "tree", ".", "find", "(", "\"{0}:{1}\"", ".", "format", "(", "'nist-0.1'", ",", "key", ")", ",", "namespaces", "=", "understood_namespaces", ",", ")", "if", "not", "isinstance", "(", "discovered_element", ",", "ElementTree", ".", "Element", ")", ":", "continue", "# Bad pylint message - https://github.com/PyCQA/pylint/issues/476", "# pylint: disable=no-member", "required_values", "[", "key", "]", "=", "discovered_element", ".", "text", "# Confirm that the required values are set, and not 'None'", "if", "None", "in", "required_values", ".", "values", "(", ")", ":", "return", "invalid_result", "# We have all the required values, return a node object", "return", "cls", "(", "version", "=", "required_values", "[", "cls", ".", "_KEY_VERSION", "]", ",", "frequency", "=", "int", "(", "required_values", "[", "cls", ".", "_KEY_FREQUENCY", "]", ")", ",", "timestamp", "=", "int", "(", "required_values", "[", "cls", ".", "_KEY_TIMESTAMP", "]", ")", ",", "seed_value", "=", "required_values", "[", "cls", ".", "_KEY_SEED_VALUE", "]", ",", "previous_output_value", "=", "required_values", "[", "cls", ".", "_KEY_PREVIOUS_OUTPUT_VALUE", "]", ",", "signature_value", "=", "required_values", "[", "cls", ".", "_KEY_SIGNATURE_VALUE", "]", ",", "output_value", "=", "required_values", "[", "cls", ".", "_KEY_OUTPUT_VALUE", "]", ",", "status_code", "=", "required_values", "[", "cls", ".", "_KEY_STATUS_CODE", "]", ",", ")" ]
37.969231
19.723077
def get_data(latitude=52.091579, longitude=5.119734, usexml=False): """Get buienradar xml data and return results.""" if usexml: log.info("Getting buienradar XML data for latitude=%s, longitude=%s", latitude, longitude) return get_xml_data(latitude, longitude) else: log.info("Getting buienradar JSON data for latitude=%s, longitude=%s", latitude, longitude) return get_json_data(latitude, longitude)
[ "def", "get_data", "(", "latitude", "=", "52.091579", ",", "longitude", "=", "5.119734", ",", "usexml", "=", "False", ")", ":", "if", "usexml", ":", "log", ".", "info", "(", "\"Getting buienradar XML data for latitude=%s, longitude=%s\"", ",", "latitude", ",", "longitude", ")", "return", "get_xml_data", "(", "latitude", ",", "longitude", ")", "else", ":", "log", ".", "info", "(", "\"Getting buienradar JSON data for latitude=%s, longitude=%s\"", ",", "latitude", ",", "longitude", ")", "return", "get_json_data", "(", "latitude", ",", "longitude", ")" ]
46.9
18.2
def calculate_temperature_equivalent(temperatures):
    """
    Calculates the temperature equivalent from a series of average daily temperatures
    according to the formula: 0.6 * tempDay0 + 0.3 * tempDay-1 + 0.1 * tempDay-2

    Parameters
    ----------
    temperatures : Pandas Series
        Average daily temperatures.

    Returns
    -------
    Pandas Series
    """
    ret = 0.6*temperatures + 0.3*temperatures.shift(1) + 0.1*temperatures.shift(2)
    ret.name = 'temp_equivalent'
    return ret
[ "def", "calculate_temperature_equivalent", "(", "temperatures", ")", ":", "ret", "=", "0.6", "*", "temperatures", "+", "0.3", "*", "temperatures", ".", "shift", "(", "1", ")", "+", "0.1", "*", "temperatures", ".", "shift", "(", "2", ")", "ret", ".", "name", "=", "'temp_equivalent'", "return", "ret" ]
26.529412
25.823529
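A worked example of the weighted formula; the first two entries come out NaN because `shift(1)` and `shift(2)` have nothing to look back on.

import pandas as pd

temps = pd.Series([10.0, 12.0, 14.0],
                  index=pd.date_range('2024-01-01', periods=3))
print(calculate_temperature_equivalent(temps))
# day 3: 0.6 * 14 + 0.3 * 12 + 0.1 * 10 = 13.0; days 1-2 are NaN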
def _exec_nb(self, cmd, cwd=None, env=None, encoding='utf-8'):
        """Run a command with a non blocking call.

        Execute `cmd` command with a non blocking call. The command will
        be run in the directory set by `cwd`. Environment variables can be
        set using the `env` dictionary. The output is returned in an
        iterator; each item is one line of output, decoded with `encoding`.

        :returns: an iterator over the output of the command, one decoded
            line at a time
        :raises RepositoryError: when an error occurs running the command
        """
        self.failed_message = None

        logger.debug("Running command %s (cwd: %s, env: %s)",
                     ' '.join(cmd), cwd, str(env))

        err_thread = None

        try:
            self.proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE,
                                         cwd=cwd, env=env)
            err_thread = threading.Thread(target=self._read_stderr,
                                          kwargs={'encoding': encoding},
                                          daemon=True)
            err_thread.start()
            for line in self.proc.stdout:
                yield line.decode(encoding, errors='surrogateescape')
            err_thread.join()

            self.proc.communicate()
            self.proc.stdout.close()
            self.proc.stderr.close()
        except OSError as e:
            # Popen may fail before the reader thread exists, so only join
            # it when it was actually started.
            if err_thread is not None:
                err_thread.join()
            raise RepositoryError(cause=str(e))

        if self.proc.returncode != 0:
            cause = "git command - %s (return code: %d)" % \
                (self.failed_message, self.proc.returncode)
            raise RepositoryError(cause=cause)
[ "def", "_exec_nb", "(", "self", ",", "cmd", ",", "cwd", "=", "None", ",", "env", "=", "None", ",", "encoding", "=", "'utf-8'", ")", ":", "self", ".", "failed_message", "=", "None", "logger", ".", "debug", "(", "\"Running command %s (cwd: %s, env: %s)\"", ",", "' '", ".", "join", "(", "cmd", ")", ",", "cwd", ",", "str", "(", "env", ")", ")", "try", ":", "self", ".", "proc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "cwd", "=", "cwd", ",", "env", "=", "env", ")", "err_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_read_stderr", ",", "kwargs", "=", "{", "'encoding'", ":", "encoding", "}", ",", "daemon", "=", "True", ")", "err_thread", ".", "start", "(", ")", "for", "line", "in", "self", ".", "proc", ".", "stdout", ":", "yield", "line", ".", "decode", "(", "encoding", ",", "errors", "=", "'surrogateescape'", ")", "err_thread", ".", "join", "(", ")", "self", ".", "proc", ".", "communicate", "(", ")", "self", ".", "proc", ".", "stdout", ".", "close", "(", ")", "self", ".", "proc", ".", "stderr", ".", "close", "(", ")", "except", "OSError", "as", "e", ":", "err_thread", ".", "join", "(", ")", "raise", "RepositoryError", "(", "cause", "=", "str", "(", "e", ")", ")", "if", "self", ".", "proc", ".", "returncode", "!=", "0", ":", "cause", "=", "\"git command - %s (return code: %d)\"", "%", "(", "self", ".", "failed_message", ",", "self", ".", "proc", ".", "returncode", ")", "raise", "RepositoryError", "(", "cause", "=", "cause", ")" ]
41.27907
20.27907
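The reason for the stderr reader thread above: draining both pipes from a single thread can deadlock once either pipe's OS buffer fills. A standalone sketch of the same pattern, assuming `git` is on PATH and with `_read_stderr` replaced by an inline reader:

import subprocess
import threading

proc = subprocess.Popen(['git', '--version'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr_chunks = []
# Daemon thread drains stderr so the child never blocks on a full pipe.
reader = threading.Thread(target=lambda: stderr_chunks.append(proc.stderr.read()),
                          daemon=True)
reader.start()
for line in proc.stdout:
    print(line.decode('utf-8', errors='surrogateescape'), end='')
reader.join()
proc.wait()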
def _margtimedist_loglr(self, mf_snr, opt_snr): """Returns the log likelihood ratio marginalized over time and distance. """ logl = special.logsumexp(mf_snr, b=self._deltat) logl_marg = logl/self._dist_array opt_snr_marg = opt_snr/self._dist_array**2 return special.logsumexp(logl_marg - 0.5*opt_snr_marg, b=self._deltad*self.dist_prior)
[ "def", "_margtimedist_loglr", "(", "self", ",", "mf_snr", ",", "opt_snr", ")", ":", "logl", "=", "special", ".", "logsumexp", "(", "mf_snr", ",", "b", "=", "self", ".", "_deltat", ")", "logl_marg", "=", "logl", "/", "self", ".", "_dist_array", "opt_snr_marg", "=", "opt_snr", "/", "self", ".", "_dist_array", "**", "2", "return", "special", ".", "logsumexp", "(", "logl_marg", "-", "0.5", "*", "opt_snr_marg", ",", "b", "=", "self", ".", "_deltad", "*", "self", ".", "dist_prior", ")" ]
46.444444
11.444444
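In symbols (names chosen here for exposition; they mirror the variables above rather than any published notation), the method evaluates a discretized double marginalization over coalescence time and luminosity distance:

\[
\ell_t = \log \sum_j \Delta t \, e^{\rho_{\mathrm{mf}}(t_j)}, \qquad
\log \mathcal{L} = \log \sum_k p(d_k) \, \Delta d \,
\exp\!\left( \frac{\ell_t}{d_k} - \frac{1}{2} \frac{\rho_{\mathrm{opt}}^2}{d_k^2} \right),
\]

where \(\rho_{\mathrm{mf}}\) is mf_snr, \(\rho_{\mathrm{opt}}^2\) is opt_snr, \(d_k\) ranges over self._dist_array with spacing \(\Delta d\) (self._deltad), and \(p(d_k)\) is self.dist_prior.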
def p_expression_unand(self, p): 'expression : NAND expression %prec UNAND' p[0] = Unand(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_expression_unand", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Unand", "(", "p", "[", "2", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
41
7
def _generate_rsa_key(key_length): """Generate a new RSA private key. :param int key_length: Required key length in bits :returns: DER-encoded private key, private key identifier, and DER encoding identifier :rtype: tuple(bytes, :class:`EncryptionKeyType`, :class:`KeyEncodingType`) """ private_key = rsa.generate_private_key(public_exponent=65537, key_size=key_length, backend=default_backend()) key_bytes = private_key.private_bytes( encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption(), ) return key_bytes, EncryptionKeyType.PRIVATE, KeyEncodingType.DER
[ "def", "_generate_rsa_key", "(", "key_length", ")", ":", "private_key", "=", "rsa", ".", "generate_private_key", "(", "public_exponent", "=", "65537", ",", "key_size", "=", "key_length", ",", "backend", "=", "default_backend", "(", ")", ")", "key_bytes", "=", "private_key", ".", "private_bytes", "(", "encoding", "=", "serialization", ".", "Encoding", ".", "DER", ",", "format", "=", "serialization", ".", "PrivateFormat", ".", "PKCS8", ",", "encryption_algorithm", "=", "serialization", ".", "NoEncryption", "(", ")", ",", ")", "return", "key_bytes", ",", "EncryptionKeyType", ".", "PRIVATE", ",", "KeyEncodingType", ".", "DER" ]
48.571429
22.642857
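A usage sketch that round-trips the generated DER/PKCS8 bytes through `cryptography` to confirm the encoding:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization

key_bytes, key_type, encoding_type = _generate_rsa_key(2048)
# load_der_private_key proves the bytes really are DER-encoded PKCS8.
private_key = serialization.load_der_private_key(
    key_bytes, password=None, backend=default_backend())
assert private_key.key_size == 2048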
def random(cls, engine_or_session, limit=5): """ Return random ORM instance. :type engine_or_session: Union[Engine, Session] :type limit: int :rtype: List[ExtendedBase] """ ses, auto_close = ensure_session(engine_or_session) result = ses.query(cls).order_by(func.random()).limit(limit).all() if auto_close: # pragma: no cover ses.close() return result
[ "def", "random", "(", "cls", ",", "engine_or_session", ",", "limit", "=", "5", ")", ":", "ses", ",", "auto_close", "=", "ensure_session", "(", "engine_or_session", ")", "result", "=", "ses", ".", "query", "(", "cls", ")", ".", "order_by", "(", "func", ".", "random", "(", ")", ")", ".", "limit", "(", "limit", ")", ".", "all", "(", ")", "if", "auto_close", ":", "# pragma: no cover", "ses", ".", "close", "(", ")", "return", "result" ]
30.928571
15.5
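A usage sketch, assuming a declarative model `User` that mixes in the class above and an `engine` already bound to its tables (both hypothetical names). Note that `ORDER BY random()` sorts the whole table, so this is convenient on small tables but expensive on large ones.

rows = User.random(engine, limit=3)  # User and engine are hypothetical
for user in rows:
    print(user)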
def get_gene2section2gos(gene2gos, sec2gos): """Get a list of section aliases for each gene product ID.""" gene2section2gos = {} for geneid, gos_gene in gene2gos.items(): section2gos = {} for section_name, gos_sec in sec2gos.items(): gos_secgene = gos_gene.intersection(gos_sec) if gos_secgene: section2gos[section_name] = gos_secgene gene2section2gos[geneid] = section2gos return gene2section2gos
[ "def", "get_gene2section2gos", "(", "gene2gos", ",", "sec2gos", ")", ":", "gene2section2gos", "=", "{", "}", "for", "geneid", ",", "gos_gene", "in", "gene2gos", ".", "items", "(", ")", ":", "section2gos", "=", "{", "}", "for", "section_name", ",", "gos_sec", "in", "sec2gos", ".", "items", "(", ")", ":", "gos_secgene", "=", "gos_gene", ".", "intersection", "(", "gos_sec", ")", "if", "gos_secgene", ":", "section2gos", "[", "section_name", "]", "=", "gos_secgene", "gene2section2gos", "[", "geneid", "]", "=", "section2gos", "return", "gene2section2gos" ]
46.090909
10.909091
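A worked toy example; the GO IDs and section names are made up:

gene2gos = {'geneA': {'GO:1', 'GO:2'}, 'geneB': {'GO:3'}}
sec2gos = {'immune': {'GO:1'}, 'growth': {'GO:2', 'GO:3'}}
print(get_gene2section2gos(gene2gos, sec2gos))
# {'geneA': {'immune': {'GO:1'}, 'growth': {'GO:2'}},
#  'geneB': {'growth': {'GO:3'}}}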
def get_record(self, dns_type, name):
        """
        Get a dns record

        :param dns_type: Type of the dns record (e.g. 'A', 'CNAME')
        :param name: Name of the dns record
        :return: The matching dns record
        """
        try:
            record = [record for record in self.dns_records if
                      record['type'] == dns_type and
                      record['name'] == name][0]
        except IndexError:
            raise RecordNotFound(
                'Cannot find the specified dns record in domain {domain}'
                .format(domain=name))
        return record
[ "def", "get_record", "(", "self", ",", "dns_type", ",", "name", ")", ":", "try", ":", "record", "=", "[", "record", "for", "record", "in", "self", ".", "dns_records", "if", "record", "[", "'type'", "]", "==", "dns_type", "and", "record", "[", "'name'", "]", "==", "name", "]", "[", "0", "]", "except", "IndexError", ":", "raise", "RecordNotFound", "(", "'Cannot find the specified dns record in domain {domain}'", ".", "format", "(", "domain", "=", "name", ")", ")", "return", "record" ]
32.4
16.266667
def set_geometry(self, im, geometry, options=None):
        """Rescale the image to the new geometry.
        """
        if not geometry:
            return im

        options = options or {}

        width, height = geometry

        if not width and not height:
            return im

        imw, imh = self.get_size(im)

        # Geometry match the current size?
        if (width is None) or (imw == width):
            if (height is None) or (imh == height):
                return im

        ratio = float(imw) / imh

        if width and height:
            # Smaller than the target?
            smaller = (imw <= width) and (imh <= height)
            if smaller and not options.get('upscale'):
                return im

            resize = options.get('resize', 'fill')
            if resize == 'fill':
                new_width = width
                new_height = int(ceil(width / ratio))
                if new_height < height:
                    new_height = height
                    new_width = int(ceil(height * ratio))
            elif resize == 'fit':
                new_width = int(ceil(height * ratio))
                new_height = height
                if new_width > width:
                    new_width = width
                    new_height = int(ceil(width / ratio))
            elif resize == 'stretch':
                new_width = width
                new_height = height
        elif height:
            # Smaller than the target?
            smaller = imh <= height
            if smaller and not options.get('upscale'):
                return im

            new_width = int(ceil(height * ratio))
            new_height = height
        else:
            # Smaller than the target?
            smaller = imw <= width
            if smaller and not options.get('upscale'):
                return im

            new_width = width
            new_height = int(ceil(width / ratio))

        im = self.scale(im, new_width, new_height)
        return im
[ "def", "set_geometry", "(", "self", ",", "im", ",", "geometry", ",", "options", "=", "None", ")", ":", "if", "not", "geometry", ":", "return", "im", "options", "=", "options", "or", "{", "}", "width", ",", "height", "=", "geometry", "if", "not", "width", "and", "not", "height", ":", "return", "im", "imw", ",", "imh", "=", "self", ".", "get_size", "(", "im", ")", "# Geometry match the current size?", "if", "(", "width", "is", "None", ")", "or", "(", "imw", "==", "width", ")", ":", "if", "(", "height", "is", "None", ")", "or", "(", "imh", "==", "height", ")", ":", "return", "im", "ratio", "=", "float", "(", "imw", ")", "/", "imh", "if", "width", "and", "height", ":", "# Smaller than the target?", "smaller", "=", "(", "imw", "<=", "width", ")", "and", "(", "imh", "<=", "height", ")", "if", "smaller", "and", "not", "options", "[", "'upscale'", "]", ":", "return", "im", "resize", "=", "options", ".", "get", "(", "'resize'", ",", "'fill'", ")", "if", "resize", "==", "'fill'", ":", "new_width", "=", "width", "new_height", "=", "int", "(", "ceil", "(", "width", "/", "ratio", ")", ")", "if", "new_height", "<", "height", ":", "new_height", "=", "height", "new_width", "=", "int", "(", "ceil", "(", "height", "*", "ratio", ")", ")", "elif", "resize", "==", "'fit'", ":", "new_width", "=", "int", "(", "ceil", "(", "height", "*", "ratio", ")", ")", "new_height", "=", "height", "if", "new_width", ">", "width", ":", "new_width", "=", "width", "new_height", "=", "int", "(", "ceil", "(", "width", "/", "ratio", ")", ")", "elif", "resize", "==", "'stretch'", ":", "new_width", "=", "width", "new_height", "=", "height", "elif", "height", ":", "# Smaller than the target?", "smaller", "=", "imh", "<=", "height", "if", "smaller", "and", "not", "options", "[", "'upscale'", "]", ":", "return", "im", "new_width", "=", "int", "(", "ceil", "(", "height", "*", "ratio", ")", ")", "new_height", "=", "height", "else", ":", "# Smaller than the target?", "smaller", "=", "imw", "<=", "width", "if", "smaller", "and", "not", "options", "[", "'upscale'", "]", ":", "return", "im", "new_width", "=", "width", "new_height", "=", "int", "(", "ceil", "(", "width", "/", "ratio", ")", ")", "im", "=", "self", ".", "scale", "(", "im", ",", "new_width", ",", "new_height", ")", "return", "im" ]
30.919355
14.33871
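Worked arithmetic for the two main branches above, using a 400x200 source (aspect ratio 2.0) and a 100x100 target:

from math import ceil

imw, imh = 400, 200              # source
width, height = 100, 100         # target geometry
ratio = imw / imh                # 2.0
# 'fill' covers the box: the width pass gives 100x50, too short, so the
# height pass wins. 'fit' stays inside it: 200x100 overflows, so the
# width pass wins.
fill = (int(ceil(height * ratio)), height)   # -> (200, 100)
fit = (width, int(ceil(width / ratio)))      # -> (100, 50)
print(fill, fit)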
def get_port_channel_detail_output_lacp_aggr_member_actor_port(self, **kwargs):
        """Auto Generated Code
        """
        get_port_channel_detail = ET.Element("get_port_channel_detail")
        config = get_port_channel_detail
        output = ET.SubElement(get_port_channel_detail, "output")
        lacp = ET.SubElement(output, "lacp")
        aggr_member = ET.SubElement(lacp, "aggr-member")
        actor_port = ET.SubElement(aggr_member, "actor-port")
        actor_port.text = kwargs.pop('actor_port')

        callback = kwargs.pop('callback', self._callback)
        return callback(config)
[ "def", "get_port_channel_detail_output_lacp_aggr_member_actor_port", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_port_channel_detail", "=", "ET", ".", "Element", "(", "\"get_port_channel_detail\"", ")", "config", "=", "get_port_channel_detail", "output", "=", "ET", ".", "SubElement", "(", "get_port_channel_detail", ",", "\"output\"", ")", "lacp", "=", "ET", ".", "SubElement", "(", "output", ",", "\"lacp\"", ")", "aggr_member", "=", "ET", ".", "SubElement", "(", "lacp", ",", "\"aggr-member\"", ")", "actor_port", "=", "ET", ".", "SubElement", "(", "aggr_member", ",", "\"actor-port\"", ")", "actor_port", ".", "text", "=", "kwargs", ".", "pop", "(", "'actor_port'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
45.142857
15.357143
def select(self, *command_tokens, **command_env):
        """ Select a suitable command that matches the given tokens. Each new command to check is fetched with
        this object iterator (:meth:`.WCommandSelector.__iter__`)

        :param command_tokens: command
        :param command_env: command environment

        :return: WCommandProto, or None if no registered command matches
        """
        for command_obj in self:
            if command_obj.match(*command_tokens, **command_env):
                return command_obj
[ "def", "select", "(", "self", ",", "*", "command_tokens", ",", "*", "*", "command_env", ")", ":", "for", "command_obj", "in", "self", ":", "if", "command_obj", ".", "match", "(", "*", "command_tokens", ",", "*", "*", "command_env", ")", ":", "return", "command_obj" ]
37.909091
12.818182
def _entry_must_not_exist(df, k1, k2):
    """Evaluate key-subkey non-existence.

    Checks that the key-subkey combo does not exist in the configuration
    options.
    """
    count = df[(df['k1'] == k1) & (df['k2'] == k2)].shape[0]
    if count > 0:
        raise AlreadyRegisteredError(
            "Option {0}.{1} already registered".format(k1, k2))
[ "def", "_entry_must_not_exist", "(", "df", ",", "k1", ",", "k2", ")", ":", "count", "=", "df", "[", "(", "df", "[", "'k1'", "]", "==", "k1", ")", "&", "(", "df", "[", "'k2'", "]", "==", "k2", ")", "]", ".", "shape", "[", "0", "]", "if", "count", ">", "0", ":", "raise", "AlreadyRegisteredError", "(", "\"Option {0}.{1} already registered\"", ".", "format", "(", "k1", ",", "k2", ")", ")" ]
32.909091
12
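A quick sketch against a toy options frame (`AlreadyRegisteredError` comes from the same module as the function above):

import pandas as pd

df = pd.DataFrame({'k1': ['db'], 'k2': ['host']})
_entry_must_not_exist(df, 'db', 'port')   # no matching row: passes silently
_entry_must_not_exist(df, 'db', 'host')   # raises AlreadyRegisteredError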
def _create_environment(config): """Constructor for an instance of the environment. Args: config: Object providing configurations via attributes. Raises: NotImplementedError: For action spaces other than Box and Discrete. Returns: Wrapped OpenAI Gym environment. """ if isinstance(config.env, str): env = gym.make(config.env) else: env = config.env() if config.max_length: env = tools.wrappers.LimitDuration(env, config.max_length) if isinstance(env.action_space, gym.spaces.Box): if config.normalize_ranges: env = tools.wrappers.RangeNormalize(env) env = tools.wrappers.ClipAction(env) elif isinstance(env.action_space, gym.spaces.Discrete): if config.normalize_ranges: env = tools.wrappers.RangeNormalize(env, action=False) else: message = "Unsupported action space '{}'".format(type(env.action_space)) raise NotImplementedError(message) env = tools.wrappers.ConvertTo32Bit(env) env = tools.wrappers.CacheSpaces(env) return env
[ "def", "_create_environment", "(", "config", ")", ":", "if", "isinstance", "(", "config", ".", "env", ",", "str", ")", ":", "env", "=", "gym", ".", "make", "(", "config", ".", "env", ")", "else", ":", "env", "=", "config", ".", "env", "(", ")", "if", "config", ".", "max_length", ":", "env", "=", "tools", ".", "wrappers", ".", "LimitDuration", "(", "env", ",", "config", ".", "max_length", ")", "if", "isinstance", "(", "env", ".", "action_space", ",", "gym", ".", "spaces", ".", "Box", ")", ":", "if", "config", ".", "normalize_ranges", ":", "env", "=", "tools", ".", "wrappers", ".", "RangeNormalize", "(", "env", ")", "env", "=", "tools", ".", "wrappers", ".", "ClipAction", "(", "env", ")", "elif", "isinstance", "(", "env", ".", "action_space", ",", "gym", ".", "spaces", ".", "Discrete", ")", ":", "if", "config", ".", "normalize_ranges", ":", "env", "=", "tools", ".", "wrappers", ".", "RangeNormalize", "(", "env", ",", "action", "=", "False", ")", "else", ":", "message", "=", "\"Unsupported action space '{}'\"", ".", "format", "(", "type", "(", "env", ".", "action_space", ")", ")", "raise", "NotImplementedError", "(", "message", ")", "env", "=", "tools", ".", "wrappers", ".", "ConvertTo32Bit", "(", "env", ")", "env", "=", "tools", ".", "wrappers", ".", "CacheSpaces", "(", "env", ")", "return", "env" ]
31.806452
17.967742
def prod(): """Option to do something on the production server.""" common_conf() env.user = settings.LOGIN_USER_PROD env.machine = 'prod' env.host_string = settings.HOST_PROD env.hosts = [env.host_string, ]
[ "def", "prod", "(", ")", ":", "common_conf", "(", ")", "env", ".", "user", "=", "settings", ".", "LOGIN_USER_PROD", "env", ".", "machine", "=", "'prod'", "env", ".", "host_string", "=", "settings", ".", "HOST_PROD", "env", ".", "hosts", "=", "[", "env", ".", "host_string", ",", "]" ]
32
10.571429
def route(self, url_rule, name=None, options=None): '''A decorator to add a route to a view. name is used to differentiate when there are multiple routes for a given view.''' # TODO: change options kwarg to defaults def decorator(f): view_name = name or f.__name__ self.add_url_rule(url_rule, f, name=view_name, options=options) return f return decorator
[ "def", "route", "(", "self", ",", "url_rule", ",", "name", "=", "None", ",", "options", "=", "None", ")", ":", "# TODO: change options kwarg to defaults", "def", "decorator", "(", "f", ")", ":", "view_name", "=", "name", "or", "f", ".", "__name__", "self", ".", "add_url_rule", "(", "url_rule", ",", "f", ",", "name", "=", "view_name", ",", "options", "=", "options", ")", "return", "f", "return", "decorator" ]
46.888889
18.222222
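A usage sketch; `app` stands in for an instance of the class defining `route`, and the view bodies are placeholders:

@app.route('/users/<id>', name='user_detail')
def show_user(id):
    ...  # placeholder view body

@app.route('/users/<id>/edit')   # name defaults to 'edit_user'
def edit_user(id):
    ...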
def _EncodeInt64Field(field, value): """Handle the special case of int64 as a string.""" capabilities = [ messages.Variant.INT64, messages.Variant.UINT64, ] if field.variant not in capabilities: return encoding.CodecResult(value=value, complete=False) if field.repeated: result = [str(x) for x in value] else: result = str(value) return encoding.CodecResult(value=result, complete=True)
[ "def", "_EncodeInt64Field", "(", "field", ",", "value", ")", ":", "capabilities", "=", "[", "messages", ".", "Variant", ".", "INT64", ",", "messages", ".", "Variant", ".", "UINT64", ",", "]", "if", "field", ".", "variant", "not", "in", "capabilities", ":", "return", "encoding", ".", "CodecResult", "(", "value", "=", "value", ",", "complete", "=", "False", ")", "if", "field", ".", "repeated", ":", "result", "=", "[", "str", "(", "x", ")", "for", "x", "in", "value", "]", "else", ":", "result", "=", "str", "(", "value", ")", "return", "encoding", ".", "CodecResult", "(", "value", "=", "result", ",", "complete", "=", "True", ")" ]
31.571429
15.928571
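Why int64 travels as a string: JSON consumers commonly parse numbers as IEEE-754 doubles, which are exact only up to 2**53, so values near the int64 limit would be silently rounded. A quick demonstration:

big = 2**63 - 1           # max int64
print(float(big) == big)  # False: a double rounds it up to 2**63
print(str(big))           # '9223372036854775807' survives unchanged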
def _get_vsan_eligible_disks(service_instance, host, host_names): ''' Helper function that returns a dictionary of host_name keys with either a list of eligible disks that can be added to VSAN or either an 'Error' message or a message saying no eligible disks were found. Possible keys/values look like: return = {'host_1': {'Error': 'VSAN System Config Manager is unset ...'}, 'host_2': {'Eligible': 'The host xxx does not have any VSAN eligible disks.'}, 'host_3': {'Eligible': [disk1, disk2, disk3, disk4], 'host_4': {'Eligible': []}} ''' ret = {} for host_name in host_names: # Get VSAN System Config Manager, if available. host_ref = _get_host_ref(service_instance, host, host_name=host_name) vsan_system = host_ref.configManager.vsanSystem if vsan_system is None: msg = 'VSAN System Config Manager is unset for host \'{0}\'. ' \ 'VSAN configuration cannot be changed without a configured ' \ 'VSAN System.'.format(host_name) log.debug(msg) ret.update({host_name: {'Error': msg}}) continue # Get all VSAN suitable disks for this host. suitable_disks = [] query = vsan_system.QueryDisksForVsan() for item in query: if item.state == 'eligible': suitable_disks.append(item) # No suitable disks were found to add. Warn and move on. # This isn't an error as the state may run repeatedly after all eligible disks are added. if not suitable_disks: msg = 'The host \'{0}\' does not have any VSAN eligible disks.'.format(host_name) log.warning(msg) ret.update({host_name: {'Eligible': msg}}) continue # Get disks for host and combine into one list of Disk Objects disks = _get_host_ssds(host_ref) + _get_host_non_ssds(host_ref) # Get disks that are in both the disks list and suitable_disks lists. matching = [] for disk in disks: for suitable_disk in suitable_disks: if disk.canonicalName == suitable_disk.disk.canonicalName: matching.append(disk) ret.update({host_name: {'Eligible': matching}}) return ret
[ "def", "_get_vsan_eligible_disks", "(", "service_instance", ",", "host", ",", "host_names", ")", ":", "ret", "=", "{", "}", "for", "host_name", "in", "host_names", ":", "# Get VSAN System Config Manager, if available.", "host_ref", "=", "_get_host_ref", "(", "service_instance", ",", "host", ",", "host_name", "=", "host_name", ")", "vsan_system", "=", "host_ref", ".", "configManager", ".", "vsanSystem", "if", "vsan_system", "is", "None", ":", "msg", "=", "'VSAN System Config Manager is unset for host \\'{0}\\'. '", "'VSAN configuration cannot be changed without a configured '", "'VSAN System.'", ".", "format", "(", "host_name", ")", "log", ".", "debug", "(", "msg", ")", "ret", ".", "update", "(", "{", "host_name", ":", "{", "'Error'", ":", "msg", "}", "}", ")", "continue", "# Get all VSAN suitable disks for this host.", "suitable_disks", "=", "[", "]", "query", "=", "vsan_system", ".", "QueryDisksForVsan", "(", ")", "for", "item", "in", "query", ":", "if", "item", ".", "state", "==", "'eligible'", ":", "suitable_disks", ".", "append", "(", "item", ")", "# No suitable disks were found to add. Warn and move on.", "# This isn't an error as the state may run repeatedly after all eligible disks are added.", "if", "not", "suitable_disks", ":", "msg", "=", "'The host \\'{0}\\' does not have any VSAN eligible disks.'", ".", "format", "(", "host_name", ")", "log", ".", "warning", "(", "msg", ")", "ret", ".", "update", "(", "{", "host_name", ":", "{", "'Eligible'", ":", "msg", "}", "}", ")", "continue", "# Get disks for host and combine into one list of Disk Objects", "disks", "=", "_get_host_ssds", "(", "host_ref", ")", "+", "_get_host_non_ssds", "(", "host_ref", ")", "# Get disks that are in both the disks list and suitable_disks lists.", "matching", "=", "[", "]", "for", "disk", "in", "disks", ":", "for", "suitable_disk", "in", "suitable_disks", ":", "if", "disk", ".", "canonicalName", "==", "suitable_disk", ".", "disk", ".", "canonicalName", ":", "matching", ".", "append", "(", "disk", ")", "ret", ".", "update", "(", "{", "host_name", ":", "{", "'Eligible'", ":", "matching", "}", "}", ")", "return", "ret" ]
43.037736
25.415094
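A minimal usage sketch for this helper, assuming a pyVmomi service instance obtained elsewhere; the service instance `si`, the vCenter address, and the host names are hypothetical:

# Hypothetical: query two ESXi hosts and handle each of the return shapes.
ret = _get_vsan_eligible_disks(si, 'vcenter01.example.com',
                               ['esxi01.example.com', 'esxi02.example.com'])
for host_name, info in ret.items():
    if 'Error' in info:
        print('{0}: {1}'.format(host_name, info['Error']))
    elif isinstance(info['Eligible'], list):
        print('{0}: {1} eligible disk(s)'.format(host_name, len(info['Eligible'])))
    else:
        # 'Eligible' can also carry the "no eligible disks" message string.
        print('{0}: {1}'.format(host_name, info['Eligible']))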
async def handle_adapter_event(self, adapter_id, conn_string, conn_id, name, event): """Handle an event received from an adapter.""" if name == 'device_seen': self._track_device_seen(adapter_id, conn_string, event) event = self._translate_device_seen(adapter_id, conn_string, event) conn_string = self._translate_conn_string(adapter_id, conn_string) elif conn_id is not None and self._get_property(conn_id, 'translate'): conn_string = self._translate_conn_string(adapter_id, conn_string) else: conn_string = "adapter/%d/%s" % (adapter_id, conn_string) await self.notify_event(conn_string, name, event)
[ "async", "def", "handle_adapter_event", "(", "self", ",", "adapter_id", ",", "conn_string", ",", "conn_id", ",", "name", ",", "event", ")", ":", "if", "name", "==", "'device_seen'", ":", "self", ".", "_track_device_seen", "(", "adapter_id", ",", "conn_string", ",", "event", ")", "event", "=", "self", ".", "_translate_device_seen", "(", "adapter_id", ",", "conn_string", ",", "event", ")", "conn_string", "=", "self", ".", "_translate_conn_string", "(", "adapter_id", ",", "conn_string", ")", "elif", "conn_id", "is", "not", "None", "and", "self", ".", "_get_property", "(", "conn_id", ",", "'translate'", ")", ":", "conn_string", "=", "self", ".", "_translate_conn_string", "(", "adapter_id", ",", "conn_string", ")", "else", ":", "conn_string", "=", "\"adapter/%d/%s\"", "%", "(", "adapter_id", ",", "conn_string", ")", "await", "self", ".", "notify_event", "(", "conn_string", ",", "name", ",", "event", ")" ]
49.357143
30.285714
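For intuition, the fallback branch simply namespaces the raw connection string by adapter id with plain string formatting; a quick illustration (values are hypothetical):

adapter_id = 2
conn_string = '00:11:22:33:44:55'
namespaced = "adapter/%d/%s" % (adapter_id, conn_string)
assert namespaced == 'adapter/2/00:11:22:33:44:55'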
def download_to_shared_memory(self, slices, location=None):
    """
    Download images to a shared memory array.

    https://github.com/seung-lab/cloud-volume/wiki/Advanced-Topic:-Shared-Memory

    tip: If you want to use slice notation, np.s_[...] will help in a pinch.

    MEMORY LIFECYCLE WARNING: You are responsible for managing the lifecycle of the
      shared memory. CloudVolume will merely write to it, it will not unlink the
      memory automatically. To fully clear the shared memory you must unlink the
      location and close any mmap file handles. You can use `cloudvolume.sharedmemory.unlink(...)`
      to help you unlink the shared memory file or `vol.unlink_shared_memory()` if you do
      not specify location (meaning the default instance location is used).

    EXPERT MODE WARNING: If you aren't sure you need this function (e.g. to relieve
      memory pressure or improve performance in some way) you should use the ordinary
      download method of img = vol[:]. A typical use case is transferring arrays between
      different processes without making copies. For reference, this feature was created
      for downloading a 62 GB array and working with it in Julia.

    Required:
      slices: (Bbox or list of slices) the bounding box the shared array represents. For instance
        if you have a 1024x1024x128 volume and you're downloading only a 512x512x64 corner
        touching the origin, your Bbox would be `Bbox( (0,0,0), (512,512,64) )`.

    Optional:
      location: (str) Defaults to self.shared_memory_id. Shared memory location
        e.g. 'cloudvolume-shm-RANDOM-STRING'. This typically corresponds to a file
        in `/dev/shm` or `/run/shm/`. It can also be a file if you're using that for mmap.

    Returns: void
    """
    if self.path.protocol == 'boss':
      raise NotImplementedError('BOSS protocol does not support shared memory download.')

    if type(slices) == Bbox:
      slices = slices.to_slices()

    (requested_bbox, steps, channel_slice) = self.__interpret_slices(slices)

    if self.autocrop:
      requested_bbox = Bbox.intersection(requested_bbox, self.bounds)

    location = location or self.shared_memory_id
    return txrx.cutout(self, requested_bbox, steps, channel_slice, parallel=self.parallel,
      shared_memory_location=location, output_to_shared_memory=True)
[ "def", "download_to_shared_memory", "(", "self", ",", "slices", ",", "location", "=", "None", ")", ":", "if", "self", ".", "path", ".", "protocol", "==", "'boss'", ":", "raise", "NotImplementedError", "(", "'BOSS protocol does not support shared memory download.'", ")", "if", "type", "(", "slices", ")", "==", "Bbox", ":", "slices", "=", "slices", ".", "to_slices", "(", ")", "(", "requested_bbox", ",", "steps", ",", "channel_slice", ")", "=", "self", ".", "__interpret_slices", "(", "slices", ")", "if", "self", ".", "autocrop", ":", "requested_bbox", "=", "Bbox", ".", "intersection", "(", "requested_bbox", ",", "self", ".", "bounds", ")", "location", "=", "location", "or", "self", ".", "shared_memory_id", "return", "txrx", ".", "cutout", "(", "self", ",", "requested_bbox", ",", "steps", ",", "channel_slice", ",", "parallel", "=", "self", ".", "parallel", ",", "shared_memory_location", "=", "location", ",", "output_to_shared_memory", "=", "True", ")" ]
51.688889
34.088889
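A hedged usage sketch, assuming a CloudVolume pointed at an existing layer (the path and shared-memory location below are illustrative); note the explicit unlink, per the memory-lifecycle warning in the docstring:

import numpy as np
import cloudvolume
import cloudvolume.sharedmemory

vol = cloudvolume.CloudVolume('gs://example-bucket/example-layer')  # hypothetical layer
# Mirror the docstring's example: a 512x512x64 corner touching the origin.
vol.download_to_shared_memory(np.s_[0:512, 0:512, 0:64],
                              location='cloudvolume-shm-example')
# ... hand 'cloudvolume-shm-example' to another process here ...
cloudvolume.sharedmemory.unlink('cloudvolume-shm-example')  # caller-managed lifecycle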
def unattach_issue(resource_id, issue_id, table): """Unattach an issue from a specific job.""" v1_utils.verify_existence_and_get(issue_id, _TABLE) if table.name == 'jobs': join_table = models.JOIN_JOBS_ISSUES where_clause = sql.and_(join_table.c.job_id == resource_id, join_table.c.issue_id == issue_id) else: join_table = models.JOIN_COMPONENTS_ISSUES where_clause = sql.and_(join_table.c.component_id == resource_id, join_table.c.issue_id == issue_id) query = join_table.delete().where(where_clause) result = flask.g.db_conn.execute(query) if not result.rowcount: raise dci_exc.DCIConflict('%s_issues' % table.name, issue_id) return flask.Response(None, 204, content_type='application/json')
[ "def", "unattach_issue", "(", "resource_id", ",", "issue_id", ",", "table", ")", ":", "v1_utils", ".", "verify_existence_and_get", "(", "issue_id", ",", "_TABLE", ")", "if", "table", ".", "name", "==", "'jobs'", ":", "join_table", "=", "models", ".", "JOIN_JOBS_ISSUES", "where_clause", "=", "sql", ".", "and_", "(", "join_table", ".", "c", ".", "job_id", "==", "resource_id", ",", "join_table", ".", "c", ".", "issue_id", "==", "issue_id", ")", "else", ":", "join_table", "=", "models", ".", "JOIN_COMPONENTS_ISSUES", "where_clause", "=", "sql", ".", "and_", "(", "join_table", ".", "c", ".", "component_id", "==", "resource_id", ",", "join_table", ".", "c", ".", "issue_id", "==", "issue_id", ")", "query", "=", "join_table", ".", "delete", "(", ")", ".", "where", "(", "where_clause", ")", "result", "=", "flask", ".", "g", ".", "db_conn", ".", "execute", "(", "query", ")", "if", "not", "result", ".", "rowcount", ":", "raise", "dci_exc", ".", "DCIConflict", "(", "'%s_issues'", "%", "table", ".", "name", ",", "issue_id", ")", "return", "flask", ".", "Response", "(", "None", ",", "204", ",", "content_type", "=", "'application/json'", ")" ]
40.7
21.9
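A hedged sketch of how an API route might delegate to this helper; the blueprint name, route shape, and the `models.JOBS` table object are assumptions inferred from the `table` parameter:

# Hypothetical Flask route wiring for the jobs case.
@api.route('/jobs/<uuid:job_id>/issues/<uuid:issue_id>', methods=['DELETE'])
def unattach_issue_from_job(job_id, issue_id):
    return unattach_issue(job_id, issue_id, models.JOBS)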
def compile_dmtf_schema(self, schema_version, schema_root_dir, class_names, use_experimental=False, namespace=None, verbose=False):
    """
    Compile the classes defined by `class_names` and their dependent
    classes from the DMTF CIM schema version defined by `schema_version`
    and keep the downloaded DMTF CIM schema in the directory defined by
    `schema_root_dir`.

    This method uses the :class:`~pywbem_mock.DMTFCIMSchema` class to
    download the DMTF CIM schema defined by `schema_version` from the
    DMTF, into the `schema_root_dir` directory, extract the MOF files,
    create a MOF file with the `#include pragma` statements for the files
    in `class_names` and attempt to compile this set of files.

    It automatically compiles all of the DMTF qualifier declarations that
    are in the files `qualifiers.mof` and `qualifiers_optional.mof`.

    The result of the compilation is added to the specified CIM namespace
    of the mock repository.

    If the namespace does not exist, :exc:`~pywbem.CIMError` with status
    CIM_ERR_INVALID_NAMESPACE is raised.

    Parameters:

      schema_version (tuple of 3 integers (m, n, u)):
        Represents the DMTF CIM schema version where:

        * m is the DMTF CIM schema major version
        * n is the DMTF CIM schema minor version
        * u is the DMTF CIM schema update version

        This must represent a DMTF CIM schema that is available from the
        DMTF web site.

      schema_root_dir (:term:`string`):
        Directory into which the DMTF CIM schema is installed or will be
        installed. A single `schema_root_dir` can be used for multiple
        schema versions because subdirectories are uniquely defined by
        schema version and schema_type (i.e. Final or Experimental).

        Multiple DMTF CIM schemas may be maintained in the same
        `schema_root_dir` simultaneously because the MOF for each schema
        is extracted into a subdirectory identified by the schema version
        information.

      class_names (:term:`py:list` of :term:`string` or :term:`string`):
        List of class names from the DMTF CIM Schema to be included in the
        repository. A single class may be defined as a string not in a
        list. These must be classes in the defined DMTF CIM schema and can
        be a list of just the leaf classes required. The MOF compiler will
        search the DMTF CIM schema MOF classes for superclasses, classes
        defined in reference properties, and classes defined in
        EmbeddedInstance qualifiers and compile them also.

      use_experimental (:class:`py:bool`):
        If `True` the experimental version of the DMTF CIM Schema is
        installed or to be installed.

        If `False` (default) the final version of the DMTF CIM Schema is
        installed or to be installed.

      namespace (:term:`string`):
        The name of the target CIM namespace in the mock repository. This
        namespace is also used for lookup of any existing or dependent
        CIM objects. If `None`, the default namespace of the connection is
        used.

      verbose (:class:`py:bool`):
        If `True`, progress messages are output to stdout.

    Raises:

      ValueError: The schema cannot be retrieved from the DMTF web site,
        the schema_version is invalid, or a class name cannot be found in
        the defined DMTF CIM schema.

      TypeError: The 'schema_version' is not a valid tuple with 3 integer
        components.

      :exc:`~pywbem.MOFParseError`: Compile error in the MOF.

      :exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
        not exist.

      :exc:`~pywbem.CIMError`: Failure related to the CIM objects in the
        mock repository.
""" schema = DMTFCIMSchema(schema_version, schema_root_dir, use_experimental=use_experimental, verbose=verbose) schema_mof = schema.build_schema_mof(class_names) search_paths = schema.schema_mof_dir self.compile_mof_string(schema_mof, namespace=namespace, search_paths=[search_paths], verbose=verbose)
[ "def", "compile_dmtf_schema", "(", "self", ",", "schema_version", ",", "schema_root_dir", ",", "class_names", ",", "use_experimental", "=", "False", ",", "namespace", "=", "None", ",", "verbose", "=", "False", ")", ":", "schema", "=", "DMTFCIMSchema", "(", "schema_version", ",", "schema_root_dir", ",", "use_experimental", "=", "use_experimental", ",", "verbose", "=", "verbose", ")", "schema_mof", "=", "schema", ".", "build_schema_mof", "(", "class_names", ")", "search_paths", "=", "schema", ".", "schema_mof_dir", "self", ".", "compile_mof_string", "(", "schema_mof", ",", "namespace", "=", "namespace", ",", "search_paths", "=", "[", "search_paths", "]", ",", "verbose", "=", "verbose", ")" ]
45.762887
26.381443
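A usage sketch against a mocked connection; the schema version, download directory, and leaf class names below are illustrative:

import pywbem_mock

conn = pywbem_mock.FakedWBEMConnection(default_namespace='root/cimv2')
# Download (or reuse) DMTF schema 2.49.0 and compile two leaf classes
# plus all of their dependencies into the mock repository.
conn.compile_dmtf_schema((2, 49, 0), 'dmtf_schemas',
                         ['CIM_ComputerSystem', 'CIM_LogicalDevice'],
                         verbose=False)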
def getTokensEndLoc(): """Method to be called from within a parse action to determine the end location of the parsed tokens.""" import inspect fstack = inspect.stack() try: # search up the stack (through intervening argument normalizers) for correct calling routine for f in fstack[2:]: if f[3] == "_parseNoCache": endloc = f[0].f_locals["loc"] return endloc else: raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action") finally: del fstack
[ "def", "getTokensEndLoc", "(", ")", ":", "import", "inspect", "fstack", "=", "inspect", ".", "stack", "(", ")", "try", ":", "# search up the stack (through intervening argument normalizers) for correct calling routine\r", "for", "f", "in", "fstack", "[", "2", ":", "]", ":", "if", "f", "[", "3", "]", "==", "\"_parseNoCache\"", ":", "endloc", "=", "f", "[", "0", "]", ".", "f_locals", "[", "\"loc\"", "]", "return", "endloc", "else", ":", "raise", "ParseFatalException", "(", "\"incorrect usage of getTokensEndLoc - may only be called from within a parse action\"", ")", "finally", ":", "del", "fstack" ]
40.733333
21.8
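A sketch of calling this from inside a parse action, in the pyparsing versions that ship this helper; the grammar and the expected value are illustrative:

from pyparsing import Word, alphas, getTokensEndLoc

word = Word(alphas)

def record_end(s, loc, toks):
    # Only meaningful inside a parse action: where did this match end?
    toks['end'] = getTokensEndLoc()

word.setParseAction(record_end)
result = word.parseString("hello world")
print(result['end'])  # -> 5, the end location of 'hello'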
def run_competition(builders=[], task=BalanceTask(), Optimizer=HillClimber, rounds=3, max_eval=20, N_hidden=3, verbosity=0):
    """pybrain buildNetwork builds a subtly different network structure than build_ann... so compete them!

    Arguments:
      builders (list): network builder functions to compete; buildNetwork and util.build_ann are always appended to this list
      task (Task): task to compete at
      Optimizer (class): pybrain.Optimizer class to instantiate for each competitor
      rounds (int): number of times to run the competition
      max_eval (int): number of objective function evaluations that the optimizer is allowed in each round
      N_hidden (int): number of hidden nodes in each network being competed
      verbosity (int): values below 0 silence the per-round score printout

    The functional difference that I can see is that:
      buildNetwork connects the bias to the output
      build_ann does not

    The api differences are:
      build_ann allows heterogeneous layer types but the output layer is always linear
      buildNetwork allows specification of the output layer type
    """
    results = []
    builders = list(builders) + [buildNetwork, util.build_ann]
    for r in range(rounds):
        heat = []
        # FIXME: shuffle the order of the builders to keep things fair
        #        (like switching sides of the tennis court)
        for builder in builders:
            try:
                competitor = builder(task.outdim, N_hidden, task.indim, verbosity=verbosity)
            except NetworkError:
                competitor = builder(task.outdim, N_hidden, task.indim)

            # TODO: verify that a full reset is actually happening
            task.reset()
            optimizer = Optimizer(task, competitor, maxEvaluations=max_eval)
            t0 = time.time()
            nn, nn_best = optimizer.learn()
            t1 = time.time()
            heat += [(nn_best, t1-t0, nn)]
        results += [tuple(heat)]
        if verbosity >= 0:
            print([competitor_scores[:2] for competitor_scores in heat])

    # # alternatively:
    # agent = ( pybrain.rl.agents.OptimizationAgent(net, HillClimber())
    #           or
    #           pybrain.rl.agents.LearningAgent(net, pybrain.rl.learners.ENAC()) )
    # exp = pybrain.rl.experiments.EpisodicExperiment(task, agent).doEpisodes(100)

    means = [[np.array([r[i][j] for r in results]).mean() for i in range(len(results[0]))] for j in range(2)]
    if verbosity > -1:
        print('Mean Performance:')
        print(means)
        perfi, speedi = np.argmax(means[0]), np.argmin(means[1])
        print('And the winner for performance is ... Algorithm #{} (0-offset array index [{}])'.format(perfi+1, perfi))
        print('And the winner for speed is ... Algorithm #{} (0-offset array index [{}])'.format(speedi+1, speedi))
    return results, means
[ "def", "run_competition", "(", "builders", "=", "[", "]", ",", "task", "=", "BalanceTask", "(", ")", ",", "Optimizer", "=", "HillClimber", ",", "rounds", "=", "3", ",", "max_eval", "=", "20", ",", "N_hidden", "=", "3", ",", "verbosity", "=", "0", ")", ":", "results", "=", "[", "]", "builders", "=", "list", "(", "builders", ")", "+", "[", "buildNetwork", ",", "util", ".", "build_ann", "]", "for", "r", "in", "range", "(", "rounds", ")", ":", "heat", "=", "[", "]", "# FIXME: shuffle the order of the builders to keep things fair", "# (like switching sides of the tennis court)", "for", "builder", "in", "builders", ":", "try", ":", "competitor", "=", "builder", "(", "task", ".", "outdim", ",", "N_hidden", ",", "task", ".", "indim", ",", "verbosity", "=", "verbosity", ")", "except", "NetworkError", ":", "competitor", "=", "builder", "(", "task", ".", "outdim", ",", "N_hidden", ",", "task", ".", "indim", ")", "# TODO: verify that a full reset is actually happening", "task", ".", "reset", "(", ")", "optimizer", "=", "Optimizer", "(", "task", ",", "competitor", ",", "maxEvaluations", "=", "max_eval", ")", "t0", "=", "time", ".", "time", "(", ")", "nn", ",", "nn_best", "=", "optimizer", ".", "learn", "(", ")", "t1", "=", "time", ".", "time", "(", ")", "heat", "+=", "[", "(", "nn_best", ",", "t1", "-", "t0", ",", "nn", ")", "]", "results", "+=", "[", "tuple", "(", "heat", ")", "]", "if", "verbosity", ">=", "0", ":", "print", "(", "[", "competitor_scores", "[", ":", "2", "]", "for", "competitor_scores", "in", "heat", "]", ")", "# # alternatively:", "# agent = ( pybrain.rl.agents.OptimizationAgent(net, HillClimber())", "# or", "# pybrain.rl.agents.LearningAgent(net, pybrain.rl.learners.ENAC()) )", "# exp = pybrain.rl.experiments.EpisodicExperiment(task, agent).doEpisodes(100)", "means", "=", "[", "[", "np", ".", "array", "(", "[", "r", "[", "i", "]", "[", "j", "]", "for", "r", "in", "results", "]", ")", ".", "mean", "(", ")", "for", "i", "in", "range", "(", "len", "(", "results", "[", "0", "]", ")", ")", "]", "for", "j", "in", "range", "(", "2", ")", "]", "if", "verbosity", ">", "-", "1", ":", "print", "(", "'Mean Performance:'", ")", "print", "(", "means", ")", "perfi", ",", "speedi", "=", "np", ".", "argmax", "(", "means", "[", "0", "]", ")", ",", "np", ".", "argmin", "(", "means", "[", "1", "]", ")", "print", "(", "'And the winner for performance is ... Algorithm #{} (0-offset array index [{}])'", ".", "format", "(", "perfi", "+", "1", ",", "perfi", ")", ")", "print", "(", "'And the winner for speed is ... Algorithm #{} (0-offset array index [{}])'", ".", "format", "(", "speedi", "+", "1", ",", "speedi", ")", ")", "return", "results", ",", "means" ]
42.774194
28.467742
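A hedged invocation sketch; it relies on the module-level defaults (BalanceTask, HillClimber) already imported where this function lives:

# Compete the two default builders on pole balancing: 2 rounds, 20 evals each.
results, means = run_competition(rounds=2, max_eval=20, N_hidden=3, verbosity=0)
# means[0] holds the mean best fitness per builder; means[1] the mean wall-clock time.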
def add_alias(self, alias, indices, **kwargs): """ Add an alias to point to a set of indices. (See :ref:`es-guide-reference-api-admin-indices-aliases`) :param alias: the name of an alias :param indices: a list of indices """ indices = self.conn._validate_indices(indices) return self.change_aliases(['add', index, alias, self._get_alias_params(**kwargs)] for index in indices)
[ "def", "add_alias", "(", "self", ",", "alias", ",", "indices", ",", "*", "*", "kwargs", ")", ":", "indices", "=", "self", ".", "conn", ".", "_validate_indices", "(", "indices", ")", "return", "self", ".", "change_aliases", "(", "[", "'add'", ",", "index", ",", "alias", ",", "self", ".", "_get_alias_params", "(", "*", "*", "kwargs", ")", "]", "for", "index", "in", "indices", ")" ]
35.857143
17.142857
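Typical usage goes through a pyes connection's indices manager; the server address and index names are illustrative:

from pyes import ES

conn = ES('127.0.0.1:9200')  # illustrative address
# Point the alias 'logs-current' at two concrete indices.
conn.indices.add_alias('logs-current', ['logs-2014.01', 'logs-2014.02'])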
def get_cdd_hdd_candidate_models( data, minimum_non_zero_cdd, minimum_non_zero_hdd, minimum_total_cdd, minimum_total_hdd, beta_cdd_maximum_p_value, beta_hdd_maximum_p_value, weights_col, ): """ Return a list of candidate cdd_hdd models for a particular selection of cooling balance point and heating balance point Parameters ---------- data : :any:`pandas.DataFrame` A DataFrame containing at least the column ``meter_value`` and 1 to n columns each of the form ``hdd_<heating_balance_point>`` and ``cdd_<cooling_balance_point>``. DataFrames of this form can be made using the :any:`eemeter.create_caltrack_daily_design_matrix` or :any:`eemeter.create_caltrack_billing_design_matrix` methods. minimum_non_zero_cdd : :any:`int` Minimum allowable number of non-zero cooling degree day values. minimum_non_zero_hdd : :any:`int` Minimum allowable number of non-zero heating degree day values. minimum_total_cdd : :any:`float` Minimum allowable total sum of cooling degree day values. minimum_total_hdd : :any:`float` Minimum allowable total sum of heating degree day values. beta_cdd_maximum_p_value : :any:`float` The maximum allowable p-value of the beta cdd parameter. beta_hdd_maximum_p_value : :any:`float` The maximum allowable p-value of the beta hdd parameter. weights_col : :any:`str` or None The name of the column (if any) in ``data`` to use as weights. Returns ------- candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel` A list of cdd_hdd candidate models, with any associated warnings. """ cooling_balance_points = [ int(col[4:]) for col in data.columns if col.startswith("cdd") ] heating_balance_points = [ int(col[4:]) for col in data.columns if col.startswith("hdd") ] # CalTrack 3.2.2.1 candidate_models = [ get_single_cdd_hdd_candidate_model( data, minimum_non_zero_cdd, minimum_non_zero_hdd, minimum_total_cdd, minimum_total_hdd, beta_cdd_maximum_p_value, beta_hdd_maximum_p_value, weights_col, cooling_balance_point, heating_balance_point, ) for cooling_balance_point in cooling_balance_points for heating_balance_point in heating_balance_points if heating_balance_point <= cooling_balance_point ] return candidate_models
[ "def", "get_cdd_hdd_candidate_models", "(", "data", ",", "minimum_non_zero_cdd", ",", "minimum_non_zero_hdd", ",", "minimum_total_cdd", ",", "minimum_total_hdd", ",", "beta_cdd_maximum_p_value", ",", "beta_hdd_maximum_p_value", ",", "weights_col", ",", ")", ":", "cooling_balance_points", "=", "[", "int", "(", "col", "[", "4", ":", "]", ")", "for", "col", "in", "data", ".", "columns", "if", "col", ".", "startswith", "(", "\"cdd\"", ")", "]", "heating_balance_points", "=", "[", "int", "(", "col", "[", "4", ":", "]", ")", "for", "col", "in", "data", ".", "columns", "if", "col", ".", "startswith", "(", "\"hdd\"", ")", "]", "# CalTrack 3.2.2.1", "candidate_models", "=", "[", "get_single_cdd_hdd_candidate_model", "(", "data", ",", "minimum_non_zero_cdd", ",", "minimum_non_zero_hdd", ",", "minimum_total_cdd", ",", "minimum_total_hdd", ",", "beta_cdd_maximum_p_value", ",", "beta_hdd_maximum_p_value", ",", "weights_col", ",", "cooling_balance_point", ",", "heating_balance_point", ",", ")", "for", "cooling_balance_point", "in", "cooling_balance_points", "for", "heating_balance_point", "in", "heating_balance_points", "if", "heating_balance_point", "<=", "cooling_balance_point", "]", "return", "candidate_models" ]
36.405797
19.376812
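A usage sketch built on the eemeter design-matrix helper named in the docstring; the sample dataset and the threshold values are illustrative:

import eemeter

meter_data, temperature_data, _ = eemeter.load_sample('il-electricity-cdd-hdd-daily')
data = eemeter.create_caltrack_daily_design_matrix(meter_data, temperature_data)
candidates = get_cdd_hdd_candidate_models(
    data,
    minimum_non_zero_cdd=10, minimum_non_zero_hdd=10,
    minimum_total_cdd=20, minimum_total_hdd=20,
    beta_cdd_maximum_p_value=0.1, beta_hdd_maximum_p_value=0.1,
    weights_col=None,
)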
def file_sign( blockchain_id, hostname, input_path, passphrase=None, config_path=CONFIG_PATH, wallet_keys=None ):
    """
    Sign a file with the current blockchain ID's host's signing key.
    @config_path should be for the *client*, not blockstack-file
    Return {'status': True, 'sender_key_id': ..., 'sig': ...} on success
    Return {'error': ...} on error
    """
    config_dir = os.path.dirname(config_path)

    # find our encryption key
    key_info = file_key_lookup( blockchain_id, 0, hostname, config_path=config_path, wallet_keys=wallet_keys )
    if 'error' in key_info:
        return {'error': 'Failed to lookup encryption key'}

    # sign
    res = blockstack_gpg.gpg_sign( input_path, key_info, config_dir=config_dir )
    if 'error' in res:
        log.error("Failed to sign: %s" % res['error'])
        return {'error': 'Failed to sign'}

    return {'status': True, 'sender_key_id': key_info['key_id'], 'sig': res['sig']}
[ "def", "file_sign", "(", "blockchain_id", ",", "hostname", ",", "input_path", ",", "passphrase", "=", "None", ",", "config_path", "=", "CONFIG_PATH", ",", "wallet_keys", "=", "None", ")", ":", "config_dir", "=", "os", ".", "path", ".", "dirname", "(", "config_path", ")", "# find our encryption key", "key_info", "=", "file_key_lookup", "(", "blockchain_id", ",", "0", ",", "hostname", ",", "config_path", "=", "config_path", ",", "wallet_keys", "=", "wallet_keys", ")", "if", "'error'", "in", "key_info", ":", "return", "{", "'error'", ":", "'Failed to lookup encryption key'", "}", "# sign", "res", "=", "blockstack_gpg", ".", "gpg_sign", "(", "input_path", ",", "key_info", ",", "config_dir", "=", "config_dir", ")", "if", "'error'", "in", "res", ":", "log", ".", "error", "(", "\"Failed to sign: %s\"", "%", "res", "[", "'error'", "]", ")", "return", "{", "'error'", ":", "'Failed to sign'", "}", "return", "{", "'status'", ":", "True", ",", "'sender_key_id'", ":", "key_info", "[", "'key_id'", "]", ",", "'sig'", ":", "res", "[", "'sig'", "]", "}" ]
46.095238
28.095238
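A hedged call sketch; the blockchain ID, hostname, file path, and wallet_keys are hypothetical:

# Sign /tmp/report.pdf as the host 'laptop' of the ID 'alice.id'.
res = file_sign('alice.id', 'laptop', '/tmp/report.pdf',
                config_path=CONFIG_PATH, wallet_keys=wallet_keys)
if 'error' in res:
    log.error("signing failed: %s" % res['error'])
else:
    log.debug("signed by key %s" % res['sender_key_id'])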
def hyperrectangle(lower, upper, bdy=True):
    '''Returns the indicator function of a hyperrectangle.

    :param lower:

        Vector-like numpy array, defining the lower boundary of the hyperrectangle.\n
        len(lower) fixes the dimension.

    :param upper:

        Vector-like numpy array, defining the upper boundary of the hyperrectangle.\n

    :param bdy:

        Bool. When ``x`` is at the hyperrectangle's boundary then
        ``hr_indicator(x)`` returns ``True`` if and only if ``bdy=True``.

    '''
    # copy input
    lower = _np.array(lower)
    upper = _np.array(upper)

    dim = len(lower)

    if (upper <= lower).any():
        raise ValueError('invalid input; found upper <= lower')

    if bdy:
        def hr_indicator(x):
            if len(x) != dim:
                raise ValueError('input has wrong dimension (%i instead of %i)' % (len(x), dim))
            if (lower <= x).all() and (x <= upper).all():
                return True
            return False
    else:
        def hr_indicator(x):
            if len(x) != dim:
                raise ValueError('input has wrong dimension (%i instead of %i)' % (len(x), dim))
            if (lower < x).all() and (x < upper).all():
                return True
            return False

    # write docstring for hr_indicator
    hr_indicator.__doc__ = 'automatically generated hyperrectangle indicator function:'
    hr_indicator.__doc__ += '\nlower = ' + repr(lower)[6:-1]
    hr_indicator.__doc__ += '\nupper = ' + repr(upper)[6:-1]
    hr_indicator.__doc__ += '\nbdy = ' + str(bdy)

    return hr_indicator
[ "def", "hyperrectangle", "(", "lower", ",", "upper", ",", "bdy", "=", "True", ")", ":", "# copy input", "lower", "=", "_np", ".", "array", "(", "lower", ")", "upper", "=", "_np", ".", "array", "(", "upper", ")", "dim", "=", "len", "(", "lower", ")", "if", "(", "upper", "<=", "lower", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "'invalid input; found upper <= lower'", ")", "if", "bdy", ":", "def", "hr_indicator", "(", "x", ")", ":", "if", "len", "(", "x", ")", "!=", "dim", ":", "raise", "ValueError", "(", "'input has wrong dimension (%i instead of %i)'", "%", "(", "len", "(", "x", ")", ",", "dim", ")", ")", "if", "(", "lower", "<=", "x", ")", ".", "all", "(", ")", "and", "(", "x", "<=", "upper", ")", ".", "all", "(", ")", ":", "return", "True", "return", "False", "else", ":", "def", "hr_indicator", "(", "x", ")", ":", "if", "len", "(", "x", ")", "!=", "dim", ":", "raise", "ValueError", "(", "'input has wrong dimension (%i instead of %i)'", "%", "(", "len", "(", "x", ")", ",", "dim", ")", ")", "if", "(", "lower", "<", "x", ")", ".", "all", "(", ")", "and", "(", "x", "<", "upper", ")", ".", "all", "(", ")", ":", "return", "True", "return", "False", "# write docstring for hr_indicator", "hr_indicator", ".", "__doc__", "=", "'automatically generated hyperrectangle indicator function:'", "hr_indicator", ".", "__doc__", "+=", "'\\nlower = '", "+", "repr", "(", "lower", ")", "[", "6", ":", "-", "1", "]", "hr_indicator", ".", "__doc__", "+=", "'\\nupper = '", "+", "repr", "(", "upper", ")", "[", "6", ":", "-", "1", "]", "hr_indicator", ".", "__doc__", "+=", "'\\nbdy = '", "+", "str", "(", "bdy", ")", "return", "hr_indicator" ]
33.021277
25.531915
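A quick numeric check of the indicator on the unit square; assumes numpy is available (the module imports it as _np):

import numpy as np

# Indicator for [0,1] x [0,1], boundary included (bdy=True is the default).
ind = hyperrectangle(np.array([0., 0.]), np.array([1., 1.]))
assert ind(np.array([0.5, 0.5]))      # interior point
assert ind(np.array([1.0, 0.3]))      # boundary point counts with bdy=True
assert not ind(np.array([1.1, 0.5]))  # outside

# With bdy=False the boundary is excluded.
open_ind = hyperrectangle(np.array([0., 0.]), np.array([1., 1.]), bdy=False)
assert not open_ind(np.array([1.0, 0.3]))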
def get_book_metadata(self, asin): """Returns a book's metadata. Args: asin: The ASIN of the book to be queried. Returns: A `KindleBook` instance corresponding to the book associated with `asin`. """ kbm = self._get_api_call('get_book_metadata', '"%s"' % asin) return KindleCloudReaderAPI._kbm_to_book(kbm)
[ "def", "get_book_metadata", "(", "self", ",", "asin", ")", ":", "kbm", "=", "self", ".", "_get_api_call", "(", "'get_book_metadata'", ",", "'\"%s\"'", "%", "asin", ")", "return", "KindleCloudReaderAPI", ".", "_kbm_to_book", "(", "kbm", ")" ]
28.25
20.25
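A hedged usage sketch; how the API object is constructed (credentials, session handling) is assumed, and the ASIN is illustrative:

api = KindleCloudReaderAPI(EMAIL, PASSWORD)  # hypothetical constructor arguments
book = api.get_book_metadata('B00EXAMPLE0')  # illustrative ASIN
print(book)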
def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields)
[ "def", "hmget", "(", "key", ",", "*", "fields", ",", "*", "*", "options", ")", ":", "host", "=", "options", ".", "get", "(", "'host'", ",", "None", ")", "port", "=", "options", ".", "get", "(", "'port'", ",", "None", ")", "database", "=", "options", ".", "get", "(", "'db'", ",", "None", ")", "password", "=", "options", ".", "get", "(", "'password'", ",", "None", ")", "server", "=", "_connect", "(", "host", ",", "port", ",", "database", ",", "password", ")", "return", "server", ".", "hmget", "(", "key", ",", "*", "fields", ")" ]
26.333333
19
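Connection overrides travel through **options; a hedged sketch of calling this from another Salt execution module (address and settings are illustrative):

# __salt__ is the standard cross-call dunder available in execution modules.
values = __salt__['redis.hmget']('foo_hash', 'bar_field1', 'bar_field2',
                                 host='10.0.0.5', port=6380, db=0)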