def n_exec_stmt(self, node): self.write(self.indent, 'exec ') self.preorder(node[1]) if len(node) > 2: self.write(self.indent, ' in ') self.preorder(node[3]) if len(node) > 5: self.write(self.indent, ', ') self.preorder(node[5]) self.println() self.prune()
exec_stmt ::= EXEC expr
exec_stmt ::= EXEC expr IN test
exec_stmt ::= EXEC expr IN test COMMA test
def songname(song_name): ''' Improves file name by removing crap words ''' try: song_name = splitext(song_name)[0] except IndexError: pass # Words to omit from song title for better results through spotify's API chars_filter = "()[]{}-:_/=+\"\'" words_filter = ('official', 'lyrics', 'audio', 'remixed', 'remix', 'video', 'full', 'version', 'music', 'mp3', 'hd', 'hq', 'uploaded') # Replace characters to filter with spaces song_name = ''.join(map(lambda c: " " if c in chars_filter else c, song_name)) # Remove crap words song_name = re.sub('|'.join(re.escape(key) for key in words_filter), "", song_name, flags=re.IGNORECASE) # Remove duplicate spaces song_name = re.sub(' +', ' ', song_name) return song_name.strip()
Improves file name by removing crap words
def document(info=None, input=None, output=None): def wrapper(func): if info is not None: setattr(func, "_swg_info", info) if input is not None: setattr(func, "_swg_input", input) if output is not None: setattr(func, "_swg_output", output) return func return wrapper
Add extra information about request handler and its params
def thunk(coro): assert_corofunction(coro=coro) @asyncio.coroutine def wrapper(): return (yield from coro()) return wrapper
A thunk is a subroutine that is created, often automatically, to assist a call to another subroutine. Creates a thunk coroutine: a coroutine function that accepts no arguments and, when invoked, schedules the wrapped coroutine and returns its final result. See the Wikipedia page for more information about thunk subroutines: https://en.wikipedia.org/wiki/Thunk Arguments: coro (coroutinefunction): coroutine function to wrap and invoke. Returns: coroutinefunction Usage:: async def task(): return 'foo' coro = paco.thunk(task) await coro() # => 'foo' await coro() # => 'foo'
def t_name(self, s): r'[A-Za-z_][A-Za-z_0-9]*' if s in RESERVED_WORDS: self.add_token(s.upper(), s) else: self.add_token('NAME', s)
r'[A-Za-z_][A-Za-z_0-9]*'
def t_star_star(self, s): r'\*\*?' token_name = "STARSTAR" if len(s) == 2 else 'STAR' self.add_token(token_name, s)
r'\*\*?'
def t_whitespace_or_comment(self, s): r'([ \t]*[#].*[^\x04][\n]?)|([ \t]+)' if '#' in s: # We have a comment matches = re.match('(\s+)(.*[\n]?)', s) if matches and self.is_newline: self.handle_indent_dedent(matches.group(1)) s = matches.group(2) if s.endswith("\n"): self.add_token('COMMENT', s[:-1]) self.add_token('NEWLINE', "\n") else: self.add_token('COMMENT', s) elif self.is_newline: self.handle_indent_dedent(s) pass return
r'([ \t]*[#].*[^\x04][\n]?)|([ \t]+)'
def reduce(coro, iterable, initializer=None, limit=1, right=False, loop=None): assert_corofunction(coro=coro) assert_iter(iterable=iterable) # Reduced accumulator value acc = initializer # If iterable is empty, just return the initializer value if len(iterable) == 0: return initializer # Create concurrent executor pool = ConcurrentExecutor(limit=limit, loop=loop) # Reducer partial function for deferred coroutine execution def reducer(element): @asyncio.coroutine def wrapper(): nonlocal acc acc = yield from coro(acc, element) return wrapper # Support right reduction if right: iterable.reverse() # Iterate and attach coroutine for defer scheduling for element in iterable: pool.add(reducer(element)) # Wait until all coroutines finish yield from pool.run(ignore_empty=True) # Returns final reduced value return acc
Apply a function of two arguments cumulatively to the items of a sequence, from left to right, so as to reduce the sequence to a single value. Reduction is executed sequentially without concurrency, so values are passed in order. This function is the asynchronous coroutine equivalent to the Python standard `functools.reduce()` function. This function is a coroutine. This function can be composed in a pipeline chain with ``|`` operator. Arguments: coro (coroutine function): reducer coroutine binary function. iterable (iterable|asynchronousiterable): an iterable collection yielding coroutine functions. initializer (mixed): initial accumulator value used in the first reduction call. limit (int): max iteration concurrency limit. Use ``0`` for no limit. right (bool): reduce iterable from right to left. loop (asyncio.BaseEventLoop): optional event loop to use. Raises: TypeError: if input arguments are not valid. Returns: mixed: accumulated final reduced value. Usage:: async def reducer(acc, num): return acc + num await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0) # => 15
def times(coro, limit=1, raise_exception=False, return_value=None): assert_corofunction(coro=coro) # Store call times limit = max(limit, 1) times = limit # Store result from last execution result = None @asyncio.coroutine def wrapper(*args, **kw): nonlocal limit nonlocal result # Check execution limit if limit == 0: if raise_exception: raise RuntimeError(ExceptionMessage.format(times)) if return_value: return return_value return result # Decreases counter limit -= 1 # If return_value is present, do not memoize result if return_value: return (yield from coro(*args, **kw)) # Schedule coroutine and memoize result result = yield from coro(*args, **kw) return result return wrapper
Wraps a given coroutine function to be executed only a certain number of times. If the execution limit is exceeded, the return value of the last execution is returned as result. You can optionally define a custom return value on exceeded via the `return_value` param. This function can be used as decorator. Arguments: coro (coroutinefunction): coroutine function to wrap. limit (int): max limit of coroutine executions. raise_exception (bool): raise an exception if the execution limit is exceeded. return_value (mixed): value to return when the execution limit is exceeded. Raises: TypeError: if coro argument is not a coroutine function. RuntimeError: if the max execution limit is exceeded (optional). Returns: coroutinefunction Usage:: async def mul_2(num): return num * 2 timed = paco.times(mul_2, 3) await timed(2) # => 4 await timed(3) # => 6 await timed(4) # => 8 await timed(5) # ignored! # => 8
def uninstall(ctx, module_list): modules.uninstall(ctx, module_list) ctx.log_line(u'Deprecated: use anthem.lyrics.modules.uninstall instead of ' 'anthem.lyrics.uninstaller.uninstall')
uninstall module
def log(func=None, name=None, timing=True, timestamp=False): # support to be called as @log or as @log(name='') if func is None: return functools.partial(log, name=name, timing=timing, timestamp=timestamp) @functools.wraps(func) def decorated(*args, **kwargs): assert len(args) > 0 and hasattr(args[0], 'log'), \ "The first argument of the decorated function must be a Context" ctx = args[0] message = name if message is None: if func.__doc__: message = func.__doc__.splitlines()[0].strip() if message is None: message = func.__name__ with ctx.log(message, timing=timing, timestamp=timestamp): return func(*args, **kwargs) return decorated
Decorator to show a description of the running function. By default, it outputs the first line of the docstring. If the docstring is empty, it displays the name of the function. Alternatively, if a ``name`` is specified, it will display that only. It can be called as ``@log`` or as ``@log(name='abc', timing=True, timestamp=True)``.
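A short usage sketch of this decorator may help. It assumes the anthem runner passes a Context object (here `ctx`) as the first argument of each decorated function; the xmlids and field values below are hypothetical.

# Hedged usage sketch of the @log decorator above; ctx is assumed to be an
# anthem Context backed by an Odoo environment.
@log
def setup_company(ctx):
    """Set up the main company"""
    ctx.env.ref('base.main_company').write({'name': 'ACME'})  # hypothetical value

@log(name='Creating demo partners', timing=True, timestamp=True)
def create_partners(ctx):
    ctx.env['res.partner'].create({'name': 'Demo partner'})  # hypothetical value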
def constant(value, delay=None): @asyncio.coroutine def coro(): if delay: yield from asyncio.sleep(delay) return value return coro
Returns a coroutine function that when called, always returns the provided value. This function has an alias: `paco.identity`. Arguments: value (mixed): value to constantly return when coroutine is called. delay (int/float): optional return value delay in seconds. Returns: coroutinefunction Usage:: coro = paco.constant('foo') await coro() # => 'foo' await coro() # => 'foo'
def dropwhile(coro, iterable, loop=None): drop = False @asyncio.coroutine def assert_fn(element): nonlocal drop if element and not drop: return False if not element and not drop: drop = True return True if drop else element @asyncio.coroutine def filter_fn(element): return (yield from coro(element)) return (yield from filter(filter_fn, iterable, assert_fn=assert_fn, limit=1, loop=loop))
Make an iterator that drops elements from the iterable as long as the predicate is true; afterwards, returns every element. Note, the iterator does not produce any output until the predicate first becomes false, so it may have a lengthy start-up time. This function is pretty much equivalent to the Python standard `itertools.dropwhile()`, but designed to be used with async coroutines. This function is a coroutine. This function can be composed in a pipeline chain with ``|`` operator. Arguments: coro (coroutine function): predicate coroutine function to evaluate each element with. iterable (iterable|asynchronousiterable): an iterable collection yielding coroutine functions. loop (asyncio.BaseEventLoop): optional event loop to use. Raises: TypeError: if coro argument is not a coroutine function. Returns: filtered values (list): ordered list of resultant values. Usage:: async def filter(num): return num < 4 await paco.dropwhile(filter, [1, 2, 3, 4, 5, 1]) # => [4, 5, 1]
def add_xmlid(ctx, record, xmlid, noupdate=False): try: ref_id, __, __ = ctx.env['ir.model.data'].xmlid_lookup(xmlid) except ValueError: pass # does not exist, we'll create a new one else: return ctx.env['ir.model.data'].browse(ref_id) if '.' in xmlid: module, name = xmlid.split('.') else: module = '' name = xmlid return ctx.env['ir.model.data'].create({ 'name': name, 'module': module, 'model': record._name, 'res_id': record.id, 'noupdate': noupdate, })
Add an XMLID to an existing record
def create_or_update(ctx, model, xmlid, values): if isinstance(model, basestring): model = ctx.env[model] record = ctx.env.ref(xmlid, raise_if_not_found=False) if record: record.update(values) else: record = model.create(values) add_xmlid(ctx, record, xmlid) return record
Create or update a record matching xmlid with values
def safe_record(ctx, item): if isinstance(item, basestring): return ctx.env.ref(item) return item
Make sure we get a record instance even if we pass an xmlid.
def switch_company(ctx, company): current_company = ctx.env.user.company_id ctx.env.user.company_id = safe_record(ctx, company) yield ctx ctx.env.user.company_id = current_company
Context manager to switch current company. Accepts both company record and xmlid.
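A brief usage sketch of these anthem helpers, assuming `ctx` is an anthem Context wired to an Odoo environment and that the functions above are in scope; the xmlids and field values are hypothetical.

# Hedged usage sketch; 'base.main_company' is a standard Odoo xmlid, the
# '__setup__.*' xmlids below are hypothetical.
with switch_company(ctx, 'base.main_company') as ctx:
    partner = create_or_update(ctx, 'res.partner',
                               '__setup__.partner_demo',
                               {'name': 'Demo partner'})
    add_xmlid(ctx, partner, '__setup__.partner_demo_alias', noupdate=True)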
def apply(coro, *args, **kw): assert_corofunction(coro=coro) @asyncio.coroutine def wrapper(*_args, **_kw): # Explicitly ignore wrapper arguments return (yield from coro(*args, **kw)) return wrapper
Creates a continuation coroutine function with some arguments already applied. Useful as a shorthand when combined with other control flow functions. Any arguments passed to the returned function are ignored; only the arguments originally passed to apply are used. This is similar to `paco.partial()`. This function can be used as decorator. Arguments: coro (coroutinefunction): coroutine function to wrap. *args (mixed): mixed variadic arguments for partial application. **kwargs (mixed): mixed variadic keyword arguments for partial application. Raises: TypeError: if coro argument is not a coroutine function. Returns: coroutinefunction: wrapped coroutine function. Usage:: async def hello(name, mark='!'): print('Hello, {name}{mark}'.format(name=name, mark=mark)) hello_mike = paco.apply(hello, 'Mike') await hello_mike() # => Hello, Mike! hello_mike = paco.apply(hello, 'Mike', mark='?') await hello_mike() # => Hello, Mike?
def run(coro, loop=None): loop = loop or asyncio.get_event_loop() return loop.run_until_complete(coro)
Convenient shortcut alias to ``loop.run_until_complete``. Arguments: coro (coroutine): coroutine object to schedule. loop (asyncio.BaseEventLoop): optional event loop to use. Defaults to: ``asyncio.get_event_loop()``. Returns: mixed: returned value by coroutine. Usage:: async def mul_2(num): return num * 2 paco.run(mul_2(4)) # => 8
def postorder(self, node=None): if node is None: node = self.ast try: first = iter(node) except TypeError: first = None if first: for kid in node: self.postorder(kid) try: name = 'n_' + self.typestring(node) if hasattr(self, name): func = getattr(self, name) func(node) else: self.default(node) except GenericASTTraversalPruningException: return name = name + '_exit' if hasattr(self, name): func = getattr(self, name) func(node)
Walk the tree in roughly 'postorder' (a bit of a lie explained below). For each node with typestring name *name*, if the node has a method called n_*name*, call that before walking children. If there is no such method defined, call self.default(node) instead. Subclasses of GenericASTTraversal will probably want to override this method. If the node has a method called *name*_exit, that is called after all children have been called. So in this sense this function is a lie. In typical use a node with children can call "postorder" in any order it wants, which may skip children or order them in ways other than first to last. In fact, this happens.
def p_expr_add_term(self, args): ' expr ::= expr ADD_OP term ' op = 'add' if args[1].attr == '+' else 'subtract' return AST(op, [args[0], args[2]]f p_expr_add_term(self, args): ' expr ::= expr ADD_OP term ' op = 'add' if args[1].attr == '+' else 'subtract' return AST(op, [args[0], args[2]])
expr ::= expr ADD_OP term
def p_term_mult_factor(self, args): ' term ::= term MULT_OP factor ' op = 'multiply' if args[1].attr == '*' else 'divide' return AST(op, [args[0], args[2]]f p_term_mult_factor(self, args): ' term ::= term MULT_OP factor ' op = 'multiply' if args[1].attr == '*' else 'divide' return AST(op, [args[0], args[2]])
term ::= term MULT_OP factor
def addAttribute(self, attrib: Attribute): self._attributes[attrib.key()] = attrib req = attrib.ledgerRequest() if req: self.pendRequest(req, attrib.key()) return len(self._pending)
Used to create a new attribute on Sovrin :param attrib: attribute to add :return: number of pending txns
def addNode(self, node: Node): self._nodes[node.id] = node req = node.ledgerRequest() if req: self.pendRequest(req, node.id) return len(self._pending)
Used to add a new node on Sovrin :param node: Node :return: number of pending txns
def doPoolUpgrade(self, upgrade: Upgrade): key = upgrade.key self._upgrades[key] = upgrade req = upgrade.ledgerRequest() if req: self.pendRequest(req, key) return len(self._pending)
Used to send a new code upgrade :param upgrade: upgrade data :return: number of pending txns
def handleIncomingReply(self, observer_name, reqId, frm, result, numReplies): preparedReq = self._prepared.get((result[IDENTIFIER], reqId)) if not preparedReq: raise RuntimeError('no matching prepared value for {},{}'. format(result[IDENTIFIER], reqId)) typ = result.get(TXN_TYPE) if typ and typ in self.replyHandler: self.replyHandler[typ](result, preparedReq)
Called by an external entity, like a Client, to notify of incoming replies :return:
def requestAttribute(self, attrib: Attribute, sender): self._attributes[attrib.key()] = attrib req = attrib.getRequest(sender) if req: return self.prepReq(req, key=attrib.key())
Used to get a raw attribute from Sovrin :param attrib: attribute to request :return: req object
def requestSchema(self, nym, name, version, sender): operation = { TARGET_NYM: nym, TXN_TYPE: GET_SCHEMA, DATA: {NAME : name, VERSION: version} } req = Request(sender, operation=operation) return self.prepReq(req)
Used to get a schema from Sovrin :param nym: nym that schema is attached to :param name: name of schema :param version: version of schema :return: req object
def requestClaimDef(self, seqNo, signature, sender): operation = { TXN_TYPE: GET_CLAIM_DEF, ORIGIN: sender, REF : seqNo, SIGNATURE_TYPE : signature } req = Request(sender, operation=operation) return self.prepReq(req)
Used to get a claim def from Sovrin :param seqNo: reference number of schema :param signature: CL is only supported option currently :return: req object
def t_file_or_func(self, s): r'(?:[^*-+,\d\'"\t \n:][^\'"\t \n:,]*)|(?:^)|(?:\'\'\'.+\'\'\')' maybe_funcname = True if s == 'if': self.add_token('IF', s) return if s[0] in frozenset(('"', "'")): # Pick out text inside of triple-quoted string if ( (s.startswith("'''") and s.endswith("'''") ) or (s.startswith('"""') and s.endswith('"""') ) ): base = s[3:-3] else: # Pick out text inside singly-quote string base = s[1:-1] maybe_funcname = False else: base = s pos = self.pos if maybe_funcname and re.match('[a-zA-Z_][[a-zA-Z_.0-9\[\]]+\(\)', s): self.add_token('FUNCNAME', base) else: self.add_token('FILENAME', base) self.pos = pos + len(s)
r'(?:[^*-+,\d\'"\t \n:][^\'"\t \n:,]*)|(?:^""".+""")|(?:\'\'\'.+\'\'\')'
def series(*coros_or_futures, timeout=None, loop=None, return_exceptions=False): return (yield from gather(*coros_or_futures, loop=loop, limit=1, timeout=timeout, return_exceptions=return_exceptions))
Run the given coroutine functions in series, each one running once the previous execution has completed. If any coroutine raises an exception, no more coroutines are executed. Otherwise, the coroutines' returned values will be returned as a `list`. ``timeout`` can be used to control the maximum number of seconds to wait before returning. timeout can be an int or float. If timeout is not specified or None, there is no limit to the wait time. If ``return_exceptions`` is True, exceptions in the tasks are treated the same as successful results, and gathered in the result list; otherwise, the first raised exception will be immediately propagated to the returned future. All futures must share the same event loop. This function is basically the sequential execution version of ``asyncio.gather()``. Interface compatible with ``asyncio.gather()``. This function is a coroutine. Arguments: *coros_or_futures (iter|list): an iterable collection yielding coroutine functions. timeout (int/float): maximum number of seconds to wait before returning. return_exceptions (bool): exceptions in the tasks are treated the same as successful results, instead of raising them. loop (asyncio.BaseEventLoop): optional event loop to use. *args (mixed): optional variadic arguments to pass to the coroutine functions. Returns: list: coroutines returned results. Raises: TypeError: in case of invalid coroutine object. ValueError: in case of empty set of coroutines or futures. TimeoutError: if execution takes more than expected. Usage:: async def sum(x, y): return x + y await paco.series( sum(1, 2), sum(2, 3), sum(3, 4)) # => [3, 5, 7]
def repeat(coro, times=1, step=1, limit=1, loop=None): assert_corofunction(coro=coro) # Iterate and attach coroutine for defer scheduling times = max(int(times), 1) iterable = range(1, times + 1, step) # Run iterable times return (yield from map(coro, iterable, limit=limit, loop=loop))
Executes the coroutine function a given number of times, and accumulates results in order as you would use with ``map``. Execution concurrency is configurable using ``limit`` param. This function is a coroutine. Arguments: coro (coroutinefunction): coroutine function to schedule. times (int): number of times to execute the coroutine. step (int): increment iteration step, as with ``range()``. limit (int): concurrency execution limit. Defaults to 1. loop (asyncio.BaseEventLoop): optional event loop to use. Raises: TypeError: if coro is not a coroutine function. Returns: list: accumulated yielded values returned by coroutine. Usage:: async def mul_2(num): return num * 2 await paco.repeat(mul_2, times=5) # => [2, 4, 6, 8, 10]
def once(coro, raise_exception=False, return_value=None): return times(coro, limit=1, return_value=return_value, raise_exception=raise_exception)
Wrap a given coroutine function so that it is restricted to one execution. Repeated calls to the coroutine function will return the value of the first invocation. This function can be used as decorator. Arguments: coro (coroutinefunction): coroutine function to wrap. raise_exception (bool): raise an exception if the execution limit is exceeded. return_value (mixed): value to return when the execution limit is exceeded, instead of the memoized one from the last invocation. Raises: TypeError: if coro argument is not a coroutine function. Returns: coroutinefunction Usage:: async def mul_2(num): return num * 2 once = paco.once(mul_2) await once(2) # => 4 await once(3) # => 4 once = paco.once(mul_2, return_value='exceeded') await once(2) # => 4 await once(3) # => 'exceeded'
def defer(coro, delay=1): assert_corofunction(coro=coro) @asyncio.coroutine def wrapper(*args, **kw): # Wait until we're done yield from asyncio.sleep(delay) return (yield from coro(*args, **kw)) return wrapper
Returns a coroutine function wrapper that will defer the given coroutine execution for a certain amount of seconds in a non-blocking way. This function can be used as decorator. Arguments: coro (coroutinefunction): coroutine function to defer. delay (int/float): number of seconds to defer execution. Raises: TypeError: if coro argument is not a coroutine function. Returns: coroutinefunction: wrapped coroutine function that defers the execution. Usage:: # Usage as function await paco.defer(coro, delay=1) await paco.defer(coro, delay=0.5) # Usage as decorator @paco.defer(delay=1) async def mul_2(num): return num * 2 await mul_2(2) # => 4
def safe_run(coro, return_exceptions=False): try: result = yield from coro except Exception as err: if return_exceptions: result = err else: raise err return result
Executes a given coroutine and optionally catches exceptions, returning them as value. This function is intended to be used internally.
def collect(coro, index, results, preserve_order=False, return_exceptions=False): result = yield from safe_run(coro, return_exceptions=return_exceptions) if preserve_order: results[index] = result else: results.append(result)
Collect is used internally to execute coroutines and collect the returned value. This function is intended to be used internally.
def reset(self): if self.running: raise RuntimeError('paco: executor is still running') self.pool.clear() self.observer.clear() self.semaphore = asyncio.Semaphore(self.limit, loop=self.loop)
Resets the executor scheduler internal state. Raises: RuntimeError: if the executor is still running.
def add(self, coro, *args, **kw): # Create coroutine object if a function is provided if asyncio.iscoroutinefunction(coro): coro = coro(*args, **kw) # Verify coroutine if not asyncio.iscoroutine(coro): raise TypeError('paco: coro must be a coroutine object') # Store coroutine with arguments for deferred execution index = max(len(self.pool), 0) task = Task(index, coro) # Append the coroutine data to the pool self.pool.append(task) return coro
Adds a new coroutine function with optional variadic arguments. Arguments: coro (coroutine function): coroutine to execute. *args (mixed): optional variadic arguments Raises: TypeError: if the coro object is not a valid coroutine Returns: future: coroutine wrapped future
def next_interval(self, interval): index = np.where(self.intervals == interval) if index[0][0] + 1 < len(self.intervals): return self.intervals[index[0][0] + 1] else: raise IndexError("Ran out of intervals!")
Given a value of an interval, this function returns the next interval value
def nearest_interval(self, interval): thresh_range = 25 # in cents if interval < self.intervals[0] - thresh_range or interval > self.intervals[-1] + thresh_range: raise IndexError("The interval given is beyond " + str(thresh_range) + " cents over the range of intervals defined.") index = find_nearest_index(self.intervals, interval) return self.intervals[index]
This function returns the nearest interval to any given interval.
def brent_optimise(node1, node2, min_brlen=0.001, max_brlen=10, verbose=False): from scipy.optimize import minimize_scalar wrapper = BranchLengthOptimiser(node1, node2, (min_brlen + max_brlen) / 2.) n = minimize_scalar(lambda x: -wrapper(x)[0], method='brent', bracket=(min_brlen, max_brlen))['x'] if verbose: logger.info(wrapper) if n < min_brlen: n = min_brlen wrapper(n) return n, -1 / wrapper.get_d2lnl(n)
Optimise the maximum-likelihood distance between two partials. `min_brlen` and `max_brlen` set the optimisation brackets.
def pairdists(alignment, subs_model, alpha=None, ncat=4, tolerance=1e-6, verbose=False): # Check if not isinstance(subs_model, phylo_utils.models.Model): raise ValueError("Can't handle this model: {}".format(subs_model)) if alpha is None: alpha = 1.0 ncat = 1 # Set up markov model tm = TransitionMatrix(subs_model) gamma_rates = discrete_gamma(alpha, ncat) partials = alignment_to_partials(alignment) seqnames = alignment.get_names() nseq = len(seqnames) distances = np.zeros((nseq, nseq)) variances = np.zeros((nseq, nseq)) # Check the model has the appropriate size if not subs_model.size == partials[seqnames[0]].shape[1]: raise ValueError("Model {} expects {} states, but the alignment has {}".format(subs_model.name, subs_model.size, partials[seqnames[0]].shape[1])) nodes = [phylo_utils.likelihood.LnlModel(tm) for seq in range(nseq)] for node, header in zip(nodes, seqnames): node.set_partials(partials[header]) # retrieve partial likelihoods from partials dictionary for i, j in itertools.combinations(range(nseq), 2): brlen, var = brent_optimise(nodes[i], nodes[j], verbose=verbose) distances[i, j] = distances[j, i] = brlen variances[i, j] = variances[j, i] = var dm = DistanceMatrix.from_array(distances, names=seqnames) vm = DistanceMatrix.from_array(variances, names=seqnames) return dm, vm
Load an alignment, and calculate all pairwise distances and variances. The `subs_model` parameter must be a substitution model type from phylo_utils.
def write_alignment(self, filename, file_format, interleaved=None): if file_format == 'phylip': file_format = 'phylip-relaxed' AlignIO.write(self._msa, filename, file_format)
Write the alignment to file using Bio.AlignIO
def compute_distances(self, model, alpha=None, ncat=4, tolerance=1e-6): return pairdists(self, model, alpha, ncat, tolerance)
Compute pairwise distances between all sequences according to a given substitution model, `model`, of type phylo_utils.models.Model (e.g. phylo_utils.models.WAG(freqs), phylo_utils.models.GTR(rates, freqs)). The number of gamma categories is controlled by `ncat`. Setting ncat=1 disables gamma rate variation. The gamma alpha parameter must be supplied to enable gamma rate variation.
def simulate(self, nsites, transition_matrix, tree, ncat=1, alpha=1): sim = SequenceSimulator(transition_matrix, tree, ncat, alpha) return list(sim.simulate(nsites).items())
Return sequences simulated under the transition matrix's model
def bootstrap(self): new_sites = sorted(sample_wr(self.get_sites())) seqs = list(zip(self.get_names(), (''.join(seq) for seq in zip(*new_sites)))) return self.__class__(seqs)
Return a new Alignment that is a bootstrap replicate of self
def simulate(self, n): self.tree._tree.seed_node.states = self.ancestral_states(n) categories = np.random.randint(self.ncat, size=n).astype(np.intc) for node in self.tree.preorder(skip_seed=True): node.states = self.evolve_states(node.parent_node.states, categories, node.pmats) if node.is_leaf(): self.sequences[node.taxon.label] = node.states return self.sequences_to_string()
Evolve multiple sites during one tree traversal
def ancestral_states(self, n): anc = np.empty(n, dtype=np.intc) _weighted_choices(self.state_indices, self.freqs, anc) return anc
Generate ancestral sequence states from the equilibrium frequencies
def evolve_states(self, parent_states, categories, probs): child_states = np.empty(parent_states.shape, dtype=np.intc) _evolve_states(self.state_indices, parent_states, categories, probs, child_states) return child_states
Evolve states from parent to child. States are sampled from gamma categories passed in the array 'categories'. The branch length information is encoded in the probability matrix, 'probs', generated in __init__.
def sequences_to_string(self): return {k: ''.join(self.states[v]) for (k, v) in self.sequences.items()}
Convert state indices to a string of characters
def crc_srec(hexstr): crc = sum(bytearray(binascii.unhexlify(hexstr))) crc &= 0xff crc ^= 0xff return crc
Calculate the CRC for given Motorola S-Record hexstring.
def crc_ihex(hexstr): crc = sum(bytearray(binascii.unhexlify(hexstr))) crc &= 0xff crc = ((~crc + 1) & 0xff) return crc
Calculate the CRC for given Intel HEX hexstring.
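The two checksum rules above differ only in the final step: one's complement for Motorola S-Records, two's complement for Intel HEX. A minimal standalone sketch, using an example payload (the record content without its trailing checksum byte):

import binascii

def srec_checksum(hexstr):
    # Motorola S-Record: sum the bytes, keep the low byte, one's complement.
    return (sum(bytearray(binascii.unhexlify(hexstr))) & 0xff) ^ 0xff

def ihex_checksum(hexstr):
    # Intel HEX: sum the bytes, keep the low byte, two's complement.
    return (~(sum(bytearray(binascii.unhexlify(hexstr))) & 0xff) + 1) & 0xff

payload = '0300300002337A'  # example record payload (checksum byte excluded)
print('{:02X} {:02X}'.format(srec_checksum(payload), ihex_checksum(payload)))
# => 1D 1E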
def pack_srec(type_, address, size, data): if type_ in '0159': line = '{:02X}{:04X}'.format(size + 2 + 1, address) elif type_ in '268': line = '{:02X}{:06X}'.format(size + 3 + 1, address) elif type_ in '37': line = '{:02X}{:08X}'.format(size + 4 + 1, address) else: raise Error( "expected record type 0..3 or 5..9, but got '{}'".format(type_)) if data: line += binascii.hexlify(data).decode('ascii').upper() return 'S{}{}{:02X}'.format(type_, line, crc_srec(line))
Create a Motorola S-Record record of given data.
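A hedged usage sketch of pack_srec, assuming the function above is in scope; the address and data bytes are arbitrary:

# Assumes pack_srec() from above is importable; address/data are arbitrary.
record = pack_srec('1', 0x0100, 4, b'\x21\x46\x01\x36')
print(record)  # expected 'S10701002146013659' given the checksum rule above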
def unpack_srec(record): # Minimum STSSCC, where T is type, SS is size and CC is crc. if len(record) < 6: raise Error("record '{}' too short".format(record)) if record[0] != 'S': raise Error( "record '{}' not starting with an 'S'".format(record)) size = int(record[2:4], 16) type_ = record[1:2] if type_ in '0159': width = 4 elif type_ in '268': width = 6 elif type_ in '37': width = 8 else: raise Error( "expected record type 0..3 or 5..9, but got '{}'".format(type_)) data_offset = (4 + width) crc_offset = (4 + 2 * size - 2) address = int(record[4:data_offset], 16) data = binascii.unhexlify(record[data_offset:crc_offset]) actual_crc = int(record[crc_offset:], 16) expected_crc = crc_srec(record[2:crc_offset]) if actual_crc != expected_crc: raise Error( "expected crc '{:02X}' in record {}, but got '{:02X}'".format( expected_crc, record, actual_crc)) return (type_, address, size - 1 - width // 2, data)
Unpack given Motorola S-Record record into variables.
def pack_ihex(type_, address, size, data): line = '{:02X}{:04X}{:02X}'.format(size, address, type_) if data: line += binascii.hexlify(data).decode('ascii').upper() return ':{}{:02X}'.format(line, crc_ihex(line))
Create an Intel HEX record of given data.
def unpack_ihex(record): # Minimum :SSAAAATTCC, where SS is size, AAAA is address, TT is # type and CC is crc. if len(record) < 11: raise Error("record '{}' too short".format(record)) if record[0] != ':': raise Error("record '{}' not starting with a ':'".format(record)) size = int(record[1:3], 16) address = int(record[3:7], 16) type_ = int(record[7:9], 16) if size > 0: data = binascii.unhexlify(record[9:9 + 2 * size]) else: data = b'' actual_crc = int(record[9 + 2 * size:], 16) expected_crc = crc_ihex(record[1:9 + 2 * size]) if actual_crc != expected_crc: raise Error( "expected crc '{:02X}' in record {}, but got '{:02X}'".format( expected_crc, record, actual_crc)) return (type_, address, size, data)
Unpack given Intel HEX record into variables.
def chunks(self, size=32, alignment=1): if (size % alignment) != 0: raise Error( 'size {} is not a multiple of alignment {}'.format( size, alignment)) address = self.address data = self.data # First chunk may be shorter than `size` due to alignment. chunk_offset = (address % alignment) if chunk_offset != 0: first_chunk_size = (alignment - chunk_offset) yield self._Chunk(address, data[:first_chunk_size]) address += (first_chunk_size // self._word_size_bytes) data = data[first_chunk_size:] else: first_chunk_size = 0 for offset in range(0, len(data), size): yield self._Chunk(address + offset // self._word_size_bytes, data[offset:offset + size])
Return chunks of the data aligned as given by `alignment`. `size` must be a multiple of `alignment`. Each chunk is returned as a named two-tuple of its address and data.
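The alignment handling above can be illustrated with a small standalone sketch, independent of the segment class and ignoring the word-size division used there:

def aligned_chunks(address, data, size=32, alignment=4):
    # The first chunk may be shorter so that every subsequent chunk starts
    # on an aligned address, mirroring the logic of the method above.
    offset = address % alignment
    if offset:
        head = alignment - offset
        yield address, data[:head]
        address += head
        data = data[head:]
    for i in range(0, len(data), size):
        yield address + i, data[i:i + size]

print(list(aligned_chunks(0x1003, bytes(range(20)), size=8, alignment=4)))
# first chunk holds 1 byte; the rest start on 4-byte aligned addresses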
def add_data(self, minimum_address, maximum_address, data, overwrite): if minimum_address == self.maximum_address: self.maximum_address = maximum_address self.data += data elif maximum_address == self.minimum_address: self.minimum_address = minimum_address self.data = data + self.data elif (overwrite and minimum_address < self.maximum_address and maximum_address > self.minimum_address): self_data_offset = minimum_address - self.minimum_address # Prepend data. if self_data_offset < 0: self_data_offset *= -1 self.data = data[:self_data_offset] + self.data del data[:self_data_offset] self.minimum_address = minimum_address # Overwrite overlapping part. self_data_left = len(self.data) - self_data_offset if len(data) <= self_data_left: self.data[self_data_offset:self_data_offset + len(data)] = data data = bytearray() else: self.data[self_data_offset:] = data[:self_data_left] data = data[self_data_left:] # Append data. if len(data) > 0: self.data += data self.maximum_address = maximum_address else: raise AddDataError( 'data added to a segment must be adjacent to or overlapping ' 'with the original segment data')
Add given data to this segment. The added data must be adjacent to the current segment data, otherwise an exception is thrown.
def remove_data(self, minimum_address, maximum_address): if ((minimum_address >= self.maximum_address) and (maximum_address <= self.minimum_address)): raise Error('cannot remove data that is not part of the segment') if minimum_address < self.minimum_address: minimum_address = self.minimum_address if maximum_address > self.maximum_address: maximum_address = self.maximum_address remove_size = maximum_address - minimum_address part1_size = minimum_address - self.minimum_address part1_data = self.data[0:part1_size] part2_data = self.data[part1_size + remove_size:] if len(part1_data) and len(part2_data): # Update this segment and return the second segment. self.maximum_address = self.minimum_address + part1_size self.data = part1_data return _Segment(maximum_address, maximum_address + len(part2_data), part2_data, self._word_size_bytes) else: # Update this segment. if len(part1_data) > 0: self.maximum_address = minimum_address self.data = part1_data elif len(part2_data) > 0: self.minimum_address = maximum_address self.data = part2_data else: self.maximum_address = self.minimum_address self.data = bytearray()
Remove given data range from this segment. Returns the second segment if the removed data splits this segment in two.
def add(self, segment, overwrite=False): if self._list: if segment.minimum_address == self._current_segment.maximum_address: # Fast insertion for adjacent segments. self._current_segment.add_data(segment.minimum_address, segment.maximum_address, segment.data, overwrite) else: # Linear insert. for i, s in enumerate(self._list): if segment.minimum_address <= s.maximum_address: break if segment.minimum_address > s.maximum_address: # Non-overlapping, non-adjacent after. self._list.append(segment) elif segment.maximum_address < s.minimum_address: # Non-overlapping, non-adjacent before. self._list.insert(i, segment) else: # Adjacent or overlapping. s.add_data(segment.minimum_address, segment.maximum_address, segment.data, overwrite) segment = s self._current_segment = segment self._current_segment_index = i # Remove overwritten and merge adjacent segments. while self._current_segment is not self._list[-1]: s = self._list[self._current_segment_index + 1] if self._current_segment.maximum_address >= s.maximum_address: # The whole segment is overwritten. del self._list[self._current_segment_index + 1] elif self._current_segment.maximum_address >= s.minimum_address: # Adjacent or beginning of the segment overwritten. self._current_segment.add_data( self._current_segment.maximum_address, s.maximum_address, s.data[self._current_segment.maximum_address - s.minimum_address:], overwrite=False) del self._list[self._current_segment_index+1] break else: # Segments are not overlapping, nor adjacent. break else: self._list.append(segment) self._current_segment = segment self._current_segment_index = 0
Add segments by ascending address.
def chunks(self, size=32, alignment=1): if (size % alignment) != 0: raise Error( 'size {} is not a multiple of alignment {}'.format( size, alignment)) for segment in self: for chunk in segment.chunks(size, alignment): yield chunk
Iterate over all segments and return chunks of the data aligned as given by `alignment`. `size` must be a multiple of `alignment`. Each chunk is returned as a named two-tuple of its address and data.
def minimum_address(self): minimum_address = self._segments.minimum_address if minimum_address is not None: minimum_address //= self.word_size_bytes return minimum_address
The minimum address of the data, or ``None`` if the file is empty.
def maximum_address(self): maximum_address = self._segments.maximum_address if maximum_address is not None: maximum_address //= self.word_size_bytes return maximum_address
The maximum address of the data, or ``None`` if the file is empty.
def header(self): if self._header_encoding is None: return self._header else: return self._header.decode(self._header_encoding)
The binary file header, or ``None`` if missing. See :class:`BinFile's<.BinFile>` `header_encoding` argument for encoding options.
def add(self, data, overwrite=False): if is_srec(data): self.add_srec(data, overwrite) elif is_ihex(data): self.add_ihex(data, overwrite) elif is_ti_txt(data): self.add_ti_txt(data, overwrite) else: raise UnsupportedFileFormatError()
Add given data string by guessing its format. The format must be Motorola S-Records, Intel HEX or TI-TXT. Set `overwrite` to ``True`` to allow already added data to be overwritten.
def add_srec(self, records, overwrite=False): for record in StringIO(records): type_, address, size, data = unpack_srec(record.strip()) if type_ == '0': self._header = data elif type_ in '123': address *= self.word_size_bytes self._segments.add(_Segment(address, address + size, bytearray(data), self.word_size_bytes), overwrite) elif type_ in '789': self.execution_start_address = address
Add given Motorola S-Records string. Set `overwrite` to ``True`` to allow already added data to be overwritten.
def add_ihex(self, records, overwrite=False): extended_segment_address = 0 extended_linear_address = 0 for record in StringIO(records): type_, address, size, data = unpack_ihex(record.strip()) if type_ == IHEX_DATA: address = (address + extended_segment_address + extended_linear_address) address *= self.word_size_bytes self._segments.add(_Segment(address, address + size, bytearray(data), self.word_size_bytes), overwrite) elif type_ == IHEX_END_OF_FILE: pass elif type_ == IHEX_EXTENDED_SEGMENT_ADDRESS: extended_segment_address = int(binascii.hexlify(data), 16) extended_segment_address *= 16 elif type_ == IHEX_EXTENDED_LINEAR_ADDRESS: extended_linear_address = int(binascii.hexlify(data), 16) extended_linear_address <<= 16 elif type_ in [IHEX_START_SEGMENT_ADDRESS, IHEX_START_LINEAR_ADDRESS]: self.execution_start_address = int(binascii.hexlify(data), 16) else: raise Error("expected type 1..5 in record {}, but got {}".format( record, type_))
Add given Intel HEX records string. Set `overwrite` to ``True`` to allow already added data to be overwritten.
def add_ti_txt(self, lines, overwrite=False): address = None eof_found = False for line in StringIO(lines): # Abort if data is found after end of file. if eof_found: raise Error("bad file terminator") line = line.strip() if len(line) < 1: raise Error("bad line length") if line[0] == 'q': eof_found = True elif line[0] == '@': try: address = int(line[1:], 16) except ValueError: raise Error("bad section address") else: # Try to decode the data. try: data = bytearray(binascii.unhexlify(line.replace(' ', ''))) except (TypeError, binascii.Error): raise Error("bad data") size = len(data) # Check that there are correct number of bytes per # line. There should TI_TXT_BYTES_PER_LINE. Only # exception is last line of section which may be # shorter. if size > TI_TXT_BYTES_PER_LINE: raise Error("bad line length") if address is None: raise Error("missing section address") self._segments.add(_Segment(address, address + size, data, self.word_size_bytes), overwrite) if size == TI_TXT_BYTES_PER_LINE: address += size else: address = None if not eof_found: raise Error("missing file terminator")
Add given TI-TXT string `lines`. Set `overwrite` to ``True`` to allow already added data to be overwritten.
def add_binary(self, data, address=0, overwrite=False): address *= self.word_size_bytes self._segments.add(_Segment(address, address + len(data), bytearray(data), self.word_size_bytes), overwrite)
Add given data at given address. Set `overwrite` to ``True`` to allow already added data to be overwritten.
def add_file(self, filename, overwrite=False): with open(filename, 'r') as fin: self.add(fin.read(), overwrite)
Open given file and add its data by guessing its format. The format must be Motorola S-Records, Intel HEX or TI-TXT. Set `overwrite` to ``True`` to allow already added data to be overwritten.
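These methods read like the public API of a bincopy-style BinFile class (the class name appears in the header docstring above). A hedged usage sketch; the file name and binary payload are hypothetical:

# Hedged sketch: assumes a BinFile class exposing the methods shown above.
f = BinFile()
f.add_srec_file('firmware.s19')                    # hypothetical input file
f.add_binary(b'\x12\x34', address=0x2000, overwrite=True)
print(f.info())
print(f.as_ti_txt())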
def add_srec_file(self, filename, overwrite=False): with open(filename, 'r') as fin: self.add_srec(fin.read(), overwrite)
Open given Motorola S-Records file and add its records. Set `overwrite` to ``True`` to allow already added data to be overwritten.
def add_ihex_file(self, filename, overwrite=False): with open(filename, 'r') as fin: self.add_ihex(fin.read(), overwrite)
Open given Intel HEX file and add its records. Set `overwrite` to ``True`` to allow already added data to be overwritten.
def add_ti_txt_file(self, filename, overwrite=False): with open(filename, 'r') as fin: self.add_ti_txt(fin.read(), overwrite)
Open given TI-TXT file and add its contents. Set `overwrite` to ``True`` to allow already added data to be overwritten.
def add_binary_file(self, filename, address=0, overwrite=False): with open(filename, 'rb') as fin: self.add_binary(fin.read(), address, overwrite)
Open given binary file and add its contents. Set `overwrite` to ``True`` to allow already added data to be overwritten.
def as_ti_txt(self): lines = [] for segment in self._segments: lines.append('@{:04X}'.format(segment.address)) for _, data in segment.chunks(TI_TXT_BYTES_PER_LINE): lines.append(' '.join('{:02X}'.format(byte) for byte in data)) lines.append('q') return '\n'.join(lines) + '\n'
Format the binary file as a TI-TXT file and return it as a string. >>> print(binfile.as_ti_txt()) @0100 21 46 01 36 01 21 47 01 36 00 7E FE 09 D2 19 01 21 46 01 7E 17 C2 00 01 FF 5F 16 00 21 48 01 19 19 4E 79 23 46 23 96 57 78 23 9E DA 3F 01 B2 CA 3F 01 56 70 2B 5E 71 2B 72 2B 73 21 46 01 34 21 q
def as_array(self, minimum_address=None, padding=None, separator=', '): binary_data = self.as_binary(minimum_address, padding=padding) words = [] for offset in range(0, len(binary_data), self.word_size_bytes): word = 0 for byte in binary_data[offset:offset + self.word_size_bytes]: word <<= 8 word += byte words.append('0x{:02x}'.format(word)) return separator.join(words)
Format the binary file as a string of values separated by the given `separator`. This function can be used to generate array initialization code for C and other languages. `minimum_address` is the start address of the resulting binary data. `padding` is the value of the padding between non-adjacent segments. >>> binfile.as_array() '0x21, 0x46, 0x01, 0x36, 0x01, 0x21, 0x47, 0x01, 0x36, 0x00, 0x7e, 0xfe, 0x09, 0xd2, 0x19, 0x01, 0x21, 0x46, 0x01, 0x7e, 0x17, 0xc2, 0x00, 0x01, 0xff, 0x5f, 0x16, 0x00, 0x21, 0x48, 0x01, 0x19, 0x19, 0x4e, 0x79, 0x23, 0x46, 0x23, 0x96, 0x57, 0x78, 0x23, 0x9e, 0xda, 0x3f, 0x01, 0xb2, 0xca, 0x3f, 0x01, 0x56, 0x70, 0x2b, 0x5e, 0x71, 0x2b, 0x72, 0x2b, 0x73, 0x21, 0x46, 0x01, 0x34, 0x21'
def fill(self, value=b'\xff'): previous_segment_maximum_address = None fill_segments = [] for address, data in self._segments: maximum_address = address + len(data) if previous_segment_maximum_address is not None: fill_size = address - previous_segment_maximum_address fill_size_words = fill_size // self.word_size_bytes fill_segments.append(_Segment( previous_segment_maximum_address, previous_segment_maximum_address + fill_size, value * fill_size_words, self.word_size_bytes)) previous_segment_maximum_address = maximum_address for segment in fill_segments: self._segments.add(segment)
Fill all empty space between segments with given value `value`.
def exclude(self, minimum_address, maximum_address): if maximum_address < minimum_address: raise Error('bad address range') minimum_address *= self.word_size_bytes maximum_address *= self.word_size_bytes self._segments.remove(minimum_address, maximum_address)
Exclude given range and keep the rest. `minimum_address` is the first word address to exclude (including). `maximum_address` is the last word address to exclude (excluding).
def crop(self, minimum_address, maximum_address): minimum_address *= self.word_size_bytes maximum_address *= self.word_size_bytes maximum_address_address = self._segments.maximum_address self._segments.remove(0, minimum_address) self._segments.remove(maximum_address, maximum_address_address)
Keep given range and discard the rest. `minimum_address` is the first word address to keep (including). `maximum_address` is the last word address to keep (excluding).
def info(self): info = '' if self._header is not None: if self._header_encoding is None: header = '' for b in bytearray(self.header): if chr(b) in string.printable: header += chr(b) else: header += '\\x{:02x}'.format(b) else: header = self.header info += 'Header: "{}"\n'.format(header) if self.execution_start_address is not None: info += 'Execution start address: 0x{:08x}\n'.format( self.execution_start_address) info += 'Data ranges:\n\n' for address, data in self._segments: minimum_address = address size = len(data) maximum_address = (minimum_address + size // self.word_size_bytes) info += 4 * ' ' info += '0x{:08x} - 0x{:08x} ({})\n'.format( minimum_address, maximum_address, format_size(size, binary=True)) return info
Return a string of human readable information about the binary file. .. code-block:: python >>> print(binfile.info()) Data ranges: 0x00000100 - 0x00000140 (64 bytes)
def _precompute(self, tree): d = {} for n in tree.preorder_internal_node_iter(): d[n] = namedtuple('NodeDist', ['dist_from_root', 'edges_from_root']) if n.parent_node: d[n].dist_from_root = d[n.parent_node].dist_from_root + n.edge_length d[n].edges_from_root = d[n.parent_node].edges_from_root + 1 else: d[n].dist_from_root = 0.0 d[n].edges_from_root = 0 return d
Collect metric info in a single preorder traversal.
def _get_vectors(self, tree, precomputed_info): little_m = [] big_m = [] leaf_nodes = sorted(tree.leaf_nodes(), key=lambda x: x.taxon.label) # inner nodes, sorted order for leaf_a, leaf_b in combinations(leaf_nodes, 2): mrca = tree.mrca(taxa=[leaf_a.taxon, leaf_b.taxon]) little_m.append(precomputed_info[mrca].edges_from_root) big_m.append(precomputed_info[mrca].dist_from_root) # leaf nodes, sorted order for leaf in leaf_nodes: little_m.append(1) big_m.append(leaf.edge_length) return np.array(little_m), np.array(big_m)
Populate the vectors m and M.
def get_distance(self, other, lbda=0.5, min_overlap=4): if self.tree ^ other.tree: if len(self.tree & other.tree) < min_overlap: return 0 # raise AttributeError('Can\'t calculate tree distances when tree overlap is less than two leaves') else: t1, t2 = self._equalise_leaf_sets(other, False) tmp_self = KendallColijn(t1) tmp_other = KendallColijn(t2) return np.sqrt(((tmp_self.get_vector(lbda) - tmp_other.get_vector(lbda)) ** 2).sum()) else: return np.sqrt(((self.get_vector(lbda) - other.get_vector(lbda)) ** 2).sum())
Return the Euclidean distance between vectors v of two trees. Must have the same leaf set (too lazy to check).
def ambiguate(sequence1, sequence2, delete_ambiguous=False): delete = False combination = list() z = list(zip(sequence1, sequence2)) for (a, b) in z: if a == b: combination.append(a) else: if a == '-' or b == '-': combination.append('-') else: if delete_ambiguous: delete = True ambig = get_ambiguity(a, b) combination.append(ambig) if delete: return 'X' * len(combination) return ''.join(combination)
delete_ambiguous: Marks sequences for deletion by replacing all chars with 'X'. These seqs are deleted later with remove_empty
def remove_empty(rec): for header, sequence in rec.mapping.items(): if all(char == 'X' for char in sequence): rec.headers.remove(header) rec.sequences.remove(sequence) rec.update() return rec
Deletes sequences that were marked for deletion by convert_to_IUPAC
def transliterate(text): text = unidecode(six.text_type(text)) text = text.replace('@', 'a') return text
Utility to properly transliterate text.
def slugify(mapping, bind, values): for value in values: if isinstance(value, six.string_types): value = transliterate(value) value = normality.slugify(value) yield value
Transform all values into URL-capable slugs.
def latinize(mapping, bind, values): for v in values: if isinstance(v, six.string_types): v = transliterate(v) yield v
Transliterate a given string into the latin alphabet.
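A hedged usage sketch of these transform helpers; the `mapping` and `bind` arguments are not used by the logic shown, so None is passed purely for illustration, and the functions above are assumed to be in scope:

# Assumes transliterate()/latinize()/slugify() from above are importable.
print(list(latinize(None, None, ['Fürstenfeldbruck', 42])))
# expected ['Furstenfeldbruck', 42] -- non-strings pass through untouched
print(list(slugify(None, None, ['Hello World!'])))
# expected ['hello-world'] (the exact slug depends on normality's rules)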
def join(mapping, bind, values): return [' '.join([six.text_type(v) for v in values if v is not None])]
Merge all the strings. Put space between them.
def str_func(name): def func(mapping, bind, values): for v in values: if isinstance(v, six.string_types): v = getattr(v, name)() yield v return func
Apply functions like upper(), lower() and strip().
def hash(mapping, bind, values): for v in values: if v is None: continue if not isinstance(v, six.string_types): v = six.text_type(v) yield sha1(v.encode('utf-8')).hexdigest()
Generate a sha1 for each of the given values.
def clean(mapping, bind, values): categories = {'C': ' '} for value in values: if isinstance(value, six.string_types): value = normality.normalize(value, lowercase=False, collapse=True, decompose=False, replace_categories=categories) yield value
Perform several types of string cleaning for titles etc..
def isconnected(mask): nodes_to_check = list((np.where(mask[0, :])[0])[1:]) seen = [True] + [False] * (len(mask) - 1) while nodes_to_check and not all(seen): node = nodes_to_check.pop() reachable = np.where(mask[node, :])[0] for i in reachable: if not seen[i]: nodes_to_check.append(i) seen[i] = True return all(seen)
Checks that all nodes are reachable from the first node - i.e. that the graph is fully connected.
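A small check of the connectivity test above on 3-node adjacency masks, assuming the isconnected() function is in scope:

import numpy as np

# Assumes isconnected() from above is in scope.
connected = np.array([[1, 1, 0],
                      [1, 1, 1],
                      [0, 1, 1]], dtype=bool)
isolated = np.array([[1, 0, 0],
                     [0, 1, 1],
                     [0, 1, 1]], dtype=bool)
print(isconnected(connected))  # expected True
print(isconnected(isolated))   # expected False (node 0 reaches nothing else)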
def affinity( matrix, mask=None, scale=None, ): mask = (mask if mask is not None else np.ones(matrix.shape, dtype=bool)) assert isconnected(mask) scale = (scale if scale is not None else np.ones(matrix.shape)) ix = np.where(np.logical_not(mask)) scaled_matrix = -matrix ** 2 / scale # inputs where distance = 0 and scale = 0 result in NaN: # the next line replaces NaNs with -1.0 scaled_matrix[np.where(np.isnan(scaled_matrix))] = -1.0 affinity_matrix = np.exp(scaled_matrix) affinity_matrix[ix] = 0. # mask affinity_matrix.flat[::len(affinity_matrix) + 1] = 0. # diagonal return affinity_matrix
Mask is a 2d boolean matrix. Scale is a 2d local scale matrix, as output by kscale(). It's the outer product of the column vector produced by kdists().
def double_centre(matrix, square_input=True): m = matrix.copy() if square_input: m **= 2 (rows, cols) = m.shape cm = np.mean(m, axis=0) # column means rm = np.mean(m, axis=1).reshape((rows, 1)) # row means gm = np.mean(cm) # grand mean m -= rm + cm - gm m /= -2 return m
Double-centres the input matrix: From each element: Subtract the row mean Subtract the column mean Add the grand mean Divide by -2 Method from: Torgerson, W S (1952). Multidimensional scaling: I. Theory and method. Alternatively M = -0.5 * (I - 1/n)D[^2](I - 1/n)
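One useful sanity check of double centring is that the rows and columns of the result sum to zero. A quick hedged sketch, assuming the double_centre() function above is in scope:

import numpy as np

# Assumes double_centre() from above is in scope.
d = np.array([[0., 3., 4.],
              [3., 0., 5.],
              [4., 5., 0.]])
b = double_centre(d)  # squares the entries by default, then Gower-centres
print(np.allclose(b.sum(axis=0), 0), np.allclose(b.sum(axis=1), 0))
# expected: True True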
def _estimate_additive_constant(matrix): topleft = np.zeros(matrix.shape) topright = 2*double_centre(matrix) bottomleft = -np.eye(matrix.shape[0]) bottomright = -4*double_centre(matrix, square_input=False) Z = np.vstack([np.hstack([topleft,topright]), np.hstack([bottomleft,bottomright])]) return max(np.real(np.linalg.eigvals(Z)))
CMDS Additive Constant: correction for non-Euclidean distances. Procedure taken from the R function cmdscale. The additive constant is given by the largest eigenvalue (real part) of the 2x2 block matrix Z = [[ 0, 2*dbc (d^2) ], [ -I, -4*dbc (2d) ]], where dbc is the double_centre() function [see above]. corrected matrix = matrix + additive constant (diagonal kept as 0)
def check_pd(matrix): try: np.linalg.cholesky(matrix) return True except np.linalg.LinAlgError: return False
A symmetric matrix (M) is PD if it has a Cholesky decomposition, i.e. M = R.T dot R, where R is upper triangular with positive diagonal entries
def check_psd(matrix, tolerance=1e-6): hermitian = (matrix + matrix.T.conjugate()) / 2 eigenvalues = np.linalg.eigh(hermitian)[0] return (eigenvalues > -tolerance).all()
A square matrix is PSD if all eigenvalues of its Hermitian part are non-negative. The Hermitian part is given by (M + M*)/2, where M* is the complex conjugate transpose of M
def normalise_rows(matrix): lengths = np.apply_along_axis(np.linalg.norm, 1, matrix) if not (lengths > 0).all(): # raise ValueError('Cannot normalise 0 length vector to length 1') # print(matrix) lengths[lengths == 0] = 1 return matrix / lengths[:, np.newaxis]
Scales all rows to length 1. Fails when row is 0-length, so it leaves these unchanged
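A quick hedged sketch of the row normalisation behaviour described above, including the zero-row special case, assuming normalise_rows() is in scope:

import numpy as np

# Assumes normalise_rows() from above is in scope.
m = np.array([[3., 4.],
              [0., 0.]])
print(normalise_rows(m))
# expected [[0.6, 0.8], [0.0, 0.0]] -- the zero-length row is left unchanged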
def kdists(matrix, k=7, ix=None): ix = ix or kindex(matrix, k) return matrix[ix][np.newaxis].T
Returns the k-th nearest distances, row-wise, as a column vector