def heatmap(dm, partition=None, cmap=CM.Blues, fontsize=10): assert isinstance(dm, DistanceMatrix) datamax = float(np.abs(dm.values).max()) length = dm.shape[0] if partition: sorting = np.array(flatten_list(partition.get_membership())) new_dm = dm.reorder(dm.df.columns[sorting]) else: new_dm = dm fig = plt.figure() ax = fig.add_subplot(111) ax.xaxis.tick_top() ax.grid(False) tick_positions = np.array(list(range(length))) + 0.5 if fontsize is not None: ax.set_yticks(tick_positions) ax.set_xticks(tick_positions) ax.set_xticklabels(new_dm.df.columns, rotation=90, fontsize=fontsize, ha='center') ax.set_yticklabels(new_dm.df.index, fontsize=fontsize, va='center') cbar_ticks_at = [0, 0.5 * datamax, datamax] cax = ax.imshow( new_dm.values, interpolation='nearest', extent=[0., length, length, 0.], vmin=0, vmax=datamax, cmap=cmap, ) cbar = fig.colorbar(cax, ticks=cbar_ticks_at, format='%1.2g') cbar.set_label('Distance') return fig
heatmap(dm, partition=None, cmap=CM.Blues, fontsize=10)
Produce a 2D plot of the distance matrix, with values encoded by coloured cells.

Args:
    partition: treeCl.Partition object - if supplied, will reorder rows and columns
        of the distance matrix to reflect the groups defined by the partition
    cmap: matplotlib colourmap object - the colour palette to use
    fontsize: int or None - sets the size of the locus labels

Returns:
    matplotlib plottable object
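Below is a minimal, self-contained sketch of the same plotting idea using only numpy and matplotlib; the random symmetric matrix and the 'locusN' labels are placeholders rather than treeCl data.

import numpy as np
import matplotlib.pyplot as plt

# Build a small symmetric "distance matrix" purely for illustration
rng = np.random.default_rng(0)
raw = rng.random((6, 6))
dm = (raw + raw.T) / 2.0
np.fill_diagonal(dm, 0.0)
labels = ['locus{}'.format(i) for i in range(6)]   # placeholder names

fig, ax = plt.subplots()
cax = ax.imshow(dm, interpolation='nearest', vmin=0, vmax=dm.max(), cmap='Blues')
ax.set_xticks(range(6)); ax.set_xticklabels(labels, rotation=90)
ax.set_yticks(range(6)); ax.set_yticklabels(labels)
fig.colorbar(cax, label='Distance')
plt.show()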
def _plotly_3d_scatter(coords, partition=None): from plotly.graph_objs import Scatter3d, Data, Figure, Layout, Line, Margin, Marker # auto sign-in with credentials or use py.sign_in() colourmap = { 'A':'#1f77b4', 'B':'#ff7f0e', 'C':'#2ca02c', 'D':'#d62728', 'E':'#9467bd', 1:'#1f77b4', 2:'#ff7f0e', 3:'#2ca02c', 4:'#d62728', 5:'#9467bd' } df = coords.df if partition: assert len(partition.partition_vector) == df.shape[0] labels = [x+1 for x in partition.partition_vector] else: labels = [1 for _ in range(df.shape[0])] x, y, z = df.columns[:3] df['Label'] = labels colours = [colourmap[lab] for lab in df['Label']] trace = Scatter3d(x=df[x], y=df[y], z=df[z], mode='markers', marker=Marker(size=9, color=colours, line=Line(color=colours, width=0.5), opacity=0.8), text=[str(ix) for ix in df.index]) data = Data([trace]) layout = Layout( margin=Margin(l=0, r=0, b=0, t=0 ), hovermode='x', ) fig = Figure(data=data, layout=layout) return fig
_plotly_3d_scatter(coords, partition=None) Make a scatterplot of treeCl.CoordinateMatrix using the Plotly plotting engine
def _add_sphere(ax): (u, v) = np.mgrid[0:2 * np.pi:20j, 0:np.pi:10j] x = np.cos(u) * np.sin(v) y = np.sin(u) * np.sin(v) z = np.cos(v) ax.plot_wireframe(x, y, z, color='grey', linewidth=0.2) return ax
_add_sphere(ax) Add a wireframe unit sphere onto matplotlib 3D axes Args: ax - matplotlib 3D axes object Returns: updated matplotlib 3D axes
def heatmap(self, partition=None, cmap=CM.Blues): if isinstance(self.dm, DistanceMatrix): length = self.dm.values.shape[0] else: length = self.dm.shape[0] datamax = float(np.abs(self.dm).max()) fig = plt.figure() ax = fig.add_subplot(111) ticks_at = [0, 0.5 * datamax, datamax] if partition: sorting = flatten_list(partition.get_membership()) self.dm = self.dm.reorder(sorting) cax = ax.imshow( self.dm.values, interpolation='nearest', origin='lower', extent=[0., length, 0., length], vmin=0, vmax=datamax, cmap=cmap, ) cbar = fig.colorbar(cax, ticks=ticks_at, format='%1.2g') cbar.set_label('Distance') return fig
Plots a visual representation of a distance matrix
def get_tree_collection_strings(self, scale=1, guide_tree=None): records = [self.collection[i] for i in self.indices] return TreeCollectionTaskInterface().scrape_args(records)
Function to get input strings for tree_collection. tree_collection needs distvar, genome_map and labels - these are returned in that order.
def from_json(buffer, auto_flatten=True, raise_for_index=True): buffer = to_bytes(buffer) view_out = _ffi.new('lsm_view_t **') index_out = _ffi.new('lsm_index_t **') buffer = to_bytes(buffer) rv = rustcall( _lib.lsm_view_or_index_from_json, buffer, len(buffer), view_out, index_out) if rv == 1: return View._from_ptr(view_out[0]) elif rv == 2: index = Index._from_ptr(index_out[0]) if auto_flatten and index.can_flatten: return index.into_view() if raise_for_index: raise IndexedSourceMap('Unexpected source map index', index=index) return index else: raise AssertionError('Unknown response from C ABI (%r)' % rv)
Parses a JSON string into either a view or an index. If auto flatten is enabled a sourcemap index that does not contain external references is automatically flattened into a view. By default if an index would be returned an `IndexedSourceMap` error is raised instead which holds the index.
def from_json(buffer): buffer = to_bytes(buffer) return View._from_ptr(rustcall( _lib.lsm_view_from_json, buffer, len(buffer)))
Creates a sourcemap view from a JSON string.
def from_memdb(buffer): buffer = to_bytes(buffer) return View._from_ptr(rustcall( _lib.lsm_view_from_memdb, buffer, len(buffer)))
Creates a sourcemap view from MemDB bytes.
def from_memdb_file(path): path = to_bytes(path) return View._from_ptr(rustcall(_lib.lsm_view_from_memdb_file, path))
Creates a sourcemap view from MemDB at a given file.
def dump_memdb(self, with_source_contents=True, with_names=True): len_out = _ffi.new('unsigned int *') buf = rustcall( _lib.lsm_view_dump_memdb, self._get_ptr(), len_out, with_source_contents, with_names) try: rv = _ffi.unpack(buf, len_out[0]) finally: _lib.lsm_buffer_free(buf) return rv
Dumps a sourcemap in MemDB format into bytes.
def lookup_token(self, line, col):
    # Silently ignore underflows
    if line < 0 or col < 0:
        return None
    tok_out = _ffi.new('lsm_token_t *')
    if rustcall(_lib.lsm_view_lookup_token, self._get_ptr(),
                line, col, tok_out):
        return convert_token(tok_out[0])
Given a minified location, this tries to locate the closest token that is a match. Returns `None` if no match can be found.
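A hypothetical usage sketch tying `from_json` and `lookup_token` together; the `View` class name, the import path and the map file path are assumptions inferred from the snippets above, not confirmed by them.

from libsourcemap import View   # assumed import path for the bindings shown above

with open('example.min.js.map', 'rb') as fh:   # placeholder path
    view = View.from_json(fh.read())

token = view.lookup_token(10, 42)   # minified line/column
if token is not None:
    print(token)   # closest original source location for the minified position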
def get_original_function_name(self, line, col, minified_name, minified_source): # Silently ignore underflows if line < 0 or col < 0: return None minified_name = minified_name.encode('utf-8') sout = _ffi.new('const char **') try: slen = rustcall(_lib.lsm_view_get_original_function_name, self._get_ptr(), line, col, minified_name, minified_source, sout) if slen > 0: return _ffi.unpack(sout[0], slen).decode('utf-8', 'replace') except SourceMapError: # In some rare cases the library is/was known to panic. We do # not want to report this upwards (this happens on slicing # out of range on older rust versions in the rust-sourcemap # library) pass
Given a token location, a minified function name and the minified source file, this returns the original name of the minified function in scope, if it can be found.
def get_source_contents(self, src_id): len_out = _ffi.new('unsigned int *') must_free = _ffi.new('int *') rv = rustcall(_lib.lsm_view_get_source_contents, self._get_ptr(), src_id, len_out, must_free) if rv: try: return _ffi.unpack(rv, len_out[0]) finally: if must_free[0]: _lib.lsm_buffer_free(rv)
Given a source ID this returns the embedded source code, if there is any. The source code is returned as UTF-8 bytes for more efficient processing.
def has_source_contents(self, src_id): return bool(rustcall(_lib.lsm_view_has_source_contents, self._get_ptr(), src_id))
Checks if source contents are embedded for the given source ID.
def get_source_name(self, src_id): len_out = _ffi.new('unsigned int *') rv = rustcall(_lib.lsm_view_get_source_name, self._get_ptr(), src_id, len_out) if rv: return decode_rust_str(rv, len_out[0])
Returns the name of the given source.
def iter_sources(self): for src_id in xrange(self.get_source_count()): yield src_id, self.get_source_name(src_id)
Iterates over all source names and IDs.
def from_json(buffer): buffer = to_bytes(buffer) return Index._from_ptr(rustcall( _lib.lsm_index_from_json, buffer, len(buffer)))
Creates an index from a JSON string.
def into_view(self): try: return View._from_ptr(rustcall( _lib.lsm_index_into_view, self._get_ptr())) finally: self._ptr = None
Converts the index into a view
def from_bytes(buffer): buffer = to_bytes(buffer) return ProguardView._from_ptr(rustcall( _lib.lsm_proguard_mapping_from_bytes, buffer, len(buffer)))
Creates a ProGuard mapping view from bytes.
def from_path(filename): filename = to_bytes(filename) if NULL_BYTE in filename: raise ValueError('null byte in path') return ProguardView._from_ptr(rustcall( _lib.lsm_proguard_mapping_from_path, filename + b'\x00'))
Creates a ProGuard mapping view from a file path.
def lookup(self, dotted_path, lineno=None): rv = None try: rv = rustcall( _lib.lsm_proguard_mapping_convert_dotted_path, self._get_ptr(), dotted_path.encode('utf-8'), lineno or 0) return _ffi.string(rv).decode('utf-8', 'replace') finally: if rv is not None: _lib.lsm_buffer_free(rv)
Given a dotted path in the format ``class_name`` or ``class_name:method_name`` this performs an alias lookup. For methods the line number must be supplied or the result is unreliable.
def apply(self, data): if self.visitor.is_object: obj = {} if self.visitor.parent is None: obj['$schema'] = self.visitor.path obj_empty = True for child in self.children: empty, value = child.apply(data) if empty and child.optional: continue obj_empty = False if not empty else obj_empty if child.visitor.name in obj and child.visitor.is_array: obj[child.visitor.name].extend(value) else: obj[child.visitor.name] = value return obj_empty, obj elif self.visitor.is_array: empty, value = self.children.apply(data) return empty, [value] elif self.visitor.is_value: return extract_value(self.mapping, self.visitor, data)
Apply the given mapping to ``data``, recursively. The return type is a tuple of a boolean and the resulting data element. The boolean indicates whether any values were mapped in the child nodes of the mapping. It is used to skip optional branches of the object graph.
def apply_iter(cls, rows, mapping, resolver, scope=None): mapper = cls(mapping, resolver, scope=scope) for row in rows: _, data = mapper.apply(row) yield data
Given an iterable ``rows`` that yields data records, and a ``mapping`` which is to be applied to them, apply the mapping to each row and yield the generated object graph.
def translate(self, text):
    # Reset substitution counter
    self.count = 0
    # Process text
    return self._make_regex().sub(self, text)
Translate text, returns the modified text.
def cluster(self, n, embed_dim=None, algo=spectral.SPECTRAL, method=methods.KMEANS): if n == 1: return Partition([1] * len(self.get_dm(False))) if embed_dim is None: embed_dim = n if algo == spectral.SPECTRAL: self._coords = self.spectral_embedding(embed_dim) elif algo == spectral.KPCA: self._coords = self.kpca_embedding(embed_dim) elif algo == spectral.ZELNIKMANOR: self._coords = self.spectral_embedding_(embed_dim) else: raise OptionError(algo, list(spectral.reverse.values())) if method == methods.KMEANS: p = self.kmeans(n, self._coords.df.values) elif method == methods.GMM: p = self.gmm(n, self._coords.df.values) elif method == methods.WARD: linkmat = fastcluster.linkage(self._coords.values, 'ward') p = _hclust(linkmat, n) else: raise OptionError(method, list(methods.reverse.values())) if self._verbosity > 0: print('Using clustering method: {}'.format(methods.reverse[method])) return p
Cluster the embedded coordinates using spectral clustering

Parameters
----------
n : int
    The number of clusters to return
embed_dim : int
    The dimensionality of the underlying coordinates.
    Defaults to same value as n
algo : enum value (spectral.SPECTRAL | spectral.KPCA | spectral.ZELNIKMANOR)
    Type of embedding to use
method : enum value (methods.KMEANS | methods.GMM | methods.WARD)
    The clustering method to use

Returns
-------
Partition : Partition object describing the data partition
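As an illustration of the pipeline the docstring describes (spectral embedding of an affinity matrix followed by k-means), here is a self-contained sketch using scikit-learn instead of the treeCl wrappers; the block-structured affinity matrix is synthetic, and treeCl additionally row-normalises the embedded coordinates.

import numpy as np
from sklearn.manifold import spectral_embedding
from sklearn.cluster import KMeans

# Synthetic affinity matrix with two obvious blocks (placeholder data)
n_points, n_clusters = 20, 2
affinity = np.full((n_points, n_points), 0.05)
affinity[:10, :10] = affinity[10:, 10:] = 0.9
np.fill_diagonal(affinity, 1.0)

# Embed into a few dimensions, then cluster the embedded coordinates
coords = spectral_embedding(affinity, n_components=n_clusters)
labels = KMeans(n_clusters=n_clusters, n_init=10, random_state=0).fit_predict(coords)
print(labels)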
def spectral_embedding(self, n): coords = spectral_embedding(self._affinity, n) return CoordinateMatrix(normalise_rows(coords))
Embed the points using spectral decomposition of the laplacian of the affinity matrix

Parameters
----------
n : int
    The number of dimensions
def spectral_embedding_(self, n): aff = self._affinity.copy() aff.flat[::aff.shape[0]+1] = 0 laplacian = laplace(aff) decomp = eigen(laplacian) return CoordinateMatrix(normalise_rows(decomp.vecs[:,:n]))
Old method for generating coords, used in the original analysis of the yeast data. Included to reproduce the yeast result from the paper. Reason for the difference: treeCl switched to the spectral embedding method provided by scikit-learn (mainly because it spreads points over a sphere, rather than a half sphere, so looks better when plotted), and that method uses a different Laplacian matrix.
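For comparison, a numpy-only sketch of the older approach described above: zero the affinity diagonal, build a Laplacian, keep the leading eigenvectors, and row-normalise. The exact laplace/eigen helpers used by treeCl are not shown here, so the plain unnormalised Laplacian L = D - A is an assumption.

import numpy as np

def old_style_embedding(affinity, n):
    # Assumed variant: zero the diagonal, use the unnormalised Laplacian L = D - A
    aff = affinity.copy()
    np.fill_diagonal(aff, 0.0)
    laplacian = np.diag(aff.sum(axis=1)) - aff
    vals, vecs = np.linalg.eigh(laplacian)        # eigenvalues in ascending order
    coords = vecs[:, :n]                          # n smallest-eigenvalue eigenvectors
    norms = np.linalg.norm(coords, axis=1, keepdims=True)
    return coords / np.where(norms == 0, 1, norms)  # row-normalise onto the sphere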
def kpca_embedding(self, n): return self.dm.embedding(n, 'kpca', affinity_matrix=self._affinity)
Embed the points using kernel PCA of the affinity matrix

Parameters
----------
n : int
    The number of dimensions
def cluster(self, n, embed_dim=None, algo=mds.CLASSICAL, method=methods.KMEANS): if n == 1: return Partition([1] * len(self.get_dm(False))) if embed_dim is None: embed_dim = n if algo == mds.CLASSICAL: self._coords = self.dm.embedding(embed_dim, 'cmds') elif algo == mds.METRIC: self._coords = self.dm.embedding(embed_dim, 'mmds') else: raise OptionError(algo, list(mds.reverse.values())) if method == methods.KMEANS: p = self.kmeans(n, self._coords.values) elif method == methods.GMM: p = self.gmm(n, self._coords.values) elif method == methods.WARD: linkmat = fastcluster.linkage(self._coords.values, 'ward') p = _hclust(linkmat, n) else: raise OptionError(method, list(methods.reverse.values())) #if self._verbosity > 0: # print('Using clustering method: {}'.format(methods.reverse[method])) return p
Cluster the embedded coordinates using multidimensional scaling

Parameters
----------
n : int
    The number of clusters to return
embed_dim : int
    The dimensionality of the underlying coordinates.
    Defaults to same value as n
algo : enum value (mds.CLASSICAL | mds.METRIC)
    Type of embedding to use
method : enum value (methods.KMEANS | methods.GMM | methods.WARD)
    The clustering method to use

Returns
-------
Partition : Partition object describing the data partition
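A self-contained sketch of classical MDS (the 'cmds' embedding referred to above) via double-centring of the squared distance matrix; the four-point distance matrix is a placeholder.

import numpy as np

def classical_mds(dm, n_dims):
    # Double-centre the squared distances: B = -1/2 * J D^2 J
    n = dm.shape[0]
    j = np.eye(n) - np.ones((n, n)) / n
    b = -0.5 * j.dot(dm ** 2).dot(j)
    vals, vecs = np.linalg.eigh(b)
    order = np.argsort(vals)[::-1][:n_dims]        # largest eigenvalues first
    vals, vecs = vals[order], vecs[:, order]
    return vecs * np.sqrt(np.maximum(vals, 0.0))   # coordinates in n_dims dimensions

# Placeholder distance matrix for four points on a line
pts = np.array([[0.0], [1.0], [2.0], [4.0]])
dm = np.abs(pts - pts.T)
print(classical_mds(dm, 1))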
def cluster(self, nclusters, linkage_method=linkage.WARD, **kwargs): if linkage_method == linkage.SINGLE: return self._hclust(nclusters, 'single', **kwargs) elif linkage_method == linkage.COMPLETE: return self._hclust(nclusters, 'complete', **kwargs) elif linkage_method == linkage.AVERAGE: return self._hclust(nclusters, 'average', **kwargs) elif linkage_method == linkage.WARD: return self._hclust(nclusters, 'ward', **kwargs) elif linkage_method == linkage.WEIGHTED: return self._hclust(nclusters, 'weighted', **kwargs) elif linkage_method == linkage.CENTROID: return self._hclust(nclusters, 'centroid', **kwargs) elif linkage_method == linkage.MEDIAN: return self._hclust(nclusters, 'median', **kwargs) else: raise ValueError('Unknown linkage_method: {}'.format(linkage_method))
Do hierarchical clustering on a distance matrix using one of the linkage methods:
    linkage.SINGLE   = single-linkage clustering
    linkage.COMPLETE = complete-linkage clustering
    linkage.AVERAGE  = average-linkage (UPGMA) clustering
    linkage.WARD     = Ward's minimum variance method
    linkage.WEIGHTED = weighted (WPGMA) clustering
    linkage.CENTROID = centroid (UPGMC) clustering
    linkage.MEDIAN   = median (WPGMC) clustering
def _hclust(self, nclusters, method, noise=False):
    matrix = self.get_dm(noise)
    linkmat = fastcluster.linkage(squareform(matrix), method)
    self.nclusters = nclusters  # Store these in case we want to plot
    self.linkmat = linkmat      #
    return _hclust(linkmat, nclusters)
:param nclusters: Number of clusters to return
:param method: single, complete, average, ward, weighted, centroid or median
    (http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html)
:param noise: Add Gaussian noise to the distance matrix prior to clustering (bool, default=False)
:return: Partition object describing clustering
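A minimal equivalent using scipy's hierarchical clustering (fastcluster exposes the same `linkage` interface); the observations are random placeholders.

import numpy as np
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(0)
points = rng.random((12, 3))                      # placeholder observations
condensed = pdist(points)                         # condensed distance matrix
linkmat = linkage(condensed, method='ward')       # same call shape as fastcluster.linkage

nclusters = 3
labels = fcluster(linkmat, t=nclusters, criterion='maxclust')
print(labels)                                     # flat cluster assignment per observation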
def plot_dendrogram(self, nclusters=None, leaf_font_size=8, leaf_rotation=90, names=None, title_font_size=16, ): if not hasattr(self, 'nclusters') and not hasattr(self, 'linkmat'): raise ValueError("This instance has no plottable information.") if nclusters is None: nclusters = self.nclusters threshold = _get_threshold(self.linkmat, nclusters) import matplotlib.pyplot as plt fig = plt.figure(figsize=(11.7, 8.3)) if names is not None: labfn=lambda leaf: names[leaf] else: labfn=None leaf_rotation=0 dendrogram( self.linkmat, color_threshold=threshold, leaf_font_size=leaf_font_size, leaf_rotation=leaf_rotation, leaf_label_func=labfn, count_sort=True, ) plt.suptitle('Dendrogram', fontsize=title_font_size) # plt.title('Distance metric: {0} Linkage method: {1} Number of classes: {2}'.format(compound_key[0], # compound_key[1], compound_key[2]), fontsize=12) plt.axhline(threshold, color='grey', ls='dashed') plt.xlabel('Gene') plt.ylabel('Distance') return fig
Plots the dendrogram of the most recently generated partition :param nclusters: Override the plot default number of clusters :return: matplotlib.pyplot.figure
def dbscan(self, eps=0.75, min_samples=3): est = DBSCAN(metric='precomputed', eps=eps, min_samples=min_samples) est.fit(self.get_dm(False)) return Partition(est.labels_)
:param eps: Maximum distance between points in the same neighbourhood
:param min_samples: Number of points required to form a neighbourhood
:return: Partition object describing the clustering
def _py2_and_3_joiner(sep, joinable): if ISPY3: sep = bytes(sep, DEFAULT_ENCODING) joined = sep.join(joinable) return joined.decode(DEFAULT_ENCODING) if ISPY3 else joined
Allow '\n'.join(...) statements to work in Py2 and Py3.
:param sep: Separator string
:param joinable: Iterable of items to join
:return: The joined string
def _log_thread(self, pipe, queue): # thread function to log subprocess output (LOG is a queue) def enqueue_output(out, q): for line in iter(out.readline, b''): q.put(line.rstrip()) out.close() # start thread t = threading.Thread(target=enqueue_output, args=(pipe, queue)) t.daemon = True # thread dies with the program t.start() self.threads.append(t)
Start a thread logging output from pipe
def _search_for_executable(self, executable): if os.path.isfile(executable): return os.path.abspath(executable) else: envpath = os.getenv('PATH') if envpath is None: return for path in envpath.split(os.pathsep): exe = os.path.join(path, executable) if os.path.isfile(exe): return os.path.abspath(exe)
Search for the file given in "executable". If it is not found, we try the directories on the environment PATH. Returns either the absolute path to the found executable, or None if the executable couldn't be found.
def get_stderr(self, tail=None): if self.finished(): self.join_threads() while not self.stderr_q.empty(): self.stderr_l.append(self.stderr_q.get_nowait()) if tail is None: tail = len(self.stderr_l) return _py2_and_3_joiner('\n', self.stderr_l[:tail])
Returns current total output written to standard error. :param tail: Return this number of most-recent lines. :return: copy of stderr stream
def get_stdout(self, tail=None): if self.finished(): self.join_threads() while not self.stdout_q.empty(): self.stdout_l.append(self.stdout_q.get_nowait()) if tail is None: tail = len(self.stdout_l) return _py2_and_3_joiner('\n', self.stdout_l[:tail])
Returns current total output written to standard output. :param tail: Return this number of most-recent lines. :return: copy of stdout stream
def kill(self): if self.running(): if self.verbose: print('Killing {} with PID {}'.format(self.exe, self.process.pid)) self.process.kill() # Threads *should* tidy up after themselves, but we do it explicitly self.join_threads()
Kill the running process (if there is one) :return: void
def _command_template(self, switches, objectInput=None): command = ["java", "-jar", self.file_jar, "-eUTF-8"] if self.memory_allocation: command.append("-Xmx{}".format(self.memory_allocation)) command.extend(switches) if not objectInput: objectInput = subprocess.PIPE log.debug("Subprocess command: {}".format(", ".join(command))) if six.PY2: with open(os.devnull, "w") as devnull: out = subprocess.Popen( command, stdin=objectInput, stdout=subprocess.PIPE, stderr=devnull) elif six.PY3: out = subprocess.Popen( command, stdin=objectInput, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) stdoutdata, _ = out.communicate() return stdoutdata.decode("utf-8").strip()
Template for Tika app commands Args: switches (list): list of switches to Tika app Jar objectInput (object): file object/standard input to analyze Return: Standard output data (unicode Python 2, str Python 3)
def detect_content_type(self, path=None, payload=None, objectInput=None): # From Python detection content type from stdin doesn't work TO FIX if objectInput: message = "Detection content type with file object is not stable." log.exception(message) raise TikaAppError(message) f = file_path(path, payload, objectInput) switches = ["-d", f] result = self._command_template(switches).lower() return result, path, f
Return the content type of passed file or payload. Args: path (string): Path of file to analyze payload (string): Payload base64 to analyze objectInput (object): file object/standard input to analyze Returns: content type of file (string)
def extract_only_content(self, path=None, payload=None, objectInput=None): if objectInput: switches = ["-t"] result = self._command_template(switches, objectInput) return result, True, None else: f = file_path(path, payload) switches = ["-t", f] result = self._command_template(switches) return result, path, f
Return only the text content of the passed file. The path, payload and objectInput parameters are mutually exclusive; only one of them can be analyzed.

Args:
    path (string): Path of file to analyze
    payload (string): Payload base64 to analyze
    objectInput (object): file object/standard input to analyze

Returns:
    text of file passed (string)
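A hypothetical usage sketch of the extraction methods above; the `TikaApp` wrapper class name, import path and jar location are assumptions inferred from `_command_template`, not confirmed by the snippets.

# Hypothetical usage -- the wrapper class name, import path and paths are placeholders.
from tikapp import TikaApp   # assumed import for the wrapper shown above

tika = TikaApp(file_jar='/opt/tika/tika-app.jar')   # path to the Tika app jar

# Plain-text extraction from a file on disk
text = tika.extract_only_content(path='/tmp/example.pdf')

# Content-type detection for the same file
content_type = tika.detect_content_type(path='/tmp/example.pdf')
print(content_type, len(text))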
def extract_all_content( self, path=None, payload=None, objectInput=None, pretty_print=False, convert_to_obj=False, ): f = file_path(path, payload, objectInput) switches = ["-J", "-t", "-r", f] if not pretty_print: switches.remove("-r") result = self._command_template(switches) if result and convert_to_obj: result = json.loads(result, encoding="utf-8") return result, path, f
This function returns a JSON of all contents and metadata of passed file Args: path (string): Path of file to analyze payload (string): Payload base64 to analyze objectInput (object): file object/standard input to analyze pretty_print (boolean): If True adds newlines and whitespace, for better readability convert_to_obj (boolean): If True convert JSON in object
def sanitize(func): def wrapper(*args, **kwargs): return normalize('NFC', func(*args, **kwargs)) return wrapper
Decorator that applies Unicode NFC normalization to the string returned by the wrapped function. NFC is the normalization form recommended by W3C.
def clean(func): def wrapper(*args, **kwargs): # tuple: output command, path given from command line, # path of templ file when you give the payload out, given_path, path = func(*args, **kwargs) try: if not given_path: os.remove(path) except OSError: pass return out return wrapper
This decorator removes the temp file from disk. This covers the case where you analyze a payload or file object, which is first written to a temporary file.
def file_path(path=None, payload=None, objectInput=None): f = path if path else write_payload(payload, objectInput) if not os.path.exists(f): msg = "File {!r} does not exist".format(f) log.exception(msg) raise TikaAppFilePathError(msg) return f
Given a file path, payload or file object, return a path to a file on disk: the original path if one was given, otherwise the payload/file object is written to a temporary file and that temp path is returned.

Args:
    path (string): path of real file
    payload (string): payload in base64 of file
    objectInput (object): file object/standard input to analyze

Returns:
    Path of file
def write_payload(payload=None, objectInput=None): temp = tempfile.mkstemp()[1] log.debug("Write payload in temp file {!r}".format(temp)) with open(temp, 'wb') as f: if payload: payload = base64.b64decode(payload) elif objectInput: if six.PY3: payload = objectInput.buffer.read() elif six.PY2: payload = objectInput.read() f.write(payload) return temp
This function writes a base64 payload or file object on disk. Args: payload (string): payload in base64 objectInput (object): file object/standard input to analyze Returns: Path of file
def get_subject(self, data): if not isinstance(data, Mapping): return None if data.get(self.subject): return data.get(self.subject) return uuid.uuid4().urn
Try to get a unique ID from the object. By default, this will be the 'id' field of any given object, or a field specified by the 'rdfSubject' property. If no other option is available, a UUID will be generated.
def reverse(self): name = self.schema.get('rdfReverse') if name is not None: return name if self.parent is not None and self.parent.is_array: return self.parent.reverse
Reverse links make sense for object to object links where we later may want to also query the reverse of the relationship, e.g. when obj1 is a child of obj2, we want to infer that obj2 is a parent of obj1.
def triplify(self, data, parent=None): if data is None: return if self.is_object: for res in self._triplify_object(data, parent): yield res elif self.is_array: for item in data: for res in self.items.triplify(item, parent): yield res else: # TODO: figure out if I ever want to check for reverse here. type_name = typecast.name(data) obj = typecast.stringify(type_name, data) if obj is not None: obj = obj.strip() yield (parent, self.predicate, obj, type_name)
Recursively generate statements from the data supplied.
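An independent, self-contained illustration of the same pattern (recursively walking a nested record and yielding subject/predicate/object statements); the helper below is only a sketch, not the mapping implementation above.

import uuid

def triplify(data, parent=None, predicate=None):
    """Yield (subject, predicate, object) statements from a nested record."""
    if isinstance(data, dict):
        subject = data.get('id') or uuid.uuid4().urn   # mirrors get_subject() above
        if parent is not None:
            yield (parent, predicate, subject)
        for key, value in data.items():
            for triple in triplify(value, subject, key):
                yield triple
    elif isinstance(data, list):
        for item in data:
            for triple in triplify(item, parent, predicate):
                yield triple
    elif data is not None:
        yield (parent, predicate, data)

record = {'id': 'urn:ex:1', 'name': 'Alice', 'friends': [{'id': 'urn:ex:2', 'name': 'Bob'}]}
for stmt in triplify(record):
    print(stmt)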
def _triplify_object(self, data, parent): subject = self.get_subject(data) if self.path: yield (subject, TYPE_SCHEMA, self.path, TYPE_SCHEMA) if parent is not None: yield (parent, self.predicate, subject, TYPE_LINK) if self.reverse is not None: yield (subject, self.reverse, parent, TYPE_LINK) for prop in self.properties: for res in prop.triplify(data.get(prop.name), subject): yield res
Create bi-directional statements for object relationships.
def objectify(self, load, node, depth=2, path=None): if path is None: path = set() if self.is_object: if depth < 1: return return self._objectify_object(load, node, depth, path) elif self.is_array: if depth < 1: return return [self.items.objectify(load, node, depth, path)] else: return node
Given a node ID, return an object describing the information available about this node. This accepts a loader function as its first argument, which is expected to return all tuples of (predicate, object, source) for the given subject.
def get_json(request, token): result = [] searchtext = request.GET['q'] if len(searchtext) >= 3: pickled = _simple_autocomplete_queryset_cache.get(token, None) if pickled is not None: app_label, model_name, query = pickle.loads(pickled) model = apps.get_model(app_label, model_name) queryset = QuerySet(model=model, query=query) fieldname = get_search_fieldname(model) di = {'%s__istartswith' % fieldname: searchtext} app_label_model = '%s.%s' % (app_label, model_name) max_items = get_setting(app_label_model, 'max_items', 10) items = queryset.filter(**di).order_by(fieldname)[:max_items] # Check for duplicate strings counts = {} for item in items: if hasattr(item, "__unicode__"): key = item.__unicode__() else: key = str(item) #key = unicode(item) counts.setdefault(key, 0) counts[key] += 1 # Assemble result set for item in items: #key = value = unicode(item) if hasattr(item, "__unicode__"): key = value = item.__unicode__() else: key = value = str(item) value = getattr(item, fieldname) if counts[key] > 1: func = get_setting( app_label_model, 'duplicate_format_function', lambda obj, model, content_type: content_type.name ) content_type = ContentType.objects.get_for_model(model) value = '%s (%s)' % (value, func(item, model, content_type)) result.append((item.id, value)) else: result = 'CACHE_MISS' return HttpResponse(json.dumps(result))
Return matching results as JSON
def _dash_f_e_to_dict(self, info_filename, tree_filename): with open(info_filename) as fl: models, likelihood, partition_params = self._dash_f_e_parser.parseFile(fl).asList() with open(tree_filename) as fl: tree = fl.read() d = {'likelihood': likelihood, 'ml_tree': tree, 'partitions': {}} for model, params in zip(models, partition_params): subdict = {} index, name, _, alpha, rates, freqs = params subdict['alpha'] = alpha subdict['name'] = name subdict['rates'] = rates subdict['frequencies'] = freqs subdict['model'] = model d['partitions'][index] = subdict return d
Raxml provides an option to fit model params to a tree, selected with -f e. The output is different and needs a different parser.
def to_dict(self, info_filename, tree_filename, dash_f_e=False): logger.debug('info_filename: {} {}' .format(info_filename, '(FOUND)' if os.path.exists(info_filename) else '(NOT FOUND)')) logger.debug('tree_filename: {} {}' .format(tree_filename, '(FOUND)' if os.path.exists(tree_filename) else '(NOT FOUND)')) if dash_f_e: return self._dash_f_e_to_dict(info_filename, tree_filename) else: return self._to_dict(info_filename, tree_filename)
Parse raxml output and return a dict Option dash_f_e=True will parse the output of a raxml -f e run, which has different output
def freader(filename, gz=False, bz=False): filecheck(filename) if filename.endswith('.gz'): gz = True elif filename.endswith('.bz2'): bz = True if gz: return gzip.open(filename, 'rb') elif bz: return bz2.BZ2File(filename, 'rb') else: return io.open(filename, 'rb')
Returns a file-reader object that can handle plain, gzipped or bzip2-compressed input.
def fwriter(filename, gz=False, bz=False): if filename.endswith('.gz'): gz = True elif filename.endswith('.bz2'): bz = True if gz: if not filename.endswith('.gz'): filename += '.gz' return gzip.open(filename, 'wb') elif bz: if not filename.endswith('.bz2'): filename += '.bz2' return bz2.BZ2File(filename, 'w') else: return open(filename, 'w')
Returns a filewriter object that can write plain or gzipped output. If gzip or bzip2 compression is asked for then the usual filename extension will be added.
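A short round-trip sketch of the behaviour the two helpers above select from the filename extension, written directly against the standard library so it does not depend on the filecheck helper; the filename is a placeholder.

import gzip

# Write compressed text, then read it back -- the same behaviour fwriter/freader
# choose automatically for a '.gz' extension.
with gzip.open('example.txt.gz', 'wb') as fw:     # placeholder filename
    fw.write(b'first line\nsecond line\n')

with gzip.open('example.txt.gz', 'rb') as fr:
    for line in fr:
        print(line.decode().rstrip())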
def glob_by_extensions(directory, extensions): directorycheck(directory) files = [] xt = files.extend for ex in extensions: xt(glob.glob('{0}/*.{1}'.format(directory, ex))) return files
Returns files matched by any of the extensions in the extensions list
def head(filename, n=10): with freader(filename) as fr: for _ in range(n): print(fr.readline().strip())
prints the top `n` lines of a file
def locate_file(filename, env_var='', directory=''): f = locate_by_env(filename, env_var) or locate_by_dir(filename, directory) return os.path.abspath(f) if can_locate(f) else None
Locates a file given an environment variable or directory :param filename: filename to search for :param env_var: environment variable to look under :param directory: directory to look in :return: (string) absolute path to filename or None if not found
def edge_length_check(length, edge): try: assert 0 <= length <= edge.length except AssertionError: if length < 0: raise TreeError('Negative edge-lengths are disallowed') raise TreeError( 'This edge isn\'t long enough to prune at length {0}\n' '(Edge length = {1})'.format(length, edge.length))
Raises error if length is not in interval [0, edge.length]
def logn_correlated_rate(parent_rate, branch_length, autocorrel_param, size=1): if autocorrel_param <= 0: raise Exception('Autocorrelation parameter must be greater than 0') variance = branch_length * autocorrel_param stdev = np.sqrt(variance) ln_descendant_rate = np.random.normal(np.log(parent_rate) - 0.5 * variance, scale=stdev, size=size) descendant_rate = np.exp(ln_descendant_rate) return float(descendant_rate) if size == 1 else descendant_rate
The log of the descendant rate, ln(Rd), is ~ N(mu, bl*ac), where the variance
bl*ac = branch_length * autocorrel_param, and mu is set so that E[Rd] = Rp:

    if ln(X) ~ N(mu, sigma^2), then E[X] = exp(mu + (1/2)*sigma^2),
    so Rp = exp(mu + (1/2)*bl*ac)
    => ln(Rp) = mu + (1/2)*bl*ac
    => mu = ln(Rp) - (1/2)*bl*ac,
    so ln(Rd) ~ N(ln(Rp) - (1/2)*bl*ac, bl*ac)

(NB: Var[Rd] = Rp^2 * (exp(bl*ac) - 1), Std[Rd] = Rp * sqrt(exp(bl*ac) - 1))

See: H Kishino, J L Thorne, and W J Bruno (2001)
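A quick numpy check of the property derived above, E[Rd] = Rp, drawing many descendant rates with the same mean correction; the parameter values are arbitrary.

import numpy as np

rng = np.random.default_rng(0)
parent_rate, branch_length, autocorrel = 2.0, 0.3, 0.5   # arbitrary example values

variance = branch_length * autocorrel
mu = np.log(parent_rate) - 0.5 * variance                # mean correction from the derivation
descendant = np.exp(rng.normal(mu, np.sqrt(variance), size=1_000_000))

print(descendant.mean())                                 # ~ 2.0 == parent_rate
print(parent_rate * np.sqrt(np.exp(variance) - 1.0))     # predicted Std[Rd]
print(descendant.std())                                  # close to the prediction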
def _check_single_outgroup(self): root_child_nodes = self.tree._tree.seed_node.child_nodes() not_leaves = np.logical_not([n.is_leaf() for n in root_child_nodes]) if not_leaves[not_leaves].size <= 1: return [root_child_nodes[np.where(not_leaves)[0]].edge] return []
If only one (or none) of the seed node children is not a leaf node it is not possible to prune that edge and make a topology-changing regraft.
def prune(self, edge, length=None): length = length or edge.length edge_length_check(length, edge) n = edge.head_node self.tree._tree.prune_subtree(n, suppress_unifurcations=False) n.edge_length = length self.tree._dirty = True return n
Prunes a subtree from the main Tree, retaining an edge length specified by length (defaults to entire length). The length is sanity- checked by edge_length_check, to ensure it is within the bounds [0, edge.length]. Returns the basal node of the pruned subtree.
def regraft(self, edge, node, length=None): rootcheck(edge, 'SPR regraft is not allowed on the root edge') length = length or edge.length / 2. # Length measured from head to tail edge_length_check(length, edge) t = edge.tail_node h = edge.head_node new = t.new_child(edge_length=edge.length - length) t.remove_child(h) new.add_child(h) h.edge.length=length new.add_child(node) self.tree._dirty = True self.tree._tree.encode_bipartitions(suppress_unifurcations=True)
Grafts a node onto an edge of the Tree, at a point specified by length (defaults to middle of edge).
def rspr(self, disallow_sibling_sprs=False, keep_entire_edge=False, rescale=False): starting_length = self.tree._tree.length() excl = [self.tree._tree.seed_node.edge] # exclude r if disallow_sibling_sprs: excl.extend(self._check_single_outgroup()) prune_edge, l1 = self.tree.map_event_onto_tree(excl) if keep_entire_edge: l1 = prune_edge.length prune_edge_child_nodes = prune_edge.head_node.preorder_iter() excl.extend([node.edge for node in prune_edge_child_nodes]) if disallow_sibling_sprs: sibs = [node.edge for node in prune_edge.head_node.sister_nodes()] par = prune_edge.tail_node.edge sibs.append(par) for edge in sibs: if edge not in excl: excl.append(edge) if set(self.tree._tree.preorder_edge_iter()) - set(excl) == set([]): print(repr(self.tree)) print(self.tree._tree.as_ascii_plot()) # print(edges[prune_edge]) raise Exception('No non-sibling sprs available') regraft_edge, l2 = self.tree.map_event_onto_tree(excl) # edges, nodes, redges, rnodes = self.tree._name_things() # print(edges[prune_edge], l1, edges[regraft_edge], l2) self.spr(prune_edge, l1, regraft_edge, l2) if rescale: self.tree.scale(starting_length / self.tree.length()) self.tree._dirty = True
Random SPR, with prune and regraft edges chosen randomly, and lengths drawn uniformly from the available edge lengths. N1: disallow_sibling_sprs prevents sprs that don't alter the topology of the tree
def get_exchangeable_nodes(self, n): parent = n.parent_node a, b = random.sample(n.child_nodes(), 2) if parent.parent_node is None: if self.tree.rooted: c, d = random.sample(n.sister_nodes()[0].child_nodes(), 2) else: c, d = random.sample(n.sister_nodes(), 2) else: c = random.choice(n.sister_nodes()) d = random.choice(parent.sister_nodes()) return a, b, c, d
 A      C    | Subtrees A, B, C and D are the exchangeable nodes
  \    /     | around the edge headed by n.
   -->n      | The NNI exchanges either A or B with either C or D
  /    \
 B      D

 A      C                     C      A    | Subtree A is exchanged
  \    /       +NNI(A,C)       \    /     | with subtree C.
   -->n       ==========>       -->n
  /    \                       /    \
 B      D                     B      D
def get_children(self, inner_edge): h = inner_edge.head_node t = inner_edge.tail_node if not self.tree._tree.seed_node == t: original_seed = self.tree._tree.seed_node self.tree._tree.reseed_at(t) else: original_seed = None head_children = h.child_nodes() tail_children = list(set(t.child_nodes()) - {h}) # See N1 if original_seed: self.tree._tree.reseed_at(original_seed) return {'head': head_children, 'tail': tail_children}
Given an edge in the tree, returns the child nodes of the head and
the tail nodes of the edge, for instance:

    A      C    | A, B, C and D are the children of the edge --->,
     \    /     | C and D are the head node children, and A and B
     t--->h     | are the tail node children.
     /    \
    B      D    | Output: {'head': [<C>, <D>], 'tail': [<A>, <B>]}

N1: Edges are directional in dendropy trees. The head node of an edge is
automatically a child of the tail node, but we don't want this.
def nni( self, edge, head_subtree, tail_subtree, ): # This implementation works on unrooted Trees. If the input Tree is # rooted, the ReversibleDeroot decorator will temporarily unroot the # tree while the NNI is carried out original_seed = self.tree._tree.seed_node head = edge.head_node tail = edge.tail_node self.tree._tree.reseed_at(tail) try: assert head_subtree.parent_node == head assert tail_subtree.parent_node == tail except: print(head, tail, head_subtree, tail_subtree) raise head.remove_child(head_subtree) tail.remove_child(tail_subtree) head.add_child(tail_subtree) tail.add_child(head_subtree) self.tree._tree.reseed_at(original_seed) self.tree._tree.encode_bipartitions() self.tree._dirty = True
*Inplace* Nearest-neighbour interchange (NNI) operation.

An edge in the tree has two or more subtrees at each end (ends are
designated 'head' and 'tail'). The NNI operation exchanges one of the
head subtrees for one of the tail subtrees, as follows:

    A      C                     C      A    | Subtree A is exchanged
     \    /       +NNI(A,C)       \    /     | with subtree C.
      --->       ==========>       --->
     /    \                       /    \
    B      D                     B      D
def rnni(self, use_weighted_choice=False, invert_weights=False): if use_weighted_choice: leaves = list(self.tree._tree.leaf_edge_iter()) e, _ = self.tree.map_event_onto_tree(excluded_edges=leaves, invert_weights=invert_weights) else: e = random.choice(self.tree.get_inner_edges()) children = self.get_children(e) h = random.choice(children['head']) t = random.choice(children['tail']) self.nni(e, h, t)
Apply a random NNI operation at a randomly selected edge The edge can be chosen uniformly, or weighted by length -- invert_weights favours short edges.
def labels(self): return set([n.taxon.label for n in self._tree.leaf_nodes()])
Returns the taxon set of the tree (same as the label- or leaf-set)
def sample_labels(self, n): if n >= len(self): return self.labels sample = random.sample(self.labels, n) return set(sample)
Returns a set of n labels sampled from the labels of the tree :param n: Number of labels to sample :return: set of randomly sampled labels
def newick(self): n = self._tree.as_string('newick', suppress_rooting=True, suppress_internal_node_labels=True) if n: return n.strip(';\n') + ';' return n
For more control the dendropy method self.as_string('newick', **kwargs) can be used.
KWargs include:
    suppress_internal_node_labels [True/False] - turn on/off bootstrap labels
    suppress_rooting [True/False] - turn on/off [&U] or [&R] rooting state labels
    edge_label_compose_func - function to convert edge lengths:
        takes edge as arg, returns string
def phylotree(self): if not self._phylotree or self._dirty: try: if ISPY3: self._phylotree = PhyloTree(self.newick.encode(), self.rooted) else: self._phylotree = PhyloTree(self.newick, self.rooted) except ValueError: logger.error('Couldn\'t convert to C++ PhyloTree -- are there bootstrap values?') self._dirty = False return self._phylotree
Get the c++ PhyloTree object corresponding to this tree. :return: PhyloTree instance
def bifurcate_base(cls, newick): t = cls(newick) t._tree.resolve_polytomies() return t.newick
Rewrites a newick string so that the base is a bifurcation (rooted tree)
def trifurcate_base(cls, newick): t = cls(newick) t._tree.deroot() return t.newick
Rewrites a newick string so that the base is a trifurcation (usually means an unrooted tree)
def get_inner_edges(self): inner_edges = [e for e in self._tree.preorder_edge_iter() if e.is_internal() and e.head_node and e.tail_node] return inner_edges
Returns a list of the internal edges of the tree.
def intersection(self, other): taxa1 = self.labels taxa2 = other.labels return taxa1 & taxa2
Returns the intersection of the taxon sets of two Trees
def postorder(self, skip_seed=False): for node in self._tree.postorder_node_iter(): if skip_seed and node is self._tree.seed_node: continue yield node
Return a generator that yields the nodes of the tree in postorder. If skip_seed=True then the root node is not included.
def preorder(self, skip_seed=False): for node in self._tree.preorder_node_iter(): if skip_seed and node is self._tree.seed_node: continue yield node
Return a generator that yields the nodes of the tree in preorder. If skip_seed=True then the root node is not included.
def prune_to_subset(self, subset, inplace=False): if not subset.issubset(self.labels): print('"subset" is not a subset') return if not inplace: t = self.copy() else: t = self t._tree.retain_taxa_with_labels(subset) t._tree.encode_bipartitions() t._dirty = True return t
Prunes the Tree to just the taxon set given in `subset`
def randomise_branch_lengths( self, i=(1, 1), l=(1, 1), distribution_func=random.gammavariate, inplace=False, ): if not inplace: t = self.copy() else: t = self for n in t._tree.preorder_node_iter(): if n.is_internal(): n.edge.length = max(0, distribution_func(*i)) else: n.edge.length = max(0, distribution_func(*l)) t._dirty = True return t
Replaces branch lengths with values drawn from the specified distribution_func. Parameters of the distribution are given in the tuples i and l, for interior and leaf nodes respectively.
def randomise_labels( self, inplace=False, ): if not inplace: t = self.copy() else: t = self names = list(t.labels) random.shuffle(names) for l in t._tree.leaf_node_iter(): l.taxon._label = names.pop() t._dirty = True return t
Shuffles the leaf labels, but doesn't alter the tree structure
def reversible_deroot(self): root_edge = self._tree.seed_node.edge lengths = dict([(edge, edge.length) for edge in self._tree.seed_node.incident_edges() if edge is not root_edge]) self._tree.deroot() reroot_edge = (set(self._tree.seed_node.incident_edges()) & set(lengths.keys())).pop() self._tree.encode_bipartitions() self._dirty = True return (reroot_edge, reroot_edge.length - lengths[reroot_edge], lengths[reroot_edge])
Stores info required to restore rootedness to derooted Tree. Returns
the edge that was originally rooted, the length of e1, and the length of e2.

Dendropy Derooting Process:
In a rooted tree the root node is bifurcating. Derooting makes it
trifurcating. Call the two edges leading out of the root node e1 and e2.
Derooting with Tree.deroot() deletes one of e1 and e2 (let's say e2),
and stretches the other to the sum of their lengths. Call this e3.

(The original docstring includes an ASCII diagram of the rooted tree, with
edges e1 and e2 at the root node, next to the derooted tree in which e3 has
length e1 + e2 and e2 is deleted.)

Reverse this with Tree.reroot_at_edge(edge, length1, length2, ...)
def autocorrelated_relaxed_clock(self, root_rate, autocorrel, distribution='lognormal'): optioncheck(distribution, ['exponential', 'lognormal']) if autocorrel == 0: for node in self._tree.preorder_node_iter(): node.rate = root_rate return for node in self._tree.preorder_node_iter(): if node == self._tree.seed_node: node.rate = root_rate else: parent_rate = node.parent_node.rate bl = node.edge_length if distribution == 'lognormal': node.rate = logn_correlated_rate(parent_rate, bl, autocorrel) else: node.rate = np.random.exponential(parent_rate)
Attaches rates to each node according to autocorrelated lognormal model from Kishino et al.(2001), or autocorrelated exponential
def rlgt(self, time=None, times=1, disallow_sibling_lgts=False): lgt = LGT(self.copy()) for _ in range(times): lgt.rlgt(time, disallow_sibling_lgts) return lgt.tree
Uses class LGT to perform random lateral gene transfer on ultrametric tree
def rnni(self, times=1, **kwargs): nni = NNI(self.copy()) for _ in range(times): nni.rnni(**kwargs) # nni.reroot_tree() return nni.tree
Applies a NNI operation on a randomly chosen edge.

keyword args:
    use_weighted_choice (True/False) - weight the random edge selection by edge length
    transform (callable) - transforms the edges using this function, prior to weighted selection
def rspr(self, times=1, **kwargs): spr = SPR(self.copy()) for _ in range(times): spr.rspr(**kwargs) return spr.tree
Random SPR, with prune and regraft edges chosen randomly, and lengths drawn uniformly from the available edge lengths. N1: disallow_sibling_sprs prevents sprs that don't alter the topology of the tree
def scale(self, factor, inplace=True): if not inplace: t = self.copy() else: t = self t._tree.scale_edges(factor) t._dirty = True return t
Multiplies all branch lengths by factor.
def strip(self, inplace=False): if not inplace: t = self.copy() else: t = self for e in t._tree.preorder_edge_iter(): e.length = None t._dirty = True return t
Sets all edge lengths to None
def translate(self, dct): new_tree = self.copy() for leaf in new_tree._tree.leaf_node_iter(): curr_name = leaf.taxon.label leaf.taxon.label = dct.get(curr_name, curr_name) return new_tree
Translate leaf names using a dictionary of names :param dct: Dictionary of current names -> updated names :return: Copy of tree with names changed
def _name_things(self): edges = {} nodes = {None: 'root'} for n in self._tree.postorder_node_iter(): nodes[n] = '.'.join([str(x.taxon) for x in n.leaf_nodes()]) for e in self._tree.preorder_edge_iter(): edges[e] = ' ---> '.join([nodes[e.tail_node], nodes[e.head_node]]) r_edges = {value: key for key, value in edges.items()} r_nodes = {value: key for key, value in nodes.items()} return edges, nodes, r_edges, r_nodes
Easy names for debugging
def gene_tree( self, scale_to=None, population_size=1, trim_names=True, ): tree = self.template or self.yule() for leaf in tree._tree.leaf_node_iter(): leaf.num_genes = 1 dfr = tree._tree.seed_node.distance_from_root() dft = tree._tree.seed_node.distance_from_tip() tree_height = dfr + dft if scale_to: population_size = tree_height / scale_to for edge in tree._tree.preorder_edge_iter(): edge.pop_size = population_size gene_tree = dpy.simulate.treesim.constrained_kingman_tree(tree._tree)[0] if trim_names: for leaf in gene_tree.leaf_node_iter(): leaf.taxon.label = leaf.taxon.label.replace('\'', '').split('_')[0] # Dendropy changed its API return {'gene_tree': tree.__class__(gene_tree.as_string('newick', suppress_rooting=True).strip(';\n') + ';'), 'species_tree': tree}
Using the current tree object as a species tree, generate a gene tree using
the constrained Kingman coalescent process from dendropy. The species tree
should probably be a valid, ultrametric tree, generated by some pure birth,
birth-death or coalescent process, but no checks are made.

Optional kwargs are:
    -- scale_to, which is a floating point value to scale the total tree
       tip-to-root length to,
    -- population_size, which is a floating point value which all branch
       lengths will be divided by to convert them to coalescent units, and
    -- trim_names, boolean, defaults to true, trims off the number which
       dendropy appends to the sequence name
def fit(self, ini_betas=None, tol=1.0e-6, max_iter=200, solve='iwls'): self.fit_params['ini_betas'] = ini_betas self.fit_params['tol'] = tol self.fit_params['max_iter'] = max_iter self.fit_params['solve'] = solve if solve.lower() == 'iwls': params, predy, w, n_iter = iwls( self.y, self.X, self.family, self.offset, self.y_fix, ini_betas, tol, max_iter) self.fit_params['n_iter'] = n_iter return GLMResults(self, params.flatten(), predy, w)
Method that fits a model with a particular estimation routine.

Parameters
----------
ini_betas : array
    k*1, initial coefficient values, including constant.
    Default is None, which calculates initial values during estimation.
tol : float
    Tolerance for estimation convergence.
max_iter : integer
    Maximum number of iterations if convergence not achieved.
solve : string
    Technique to solve MLE equations.
    'iwls' = iteratively (re)weighted least squares (default)
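A compact, self-contained sketch of an IWLS (iteratively reweighted least squares) fit for a logistic GLM using only numpy; this illustrates the technique named above and is not the library's own `iwls` routine.

import numpy as np

def iwls_logistic(X, y, tol=1e-6, max_iter=200):
    """Fit a logistic regression by IWLS / Fisher scoring (illustrative only)."""
    betas = np.zeros(X.shape[1])
    for _ in range(max_iter):
        eta = X.dot(betas)                       # linear predictor
        mu = 1.0 / (1.0 + np.exp(-eta))          # inverse logit link
        w = mu * (1.0 - mu)                      # working weights (variance function)
        z = eta + (y - mu) / w                   # working (adjusted) response
        wx = X * w[:, None]
        new_betas = np.linalg.solve(X.T.dot(wx), wx.T.dot(z))
        if np.max(np.abs(new_betas - betas)) < tol:
            return new_betas
        betas = new_betas
    return betas

# Tiny synthetic example: intercept + one covariate
rng = np.random.default_rng(0)
x = rng.normal(size=200)
X = np.column_stack([np.ones_like(x), x])
y = (rng.random(200) < 1.0 / (1.0 + np.exp(-(0.5 + 1.5 * x)))).astype(float)
print(iwls_logistic(X, y))                       # roughly recovers [0.5, 1.5]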
def deriv2(self, p):
    from statsmodels.tools.numdiff import approx_fprime_cs
    # TODO: work around problem with numdiff for 1d
    return np.diag(approx_fprime_cs(p, self.deriv))
Second derivative of the link function g''(p) implemented through numerical differentiation
def inverse(self, z): z = np.asarray(z) t = np.exp(-z) return 1. / (1. + t)
Inverse of the logit transform

Parameters
----------
z : array-like
    The value of the logit transform at `p`

Returns
-------
p : array
    Probabilities

Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
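A quick numeric check that this inverse undoes the logit transform; note the implementation above computes 1/(1+exp(-z)), which is algebraically equal to exp(z)/(1+exp(z)).

import numpy as np

def logit(p):
    return np.log(p / (1.0 - p))

def logit_inverse(z):
    return 1.0 / (1.0 + np.exp(-np.asarray(z)))

p = np.array([0.01, 0.25, 0.5, 0.75, 0.99])
print(np.allclose(logit_inverse(logit(p)), p))   # True: round-trips the probabilities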
def inverse(self, z): p = np.power(z, 1. / self.power) return p
Inverse of the power transform link function

Parameters
----------
`z` : array-like
    Value of the transformed mean parameters at `p`

Returns
-------
`p` : array
    Mean parameters

Notes
-----
g^(-1)(`z`) = `z`**(1/`power`)
def deriv(self, p): return self.power * np.power(p, self.power - 1)
Derivative of the power transform

Parameters
----------
p : array-like
    Mean parameters

Returns
-------
g'(p) : array
    Derivative of power transform of `p`

Notes
-----
g'(`p`) = `power` * `p`**(`power` - 1)
def deriv2(self, p): return self.power * (self.power - 1) * np.power(p, self.power - 2)
Second derivative of the power transform

Parameters
----------
p : array-like
    Mean parameters

Returns
-------
g''(p) : array
    Second derivative of the power transform of `p`

Notes
-----
g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2)
def inverse_deriv(self, z): return np.power(z, (1 - self.power)/self.power) / self.power
Derivative of the inverse of the power transform

Parameters
----------
z : array-like
    `z` is usually the linear predictor for a GLM or GEE model.

Returns
-------
g^(-1)'(z) : array
    The value of the derivative of the inverse of the power transform function
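A small consistency check of the power-link formulas above, comparing the closed-form derivative against a finite difference and against the inverse-function rule g^(-1)'(z) = 1/g'(g^(-1)(z)); the exponent is an arbitrary example value.

import numpy as np

power = 2.0                                         # example exponent

g = lambda p: np.power(p, power)                    # link
g_inv = lambda z: np.power(z, 1.0 / power)          # inverse link
g_deriv = lambda p: power * np.power(p, power - 1.0)
g_inv_deriv = lambda z: np.power(z, (1.0 - power) / power) / power

p = np.linspace(0.5, 3.0, 6)
z = g(p)
eps = 1e-6

# Closed-form derivative vs. central finite difference
print(np.allclose(g_deriv(p), (g(p + eps) - g(p - eps)) / (2 * eps)))
# Inverse-function rule: (g^-1)'(z) = 1 / g'(g^-1(z))
print(np.allclose(g_inv_deriv(z), 1.0 / g_deriv(g_inv(z))))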