def kindex(matrix, k):
    ix = (np.arange(len(matrix)), matrix.argsort(axis=0)[k])
    return ix
Returns indices to select the kth nearest neighbour
def kmask(matrix, k=7, dists=None, logic='or'):
    dists = (kdists(matrix, k=k) if dists is None else dists)
    mask = (matrix <= dists)
    if logic == 'or' or logic == '|':
        return mask | mask.T
    elif logic == 'and' or logic == '&':
        return mask & mask.T
    return mask
Creates a boolean mask to include points within k nearest neighbours, and exclude the rest. Logic can be OR or AND. OR gives the k-nearest-neighbour mask, AND gives the mutual k-nearest-neighbour mask.
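A minimal self-contained sketch of the OR/AND distinction on a toy distance matrix. The k-th-neighbour threshold is computed inline here as a stand-in for kdists, which is not shown in this section:

import numpy as np

d = np.array([[0., 1., 4.],
              [1., 0., 2.],
              [4., 2., 0.]])
k = 1
thresh = np.sort(d, axis=0)[k]   # per-point distance to its k-th neighbour (self counts as neighbour 0)
mask = d <= thresh
knn_mask = mask | mask.T         # 'or'  logic: keep i-j if either point is within the other's k nearest
mutual_knn = mask & mask.T       # 'and' logic: keep i-j only if both points are within each other's k nearest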
def kscale(matrix, k=7, dists=None):
    dists = (kdists(matrix, k=k) if dists is None else dists)
    scale = dists.dot(dists.T)
    return scale
Returns the local scale based on the k-th nearest neighbour
def laplace(affinity_matrix, shi_malik_type=False):
    diagonal = affinity_matrix.sum(axis=1) - affinity_matrix.diagonal()
    zeros = diagonal <= 1e-10  # arbitrarily small value
    diagonal[zeros] = 1  # replace degree-zero rows to avoid division by zero
    if (diagonal <= 1e-10).any():
        raise ZeroDivisionError
    if shi_malik_type:
        inv_d = np.diag(1 / diagonal)
        return inv_d.dot(affinity_matrix)
    diagonal = np.sqrt(diagonal)
    return affinity_matrix / diagonal / diagonal[:, np.newaxis]
Converts an affinity matrix into a normalised graph Laplacian for spectral clustering. (At least) two forms exist: L = (D^-0.5).A.(D^-0.5) (the default), and L = (D^-1).A ('Shi-Malik' type, from the Shi & Malik paper).
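Rendered in standard notation (my own rendering of the two forms named above, with $D_{ii} = \sum_{j \neq i} A_{ij}$ as in the code; note the code returns the normalised affinity operator itself rather than $I$ minus it):

$L_{\mathrm{sym}} = D^{-1/2} A\, D^{-1/2}, \qquad L_{\mathrm{rw}} = D^{-1} A$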
def shift_and_scale(matrix, shift, scale):
    zeroed = matrix - matrix.min()
    scaled = (scale - shift) * (zeroed / zeroed.max())
    return scaled + shift
Shift and scale matrix so its minimum value is placed at `shift` and its maximum value is scaled to `scale`
def eigen(matrix):
    (vals, vecs) = np.linalg.eigh(matrix)
    ind = vals.argsort()[::-1]
    vals = vals[ind]
    vecs = vecs[:, ind]
    vals_ = vals.copy()
    vals_[vals_ < 0] = 0.
    cum_var_exp = np.cumsum(vals_ / vals_.sum())
    return Decomp(matrix.copy(), vals, vecs, cum_var_exp)
Calculates the eigenvalues and eigenvectors of the input matrix. Returns a tuple of (eigenvalues, eigenvectors, cumulative percentage of variance explained). Eigenvalues and eigenvectors are sorted in order of eigenvalue magnitude, high to low
def _embedding_classical_mds(matrix, dimensions=3, additive_correct=False):
    if additive_correct:
        dbc = double_centre(_additive_correct(matrix))
    else:
        dbc = double_centre(matrix)
    decomp = eigen(dbc)
    lambda_ = np.diag(np.sqrt(np.abs(decomp.vals[:dimensions])))
    evecs = decomp.vecs[:, :dimensions]
    coords = evecs.dot(lambda_)
    return coords
Private method to calculate CMDS embedding :param dimensions: (int) :return: coordinate matrix (np.array)
def _embedding_spectral(matrix, dimensions=3, unit_length=True, affinity_matrix=None, sigma=1):
    if affinity_matrix is None:
        aff = rbf(matrix, sigma=sigma)
    else:
        aff = affinity_matrix
    coords = sklearn.manifold.spectral_embedding(aff, dimensions)
    return normalise_rows(coords) if unit_length else coords
Private method to calculate Spectral embedding :param dimensions: (int) :return: coordinate matrix (np.array)
def _embedding_tsne(matrix, dimensions=3, early_exaggeration=12.0, method='barnes_hut', perplexity=30,
                    learning_rate=200, n_iter=1000):
    tsne = sklearn.manifold.TSNE(n_components=dimensions, metric="precomputed",
                                 early_exaggeration=early_exaggeration, method=method,
                                 perplexity=perplexity, learning_rate=learning_rate,
                                 n_iter=n_iter)  # was hard-coded to 1000, silently ignoring the n_iter argument
    return tsne.fit_transform(matrix)
Private method to perform tSNE embedding :param matrix: treeCl Distance Matrix :param dimensions: Number of dimensions in which to embed points :return: treeCl CoordinateMatrix
def _embedding_metric_mds(matrix, dimensions=3):
    mds = sklearn.manifold.MDS(n_components=dimensions, dissimilarity='precomputed', metric=True)
    mds.fit(matrix)
    return mds.embedding_
Private method to calculate MMDS embedding :param dimensions: (int) :return: coordinate matrix (np.array)
def _embedding_nonmetric_mds(matrix, dimensions=3, initial_coords=None):
    mds = sklearn.manifold.MDS(n_components=dimensions, dissimilarity='precomputed', metric=False)
    if initial_coords is not None:
        mds.fit(matrix, init=initial_coords)
    else:
        mds.fit(matrix)
    return mds.embedding_
Private method to calculate NMMDS embedding :param dimensions: (int) :return: coordinate matrix (np.array)
def _embedding_kernel_pca(matrix, dimensions=3, affinity_matrix=None, sigma=1):
    if affinity_matrix is None:
        aff = rbf(matrix, sigma)
    else:
        aff = affinity_matrix
    kpca = sklearn.decomposition.KernelPCA(kernel='precomputed', n_components=dimensions)
    return kpca.fit_transform(aff)
Private method to calculate KPCA embedding :param dimensions: (int) :return: coordinate matrix (np.array)
def coords_by_cutoff(self, cutoff=0.80):
    i = np.where(self.cve >= cutoff)[0][0]
    coords_matrix = self.vecs[:, :i + 1]
    return coords_matrix, self.cve[i]
Returns fitted coordinates in as many dimensions as are needed to explain a given amount of variance (specified in the cutoff)
def coords_by_dimension(self, dimensions=3):
    coords_matrix = self.vecs[:, :dimensions]
    varexp = self.cve[dimensions - 1]
    return coords_matrix, varexp
Returns fitted coordinates in the specified number of dimensions, and the amount of variance explained
def embedding(self, dimensions, method, **kwargs):
    errors.optioncheck(method, ['cmds', 'kpca', 'mmds', 'nmmds', 'spectral', 'tsne'])
    if method == 'cmds':
        array = _embedding_classical_mds(self.to_array(), dimensions, **kwargs)
    elif method == 'kpca':
        array = _embedding_kernel_pca(self.to_array(), dimensions, **kwargs)
    elif method == 'mmds':
        array = _embedding_metric_mds(self.to_array(), dimensions)
    elif method == 'nmmds':
        array = _embedding_nonmetric_mds(self.to_array(), dimensions, **kwargs)
    elif method == 'spectral':
        array = _embedding_spectral(self.to_array(), dimensions, **kwargs)
    elif method == 'tsne':
        array = _embedding_tsne(self.to_array(), dimensions, **kwargs)
    return CoordinateMatrix(array, names=self.df.index)
Embeds the distance matrix in a coordinate space. Implemented methods are:
    cmds: Classical MultiDimensional Scaling
    kpca: Kernel Principal Components Analysis
    mmds: Metric MultiDimensional Scaling
    nmmds: Non-Metric MultiDimensional Scaling
    spectral: Spectral decomposition of the Laplacian matrix
    tsne: t-distributed Stochastic Neighbour Embedding
Valid kwargs:
    kpca: affinity_matrix - a precomputed array of affinities; sigma - the value of sigma to use when computing the affinity matrix via the Radial Basis Function
    nmmds: initial_coords - a set of coordinates to refine. NMMDS works very badly without this
    spectral: affinity_matrix, sigma; unit_length - scale the coordinates to unit length, so points sit on the surface of the unit sphere
:param dimensions: (int) number of coordinate axes to use
:param method: (string) one of cmds, kpca, mmds, nmmds, spectral, tsne
:param kwargs: unit_length (bool), affinity_matrix (np.array), sigma (float), initial_coords (np.array)
:return: coordinate matrix (np.array)
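As an illustration of the kind of call these private helpers wrap, here is a small self-contained scikit-learn example on a synthetic distance matrix (scikit-learn only; this is not the treeCl API itself):

import numpy as np
import sklearn.manifold

rng = np.random.RandomState(0)
points = rng.rand(10, 5)
# pairwise Euclidean distances, shape (10, 10)
dists = np.sqrt(((points[:, None, :] - points[None, :, :]) ** 2).sum(-1))

mds = sklearn.manifold.MDS(n_components=3, dissimilarity='precomputed', metric=True)
coords = mds.fit(dists).embedding_   # same call pattern as _embedding_metric_mds above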
def extract_value(mapping, bind, data):
    columns = mapping.get('columns', [mapping.get('column')])
    values = [data.get(c) for c in columns]
    for transform in mapping.get('transforms', []):
        # any added transforms must also be added to the schema.
        values = list(TRANSFORMS[transform](mapping, bind, values))
    format_str = mapping.get('format')
    value = values[0] if len(values) else None
    if not is_empty(format_str):
        value = format_str % tuple('' if v is None else v for v in values)
    empty = is_empty(value)
    if empty:
        value = mapping.get('default') or bind.schema.get('default')
    return empty, convert_value(bind, value)
Given a mapping and JSON schema spec, extract a value from ``data`` and apply certain transformations to normalize the value.
def get_type(bind):
    types = bind.types + [bind.schema.get('format')]
    for type_name in ('date-time', 'date', 'decimal', 'integer', 'boolean', 'number', 'string'):
        if type_name in types:
            return type_name
    return 'string'
Detect the ideal type for the data, either using the explicit type definition or the format (for date, date-time, not supported by JSON).
def convert_value(bind, value):
    type_name = get_type(bind)
    try:
        return typecast.cast(type_name, value)
    except typecast.ConverterError:
        return value
Type casting.
def peaks(x, y, lookahead=20, delta=0.00003):
    _max, _min = peakdetect(y, x, lookahead, delta)
    x_peaks = [p[0] for p in _max]
    y_peaks = [p[1] for p in _max]
    x_valleys = [p[0] for p in _min]
    y_valleys = [p[1] for p in _min]
    _peaks = [x_peaks, y_peaks]
    _valleys = [x_valleys, y_valleys]
    return {"peaks": _peaks, "valleys": _valleys}
A wrapper around peakdetect to pack the return values in a nicer format
def _restricted_growth_notation(l):
    list_length = len(l)
    d = defaultdict(list)
    for (i, element) in enumerate(l):
        d[element].append(i)
    l2 = [None] * list_length
    for (name, index_list) in enumerate(sorted(d.values(), key=min)):
        for index in index_list:
            l2[index] = name
    return tuple(l2)
The clustering returned by the hcluster module gives group membership without regard for numerical order. This function preserves the group membership, but sorts the labelling into numerical order.
def random(cls, alpha, size):
    props = np.concatenate([[0], (scipy.stats.dirichlet.rvs(alpha) * size).cumsum().round().astype(int)])
    indices = np.array(list(range(size)))
    random.shuffle(indices)
    x = []
    for i in range(len(props) - 1):
        ix = indices[props[i]:props[i + 1]]
        x.append(ix)
    return cls.from_membership(x)
Generate a random start using expected proportions, alpha. These are used to parameterise a random draw from a Dirichlet distribution. For example, to split a dataset of 20 items into 3 groups of roughly [10, 6, 4] items, alpha = [10, 6, 4], alpha = [100, 60, 40] or alpha = [5, 3, 2] would all work. Variance is inversely related to sum(alpha).
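A quick self-contained check of the claim about variance and sum(alpha), using the same scipy draw as the method above:

import numpy as np
import scipy.stats

size = 20
for alpha in ([10, 6, 4], [100, 60, 40]):
    draws = scipy.stats.dirichlet.rvs(alpha, size=1000, random_state=1)
    sizes = np.round(draws * size)
    # means stay near [10, 6, 4] in both cases; the spread shrinks as sum(alpha) grows
    print(alpha, sizes.mean(axis=0), sizes.std(axis=0))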
def get_membership(self):
    result = defaultdict(list)
    for (position, value) in enumerate(self.partition_vector):
        result[value].append(position)
    return sorted([tuple(x) for x in result.values()])
Alternative representation of group membership - creates a list with one tuple per group; each tuple contains the indices of its members Example: partition = (0,0,0,1,0,1,2,2) membership = [(0,1,2,4), (3,5), (6,7)] :return: list of tuples giving group memberships by index
def variation_of_information(self, other):
    (entropy_1, entropy_2, mut_inf) = entropies(self, other)
    return entropy_1 + entropy_2 - 2 * mut_inf
Calculates the Variation of Information metric between two clusterings of the same data. See Meila, M. (2007). Comparing clusterings: an information based distance. Journal of Multivariate Analysis, 98(5), 873-895. doi:10.1016/j.jmva.2006.11.013
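In symbols (my rendering, with H the entropies and I the mutual information returned by the entropies helper):

$\mathrm{VI}(C_1, C_2) = H(C_1) + H(C_2) - 2\, I(C_1, C_2)$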
def normalize(self):
    median_diff = np.median(np.diff(self.x))
    bin_edges = [self.x[0] - median_diff / 2.0]
    bin_edges.extend(median_diff / 2.0 + self.x)
    self.y_raw = self.y_raw / (self.y_raw.sum() * np.diff(bin_edges))
    self.smooth()
Normalizes the given data such that the area under the histogram/curve comes to 1. Also re-applies smoothing once done.
def serialize(self, path):
    # use open() in binary mode: pickle needs bytes, and the Python 2 file() builtin no longer exists
    with open(path, 'wb') as fh:
        pickle.dump([self.x, self.y_raw], fh)
Saves the raw (i.e. unsmoothed) histogram data to the given path using the pickle module.
def extend_peaks(self, prop_thresh=50):
    # octave propagation of the reference peaks
    temp_peaks = [i + 1200 for i in self.peaks["peaks"][0]]
    temp_peaks.extend([i - 1200 for i in self.peaks["peaks"][0]])
    extended_peaks = []
    extended_peaks.extend(self.peaks["peaks"][0])
    for i in temp_peaks:
        # if a peak exists around, don't add this new one.
        nearest_ind = slope.find_nearest_index(self.peaks["peaks"][0], i)
        diff = abs(self.peaks["peaks"][0][nearest_ind] - i)
        diff = np.mod(diff, 1200)
        if diff > prop_thresh:
            extended_peaks.append(i)
    return extended_peaks
Each peak in the peaks of the object is checked for its presence in other octaves. If it does not exist, it is created. prop_thresh is the cent range within which the peak in the other octave is expected to be present, i.e., only if there is a peak within this cent range in other octaves, then the peak is considered to be present in that octave. Note that this does not change the peaks of the object. It just returns the extended peaks.
def plot(self, intervals=None, new_fig=True): import pylab as p if new_fig: p.figure() #step 1: plot histogram p.plot(self.x, self.y, ls='-', c='b', lw='1.5') #step 2: plot peaks first_peak = None last_peak = None if self.peaks: first_peak = min(self.peaks["peaks"][0]) last_peak = max(self.peaks["peaks"][0]) p.plot(self.peaks["peaks"][0], self.peaks["peaks"][1], 'rD', ms=10) p.plot(self.peaks["valleys"][0], self.peaks["valleys"][1], 'yD', ms=5) #Intervals if intervals is not None: #spacing = 0.02*max(self.y) for interval in intervals: if first_peak is not None: if interval <= first_peak or interval >= last_peak: continue p.axvline(x=interval, ls='-.', c='g', lw='1.5') if interval-1200 >= min(self.x): p.axvline(x=interval-1200, ls=':', c='b', lw='0.5') if interval+1200 <= max(self.x): p.axvline(x=interval+1200, ls=':', c='b', lw='0.5') if interval+2400 <= max(self.x): p.axvline(x=interval+2400, ls='-.', c='r', lw='0.5') #spacing *= -1 #p.title("Tonic-aligned complete-range pitch histogram") #p.xlabel("Pitch value (Cents)") #p.ylabel("Normalized frequency of occurence") p.show()
This function plots histogram together with its smoothed version and peak information if provided. Just intonation intervals are plotted for a reference.
def parallel_map(client, task, args, message, batchsize=1, background=False, nargs=None): show_progress = bool(message) njobs = get_njobs(nargs, args) nproc = len(client) logger.debug('parallel_map: len(client) = {}'.format(len(client))) view = client.load_balanced_view() if show_progress: message += ' (IP:{}w:{}b)'.format(nproc, batchsize) pbar = setup_progressbar(message, njobs, simple_progress=True) if not background: pbar.start() map_result = view.map(task, *list(zip(*args)), chunksize=batchsize) if background: return map_result, client while not map_result.ready(): map_result.wait(1) if show_progress: pbar.update(min(njobs, map_result.progress * batchsize)) if show_progress: pbar.finish() return map_result
Helper to map a function over a sequence of inputs, in parallel, with progress meter. :param client: IPython.parallel.Client instance :param task: Function :param args: Must be a list of tuples of arguments that the task function will be mapped onto. If the function takes a single argument, it still must be a 1-tuple. :param message: String for progress bar :param batchsize: Jobs are shipped in batches of this size. Higher numbers mean less network traffic, but longer execution time per job. :return: IPython.parallel.AsyncMapResult
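A toy illustration of the expected args layout, since the 1-tuple requirement is easy to trip over (add is a hypothetical stand-in task):

def add(a, b):
    return a + b

args = [(1, 2), (3, 4), (5, 6)]              # one tuple per job; mapped as add(1, 2), add(3, 4), ...
single_arg_jobs = [(x,) for x in range(5)]   # still 1-tuples when the task takes a single argument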
def sequential_map(task, args, message, nargs=None): njobs = get_njobs(nargs, args) show_progress = bool(message) if show_progress: pbar = setup_progressbar(message, njobs, simple_progress=True) pbar.start() map_result = [] for (i, arglist) in enumerate(tupleise(args), start=1): map_result.append(task(*arglist)) if show_progress: pbar.update(i) if show_progress: pbar.finish() return map_result
Helper to map a function over a sequence of inputs, sequentially, with progress meter. :param task: Function :param args: Must be a list of tuples of arguments that the task function will be mapped onto. If the function takes a single argument, it still must be a 1-tuple. :param message: String for progress bar :return: list of results, in input order
def threadpool_map(task, args, message, concurrency, batchsize=1, nargs=None): import concurrent.futures njobs = get_njobs(nargs, args) show_progress = bool(message) batches = grouper(batchsize, tupleise(args)) batched_task = lambda batch: [task(*job) for job in batch] if show_progress: message += ' (TP:{}w:{}b)'.format(concurrency, batchsize) pbar = setup_progressbar(message, njobs, simple_progress=True) pbar.start() with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor: futures = [] completed_count = 0 for batch in batches: futures.append(executor.submit(batched_task, batch)) if show_progress: for i, fut in enumerate(concurrent.futures.as_completed(futures), start=1): completed_count += len(fut.result()) pbar.update(completed_count) else: concurrent.futures.wait(futures) if show_progress: pbar.finish() return flatten_list([fut.result() for fut in futures])
Helper to map a function over a range of inputs, using a threadpool, with a progress meter
def processpool_map(task, args, message, concurrency, batchsize=1, nargs=None): njobs = get_njobs(nargs, args) show_progress = bool(message) batches = grouper(batchsize, tupleise(args)) def batched_task(*batch): return [task(*job) for job in batch] if show_progress: message += ' (PP:{}w:{}b)'.format(concurrency, batchsize) pbar = setup_progressbar(message, njobs, simple_progress=True) pbar.start() q_in = multiprocessing.Queue() # Should I limit either queue size? Limiting in-queue q_out = multiprocessing.Queue() # increases time taken to send jobs, makes pbar less useful proc = [multiprocessing.Process(target=fun, args=(batched_task, q_in, q_out)) for _ in range(concurrency)] for p in proc: p.daemon = True p.start() sent = [q_in.put((i, x)) for (i, x) in enumerate(batches)] [q_in.put((None, None)) for _ in range(concurrency)] res = [] completed_count = 0 for _ in range(len(sent)): result = get_from_queue(q_out) res.append(result) completed_count += len(result[1]) if show_progress: pbar.update(completed_count) [p.join() for p in proc] if show_progress: pbar.finish() return flatten_list([x for (i, x) in sorted(res)])
See http://stackoverflow.com/a/16071616
def concatenate(alignments): # Get the full set of labels (i.e. sequence ids) for all the alignments all_labels = set(seq.id for aln in alignments for seq in aln) # Make a dictionary to store info as we go along # (defaultdict is convenient -- asking for a missing key gives back an empty list) tmp = defaultdict(list) # Assume all alignments have same alphabet alphabet = alignments[0]._alphabet for aln in alignments: length = aln.get_alignment_length() # check if any labels are missing in the current alignment these_labels = set(rec.id for rec in aln) missing = all_labels - these_labels # if any are missing, create unknown data of the right length, # stuff the string representation into the tmp dict for label in missing: new_seq = UnknownSeq(length, alphabet=alphabet) tmp[label].append(str(new_seq)) # else stuff the string representation into the tmp dict for rec in aln: tmp[rec.id].append(str(rec.seq)) # Stitch all the substrings together using join (most efficient way), # and build the Biopython data structures Seq, SeqRecord and MultipleSeqAlignment msa = MultipleSeqAlignment(SeqRecord(Seq(''.join(v), alphabet=alphabet), id=k, name=k, description=k) for (k,v) in tmp.items()) return msa
Concatenates a list of Bio.Align.MultipleSeqAlignment objects. If any sequences are missing, they are padded with unknown data (Bio.Seq.UnknownSeq). Returns a single Bio.Align.MultipleSeqAlignment. Limitations: any annotations in the sub-alignments are lost in the concatenated alignment.
def symmetrise(matrix, tri='upper'):
    if tri == 'upper':
        tri_fn = np.triu_indices
    else:
        tri_fn = np.tril_indices
    size = matrix.shape[0]
    matrix[tri_fn(size)[::-1]] = matrix[tri_fn(size)]
    return matrix
Will copy the selected (upper or lower) triangle of a square matrix to the opposite side, so that the matrix is symmetrical. Alters in place.
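The reversed index tuple is the non-obvious part of the code above; a small self-contained demonstration of the same trick:

import numpy as np

m = np.array([[0., 1., 2.],
              [9., 0., 3.],
              [9., 9., 0.]])
iu = np.triu_indices(m.shape[0])
m[iu[::-1]] = m[iu]   # (rows, cols) reversed to (cols, rows) addresses the mirror positions
# m is now symmetric: the upper triangle has been copied over the 9s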
def grouper(n, iterable):
    iterable = iter(iterable)
    return iter(lambda: list(itertools.islice(iterable, n)), [])
>>> list(grouper(3, 'ABCDEFG')) [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
def insort_no_dup(lst, item):
    import bisect
    ix = bisect.bisect_left(lst, item)
    # guard against IndexError when item sorts after every existing element
    if ix == len(lst) or lst[ix] != item:
        lst[ix:ix] = [item]
If item is not in lst, add item to list at its sorted position
def alignment_to_partials(alignment, missing_data=None):
    partials_dict = {}
    datatype = 'dna' if alignment.is_dna() else 'protein'  # determine once, not per sequence
    for (name, sequence) in alignment.get_sequences():
        partials_dict[name] = seq_to_partials(sequence, datatype)
    if missing_data is not None:
        l = len(alignment)
        for name in missing_data:
            if name not in partials_dict:
                partials_dict[name] = seq_to_partials('-' * l, datatype)
    return partials_dict
Generate a partials dictionary from a treeCl.Alignment
def biopython_to_partials(alignment, datatype):
    partials_dict = {}
    for seq in alignment:
        partials_dict[seq.name] = seq_to_partials(seq, datatype)
    return partials_dict
Generate a partials dictionary from a Biopython (Bio.Align.MultipleSeqAlignment) alignment
def create_gamma_model(alignment, missing_data=None, ncat=4):
    model = alignment.parameters.partitions.model
    freqs = alignment.parameters.partitions.frequencies
    alpha = alignment.parameters.partitions.alpha
    if model == 'LG':
        subs_model = LG(freqs)
    elif model == 'WAG':
        subs_model = WAG(freqs)
    elif model == 'GTR':
        rates = alignment.parameters.partitions.rates
        subs_model = GTR(rates, freqs, True)
    else:
        raise ValueError("Can't handle this model: {}".format(model))
    tm = TransitionMatrix(subs_model)
    gamma = GammaMixture(alpha, ncat)
    gamma.init_models(tm, alignment_to_partials(alignment, missing_data))
    return gamma
Create a phylo_utils.likelihood.GammaMixture for calculating likelihood on a tree, from a treeCl.Alignment and its matching treeCl.Parameters
def sample_wr(lst):
    arr = np.array(lst)
    indices = np.random.randint(len(lst), size=len(lst))
    sample = np.empty(arr.shape, dtype=arr.dtype)
    for i, ix in enumerate(indices):
        sample[i] = arr[ix]
    return list(sample)
Sample from lst, with replacement
def _preprocess_inputs(x, weights):
    if weights is None:
        w_arr = np.ones(len(x))
    else:
        w_arr = np.array(weights)
    x_arr = np.array(x)
    if x_arr.ndim == 2:
        if w_arr.ndim == 1:
            w_arr = w_arr[:, np.newaxis]
    return w_arr, x_arr  # returned weights-first, matching how the callers below unpack the tuple
Coerce inputs into compatible format
def amean(x, weights=None):
    w_arr, x_arr = _preprocess_inputs(x, weights)
    return (w_arr * x_arr).sum(axis=0) / w_arr.sum(axis=0)
Return the weighted arithmetic mean of x
def gmean(x, weights=None):
    w_arr, x_arr = _preprocess_inputs(x, weights)
    return np.exp((w_arr * np.log(x_arr)).sum(axis=0) / w_arr.sum(axis=0))
Return the weighted geometric mean of x
def hmean(x, weights=None):
    w_arr, x_arr = _preprocess_inputs(x, weights)
    return w_arr.sum(axis=0) / (w_arr / x_arr).sum(axis=0)
Return the weighted harmonic mean of x
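A self-contained sanity check of the three weighted means on positive data (for positive inputs the arithmetic >= geometric >= harmonic inequality should hold):

import numpy as np

x = np.array([1.0, 2.0, 4.0])
w = np.array([1.0, 1.0, 2.0])
am = (w * x).sum() / w.sum()                  # 2.75
gm = np.exp((w * np.log(x)).sum() / w.sum())  # ~2.38
hm = w.sum() / (w / x).sum()                  # 2.0
assert am >= gm >= hm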
def gapmask(simseqs, origseqs):
    import numpy as np
    simdict = dict(simseqs)
    origdict = dict(origseqs)
    for k in origdict:
        origseq = np.array(list(origdict[k]))
        gap_pos = np.where(origseq == '-')
        simseq = np.array(list(simdict[k]))
        simseq[gap_pos] = '-'
        simdict[k] = ''.join(simseq)
    return list(simdict.items())
:param simseqs: list of (header, sequence) tuples of simulated sequences [no gaps] :param origseqs: list of (header, sequence) tuples of original sequences :return: list of (header, sequence) tuples of simulated sequences with the original gap positions reinstated
def records(self): return [self._records[i] for i in range(len(self._records))]
Returns a list of records in SORT_KEY order
def read_trees(self, input_dir): if self.show_progress: pbar = setup_progressbar("Loading trees", len(self.records)) pbar.start() for i, rec in enumerate(self.records): hook = os.path.join(input_dir, '{}.nwk*'.format(rec.name)) filename = glob.glob(hook) try: with fileIO.freader(filename[0]) as infile: tree = infile.read().decode('utf-8') d = dict(ml_tree=tree) rec.parameters.construct_from_dict(d) except (IOError, IndexError): continue finally: if self.show_progress: pbar.update(i) if self.show_progress: pbar.finish()
Read a directory full of tree files, matching them up to the already loaded alignments
def read_parameters(self, input_dir):
    if self.show_progress:
        pbar = setup_progressbar("Loading parameters", len(self.records))
        pbar.start()
    for i, rec in enumerate(self.records):
        hook = os.path.join(input_dir, '{}.json*'.format(rec.name))
        filename = glob.glob(hook)
        try:
            with fileIO.freader(filename[0]) as infile:
                # parse_int expects a callable, not True; the default integer parsing is fine here
                d = json.loads(infile.read().decode('utf-8'))
            rec.parameters.construct_from_dict(d)
        except (IOError, IndexError):
            continue
        finally:
            if self.show_progress:
                pbar.update(i)
    if self.show_progress:
        pbar.finish()
Read a directory full of json parameter files, matching them up to the already loaded alignments
def calc_distances(self, indices=None, task_interface=None, jobhandler=default_jobhandler, batchsize=1, show_progress=True): if indices is None: indices = list(range(len(self))) if task_interface is None: task_interface = tasks.MLDistanceTaskInterface() records = [self[i] for i in indices] # Assemble argument lists args, to_delete = task_interface.scrape_args(records) # Dispatch msg = '{} estimation'.format(task_interface.name) if show_progress else '' map_result = jobhandler(task_interface.get_task(), args, msg, batchsize) # Process results with fileIO.TempFileList(to_delete): # pbar = setup_progressbar('Processing results', len(map_result)) # j = 0 # pbar.start() for rec, result in zip(records, map_result): rec.parameters.partitions.distances = result['partitions'][0]['distances'] rec.parameters.partitions.variances = result['partitions'][0]['variances'] rec.parameters.nj_tree = result['nj_tree']
Calculate fast approximate intra-alignment pairwise distances and variances using ML (requires ML models to have been set up using `calc_trees`). :return: None (all side effects)
def calc_trees(self, indices=None, task_interface=None, jobhandler=default_jobhandler, batchsize=1, show_progress=True, **kwargs): if indices is None: indices = list(range(len(self))) if task_interface is None: task_interface = tasks.RaxmlTaskInterface() records = [self[i] for i in indices] # Scrape args from records args, to_delete = task_interface.scrape_args(records, **kwargs) # Dispatch work msg = '{} Tree estimation'.format(task_interface.name) if show_progress else '' map_result = jobhandler(task_interface.get_task(), args, msg, batchsize) # Process results with fileIO.TempFileList(to_delete): for rec, result in zip(records, map_result): #logger.debug('Result - {}'.format(result)) rec.parameters.construct_from_dict(result)
Infer phylogenetic trees for the loaded Alignments :param indices: Only run inference on the alignments at these given indices :param task_interface: Inference tool specified via TaskInterface (default RaxmlTaskInterface) :param jobhandler: Launch jobs via this JobHandler (default SequentialJobHandler; also available are ThreadpoolJobHandler and ProcesspoolJobHandler for running inference in parallel) :param batchsize: Batch size for Thread- or ProcesspoolJobHandlers :param kwargs: Remaining arguments to pass to the TaskInterface :return: None
def num_species(self):
    all_headers = reduce(lambda x, y: set(x) | set(y),
                         (rec.get_names() for rec in self.records))
    return len(all_headers)
Returns the number of species found over all records
def permuted_copy(self, partition=None): def take(n, iterable): return [next(iterable) for _ in range(n)] if partition is None: partition = Partition([1] * len(self)) index_tuples = partition.get_membership() alignments = [] for ix in index_tuples: concat = Concatenation(self, ix) sites = concat.alignment.get_sites() random.shuffle(sites) d = dict(zip(concat.alignment.get_names(), [iter(x) for x in zip(*sites)])) new_seqs = [[(k, ''.join(take(l, d[k]))) for k in d] for l in concat.lengths] for seqs, datatype, name in zip(new_seqs, concat.datatypes, concat.names): alignment = Alignment(seqs, datatype) alignment.name = name alignments.append(alignment) return self.__class__(records=sorted(alignments, key=lambda x: SORT_KEY(x.name)))
Return a copy of the collection with all alignment columns permuted
def get_id(self, grp):
    thehash = hex(hash(grp))
    if ISPY3:  # use default encoding to get bytes
        thehash = thehash.encode()
    return self.cache.get(grp, hashlib.sha1(thehash).hexdigest())
Return a hash of the tuple of indices that specify the group
def check_work_done(self, grp):
    id_ = self.get_id(grp)
    concat_file = os.path.join(self.cache_dir, '{}.phy'.format(id_))
    result_file = os.path.join(self.cache_dir, '{}.{}.json'.format(id_, self.task_interface.name))
    return os.path.exists(concat_file), os.path.exists(result_file)
Check for the existence of alignment and result files.
def write_group(self, grp, overwrite=False, **kwargs): id_ = self.get_id(grp) alignment_done, result_done = self.check_work_done(grp) self.cache[grp] = id_ al_filename = os.path.join(self.cache_dir, '{}.phy'.format(id_)) qfile_filename = os.path.join(self.cache_dir, '{}.partitions.txt'.format(id_)) if overwrite or not (alignment_done or result_done): conc = self.collection.concatenate(grp) al = conc.alignment al.write_alignment(al_filename, 'phylip', True) q = conc.qfile(**kwargs) with open(qfile_filename, 'w') as fl: fl.write(q + '\n')
Write the concatenated alignment to disk in the location specified by self.cache_dir
def get_group_result(self, grp, **kwargs): id_ = self.get_id(grp) self.cache[grp] = id_ # Check if this file is already processed alignment_written, results_written = self.check_work_done(grp) if not results_written: if not alignment_written: self.write_group(grp, **kwargs) logger.error('Alignment {} has not been analysed - run analyse_cache_dir'.format(id_)) raise ValueError('Missing result') else: with open(self.get_result_file(id_)) as fl: return json.load(fl)
Retrieve the results for a group. Needs this to already be calculated - errors out if result not available.
def analyse_cache_dir(self, jobhandler=None, batchsize=1, **kwargs): if jobhandler is None: jobhandler = SequentialJobHandler() files = glob.glob(os.path.join(self.cache_dir, '*.phy')) #logger.debug('Files - {}'.format(files)) records = [] outfiles = [] dna = self.collection[0].is_dna() # THIS IS ONLY A GUESS AT SEQ TYPE!! for infile in files: id_ = fileIO.strip_extensions(infile) outfile = self.get_result_file(id_) #logger.debug('Looking for {}: {}'.format(outfile, os.path.exists(outfile))) if not os.path.exists(outfile): record = Alignment(infile, 'phylip', True) records.append(record) outfiles.append(outfile) if len(records) == 0: return [] args, to_delete = self.task_interface.scrape_args(records, outfiles=outfiles, **kwargs) # logger.debug('Args - {}'.format(args)) with fileIO.TempFileList(to_delete): result = jobhandler(self.task_interface.get_task(), args, 'Cache dir analysis', batchsize) for (out, res) in zip(outfiles, result): if not os.path.exists(out) and res: with open(out, 'w') as outfl: json.dump(res, outfl) return result
Scan the cache directory and launch analysis for all unscored alignments using the associated task handler. Kwargs are passed to the tree-calculating task managed by the TaskInterface in self.task_interface. Example kwargs:
    TreeCollectionTaskInterface: scale=1, guide_tree=None, niters=10, keep_topology=False
    RaxmlTaskInterface: partition_files=None, model=None, threads=1
    FastTreeTaskInterface: no kwargs
def get_partition_score(self, p): scores = [] for grp in p.get_membership(): try: result = self.get_group_result(grp) scores.append(result['likelihood']) except ValueError: scores.append(None) return sum(scores)
Assumes analysis is done and written to id.json!
def get_partition_trees(self, p): trees = [] for grp in p.get_membership(): try: result = self.get_group_result(grp) trees.append(result['ml_tree']) except ValueError: trees.append(None) logger.error('No tree found for group {}'.format(grp)) return trees
Return the trees associated with a partition, p
def expect(self, use_proportions=True):
    changed = self.get_changed(self.partition, self.prev_partition)
    lk_table = self.generate_lktable(self.partition, changed, use_proportions)
    self.table = self.likelihood_table_to_probs(lk_table)
The Expectation step of the CEM algorithm
def classify(self, table, weighted_choice=False, transform=None): assert table.shape[1] == self.numgrp if weighted_choice: if transform is not None: probs = transform_fn(table.copy(), transform) # else: probs = table.copy() cmprobs = probs.cumsum(1) logger.info('Probabilities\n{}'.format(probs)) r = np.random.random(cmprobs.shape[0]) search = np.apply_along_axis(np.searchsorted, 1, cmprobs, r) # Not very efficient assignment = np.diag(search) else: probs = table assignment = np.where(probs==probs.max(1)[:, np.newaxis])[1] logger.info('Assignment\n{}'.format(assignment)) assignment = self._fill_empty_groups(probs, assignment) # don't want empty groups new_partition = Partition(tuple(assignment)) self.set_partition(new_partition)
The Classification step of the CEM algorithm
def maximise(self, **kwargs):
    self.scorer.write_partition(self.partition)
    self.scorer.analyse_cache_dir(**kwargs)
    self.likelihood = self.scorer.get_partition_score(self.partition)
    self.scorer.clean_cache()
    changed = self.get_changed(self.partition, self.prev_partition)
    self.update_perlocus_likelihood_objects(self.partition, changed)
    return self.partition, self.likelihood, sum(inst.get_likelihood() for inst in self.insts)
The Maximisation step of the CEM algorithm
def set_partition(self, partition):
    assert len(partition) == self.numgrp
    self.partition, self.prev_partition = partition, self.partition
Store the partition in self.partition, and move the old self.partition into self.prev_partition
def get_changed(self, p1, p2):
    if p1 is None or p2 is None:
        return list(range(len(self.insts)))
    return set(flatten_list(set(p1) - set(p2)))
Return the loci that are in clusters that have changed between partitions p1 and p2
def _update_likelihood_model(self, inst, partition_parameters, tree): # Build transition matrix from dict model = partition_parameters['model'] freqs = partition_parameters.get('frequencies') if model == 'LG': subs_model = phylo_utils.models.LG(freqs) elif model == 'WAG': subs_model = phylo_utils.models.WAG(freqs) elif model == 'GTR': rates = partition_parameters.get('rates') subs_model = phylo_utils.models.GTR(rates, freqs, True) else: raise ValueError("Can't handle this model: {}".format(model)) tm = phylo_utils.markov.TransitionMatrix(subs_model) # Read alpha value alpha = partition_parameters['alpha'] inst.set_tree(tree) inst.update_alpha(alpha) inst.update_transition_matrix(tm)
Set the parameters of the likelihood model `inst` using values from the dictionary `partition_parameters` and the tree `tree`
def likelihood_table_to_probs(self, lktable):
    m = lktable.max(1)  # row max of lktable
    shifted = lktable - m[:, np.newaxis]  # shift lktable of log-likelihoods to a non-underflowing range
    expsum = np.exp(shifted).sum(1)  # convert logs to (scaled) normal space, and sum the rows
    logexpsum = np.log(expsum) + m  # convert back to log space, and undo the scaling
    return np.exp(lktable - logexpsum[:, np.newaxis])
Calculates this formula (1), given the log of the numerator as input:

    t_k(x_i) = p_k * f(x_i, a_k) / sum_{k=1}^{K} p_k * f(x_i, a_k)

x_i is data point i; P_k is cluster k of K; t_k is the posterior probability of x_i belonging to P_k; p_k is the prior probability of belonging to P_k (the proportional size of P_k); f(x, a) is the likelihood of x with parameters a
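A minimal self-contained sketch of the same log-sum-exp trick on a toy table of log-likelihoods (one row per locus, one column per cluster):

import numpy as np

lktable = np.array([[-1000.0, -1001.0],
                    [-2000.0, -1999.0]])
m = lktable.max(1)
logsum = np.log(np.exp(lktable - m[:, np.newaxis]).sum(1)) + m
probs = np.exp(lktable - logsum[:, np.newaxis])
print(probs.sum(1))   # each row sums to 1 instead of underflowing to 0/0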
def _fill_empty_groups_old(self, probs, assignment): new_assignment = np.array(assignment.tolist()) for k in range(self.numgrp): if np.count_nonzero(assignment==k) == 0: logger.info('Group {} became empty'.format(k)) best = np.where(probs[:,k]==probs[:,k].max())[0][0] new_assignment[best] = k new_assignment = self._fill_empty_groups(probs, new_assignment) return new_assignment
Does the simple thing - if any group is empty, but needs to have at least one member, assign the data point with highest probability of membership
def wipe_partition(self, partition): for grp in partition.get_membership(): grpid = self.scorer.get_id(grp) cache_dir = self.scorer.cache_dir prog = self.scorer.task_interface.name filename = os.path.join(cache_dir, '{}.{}.json'.format(grpid, prog)) if os.path.exists(filename): os.unlink(filename)
Deletes analysis result of partition, e.g. so a repeat optimisation of the same partition can be done with a different model
def g(x,a,c): return np.sqrt(((x-a)**2).sum(1)) - c
Christophe's suggestion for residuals, G[i] = Sqrt(Sum_j (x[j] - a[i,j])^2) - C[i]
def f(x, a, c):
    v = g(x, a, c)
    return v.dot(v)
Objective function (sum of squared residuals)
def jac(x,a): return (x-a) / np.sqrt(((x-a)**2).sum(1))[:,np.newaxis]
Jacobian matrix given Christophe's suggestion of f
def gradient(x, a, c): return jac(x, a).T.dot(g(x, a, c))
J'.G
def hessian(x, a):
    j = jac(x, a)
    return j.T.dot(j)
J'.J
def grad_desc_update(x, a, c, step=0.01): return x - step * gradient(x,a,c)
Given a value of x, return a better x using gradient descent
def newton_update(x, a, c, step=1.0): return x - step*np.linalg.inv(hessian(x, a)).dot(gradient(x, a, c))
Given a value of x, return a better x using Gauss-Newton
def levenberg_marquardt_update(x, a, c, damping=0.001):
    hess = hessian(x, a)
    # Marquardt's modification scales the diagonal of the approximate Hessian:
    # np.diag(np.diag(hess)) builds that diagonal matrix (a bare np.diag(hess) would
    # return a 1-D vector and broadcast across rows instead)
    return x - np.linalg.inv(hess + damping * np.diag(np.diag(hess))).dot(gradient(x, a, c))
Given a value of x, return a better x using Levenberg-Marquardt
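For reference, the two update rules implemented above in standard notation (my rendering, with $J = \mathrm{jac}(x, a)$, residuals $g = g(x, a, c)$ and damping factor $\lambda$):

$x' = x - (J^\top J)^{-1} J^\top g$ (Gauss-Newton), $\qquad x' = x - \bigl(J^\top J + \lambda\,\mathrm{diag}(J^\top J)\bigr)^{-1} J^\top g$ (Levenberg-Marquardt)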
def golden_section_search(fn, a, b, tolerance=1e-5):
    c = b - GOLDEN * (b - a)
    d = a + GOLDEN * (b - a)
    while abs(c - d) > tolerance:
        fc, fd = fn(c), fn(d)
        if fc < fd:
            b = d
            d = c  # fd=fc; fc=f(c)
            c = b - GOLDEN * (b - a)
        else:
            a = c
            c = d  # fc=fd; fd=f(d)
            d = a + GOLDEN * (b - a)
    return (b + a) / 2
WIKIPEDIA IMPLEMENTATION: golden section search to find the minimum of f on [a,b]; f: a strictly unimodal function on [a,b]. Example:
>>> f = lambda x: (x-2)**2
>>> x = gss(f, 1, 5)
>>> x
2.000009644875678
def optimise_newton(x, a, c, tolerance=0.001):
    x_new = x
    x_old = x - 1  # dummy value
    while np.abs(x_new - x_old).sum() > tolerance:
        x_old = x_new
        x_new = newton_update(x_old, a, c)
    return x_new
Optimise value of x using Gauss-Newton
def optimise_levenberg_marquardt(x, a, c, damping=0.001, tolerance=0.001):
    x_new = x
    x_old = x - 1  # dummy value
    f_old = f(x_new, a, c)
    while np.abs(x_new - x_old).sum() > tolerance:
        x_old = x_new
        x_tmp = levenberg_marquardt_update(x_old, a, c, damping)
        f_new = f(x_tmp, a, c)
        if f_new < f_old:
            damping = max(damping / 10., 1e-20)  # builtin max; np.max(a, b) would treat b as an axis
            x_new = x_tmp
            f_old = f_new
        else:
            damping *= 10.
    return x_new
Optimise value of x using levenberg-marquardt
def optimise_gradient_descent(x, a, c, tolerance=0.001):
    x_new = x
    x_old = x - 1  # dummy value
    while np.abs(x_new - x_old).sum() > tolerance:
        x_old = x_new
        step_size = golden_section_search(lambda step: f(grad_desc_update(x_old, a, c, step), a, c), -1.0, 1.0)
        x_new = grad_desc_update(x_old, a, c, step_size)
    return x_new
Optimise value of x using gradient descent
def run_out_of_sample_mds(boot_collection, ref_collection, ref_distance_matrix, index, dimensions, task=_fast_geo, rooted=False, **kwargs): fit = np.empty((len(boot_collection), dimensions)) if ISPY3: query_trees = [PhyloTree(tree.encode(), rooted) for tree in boot_collection.trees] ref_trees = [PhyloTree(tree.encode(), rooted) for tree in ref_collection.trees] else: query_trees = [PhyloTree(tree, rooted) for tree in boot_collection.trees] ref_trees = [PhyloTree(tree, rooted) for tree in ref_collection.trees] for i, tree in enumerate(query_trees): distvec = np.array([task(tree, ref_tree, False) for ref_tree in ref_trees]) oos = OutOfSampleMDS(ref_distance_matrix) fit[i] = oos.fit(index, distvec, dimensions=dimensions, **kwargs) return fit
index = index of the locus the bootstrap sample corresponds to - only important if using recalc=True in kwargs
def stress(ref_cds, est_cds):
    ref_dists = pdist(ref_cds)
    est_dists = pdist(est_cds)
    return np.sqrt(((ref_dists - est_dists) ** 2).sum() / (ref_dists ** 2).sum())
Kruskal's stress
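In symbols (my rendering, with $d$ the reference pairwise distances and $\hat d$ the estimated ones):

$\text{stress} = \sqrt{\dfrac{\sum_{i<j} (d_{ij} - \hat d_{ij})^2}{\sum_{i<j} d_{ij}^2}}$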
def rmsd(ref_cds, est_cds):
    ref_dists = pdist(ref_cds)
    est_dists = pdist(est_cds)
    return np.sqrt(((ref_dists - est_dists) ** 2).mean())
Root-mean-squared-difference
def newton(self, start_x=None, tolerance=1.0e-6):
    if start_x is None:
        start_x = self._analytical_fitter.fit(self._c)
    return optimise_newton(start_x, self._a, self._c, tolerance)
Optimise value of x using Gauss-Newton
def gradient_descent(self, start_x=None, tolerance=1.0e-6):
    if start_x is None:
        start_x = self._analytical_fitter.fit(self._c)
    return optimise_gradient_descent(start_x, self._a, self._c, tolerance)
Optimise value of x using gradient descent
def levenberg_marquardt(self, start_x=None, damping=1.0e-3, tolerance=1.0e-6):
    if start_x is None:
        start_x = self._analytical_fitter.fit(self._c)
    # pass damping explicitly; previously tolerance was passed into the damping slot
    return optimise_levenberg_marquardt(start_x, self._a, self._c, damping, tolerance)
Optimise value of x using levenberg marquardt
def fit(self, index, distvec, recalc=False, dimensions=3): brow = self.new_B_row(index, distvec**2, recalc) return self.new_coords(brow)[:dimensions]
Replace distance matrix values at row/column index with distances in distvec, and compute new coordinates. Optionally use distvec to update means and (potentially) get a better estimate. distvec values should be plain distances, not squared distances.
def _make_A_and_part_of_b_adjacent(self, ref_crds): rot = self._rotate_rows(ref_crds) A = 2*(rot - ref_crds) partial_b = (rot**2 - ref_crds**2).sum(1) return A, partial_b
Make A and part of b. See docstring of this class for answer to "What are A and b?"
def _analytical_fit_adjacent(self, ref_dists): dists = ref_dists**2 rot_dists = self._rotate_rows(dists) b = dists - rot_dists + self._partial_b self._b = b return self._pinvA.dot(b)
Fit coords (x,y,[z]) so that distances from reference coordinates match closest to reference distances
def generate_schema_mapping(resolver, schema_uri, depth=1):
    visitor = SchemaVisitor({'$ref': schema_uri}, resolver)
    return _generate_schema_mapping(visitor, set(), depth)
Try to recursively iterate a JSON schema and generate an ES mapping that encapsulates it.
def eucdist_task(newick_string_a, newick_string_b, normalise, min_overlap=4, overlap_fail_value=0): tree_a = Tree(newick_string_a) tree_b = Tree(newick_string_b) return treedist.eucdist(tree_a, tree_b, normalise, min_overlap, overlap_fail_value)
Distributed version of tree_distance.eucdist Parameters: two valid newick strings and a boolean
def geodist_task(newick_string_a, newick_string_b, normalise, min_overlap=4, overlap_fail_value=0): tree_a = Tree(newick_string_a) tree_b = Tree(newick_string_b) return treedist.geodist(tree_a, tree_b, normalise, min_overlap, overlap_fail_value)
Distributed version of tree_distance.geodist Parameters: two valid newick strings and a boolean
def rfdist_task(newick_string_a, newick_string_b, normalise, min_overlap=4, overlap_fail_value=0): tree_a = Tree(newick_string_a) tree_b = Tree(newick_string_b) return treedist.rfdist(tree_a, tree_b, normalise, min_overlap, overlap_fail_value)
Distributed version of tree_distance.rfdist Parameters: two valid newick strings and a boolean
def wrfdist_task(newick_string_a, newick_string_b, normalise, min_overlap=4, overlap_fail_value=0): tree_a = Tree(newick_string_a) tree_b = Tree(newick_string_b) return treedist.wrfdist(tree_a, tree_b, normalise, min_overlap, overlap_fail_value)
Distributed version of tree_distance.wrfdist Parameters: two valid newick strings and a boolean
def phyml_task(alignment_file, model, **kwargs): import re fl = os.path.abspath(alignment_file) ph = Phyml(verbose=False) if model in ['JC69', 'K80', 'F81', 'F84', 'HKY85', 'TN93', 'GTR']: datatype = 'nt' elif re.search('[01]{6}', model) is not None: datatype = 'nt' else: datatype = 'aa' cmd = '-i {} -m {} -d {} -f m --quiet'.format(alignment_file, model, datatype) logger.debug("Phyml command = {}".format(cmd)) ph(cmd, wait=True, **kwargs) logger.debug("Phyml stdout = {}".format(ph.get_stdout())) logger.debug("Phyml stderr = {}".format(ph.get_stderr())) parser = PhymlParser() expected_outfiles = ['{}_phyml_stats'.format(alignment_file), '{}_phyml_tree'.format(alignment_file)] for i in range(2): if not os.path.exists(expected_outfiles[i]): expected_outfiles[i] += '.txt' logger.debug('Stats file {} {}'.format(expected_outfiles[0], 'exists' if os.path.exists(expected_outfiles[0]) else 'doesn\'t exist')) logger.debug('Tree file {} {}'.format(expected_outfiles[1], 'exists' if os.path.exists(expected_outfiles[1]) else 'doesn\'t exist')) with fileIO.TempFileList(expected_outfiles): try: result = parser.to_dict(*expected_outfiles) except IOError as ioerr: logger.error('File IO error: {}'.format(ioerr)) result = None except ParseException as parseerr: logger.error('Other parse error: {}'.format(parseerr)) result = None return result
Kwargs are passed to the Phyml process command line
def make_alf_dirs_(self): alf_dirs = {} for k in range(self.num_classes): dirname = fileIO.join_path(self.tmpdir, 'class{0:0>1}'.format( k + 1)) alf_dirs[k + 1] = errors.directorymake(dirname) self.alf_dirs = alf_dirs
DEPRECATED
def write_alf_params_(self): if not hasattr(self, 'alf_dirs'): self.make_alf_dirs() if not hasattr(self, 'class_trees'): self.generate_class_trees() alf_params = {} for k in range(self.num_classes): alfdir = self.alf_dirs[k + 1] tree = self.class_trees[k + 1] datatype = self.datatype name = 'class{0}'.format(k + 1) num_genes = self.class_list[k] seqlength = self.gene_length_min gene_length_kappa = self.gene_length_kappa gene_length_theta = self.gene_length_theta alf_obj = ALF(tree=tree, datatype=datatype, num_genes=num_genes, seqlength=seqlength, gene_length_kappa=gene_length_kappa, gene_length_theta=gene_length_theta, name=name, tmpdir=alfdir) if datatype == 'protein': alf_obj.params.one_word_model('WAG') else: alf_obj.params.jc_model() alf_params[k + 1] = alf_obj self.alf_params = alf_params
DEPRECATED
def run_(self): all_records = [] for k in range(self.num_classes): simulated_records = self.alf_params[k + 1].run() names = ['class{0}_{1:0>{2}}'.format(k + 1, i, len(str(self.class_list[k]))) for i in range(1, len( simulated_records) + 1)] for (rec, name) in zip(simulated_records, names): rec.name = name all_records.extend(simulated_records) self.result = all_records self.clean() return all_records
DEPRECATED
def validate_mapping(mapping): file_path = os.path.join(os.path.dirname(__file__), 'schemas', 'mapping.json') with open(file_path, 'r') as fh: validator = Draft4Validator(json.load(fh)) validator.validate(mapping) return mapping
Validate a mapping configuration file against the relevant schema.
def _generic_distance_calc(fn, t1, t2, normalise, min_overlap=4, overlap_fail_value=0):
    if t1 ^ t2:  # symmetric difference is non-empty, i.e. the leaf sets differ
        if len(t1 & t2) < min_overlap:
            return overlap_fail_value
            # raise AttributeError('Can\'t calculate tree distances when tree overlap is less than two leaves')
        else:
            t1, t2 = _equalise_leaf_sets(t1, t2, False)
    return fn(t1.phylotree, t2.phylotree, normalise)
(fn, t1, t2, normalise) Calculates the distance between trees t1 and t2. Can optionally be normalised to range [0, 1]. If the trees have different leaf sets, the distance is calculated on their intersection. This incurs some overhead - if the trees are known to have the same leaves, then the underlying distance function [listed below] can be called instead, although in these cases the Tree arguments will need to be replaced with Tree.phylotree to make sure the appropriate data structure is passed. By default the distance is taken to be zero if the leaf overlap between the trees is less than `min_overlap`. This value can be customised using the parameter `overlap_fail_value`. E.g. treeCl.treedist.eucdist(t1, t2, False) is the leafset checking equivalent of treeCl.treedist.getEuclideanDistance(t1.phylotree, t2.phylotree, False) Distance functions: eucdist geodist rfdist wrfdist Underlying non-leafset-checking Distance functions: getEuclideanDistance getGeodesicDistance getRobinsonFouldsDistance getWeightedRobinsonFouldsDistance :param t1: Tree :param t2: Tree :param normalise: boolean :param min_overlap: int :param overlap_fail_value: any :return: float
def _generic_matrix_calc(fn, trees, normalise, min_overlap=4, overlap_fail_value=0, show_progress=True): jobs = itertools.combinations(trees, 2) results = [] if show_progress: pbar = setup_progressbar('Calculating tree distances', 0.5 * len(trees) * (len(trees) - 1)) pbar.start() for i, (t1, t2) in enumerate(jobs): results.append(_generic_distance_calc(fn, t1, t2, normalise, min_overlap, overlap_fail_value)) if show_progress: pbar.update(i) if show_progress: pbar.finish() return scipy.spatial.distance.squareform(results)
(fn, trees, normalise) Calculates all pairwise distances between trees given in the parameter 'trees'. Distance functions: eucdist_matrix geodist_matrix rfdist_matrix wrfdist_matrix These wrap the leafset-checking functions. If the faster non-leafset-checking functions are needed, do this: scipy.spatial.distance.squareform([getDistance(t1.phylotree, t2.phylotree, normalise) for (t1, t2) in itertools.combinations(trees, 2)]) for your choice of 'getDistance' out of: getEuclideanDistance getGeodesicDistance getRobinsonFouldsDistance getWeightedRobinsonFouldsDistance :param trees: list or tuple, or some other iterable container type containing Tree objects :param normalise: boolean :param min_overlap: int :return: numpy.array