<SYSTEM_TASK:> Evaluate all spherical harmonics of degree at most `n` at angles `polar`, <END_TASK> <USER_TASK:> Description: def tree_sph(polar, azimuthal, n, standardization, symbolic=False): """Evaluate all spherical harmonics of degree at most `n` at angles `polar`, `azimuthal`. """
cos = numpy.vectorize(sympy.cos) if symbolic else numpy.cos # Conventions from # <https://en.wikipedia.org/wiki/Spherical_harmonics#Orthogonality_and_normalization>. config = { "acoustic": ("complex spherical", False), "quantum mechanic": ("complex spherical", True), "geodetic": ("complex spherical 1", False), "schmidt": ("schmidt", False), } standard, cs_phase = config[standardization] return tree_alp( cos(polar), n, phi=azimuthal, standardization=standard, with_condon_shortley_phase=cs_phase, symbolic=symbolic, )
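A usage sketch (not part of the source), assuming `tree_sph` and its helper `tree_alp` are in scope along with numpy and sympy:

import numpy

# Quantum-mechanics convention: complex spherical harmonics with
# Condon-Shortley phase.
vals = tree_sph(numpy.pi / 3, numpy.pi / 4, 2, "quantum mechanic")
# The tree layout groups results by degree: one level per degree L,
# each expected to hold the 2*L+1 orders -L..L.
for L, level in enumerate(vals):
    print(L, level)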
<SYSTEM_TASK:> Check if this file exists and is not a directory <END_TASK> <USER_TASK:> Description: def check_if_this_file_exist(filename):
    """Check if this file exists and is not a directory

    This function will check if the given filename actually exists
    and if it's not a directory

    Arguments:
        filename {string} -- filename

    Return:
        True : if the file exists and is not a directory

    Raises:
        ValueError : if the file does not exist or is a directory
    """
# Get the absolute path
filename = os.path.abspath(filename)

this_file_exist = os.path.exists(filename)
a_directory = os.path.isdir(filename)

result = this_file_exist and not a_directory

if not result:
    raise ValueError('The filename given was either non-existent or was a directory')

return result
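A minimal usage sketch; note that on failure the function raises ValueError rather than returning False:

try:
    check_if_this_file_exist('photo.jpg')  # 'photo.jpg' is an illustrative path
except ValueError:
    print('photo.jpg is missing or is a directory')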
<SYSTEM_TASK:> Handle the command line call <END_TASK> <USER_TASK:> Description: def command_line(cmd):
    """Handle the command line call

    Arguments:
        cmd {list} -- the command and its arguments

    Return:
        0 on error, or the command's stripped stdout output (bytes)
    """
try:
    s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    s = s.stdout.read()
    return s.strip()
except OSError:
    # Popen raises OSError (e.g. FileNotFoundError) when the executable
    # cannot be run; it does not raise CalledProcessError.
    return 0
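A short sketch of the wrapper's behavior; the return value is the raw bytes written to stdout, stripped:

out = command_line(['echo', 'hello'])
if out:
    print(out.decode('utf-8'))  # 'hello'
else:
    print('command failed')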
<SYSTEM_TASK:> Return a csv representation of the exif <END_TASK> <USER_TASK:> Description: def get_csv(filename):
    """ Return a CSV representation of the EXIF data

    Takes a filename and returns a unicode string in CSV format.

    Arguments:
        filename {string} -- your filename

    Returns:
        [unicode] -- unicode string
    """
check_if_this_file_exist(filename)

# Work on the absolute path
filename = os.path.abspath(filename)
s = command_line(['exiftool', '-G', '-csv', '-sort', filename])
if s:
    # Convert bytes to a unicode string
    s = s.decode('utf-8')
    return s
else:
    return 0
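A sketch of consuming the result, assuming exiftool is installed and on the PATH; the CSV has one header row and one row per file. The filename is illustrative:

import csv
import io

s = get_csv('photo.jpg')
if s:
    for row in csv.DictReader(io.StringIO(s)):
        print(row.get('SourceFile'))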
<SYSTEM_TASK:> Render email with provided context <END_TASK> <USER_TASK:> Description: def render(self, context=None, clean=False): """ Render email with provided context Arguments --------- context : dict |context| If not specified then the :attr:`~mail_templated.EmailMessage.context` property is used. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``. """
# Load template if it is not loaded yet.
if not self.template:
    self.load_template(self.template_name)
# The signature of the `render()` method was changed in Django 1.7.
# https://docs.djangoproject.com/en/1.8/ref/templates/upgrading/#get-template-and-select-template
if hasattr(self.template, 'template'):
    context = (context or self.context).copy()
else:
    context = Context(context or self.context)
# Add tag strings to the context.
context.update(self.extra_context)
result = self.template.render(context)
# Don't overwrite default value with empty one.
subject = self._get_block(result, 'subject')
if subject:
    self.subject = subject
body = self._get_block(result, 'body')
is_html_body = False
# The html block is optional, and it also may be set manually.
html = self._get_block(result, 'html')
if html:
    if not body:
        # This is an html message without plain text part.
        body = html
        is_html_body = True
    else:
        # Add alternative content.
        self.attach_alternative(html, 'text/html')
# Don't overwrite default value with empty one.
if body:
    self.body = body
    if is_html_body:
        self.content_subtype = 'html'
self._is_rendered = True
if clean:
    self.clean()
<SYSTEM_TASK:> Send email message, render if it is not rendered yet. <END_TASK> <USER_TASK:> Description: def send(self, *args, **kwargs): """ Send email message, render if it is not rendered yet. Note ---- Any extra arguments are passed to :class:`EmailMultiAlternatives.send() <django.core.mail.EmailMessage>`. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``. """
clean = kwargs.pop('clean', False) if not self._is_rendered: self.render() if clean: self.clean() return super(EmailMessage, self).send(*args, **kwargs)
<SYSTEM_TASK:> Easy wrapper for sending a single email message to a recipient list using <END_TASK> <USER_TASK:> Description: def send_mail(template_name, context, from_email, recipient_list,
              fail_silently=False, auth_user=None, auth_password=None,
              connection=None, **kwargs):
    """
    Easy wrapper for sending a single email message to a recipient list using
    django template system.

    It works almost the same way as the standard
    :func:`send_mail()<django.core.mail.send_mail>` function.

    .. |main_difference| replace:: The main difference is that the first two
        arguments ``subject`` and ``body`` are replaced with ``template_name``
        and ``context``. However, you can still pass subject or body as
        keyword arguments to provide static content if needed.

    |main_difference|

    The ``template_name``, ``context``, ``from_email`` and ``recipient_list``
    parameters are required.

    Note
    ----
    |args_note|

    Arguments
    ---------
    template_name : str
        |template_name|
    context : dict
        |context|
    from_email : str
        |from_email|
    recipient_list : list
        |recipient_list|

    Keyword Arguments
    -----------------
    fail_silently : bool
        If it's False, send_mail will raise an :exc:`smtplib.SMTPException`.
        See the :mod:`smtplib` docs for a list of possible exceptions, all of
        which are subclasses of :exc:`smtplib.SMTPException`.
    auth_user : str
        The optional username to use to authenticate to the SMTP server. If
        this isn't provided, Django will use the value of the
        :django:setting:`EMAIL_HOST_USER` setting.
    auth_password : str
        The optional password to use to authenticate to the SMTP server. If
        this isn't provided, Django will use the value of the
        :django:setting:`EMAIL_HOST_PASSWORD` setting.
    connection : EmailBackend
        The optional email backend to use to send the mail. If unspecified,
        an instance of the default backend will be used. See the
        documentation on :ref:`Email backends<django:topic-email-backends>`
        for more details.
    subject : str
        |subject|
    body : str
        |body|
    render : bool
        |render|

    Returns
    -------
    int
        The number of successfully delivered messages (which can be 0 or 1
        since it can only send one message).

    See Also
    --------
    :func:`django.core.mail.send_mail`
        Documentation for the standard ``send_mail()`` function.
    """
connection = connection or mail.get_connection(username=auth_user, password=auth_password, fail_silently=fail_silently) clean = kwargs.pop('clean', True) return EmailMessage( template_name, context, from_email, recipient_list, connection=connection, **kwargs).send(clean=clean)
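A usage sketch; the template name, context keys, and addresses are illustrative only:

send_mail(
    'email/welcome.tpl',
    {'user': 'alice'},
    'from@example.com',
    ['to@example.com'],
    fail_silently=True,
)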
<SYSTEM_TASK:> Compute the mean Silhouette Coefficient of all samples. <END_TASK> <USER_TASK:> Description: def silhouette_score(X, labels, metric='euclidean', sample_size=None, random_state=None, **kwds): """Compute the mean Silhouette Coefficient of all samples. The Silhouette Coefficient is calculated using the mean intra-cluster distance (``a``) and the mean nearest-cluster distance (``b``) for each sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a, b)``. To clarify, ``b`` is the distance between a sample and the nearest cluster that the sample is not a part of. Note that Silhouette Coefficient is only defined if number of labels is 2 <= n_labels <= n_samples - 1. This function returns the mean Silhouette Coefficient over all samples. To obtain the values for each sample, use :func:`silhouette_samples`. The best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. Negative values generally indicate that a sample has been assigned to the wrong cluster, as a different cluster is more similar. Read more in the :ref:`User Guide <silhouette_coefficient>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. labels : array, shape = [n_samples] Predicted labels for each sample. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by :func:`metrics.pairwise.pairwise_distances <sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance array itself, use ``metric="precomputed"``. sample_size : int or None The size of the sample to use when computing the Silhouette Coefficient on a random subset of the data. If ``sample_size is None``, no sampling is used. random_state : int, RandomState instance or None, optional (default=None) The generator used to randomly select a subset of samples. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``sample_size is not None``. **kwds : optional keyword parameters Any further parameters are passed directly to the distance function. If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- silhouette : float Mean Silhouette Coefficient for all samples. References ---------- .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the Interpretation and Validation of Cluster Analysis". Computational and Applied Mathematics 20: 53-65. <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_ .. [2] `Wikipedia entry on the Silhouette Coefficient <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_ """
if sample_size is not None: X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr']) random_state = check_random_state(random_state) indices = random_state.permutation(X.shape[0])[:sample_size] if metric == "precomputed": X, labels = X[indices].T[indices].T, labels[indices] else: X, labels = X[indices], labels[indices] return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
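A toy example with hand-checkable numbers; two tight, well-separated one-dimensional clusters should score close to 1:

import numpy as np

X = np.array([[0.0], [0.1], [5.0], [5.1]])
labels = np.array([0, 0, 1, 1])
# For the first sample: a = 0.1, b = (5.0 + 5.1) / 2 = 5.05,
# so s = (5.05 - 0.1) / 5.05 ~ 0.98; the mean over all samples is similar.
print(silhouette_score(X, labels))  # ~0.98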
<SYSTEM_TASK:> Compute the Silhouette Coefficient for each sample. <END_TASK> <USER_TASK:> Description: def silhouette_samples(X, labels, metric='euclidean', **kwds):
    """Compute the Silhouette Coefficient for each sample.

    The Silhouette Coefficient is a measure of how well samples are clustered
    with samples that are similar to themselves. Clustering models with a high
    Silhouette Coefficient are said to be dense, where samples in the same
    cluster are similar to each other, and well separated, where samples in
    different clusters are not very similar to each other.

    The Silhouette Coefficient is calculated using the mean intra-cluster
    distance (``a``) and the mean nearest-cluster distance (``b``) for each
    sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
    b)``.

    Note that Silhouette Coefficient is only defined if number of labels
    is 2 <= n_labels <= n_samples - 1.

    This function returns the Silhouette Coefficient for each sample.

    The best value is 1 and the worst value is -1. Values near 0 indicate
    overlapping clusters.

    Read more in the :ref:`User Guide <silhouette_coefficient>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.

    labels : array, shape = [n_samples]
        label values for each sample

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
        the distance array itself, use "precomputed" as the metric.

    **kwds : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a ``scipy.spatial.distance`` metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.

    References
    ----------

    .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.
       <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_

    .. [2] `Wikipedia entry on the Silhouette Coefficient
       <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
    """
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr']) le = LabelEncoder() labels = le.fit_transform(labels) check_number_of_labels(len(le.classes_), X.shape[0]) distances = pairwise_distances(X, metric=metric, **kwds) unique_labels = le.classes_ n_samples_per_label = np.bincount(labels, minlength=len(unique_labels)) # For sample i, store the mean distance of the cluster to which # it belongs in intra_clust_dists[i] intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype) # For sample i, store the mean distance of the second closest # cluster in inter_clust_dists[i] inter_clust_dists = np.inf + intra_clust_dists for curr_label in range(len(unique_labels)): # Find inter_clust_dist for all samples belonging to the same # label. mask = labels == curr_label current_distances = distances[mask] # Leave out current sample. n_samples_curr_lab = n_samples_per_label[curr_label] - 1 if n_samples_curr_lab != 0: intra_clust_dists[mask] = np.sum( current_distances[:, mask], axis=1) / n_samples_curr_lab # Now iterate over all other labels, finding the mean # cluster distance that is closest to every sample. for other_label in range(len(unique_labels)): if other_label != curr_label: other_mask = labels == other_label other_distances = np.mean( current_distances[:, other_mask], axis=1) inter_clust_dists[mask] = np.minimum( inter_clust_dists[mask], other_distances) sil_samples = inter_clust_dists - intra_clust_dists sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists) # score 0 for clusters of size 1, according to the paper sil_samples[n_samples_per_label.take(labels) == 1] = 0 return sil_samples
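The per-sample values average to the overall score; a quick sketch on the same toy data:

import numpy as np

X = np.array([[0.0], [0.1], [5.0], [5.1]])
labels = np.array([0, 0, 1, 1])
per_sample = silhouette_samples(X, labels)
# np.mean(per_sample) equals silhouette_score(X, labels) when no
# subsampling is used.
print(per_sample, per_sample.mean())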
<SYSTEM_TASK:> Compute the Calinski and Harabaz score. <END_TASK> <USER_TASK:> Description: def calinski_harabaz_score(X, labels):
    """Compute the Calinski and Harabaz score.

    The score is defined as the ratio of the between-cluster dispersion to
    the within-cluster dispersion.

    Read more in the :ref:`User Guide <calinski_harabaz_index>`.

    Parameters
    ----------
    X : array-like, shape (``n_samples``, ``n_features``)
        List of ``n_features``-dimensional data points. Each row corresponds
        to a single data point.

    labels : array-like, shape (``n_samples``,)
        Predicted labels for each sample.

    Returns
    -------
    score : float
        The resulting Calinski-Harabaz score.

    References
    ----------
    .. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
       analysis". Communications in Statistics
       <http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
    """
X, labels = check_X_y(X, labels) le = LabelEncoder() labels = le.fit_transform(labels) n_samples, _ = X.shape n_labels = len(le.classes_) check_number_of_labels(n_labels, n_samples) extra_disp, intra_disp = 0., 0. mean = np.mean(X, axis=0) for k in range(n_labels): cluster_k = X[labels == k] mean_k = np.mean(cluster_k, axis=0) extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2) intra_disp += np.sum((cluster_k - mean_k) ** 2) return (1. if intra_disp == 0. else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.)))
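On the same toy data the score is large, since the between-cluster dispersion dwarfs the within-cluster dispersion:

import numpy as np

X = np.array([[0.0], [0.1], [5.0], [5.1]])
labels = np.array([0, 0, 1, 1])
# extra_disp = 25, intra_disp = 0.01, so the score is
# 25 * (4 - 2) / (0.01 * (2 - 1)) = 5000.
print(calinski_harabaz_score(X, labels))  # 5000.0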
<SYSTEM_TASK:> Compute joint probabilities p_ij from distances. <END_TASK> <USER_TASK:> Description: def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array, shape (n_samples, n_samples)
        Full matrix of pairwise (squared) distances between samples.

    desired_perplexity : float
        Desired perplexity of the joint probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix, i.e. the diagonal and duplicate
        entries are omitted and everything is stored in a one-dimensional
        array.
    """
# Compute conditional probabilities such that they approximately match # the desired perplexity distances = distances.astype(np.float32, copy=False) conditional_P = _utils._binary_search_perplexity( distances, None, desired_perplexity, verbose) P = conditional_P + conditional_P.T sum_P = np.maximum(np.sum(P), MACHINE_EPSILON) P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON) return P
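A sketch of the data flow, assuming the module's private helpers (`_utils`, `MACHINE_EPSILON`) are importable with the function and that scikit-learn is available: the input is the full squared-distance matrix, and the returned P is condensed.

import numpy as np
from sklearn.metrics import pairwise_distances

X = np.random.RandomState(0).randn(5, 3)
distances = pairwise_distances(X, squared=True)   # shape (5, 5)
P = _joint_probabilities(distances, desired_perplexity=2.0, verbose=0)
# Condensed: 5 * 4 / 2 entries; scipy.spatial.distance.squareform(P)
# recovers the full symmetric matrix, which sums to ~1.
print(P.shape)  # (10,)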
<SYSTEM_TASK:> Compute joint probabilities p_ij from distances using just nearest <END_TASK> <USER_TASK:> Description: def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances using just nearest
    neighbors.

    This method is approximately equal to _joint_probabilities. The latter
    is O(N^2), but limiting the joint probability to nearest neighbors
    improves this substantially to O(uN), where u is the number of neighbors.

    Parameters
    ----------
    distances : array, shape (n_samples, k)
        Distances of samples to its k nearest neighbors.

    neighbors : array, shape (n_samples, k)
        Indices of the k nearest-neighbors for each sample.

    desired_perplexity : float
        Desired perplexity of the joint probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : csr sparse matrix, shape (n_samples, n_samples)
        Condensed joint probability matrix with only nearest neighbors.
    """
t0 = time() # Compute conditional probabilities such that they approximately match # the desired perplexity n_samples, k = neighbors.shape distances = distances.astype(np.float32, copy=False) neighbors = neighbors.astype(np.int64, copy=False) conditional_P = _utils._binary_search_perplexity( distances, neighbors, desired_perplexity, verbose) assert np.all(np.isfinite(conditional_P)), \ "All probabilities should be finite" # Symmetrize the joint probability distribution using sparse operations P = csr_matrix((conditional_P.ravel(), neighbors.ravel(), range(0, n_samples * k + 1, k)), shape=(n_samples, n_samples)) P = P + P.T # Normalize the joint probability distribution sum_P = np.maximum(P.sum(), MACHINE_EPSILON) P /= sum_P assert np.all(np.abs(P.data) <= 1.0) if verbose >= 2: duration = time() - t0 print("[t-SNE] Computed conditional probabilities in {:.3f}s" .format(duration)) return P
<SYSTEM_TASK:> Batch gradient descent with momentum and individual gains. <END_TASK> <USER_TASK:> Description: def _gradient_descent(objective, p0, it, n_iter, n_iter_check=1, n_iter_without_progress=300, momentum=0.8, learning_rate=200.0, min_gain=0.01, min_grad_norm=1e-7, verbose=0, args=None, kwargs=None): """Batch gradient descent with momentum and individual gains. Parameters ---------- objective : function or callable Should return a tuple of cost and gradient for a given parameter vector. When expensive to compute, the cost can optionally be None and can be computed every n_iter_check steps using the objective_error function. p0 : array-like, shape (n_params,) Initial parameter vector. it : int Current number of iterations (this function will be called more than once during the optimization). n_iter : int Maximum number of gradient descent iterations. n_iter_check : int Number of iterations before evaluating the global error. If the error is sufficiently low, we abort the optimization. n_iter_without_progress : int, optional (default: 300) Maximum number of iterations without progress before we abort the optimization. momentum : float, within (0.0, 1.0), optional (default: 0.8) The momentum generates a weight for previous gradients that decays exponentially. learning_rate : float, optional (default: 200.0) The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If the learning rate is too high, the data may look like a 'ball' with any point approximately equidistant from its nearest neighbours. If the learning rate is too low, most points may look compressed in a dense cloud with few outliers. min_gain : float, optional (default: 0.01) Minimum individual gain for each parameter. min_grad_norm : float, optional (default: 1e-7) If the gradient norm is below this threshold, the optimization will be aborted. verbose : int, optional (default: 0) Verbosity level. args : sequence Arguments to pass to objective function. kwargs : dict Keyword arguments to pass to objective function. Returns ------- p : array, shape (n_params,) Optimum parameters. error : float Optimum. i : int Last iteration. """
if args is None:
    args = []
if kwargs is None:
    kwargs = {}

p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float64).max
best_error = np.finfo(np.float64).max
best_iter = i = it

tic = time()
for i in range(it, n_iter):
    error, grad = objective(p, *args, **kwargs)
    grad_norm = linalg.norm(grad)

    inc = update * grad < 0.0
    dec = np.invert(inc)
    gains[inc] += 0.2
    gains[dec] *= 0.8
    np.clip(gains, min_gain, np.inf, out=gains)
    grad *= gains
    update = momentum * update - learning_rate * grad
    p += update

    if (i + 1) % n_iter_check == 0:
        toc = time()
        duration = toc - tic
        tic = toc

        if verbose >= 2:
            print("[t-SNE] Iteration %d: error = %.7f,"
                  " gradient norm = %.7f"
                  " (%s iterations in %0.3fs)"
                  % (i + 1, error, grad_norm, n_iter_check, duration))

        if error < best_error:
            best_error = error
            best_iter = i
        elif i - best_iter > n_iter_without_progress:
            if verbose >= 2:
                print("[t-SNE] Iteration %d: did not make any progress "
                      "during the last %d episodes. Finished."
                      % (i + 1, n_iter_without_progress))
            break
        if grad_norm <= min_grad_norm:
            if verbose >= 2:
                print("[t-SNE] Iteration %d: gradient norm %f. Finished."
                      % (i + 1, grad_norm))
            break

return p, error, i
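A sketch on a toy objective; minimizing f(p) = ||p||^2, whose gradient is 2p, should drive p toward the origin:

import numpy as np

def quadratic(p):
    # The objective must return a (cost, gradient) tuple.
    return np.dot(p, p), 2.0 * p

p0 = np.array([1.0, -2.0])
p, error, it_final = _gradient_descent(quadratic, p0, it=0, n_iter=100,
                                       learning_rate=0.1, momentum=0.5)
print(p, error, it_final)  # p close to [0, 0]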
<SYSTEM_TASK:> Expresses to what extent the local structure is retained. <END_TASK> <USER_TASK:> Description: def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    r"""Expresses to what extent the local structure is retained.

    The trustworthiness is within [0, 1]. It is defined as

    .. math::

        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)

    where :math:`r(i, j)` is the rank of the embedded datapoint j
    according to the pairwise distances between the embedded datapoints,
    :math:`U^{(k)}_i` is the set of points that are in the k nearest
    neighbors in the embedded space but not in the original space.

    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study"
      J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.

    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.

    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.

    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.

    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
if precomputed: dist_X = X else: dist_X = pairwise_distances(X, squared=True) dist_X_embedded = pairwise_distances(X_embedded, squared=True) ind_X = np.argsort(dist_X, axis=1) ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1] n_samples = X.shape[0] t = 0.0 ranks = np.zeros(n_neighbors) for i in range(n_samples): for j in range(n_neighbors): ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0] ranks -= n_neighbors t += np.sum(ranks[ranks > 0]) t = 1.0 - t * (2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))) return t
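Two sanity checks: an embedding identical to the input preserves every neighborhood (score 1.0), while a random embedding scrambles them and scores lower:

import numpy as np

X = np.random.RandomState(0).randn(20, 5)
print(trustworthiness(X, X.copy(), n_neighbors=3))  # 1.0
print(trustworthiness(X, np.random.RandomState(1).randn(20, 2),
                      n_neighbors=3))               # typically well below 1.0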
<SYSTEM_TASK:> Integrate and batch correct a list of data sets. <END_TASK> <USER_TASK:> Description: def correct(datasets_full, genes_list, return_dimred=False,
            batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None,
            dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN,
            return_dense=False, hvg=None, union=False, geosketch=False,
            geosketch_max=20000):
    """Integrate and batch correct a list of data sets.

    Parameters
    ----------
    datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray`
        Data sets to integrate and correct.
    genes_list: `list` of `list` of `string`
        List of genes for each data set.
    return_dimred: `bool`, optional (default: `False`)
        In addition to returning batch corrected matrices, also returns
        integrated low-dimensional embeddings.
    batch_size: `int`, optional (default: `5000`)
        The batch size used in the alignment vector computation. Useful when
        correcting very large (>100k samples) data sets. Set to large value
        that runs within available memory.
    verbose: `bool` or `int`, optional (default: 2)
        When `True` or not equal to 0, prints logging output.
    ds_names: `list` of `string`, optional
        When `verbose=True`, reports data set names in logging output.
    dimred: `int`, optional (default: 100)
        Dimensionality of integrated embedding.
    approx: `bool`, optional (default: `True`)
        Use approximate nearest neighbors, greatly speeds up matching runtime.
    sigma: `float`, optional (default: 15)
        Correction smoothing parameter on Gaussian kernel.
    alpha: `float`, optional (default: 0.10)
        Alignment score minimum cutoff.
    knn: `int`, optional (default: 20)
        Number of nearest neighbors to use for matching.
    return_dense: `bool`, optional (default: `False`)
        Return `numpy.ndarray` matrices instead of `scipy.sparse.csr_matrix`.
    hvg: `int`, optional (default: None)
        Use this number of top highly variable genes based on dispersion.

    Returns
    -------
    corrected, genes
        By default (`return_dimred=False`), returns a two-tuple containing a
        list of `scipy.sparse.csr_matrix` each with batch corrected values,
        and a single list of genes containing the intersection of inputted
        genes.

    integrated, corrected, genes
        When `return_dimred=True`, returns a three-tuple containing a list of
        `numpy.ndarray` with integrated low dimensional embeddings, a list of
        `scipy.sparse.csr_matrix` each with batch corrected values, and a
        single list of genes containing the intersection of inputted genes.
    """
datasets_full = check_datasets(datasets_full) datasets, genes = merge_datasets(datasets_full, genes_list, ds_names=ds_names, union=union) datasets_dimred, genes = process_data(datasets, genes, hvg=hvg, dimred=dimred) datasets_dimred = assemble( datasets_dimred, # Assemble in low dimensional space. expr_datasets=datasets, # Modified in place. verbose=verbose, knn=knn, sigma=sigma, approx=approx, alpha=alpha, ds_names=ds_names, batch_size=batch_size, geosketch=geosketch, geosketch_max=geosketch_max, ) if return_dense: datasets = [ ds.toarray() for ds in datasets ] if return_dimred: return datasets_dimred, datasets, genes return datasets, genes
<SYSTEM_TASK:> Integrate a list of data sets. <END_TASK> <USER_TASK:> Description: def integrate(datasets_full, genes_list, batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN, geosketch=False, geosketch_max=20000, n_iter=1, union=False, hvg=None): """Integrate a list of data sets. Parameters ---------- datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray` Data sets to integrate and correct. genes_list: `list` of `list` of `string` List of genes for each data set. batch_size: `int`, optional (default: `5000`) The batch size used in the alignment vector computation. Useful when correcting very large (>100k samples) data sets. Set to large value that runs within available memory. verbose: `bool` or `int`, optional (default: 2) When `True` or not equal to 0, prints logging output. ds_names: `list` of `string`, optional When `verbose=True`, reports data set names in logging output. dimred: `int`, optional (default: 100) Dimensionality of integrated embedding. approx: `bool`, optional (default: `True`) Use approximate nearest neighbors, greatly speeds up matching runtime. sigma: `float`, optional (default: 15) Correction smoothing parameter on Gaussian kernel. alpha: `float`, optional (default: 0.10) Alignment score minimum cutoff. knn: `int`, optional (default: 20) Number of nearest neighbors to use for matching. hvg: `int`, optional (default: None) Use this number of top highly variable genes based on dispersion. Returns ------- integrated, genes Returns a two-tuple containing a list of `numpy.ndarray` with integrated low dimensional embeddings and a single list of genes containing the intersection of inputted genes. """
datasets_full = check_datasets(datasets_full) datasets, genes = merge_datasets(datasets_full, genes_list, ds_names=ds_names, union=union) datasets_dimred, genes = process_data(datasets, genes, hvg=hvg, dimred=dimred) for _ in range(n_iter): datasets_dimred = assemble( datasets_dimred, # Assemble in low dimensional space. verbose=verbose, knn=knn, sigma=sigma, approx=approx, alpha=alpha, ds_names=ds_names, batch_size=batch_size, geosketch=geosketch, geosketch_max=geosketch_max, ) return datasets_dimred, genes
<SYSTEM_TASK:> Batch correct a list of `scanpy.api.AnnData`. <END_TASK> <USER_TASK:> Description: def correct_scanpy(adatas, **kwargs):
    """Batch correct a list of `scanpy.api.AnnData`.

    Parameters
    ----------
    adatas : `list` of `scanpy.api.AnnData`
        Data sets to integrate and/or correct.
    kwargs : `dict`
        See documentation for the `correct()` method for a full list of
        parameters to use for batch correction.

    Returns
    -------
    corrected
        By default (`return_dimred=False`), returns a list of
        `scanpy.api.AnnData` with batch corrected values in the `.X` field.

    integrated, corrected
        When `return_dimred=True`, returns a two-tuple containing a list of
        `np.ndarray` with integrated low-dimensional embeddings and a list of
        `scanpy.api.AnnData` with batch corrected values in the `.X` field.
    """
if 'return_dimred' in kwargs and kwargs['return_dimred']: datasets_dimred, datasets, genes = correct( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) else: datasets, genes = correct( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) new_adatas = [] for i, adata in enumerate(adatas): adata.X = datasets[i] new_adatas.append(adata) if 'return_dimred' in kwargs and kwargs['return_dimred']: return datasets_dimred, new_adatas else: return new_adatas
<SYSTEM_TASK:> Integrate a list of `scanpy.api.AnnData`. <END_TASK> <USER_TASK:> Description: def integrate_scanpy(adatas, **kwargs): """Integrate a list of `scanpy.api.AnnData`. Parameters ---------- adatas : `list` of `scanpy.api.AnnData` Data sets to integrate. kwargs : `dict` See documentation for the `integrate()` method for a full list of parameters to use for batch correction. Returns ------- integrated Returns a list of `np.ndarray` with integrated low-dimensional embeddings. """
datasets_dimred, genes = integrate( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) return datasets_dimred
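A usage sketch for the scanpy wrappers; the file names are illustrative only, and the exact scanpy read function may vary by version:

import scanpy as sc

adatas = [sc.read_h5ad('batch1.h5ad'), sc.read_h5ad('batch2.h5ad')]
embeddings = integrate_scanpy(adatas, dimred=100)
# embeddings[i] is an (n_cells_i, 100) array, aligned across batches.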
<SYSTEM_TASK:> Augment a knot vector. <END_TASK> <USER_TASK:> Description: def augknt(knots, order): """Augment a knot vector. Parameters: knots: Python list or rank-1 array, the original knot vector (without endpoint repeats) order: int, >= 0, order of spline Returns: list_of_knots: rank-1 array that has (`order` + 1) copies of ``knots[0]``, then ``knots[1:-1]``, and finally (`order` + 1) copies of ``knots[-1]``. Caveats: `order` is the spline order `p`, not `p` + 1, and existing knots are never deleted. The knot vector always becomes longer by calling this function. """
if isinstance(knots, np.ndarray) and knots.ndim > 1: raise ValueError("knots must be a list or a rank-1 array") knots = list(knots) # ensure Python list # One copy of knots[0] and knots[-1] will come from "knots" itself, # so we only need to prepend/append "order" copies. # return np.array( [knots[0]] * order + knots + [knots[-1]] * order )
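For example, with order 2 each endpoint ends up with three copies in the augmented vector:

import numpy as np

knots = augknt([0.0, 1.0, 2.0, 3.0], order=2)
print(knots)  # [0. 0. 0. 1. 2. 3. 3. 3.]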
<SYSTEM_TASK:> Compute the running average of `k` successive elements of `t`. Return the averaged array. <END_TASK> <USER_TASK:> Description: def aveknt(t, k): """Compute the running average of `k` successive elements of `t`. Return the averaged array. Parameters: t: Python list or rank-1 array k: int, >= 2, how many successive elements to average Returns: rank-1 array, averaged data. If k > len(t), returns a zero-length array. Caveat: This is slightly different from MATLAB's aveknt, which returns the running average of `k`-1 successive elements of ``t[1:-1]`` (and the empty vector if ``len(t) - 2 < k - 1``). """
t = np.atleast_1d(t) if t.ndim > 1: raise ValueError("t must be a list or a rank-1 array") n = t.shape[0] u = max(0, n - (k-1)) # number of elements in the output array out = np.empty( (u,), dtype=t.dtype ) for j in range(u): out[j] = sum( t[j:(j+k)] ) / k return out
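Two quick checks of the behavior described above:

import numpy as np

print(aveknt([0.0, 1.0, 2.0, 3.0, 4.0], 3))  # [1. 2. 3.]
print(aveknt([0.0, 1.0], 3).shape)           # (0,) -- k exceeds len(t)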
<SYSTEM_TASK:> Create an acceptable knot vector. <END_TASK> <USER_TASK:> Description: def aptknt(tau, order): """Create an acceptable knot vector. Minimal emulation of MATLAB's ``aptknt``. The returned knot vector can be used to generate splines of desired `order` that are suitable for interpolation to the collocation sites `tau`. Note that this is only possible when ``len(tau)`` >= `order` + 1. When this condition does not hold, a valid knot vector is returned, but using it to generate a spline basis will not have the desired effect (the spline will return a length-zero array upon evaluation). Parameters: tau: Python list or rank-1 array, collocation sites order: int, >= 0, order of spline Returns: rank-1 array, `k` copies of ``tau[0]``, then ``aveknt(tau[1:-1], k-1)``, and finally `k` copies of ``tau[-1]``, where ``k = min(order+1, len(tau))``. """
tau = np.atleast_1d(tau)
k = order + 1

if tau.ndim > 1:
    raise ValueError("tau must be a list or a rank-1 array")

# emulate MATLAB behavior for the "k" parameter
#
# See
#     https://se.mathworks.com/help/curvefit/aptknt.html
#
if len(tau) < k:
    k = len(tau)

if not (tau == sorted(tau)).all():
    raise ValueError("tau must be nondecreasing")

# last processed element needs to be:
#      i + k - 1 = len(tau) - 1
# =>   i + k     = len(tau)
# =>   i         = len(tau) - k
#
# so the loop must run over i = 0, ..., len(tau) - k inclusive.
#
u = len(tau) - k
for i in range(u + 1):
    if tau[i+k-1] == tau[i]:
        raise ValueError("k-fold (or higher) repeated sites not allowed, but tau[i+k-1] == tau[i] for i = %d, k = %d" % (i, k))

# form the output sequence
#
prefix = [ tau[0] ]  * k
suffix = [ tau[-1] ] * k

# https://se.mathworks.com/help/curvefit/aveknt.html
#
# MATLAB's aveknt():
#  - averages successive k-1 entries, but ours averages k
#  - seems to ignore the endpoints
#
tmp = aveknt(tau[1:-1], k-1)
middle = tmp.tolist()

return np.array( prefix + middle + suffix, dtype=tmp.dtype )
<SYSTEM_TASK:> Count multiplicities of elements in a sorted list or rank-1 array. <END_TASK> <USER_TASK:> Description: def knt2mlt(t): """Count multiplicities of elements in a sorted list or rank-1 array. Minimal emulation of MATLAB's ``knt2mlt``. Parameters: t: Python list or rank-1 array. Must be sorted! Returns: out rank-1 array such that out[k] = #{ t[i] == t[k] for i < k } Example: If ``t = [1, 1, 2, 3, 3, 3]``, then ``out = [0, 1, 0, 0, 1, 2]``. Caveat: Requires input to be already sorted (this is not checked). """
t = np.atleast_1d(t) if t.ndim > 1: raise ValueError("t must be a list or a rank-1 array") out = [] e = None for k in range(t.shape[0]): if t[k] != e: e = t[k] count = 0 else: count += 1 out.append(count) return np.array( out )
<SYSTEM_TASK:> Return collocation matrix. <END_TASK> <USER_TASK:> Description: def spcol(knots, order, tau): """Return collocation matrix. Minimal emulation of MATLAB's ``spcol``. Parameters: knots: rank-1 array, knot vector (with appropriately repeated endpoints; see `augknt`, `aptknt`) order: int, >= 0, order of spline tau: rank-1 array, collocation sites Returns: rank-2 array A such that A[i,j] = D**{m(i)} B_j(tau[i]) where m(i) = multiplicity of site tau[i] D**k = kth derivative (0 for function value itself) """
m = knt2mlt(tau) B = bspline.Bspline(knots, order) dummy = B(0.) nbasis = len(dummy) # perform dummy evaluation to get number of basis functions A = np.empty( (tau.shape[0], nbasis), dtype=dummy.dtype ) for i,item in enumerate(zip(tau,m)): taui,mi = item f = B.diff(order=mi) A[i,:] = f(taui) return A
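A usage sketch, assuming the `bspline` module's `Bspline` class used above is in scope. `aptknt` is designed so that the collocation matrix comes out square; solving `A c = y` for the coefficients `c` then interpolates data `y` at the sites `tau`.

import numpy as np

tau = np.array([0.1, 0.5, 1.0, 1.5, 1.9])   # collocation sites
order = 3
knots = aptknt(tau, order)                  # acceptable knot vector for tau
A = spcol(knots, order, tau)
print(A.shape)  # (5, 5): one row per site, one column per basis function
# Each row holds the basis-function values (or derivatives, at repeated
# sites) at the corresponding site, so a spline with coefficients c takes
# the values np.dot(A, c) at the sites tau.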
<SYSTEM_TASK:> Convenience function to compute first derivative of basis functions. 'Memoized' for speed. <END_TASK> <USER_TASK:> Description: def d(self, xi): """Convenience function to compute first derivative of basis functions. 'Memoized' for speed."""
return self.__basis(xi, self.p, compute_derivatives=True)
<SYSTEM_TASK:> Plot basis functions over full range of knots. <END_TASK> <USER_TASK:> Description: def plot(self): """Plot basis functions over full range of knots. Convenience function. Requires matplotlib. """
try: import matplotlib.pyplot as plt except ImportError: from sys import stderr print("ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function", file=stderr) raise x_min = np.min(self.knot_vector) x_max = np.max(self.knot_vector) x = np.linspace(x_min, x_max, num=1000) N = np.array([self(i) for i in x]).T for n in N: plt.plot(x,n) return plt.show()
<SYSTEM_TASK:> Differentiate a B-spline once, and return the resulting coefficients and Bspline objects. <END_TASK> <USER_TASK:> Description: def __diff_internal(self): """Differentiate a B-spline once, and return the resulting coefficients and Bspline objects. This preserves the Bspline object nature of the data, enabling recursive implementation of higher-order differentiation (see `diff`). The value of the first derivative of `B` at a point `x` can be obtained as:: def diff1(B, x): terms = B.__diff_internal() return sum( ci*Bi(x) for ci,Bi in terms ) Returns: tuple of tuples, where each item is (coefficient, Bspline object). See: `diff`: differentiation of any order >= 0 """
assert self.p > 0, "order of Bspline must be > 0" # we already handle the other case in diff() # https://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-derv.html # t = self.knot_vector p = self.p Bi = Bspline( t[:-1], p-1 ) Bip1 = Bspline( t[1:], p-1 ) numer1 = +p numer2 = -p denom1 = t[p:-1] - t[:-(p+1)] denom2 = t[(p+1):] - t[1:-p] with np.errstate(divide='ignore', invalid='ignore'): ci = np.where(denom1 != 0., (numer1 / denom1), 0.) cip1 = np.where(denom2 != 0., (numer2 / denom2), 0.) return ( (ci,Bi), (cip1,Bip1) )
<SYSTEM_TASK:> Differentiate a B-spline `order` number of times. <END_TASK> <USER_TASK:> Description: def diff(self, order=1): """Differentiate a B-spline `order` number of times. Parameters: order: int, >= 0 Returns: **lambda** `x`: ... that evaluates the `order`-th derivative of `B` at the point `x`. The returned function internally uses __call__, which is 'memoized' for speed. """
order = int(order) if order < 0: raise ValueError("order must be >= 0, got %d" % (order)) if order == 0: return self.__call__ if order > self.p: # identically zero, but force the same output format as in the general case dummy = self.__call__(0.) # get number of basis functions and output dtype nbasis = dummy.shape[0] return lambda x: np.zeros( (nbasis,), dtype=dummy.dtype ) # accept but ignore input x # At each differentiation, each term maps into two new terms. # The number of terms in the result will be 2**order. # # This will cause an exponential explosion in the number of terms for high derivative orders, # but for the first few orders (practical usage; >3 is rarely needed) the approach works. # terms = [ (1.,self) ] for k in range(order): tmp = [] for Ci,Bi in terms: tmp.extend( (Ci*cn, Bn) for cn,Bn in Bi.__diff_internal() ) # NOTE: also propagate Ci terms = tmp # perform final summation at call time return lambda x: sum( ci*Bi(x) for ci,Bi in terms )
<SYSTEM_TASK:> Compute collocation matrix. <END_TASK> <USER_TASK:> Description: def collmat(self, tau, deriv_order=0):
    """Compute collocation matrix.

    Parameters:
        tau:
            Python list or rank-1 array, collocation sites
        deriv_order:
            int, >= 0, order of derivative for which to compute the
            collocation matrix. The default is 0, which means the function
            value itself.

    Returns:
        A:
            if len(tau) > 1, rank-2 array such that
                A[i,j] = D**deriv_order B_j(tau[i])
            where
                D**k = kth derivative (0 for function value itself)

            if len(tau) == 1, rank-1 array such that
                A[j] = D**deriv_order B_j(tau)

    Example:
        If the coefficients of a spline function are given in the vector c, then::

            np.sum( A*c, axis=-1 )

        will give a rank-1 array of function values at the sites tau[i] that
        were supplied to `collmat`.

        Similarly for derivatives (if the supplied `deriv_order` > 0).
    """
# get number of basis functions and output dtype dummy = self.__call__(0.) nbasis = dummy.shape[0] tau = np.atleast_1d(tau) if tau.ndim > 1: raise ValueError("tau must be a list or a rank-1 array") A = np.empty( (tau.shape[0], nbasis), dtype=dummy.dtype ) f = self.diff(order=deriv_order) for i,taui in enumerate(tau): A[i,:] = f(taui) return np.squeeze(A)
<SYSTEM_TASK:> Returns a normalized request string as described in the OAuth2 MAC spec. <END_TASK> <USER_TASK:> Description: def get_normalized_request_string(method, url, nonce, params, ext='', body_hash=None):
    """
    Returns a normalized request string as described in the OAuth2 MAC spec.

    http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.3.1
    """
urlparts = urlparse.urlparse(url) if urlparts.query: norm_url = '%s?%s' % (urlparts.path, urlparts.query) elif params: norm_url = '%s?%s' % (urlparts.path, get_normalized_params(params)) else: norm_url = urlparts.path if not body_hash: body_hash = get_body_hash(params) port = urlparts.port if not port: assert urlparts.scheme in ('http', 'https') if urlparts.scheme == 'http': port = 80 elif urlparts.scheme == 'https': port = 443 output = [nonce, method.upper(), norm_url, urlparts.hostname, port, body_hash, ext, ''] return '\n'.join(map(str, output))
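A sketch with illustrative values; the helpers `get_body_hash` and `get_normalized_params` are assumed to come from the same module:

s = get_normalized_request_string(
    'get', 'http://example.com/resource?b=1',
    nonce='264095:dj83hs9s', params={})
# The result is a newline-joined sequence:
# nonce, METHOD, path?query, hostname, port, body hash, ext, ''
print(repr(s))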
<SYSTEM_TASK:> Computes the viterbi paths using the current HMM model <END_TASK> <USER_TASK:> Description: def compute_viterbi_paths(self): """ Computes the viterbi paths using the current HMM model """
# get parameters K = len(self._observations) A = self._hmm.transition_matrix pi = self._hmm.initial_distribution # compute viterbi path for each trajectory paths = np.empty(K, dtype=object) for itraj in range(K): obs = self._observations[itraj] # compute output probability matrix pobs = self._hmm.output_model.p_obs(obs) # hidden path paths[itraj] = hidden.viterbi(A, pobs, pi) # done return paths
<SYSTEM_TASK:> Generate random samples from a Gaussian distribution. <END_TASK> <USER_TASK:> Description: def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
                    random_state=None):
    """Generate random samples from a Gaussian distribution.

    Parameters
    ----------
    mean : array_like, shape (n_features,)
        Mean of the distribution.

    covar : array_like, optional
        Covariance of the distribution. The shape depends on
        `covariance_type`: scalar if 'spherical', (n_features,) if 'diag',
        (n_features, n_features) if 'tied' or 'full'.

    covariance_type : string, optional
        Type of the covariance parameters. Must be one of
        'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.

    n_samples : int, optional
        Number of samples to generate. Defaults to 1.

    Returns
    -------
    X : array, shape (n_features, n_samples)
        Randomly generated sample
    """
rng = check_random_state(random_state) n_dim = len(mean) rand = rng.randn(n_dim, n_samples) if n_samples == 1: rand.shape = (n_dim,) if covariance_type == 'spherical': rand *= np.sqrt(covar) elif covariance_type == 'diag': rand = np.dot(np.diag(np.sqrt(covar)), rand) else: s, U = linalg.eigh(covar) s.clip(0, out=s) # get rid of tiny negatives np.sqrt(s, out=s) U *= s rand = np.dot(U, rand) return (rand.T + mean).T
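A quick sketch drawing from a full-covariance Gaussian; note the (n_features, n_samples) output layout:

import numpy as np

mean = np.array([0.0, 3.0])
covar = np.array([[2.0, 0.3],
                  [0.3, 0.5]])
X = sample_gaussian(mean, covar, covariance_type='full',
                    n_samples=10000, random_state=0)
print(X.shape)          # (2, 10000)
print(X.mean(axis=1))   # approximately [0, 3]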
<SYSTEM_TASK:> Performing the covariance M step for diagonal cases <END_TASK> <USER_TASK:> Description: def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): """Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm avg_means2 = gmm.means_ ** 2 avg_X_means = gmm.means_ * weighted_X_sum * norm return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
<SYSTEM_TASK:> Performing the covariance M step for spherical cases <END_TASK> <USER_TASK:> Description: def _covar_mstep_spherical(*args): """Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args) return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
<SYSTEM_TASK:> Performing the covariance M step for full cases <END_TASK> <USER_TASK:> Description: def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): """Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian # Distribution" n_features = X.shape[1] cv = np.empty((gmm.n_components, n_features, n_features)) for c in range(gmm.n_components): post = responsibilities[:, c] mu = gmm.means_[c] diff = X - mu with np.errstate(under='ignore'): # Underflow Errors in doing post * X.T are not important avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS) cv[c] = avg_cv + min_covar * np.eye(n_features) return cv
<SYSTEM_TASK:> Return the per-sample likelihood of the data under the model. <END_TASK> <USER_TASK:> Description: def score_samples(self, X): """Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters ---------- X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation """
check_is_fitted(self, 'means_') X = check_array(X) if X.ndim == 1: X = X[:, np.newaxis] if X.size == 0: return np.array([]), np.empty((0, self.n_components)) if X.shape[1] != self.means_.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = (log_multivariate_normal_density(X, self.means_, self.covars_, self.covariance_type) + np.log(self.weights_)) logprob = logsumexp(lpr, axis=1) responsibilities = np.exp(lpr - logprob[:, np.newaxis]) return logprob, responsibilities
<SYSTEM_TASK:> Predict label for data. <END_TASK> <USER_TASK:> Description: def predict(self, X): """Predict label for data. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = (n_samples,) """
logprob, responsibilities = self.score_samples(X) return responsibilities.argmax(axis=1)
<SYSTEM_TASK:> Estimate model parameters with the expectation-maximization <END_TASK> <USER_TASK:> Description: def fit(self, X, y=None):
    """Estimate model parameters with the expectation-maximization
    algorithm.

    An initialization step is performed before entering the EM
    algorithm. If you want to avoid this step, set the keyword
    argument init_params to the empty string '' when creating the
    GMM object. Likewise, if you would just like to do an
    initialization, set n_iter=0.

    Parameters
    ----------
    X : array_like, shape (n, n_features)
        List of n_features-dimensional data points. Each row
        corresponds to a single data point.
    """
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
    raise ValueError(
        'GMM estimation with %s components, but got only %s samples' %
        (self.n_components, X.shape[0]))

max_log_prob = -np.inf

for _ in range(self.n_init):
    if 'm' in self.init_params or not hasattr(self, 'means_'):
        if np.issubdtype(X.dtype, np.float32):
            from bhmm._external.clustering.kmeans_clustering_32 import init_centers
        elif np.issubdtype(X.dtype, np.float64):
            from bhmm._external.clustering.kmeans_clustering_64 import init_centers
        else:
            raise ValueError("Could not handle dtype %s for clustering!" % X.dtype)
        centers = init_centers(X, 'euclidean', self.n_components)
        self.means_ = centers

    if 'w' in self.init_params or not hasattr(self, 'weights_'):
        self.weights_ = np.tile(1.0 / self.n_components,
                                self.n_components)

    if 'c' in self.init_params or not hasattr(self, 'covars_'):
        cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
        if not cv.shape:
            cv.shape = (1, 1)
        self.covars_ = \
            distribute_covar_matrix_to_match_covariance_type(
                cv, self.covariance_type, self.n_components)

    # EM algorithms
    current_log_likelihood = None
    # reset self.converged_ to False
    self.converged_ = False

    # this line should be removed when 'thresh' is removed in v0.18
    tol = (self.tol if self.thresh is None
           else self.thresh / float(X.shape[0]))

    for i in range(self.n_iter):
        prev_log_likelihood = current_log_likelihood
        # Expectation step
        log_likelihoods, responsibilities = self.score_samples(X)
        current_log_likelihood = log_likelihoods.mean()

        # Check for convergence.
        # (should compare to self.tol when deprecated 'thresh' is
        # removed in v0.18)
        if prev_log_likelihood is not None:
            change = abs(current_log_likelihood - prev_log_likelihood)
            if change < tol:
                self.converged_ = True
                break

        # Maximization step
        self._do_mstep(X, responsibilities, self.params,
                       self.min_covar)

    # if the results are better, keep it
    if self.n_iter:
        if current_log_likelihood > max_log_prob:
            max_log_prob = current_log_likelihood
            best_params = {'weights': self.weights_,
                           'means': self.means_,
                           'covars': self.covars_}
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
    raise RuntimeError(
        "EM algorithm was never able to compute a valid likelihood " +
        "given initial parameters. Try different init parameters " +
        "(or increasing n_init) or check for degenerate data.")

# self.n_iter == 0 occurs when using GMM within HMM
if self.n_iter:
    self.covars_ = best_params['covars']
    self.means_ = best_params['means']
    self.weights_ = best_params['weights']
return self
<SYSTEM_TASK:> Perform the M-step of the EM algorithm and return the class weights. <END_TASK> <USER_TASK:> Description: def _do_mstep(self, X, responsibilities, params, min_covar=0):
    """ Perform the M-step of the EM algorithm and return the class weights.
    """
weights = responsibilities.sum(axis=0) weighted_X_sum = np.dot(responsibilities.T, X) inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS) if 'w' in params: self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS) if 'm' in params: self.means_ = weighted_X_sum * inverse_weights if 'c' in params: covar_mstep_func = _covar_mstep_funcs[self.covariance_type] self.covars_ = covar_mstep_func( self, X, responsibilities, weighted_X_sum, inverse_weights, min_covar) return weights
<SYSTEM_TASK:> Return the number of free parameters in the model. <END_TASK> <USER_TASK:> Description: def _n_parameters(self): """Return the number of free parameters in the model."""
ndim = self.means_.shape[1] if self.covariance_type == 'full': cov_params = self.n_components * ndim * (ndim + 1) / 2. elif self.covariance_type == 'diag': cov_params = self.n_components * ndim elif self.covariance_type == 'tied': cov_params = ndim * (ndim + 1) / 2. elif self.covariance_type == 'spherical': cov_params = self.n_components mean_params = ndim * self.n_components return int(cov_params + mean_params + self.n_components - 1)
<SYSTEM_TASK:> Bayesian information criterion for the current model fit <END_TASK> <USER_TASK:> Description: def bic(self, X): """Bayesian information criterion for the current model fit and the proposed data Parameters ---------- X : array of shape(n_samples, n_dimensions) Returns ------- bic: float (the lower the better) """
return (-2 * self.score(X).sum() + self._n_parameters() * np.log(X.shape[0]))
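A sketch of model selection with BIC, assuming the GMM class above (with its score method); the minimum should fall at the true number of components:

import numpy as np

rng = np.random.RandomState(0)
# Two well-separated 1-D Gaussian components.
X = np.concatenate([rng.randn(200, 1) - 3.0,
                    rng.randn(200, 1) + 3.0])
bics = [GMM(n_components=n).fit(X).bic(X) for n in (1, 2, 3)]
print(bics)  # expect the smallest value at n = 2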
<SYSTEM_TASK:> Generate an initial model with 1D-Gaussian output densities <END_TASK> <USER_TASK:> Description: def init_model_gaussian1d(observations, nstates, reversible=True): """Generate an initial model with 1D-Gaussian output densities Parameters ---------- observations : list of ndarray((T_i), dtype=float) list of arrays of length T_i with observation data nstates : int The number of states. Examples -------- Generate initial model for a gaussian output model. >>> from bhmm import testsystems >>> [model, observations, states] = testsystems.generate_synthetic_observations(output='gaussian') >>> initial_model = init_model_gaussian1d(observations, model.nstates) """
ntrajectories = len(observations) # Concatenate all observations. collected_observations = np.array([], dtype=config.dtype) for o_t in observations: collected_observations = np.append(collected_observations, o_t) # Fit a Gaussian mixture model to obtain emission distributions and state stationary probabilities. from bhmm._external.sklearn import mixture gmm = mixture.GMM(n_components=nstates) gmm.fit(collected_observations[:,None]) from bhmm import GaussianOutputModel output_model = GaussianOutputModel(nstates, means=gmm.means_[:,0], sigmas=np.sqrt(gmm.covars_[:,0])) logger().info("Gaussian output model:\n"+str(output_model)) # Extract stationary distributions. Pi = np.zeros([nstates], np.float64) Pi[:] = gmm.weights_[:] logger().info("GMM weights: %s" % str(gmm.weights_)) # Compute fractional state memberships. Nij = np.zeros([nstates, nstates], np.float64) for o_t in observations: # length of trajectory T = o_t.shape[0] # output probability pobs = output_model.p_obs(o_t) # normalize pobs /= pobs.sum(axis=1)[:,None] # Accumulate fractional transition counts from this trajectory. for t in range(T-1): Nij[:,:] = Nij[:,:] + np.outer(pobs[t,:], pobs[t+1,:]) logger().info("Nij\n"+str(Nij)) # Compute transition matrix maximum likelihood estimate. import msmtools.estimation as msmest import msmtools.analysis as msmana Tij = msmest.transition_matrix(Nij, reversible=reversible) pi = msmana.stationary_distribution(Tij) # Update model. model = HMM(pi, Tij, output_model) return model
<SYSTEM_TASK:> Returns the output probability for symbol o from all hidden states <END_TASK> <USER_TASK:> Description: def _p_o(self, o): """ Returns the output probability for symbol o from all hidden states Parameters ---------- o : float A single observation. Return ------ p_o : ndarray (N) p_o[i] is the probability density of the observation o from state i emission distribution Examples -------- Create an observation model. >>> output_model = GaussianOutputModel(nstates=3, means=[-1, 0, 1], sigmas=[0.5, 1, 2]) Compute the output probability of a single observation from all hidden states. >>> observation = 0 >>> p_o = output_model._p_o(observation) """
if self.__impl__ == self.__IMPL_C__: return gc.p_o(o, self.means, self.sigmas, out=None, dtype=type(o)) elif self.__impl__ == self.__IMPL_PYTHON__: if np.any(self.sigmas < np.finfo(self.sigmas.dtype).eps): raise RuntimeError('at least one sigma is too small to continue.') C = 1.0 / (np.sqrt(2.0 * np.pi) * self.sigmas) Pobs = C * np.exp(-0.5 * ((o-self.means)/self.sigmas)**2) return Pobs else: raise RuntimeError('Implementation '+str(self.__impl__)+' not available')
<SYSTEM_TASK:> Fits the output model given the observations and weights <END_TASK> <USER_TASK:> Description: def estimate(self, observations, weights):
    """
    Fits the output model given the observations and weights

    Parameters
    ----------
    observations : [ ndarray(T_k,) ] with K elements
        A list of K observation trajectories, each having length T_k and d dimensions
    weights : [ ndarray(T_k,nstates) ] with K elements
        A list of K weight matrices, each having length T_k
        weights[k][t,n] is the weight assignment from observations[k][t] to state index n

    Examples
    --------

    Generate an observation model and samples from each state.

    >>> ntrajectories = 3
    >>> nobs = 1000
    >>> output_model = GaussianOutputModel(nstates=3, means=[-1, 0, +1], sigmas=[0.5, 1, 2])
    >>> observations = [ np.random.randn(nobs) for _ in range(ntrajectories) ]  # random observations
    >>> weights = [ np.random.dirichlet([2, 3, 4], size=nobs) for _ in range(ntrajectories) ]  # random weights

    Update the observation model parameters by a maximum-likelihood fit.

    >>> output_model.estimate(observations, weights)
    """
# sizes
N = self.nstates
K = len(observations)

# fit means
self._means = np.zeros(N)
w_sum = np.zeros(N)
for k in range(K):
    # update numerator
    for i in range(N):
        self.means[i] += np.dot(weights[k][:, i], observations[k])
    # update denominator
    w_sum += np.sum(weights[k], axis=0)
# normalize
self._means /= w_sum

# fit variances
self._sigmas = np.zeros(N)
w_sum = np.zeros(N)
for k in range(K):
    # update numerator
    for i in range(N):
        Y = (observations[k] - self.means[i])**2
        self.sigmas[i] += np.dot(weights[k][:, i], Y)
    # update denominator
    w_sum += np.sum(weights[k], axis=0)
# normalize
self._sigmas /= w_sum
self._sigmas = np.sqrt(self.sigmas)

if np.any(self._sigmas < np.finfo(self._sigmas.dtype).eps):
    raise RuntimeError('at least one sigma is too small to continue.')
<SYSTEM_TASK:> r""" Samples of the initial distribution <END_TASK> <USER_TASK:> Description: def initial_distribution_samples(self): r""" Samples of the initial distribution """
res = np.empty((self.nsamples, self.nstates), dtype=config.dtype) for i in range(self.nsamples): res[i, :] = self._sampled_hmms[i].stationary_distribution return res
<SYSTEM_TASK:> r""" Samples of the transition matrix <END_TASK> <USER_TASK:> Description: def transition_matrix_samples(self): r""" Samples of the transition matrix """
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype) for i in range(self.nsamples): res[i, :, :] = self._sampled_hmms[i].transition_matrix return res
<SYSTEM_TASK:> r""" Samples of the eigenvalues <END_TASK> <USER_TASK:> Description: def eigenvalues_samples(self): r""" Samples of the eigenvalues """
res = np.empty((self.nsamples, self.nstates), dtype=config.dtype) for i in range(self.nsamples): res[i, :] = self._sampled_hmms[i].eigenvalues return res
<SYSTEM_TASK:> r""" Samples of the left eigenvectors of the hidden transition matrix <END_TASK> <USER_TASK:> Description: def eigenvectors_left_samples(self): r""" Samples of the left eigenvectors of the hidden transition matrix """
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype) for i in range(self.nsamples): res[i, :, :] = self._sampled_hmms[i].eigenvectors_left return res
<SYSTEM_TASK:> r""" Samples of the right eigenvectors of the hidden transition matrix <END_TASK> <USER_TASK:> Description: def eigenvectors_right_samples(self): r""" Samples of the right eigenvectors of the hidden transition matrix """
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype) for i in range(self.nsamples): res[i, :, :] = self._sampled_hmms[i].eigenvectors_right return res
<SYSTEM_TASK:> Returns the element-wise logarithm of the output probabilities for an entire trajectory and all hidden states <END_TASK> <USER_TASK:> Description: def log_p_obs(self, obs, out=None, dtype=np.float32): """ Returns the element-wise logarithm of the output probabilities for an entire trajectory and all hidden states This is a default implementation that will take the log of p_obs(obs) and should only be used if p_obs(obs) is numerically stable. If there is any danger of running into numerical problems *during* the calculation of p_obs, this function should be overwritten in order to compute the log-probabilities directly. Parameters ---------- obs : ndarray((T), dtype=int) a discrete trajectory of length T Return ------ p_o : ndarray (T,N) the log probability of generating the symbol at time point t from any of the N hidden states """
if out is None: return np.log(self.p_obs(obs)) else: self.p_obs(obs, out=out, dtype=dtype) np.log(out, out=out) return out
<SYSTEM_TASK:> Coarse grain transition matrix P using memberships M <END_TASK> <USER_TASK:> Description: def coarse_grain_transition_matrix(P, M): """ Coarse grain transition matrix P using memberships M Computes .. math:: P_c = (M^\top M)^{-1} M^\top P M Parameters ---------- P : ndarray(n, n) microstate transition matrix M : ndarray(n, m) membership matrix. Membership to macrostate m for each microstate. Returns ------- Pc : ndarray(m, m) coarse-grained transition matrix. """
# coarse-grain matrix: Pc = (M' M)^-1 M' P M W = np.linalg.inv(np.dot(M.T, M)) A = np.dot(np.dot(M.T, P), M) P_coarse = np.dot(W, A) # this coarse-graining can lead to negative elements. Setting them to zero here. P_coarse = np.maximum(P_coarse, 0) # and renormalize P_coarse /= P_coarse.sum(axis=1)[:, None] return P_coarse
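A small self-contained check of the coarse-graining formula, assuming coarse_grain_transition_matrix is in scope; the hard 0/1 membership matrix lumps four microstates into two macrostates and the numbers are illustrative only:

import numpy as np

P = np.array([[0.9, 0.1, 0.0, 0.0],
              [0.1, 0.8, 0.1, 0.0],
              [0.0, 0.1, 0.8, 0.1],
              [0.0, 0.0, 0.1, 0.9]])
M = np.array([[1.0, 0.0],
              [1.0, 0.0],
              [0.0, 1.0],
              [0.0, 1.0]])
Pc = coarse_grain_transition_matrix(P, M)
# Pc == [[0.95, 0.05], [0.05, 0.95]]; rows sum to one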
<SYSTEM_TASK:> Regularizes the hidden initial distribution and transition matrix. <END_TASK> <USER_TASK:> Description: def regularize_hidden(p0, P, reversible=True, stationary=False, C=None, eps=None): """ Regularizes the hidden initial distribution and transition matrix. Makes sure that the hidden initial distribution and transition matrix have nonzero probabilities by setting them to eps and then renormalizing. Avoids zeros that would cause estimation algorithms to crash or get stuck in suboptimal states. Parameters ---------- p0 : ndarray(n) Initial hidden distribution of the HMM P : ndarray(n, n) Hidden transition matrix reversible : bool HMM is reversible. Will make sure it is still reversible after modification. stationary : bool p0 is the stationary distribution of P. In this case, will not regularize p0 separately. If stationary=False, the regularization will be applied to p0. C : ndarray(n, n) Hidden count matrix. Only needed for stationary=True and P disconnected. eps : float or None minimum value of the resulting transition matrix. Default: evaluates to 0.01 / n. The coarse-graining equation can lead to negative elements and thus eps should be set to at least 0. Positive settings of eps are similar to a prior and enforce minimum positive values for all transition probabilities. Return ------ p0 : ndarray(n) regularized initial distribution P : ndarray(n, n) regularized transition matrix """
# input n = P.shape[0] if eps is None: # default output probability, in order to avoid zero columns eps = 0.01 / n # REGULARIZE P P = np.maximum(P, eps) # and renormalize P /= P.sum(axis=1)[:, None] # ensure reversibility if reversible: P = _tmatrix_disconnected.enforce_reversible_on_closed(P) # REGULARIZE p0 if stationary: p0 = _tmatrix_disconnected.stationary_distribution(P, C=C) else: p0 = np.maximum(p0, eps) p0 /= p0.sum() return p0, P
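The nonstationary p0 branch reduces to an eps-floor followed by renormalization; a minimal sketch with made-up numbers:

import numpy as np

p0 = np.array([1.0, 0.0, 0.0])  # a start state that was never sampled
eps = 0.01 / p0.shape[0]        # the default used above
p0 = np.maximum(p0, eps)
p0 /= p0.sum()
# all entries are now strictly positive and p0 still sums to one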
<SYSTEM_TASK:> Regularizes the output probabilities. <END_TASK> <USER_TASK:> Description: def regularize_pobs(B, nonempty=None, separate=None, eps=None): """ Regularizes the output probabilities. Makes sure that the output probability distributions have nonzero probabilities by setting them to eps and then renormalizing. Avoids zeros that would cause estimation algorithms to crash or get stuck in suboptimal states. Parameters ---------- B : ndarray(n, m) HMM output probabilities nonempty : None or iterable of int Nonempty set. Only regularize on this subset. separate : None or iterable of int Force the given set of observed states to stay in a separate hidden state. The remaining nstates-1 states will be assigned by a metastable decomposition. eps : float or None Minimum value of the resulting output probabilities. Default: evaluates to 0.01 / m. Returns ------- B : ndarray(n, m) Regularized output probabilities """
# input B = B.copy() # modify copy n, m = B.shape # number of hidden / observable states if eps is None: # default output probability, in order to avoid zero columns eps = 0.01 / m # observable sets if nonempty is None: nonempty = np.arange(m) if separate is None: B[:, nonempty] = np.maximum(B[:, nonempty], eps) else: nonempty_nonseparate = np.array(list(set(nonempty) - set(separate)), dtype=int) nonempty_separate = np.array(list(set(nonempty).intersection(set(separate))), dtype=int) B[:n-1, nonempty_nonseparate] = np.maximum(B[:n-1, nonempty_nonseparate], eps) B[n-1, nonempty_separate] = np.maximum(B[n-1, nonempty_separate], eps) # renormalize and return copy B /= B.sum(axis=1)[:, None] return B
<SYSTEM_TASK:> Initializes discrete HMM using maximum likelihood of observation counts <END_TASK> <USER_TASK:> Description: def init_discrete_hmm_ml(C_full, nstates, reversible=True, stationary=True, active_set=None, P=None, eps_A=None, eps_B=None, separate=None): """Initializes discrete HMM using maximum likelihood of observation counts"""
raise NotImplementedError('ML-initialization not yet implemented')
<SYSTEM_TASK:> r""" Updates the transition matrix and recomputes all derived quantities <END_TASK> <USER_TASK:> Description: def update(self, Pi, Tij): r""" Updates the transition matrix and recomputes all derived quantities """
from msmtools import analysis as msmana # update transition matrix by copy self._Tij = np.array(Tij) assert msmana.is_transition_matrix(self._Tij), 'Given transition matrix is not a stochastic matrix' assert self._Tij.shape[0] == self._nstates, 'Given transition matrix has unexpected number of states ' # reset spectral decomposition self._spectral_decomp_available = False # check initial distribution assert np.all(Pi >= 0), 'Given initial distribution contains negative elements.' assert np.any(Pi > 0), 'Given initial distribution is zero' self._Pi = np.array(Pi) / np.sum(Pi)
<SYSTEM_TASK:> r""" Whether the MSM is stationary, i.e. whether the initial distribution is the stationary distribution <END_TASK> <USER_TASK:> Description: def is_stationary(self): r""" Whether the MSM is stationary, i.e. whether the initial distribution is the stationary distribution of the hidden transition matrix. """
# for disconnected matrices, the stationary distribution depends on the estimator, so we can't compute # it directly. Therefore we test whether the initial distribution is stationary. return np.allclose(np.dot(self._Pi, self._Tij), self._Pi)
<SYSTEM_TASK:> r""" Compute stationary distribution of hidden states if possible. <END_TASK> <USER_TASK:> Description: def stationary_distribution(self): r""" Compute stationary distribution of hidden states if possible. Raises ------ ValueError if the HMM is not stationary """
if not _tmatrix_disconnected.is_connected(self._Tij, strong=False): raise ValueError('No unique stationary distribution because transition matrix is not connected') import msmtools.analysis as msmana return msmana.stationary_distribution(self._Tij)
<SYSTEM_TASK:> r""" Relaxation timescales of the hidden transition matrix <END_TASK> <USER_TASK:> Description: def timescales(self): r""" Relaxation timescales of the hidden transition matrix Returns ------- ts : ndarray(m) relaxation timescales in units of the input trajectory time step, defined by :math:`-tau / ln | \lambda_i |, i = 2,...,nstates`, where :math:`\lambda_i` are the hidden transition matrix eigenvalues. """
from msmtools.analysis.dense.decomposition import timescales_from_eigenvalues as _timescales self._ensure_spectral_decomposition() ts = _timescales(self._eigenvalues, tau=self._lag) return ts[1:]
<SYSTEM_TASK:> r""" Lifetimes of states of the hidden transition matrix <END_TASK> <USER_TASK:> Description: def lifetimes(self): r""" Lifetimes of states of the hidden transition matrix Returns ------- l : ndarray(nstates) state lifetimes in units of the input trajectory time step, defined by :math:`-tau / ln | p_{ii} |, i = 1,...,nstates`, where :math:`p_{ii}` are the diagonal entries of the hidden transition matrix. """
return -self._lag / np.log(np.diag(self.transition_matrix))
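As a quick numeric illustration of the formula, with lag 1 a self-transition probability of 0.9 yields a lifetime of -1/ln(0.9), about 9.5 steps; the matrix below is made up:

import numpy as np

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
lifetimes = -1.0 / np.log(np.diag(P))
# roughly [9.49, 4.48]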
<SYSTEM_TASK:> r""" Returns HMM on a subset of states <END_TASK> <USER_TASK:> Description: def sub_hmm(self, states): r""" Returns HMM on a subset of states Returns the HMM restricted to the selected subset of states. Will raise exception if the hidden transition matrix cannot be normalized on this subset """
# restrict initial distribution pi_sub = self._Pi[states] pi_sub /= pi_sub.sum() # restrict transition matrix P_sub = self._Tij[states, :][:, states] # checks if this selection is possible assert np.all(P_sub.sum(axis=1) > 0), \ 'Illegal sub_hmm request: transition matrix cannot be normalized on ' + str(states) P_sub /= P_sub.sum(axis=1)[:, None] # restrict output model out_sub = self.output_model.sub_output_model(states) return HMM(pi_sub, P_sub, out_sub, lag=self.lag)
<SYSTEM_TASK:> Compute the transition count matrix from hidden state trajectory. <END_TASK> <USER_TASK:> Description: def count_matrix(self): # TODO: does this belong here or to the BHMM sampler, or in a subclass containing HMM with data? """Compute the transition count matrix from hidden state trajectory. Returns ------- C : numpy.array with shape (nstates,nstates) C[i,j] is the number of transitions observed from state i to state j Raises ------ RuntimeError A RuntimeError is raised if the HMM model does not yet have a hidden state trajectory associated with it. Examples -------- """
if self.hidden_state_trajectories is None: raise RuntimeError('HMM model does not have a hidden state trajectory.') C = msmest.count_matrix(self.hidden_state_trajectories, 1, nstates=self._nstates) return C.toarray()
<SYSTEM_TASK:> Compute the counts at the first time step <END_TASK> <USER_TASK:> Description: def count_init(self): """Compute the counts at the first time step Returns ------- n : ndarray(nstates) n[i] is the number of trajectories starting in state i """
if self.hidden_state_trajectories is None: raise RuntimeError('HMM model does not have a hidden state trajectory.') n = [traj[0] for traj in self.hidden_state_trajectories] return np.bincount(n, minlength=self.nstates)
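A minimal sketch of the counting logic in isolation, with three made-up hidden trajectories:

import numpy as np

trajs = [np.array([0, 1, 1]), np.array([2, 0]), np.array([0, 2])]
first_states = [t[0] for t in trajs]
print(np.bincount(first_states, minlength=3))  # [2 0 1]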
<SYSTEM_TASK:> Collect a vector of all observations belonging to a specified hidden state. <END_TASK> <USER_TASK:> Description: def collect_observations_in_state(self, observations, state_index): # TODO: this would work well in a subclass with data """Collect a vector of all observations belonging to a specified hidden state. Parameters ---------- observations : list of numpy.array List of observed trajectories. state_index : int The index of the hidden state for which corresponding observations are to be retrieved. Returns ------- collected_observations : numpy.array with shape (nsamples,) The collected vector of observations belonging to the specified hidden state. Its dtype is inferred from the observations. Raises ------ RuntimeError A RuntimeError is raised if the HMM model does not yet have a hidden state trajectory associated with it. """
if not self.hidden_state_trajectories: raise RuntimeError('HMM model does not have a hidden state trajectory.') dtype = observations[0].dtype collected_observations = np.array([], dtype=dtype) for (s_t, o_t) in zip(self.hidden_state_trajectories, observations): indices = np.where(s_t == state_index)[0] collected_observations = np.append(collected_observations, o_t[indices]) return collected_observations
<SYSTEM_TASK:> Generate a synthetic state trajectory. <END_TASK> <USER_TASK:> Description: def generate_synthetic_state_trajectory(self, nsteps, initial_Pi=None, start=None, stop=None, dtype=np.int32): """Generate a synthetic state trajectory. Parameters ---------- nsteps : int Number of steps in the synthetic state trajectory to be generated. initial_Pi : np.array of shape (nstates,), optional, default=None The initial probability distribution, if samples are not to be taken from the intrinsic initial distribution. start : int starting state. Exclusive with initial_Pi stop : int stopping state. The trajectory will terminate early if the stopping state is reached before nsteps steps have been taken. dtype : numpy.dtype, optional, default=numpy.int32 The numpy dtype to use to store the synthetic trajectory. Returns ------- states : np.array of shape (nsteps,) of dtype=np.int32 The trajectory of hidden states, with each element in range(0,nstates). Examples -------- Generate a synthetic state trajectory of a specified length. >>> from bhmm import testsystems >>> model = testsystems.dalton_model() >>> states = model.generate_synthetic_state_trajectory(nsteps=100) """
# consistency check if initial_Pi is not None and start is not None: raise ValueError('Arguments initial_Pi and start are exclusive. Only set one of them.') # Generate first state sample. if start is None: if initial_Pi is not None: start = np.random.choice(range(self._nstates), size=1, p=initial_Pi) else: start = np.random.choice(range(self._nstates), size=1, p=self._Pi) # Generate and return trajectory from msmtools import generation as msmgen traj = msmgen.generate_traj(self.transition_matrix, nsteps, start=start, stop=stop, dt=1) return traj.astype(dtype)
<SYSTEM_TASK:> Generate a synthetic realization of observables. <END_TASK> <USER_TASK:> Description: def generate_synthetic_observation_trajectory(self, length, initial_Pi=None): """Generate a synthetic realization of observables. Parameters ---------- length : int Length of synthetic state trajectory to be generated. initial_Pi : np.array of shape (nstates,), optional, default=None The initial probability distribution, if samples are not to be taken from equilibrium. Returns ------- o_t : np.array of shape (length,) of dtype=np.float32 The trajectory of observations. s_t : np.array of shape (length,) of dtype=np.int32 The trajectory of hidden states, with each element in range(0,nstates). Examples -------- Generate a synthetic observation trajectory for an equilibrium realization. >>> from bhmm import testsystems >>> model = testsystems.dalton_model() >>> [o_t, s_t] = model.generate_synthetic_observation_trajectory(length=100) Use an initial nonequilibrium distribution. >>> from bhmm import testsystems >>> model = testsystems.dalton_model() >>> [o_t, s_t] = model.generate_synthetic_observation_trajectory(length=100, initial_Pi=np.array([1,0,0])) """
# First, generate synthetic state trajectory. s_t = self.generate_synthetic_state_trajectory(length, initial_Pi=initial_Pi) # Next, generate observations from these states. o_t = self.output_model.generate_observation_trajectory(s_t) return [o_t, s_t]
<SYSTEM_TASK:> Generate a number of synthetic realization of observables from this model. <END_TASK> <USER_TASK:> Description: def generate_synthetic_observation_trajectories(self, ntrajectories, length, initial_Pi=None): """Generate a number of synthetic realization of observables from this model. Parameters ---------- ntrajectories : int The number of trajectories to be generated. length : int Length of synthetic state trajectory to be generated. initial_Pi : np.array of shape (nstates,), optional, default=None The initial probability distribution, if samples are not to be taken from equilibrium. Returns ------- O : list of np.array of shape (length,) of dtype=np.float32 The trajectories of observations S : list of np.array of shape (length,) of dtype=np.int32 The trajectories of hidden states Examples -------- Generate a number of synthetic trajectories. >>> from bhmm import testsystems >>> model = testsystems.dalton_model() >>> O, S = model.generate_synthetic_observation_trajectories(ntrajectories=10, length=100) Use an initial nonequilibrium distribution. >>> from bhmm import testsystems >>> model = testsystems.dalton_model(nstates=3) >>> O, S = model.generate_synthetic_observation_trajectories(ntrajectories=10, length=100, initial_Pi=np.array([1,0,0])) """
O = list() # observations S = list() # state trajectories for trajectory_index in range(ntrajectories): o_t, s_t = self.generate_synthetic_observation_trajectory(length=length, initial_Pi=initial_Pi) O.append(o_t) S.append(s_t) return O, S
<SYSTEM_TASK:> Maximum likelihood estimation of output model given the observations and weights <END_TASK> <USER_TASK:> Description: def estimate(self, observations, weights): """ Maximum likelihood estimation of output model given the observations and weights Parameters ---------- observations : [ ndarray(T_k) ] with K elements A list of K observation trajectories, each having length T_k weights : [ ndarray(T_k, N) ] with K elements A list of K weight matrices, each having length T_k and containing the probability of any of the states in the given time step Examples -------- Generate an observation model and samples from each state. >>> import numpy as np >>> nobs = 1000 >>> B = np.array([[0.5,0.5],[0.1,0.9]]) >>> output_model = DiscreteOutputModel(B) >>> from scipy import stats >>> gens = [stats.rv_discrete(values=(range(len(B[i])), B[i])) for i in range(B.shape[0])] >>> obs = [gens[i].rvs(size=nobs) for i in range(B.shape[0])] >>> weights = [np.zeros((nobs, B.shape[1])) for i in range(B.shape[0])] >>> for i in range(B.shape[0]): weights[i][:, i] = 1.0 Update the observation model parameters by a maximum-likelihood fit. >>> output_model.estimate(obs, weights) """
# sizes N, M = self._output_probabilities.shape K = len(observations) # initialize output probability matrix self._output_probabilities = np.zeros((N, M)) # update output probability matrix (numerator) if self.__impl__ == self.__IMPL_C__: for k in range(K): dc.update_pout(observations[k], weights[k], self._output_probabilities, dtype=config.dtype) elif self.__impl__ == self.__IMPL_PYTHON__: for k in range(K): for o in range(M): times = np.where(observations[k] == o)[0] self._output_probabilities[:, o] += np.sum(weights[k][times, :], axis=0) else: raise RuntimeError('Implementation '+str(self.__impl__)+' not available') # normalize self._output_probabilities /= np.sum(self._output_probabilities, axis=1)[:, None]
<SYSTEM_TASK:> Sum the probabilities of being in state i to time t <END_TASK> <USER_TASK:> Description: def state_counts(gamma, T, out=None): """ Sum the probabilities of being in state i to time t Parameters ---------- gamma : ndarray((T,N), dtype = float) gamma[t,i] is the probability at time t to be in state i T : int number of time steps Returns ------- count : numpy.array shape (N) count[i] is the summed probability to be in state i See Also -------- state_probabilities : to calculate `gamma` """
return np.sum(gamma[0:T], axis=0, out=out)
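A tiny worked example, assuming state_counts is in scope (the gamma values are made up); only the first T rows enter the sum:

import numpy as np

gamma = np.array([[0.7, 0.3],
                  [0.5, 0.5],
                  [0.2, 0.8]])
print(state_counts(gamma, 2))  # [1.2 0.8], first two time steps only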
<SYSTEM_TASK:> Retrieves the logger instance associated to the given name. <END_TASK> <USER_TASK:> Description: def logger(name='BHMM', pattern='%(asctime)s %(levelname)s %(name)s: %(message)s', date_format='%H:%M:%S', handler=logging.StreamHandler(sys.stdout)): """ Retrieves the logger instance associated to the given name. :param name: The name of the logger instance. :type name: str :param pattern: The associated pattern. :type pattern: str :param date_format: The date format to be used in the pattern. :type date_format: str :param handler: The logging handler, by default console output. :type handler: FileHandler or StreamHandler or NullHandler :return: The logger. :rtype: Logger """
_logger = logging.getLogger(name) _logger.setLevel(config.log_level()) if not _logger.handlers: formatter = logging.Formatter(pattern, date_format) handler.setFormatter(formatter) handler.setLevel(config.log_level()) _logger.addHandler(handler) _logger.propagate = False return _logger
<SYSTEM_TASK:> Sample from the BHMM posterior. <END_TASK> <USER_TASK:> Description: def sample(self, nsamples, nburn=0, nthin=1, save_hidden_state_trajectory=False, call_back=None): """Sample from the BHMM posterior. Parameters ---------- nsamples : int The number of samples to generate. nburn : int, optional, default=0 The number of samples to discard to burn-in, following which `nsamples` will be generated. nthin : int, optional, default=1 The number of Gibbs sampling updates used to generate each returned sample. save_hidden_state_trajectory : bool, optional, default=False If True, the hidden state trajectory for each sample will be saved as well. call_back : function, optional, default=None a call back function with no arguments, which if given is being called after each computed sample. This is useful for implementing progress bars. Returns ------- models : list of bhmm.HMM The sampled HMM models from the Bayesian posterior. Examples -------- >>> from bhmm import testsystems >>> [model, observations, states, sampled_model] = testsystems.generate_random_bhmm(ntrajectories=5, length=1000) >>> nburn = 5 # run the sampler a bit before recording samples >>> nsamples = 10 # generate 10 samples >>> nthin = 2 # discard one sample in between each recorded sample >>> samples = sampled_model.sample(nsamples, nburn=nburn, nthin=nthin) """
# Run burn-in. for iteration in range(nburn): logger().info("Burn-in %8d / %8d" % (iteration, nburn)) self._update() # Collect data. models = list() for iteration in range(nsamples): logger().info("Iteration %8d / %8d" % (iteration, nsamples)) # Run a number of Gibbs sampling updates to generate each sample. for thin in range(nthin): self._update() # Save a copy of the current model. model_copy = copy.deepcopy(self.model) # print "Sampled: \n",repr(model_copy) if not save_hidden_state_trajectory: model_copy.hidden_state_trajectory = None models.append(model_copy) if call_back is not None: call_back() # Return the list of models saved. return models
<SYSTEM_TASK:> Update the current model using one round of Gibbs sampling. <END_TASK> <USER_TASK:> Description: def _update(self): """Update the current model using one round of Gibbs sampling. """
initial_time = time.time() self._updateHiddenStateTrajectories() self._updateEmissionProbabilities() self._updateTransitionMatrix() final_time = time.time() elapsed_time = final_time - initial_time logger().info("BHMM update iteration took %.3f s" % elapsed_time)
<SYSTEM_TASK:> Updates the hidden-state transition matrix and the initial distribution <END_TASK> <USER_TASK:> Description: def _updateTransitionMatrix(self): """ Updates the hidden-state transition matrix and the initial distribution """
# TRANSITION MATRIX C = self.model.count_matrix() + self.prior_C # posterior count matrix # check if we work with these options if self.reversible and not _tmatrix_disconnected.is_connected(C, strong=True): raise NotImplementedError('Encountered disconnected count matrix with sampling option reversible:\n ' + str(C) + '\nUse prior to ensure connectivity or use reversible=False.') # ensure consistent sparsity pattern (P0 might have additional zeros because of underflows) # TODO: these steps work around a bug in msmtools. Should be fixed there P0 = msmest.transition_matrix(C, reversible=self.reversible, maxiter=10000, warn_not_converged=False) zeros = np.where(P0 + P0.T == 0) C[zeros] = 0 # run sampler Tij = msmest.sample_tmatrix(C, nsample=1, nsteps=self.transition_matrix_sampling_steps, reversible=self.reversible) # INITIAL DISTRIBUTION if self.stationary: # p0 is consistent with P p0 = _tmatrix_disconnected.stationary_distribution(Tij, C=C) else: n0 = self.model.count_init().astype(float) first_timestep_counts_with_prior = n0 + self.prior_n0 positive = first_timestep_counts_with_prior > 0 p0 = np.zeros_like(n0) p0[positive] = np.random.dirichlet(first_timestep_counts_with_prior[positive]) # sample p0 from posterior # update HMM with new sample self.model.update(p0, Tij)
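The masked Dirichlet draw for the initial distribution can be illustrated in isolation; the counts and prior below are hypothetical:

import numpy as np

n0 = np.array([3.0, 0.0, 1.0])        # trajectories starting in each hidden state
prior_n0 = np.array([0.5, 0.5, 0.5])  # hypothetical prior counts
counts = n0 + prior_n0
p0 = np.zeros_like(counts)
positive = counts > 0
p0[positive] = np.random.dirichlet(counts[positive])
# p0 is a valid distribution; entries with zero posterior counts stay at zero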
<SYSTEM_TASK:> Computes the connected sets of C. <END_TASK> <USER_TASK:> Description: def connected_sets(C, mincount_connectivity=0, strong=True): """ Computes the connected sets of C. C : count matrix mincount_connectivity : float Minimum count which counts as a connection. strong : boolean True: Seek strongly connected sets. False: Seek weakly connected sets. """
import msmtools.estimation as msmest Cconn = C.copy() Cconn[np.where(C <= mincount_connectivity)] = 0 # treat each connected set separately S = msmest.connected_sets(Cconn, directed=strong) return S
<SYSTEM_TASK:> Computes the strongly connected closed sets of C <END_TASK> <USER_TASK:> Description: def closed_sets(C, mincount_connectivity=0): """ Computes the strongly connected closed sets of C """
n = np.shape(C)[0] S = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=True) closed = [] for s in S: mask = np.zeros(n, dtype=bool) mask[s] = True if C[np.ix_(mask, ~mask)].sum() == 0: # closed set, take it closed.append(s) return closed
<SYSTEM_TASK:> Returns the set of states that have at least one incoming or outgoing count <END_TASK> <USER_TASK:> Description: def nonempty_set(C, mincount_connectivity=0): """ Returns the set of states that have at least one incoming or outgoing count """
# truncate to states with at least one observed incoming or outgoing count. if mincount_connectivity > 0: C = C.copy() C[np.where(C < mincount_connectivity)] = 0 return np.where(C.sum(axis=0) + C.sum(axis=1) > 0)[0]
<SYSTEM_TASK:> Estimates full transition matrix for general connectivity structure <END_TASK> <USER_TASK:> Description: def estimate_P(C, reversible=True, fixed_statdist=None, maxiter=1000000, maxerr=1e-8, mincount_connectivity=0): """ Estimates full transition matrix for general connectivity structure Parameters ---------- C : ndarray count matrix reversible : bool estimate reversible? fixed_statdist : ndarray or None estimate with given stationary distribution maxiter : int Maximum number of reversible iterations. maxerr : float Stopping criterion for reversible iteration: Will stop when infinity norm of difference vector of two subsequent equilibrium distributions is below maxerr. mincount_connectivity : float Minimum count which counts as a connection. """
import msmtools.estimation as msmest n = np.shape(C)[0] # output matrix. Set initially to Identity matrix in order to handle empty states P = np.eye(n, dtype=np.float64) # decide if we need to proceed by weakly or strongly connected sets if reversible and fixed_statdist is None: # reversible to unknown eq. dist. - use strongly connected sets. S = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=True) for s in S: mask = np.zeros(n, dtype=bool) mask[s] = True if C[np.ix_(mask, ~mask)].sum() > np.finfo(C.dtype).eps: # outgoing transitions - use partial rev algo. transition_matrix_partial_rev(C, P, mask, maxiter=maxiter, maxerr=maxerr) else: # closed set - use standard estimator I = np.ix_(mask, mask) if s.size > 1: # leave diagonal 1 if single closed state. P[I] = msmest.transition_matrix(C[I], reversible=True, warn_not_converged=False, maxiter=maxiter, maxerr=maxerr) else: # nonreversible or given equilibrium distribution - weakly connected sets S = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=False) for s in S: I = np.ix_(s, s) if not reversible: Csub = C[I] # any zero rows? must set Cii = 1 to avoid dividing by zero zero_rows = np.where(Csub.sum(axis=1) == 0)[0] Csub[zero_rows, zero_rows] = 1.0 P[I] = msmest.transition_matrix(Csub, reversible=False) elif reversible and fixed_statdist is not None: P[I] = msmest.transition_matrix(C[I], reversible=True, fixed_statdist=fixed_statdist, maxiter=maxiter, maxerr=maxerr) else: # unknown case raise NotImplementedError('Transition estimation for the case reversible=' + str(reversible) + ' fixed_statdist=' + str(fixed_statdist is not None) + ' not implemented.') # done return P
<SYSTEM_TASK:> Maximum likelihood estimation of transition matrix which is reversible on parts <END_TASK> <USER_TASK:> Description: def transition_matrix_partial_rev(C, P, S, maxiter=1000000, maxerr=1e-8): """Maximum likelihood estimation of transition matrix which is reversible on parts Partially-reversible estimation of transition matrix. Maximizes the likelihood: .. math:: P_S = \arg\max \prod_{i \in S} \prod_j p_{ij}^{c_{ij}} \quad \text{subject to} \quad \Pi_S P_{S,S} = (\Pi_S P_{S,S})^\top where the product runs over all elements of the rows S, and detailed balance only acts on the block with rows and columns S. :math:`\Pi_S` is the diagonal matrix of equilibrium probabilities restricted to set S. Note that this formulation only determines the rows of :math:`P` selected by S; the remaining rows of :math:`P` are not modified. Parameters ---------- C : ndarray full count matrix P : ndarray full transition matrix to write to. Will overwrite P[S] S : ndarray, bool boolean selection of reversible set with outgoing transitions maxiter : int maximum number of fixed-point iterations before terminating without convergence. maxerr : float maximum difference in matrix sums between iterations (infinity norm) in order to stop. """
# test input assert np.array_equal(C.shape, P.shape) # constants A = C[S][:, S] B = C[S][:, ~S] ATA = A + A.T countsums = C[S].sum(axis=1) # initialize X = 0.5 * ATA Y = C[S][:, ~S] # normalize X, Y totalsum = X.sum() + Y.sum() X /= totalsum Y /= totalsum # rowsums rowsums = X.sum(axis=1) + Y.sum(axis=1) err = 1.0 it = 0 while err > maxerr and it < maxiter: # update d = countsums / rowsums X = ATA / (d[:, None] + d) Y = B / d[:, None] # normalize X, Y totalsum = X.sum() + Y.sum() X /= totalsum Y /= totalsum # update sums rowsums_new = X.sum(axis=1) + Y.sum(axis=1) # compute error err = np.max(np.abs(rowsums_new - rowsums)) # update rowsums = rowsums_new it += 1 # write to P P[np.ix_(S, S)] = X P[np.ix_(S, ~S)] = Y P[S] /= P[S].sum(axis=1)[:, None]
<SYSTEM_TASK:> Enforces transition matrix P to be reversible on its closed sets. <END_TASK> <USER_TASK:> Description: def enforce_reversible_on_closed(P): """ Enforces transition matrix P to be reversible on its closed sets. """
import msmtools.analysis as msmana n = np.shape(P)[0] Prev = P.copy() # treat each closed set separately sets = closed_sets(P) for s in sets: I = np.ix_(s, s) # compute stationary probability pi_s = msmana.stationary_distribution(P[I]) # symmetrize X_s = pi_s[:, None] * P[I] X_s = 0.5 * (X_s + X_s.T) # normalize Prev[I] = X_s / X_s.sum(axis=1)[:, None] return Prev
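The pi-weighted symmetrization at the core of this function can be checked on a small non-reversible example; the 3-state cycle and its uniform stationary distribution are hardcoded here instead of being computed via msmtools:

import numpy as np

P = np.array([[0.0, 1.0, 0.0],
              [0.0, 0.0, 1.0],
              [1.0, 0.0, 0.0]])  # deterministic cycle, clearly not reversible
pi = np.ones(3) / 3.0            # its stationary distribution
X = pi[:, None] * P
X = 0.5 * (X + X.T)
P_rev = X / X.sum(axis=1)[:, None]
# P_rev == [[0, .5, .5], [.5, 0, .5], [.5, .5, 0]] and fulfills detailed balance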
<SYSTEM_TASK:> Returns whether P is reversible on its weakly connected sets <END_TASK> <USER_TASK:> Description: def is_reversible(P): """ Returns whether P is reversible on its weakly connected sets """
import msmtools.analysis as msmana # treat each weakly connected set separately sets = connected_sets(P, strong=False) for s in sets: Ps = P[s, :][:, s] if not msmana.is_transition_matrix(Ps): return False # isn't even a transition matrix! pi = msmana.stationary_distribution(Ps) X = pi[:, None] * Ps if not np.allclose(X, X.T): return False # survived. return True
<SYSTEM_TASK:> Simple estimator for stationary distribution for multiple strongly connected sets <END_TASK> <USER_TASK:> Description: def stationary_distribution(P, C=None, mincount_connectivity=0): """ Simple estimator for stationary distribution for multiple strongly connected sets """
# can be replaced by msmtools.analysis.stationary_distribution in next msmtools release from msmtools.analysis.dense.stationary_vector import stationary_distribution as msmstatdist if C is None: if is_connected(P, strong=True): return msmstatdist(P) else: raise ValueError('Computing stationary distribution for disconnected matrix. Need count matrix.') # disconnected sets n = np.shape(C)[0] ctot = np.sum(C) pi = np.zeros(n) # treat each weakly connected set separately sets = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=False) for s in sets: # compute weight w = np.sum(C[s, :]) / ctot pi[s] = w * msmstatdist(P[s, :][:, s]) # reinforce normalization pi /= np.sum(pi) return pi
<SYSTEM_TASK:> r""" Samples of the Gaussian distribution means <END_TASK> <USER_TASK:> Description: def means_samples(self): r""" Samples of the Gaussian distribution means """
res = np.empty((self.nsamples, self.nstates, self.dimension), dtype=config.dtype) for i in range(self.nsamples): for j in range(self.nstates): res[i, j, :] = self._sampled_hmms[i].means[j] return res
<SYSTEM_TASK:> r""" Samples of the Gaussian distribution standard deviations <END_TASK> <USER_TASK:> Description: def sigmas_samples(self): r""" Samples of the Gaussian distribution standard deviations """
res = np.empty((self.nsamples, self.nstates, self.dimension), dtype=config.dtype) for i in range(self.nsamples): for j in range(self.nstates): res[i, j, :] = self._sampled_hmms[i].sigmas[j] return res
<SYSTEM_TASK:> Suggests a HMM model type based on the observation data <END_TASK> <USER_TASK:> Description: def _guess_output_type(observations): """ Suggests a HMM model type based on the observation data Uses simple rules in order to decide which HMM model type makes sense based on observation data. If observations consist of arrays/lists of integer numbers (irrespective of whether the python type is int or float), our guess is 'discrete'. If observations consist of arrays/lists of 1D-floats, our guess is 'gaussian'. In any other case, a TypeError is raised because we are not supporting that data type yet. Parameters ---------- observations : list of lists or arrays observation trajectories Returns ------- output_type : str One of {'discrete', 'gaussian'} """
from bhmm.util import types as _types o1 = _np.array(observations[0]) # CASE: vector of int? Then we want a discrete HMM if _types.is_int_vector(o1): return 'discrete' # CASE: not int type, but everything is an integral number. Then we also go for discrete if _np.allclose(o1, _np.round(o1)): isintegral = True for i in range(1, len(observations)): if not _np.allclose(observations[i], _np.round(observations[i])): isintegral = False break if isintegral: return 'discrete' # CASE: vector of double? Then we want a gaussian if _types.is_float_vector(o1): return 'gaussian' # None of the above? Then we currently do not support this format! raise TypeError('Observations is neither sequences of integers nor 1D-sequences of floats. The current version' 'does not support your input.')
<SYSTEM_TASK:> r""" Create new trajectories that are subsampled at lag but shifted <END_TASK> <USER_TASK:> Description: def lag_observations(observations, lag, stride=1): r""" Create new trajectories that are subsampled at lag but shifted Given a trajectory (s0, s1, s2, s3, s4, ...) and lag 3, this function will generate 3 trajectories (s0, s3, s6, ...), (s1, s4, s7, ...) and (s2, s5, s8, ...). Use this function in order to parametrize a MLE at lag times larger than 1 without discarding data. Do not use this function for Bayesian estimators, where data must be given such that subsequent transitions are uncorrelated. Parameters ---------- observations : list of int arrays observation trajectories lag : int lag time stride : int, default=1 will return only one trajectory for every stride. Use this for Bayesian analysis. """
obsnew = [] for obs in observations: for shift in range(0, lag, stride): obs_lagged = (obs[shift:][::lag]) if len(obs_lagged) > 1: obsnew.append(obs_lagged) return obsnew
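A quick sketch of the shifting and subsampling, assuming lag_observations is in scope:

import numpy as np

obs = [np.arange(9)]  # one toy trajectory: 0, 1, ..., 8
print(lag_observations(obs, lag=3))
# [array([0, 3, 6]), array([1, 4, 7]), array([2, 5, 8])]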
<SYSTEM_TASK:> Initializes a 1D-Gaussian HMM <END_TASK> <USER_TASK:> Description: def gaussian_hmm(pi, P, means, sigmas): """ Initializes a 1D-Gaussian HMM Parameters ---------- pi : ndarray(nstates, ) Initial distribution. P : ndarray(nstates,nstates) Hidden transition matrix means : ndarray(nstates, ) Means of Gaussian output distributions sigmas : ndarray(nstates, ) Standard deviations of Gaussian output distributions """
from bhmm.hmm.gaussian_hmm import GaussianHMM from bhmm.output_models.gaussian import GaussianOutputModel # count states nstates = _np.array(P).shape[0] # initialize output model output_model = GaussianOutputModel(nstates, means, sigmas) # initialize general HMM from bhmm.hmm.generic_hmm import HMM as _HMM ghmm = _HMM(pi, P, output_model) # turn it into a Gaussian HMM ghmm = GaussianHMM(ghmm) return ghmm
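A minimal usage sketch, assuming gaussian_hmm is importable at the package level; the parameter values are arbitrary:

import numpy as np
from bhmm import gaussian_hmm  # assumed package-level import

pi = np.array([0.5, 0.5])
P = np.array([[0.9, 0.1],
              [0.1, 0.9]])
hmm = gaussian_hmm(pi, P, means=[-1.0, 1.0], sigmas=[0.5, 0.5])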
<SYSTEM_TASK:> Initializes a discrete HMM <END_TASK> <USER_TASK:> Description: def discrete_hmm(pi, P, pout): """ Initializes a discrete HMM Parameters ---------- pi : ndarray(nstates, ) Initial distribution. P : ndarray(nstates,nstates) Hidden transition matrix pout : ndarray(nstates,nsymbols) Output matrix from hidden states to observable symbols """
from bhmm.hmm.discrete_hmm import DiscreteHMM from bhmm.output_models.discrete import DiscreteOutputModel # initialize output model output_model = DiscreteOutputModel(pout) # initialize general HMM from bhmm.hmm.generic_hmm import HMM as _HMM dhmm = _HMM(pi, P, output_model) # turn it into a discrete HMM dhmm = DiscreteHMM(dhmm) return dhmm
<SYSTEM_TASK:> r""" Estimate maximum-likelihood HMM <END_TASK> <USER_TASK:> Description: def estimate_hmm(observations, nstates, lag=1, initial_model=None, output=None, reversible=True, stationary=False, p=None, accuracy=1e-3, maxit=1000, maxit_P=100000, mincount_connectivity=1e-2): r""" Estimate maximum-likelihood HMM Generic maximum-likelihood estimation of HMMs Parameters ---------- observations : list of numpy arrays representing temporal data `observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i` nstates : int The number of states in the model. lag : int the lag time at which observations should be read initial_model : HMM, optional, default=None If specified, the given initial model will be used to initialize the BHMM. Otherwise, a heuristic scheme is used to generate an initial guess. output : str, optional, default=None Output model type from [None, 'gaussian', 'discrete']. If None, will automatically select an output model type based on the format of observations. reversible : bool, optional, default=True If True, a prior that enforces reversible transition matrices (detailed balance) is used; otherwise, a standard non-reversible prior is used. stationary : bool, optional, default=False If True, the initial distribution of hidden states is self-consistently computed as the stationary distribution of the transition matrix. If False, it will be estimated from the starting states. Only set this to true if you're sure that the observation trajectories are initiated from a global equilibrium distribution. p : ndarray (nstates), optional, default=None Initial or fixed stationary distribution. If given and stationary=True, transition matrices will be estimated with the constraint that they have p as their stationary distribution. If given and stationary=False, p is the fixed initial distribution of hidden states. accuracy : float convergence threshold for EM iteration. When two the likelihood does not increase by more than accuracy, the iteration is stopped successfully. maxit : int stopping criterion for EM iteration. When so many iterations are performed without reaching the requested accuracy, the iteration is stopped without convergence (a warning is given) Return ------ hmm : :class:`HMM <bhmm.hmm.generic_hmm.HMM>` """
# select output model type if output is None: output = _guess_output_type(observations) if lag > 1: observations = lag_observations(observations, lag) # construct estimator from bhmm.estimators.maximum_likelihood import MaximumLikelihoodEstimator as _MaximumLikelihoodEstimator est = _MaximumLikelihoodEstimator(observations, nstates, initial_model=initial_model, output=output, reversible=reversible, stationary=stationary, p=p, accuracy=accuracy, maxit=maxit, maxit_P=maxit_P) # run est.fit() # set lag time est.hmm._lag = lag # return model # TODO: package into specific class (DiscreteHMM, GaussianHMM) return est.hmm
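A minimal end-to-end sketch, assuming estimate_hmm is importable at the package level and using synthetic discrete data (the observation values carry no physical meaning):

import numpy as np
from bhmm import estimate_hmm  # assumed package-level import

np.random.seed(0)
obs = [np.random.randint(0, 3, size=1000) for _ in range(5)]  # 5 toy trajectories
hmm = estimate_hmm(obs, nstates=2, lag=1, output='discrete')
print(hmm.transition_matrix)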
<SYSTEM_TASK:> r""" Bayesian HMM based on sampling the posterior <END_TASK> <USER_TASK:> Description: def bayesian_hmm(observations, estimated_hmm, nsample=100, reversible=True, stationary=False, p0_prior='mixed', transition_matrix_prior='mixed', store_hidden=False, call_back=None): r""" Bayesian HMM based on sampling the posterior Generic maximum-likelihood estimation of HMMs Parameters ---------- observations : list of numpy arrays representing temporal data `observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i` estimated_hmm : HMM HMM estimated from estimate_hmm or initialize_hmm reversible : bool, optional, default=True If True, a prior that enforces reversible transition matrices (detailed balance) is used; otherwise, a standard non-reversible prior is used. stationary : bool, optional, default=False If True, the stationary distribution of the transition matrix will be used as initial distribution. Only use True if you are confident that the observation trajectories are started from a global equilibrium. If False, the initial distribution will be estimated as usual from the first step of the hidden trajectories. nsample : int, optional, default=100 number of Gibbs sampling steps p0_prior : None, str, float or ndarray(n) Prior for the initial distribution of the HMM. Will only be active if stationary=False (stationary=True means that p0 is identical to the stationary distribution of the transition matrix). Currently implements different versions of the Dirichlet prior that is conjugate to the Dirichlet distribution of p0. p0 is sampled from: .. math: p0 \sim \prod_i (p0)_i^{a_i + n_i - 1} where :math:`n_i` are the number of times a hidden trajectory was in state :math:`i` at time step 0 and :math:`a_i` is the prior count. Following options are available: | 'mixed' (default), :math:`a_i = p_{0,init}`, where :math:`p_{0,init}` is the initial distribution of initial_model. | 'uniform', :math:`a_i = 1` | ndarray(n) or float, the given array will be used as A. | None, :math:`a_i = 0`. This option ensures coincidence between sample mean an MLE. Will sooner or later lead to sampling problems, because as soon as zero trajectories are drawn from a given state, the sampler cannot recover and that state will never serve as a starting state subsequently. Only recommended in the large data regime and when the probability to sample zero trajectories from any state is negligible. transition_matrix_prior : str or ndarray(n, n) Prior for the HMM transition matrix. Currently implements Dirichlet priors if reversible=False and reversible transition matrix priors as described in [1]_ if reversible=True. For the nonreversible case the posterior of transition matrix :math:`P` is: .. math: P \sim \prod_{i,j} p_{ij}^{b_{ij} + c_{ij} - 1} where :math:`c_{ij}` are the number of transitions found for hidden trajectories and :math:`b_{ij}` are prior counts. | 'mixed' (default), :math:`b_{ij} = p_{ij,init}`, where :math:`p_{ij,init}` is the transition matrix of initial_model. That means one prior count will be used per row. | 'uniform', :math:`b_{ij} = 1` | ndarray(n, n) or broadcastable, the given array will be used as B. | None, :math:`b_ij = 0`. This option ensures coincidence between sample mean an MLE. Will sooner or later lead to sampling problems, because as soon as a transition :math:`ij` will not occur in a sample, the sampler cannot recover and that transition will never be sampled again. This option is not recommended unless you have a small HMM and a lot of data. 
store_hidden : bool, optional, default=False store hidden trajectories in sampled HMMs call_back : function, optional, default=None a call back function with no arguments, which if given is being called after each computed sample. This is useful for implementing progress bars. Return ------ hmm : :class:`SampledHMM <bhmm.hmm.generic_sampled_hmm.SampledHMM>` References ---------- .. [1] Trendelkamp-Schroer, B., H. Wu, F. Paul and F. Noe: Estimation and uncertainty of reversible Markov models. J. Chem. Phys. 143, 174101 (2015). """
# construct estimator from bhmm.estimators.bayesian_sampling import BayesianHMMSampler as _BHMM sampler = _BHMM(observations, estimated_hmm.nstates, initial_model=estimated_hmm, reversible=reversible, stationary=stationary, transition_matrix_sampling_steps=1000, p0_prior=p0_prior, transition_matrix_prior=transition_matrix_prior, output=estimated_hmm.output_model.model_type) # Sample models. sampled_hmms = sampler.sample(nsamples=nsample, save_hidden_state_trajectory=store_hidden, call_back=call_back) # return model from bhmm.hmm.generic_sampled_hmm import SampledHMM return SampledHMM(estimated_hmm, sampled_hmms)
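Continuing the estimate_hmm sketch above, the maximum-likelihood model serves as estimated_hmm; transition_matrix_samples is the sampled-HMM property shown earlier in this document:

from bhmm import bayesian_hmm  # assumed package-level import

sampled = bayesian_hmm(obs, hmm, nsample=50)
P_samples = sampled.transition_matrix_samples  # shape (50, nstates, nstates)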
<SYSTEM_TASK:> Computes the sum of arr assuming arr is in the log domain. <END_TASK> <USER_TASK:> Description: def logsumexp(arr, axis=0): """Computes the sum of arr assuming arr is in the log domain. Returns log(sum(exp(arr))) while minimizing the possibility of over/underflow. Examples -------- >>> import numpy as np >>> from sklearn.utils.extmath import logsumexp >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107 """
arr = np.rollaxis(arr, axis) # Use the max to normalize, as with the log this is what accumulates # the less errors vmax = arr.max(axis=0) out = np.log(np.sum(np.exp(arr - vmax), axis=0)) out += vmax return out
<SYSTEM_TASK:> Convert a sparse matrix to a given format. <END_TASK> <USER_TASK:> Description: def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy, force_all_finite): """Convert a sparse matrix to a given format. Checks the sparse format of spmatrix and converts if necessary. Parameters ---------- spmatrix : scipy sparse matrix Input to validate and convert. accept_sparse : string, list of string or None (default=None) String[s] representing allowed sparse matrix formats ('csc', 'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse matrix input will raise an error. If the input is sparse but not in the allowed format, it will be converted to the first listed format. dtype : string, type or None (default=none) Data type of result. If None, the dtype of the input is preserved. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. Returns ------- spmatrix_converted : scipy sparse matrix. Matrix that is ensured to have an allowed type. """
if accept_sparse is None: raise TypeError('A sparse matrix was passed, but dense ' 'data is required. Use X.toarray() to ' 'convert to a dense numpy array.') sparse_type = spmatrix.format if dtype is None: dtype = spmatrix.dtype if sparse_type in accept_sparse: # correct type if dtype == spmatrix.dtype: # correct dtype if copy: spmatrix = spmatrix.copy() else: # convert dtype spmatrix = spmatrix.astype(dtype) else: # create new spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype) if force_all_finite: if not hasattr(spmatrix, "data"): warnings.warn("Can't check %s sparse matrix for nan or inf." % spmatrix.format) else: _assert_all_finite(spmatrix.data) if hasattr(spmatrix, "data"): spmatrix.data = np.array(spmatrix.data, copy=False, order=order) return spmatrix
<SYSTEM_TASK:> Input validation on an array, list, sparse matrix or similar. <END_TASK> <USER_TASK:> Description: def check_array(array, accept_sparse=None, dtype="numeric", order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, ensure_min_samples=1, ensure_min_features=1): """Input validation on an array, list, sparse matrix or similar. By default, the input is converted to an at least 2D numpy array. If the dtype of the array is object, attempt converting to float, raising on failure. Parameters ---------- array : object Input object to check / convert. accept_sparse : string, list of string or None (default=None) String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. None means that sparse matrix input will raise an error. If the input is sparse but not in the allowed format, it will be converted to the first listed format. dtype : string, type or None (default="numeric") Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. ensure_2d : boolean (default=True) Whether to make X at least 2d. allow_nd : boolean (default=False) Whether to allow X.ndim > 2. ensure_min_samples : int (default=1) Make sure that the array has a minimum number of samples in its first axis (rows for a 2D array). Setting to 0 disables this check. ensure_min_features : int (default=1) Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when the input data has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. Returns ------- X_converted : object The converted and validated X. """
if isinstance(accept_sparse, str): accept_sparse = [accept_sparse] # store whether originally we wanted numeric dtype dtype_numeric = dtype == "numeric" if sp.issparse(array): if dtype_numeric: dtype = None array = _ensure_sparse_format(array, accept_sparse, dtype, order, copy, force_all_finite) else: if ensure_2d: array = np.atleast_2d(array) if dtype_numeric: if hasattr(array, "dtype") and getattr(array.dtype, "kind", None) == "O": # if input is object, convert to float. dtype = np.float64 else: dtype = None array = np.array(array, dtype=dtype, order=order, copy=copy) # make sure we actually converted to numeric: if dtype_numeric and array.dtype.kind == "O": array = array.astype(np.float64) if not allow_nd and array.ndim >= 3: raise ValueError("Found array with dim %d. Expected <= 2" % array.ndim) if force_all_finite: _assert_all_finite(array) shape_repr = _shape_repr(array.shape) if ensure_min_samples > 0: n_samples = _num_samples(array) if n_samples < ensure_min_samples: raise ValueError("Found array with %d sample(s) (shape=%s) while a" " minimum of %d is required." % (n_samples, shape_repr, ensure_min_samples)) if ensure_min_features > 0 and array.ndim == 2: n_features = array.shape[1] if n_features < ensure_min_features: raise ValueError("Found array with %d feature(s) (shape=%s) while" " a minimum of %d is required." % (n_features, shape_repr, ensure_min_features)) return array
<SYSTEM_TASK:> Compute confidence intervals of beta distributions. <END_TASK> <USER_TASK:> Description: def beta_confidence_intervals(ci_X, ntrials, ci=0.95): """ Compute confidence intervals of beta distributions. Parameters ---------- ci_X : numpy.array Computed confidence interval estimate from `ntrials` experiments ntrials : int The number of trials that were run. ci : float, optional, default=0.95 Confidence interval to report (e.g. 0.95 for 95% confidence interval) Returns ------- Plow : float The lower bound of the symmetric confidence interval. Phigh : float The upper bound of the symmetric confidence interval. Examples -------- >>> ci_X = np.random.rand(10,10) >>> ntrials = 100 >>> [Plow, Phigh] = beta_confidence_intervals(ci_X, ntrials) """
# Compute low and high confidence interval for symmetric CI about mean. ci_low = 0.5 - ci/2 ci_high = 0.5 + ci/2 # Compute for every element of ci_X. from scipy.stats import beta Plow = ci_X * 0.0 Phigh = ci_X * 0.0 for i in range(ci_X.shape[0]): for j in range(ci_X.shape[1]): Plow[i,j] = beta.ppf(ci_low, a=ci_X[i,j] * ntrials, b=(1-ci_X[i,j]) * ntrials) Phigh[i,j] = beta.ppf(ci_high, a=ci_X[i,j] * ntrials, b=(1-ci_X[i,j]) * ntrials) return [Plow, Phigh]
<SYSTEM_TASK:> Compute specified symmetric confidence interval for empirical sample. <END_TASK> <USER_TASK:> Description: def empirical_confidence_interval(sample, interval=0.95): """ Compute specified symmetric confidence interval for empirical sample. Parameters ---------- sample : numpy.array The empirical samples. interval : float, optional, default=0.95 Size of desired symmetric confidence interval (0 < interval < 1) e.g. 0.68 for 68% confidence interval, 0.95 for 95% confidence interval Returns ------- low : float The lower bound of the symmetric confidence interval. high : float The upper bound of the symmetric confidence interval. Examples -------- >>> sample = np.random.randn(1000) >>> [low, high] = empirical_confidence_interval(sample) >>> [low, high] = empirical_confidence_interval(sample, interval=0.65) >>> [low, high] = empirical_confidence_interval(sample, interval=0.99) """
# Sort sample in increasing order. sample = np.sort(sample) # Determine sample size. N = len(sample) # Compute low and high indices. low_index = int(np.round((N-1) * (0.5 - interval/2))) + 1 high_index = int(np.round((N-1) * (0.5 + interval/2))) + 1 # Compute low and high. low = sample[low_index] high = sample[high_index] return [low, high]
<SYSTEM_TASK:> Computes the mean and alpha-confidence interval of the given sample set <END_TASK> <USER_TASK:> Description: def confidence_interval(data, alpha): """ Computes the mean and alpha-confidence interval of the given sample set Parameters ---------- data : ndarray a 1D-array of samples alpha : float in [0,1] the confidence level, i.e. percentage of data included in the interval Returns ------- m, l, r : float m is the mean of the data; l and r are the lower and upper boundaries of the confidence interval that contains a fraction alpha of the data around the mean. """
if alpha < 0 or alpha > 1: raise ValueError('Not a meaningful confidence level: '+str(alpha)) # compute mean m = np.mean(data) # sort data sdata = np.sort(data) # index of the mean im = np.searchsorted(sdata, m) if im == 0 or im == len(sdata): pm = im else: pm = (im-1) + (m-sdata[im-1]) / (sdata[im]-sdata[im-1]) # left interval boundary pl = pm - alpha * pm il1 = max(0, int(math.floor(pl))) il2 = min(len(sdata)-1, int(math.ceil(pl))) l = sdata[il1] + (pl - il1)*(sdata[il2] - sdata[il1]) # right interval boundary pr = pm + alpha * (len(data)-im) ir1 = max(0, int(math.floor(pr))) ir2 = min(len(sdata)-1, int(math.ceil(pr))) r = sdata[ir1] + (pr - ir1)*(sdata[ir2] - sdata[ir1]) # return return m, l, r
<SYSTEM_TASK:> Return the connection status, both locally and remotely. <END_TASK> <USER_TASK:> Description: def status(self, remote=False): """ Return the connection status, both locally and remotely. The local connection status is a dictionary that gives: * the count of multiple queries sent to the server. * the count of single queries sent to the server. * the count of actions sent to the server. * the count of actions executed successfully by the server. * the count of actions queued to go to the server. The remote connection status includes whether the server is live, as well as data about version and build. The server data is cached, unless the remote flag is specified. :param remote: whether to query the server for its latest status :return: tuple of status dicts: (local, server). """
if remote:
    components = urlparse.urlparse(self.endpoint)
    try:
        result = self.session.get(components[0] + "://" + components[1] + "/status", timeout=self.timeout)
    except Exception as e:
        if self.logger: self.logger.debug("Failed to connect to server for status: %s", e)
        result = None
    # NB: compare against None rather than relying on truthiness; a requests
    # Response is falsy for error status codes, which would misreport a
    # reachable-but-erroring server as unreachable.
    if result is not None and result.status_code == 200:
        self.server_status = result.json()
        self.server_status["endpoint"] = self.endpoint
    elif result is not None:
        if self.logger: self.logger.debug("Server status response not understandable: Status: %d, Body: %s",
                                          result.status_code, result.text)
        self.server_status = {"endpoint": self.endpoint,
                              "status": ("Unexpected HTTP status " + str(result.status_code) +
                                         " at: " + strftime("%d %b %Y %H:%M:%S +0000", gmtime()))}
    else:
        self.server_status = {"endpoint": self.endpoint,
                              "status": "Unreachable at: " + strftime("%d %b %Y %H:%M:%S +0000", gmtime())}
return self.local_status, self.server_status
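Hypothetical call pattern; `conn` stands in for an instance of the connection class, which this excerpt does not name. Only the `endpoint` and `status` keys are taken from the code above.

local, server = conn.status(remote=True)  # force a fresh probe of the server
print(server["endpoint"])                 # recorded on every server-status dict
print(server.get("status", "up"))         # set when the server is unreachable or odd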
<SYSTEM_TASK:> Add commands at the end of the sequence. <END_TASK> <USER_TASK:> Description: def append(self, **kwargs): """ Add commands at the end of the sequence. Be careful: because this runs in Python 2.x, the order of the kwargs dict may not match the order in which the args were specified. Thus, if you care about specific ordering, you must make multiple calls to append in that order. Luckily, append returns the Action so you can compose easily: Action(...).append(...).append(...). See also insert, below. :param kwargs: the key/value pairs to add :return: the action """
for k, v in six.iteritems(kwargs):
    self.commands.append({k: v})
return self
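A sketch of the chaining pattern mentioned in the docstring. The Action constructor argument and the command names are assumptions for illustration; only the chaining behavior comes from the code above.

action = (Action(user="jdoe@example.com")            # hypothetical constructor argument
          .append(createUser={"firstname": "Jane"})  # hypothetical command name
          .append(add={"group": "Editors"}))         # hypothetical command name
# action.commands now holds the createUser command followed by the add command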
<SYSTEM_TASK:>
Insert commands at the beginning of the sequence.
<END_TASK>
<USER_TASK:>
Description:
def insert(self, **kwargs):
    """
    Insert commands at the beginning of the sequence.

    This is provided because certain commands have to come first (such as user creation),
    but may need to be added after other commands have already been specified.
    Later calls to insert put their commands before those in the earlier calls.

    Also, since the order of iterated kwargs is not guaranteed (in Python 2.x),
    you should really only call insert with one keyword at a time.  See the doc of append
    for more details.
    :param kwargs: the key/value pair to insert first
    :return: the action, so you can chain calls: Action(...).insert(...).append(...)
    """
for k, v in six.iteritems(kwargs):
    self.commands.insert(0, {k: v})
return self
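Combined with append, insert lets an ordering-sensitive command be supplied last in code yet executed first (constructor argument and command names hypothetical, as above):

action = Action(user="jdoe@example.com")
action.append(add={"group": "Editors"})
action.insert(createUser={"firstname": "Jane"})
# action.commands now lists the createUser command before the add command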
<SYSTEM_TASK:>
Report a server error executing a command.
<END_TASK>
<USER_TASK:>
Description:
def report_command_error(self, error_dict):
    """
    Report a server error executing a command.

    We use the command's position ("step") in the command list to annotate the error
    with the command that caused it.
    :param error_dict: The server's error dict for the error encountered
    """
error = dict(error_dict) error["command"] = self.commands[error_dict["step"]] error["target"] = self.frame del error["index"] # throttling can change which action this was in the batch del error["step"] # throttling can change which step this was in the action self.errors.append(error)
<SYSTEM_TASK:>
Return a list of commands that encountered execution errors, with the error.
<END_TASK>
<USER_TASK:>
Description:
def execution_errors(self):
    """
    Return a list of commands that encountered execution errors, with the error.

    Each entry is an error dictionary, annotated with the command that caused it.
    :return: list of error dictionaries for the commands that failed
    """
if self.split_actions:
    # throttling split this action, get errors from the split
    return [dict(e) for s in self.split_actions for e in s.errors]
else:
    return [dict(e) for e in self.errors]
<SYSTEM_TASK:> Fetch the next page of the query. <END_TASK> <USER_TASK:> Description: def _next_page(self): """ Fetch the next page of the query. """
if self._last_page_seen:
    raise StopIteration
new, self._last_page_seen = self.conn.query_multiple(self.object_type, self._next_page_index,
                                                     self.url_params, self.query_params)
self._next_page_index += 1
if len(new) == 0:
    self._last_page_seen = True  # don't bother with next page if nothing was returned
else:
    self._results += new
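The same paging pattern, restated as a standalone generator sketch; `fetch_page` is a hypothetical callable mirroring the (items, last_page_seen) contract of conn.query_multiple above.

def paged(fetch_page):
    index, last_page_seen = 0, False
    while not last_page_seen:
        items, last_page_seen = fetch_page(index)
        if not items:  # an empty page also marks the end
            break
        for item in items:
            yield item
        index += 1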
<SYSTEM_TASK:> Fetch the queried object. <END_TASK> <USER_TASK:> Description: def _fetch_result(self): """ Fetch the queried object. """
self._result = self.conn.query_single(self.object_type, self.url_params, self.query_params)