<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded, neighbors=None, skip_num_points=0): """Runs t-SNE."""
# t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()

opt_args = {
    "it": 0,
    "n_iter_check": self._N_ITER_CHECK,
    "min_grad_norm": self.min_grad_norm,
    "learning_rate": self.learning_rate,
    "verbose": self.verbose,
    "kwargs": dict(skip_num_points=skip_num_points),
    "args": [P, degrees_of_freedom, n_samples, self.n_components],
    "n_iter_without_progress": self._EXPLORATION_N_ITER,
    "n_iter": self._EXPLORATION_N_ITER,
    "momentum": 0.5,
}
if self.method == 'barnes_hut':
    obj_func = _kl_divergence_bh
    opt_args['kwargs']['angle'] = self.angle
    # Repeat verbose argument for _kl_divergence_bh
    opt_args['kwargs']['verbose'] = self.verbose
else:
    obj_func = _kl_divergence

# Learning schedule (part 1): do 250 iterations with lower momentum but
# higher learning rate controlled via the early exaggeration parameter
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
    print("[t-SNE] KL divergence after %d iterations with early "
          "exaggeration: %f" % (it + 1, kl_divergence))

# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
    opt_args['n_iter'] = self.n_iter
    opt_args['it'] = it + 1
    opt_args['momentum'] = 0.8
    opt_args['n_iter_without_progress'] = self.n_iter_without_progress
    params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args)

# Save the final number of iterations
self.n_iter_ = it

if self.verbose:
    print("[t-SNE] Error after %d iterations: %f" % (it + 1, kl_divergence))

X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence

return X_embedded
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def correct(datasets_full, genes_list, return_dimred=False, batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN, return_dense=False, hvg=None, union=False, geosketch=False, geosketch_max=20000): """Integrate and batch correct a list of data sets. Parameters datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray` Data sets to integrate and correct. genes_list: `list` of `list` of `string` List of genes for each data set. return_dimred: `bool`, optional (default: `False`) In addition to returning batch corrected matrices, also returns integrated low-dimensional embeddings. batch_size: `int`, optional (default: `5000`) The batch size used in the alignment vector computation. Useful when correcting very large (>100k samples) data sets. Set to a large value that runs within available memory. verbose: `bool` or `int`, optional (default: 2) When `True` or not equal to 0, prints logging output. ds_names: `list` of `string`, optional When `verbose=True`, reports data set names in logging output. dimred: `int`, optional (default: 100) Dimensionality of integrated embedding. approx: `bool`, optional (default: `True`) Use approximate nearest neighbors, greatly speeds up matching runtime. sigma: `float`, optional (default: 15) Correction smoothing parameter on Gaussian kernel. alpha: `float`, optional (default: 0.10) Alignment score minimum cutoff. knn: `int`, optional (default: 20) Number of nearest neighbors to use for matching. return_dense: `bool`, optional (default: `False`) Return `numpy.ndarray` matrices instead of `scipy.sparse.csr_matrix`. hvg: `int`, optional (default: None) Use this number of top highly variable genes based on dispersion. Returns ------- corrected, genes By default (`return_dimred=False`), returns a two-tuple containing a list of `scipy.sparse.csr_matrix` each with batch corrected values, and a single list of genes containing the intersection of inputted genes. integrated, corrected, genes When `return_dimred=True`, returns a three-tuple containing a list of `numpy.ndarray` with integrated low dimensional embeddings, a list of `scipy.sparse.csr_matrix` each with batch corrected values, and a single list of genes containing the intersection of inputted genes. """
datasets_full = check_datasets(datasets_full)

datasets, genes = merge_datasets(datasets_full, genes_list,
                                 ds_names=ds_names, union=union)
datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,
                                      dimred=dimred)

datasets_dimred = assemble(
    datasets_dimred,  # Assemble in low dimensional space.
    expr_datasets=datasets,  # Modified in place.
    verbose=verbose, knn=knn, sigma=sigma, approx=approx,
    alpha=alpha, ds_names=ds_names, batch_size=batch_size,
    geosketch=geosketch, geosketch_max=geosketch_max,
)

if return_dense:
    datasets = [ds.toarray() for ds in datasets]

if return_dimred:
    return datasets_dimred, datasets, genes

return datasets, genes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def integrate(datasets_full, genes_list, batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN, geosketch=False, geosketch_max=20000, n_iter=1, union=False, hvg=None): """Integrate a list of data sets. Parameters datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray` Data sets to integrate and correct. genes_list: `list` of `list` of `string` List of genes for each data set. batch_size: `int`, optional (default: `5000`) The batch size used in the alignment vector computation. Useful when correcting very large (>100k samples) data sets. Set to a large value that runs within available memory. verbose: `bool` or `int`, optional (default: 2) When `True` or not equal to 0, prints logging output. ds_names: `list` of `string`, optional When `verbose=True`, reports data set names in logging output. dimred: `int`, optional (default: 100) Dimensionality of integrated embedding. approx: `bool`, optional (default: `True`) Use approximate nearest neighbors, greatly speeds up matching runtime. sigma: `float`, optional (default: 15) Correction smoothing parameter on Gaussian kernel. alpha: `float`, optional (default: 0.10) Alignment score minimum cutoff. knn: `int`, optional (default: 20) Number of nearest neighbors to use for matching. hvg: `int`, optional (default: None) Use this number of top highly variable genes based on dispersion. Returns ------- integrated, genes Returns a two-tuple containing a list of `numpy.ndarray` with integrated low dimensional embeddings and a single list of genes containing the intersection of inputted genes. """
datasets_full = check_datasets(datasets_full)

datasets, genes = merge_datasets(datasets_full, genes_list,
                                 ds_names=ds_names, union=union)
datasets_dimred, genes = process_data(datasets, genes, hvg=hvg,
                                      dimred=dimred)

for _ in range(n_iter):
    datasets_dimred = assemble(
        datasets_dimred,  # Assemble in low dimensional space.
        verbose=verbose, knn=knn, sigma=sigma, approx=approx,
        alpha=alpha, ds_names=ds_names, batch_size=batch_size,
        geosketch=geosketch, geosketch_max=geosketch_max,
    )

return datasets_dimred, genes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def correct_scanpy(adatas, **kwargs): """Batch correct a list of `scanpy.api.AnnData`. Parameters adatas : `list` of `scanpy.api.AnnData` Data sets to integrate and/or correct. kwargs : `dict` See documentation for the `correct()` method for a full list of parameters to use for batch correction. Returns ------- corrected By default (`return_dimred=False`), returns a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. integrated, corrected When `return_dimred=True`, returns a two-tuple containing a list of `np.ndarray` with integrated low-dimensional embeddings and a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. """
if 'return_dimred' in kwargs and kwargs['return_dimred']:
    datasets_dimred, datasets, genes = correct(
        [adata.X for adata in adatas],
        [adata.var_names.values for adata in adatas],
        **kwargs
    )
else:
    datasets, genes = correct(
        [adata.X for adata in adatas],
        [adata.var_names.values for adata in adatas],
        **kwargs
    )

new_adatas = []
for i, adata in enumerate(adatas):
    adata.X = datasets[i]
    new_adatas.append(adata)

if 'return_dimred' in kwargs and kwargs['return_dimred']:
    return datasets_dimred, new_adatas
else:
    return new_adatas
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def integrate_scanpy(adatas, **kwargs): """Integrate a list of `scanpy.api.AnnData`. Parameters adatas : `list` of `scanpy.api.AnnData` Data sets to integrate. kwargs : `dict` See documentation for the `integrate()` method for a full list of parameters to use for batch correction. Returns ------- integrated Returns a list of `np.ndarray` with integrated low-dimensional embeddings. """
datasets_dimred, genes = integrate(
    [adata.X for adata in adatas],
    [adata.var_names.values for adata in adatas],
    **kwargs
)

return datasets_dimred
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def augknt(knots, order): """Augment a knot vector. Parameters: knots: Python list or rank-1 array, the original knot vector (without endpoint repeats) order: int, >= 0, order of spline Returns: list_of_knots: rank-1 array that has (`order` + 1) copies of ``knots[0]``, then ``knots[1:-1]``, and finally (`order` + 1) copies of ``knots[-1]``. Caveats: `order` is the spline order `p`, not `p` + 1, and existing knots are never deleted. The knot vector always becomes longer by calling this function. """
if isinstance(knots, np.ndarray) and knots.ndim > 1:
    raise ValueError("knots must be a list or a rank-1 array")

knots = list(knots)  # ensure Python list

# One copy of knots[0] and knots[-1] will come from "knots" itself,
# so we only need to prepend/append "order" copies.
#
return np.array([knots[0]] * order + knots + [knots[-1]] * order)
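As a quick sanity check, a hypothetical session (assuming `numpy` is imported as `np` and `augknt` is in scope) illustrates the endpoint repeats:

# order p = 2: each endpoint appears order + 1 = 3 times in the result
print(augknt([0., 1., 2., 3.], order=2))
# -> [0. 0. 0. 1. 2. 3. 3. 3.]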
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def aveknt(t, k): """Compute the running average of `k` successive elements of `t`. Return the averaged array. Parameters: t: Python list or rank-1 array k: int, >= 2, how many successive elements to average Returns: rank-1 array, averaged data. If k > len(t), returns a zero-length array. Caveat: This is slightly different from MATLAB's aveknt, which returns the running average of `k`-1 successive elements of ``t[1:-1]`` (and the empty vector if ``len(t) - 2 < k - 1``). """
t = np.atleast_1d(t)
if t.ndim > 1:
    raise ValueError("t must be a list or a rank-1 array")

n = t.shape[0]
u = max(0, n - (k - 1))  # number of elements in the output array
out = np.empty((u,), dtype=t.dtype)
for j in range(u):
    out[j] = sum(t[j:(j + k)]) / k
return out
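A short hypothetical example of the averaging behavior (assuming `aveknt` is in scope):

print(aveknt([0., 1., 2., 3., 4.], 3))  # averages of 3 successive elements
# -> [1. 2. 3.]
print(aveknt([0., 1.], 3))              # k > len(t): zero-length array
# -> []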
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def aptknt(tau, order): """Create an acceptable knot vector. Minimal emulation of MATLAB's ``aptknt``. The returned knot vector can be used to generate splines of desired `order` that are suitable for interpolation to the collocation sites `tau`. Note that this is only possible when ``len(tau)`` >= `order` + 1. When this condition does not hold, a valid knot vector is returned, but using it to generate a spline basis will not have the desired effect (the spline will return a length-zero array upon evaluation). Parameters: tau: Python list or rank-1 array, collocation sites order: int, >= 0, order of spline Returns: rank-1 array, `k` copies of ``tau[0]``, then ``aveknt(tau[1:-1], k-1)``, and finally `k` copies of ``tau[-1]``, where ``k = min(order+1, len(tau))``. """
tau = np.atleast_1d(tau)
k = order + 1
if tau.ndim > 1:
    raise ValueError("tau must be a list or a rank-1 array")

# emulate MATLAB behavior for the "k" parameter
#
# See
#   https://se.mathworks.com/help/curvefit/aptknt.html
#
if len(tau) < k:
    k = len(tau)

if not (tau == sorted(tau)).all():
    raise ValueError("tau must be nondecreasing")

# last processed element needs to be:
#      i + k - 1 = len(tau) - 1
# =>   i + k     = len(tau)
# =>   i         = len(tau) - k
#
u = len(tau) - k
for i in range(u):
    if tau[i + k - 1] == tau[i]:
        raise ValueError("k-fold (or higher) repeated sites not allowed, but tau[i+k-1] == tau[i] for i = %d, k = %d" % (i, k))

# form the output sequence
#
prefix = [tau[0]] * k
suffix = [tau[-1]] * k

# https://se.mathworks.com/help/curvefit/aveknt.html
# MATLAB's aveknt():
#   - averages successive k-1 entries, but ours averages k
#   - seems to ignore the endpoints
#
tmp = aveknt(tau[1:-1], k - 1)
middle = tmp.tolist()

return np.array(prefix + middle + suffix, dtype=tmp.dtype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def knt2mlt(t): """Count multiplicities of elements in a sorted list or rank-1 array. Minimal emulation of MATLAB's ``knt2mlt``. Parameters: t: Python list or rank-1 array. Must be sorted! Returns: out rank-1 array such that out[k] = #{ t[i] == t[k] for i < k } Example: If ``t = [1, 1, 2, 3, 3, 3]``, then ``out = [0, 1, 0, 0, 1, 2]``. Caveat: Requires input to be already sorted (this is not checked). """
t = np.atleast_1d(t)
if t.ndim > 1:
    raise ValueError("t must be a list or a rank-1 array")

out = []
e = None
for k in range(t.shape[0]):
    if t[k] != e:
        e = t[k]
        count = 0
    else:
        count += 1
    out.append(count)

return np.array(out)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spcol(knots, order, tau): """Return collocation matrix. Minimal emulation of MATLAB's ``spcol``. Parameters: knots: rank-1 array, knot vector (with appropriately repeated endpoints; see `augknt`, `aptknt`) order: int, >= 0, order of spline tau: rank-1 array, collocation sites Returns: rank-2 array A such that A[i,j] = D**{m(i)} B_j(tau[i]) where m(i) = multiplicity of site tau[i] D**k = kth derivative (0 for function value itself) """
m = knt2mlt(tau)
B = bspline.Bspline(knots, order)

dummy = B(0.)
nbasis = len(dummy)  # perform dummy evaluation to get number of basis functions

A = np.empty((tau.shape[0], nbasis), dtype=dummy.dtype)
for i, item in enumerate(zip(tau, m)):
    taui, mi = item
    f = B.diff(order=mi)
    A[i, :] = f(taui)

return A
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def d(self, xi): """Convenience function to compute first derivative of basis functions. 'Memoized' for speed."""
return self.__basis(xi, self.p, compute_derivatives=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot(self): """Plot basis functions over full range of knots. Convenience function. Requires matplotlib. """
try:
    import matplotlib.pyplot as plt
except ImportError:
    from sys import stderr
    print("ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function", file=stderr)
    raise

x_min = np.min(self.knot_vector)
x_max = np.max(self.knot_vector)

x = np.linspace(x_min, x_max, num=1000)

N = np.array([self(i) for i in x]).T

for n in N:
    plt.plot(x, n)

return plt.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __diff_internal(self): """Differentiate a B-spline once, and return the resulting coefficients and Bspline objects. This preserves the Bspline object nature of the data, enabling recursive implementation of higher-order differentiation (see `diff`). The value of the first derivative of `B` at a point `x` can be obtained as:: def diff1(B, x): terms = B.__diff_internal() return sum( ci*Bi(x) for ci,Bi in terms ) Returns: tuple of tuples, where each item is (coefficient, Bspline object). See: `diff`: differentiation of any order >= 0 """
assert self.p > 0, "order of Bspline must be > 0"  # we already handle the other case in diff()

# https://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-derv.html
#
t = self.knot_vector
p = self.p
Bi = Bspline(t[:-1], p - 1)
Bip1 = Bspline(t[1:], p - 1)

numer1 = +p
numer2 = -p
denom1 = t[p:-1] - t[:-(p + 1)]
denom2 = t[(p + 1):] - t[1:-p]

with np.errstate(divide='ignore', invalid='ignore'):
    ci = np.where(denom1 != 0., (numer1 / denom1), 0.)
    cip1 = np.where(denom2 != 0., (numer2 / denom2), 0.)

return ((ci, Bi), (cip1, Bip1))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diff(self, order=1): """Differentiate a B-spline `order` number of times. Parameters: order: int, >= 0 Returns: The returned function internally uses __call__, which is 'memoized' for speed. """
order = int(order)
if order < 0:
    raise ValueError("order must be >= 0, got %d" % (order))

if order == 0:
    return self.__call__

if order > self.p:  # identically zero, but force the same output format as in the general case
    dummy = self.__call__(0.)  # get number of basis functions and output dtype
    nbasis = dummy.shape[0]
    return lambda x: np.zeros((nbasis,), dtype=dummy.dtype)  # accept but ignore input x

# At each differentiation, each term maps into two new terms.
# The number of terms in the result will be 2**order.
#
# This will cause an exponential explosion in the number of terms for high derivative orders,
# but for the first few orders (practical usage; >3 is rarely needed) the approach works.
#
terms = [(1., self)]
for k in range(order):
    tmp = []
    for Ci, Bi in terms:
        tmp.extend((Ci * cn, Bn) for cn, Bn in Bi.__diff_internal())  # NOTE: also propagate Ci
    terms = tmp

# perform final summation at call time
return lambda x: sum(ci * Bi(x) for ci, Bi in terms)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def collmat(self, tau, deriv_order=0): """Compute collocation matrix. Parameters: tau: Python list or rank-1 array, collocation sites deriv_order: int, >=0, order of derivative for which to compute the collocation matrix. The default is 0, which means the function value itself. Returns: A: if len(tau) > 1, rank-2 array such that A[i,j] = D**deriv_order B_j(tau[i]) where D**k = kth derivative (0 for function value itself) if len(tau) == 1, rank-1 array such that A[j] = D**deriv_order B_j(tau) Example: If the coefficients of a spline function are given in the vector c, then:: np.sum( A*c, axis=-1 ) will give a rank-1 array of function values at the sites tau[i] that were supplied to `collmat`. Similarly for derivatives (if the supplied `deriv_order`> 0). """
# get number of basis functions and output dtype
dummy = self.__call__(0.)
nbasis = dummy.shape[0]

tau = np.atleast_1d(tau)
if tau.ndim > 1:
    raise ValueError("tau must be a list or a rank-1 array")

A = np.empty((tau.shape[0], nbasis), dtype=dummy.dtype)
f = self.diff(order=deriv_order)
for i, taui in enumerate(tau):
    A[i, :] = f(taui)

return np.squeeze(A)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_normalized_request_string(method, url, nonce, params, ext='', body_hash=None): """ Returns a normalized request string as described in the OAuth2 MAC spec. http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.3.1 """
urlparts = urlparse.urlparse(url)
if urlparts.query:
    norm_url = '%s?%s' % (urlparts.path, urlparts.query)
elif params:
    norm_url = '%s?%s' % (urlparts.path, get_normalized_params(params))
else:
    norm_url = urlparts.path

if not body_hash:
    body_hash = get_body_hash(params)

port = urlparts.port
if not port:
    assert urlparts.scheme in ('http', 'https')
    if urlparts.scheme == 'http':
        port = 80
    elif urlparts.scheme == 'https':
        port = 443

output = [nonce, method.upper(), norm_url, urlparts.hostname, port,
          body_hash, ext, '']

return '\n'.join(map(str, output))
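To make the field order concrete, here is a hypothetical call (the nonce and body hash are placeholders, not real credentials); per the spec and the code above, the result is one field per line, with an empty ext line and a trailing newline:

s = get_normalized_request_string(
    method='get',
    url='http://example.com/resource/1?b=1&a=2',
    nonce='264095:dj83hs9s',
    params={},
    ext='',
    body_hash='<body-hash-placeholder>',
)
# s == '264095:dj83hs9s\nGET\n/resource/1?b=1&a=2\nexample.com\n80\n<body-hash-placeholder>\n\n'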
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_viterbi_paths(self): """ Computes the viterbi paths using the current HMM model """
# get parameters
K = len(self._observations)
A = self._hmm.transition_matrix
pi = self._hmm.initial_distribution

# compute viterbi path for each trajectory
paths = np.empty(K, dtype=object)
for itraj in range(K):
    obs = self._observations[itraj]
    # compute output probability matrix
    pobs = self._hmm.output_model.p_obs(obs)
    # hidden path
    paths[itraj] = hidden.viterbi(A, pobs, pi)

# done
return paths
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit(self): """ Maximum-likelihood estimation of the HMM using the Baum-Welch algorithm Returns ------- model : HMM The maximum likelihood HMM model. """
logger().info("=================================================================")
logger().info("Running Baum-Welch:")
logger().info("  input observations: " + str(self.nobservations) + " of lengths " + str(self.observation_lengths))
logger().info("  initial HMM guess:" + str(self._hmm))

initial_time = time.time()

it = 0
self._likelihoods = np.zeros(self.maxit)
loglik = 0.0
# flag if connectivity has changed (e.g. state lost) - in that case the likelihood
# is discontinuous and can't be used as a convergence criterion in that iteration.
tmatrix_nonzeros = self.hmm.transition_matrix.nonzero()
converged = False

while not converged and it < self.maxit:
    # self._fbtimings = np.zeros(5)
    t1 = time.time()
    loglik = 0.0
    for k in range(self._nobs):
        loglik += self._forward_backward(k)
    assert np.isfinite(loglik), it
    t2 = time.time()

    # convergence check
    if it > 0:
        dL = loglik - self._likelihoods[it - 1]
        # print 'dL ', dL, 'iter_P ', maxiter_P
        if dL < self._accuracy:
            # print "CONVERGED! Likelihood change = ", (loglik - self.likelihoods[it-1])
            converged = True

    # update model
    self._update_model(self._gammas, self._Cs, maxiter=self._maxit_P)
    t3 = time.time()

    # connectivity change check
    tmatrix_nonzeros_new = self.hmm.transition_matrix.nonzero()
    if not np.array_equal(tmatrix_nonzeros, tmatrix_nonzeros_new):
        converged = False  # unset converged
        tmatrix_nonzeros = tmatrix_nonzeros_new

    # print 't_fb: ', str(1000.0*(t2-t1)), 't_up: ', str(1000.0*(t3-t2)), 'L = ', loglik, 'dL = ', (loglik - self._likelihoods[it-1])
    # print '  fb timings (ms): pobs', (1000.0*self._fbtimings).astype(int)
    logger().info(str(it) + " ll = " + str(loglik))
    # print self.model.output_model
    # print "---------------------"

    # end of iteration
    self._likelihoods[it] = loglik
    it += 1

# final update with high precision
# self._update_model(self._gammas, self._Cs, maxiter=10000000)

# truncate likelihood history
self._likelihoods = self._likelihoods[:it]
# set final likelihood
self._hmm.likelihood = loglik
# set final count matrix
self.count_matrix = self._transition_counts(self._Cs)
self.initial_count = self._init_counts(self._gammas)

final_time = time.time()
elapsed_time = final_time - initial_time

logger().info("maximum likelihood HMM:" + str(self._hmm))
logger().info("Elapsed time for Baum-Welch solution: %.3f s" % elapsed_time)
logger().info("Computing Viterbi path:")

initial_time = time.time()

# Compute hidden state trajectories using the Viterbi algorithm.
self._hmm.hidden_state_trajectories = self.compute_viterbi_paths()

final_time = time.time()
elapsed_time = final_time - initial_time

logger().info("Elapsed time for Viterbi path computation: %.3f s" % elapsed_time)
logger().info("=================================================================")

return self._hmm
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1, random_state=None): """Generate random samples from a Gaussian distribution. Parameters mean : array_like, shape (n_features,) Mean of the distribution. covar : array_like, optional Covariance of the distribution. The shape depends on `covariance_type`: scalar if 'spherical', (n_features) if 'diag', (n_features, n_features) if 'tied', or 'full' covariance_type : string, optional Type of the covariance parameters. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. n_samples : int, optional Number of samples to generate. Defaults to 1. Returns ------- X : array, shape (n_features, n_samples) Randomly generated sample """
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
    rand.shape = (n_dim,)

if covariance_type == 'spherical':
    rand *= np.sqrt(covar)
elif covariance_type == 'diag':
    rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
    s, U = linalg.eigh(covar)
    s.clip(0, out=s)  # get rid of tiny negatives
    np.sqrt(s, out=s)
    U *= s
    rand = np.dot(U, rand)

return (rand.T + mean).T
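A brief usage sketch (hypothetical numbers) for the 'full' branch, which draws correlated samples via the eigendecomposition above:

import numpy as np

mean = np.array([0., 10.])
covar = np.array([[2.0, 0.8],
                  [0.8, 1.0]])
X = sample_gaussian(mean, covar, covariance_type='full',
                    n_samples=5000, random_state=0)
print(X.shape)             # (2, 5000): (n_features, n_samples)
print(np.cov(X).round(1))  # should approximate covar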
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): """Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _covar_mstep_spherical(*args): """Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm, min_covar): """Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
    post = responsibilities[:, c]
    mu = gmm.means_[c]
    diff = X - mu
    with np.errstate(under='ignore'):
        # Underflow Errors in doing post * X.T are not important
        avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
    cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_covars(self, covars): """Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def score_samples(self, X): """Return the per-sample likelihood of the data under the model. Compute the log probability of X under the model and return the posterior distribution (responsibilities) of each mixture component for each element of X. Parameters X: array_like, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- logprob : array_like, shape (n_samples,) Log probabilities of each data point in X. responsibilities : array_like, shape (n_samples, n_components) Posterior probabilities of each mixture component for each observation """
check_is_fitted(self, 'means_')

X = check_array(X)
if X.ndim == 1:
    X = X[:, np.newaxis]
if X.size == 0:
    return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
    raise ValueError('The shape of X is not compatible with self')

lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
                                       self.covariance_type) +
       np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def predict(self, X): """Predict label for data. Parameters X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = (n_samples,) """
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit(self, X, y=None): """Estimate model parameters with the expectation-maximization algorithm. An initialization step is performed before entering the EM algorithm. If you want to avoid this step, set the keyword argument init_params to the empty string '' when creating the GMM object. Likewise, if you would like just to do an initialization, set n_iter=0. Parameters X : array_like, shape (n, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. """
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
    raise ValueError(
        'GMM estimation with %s components, but got only %s samples' %
        (self.n_components, X.shape[0]))

max_log_prob = -np.infty

for _ in range(self.n_init):
    if 'm' in self.init_params or not hasattr(self, 'means_'):
        if np.issubdtype(X.dtype, np.float32):
            from bhmm._external.clustering.kmeans_clustering_32 import init_centers
        elif np.issubdtype(X.dtype, np.float64):
            from bhmm._external.clustering.kmeans_clustering_64 import init_centers
        else:
            raise ValueError("Could not handle dtype %s for clustering!" % X.dtype)
        centers = init_centers(X, 'euclidean', self.n_components)
        self.means_ = centers

    if 'w' in self.init_params or not hasattr(self, 'weights_'):
        self.weights_ = np.tile(1.0 / self.n_components, self.n_components)

    if 'c' in self.init_params or not hasattr(self, 'covars_'):
        cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
        if not cv.shape:
            cv.shape = (1, 1)
        self.covars_ = \
            distribute_covar_matrix_to_match_covariance_type(
                cv, self.covariance_type, self.n_components)

    # EM algorithms
    current_log_likelihood = None
    # reset self.converged_ to False
    self.converged_ = False

    # this line should be removed when 'thresh' is removed in v0.18
    tol = (self.tol if self.thresh is None
           else self.thresh / float(X.shape[0]))

    for i in range(self.n_iter):
        prev_log_likelihood = current_log_likelihood
        # Expectation step
        log_likelihoods, responsibilities = self.score_samples(X)
        current_log_likelihood = log_likelihoods.mean()

        # Check for convergence.
        # (should compare to self.tol when deprecated 'thresh' is
        # removed in v0.18)
        if prev_log_likelihood is not None:
            change = abs(current_log_likelihood - prev_log_likelihood)
            if change < tol:
                self.converged_ = True
                break

        # Maximization step
        self._do_mstep(X, responsibilities, self.params,
                       self.min_covar)

    # if the results are better, keep it
    if self.n_iter:
        if current_log_likelihood > max_log_prob:
            max_log_prob = current_log_likelihood
            best_params = {'weights': self.weights_,
                           'means': self.means_,
                           'covars': self.covars_}

# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
    raise RuntimeError(
        "EM algorithm was never able to compute a valid likelihood " +
        "given initial parameters. Try different init parameters " +
        "(or increasing n_init) or check for degenerate data.")

# self.n_iter == 0 occurs when using GMM within HMM
if self.n_iter:
    self.covars_ = best_params['covars']
    self.means_ = best_params['means']
    self.weights_ = best_params['weights']
return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _do_mstep(self, X, responsibilities, params, min_covar=0): """ Perform the M step of the EM algorithm and return the class weights. """
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)

if 'w' in params:
    self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
    self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
    covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
    self.covars_ = covar_mstep_func(
        self, X, responsibilities, weighted_X_sum,
        inverse_weights, min_covar)
return weights
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _n_parameters(self): """Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
    cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
    cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
    cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
    cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bic(self, X): """Bayesian information criterion for the current model fit and the proposed data Parameters X : array of shape(n_samples, n_dimensions) Returns ------- bic: float (the lower the better) """
return (-2 * self.score(X).sum() + self._n_parameters() * np.log(X.shape[0]))
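For reference, this is the standard criterion

.. math:: \mathrm{BIC} = -2 \ln \hat{L} + k \ln n

where :math:`\hat{L}` is the maximized likelihood (``self.score(X).sum()`` above), :math:`k` is the number of free parameters from ``_n_parameters()``, and :math:`n` is the number of samples.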
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init_model_gaussian1d(observations, nstates, reversible=True): """Generate an initial model with 1D-Gaussian output densities Parameters observations : list of ndarray((T_i), dtype=float) list of arrays of length T_i with observation data nstates : int The number of states. Examples -------- Generate initial model for a gaussian output model. """
ntrajectories = len(observations)

# Concatenate all observations.
collected_observations = np.array([], dtype=config.dtype)
for o_t in observations:
    collected_observations = np.append(collected_observations, o_t)

# Fit a Gaussian mixture model to obtain emission distributions and state stationary probabilities.
from bhmm._external.sklearn import mixture
gmm = mixture.GMM(n_components=nstates)
gmm.fit(collected_observations[:, None])
from bhmm import GaussianOutputModel
output_model = GaussianOutputModel(nstates, means=gmm.means_[:, 0], sigmas=np.sqrt(gmm.covars_[:, 0]))

logger().info("Gaussian output model:\n" + str(output_model))

# Extract stationary distributions.
Pi = np.zeros([nstates], np.float64)
Pi[:] = gmm.weights_[:]

logger().info("GMM weights: %s" % str(gmm.weights_))

# Compute fractional state memberships.
Nij = np.zeros([nstates, nstates], np.float64)
for o_t in observations:
    # length of trajectory
    T = o_t.shape[0]
    # output probability
    pobs = output_model.p_obs(o_t)
    # normalize
    pobs /= pobs.sum(axis=1)[:, None]
    # Accumulate fractional transition counts from this trajectory.
    for t in range(T - 1):
        Nij[:, :] = Nij[:, :] + np.outer(pobs[t, :], pobs[t + 1, :])

logger().info("Nij\n" + str(Nij))

# Compute transition matrix maximum likelihood estimate.
import msmtools.estimation as msmest
import msmtools.analysis as msmana
Tij = msmest.transition_matrix(Nij, reversible=reversible)
pi = msmana.stationary_distribution(Tij)

# Update model.
model = HMM(pi, Tij, output_model)
return model
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _p_o(self, o): """ Returns the output probability for symbol o from all hidden states Parameters o : float A single observation. Return ------ p_o : ndarray (N) p_o[i] is the probability density of the observation o from state i emission distribution Examples -------- Create an observation model. Compute the output probability of a single observation from all hidden states. """
if self.__impl__ == self.__IMPL_C__:
    return gc.p_o(o, self.means, self.sigmas, out=None, dtype=type(o))
elif self.__impl__ == self.__IMPL_PYTHON__:
    if np.any(self.sigmas < np.finfo(self.sigmas.dtype).eps):
        raise RuntimeError('at least one sigma is too small to continue.')
    C = 1.0 / (np.sqrt(2.0 * np.pi) * self.sigmas)
    Pobs = C * np.exp(-0.5 * ((o - self.means) / self.sigmas) ** 2)
    return Pobs
else:
    raise RuntimeError('Implementation ' + str(self.__impl__) + ' not available')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def estimate(self, observations, weights): """ Fits the output model given the observations and weights Parameters observations : [ ndarray(T_k,) ] with K elements A list of K observation trajectories, each having length T_k and d dimensions weights : [ ndarray(T_k,nstates) ] with K elements A list of K weight matrices, each having length T_k weights[k][t,n] is the weight assignment from observations[k][t] to state index n Examples -------- Generate an observation model and samples from each state. Update the observation model parameters by a maximum-likelihood fit. """
# sizes
N = self.nstates
K = len(observations)

# fit means
self._means = np.zeros(N)
w_sum = np.zeros(N)
for k in range(K):
    # update numerator
    for i in range(N):
        self.means[i] += np.dot(weights[k][:, i], observations[k])
    # update denominator
    w_sum += np.sum(weights[k], axis=0)
# normalize
self._means /= w_sum

# fit variances
self._sigmas = np.zeros(N)
w_sum = np.zeros(N)
for k in range(K):
    # update numerator
    for i in range(N):
        Y = (observations[k] - self.means[i]) ** 2
        self.sigmas[i] += np.dot(weights[k][:, i], Y)
    # update denominator
    w_sum += np.sum(weights[k], axis=0)
# normalize
self._sigmas /= w_sum
self._sigmas = np.sqrt(self.sigmas)

if np.any(self._sigmas < np.finfo(self._sigmas.dtype).eps):
    raise RuntimeError('at least one sigma is too small to continue.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initial_distribution_samples(self): r""" Samples of the initial distribution """
res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
    res[i, :] = self._sampled_hmms[i].stationary_distribution
return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transition_matrix_samples(self): r""" Samples of the transition matrix """
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
    res[i, :, :] = self._sampled_hmms[i].transition_matrix
return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eigenvalues_samples(self): r""" Samples of the eigenvalues """
res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
    res[i, :] = self._sampled_hmms[i].eigenvalues
return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eigenvectors_left_samples(self): r""" Samples of the left eigenvectors of the hidden transition matrix """
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
    res[i, :, :] = self._sampled_hmms[i].eigenvectors_left
return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eigenvectors_right_samples(self): r""" Samples of the right eigenvectors of the hidden transition matrix """
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
    res[i, :, :] = self._sampled_hmms[i].eigenvectors_right
return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def log_p_obs(self, obs, out=None, dtype=np.float32): """ Returns the element-wise logarithm of the output probabilities for an entire trajectory and all hidden states This is a default implementation that will take the log of p_obs(obs) and should only be used if p_obs(obs) is numerically stable. If there is any danger of running into numerical problems *during* the calculation of p_obs, this function should be overwritten in order to compute the log-probabilities directly. Parameters obs : ndarray((T), dtype=int) a discrete trajectory of length T Return ------ p_o : ndarray (T,N) the log probability of generating the symbol at time point t from any of the N hidden states """
if out is None:
    return np.log(self.p_obs(obs))
else:
    self.p_obs(obs, out=out, dtype=dtype)
    np.log(out, out=out)
    return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def coarse_grain_transition_matrix(P, M): """ Coarse grain transition matrix P using memberships M Computes .. math:: P_c = (M^\top M)^{-1} M^\top P M Parameters P : ndarray(n, n) microstate transition matrix M : ndarray(n, m) membership matrix. Membership to macrostate m for each microstate. Returns ------- Pc : ndarray(m, m) coarse-grained transition matrix. """
# coarse-grain matrix: Pc = (M' M)^-1 M' P M
W = np.linalg.inv(np.dot(M.T, M))
A = np.dot(np.dot(M.T, P), M)
P_coarse = np.dot(W, A)

# this coarse-graining can lead to negative elements. Setting them to zero here.
P_coarse = np.maximum(P_coarse, 0)
# and renormalize
P_coarse /= P_coarse.sum(axis=1)[:, None]

return P_coarse
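A small numerical check (hypothetical values): coarse-graining a 3-microstate chain onto 2 macrostates with a hard membership matrix.

import numpy as np

P = np.array([[0.8, 0.2, 0.0],
              [0.2, 0.7, 0.1],
              [0.0, 0.3, 0.7]])
M = np.array([[1., 0.],   # microstates 0 and 1 -> macrostate 0
              [1., 0.],
              [0., 1.]])  # microstate 2 -> macrostate 1
print(coarse_grain_transition_matrix(P, M))
# -> [[0.95 0.05]
#     [0.3  0.7 ]]   (rows sum to 1)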
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def regularize_hidden(p0, P, reversible=True, stationary=False, C=None, eps=None): """ Regularizes the hidden initial distribution and transition matrix. Makes sure that the hidden initial distribution and transition matrix have nonzero probabilities by setting them to eps and then renormalizing. Avoids zeros that would cause estimation algorithms to crash or get stuck in suboptimal states. Parameters p0 : ndarray(n) Initial hidden distribution of the HMM P : ndarray(n, n) Hidden transition matrix reversible : bool HMM is reversible. Will make sure it is still reversible after modification. stationary : bool p0 is the stationary distribution of P. In this case, will not regularize p0 separately. If stationary=False, the regularization will be applied to p0. C : ndarray(n, n) Hidden count matrix. Only needed for stationary=True and P disconnected. eps : float or None minimum value of the resulting transition matrix. Default: evaluates to 0.01 / n. The coarse-graining equation can lead to negative elements and thus eps should be set to at least 0. Positive settings of eps are similar to a prior and enforce minimum positive values for all transition probabilities. Return ------ p0 : ndarray(n) regularized initial distribution P : ndarray(n, n) regularized transition matrix """
# input
n = P.shape[0]
if eps is None:  # default output probability, in order to avoid zero columns
    eps = 0.01 / n

# REGULARIZE P
P = np.maximum(P, eps)
# and renormalize
P /= P.sum(axis=1)[:, None]
# ensure reversibility
if reversible:
    P = _tmatrix_disconnected.enforce_reversible_on_closed(P)

# REGULARIZE p0
if stationary:
    p0 = _tmatrix_disconnected.stationary_distribution(P, C=C)
else:
    p0 = np.maximum(p0, eps)
    p0 /= p0.sum()

return p0, P
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def regularize_pobs(B, nonempty=None, separate=None, eps=None): """ Regularizes the output probabilities. Makes sure that the output probability distributions have nonzero probabilities by setting them to eps and then renormalizing. Avoids zeros that would cause estimation algorithms to crash or get stuck in suboptimal states. Parameters B : ndarray(n, m) HMM output probabilities nonempty : None or iterable of int Nonempty set. Only regularize on this subset. separate : None or iterable of int Force the given set of observed states to stay in a separate hidden state. The remaining nstates-1 states will be assigned by a metastable decomposition. eps : float or None minimum value of the resulting output probabilities. Default: evaluates to 0.01 / m. Returns ------- B : ndarray(n, m) Regularized output probabilities """
# input
B = B.copy()  # modify copy
n, m = B.shape  # number of hidden / observable states
if eps is None:  # default output probability, in order to avoid zero columns
    eps = 0.01 / m

# observable sets
if nonempty is None:
    nonempty = np.arange(m)

if separate is None:
    B[:, nonempty] = np.maximum(B[:, nonempty], eps)
else:
    nonempty_nonseparate = np.array(list(set(nonempty) - set(separate)), dtype=int)
    nonempty_separate = np.array(list(set(nonempty).intersection(set(separate))), dtype=int)
    B[:n - 1, nonempty_nonseparate] = np.maximum(B[:n - 1, nonempty_nonseparate], eps)
    B[n - 1, nonempty_separate] = np.maximum(B[n - 1, nonempty_separate], eps)

# renormalize and return copy
B /= B.sum(axis=1)[:, None]
return B
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init_discrete_hmm_ml(C_full, nstates, reversible=True, stationary=True, active_set=None, P=None, eps_A=None, eps_B=None, separate=None): """Initializes discrete HMM using maximum likelihood of observation counts"""
raise NotImplementedError('ML-initialization not yet implemented')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self, Pi, Tij): r""" Updates the transition matrix and recomputes all derived quantities """
from msmtools import analysis as msmana

# update transition matrix by copy
self._Tij = np.array(Tij)
assert msmana.is_transition_matrix(self._Tij), 'Given transition matrix is not a stochastic matrix'
assert self._Tij.shape[0] == self._nstates, 'Given transition matrix has unexpected number of states'
# reset spectral decomposition
self._spectral_decomp_available = False

# check initial distribution
assert np.all(Pi >= 0), 'Given initial distribution contains negative elements.'
assert np.any(Pi > 0), 'Given initial distribution is zero'
self._Pi = np.array(Pi) / np.sum(Pi)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_stationary(self): r""" Whether the MSM is stationary, i.e. whether the initial distribution is the stationary distribution of the hidden transition matrix. """
# for disconnected matrices, the stationary distribution depends on the estimator, so we can't compute
# it directly. Therefore we test whether the initial distribution is stationary.
return np.allclose(np.dot(self._Pi, self._Tij), self._Pi)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stationary_distribution(self): r""" Compute stationary distribution of hidden states if possible. Raises ------ ValueError if the HMM is not stationary """
assert _tmatrix_disconnected.is_connected(self._Tij, strong=False), \
    'No unique stationary distribution because transition matrix is not connected'
import msmtools.analysis as msmana
return msmana.stationary_distribution(self._Tij)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def timescales(self): r""" Relaxation timescales of the hidden transition matrix Returns ------- ts : ndarray(m) relaxation timescales in units of the input trajectory time step, defined by :math:`t_i = -\tau / \ln |\lambda_i|`, where :math:`\lambda_i` are the hidden transition matrix eigenvalues. """
from msmtools.analysis.dense.decomposition import timescales_from_eigenvalues as _timescales

self._ensure_spectral_decomposition()
ts = _timescales(self._eigenvalues, tau=self._lag)
return ts[1:]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lifetimes(self): r""" Lifetimes of states of the hidden transition matrix Returns ------- l : ndarray(nstates) state lifetimes in units of the input trajectory time step, defined by :math:`l_i = -\tau / \ln |p_{ii}|`, where :math:`p_{ii}` are the diagonal entries of the hidden transition matrix. """
return -self._lag / np.log(np.diag(self.transition_matrix))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sub_hmm(self, states): r""" Returns HMM on a subset of states Returns the HMM restricted to the selected subset of states. Will raise exception if the hidden transition matrix cannot be normalized on this subset """
# restrict initial distribution
pi_sub = self._Pi[states]
pi_sub /= pi_sub.sum()

# restrict transition matrix
P_sub = self._Tij[states, :][:, states]
# checks if this selection is possible
assert np.all(P_sub.sum(axis=1) > 0), \
    'Illegal sub_hmm request: transition matrix cannot be normalized on ' + str(states)
P_sub /= P_sub.sum(axis=1)[:, None]

# restrict output model
out_sub = self.output_model.sub_output_model(states)

return HMM(pi_sub, P_sub, out_sub, lag=self.lag)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count_matrix(self): # TODO: does this belong here or to the BHMM sampler, or in a subclass containing HMM with data? """Compute the transition count matrix from hidden state trajectory. Returns ------- C : numpy.array with shape (nstates,nstates) C[i,j] is the number of transitions observed from state i to state j Raises ------ RuntimeError A RuntimeError is raised if the HMM model does not yet have a hidden state trajectory associated with it. Examples -------- """
if self.hidden_state_trajectories is None:
    raise RuntimeError('HMM model does not have a hidden state trajectory.')
C = msmest.count_matrix(self.hidden_state_trajectories, 1, nstates=self._nstates)
return C.toarray()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count_init(self): """Compute the counts at the first time step Returns ------- n : ndarray(nstates) n[i] is the number of trajectories starting in state i """
if self.hidden_state_trajectories is None:
    raise RuntimeError('HMM model does not have a hidden state trajectory.')
n = [traj[0] for traj in self.hidden_state_trajectories]
return np.bincount(n, minlength=self.nstates)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def collect_observations_in_state(self, observations, state_index): # TODO: this would work well in a subclass with data """Collect a vector of all observations belonging to a specified hidden state. Parameters observations : list of numpy.array List of observed trajectories. state_index : int The index of the hidden state for which corresponding observations are to be retrieved. Returns ------- collected_observations : numpy.array with shape (nsamples,) The collected vector of observations belonging to the specified hidden state; the dtype matches that of the first observation trajectory. Raises ------ RuntimeError A RuntimeError is raised if the HMM model does not yet have a hidden state trajectory associated with it. """
if not self.hidden_state_trajectories:
    raise RuntimeError('HMM model does not have a hidden state trajectory.')

dtype = observations[0].dtype
collected_observations = np.array([], dtype=dtype)
for (s_t, o_t) in zip(self.hidden_state_trajectories, observations):
    indices = np.where(s_t == state_index)[0]
    collected_observations = np.append(collected_observations, o_t[indices])

return collected_observations
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_synthetic_state_trajectory(self, nsteps, initial_Pi=None, start=None, stop=None, dtype=np.int32): """Generate a synthetic state trajectory. Parameters nsteps : int Number of steps in the synthetic state trajectory to be generated. initial_Pi : np.array of shape (nstates,), optional, default=None The initial probability distribution, if samples are not to be taken from the intrinsic initial distribution. start : int starting state. Exclusive with initial_Pi stop : int stopping state. Trajectory will terminate when reaching the stopping state before length number of steps. dtype : numpy.dtype, optional, default=numpy.int32 The numpy dtype to use to store the synthetic trajectory. Returns ------- states : np.array of shape (nstates,) of dtype=np.int32 The trajectory of hidden states, with each element in range(0,nstates). Examples -------- Generate a synthetic state trajectory of a specified length. """
# consistency check
if initial_Pi is not None and start is not None:
    raise ValueError('Arguments initial_Pi and start are exclusive. Only set one of them.')

# Generate first state sample.
if start is None:
    if initial_Pi is not None:
        start = np.random.choice(range(self._nstates), size=1, p=initial_Pi)
    else:
        start = np.random.choice(range(self._nstates), size=1, p=self._Pi)

# Generate and return trajectory
from msmtools import generation as msmgen
traj = msmgen.generate_traj(self.transition_matrix, nsteps, start=start, stop=stop, dt=1)
return traj.astype(dtype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_synthetic_observation_trajectory(self, length, initial_Pi=None): """Generate a synthetic realization of observables. Parameters length : int Length of synthetic state trajectory to be generated. initial_Pi : np.array of shape (nstates,), optional, default=None The initial probability distribution, if samples are not to be taken from equilibrium. Returns ------- o_t : np.array of shape (nstates,) of dtype=np.float32 The trajectory of observations. s_t : np.array of shape (nstates,) of dtype=np.int32 The trajectory of hidden states, with each element in range(0,nstates). Examples -------- Generate a synthetic observation trajectory for an equilibrium realization. Use an initial nonequilibrium distribution. """
# First, generate synthetic state trajectory.
s_t = self.generate_synthetic_state_trajectory(length, initial_Pi=initial_Pi)

# Next, generate observations from these states.
o_t = self.output_model.generate_observation_trajectory(s_t)

return [o_t, s_t]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_synthetic_observation_trajectories(self, ntrajectories, length, initial_Pi=None): """Generate a number of synthetic realization of observables from this model. Parameters ntrajectories : int The number of trajectories to be generated. length : int Length of synthetic state trajectory to be generated. initial_Pi : np.array of shape (nstates,), optional, default=None The initial probability distribution, if samples are not to be taken from equilibrium. Returns ------- O : list of np.array of shape (nstates,) of dtype=np.float32 The trajectories of observations S : list of np.array of shape (nstates,) of dtype=np.int32 The trajectories of hidden states Examples -------- Generate a number of synthetic trajectories. Use an initial nonequilibrium distribution. """
O = list() # observations S = list() # state trajectories for trajectory_index in range(ntrajectories): o_t, s_t = self.generate_synthetic_observation_trajectory(length=length, initial_Pi=initial_Pi) O.append(o_t) S.append(s_t) return O, S
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nb_to_python(nb_path): """convert notebook to python script"""
exporter = python.PythonExporter() output, resources = exporter.from_filename(nb_path) return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nb_to_html(nb_path): """convert notebook to html"""
exporter = html.HTMLExporter(template_file='full') output, resources = exporter.from_filename(nb_path) header = output.split('<head>', 1)[1].split('</head>',1)[0] body = output.split('<body>', 1)[1].split('</body>',1)[0] # http://imgur.com/eR9bMRH header = header.replace('<style', '<style scoped="scoped"') header = header.replace('body {\n overflow: visible;\n padding: 8px;\n}\n', '') # Filter out styles that conflict with the sphinx theme. filter_strings = [ 'navbar', 'body{', 'alert{', 'uneditable-input{', 'collapse{', ] filter_strings.extend(['h%s{' % (i+1) for i in range(6)]) header_lines = filter( lambda x: not any([s in x for s in filter_strings]), header.split('\n')) header = '\n'.join(header_lines) # concatenate raw html lines lines = ['<div class="ipynotebook">'] lines.append(header) lines.append(body) lines.append('</div>') return '\n'.join(lines)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def estimate(self, observations, weights): """ Maximum likelihood estimation of output model given the observations and weights Parameters observations : [ ndarray(T_k) ] with K elements A list of K observation trajectories, each having length T_k weights : [ ndarray(T_k, N) ] with K elements A list of K weight matrices, each of shape (T_k, N), containing the probability of each state at every time step Examples -------- Generate an observation model and samples from each state. Update the observation model parameters by a maximum-likelihood fit. """
# sizes N, M = self._output_probabilities.shape K = len(observations) # initialize output probability matrix self._output_probabilities = np.zeros((N, M)) # update output probability matrix (numerator) if self.__impl__ == self.__IMPL_C__: for k in range(K): dc.update_pout(observations[k], weights[k], self._output_probabilities, dtype=config.dtype) elif self.__impl__ == self.__IMPL_PYTHON__: for k in range(K): for o in range(M): times = np.where(observations[k] == o)[0] self._output_probabilities[:, o] += np.sum(weights[k][times, :], axis=0) else: raise RuntimeError('Implementation '+str(self.__impl__)+' not available') # normalize self._output_probabilities /= np.sum(self._output_probabilities, axis=1)[:, None]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def state_counts(gamma, T, out=None): """ Sum the probabilities of being in state i over the first T time steps Parameters gamma : ndarray((T,N), dtype = float) gamma[t,i] is the probability at time t to be in state i T : int number of time steps Returns ------- count : numpy.array shape (N) count[i] is the summed probability to be in state i See Also -------- state_probabilities : to calculate `gamma` """
return np.sum(gamma[0:T], axis=0, out=out)
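A minimal sketch of how state_counts behaves, using hand-made gamma values (hypothetical numbers, not from any estimator):

import numpy as np

# gamma[t, i]: probability of being in state i at time t (3 steps, 2 states)
gamma = np.array([[0.9, 0.1],
                  [0.5, 0.5],
                  [0.2, 0.8]])
state_counts(gamma, T=2)   # sums over the first two time steps -> array([1.4, 0.6])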
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def logger(name='BHMM', pattern='%(asctime)s %(levelname)s %(name)s: %(message)s', date_format='%H:%M:%S', handler=logging.StreamHandler(sys.stdout)): """ Retrieves the logger instance associated to the given name. :param name: The name of the logger instance. :type name: str :param pattern: The associated pattern. :type pattern: str :param date_format: The date format to be used in the pattern. :type date_format: str :param handler: The logging handler, by default console output. :type handler: FileHandler or StreamHandler or NullHandler :return: The logger. :rtype: Logger """
_logger = logging.getLogger(name) _logger.setLevel(config.log_level()) if not _logger.handlers: formatter = logging.Formatter(pattern, date_format) handler.setFormatter(formatter) handler.setLevel(config.log_level()) _logger.addHandler(handler) _logger.propagate = False return _logger
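A hedged usage sketch; it assumes the surrounding bhmm config module is importable so that config.log_level() resolves:

log = logger(name='BHMM')
log.info('sampler initialized')
# printed to stdout in the configured pattern, e.g.
# "14:02:31 INFO BHMM: sampler initialized"
# A second logger() call with the same name returns the same instance without
# attaching a duplicate handler, because _logger.handlers is then non-empty.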
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sample(self, nsamples, nburn=0, nthin=1, save_hidden_state_trajectory=False, call_back=None): """Sample from the BHMM posterior. Parameters nsamples : int The number of samples to generate. nburn : int, optional, default=0 The number of samples to discard to burn-in, following which `nsamples` will be generated. nthin : int, optional, default=1 The number of Gibbs sampling updates used to generate each returned sample. save_hidden_state_trajectory : bool, optional, default=False If True, the hidden state trajectory for each sample will be saved as well. call_back : function, optional, default=None a callback function with no arguments which, if given, is called after each computed sample. This is useful for implementing progress bars. Returns ------- models : list of bhmm.HMM The sampled HMM models from the Bayesian posterior. Examples -------- """
# Run burn-in. for iteration in range(nburn): logger().info("Burn-in %8d / %8d" % (iteration, nburn)) self._update() # Collect data. models = list() for iteration in range(nsamples): logger().info("Iteration %8d / %8d" % (iteration, nsamples)) # Run a number of Gibbs sampling updates to generate each sample. for thin in range(nthin): self._update() # Save a copy of the current model. model_copy = copy.deepcopy(self.model) if not save_hidden_state_trajectory: model_copy.hidden_state_trajectory = None models.append(model_copy) if call_back is not None: call_back() # Return the list of models saved. return models
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _update(self): """Update the current model using one round of Gibbs sampling. """
initial_time = time.time() self._updateHiddenStateTrajectories() self._updateEmissionProbabilities() self._updateTransitionMatrix() final_time = time.time() elapsed_time = final_time - initial_time logger().info("BHMM update iteration took %.3f s" % elapsed_time)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _updateTransitionMatrix(self): """ Updates the hidden-state transition matrix and the initial distribution """
# TRANSITION MATRIX C = self.model.count_matrix() + self.prior_C # posterior count matrix # check if we work with these options if self.reversible and not _tmatrix_disconnected.is_connected(C, strong=True): raise NotImplementedError('Encountered disconnected count matrix with sampling option reversible:\n ' + str(C) + '\nUse prior to ensure connectivity or use reversible=False.') # ensure consistent sparsity pattern (P0 might have additional zeros because of underflows) # TODO: these steps work around a bug in msmtools. Should be fixed there P0 = msmest.transition_matrix(C, reversible=self.reversible, maxiter=10000, warn_not_converged=False) zeros = np.where(P0 + P0.T == 0) C[zeros] = 0 # run sampler Tij = msmest.sample_tmatrix(C, nsample=1, nsteps=self.transition_matrix_sampling_steps, reversible=self.reversible) # INITIAL DISTRIBUTION if self.stationary: # p0 is consistent with P p0 = _tmatrix_disconnected.stationary_distribution(Tij, C=C) else: n0 = self.model.count_init().astype(float) first_timestep_counts_with_prior = n0 + self.prior_n0 positive = first_timestep_counts_with_prior > 0 p0 = np.zeros_like(n0) p0[positive] = np.random.dirichlet(first_timestep_counts_with_prior[positive]) # sample p0 from posterior # update HMM with new sample self.model.update(p0, Tij)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generateInitialModel(self, output_model_type): """Initialize using an MLHMM. """
logger().info("Generating initial model for BHMM using MLHMM...") from bhmm.estimators.maximum_likelihood import MaximumLikelihoodEstimator mlhmm = MaximumLikelihoodEstimator(self.observations, self.nstates, reversible=self.reversible, output=output_model_type) model = mlhmm.fit() return model
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connected_sets(C, mincount_connectivity=0, strong=True): """ Computes the connected sets of C. C : count matrix mincount_connectivity : float Minimum count which counts as a connection. strong : boolean True: Seek strongly connected sets. False: Seek weakly connected sets. """
import msmtools.estimation as msmest Cconn = C.copy() Cconn[np.where(C <= mincount_connectivity)] = 0 # treat each connected set separately S = msmest.connected_sets(Cconn, directed=strong) return S
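A small worked example (illustrative matrix, not from real data):

import numpy as np

C = np.array([[10, 1, 0],
              [1, 10, 0],
              [0, 0, 5]])
connected_sets(C)
# -> [array([0, 1]), array([2])]: states 0 and 1 exchange counts, state 2 is isolated
connected_sets(C, mincount_connectivity=1)
# counts <= 1 are zeroed first, so {0, 1} splits into singletons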
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def closed_sets(C, mincount_connectivity=0): """ Computes the strongly connected closed sets of C """
n = np.shape(C)[0] S = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=True) closed = [] for s in S: mask = np.zeros(n, dtype=bool) mask[s] = True if C[np.ix_(mask, ~mask)].sum() == 0: # closed set, take it closed.append(s) return closed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def nonempty_set(C, mincount_connectivity=0): """ Returns the set of states that have at least one incoming or outgoing count """
# truncate to states with at least one observed incoming or outgoing count. if mincount_connectivity > 0: C = C.copy() C[np.where(C < mincount_connectivity)] = 0 return np.where(C.sum(axis=0) + C.sum(axis=1) > 0)[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def estimate_P(C, reversible=True, fixed_statdist=None, maxiter=1000000, maxerr=1e-8, mincount_connectivity=0): """ Estimates full transition matrix for general connectivity structure Parameters C : ndarray count matrix reversible : bool estimate reversible? fixed_statdist : ndarray or None estimate with given stationary distribution maxiter : int Maximum number of reversible iterations. maxerr : float Stopping criterion for reversible iteration: Will stop when infinity norm of difference vector of two subsequent equilibrium distributions is below maxerr. mincount_connectivity : float Minimum count which counts as a connection. """
import msmtools.estimation as msmest n = np.shape(C)[0] # output matrix. Set initially to Identity matrix in order to handle empty states P = np.eye(n, dtype=np.float64) # decide if we need to proceed by weakly or strongly connected sets if reversible and fixed_statdist is None: # reversible to unknown eq. dist. - use strongly connected sets. S = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=True) for s in S: mask = np.zeros(n, dtype=bool) mask[s] = True if C[np.ix_(mask, ~mask)].sum() > np.finfo(C.dtype).eps: # outgoing transitions - use partial rev algo. transition_matrix_partial_rev(C, P, mask, maxiter=maxiter, maxerr=maxerr) else: # closed set - use standard estimator I = np.ix_(mask, mask) if s.size > 1: # leave diagonal 1 if single closed state. P[I] = msmest.transition_matrix(C[I], reversible=True, warn_not_converged=False, maxiter=maxiter, maxerr=maxerr) else: # nonreversible or given equilibrium distribution - weakly connected sets S = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=False) for s in S: I = np.ix_(s, s) if not reversible: Csub = C[I] # any zero rows? must set Cii = 1 to avoid dividing by zero zero_rows = np.where(Csub.sum(axis=1) == 0)[0] Csub[zero_rows, zero_rows] = 1.0 P[I] = msmest.transition_matrix(Csub, reversible=False) elif reversible and fixed_statdist is not None: P[I] = msmest.transition_matrix(C[I], reversible=True, fixed_statdist=fixed_statdist, maxiter=maxiter, maxerr=maxerr) else: # unknown case raise NotImplementedError('Transition estimation for the case reversible=' + str(reversible) + ' fixed_statdist=' + str(fixed_statdist is not None) + ' not implemented.') # done return P
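A hedged sketch of the dispatch on connectivity; the count matrix is illustrative and is passed as float so that np.finfo(C.dtype) is well defined:

import numpy as np

C = np.array([[5., 2., 0.],
              [2., 5., 0.],
              [0., 0., 0.]])   # state 2 has no counts at all
P = estimate_P(C, reversible=True)
# the closed set {0, 1} is estimated with the standard reversible estimator,
# while the empty state 2 keeps its identity row from np.eye(n)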
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transition_matrix_partial_rev(C, P, S, maxiter=1000000, maxerr=1e-8): """Maximum likelihood estimation of transition matrix which is reversible on parts Partially-reversible estimation of transition matrix. Maximizes the likelihood: .. math: P_S = \\arg\\max \\prod_{i \\in S, j} p_{ij}^{c_{ij}} \\quad \\mathrm{subject\\ to} \\quad \\pi_i p_{ij} = \\pi_j p_{ji} \\;\\; \\forall i, j \\in S where the product runs over all elements of the rows S, and detailed balance is enforced only on the block with rows and columns S. :math:`\\pi` is the vector of equilibrium probabilities restricted to set S. Note that this formulation couples the reversible block and the outgoing transition probabilities through a joint row normalization. Parameters C : ndarray full count matrix P : ndarray full transition matrix to write to. Will overwrite P[S] S : ndarray, bool boolean selection of reversible set with outgoing transitions maxiter : int maximum number of fixed-point iterations before aborting. maxerr : float maximum difference in matrix sums between iterations (infinity norm) in order to stop. """
# test input assert np.array_equal(C.shape, P.shape) # constants A = C[S][:, S] B = C[S][:, ~S] ATA = A + A.T countsums = C[S].sum(axis=1) # initialize X = 0.5 * ATA Y = C[S][:, ~S] # normalize X, Y totalsum = X.sum() + Y.sum() X /= totalsum Y /= totalsum # rowsums rowsums = X.sum(axis=1) + Y.sum(axis=1) err = 1.0 it = 0 while err > maxerr and it < maxiter: # update d = countsums / rowsums X = ATA / (d[:, None] + d) Y = B / d[:, None] # normalize X, Y totalsum = X.sum() + Y.sum() X /= totalsum Y /= totalsum # update sums rowsums_new = X.sum(axis=1) + Y.sum(axis=1) # compute error err = np.max(np.abs(rowsums_new - rowsums)) # update rowsums = rowsums_new it += 1 # write to P P[np.ix_(S, S)] = X P[np.ix_(S, ~S)] = Y P[S] /= P[S].sum(axis=1)[:, None]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enforce_reversible_on_closed(P): """ Enforces transition matrix P to be reversible on its closed sets. """
import msmtools.analysis as msmana n = np.shape(P)[0] Prev = P.copy() # treat each closed set separately sets = closed_sets(P) for s in sets: I = np.ix_(s, s) # compute stationary probability pi_s = msmana.stationary_distribution(P[I]) # symmetrize X_s = pi_s[:, None] * P[I] X_s = 0.5 * (X_s + X_s.T) # normalize Prev[I] = X_s / X_s.sum(axis=1)[:, None] return Prev
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_reversible(P): """ Return whether P is reversible on each of its weakly connected sets """
import msmtools.analysis as msmana # treat each weakly connected set separately sets = connected_sets(P, strong=False) for s in sets: Ps = P[s, :][:, s] if not msmana.is_transition_matrix(Ps): return False # isn't even a transition matrix! pi = msmana.stationary_distribution(Ps) X = pi[:, None] * Ps if not np.allclose(X, X.T): return False # survived. return True
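Two quick checks on toy matrices:

import numpy as np

P = np.array([[0.9, 0.1],
              [0.1, 0.9]])      # symmetric, so detailed balance holds
is_reversible(P)                 # True

P2 = np.array([[0., 1., 0.],
               [0., 0., 1.],
               [1., 0., 0.]])    # deterministic 3-cycle: probability flows one way
is_reversible(P2)                # False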
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stationary_distribution(P, C=None, mincount_connectivity=0): """ Simple estimator for stationary distribution for multiple strongly connected sets """
# can be replaced by msmtools.analysis.stationary_distribution in next msmtools release from msmtools.analysis.dense.stationary_vector import stationary_distribution as msmstatdist if C is None: if is_connected(P, strong=True): return msmstatdist(P) else: raise ValueError('Computing stationary distribution for disconnected matrix. Need count matrix.') # disconnected sets n = np.shape(C)[0] ctot = np.sum(C) pi = np.zeros(n) # treat each weakly connected set separately sets = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=False) for s in sets: # compute weight w = np.sum(C[s, :]) / ctot pi[s] = w * msmstatdist(P[s, :][:, s]) # reinforce normalization pi /= np.sum(pi) return pi
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def means_samples(self): r""" Samples of the Gaussian distribution means """
res = np.empty((self.nsamples, self.nstates, self.dimension), dtype=config.dtype) for i in range(self.nsamples): for j in range(self.nstates): res[i, j, :] = self._sampled_hmms[i].means[j] return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sigmas_samples(self): r""" Samples of the Gaussian distribution standard deviations """
res = np.empty((self.nsamples, self.nstates, self.dimension), dtype=config.dtype) for i in range(self.nsamples): for j in range(self.nstates): res[i, j, :] = self._sampled_hmms[i].sigmas[j] return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _guess_output_type(observations): """ Suggests a HMM model type based on the observation data Uses simple rules in order to decide which HMM model type makes sense based on observation data. If observations consist of arrays/lists of integer numbers (irrespective of whether the python type is int or float), our guess is 'discrete'. If observations consist of arrays/lists of 1D-floats, our guess is 'gaussian'. In any other case, a TypeError is raised because we are not supporting that data type yet. Parameters observations : list of lists or arrays observation trajectories Returns ------- output_type : str One of {'discrete', 'gaussian'} """
from bhmm.util import types as _types o1 = _np.array(observations[0]) # CASE: vector of int? Then we want a discrete HMM if _types.is_int_vector(o1): return 'discrete' # CASE: not int type, but everything is an integral number. Then we also go for discrete if _np.allclose(o1, _np.round(o1)): isintegral = True for i in range(1, len(observations)): if not _np.allclose(observations[i], _np.round(observations[i])): isintegral = False break if isintegral: return 'discrete' # CASE: vector of double? Then we want a gaussian if _types.is_float_vector(o1): return 'gaussian' # None of the above? Then we currently do not support this format! raise TypeError('Observations are neither sequences of integers nor 1D-sequences of floats. ' 'The current version does not support this input.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lag_observations(observations, lag, stride=1): r""" Create new trajectories that are subsampled at lag but shifted at lag times larger than 1 without discarding data. Do not use this function for Bayesian estimators, where data must be given such that subsequent transitions are uncorrelated. Parameters observations : list of int arrays observation trajectories lag : int lag time stride : int, default=1 will return only one trajectory for every stride; set stride=lag to obtain uncorrelated transitions, as required for Bayesian analysis. """
obsnew = [] for obs in observations: for shift in range(0, lag, stride): obs_lagged = (obs[shift:][::lag]) if len(obs_lagged) > 1: obsnew.append(obs_lagged) return obsnew
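A concrete example of the shift-and-subsample behavior (values computed by hand):

import numpy as np

obs = [np.arange(10)]
lag_observations(obs, lag=3)
# -> [array([0, 3, 6, 9]), array([1, 4, 7]), array([2, 5, 8])]
lag_observations(obs, lag=3, stride=3)
# -> [array([0, 3, 6, 9])]: only the unshifted trajectory is kept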
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gaussian_hmm(pi, P, means, sigmas): """ Initializes a 1D-Gaussian HMM Parameters pi : ndarray(nstates, ) Initial distribution. P : ndarray(nstates,nstates) Hidden transition matrix means : ndarray(nstates, ) Means of Gaussian output distributions sigmas : ndarray(nstates, ) Standard deviations of Gaussian output distributions """
from bhmm.hmm.gaussian_hmm import GaussianHMM from bhmm.output_models.gaussian import GaussianOutputModel # count states nstates = _np.array(P).shape[0] # initialize output model output_model = GaussianOutputModel(nstates, means, sigmas) # initialize general HMM from bhmm.hmm.generic_hmm import HMM as _HMM ghmm = _HMM(pi, P, output_model) # turn it into a Gaussian HMM ghmm = GaussianHMM(ghmm) return ghmm
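A hedged usage sketch for a two-state model; sampling a trajectory at the end assumes the GaussianHMM wrapper exposes the generic HMM's generation methods shown earlier:

import numpy as np

pi = np.array([0.5, 0.5])
P = np.array([[0.95, 0.05],
              [0.05, 0.95]])
model = gaussian_hmm(pi, P, means=np.array([-1.0, 1.0]), sigmas=np.array([0.5, 0.5]))
o_t, s_t = model.generate_synthetic_observation_trajectory(1000)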
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def discrete_hmm(pi, P, pout): """ Initializes a discrete HMM Parameters pi : ndarray(nstates, ) Initial distribution. P : ndarray(nstates,nstates) Hidden transition matrix pout : ndarray(nstates,nsymbols) Output matrix from hidden states to observable symbols """
from bhmm.hmm.discrete_hmm import DiscreteHMM from bhmm.output_models.discrete import DiscreteOutputModel # initialize output model output_model = DiscreteOutputModel(pout) # initialize general HMM from bhmm.hmm.generic_hmm import HMM as _HMM dhmm = _HMM(pi, P, output_model) # turn it into a discrete HMM dhmm = DiscreteHMM(dhmm) return dhmm
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def estimate_hmm(observations, nstates, lag=1, initial_model=None, output=None, reversible=True, stationary=False, p=None, accuracy=1e-3, maxit=1000, maxit_P=100000, mincount_connectivity=1e-2): r""" Estimate maximum-likelihood HMM Generic maximum-likelihood estimation of HMMs Parameters observations : list of numpy arrays representing temporal data `observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i` nstates : int The number of states in the model. lag : int the lag time at which observations should be read initial_model : HMM, optional, default=None If specified, the given initial model will be used to initialize the BHMM. Otherwise, a heuristic scheme is used to generate an initial guess. output : str, optional, default=None Output model type from [None, 'gaussian', 'discrete']. If None, will automatically select an output model type based on the format of observations. reversible : bool, optional, default=True If True, a prior that enforces reversible transition matrices (detailed balance) is used; otherwise, a standard non-reversible prior is used. stationary : bool, optional, default=False If True, the initial distribution of hidden states is self-consistently computed as the stationary distribution of the transition matrix. If False, it will be estimated from the starting states. Only set this to true if you're sure that the observation trajectories are initiated from a global equilibrium distribution. p : ndarray (nstates), optional, default=None Initial or fixed stationary distribution. If given and stationary=True, transition matrices will be estimated with the constraint that they have p as their stationary distribution. If given and stationary=False, p is the fixed initial distribution of hidden states. accuracy : float convergence threshold for the EM iteration. When the likelihood does not increase by more than accuracy between two iterations, the iteration is stopped successfully. maxit : int stopping criterion for EM iteration. When so many iterations are performed without reaching the requested accuracy, the iteration is stopped without convergence (a warning is given) Return ------ hmm : :class:`HMM <bhmm.hmm.generic_hmm.HMM>` """
# select output model type if output is None: output = _guess_output_type(observations) if lag > 1: observations = lag_observations(observations, lag) # construct estimator from bhmm.estimators.maximum_likelihood import MaximumLikelihoodEstimator as _MaximumLikelihoodEstimator est = _MaximumLikelihoodEstimator(observations, nstates, initial_model=initial_model, output=output, reversible=reversible, stationary=stationary, p=p, accuracy=accuracy, maxit=maxit, maxit_P=maxit_P) # run est.fit() # set lag time est.hmm._lag = lag # return model # TODO: package into specific class (DiscreteHMM, GaussianHMM) return est.hmm
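A hedged end-to-end sketch on toy discrete data; the attribute name follows the generic HMM used above (transition_matrix):

import numpy as np

obs = [np.array([0, 0, 0, 1, 1, 1, 0, 0, 1, 1]),
       np.array([1, 1, 0, 0, 0, 1, 1])]
hmm = estimate_hmm(obs, nstates=2)   # output type is inferred as 'discrete'
print(hmm.transition_matrix)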
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bayesian_hmm(observations, estimated_hmm, nsample=100, reversible=True, stationary=False, p0_prior='mixed', transition_matrix_prior='mixed', store_hidden=False, call_back=None): r""" Bayesian HMM based on sampling the posterior Generic Bayesian sampling of the HMM posterior Parameters observations : list of numpy arrays representing temporal data `observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i` estimated_hmm : HMM HMM estimated from estimate_hmm or initialize_hmm reversible : bool, optional, default=True If True, a prior that enforces reversible transition matrices (detailed balance) is used; otherwise, a standard non-reversible prior is used. stationary : bool, optional, default=False If True, the stationary distribution of the transition matrix will be used as initial distribution. Only use True if you are confident that the observation trajectories are started from a global equilibrium. If False, the initial distribution will be estimated as usual from the first step of the hidden trajectories. nsample : int, optional, default=100 number of Gibbs sampling steps p0_prior : None, str, float or ndarray(n) Prior for the initial distribution of the HMM. Will only be active if stationary=False (stationary=True means that p0 is identical to the stationary distribution of the transition matrix). Currently implements different versions of the Dirichlet prior that is conjugate to the Dirichlet distribution of p0. p0 is sampled from: .. math: p_0 \sim \prod_i p_{0,i}^{a_i + n_i - 1} where :math:`n_i` are the number of times a hidden trajectory was in state :math:`i` at time step 0 and :math:`a_i` is the prior count. The following options are available: | 'mixed' (default), :math:`a_i = p_{0,init}`, where :math:`p_{0,init}` is the initial distribution of initial_model. | 'uniform', :math:`a_i = 1` | ndarray(n) or float, the given array will be used as A. | None, :math:`a_i = 0`. This option ensures coincidence between the sample mean and the MLE. Will sooner or later lead to sampling problems, because as soon as no trajectory is drawn starting from a given state, the sampler cannot recover and that state will never serve as a starting state subsequently. Only recommended in the large data regime and when the probability to sample zero trajectories from any state is negligible. transition_matrix_prior : str or ndarray(n, n) Prior for the HMM transition matrix. Currently implements Dirichlet priors if reversible=False and reversible transition matrix priors as described in [1]_ if reversible=True. For the nonreversible case the posterior of transition matrix :math:`P` is: .. math: P \sim \prod_{i,j} p_{ij}^{b_{ij} + c_{ij} - 1} where :math:`c_{ij}` are the number of transitions found for hidden trajectories and :math:`b_{ij}` are prior counts. | 'mixed' (default), :math:`b_{ij} = p_{ij,init}`, where :math:`p_{ij,init}` is the transition matrix of initial_model. That means one prior count will be used per row. | 'uniform', :math:`b_{ij} = 1` | ndarray(n, n) or broadcastable, the given array will be used as B. | None, :math:`b_{ij} = 0`. This option ensures coincidence between the sample mean and the MLE. Will sooner or later lead to sampling problems, because as soon as a transition :math:`ij` does not occur in a sample, the sampler cannot recover and that transition will never be sampled again. 
This option is not recommended unless you have a small HMM and a lot of data. store_hidden : bool, optional, default=False store hidden trajectories in sampled HMMs call_back : function, optional, default=None a callback function with no arguments which, if given, is called after each computed sample. This is useful for implementing progress bars. Return ------ hmm : :class:`SampledHMM <bhmm.hmm.generic_sampled_hmm.SampledHMM>` References .. [1] Trendelkamp-Schroer, B., H. Wu, F. Paul and F. Noe: Estimation and uncertainty of reversible Markov models. J. Chem. Phys. 143, 174101 (2015). """
# construct estimator from bhmm.estimators.bayesian_sampling import BayesianHMMSampler as _BHMM sampler = _BHMM(observations, estimated_hmm.nstates, initial_model=estimated_hmm, reversible=reversible, stationary=stationary, transition_matrix_sampling_steps=1000, p0_prior=p0_prior, transition_matrix_prior=transition_matrix_prior, output=estimated_hmm.output_model.model_type) # Sample models. sampled_hmms = sampler.sample(nsamples=nsample, save_hidden_state_trajectory=store_hidden, call_back=call_back) # return model from bhmm.hmm.generic_sampled_hmm import SampledHMM return SampledHMM(estimated_hmm, sampled_hmms)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def logsumexp(arr, axis=0): """Computes the sum of arr assuming arr is in the log domain. Returns log(sum(exp(arr))) while minimizing the possibility of over/underflow. Examples -------- >>> import numpy as np >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107 """
arr = np.rollaxis(arr, axis) # Use the max to normalize, as with the log this is what accumulates # the less errors vmax = arr.max(axis=0) out = np.log(np.sum(np.exp(arr - vmax), axis=0)) out += vmax return out
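A quick demonstration of why the max-shift matters (the naive form overflows):

import numpy as np

a = np.array([1000.0, 1000.0])
np.log(np.sum(np.exp(a)))   # inf, with a RuntimeWarning: exp overflows
logsumexp(a)                # 1000.6931..., i.e. 1000 + log(2)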
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy, force_all_finite): """Convert a sparse matrix to a given format. Checks the sparse format of spmatrix and converts if necessary. Parameters spmatrix : scipy sparse matrix Input to validate and convert. accept_sparse : string, list of string or None (default=None) String[s] representing allowed sparse matrix formats ('csc', 'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse matrix input will raise an error. If the input is sparse but not in the allowed format, it will be converted to the first listed format. dtype : string, type or None (default=None) Data type of result. If None, the dtype of the input is preserved. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. Returns ------- spmatrix_converted : scipy sparse matrix. Matrix that is ensured to have an allowed type. """
if accept_sparse is None: raise TypeError('A sparse matrix was passed, but dense ' 'data is required. Use X.toarray() to ' 'convert to a dense numpy array.') sparse_type = spmatrix.format if dtype is None: dtype = spmatrix.dtype if sparse_type in accept_sparse: # correct type if dtype == spmatrix.dtype: # correct dtype if copy: spmatrix = spmatrix.copy() else: # convert dtype spmatrix = spmatrix.astype(dtype) else: # create new spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype) if force_all_finite: if not hasattr(spmatrix, "data"): warnings.warn("Can't check %s sparse matrix for nan or inf." % spmatrix.format) else: _assert_all_finite(spmatrix.data) if hasattr(spmatrix, "data"): spmatrix.data = np.array(spmatrix.data, copy=False, order=order) return spmatrix
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_array(array, accept_sparse=None, dtype="numeric", order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, ensure_min_samples=1, ensure_min_features=1): """Input validation on an array, list, sparse matrix or similar. By default, the input is converted to an at least 2D numpy array. If the dtype of the array is object, attempt converting to float, raising on failure. Parameters array : object Input object to check / convert. accept_sparse : string, list of string or None (default=None) String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. None means that sparse matrix input will raise an error. If the input is sparse but not in the allowed format, it will be converted to the first listed format. dtype : string, type or None (default="numeric") Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean (default=True) Whether to raise an error on np.inf and np.nan in X. ensure_2d : boolean (default=True) Whether to make X at least 2d. allow_nd : boolean (default=False) Whether to allow X.ndim > 2. ensure_min_samples : int (default=1) Make sure that the array has a minimum number of samples in its first axis (rows for a 2D array). Setting to 0 disables this check. ensure_min_features : int (default=1) Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when the input data has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. Returns ------- X_converted : object The converted and validated X. """
if isinstance(accept_sparse, str): accept_sparse = [accept_sparse] # store whether originally we wanted numeric dtype dtype_numeric = dtype == "numeric" if sp.issparse(array): if dtype_numeric: dtype = None array = _ensure_sparse_format(array, accept_sparse, dtype, order, copy, force_all_finite) else: if ensure_2d: array = np.atleast_2d(array) if dtype_numeric: if hasattr(array, "dtype") and getattr(array.dtype, "kind", None) == "O": # if input is object, convert to float. dtype = np.float64 else: dtype = None array = np.array(array, dtype=dtype, order=order, copy=copy) # make sure we actually converted to numeric: if dtype_numeric and array.dtype.kind == "O": array = array.astype(np.float64) if not allow_nd and array.ndim >= 3: raise ValueError("Found array with dim %d. Expected <= 2" % array.ndim) if force_all_finite: _assert_all_finite(array) shape_repr = _shape_repr(array.shape) if ensure_min_samples > 0: n_samples = _num_samples(array) if n_samples < ensure_min_samples: raise ValueError("Found array with %d sample(s) (shape=%s) while a" " minimum of %d is required." % (n_samples, shape_repr, ensure_min_samples)) if ensure_min_features > 0 and array.ndim == 2: n_features = array.shape[1] if n_features < ensure_min_features: raise ValueError("Found array with %d feature(s) (shape=%s) while" " a minimum of %d is required." % (n_features, shape_repr, ensure_min_features)) return array
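A few illustrative calls (assuming scipy is available for the sparse case):

import numpy as np
import scipy.sparse as sp

check_array([[1, 2], [3, 4]])        # 2D ndarray, dtype preserved (not object)
check_array([1, 2, 3]).shape         # (1, 3): ensure_2d promotes 1D input
try:
    check_array(sp.csr_matrix(np.eye(3)))   # accept_sparse=None rejects sparse input
except TypeError as e:
    print(e)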
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def beta_confidence_intervals(ci_X, ntrials, ci=0.95): """ Compute confidence intervals of beta distributions. Parameters ci_X : numpy.array Computed confidence interval estimate from `ntrials` experiments ntrials : int The number of trials that were run. ci : float, optional, default=0.95 Confidence interval to report (e.g. 0.95 for 95% confidence interval) Returns ------- Plow : float The lower bound of the symmetric confidence interval. Phigh : float The upper bound of the symmetric confidence interval. Examples -------- """
# Compute low and high confidence interval bounds for a symmetric CI about the mean. ci_low = 0.5 - ci / 2 ci_high = 0.5 + ci / 2 # Compute the beta-distribution quantiles for every element of ci_X. from scipy.stats import beta Plow = np.zeros_like(ci_X, dtype=float) Phigh = np.zeros_like(ci_X, dtype=float) for i in range(ci_X.shape[0]): for j in range(ci_X.shape[1]): Plow[i, j] = beta.ppf(ci_low, a=ci_X[i, j] * ntrials, b=(1 - ci_X[i, j]) * ntrials) Phigh[i, j] = beta.ppf(ci_high, a=ci_X[i, j] * ntrials, b=(1 - ci_X[i, j]) * ntrials) return [Plow, Phigh]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def empirical_confidence_interval(sample, interval=0.95): """ Compute specified symmetric confidence interval for empirical sample. Parameters sample : numpy.array The empirical samples. interval : float, optional, default=0.95 Size of desired symmetric confidence interval (0 < interval < 1) e.g. 0.68 for 68% confidence interval, 0.95 for 95% confidence interval Returns ------- low : float The lower bound of the symmetric confidence interval. high : float The upper bound of the symmetric confidence interval. Examples -------- """
# Sort sample in increasing order. sample = np.sort(sample) # Determine sample size. N = len(sample) # Compute low and high indices; clip to the valid range to avoid running past the array. low_index = min(N - 1, int(np.round((N - 1) * (0.5 - interval / 2)))) high_index = min(N - 1, int(np.round((N - 1) * (0.5 + interval / 2)))) # Compute low and high. low = sample[low_index] high = sample[high_index] return [low, high]
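A sanity check against the standard normal quantiles (illustrative):

import numpy as np

np.random.seed(0)
sample = np.random.randn(100000)
low, high = empirical_confidence_interval(sample, interval=0.95)
# low and high approximate the 2.5% and 97.5% quantiles, roughly -1.96 and +1.96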
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def confidence_interval(data, alpha): """ Computes the mean and alpha-confidence interval of the given sample set Parameters data : ndarray a 1D-array of samples alpha : float in [0,1] the confidence level, i.e. percentage of data included in the interval Returns ------- [m, l, r] where m is the mean of the data, and (l, r) are the lower and upper boundaries of the alpha-confidence interval around m. """
if alpha < 0 or alpha > 1: raise ValueError('Not a meaningful confidence level: '+str(alpha)) # compute mean m = np.mean(data) # sort data sdata = np.sort(data) # index of the mean im = np.searchsorted(sdata, m) if im == 0 or im == len(sdata): pm = im else: pm = (im-1) + (m-sdata[im-1]) / (sdata[im]-sdata[im-1]) # left interval boundary pl = pm - alpha * pm il1 = max(0, int(math.floor(pl))) il2 = min(len(sdata)-1, int(math.ceil(pl))) l = sdata[il1] + (pl - il1)*(sdata[il2] - sdata[il1]) # right interval boundary pr = pm + alpha * (len(data)-im) ir1 = max(0, int(math.floor(pr))) ir2 = min(len(sdata)-1, int(math.ceil(pr))) r = sdata[ir1] + (pr - ir1)*(sdata[ir2] - sdata[ir1]) # return return m, l, r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def status(self, remote=False): """ Return the connection status, both locally and remotely. The local connection status is a dictionary that gives: * the count of multiple queries sent to the server. * the count of single queries sent to the server. * the count of actions sent to the server. * the count of actions executed successfully by the server. * the count of actions queued to go to the server. The remote connection status includes whether the server is live, as well as data about version and build. The server data is cached, unless the remote flag is specified. :param remote: whether to query the server for its latest status :return: tuple of status dicts: (local, server). """
if remote: components = urlparse.urlparse(self.endpoint) try: result = self.session.get(components[0] + "://" + components[1] + "/status", timeout=self.timeout) except Exception as e: if self.logger: self.logger.debug("Failed to connect to server for status: %s", e) result = None if result and result.status_code == 200: self.server_status = result.json() self.server_status["endpoint"] = self.endpoint elif result: if self.logger: self.logger.debug("Server status response not understandable: Status: %d, Body: %s", result.status_code, result.text) self.server_status = {"endpoint": self.endpoint, "status": ("Unexpected HTTP status " + str(result.status_code) + " at: " + strftime("%d %b %Y %H:%M:%S +0000", gmtime()))} else: self.server_status = {"endpoint": self.endpoint, "status": "Unreachable at: " + strftime("%d %b %Y %H:%M:%S +0000", gmtime())} return self.local_status, self.server_status
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def append(self, **kwargs): """ Add commands at the end of the sequence. Be careful: because this runs in Python 2.x, the order of the kwargs dict may not match the order in which the args were specified. Thus, if you care about specific ordering, you must make multiple calls to append in that order. Luckily, append returns the action itself, so such calls can be chained. See also insert, below. :param kwargs: the key/value pairs to add :return: the action """
for k, v in six.iteritems(kwargs): self.commands.append({k: v}) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insert(self, **kwargs): """ Insert commands at the beginning of the sequence. This is provided because certain commands have to come first (such as user creation), but may need to be added after other commands have already been specified. Later calls to insert put their commands before those in the earlier calls. Also, since the order of iterated kwargs is not guaranteed (in Python 2.x), you should really only call insert with one keyword at a time. See the doc of append for more details. :param kwargs: the key/value pair to insert first :return: the action """
for k, v in six.iteritems(kwargs): self.commands.insert(0, {k: v}) return self
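A hedged sketch of how append and insert compose, starting from an empty command list; the command keywords (createEnterpriseID, add, removeFromOrg) are illustrative placeholders, and `action` stands for an instance of the surrounding class:

action.append(createEnterpriseID={"email": "user@example.com"})
action.append(add={"group": ["Default Group"]})
action.insert(removeFromOrg={})
# action.commands is now, in order:
# [{"removeFromOrg": {}}, {"createEnterpriseID": {...}}, {"add": {...}}]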
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def report_command_error(self, error_dict): """ Report a server error executing a command. We keep track of the command's position in the command list, and we annotate the error with the command itself. :param error_dict: The server's error dict for the error encountered """
error = dict(error_dict) error["command"] = self.commands[error_dict["step"]] error["target"] = self.frame del error["index"] # throttling can change which action this was in the batch del error["step"] # throttling can change which step this was in the action self.errors.append(error)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execution_errors(self): """ Return a list of commands that encountered execution errors, with the error. Each dictionary entry gives the command dictionary and the error dictionary :return: list of commands that gave errors, with their error information """
if self.split_actions: # throttling split this action, get errors from the split return [dict(e) for s in self.split_actions for e in s.errors] else: return [dict(e) for e in self.errors]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _next_page(self): """ Fetch the next page of the query. """
if self._last_page_seen: raise StopIteration new, self._last_page_seen = self.conn.query_multiple(self.object_type, self._next_page_index, self.url_params, self.query_params) self._next_page_index += 1 if len(new) == 0: self._last_page_seen = True # don't bother with next page if nothing was returned else: self._results += new
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fetch_result(self): """ Fetch the queried object. """
self._result = self.conn.query_single(self.object_type, self.url_params, self.query_params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scale2x(self, surface): """ Scales using the AdvanceMAME Scale2X algorithm which does a 'jaggie-less' scale of bitmap graphics. """
assert(self._scale == 2) return self._pygame.transform.scale2x(surface)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def smoothscale(self, surface): """ Smooth scaling using MMX or SSE extensions if available """
return self._pygame.transform.smoothscale(surface, self._output_size)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def identity(self, surface): """ Fast scale operation that does not sample the results """
return self._pygame.transform.scale(surface, self._output_size)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rgb2short(r, g, b): """ Converts RGB values to the nearest equivalent xterm-256 color. """
# Using list of snap points, convert RGB value to cube indexes r, g, b = [len(tuple(s for s in snaps if s < x)) for x in (r, g, b)] # Simple colorcube transform return (r * 36) + (g * 6) + b + 16
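The snaps sequence is defined at module level and not shown here; assuming it holds the midpoints between the xterm cube levels (0, 95, 135, 175, 215, 255), i.e. roughly (48, 115, 155, 195, 235), the mapping works out as:

snaps = (48, 115, 155, 195, 235)   # assumed cube snap points

rgb2short(0, 0, 0)        # -> 16, the first cube entry (black)
rgb2short(255, 0, 0)      # -> 196, bright red (cube indexes r=5, g=0, b=0)
rgb2short(255, 255, 255)  # -> 231, the last cube entry (white)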
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def display(self, image): """ Takes an image, scales it according to the nominated transform, and stores it for later building into an animated GIF. """
assert(image.size == self.size) self._last_image = image image = self.preprocess(image) surface = self.to_surface(image, alpha=self._contrast) rawbytes = self._pygame.image.tostring(surface, "RGB", False) im = Image.frombytes("RGB", surface.get_size(), rawbytes) self._images.append(im) self._count += 1 logger.debug("Recording frame: {0}".format(self._count)) if self._max_frames and self._count >= self._max_frames: sys.exit(0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _char_density(self, c, font=ImageFont.load_default()): """ Count the number of black pixels in a rendered character. """
# Draw the glyph in black on a white background, then count the black (0) pixels. image = Image.new('1', font.getsize(c), color=255) draw = ImageDraw.Draw(image) draw.text((0, 0), c, fill="black", font=font) return collections.Counter(image.getdata())[0]
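With the glyph drawn in black as above, densities order plausibly by visual weight; `dev` stands for an instance of the surrounding class:

dev._char_density(' ')                              # 0: a space renders no dark pixels
dev._char_density('#') > dev._char_density('.')     # True for the default PIL font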
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _CSI(self, cmd): """ Control sequence introducer """
sys.stdout.write('\x1b[') sys.stdout.write(cmd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(path, level=None, message=None, time_lower=None, time_upper=None, case_sensitive=False): # pragma: no cover """ Filter log messages by level name, by keywords contained in the message, and by a log-time interval. """
if level: level = level.upper() # level name has to be capitalized. if message and not case_sensitive: message = message.lower() with open(path, "r") as f: result = Result(path=path, level=level, message=message, time_lower=time_lower, time_upper=time_upper, case_sensitive=case_sensitive, ) for line in f: try: _time, _level, _message = [i.strip() for i in line.split(";")] if level: if _level != level: continue if time_lower: if _time < time_lower: continue if time_upper: if _time > time_upper: continue if message: if not case_sensitive: _message = _message.lower() if message not in _message: continue result.lines.append(line) except Exception as e: print(e) return result
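A hedged usage sketch; it assumes each log line is formatted "time; LEVEL; message" and that the Result class (defined elsewhere) collects matching lines; the file path and filter values are hypothetical:

result = find("app.log",
              level="error",
              message="timeout",
              time_lower="2018-01-01 00:00:00",
              time_upper="2018-12-31 23:59:59")
for line in result.lines:
    print(line.strip())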