desc | decl | bodies
---|---|---
'Outlyingness of observations in X according to the fitted model.
Parameters
X : array-like, shape = (n_samples, n_features)
Returns
is_inlier : array, shape = (n_samples,), dtype = int
For each observation, +1 if it should be considered an inlier and
-1 if it should be considered an outlier according to the fitted
model's decision-function threshold.'
def predict(self, X):
    check_is_fitted(self, 'threshold_')
    X = check_array(X)
    is_inlier = -np.ones(X.shape[0], dtype=int)
    if self.contamination is not None:
        values = self.decision_function(X, raw_values=True)
        is_inlier[values <= self.threshold_] = 1
    else:
        raise NotImplementedError('You must provide a contamination rate.')
    return is_inlier
|
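The predict convention above returns +1 for inliers and -1 for outliers once a contamination rate has been set. A minimal usage sketch, assuming scikit-learn's EllipticEnvelope (one estimator built on this mixin); the data and the contamination value are illustrative only:

import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(0)
X = rng.randn(200, 2)
X[:10] += 6  # shift a few points away from the bulk

# contamination is the assumed fraction of outliers in the data
detector = EllipticEnvelope(contamination=0.05).fit(X)
labels = detector.predict(X)  # array of +1 (inlier) / -1 (outlier)
print((labels == -1).sum(), "points flagged as outliers")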
'Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples,) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = (n_samples,), optional
Sample weights.
Returns
score : float
Mean accuracy of self.predict(X) wrt. y.'
def score(self, X, y, sample_weight=None):
    return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
|
'Fits the GraphLasso model to X.
Parameters
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
y : (ignored)'
def fit(self, X, y=None):
    X = check_array(X, ensure_min_features=2, ensure_min_samples=2,
                    estimator=self)
    if self.assume_centered:
        self.location_ = np.zeros(X.shape[1])
    else:
        self.location_ = X.mean(0)
    emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
    self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
        emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
        enet_tol=self.enet_tol, max_iter=self.max_iter,
        verbose=self.verbose, return_n_iter=True)
    return self
|
'Fits the GraphLasso covariance model to X.
Parameters
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
y : (ignored)'
def fit(self, X, y=None):
    X = check_array(X, ensure_min_features=2, estimator=self)
    if self.assume_centered:
        self.location_ = np.zeros(X.shape[1])
    else:
        self.location_ = X.mean(0)
    emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)

    cv = check_cv(self.cv, y, classifier=False)

    # List of (alpha, scores, covs)
    path = list()
    n_alphas = self.alphas
    inner_verbose = max(0, self.verbose - 1)

    if isinstance(n_alphas, collections.Sequence):
        alphas = self.alphas
        n_refinements = 1
    else:
        n_refinements = self.n_refinements
        alpha_1 = alpha_max(emp_cov)
        alpha_0 = 0.01 * alpha_1
        alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
                             n_alphas)[::-1]

    t0 = time.time()
    for i in range(n_refinements):
        with warnings.catch_warnings():
            # Ignore convergence warnings on this coarse grid: some
            # points will not converge during cross-validation.
            warnings.simplefilter('ignore', ConvergenceWarning)
            # Compute the cross-validated loss on the current grid
            this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
                delayed(graph_lasso_path)(X[train], alphas=alphas,
                                          X_test=X[test], mode=self.mode,
                                          tol=self.tol,
                                          enet_tol=self.enet_tol,
                                          max_iter=int(0.1 * self.max_iter),
                                          verbose=inner_verbose)
                for train, test in cv.split(X, y))

        covs, _, scores = zip(*this_path)
        covs = zip(*covs)
        scores = zip(*scores)
        path.extend(zip(alphas, scores, covs))
        path = sorted(path, key=operator.itemgetter(0), reverse=True)

        # Find the maximum score over the path
        best_score = -np.inf
        last_finite_idx = 0
        for index, (alpha, scores, _) in enumerate(path):
            this_score = np.mean(scores)
            if this_score >= .1 / np.finfo(np.float64).eps:
                this_score = np.nan
            if np.isfinite(this_score):
                last_finite_idx = index
            if this_score >= best_score:
                best_score = this_score
                best_index = index

        # Refine the grid around the best alpha found so far
        if best_index == 0:
            alpha_1 = path[0][0]
            alpha_0 = path[1][0]
        elif (best_index == last_finite_idx
                and not best_index == len(path) - 1):
            # Non-converged models on the upper bound of the grid:
            # refine the grid there
            alpha_1 = path[best_index][0]
            alpha_0 = path[best_index + 1][0]
        elif best_index == len(path) - 1:
            alpha_1 = path[best_index][0]
            alpha_0 = 0.01 * path[best_index][0]
        else:
            alpha_1 = path[best_index - 1][0]
            alpha_0 = path[best_index + 1][0]

        if not isinstance(n_alphas, collections.Sequence):
            alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
                                 n_alphas + 2)
            alphas = alphas[1:-1]

        if self.verbose and n_refinements > 1:
            print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
                  % (i + 1, n_refinements, time.time() - t0))

    path = list(zip(*path))
    grid_scores = list(path[1])
    alphas = list(path[0])
    # Finally, compute the score with alpha = 0
    alphas.append(0)
    grid_scores.append(cross_val_score(EmpiricalCovariance(), X, cv=cv,
                                       n_jobs=self.n_jobs,
                                       verbose=inner_verbose))
    self.grid_scores_ = np.array(grid_scores)
    best_alpha = alphas[best_index]
    self.alpha_ = best_alpha
    self.cv_alphas_ = alphas

    # Finally fit the model with the selected alpha
    self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
        emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
        enet_tol=self.enet_tol, max_iter=self.max_iter,
        verbose=inner_verbose, return_n_iter=True)
    return self
|
'Saves the covariance and precision estimates
Storage is done according to `self.store_precision`.
Precision stored only if invertible.
Parameters
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.'
def _set_covariance(self, covariance):
    covariance = check_array(covariance)
    # set covariance
    self.covariance_ = covariance
    # set precision, if the user asked for it
    if self.store_precision:
        self.precision_ = linalg.pinvh(covariance)
    else:
        self.precision_ = None
|
'Getter for the precision matrix.
Returns
precision_ : array-like,
The precision matrix associated to the current covariance object.'
def get_precision(self):
    if self.store_precision:
        precision = self.precision_
    else:
        precision = linalg.pinvh(self.covariance_)
    return precision
|
'Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
self : object
Returns self.'
def fit(self, X, y=None):
    X = check_array(X)
    if self.assume_centered:
        self.location_ = np.zeros(X.shape[1])
    else:
        self.location_ = X.mean(0)
    covariance = empirical_covariance(X,
                                      assume_centered=self.assume_centered)
    self._set_covariance(covariance)
    return self
|
'Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is assumed to be drawn from the same distribution as
the data used in fit (including centering).
y : not used, present for API consistency purposes.
Returns
res : float
The log-likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.'
def score(self, X_test, y=None):
    # compute the empirical covariance of the test set
    test_cov = empirical_covariance(X_test - self.location_,
                                    assume_centered=True)
    # compute the log-likelihood
    res = log_likelihood(test_cov, self.get_precision())
    return res
|
'Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A)))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.'
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
               squared=True):
    # compute the error
    error = comp_cov - self.covariance_
    # compute the error norm
    if norm == 'frobenius':
        squared_norm = np.sum(error ** 2)
    elif norm == 'spectral':
        squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
    else:
        raise NotImplementedError(
            'Only spectral and frobenius norms are implemented')
    # optionally scale the error norm
    if scaling:
        squared_norm = squared_norm / error.shape[0]
    # finally return either the squared norm or the norm
    if squared:
        result = squared_norm
    else:
        result = np.sqrt(squared_norm)
    return result
|
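To make the default behaviour concrete, the sketch below compares error_norm against a direct NumPy computation of the scaled squared Frobenius error, using scikit-learn's EmpiricalCovariance and ShrunkCovariance as two estimators of the same data (an illustrative check, not part of the original code):

import numpy as np
from sklearn.covariance import EmpiricalCovariance, ShrunkCovariance

rng = np.random.RandomState(0)
X = rng.randn(500, 5)

emp = EmpiricalCovariance().fit(X)
shrunk = ShrunkCovariance(shrinkage=0.2).fit(X)

# default arguments: squared Frobenius norm divided by n_features
err = emp.error_norm(shrunk.covariance_)
A = shrunk.covariance_ - emp.covariance_
manual = np.sum(A ** 2) / A.shape[0]
print(np.isclose(err, manual))  # expected: True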
'Computes the squared Mahalanobis distances of given observations.
Parameters
observations : array-like, shape = [n_observations, n_features]
The observations for which we compute the Mahalanobis distances.
Observations are assumed to be drawn from the same distribution
as the data used in fit.
Returns
mahalanobis_distance : array, shape = [n_observations,]
Squared Mahalanobis distances of the observations.'
def mahalanobis(self, observations):
    precision = self.get_precision()
    # compute the squared Mahalanobis distances
    centered_obs = observations - self.location_
    mahalanobis_dist = np.sum(
        np.dot(centered_obs, precision) * centered_obs, 1)
    return mahalanobis_dist
|
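A short usage sketch of the squared Mahalanobis distance above, assuming scikit-learn's EmpiricalCovariance; it cross-checks the method against the explicit quadratic form (x - location)^T precision (x - location):

import numpy as np
from sklearn.covariance import EmpiricalCovariance

rng = np.random.RandomState(0)
X = rng.randn(300, 3)

cov = EmpiricalCovariance().fit(X)
d2 = cov.mahalanobis(X)  # squared distances, shape (300,)

# explicit quadratic form for the first observation
delta = X[0] - cov.location_
manual = delta @ cov.get_precision() @ delta
print(np.isclose(d2[0], manual))  # expected: True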
'Fits the shrunk covariance model
according to the given training data and parameters.
Parameters
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
self : object
Returns self.'
def fit(self, X, y=None):
    X = check_array(X)
    if self.assume_centered:
        self.location_ = np.zeros(X.shape[1])
    else:
        self.location_ = X.mean(0)
    covariance = empirical_covariance(X,
                                      assume_centered=self.assume_centered)
    covariance = shrunk_covariance(covariance, self.shrinkage)
    self._set_covariance(covariance)
    return self
|
'Fits the Ledoit-Wolf shrunk covariance model
according to the given training data and parameters.
Parameters
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
self : object
Returns self.'
def fit(self, X, y=None):
    X = check_array(X)
    if self.assume_centered:
        self.location_ = np.zeros(X.shape[1])
    else:
        self.location_ = X.mean(0)
    covariance, shrinkage = ledoit_wolf(X - self.location_,
                                        assume_centered=True,
                                        block_size=self.block_size)
    self.shrinkage_ = shrinkage
    self._set_covariance(covariance)
    return self
|
'Fits the Oracle Approximating Shrinkage covariance model
according to the given training data and parameters.
Parameters
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
self : object
Returns self.'
def fit(self, X, y=None):
    X = check_array(X)
    if self.assume_centered:
        self.location_ = np.zeros(X.shape[1])
    else:
        self.location_ = X.mean(0)
    covariance, shrinkage = oas(X - self.location_, assume_centered=True)
    self.shrinkage_ = shrinkage
    self._set_covariance(covariance)
    return self
|
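The shrinkage fits above all produce a convex combination of the empirical covariance and a scaled identity; only the choice of the shrinkage coefficient differs. A small illustrative check of that identity for the OAS estimator, assuming scikit-learn's OAS and empirical_covariance (exactness is expected up to floating point):

import numpy as np
from sklearn.covariance import OAS, empirical_covariance

rng = np.random.RandomState(0)
X = rng.randn(100, 4)

oas_est = OAS().fit(X)
emp = empirical_covariance(X)
mu = np.trace(emp) / emp.shape[0]
reconstructed = ((1 - oas_est.shrinkage_) * emp
                 + oas_est.shrinkage_ * mu * np.eye(4))
print(np.allclose(oas_est.covariance_, reconstructed))  # expected: True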
'Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
cov : array, shape=(n_features, n_features)
Estimated covariance of data.'
def get_covariance(self):
    components_ = self.components_
    exp_var = self.explained_variance_
    if self.whiten:
        components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    cov = np.dot(components_.T * exp_var_diff, components_)
    cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
    return cov
|
'Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
precision : array, shape=(n_features, n_features)
Estimated precision of data.'
def get_precision(self):
    n_features = self.components_.shape[1]

    # handle corner cases first
    if self.n_components_ == 0:
        return np.eye(n_features) / self.noise_variance_
    if self.n_components_ == n_features:
        return linalg.inv(self.get_covariance())

    # get precision using the matrix inversion lemma
    components_ = self.components_
    exp_var = self.explained_variance_
    if self.whiten:
        components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    precision = np.dot(components_, components_.T) / self.noise_variance_
    precision.flat[::len(precision) + 1] += 1. / exp_var_diff
    precision = np.dot(components_.T,
                       np.dot(linalg.inv(precision), components_))
    precision /= -(self.noise_variance_ ** 2)
    precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
    return precision
|
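A quick numerical sanity check of the two generative-model helpers above, assuming scikit-learn's PCA: with fewer components than features the precision obtained via the matrix inversion lemma should match a direct inverse of the model covariance, and with all components kept the model covariance should reduce to the sample covariance (an illustrative check, not part of the original code):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 6)

pca = PCA(n_components=3).fit(X)
print(np.allclose(pca.get_precision(),
                  np.linalg.inv(pca.get_covariance())))  # expected: True

full = PCA().fit(X)  # keep all components -> zero noise variance
print(np.allclose(full.get_covariance(), np.cov(X.T)))   # expected: True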
'Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
X_new : array-like, shape (n_samples, n_components)
Examples
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP'
def transform(self, X):
    check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)

    X = check_array(X)
    if self.mean_ is not None:
        X = X - self.mean_
    X_transformed = np.dot(X, self.components_.T)
    if self.whiten:
        X_transformed /= np.sqrt(self.explained_variance_)
    return X_transformed
|
'Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
X_original : array-like, shape (n_samples, n_features)
Notes
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.'
def inverse_transform(self, X):
    if self.whiten:
        return np.dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
                      self.components_) + self.mean_
    else:
        return np.dot(X, self.components_) + self.mean_
|
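The note on whitening can be checked directly: when no components are dropped, inverse_transform undoes both the projection and the whitening. A minimal round-trip sketch, assuming scikit-learn's PCA, which exposes this same transform/inverse_transform pair:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(100, 4)

pca = PCA(n_components=4, whiten=True).fit(X)  # keep every component
Z = pca.transform(X)
print(np.allclose(pca.inverse_transform(Z), X))  # expected: True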
'Fit the model
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
compute_sources : bool
If False, the sources are not computed; only the rotation matrix is
estimated. This can save memory when working with big data. Defaults to False.
Returns
X_new : array-like, shape (n_samples, n_components)'
def _fit(self, X, compute_sources=False):
    fun_args = {} if self.fun_args is None else self.fun_args
    whitening, unmixing, sources, X_mean, self.n_iter_ = fastica(
        X=X, n_components=self.n_components, algorithm=self.algorithm,
        whiten=self.whiten, fun=self.fun, fun_args=fun_args,
        max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
        random_state=self.random_state, return_X_mean=True,
        compute_sources=compute_sources, return_n_iter=True)

    if self.whiten:
        self.components_ = np.dot(unmixing, whitening)
        self.mean_ = X_mean
        self.whitening_ = whitening
    else:
        self.components_ = unmixing

    self.mixing_ = linalg.pinv(self.components_)

    if compute_sources:
        self.__sources = sources

    return sources
|
'Fit the model and recover the sources from X.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
X_new : array-like, shape (n_samples, n_components)'
def fit_transform(self, X, y=None):
    return self._fit(X, compute_sources=True)
|
'Fit the model to X.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
self'
def fit(self, X, y=None):
    self._fit(X, compute_sources=False)
    return self
|
'Recover the sources from X (apply the unmixing matrix).
Parameters
X : array-like, shape (n_samples, n_features)
Data to transform, where n_samples is the number of samples
and n_features is the number of features.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
X_new : array-like, shape (n_samples, n_components)'
def transform(self, X, y='deprecated', copy=True):
    if not isinstance(y, string_types) or y != 'deprecated':
        warnings.warn("The parameter y on transform() is deprecated since "
                      "0.19 and will be removed in 0.21",
                      DeprecationWarning)

    check_is_fitted(self, 'mixing_')

    X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
    if self.whiten:
        X -= self.mean_

    return np.dot(X, self.components_.T)
|
'Transform the sources back to the mixed data (apply mixing matrix).
Parameters
X : array-like, shape (n_samples, n_components)
Sources, where n_samples is the number of samples
and n_components is the number of components.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
X_new : array-like, shape (n_samples, n_features)'
def inverse_transform(self, X, copy=True):
    check_is_fitted(self, 'mixing_')

    X = check_array(X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES)
    X = np.dot(X, self.mixing_.T)
    if self.whiten:
        X += self.mean_

    return X
|
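A short round trip through the two methods above, assuming scikit-learn's FastICA: with as many components as features, applying the unmixing matrix and then the mixing matrix recovers the observed data up to floating-point error. The sources and mixing matrix below are synthetic:

import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
S = rng.laplace(size=(500, 3))  # independent non-Gaussian sources
A = rng.randn(3, 3)             # mixing matrix
X = S @ A.T

ica = FastICA(n_components=3, random_state=0)
S_est = ica.fit_transform(X)            # apply the unmixing matrix
X_back = ica.inverse_transform(S_est)   # re-apply the mixing matrix
print(np.allclose(X_back, X, atol=1e-8))  # expected: True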
'Fit LSI model on training data X.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
self : object
Returns the transformer object.'
def fit(self, X, y=None):
    self.fit_transform(X)
    return self
|
'Fit LSI model to X and perform dimensionality reduction on X.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.'
def fit_transform(self, X, y=None):
    X = check_array(X, accept_sparse=['csr', 'csc'])
    random_state = check_random_state(self.random_state)

    if self.algorithm == 'arpack':
        U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
        # svds doesn't follow the scipy.linalg.svd/randomized_svd
        # conventions, so reverse its outputs.
        Sigma = Sigma[::-1]
        U, VT = svd_flip(U[:, ::-1], VT[::-1])
    elif self.algorithm == 'randomized':
        k = self.n_components
        n_features = X.shape[1]
        if k >= n_features:
            raise ValueError('n_components must be < n_features;'
                             ' got %d >= %d' % (k, n_features))
        U, Sigma, VT = randomized_svd(X, self.n_components,
                                      n_iter=self.n_iter,
                                      random_state=random_state)
    else:
        raise ValueError('unknown algorithm %r' % self.algorithm)

    self.components_ = VT

    # Calculate explained variance and explained variance ratio
    X_transformed = U * Sigma
    self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
    if sp.issparse(X):
        _, full_var = mean_variance_axis(X, axis=0)
        full_var = full_var.sum()
    else:
        full_var = np.var(X, axis=0).sum()
    self.explained_variance_ratio_ = exp_var / full_var
    self.singular_values_ = Sigma  # store the singular values

    return X_transformed
|
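Since the docstrings above describe this transformer as an LSI model, a typical use is to feed it a sparse term-document matrix; a brief sketch, assuming scikit-learn's TfidfVectorizer and TruncatedSVD (corpus and sizes are illustrative):

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD

corpus = [
    "the cat sat on the mat",
    "the dog sat on the log",
    "cats and dogs are pets",
    "logs and mats are objects",
]
X = TfidfVectorizer().fit_transform(corpus)  # sparse (4, n_terms) matrix

svd = TruncatedSVD(n_components=2, random_state=0)
X_lsi = svd.fit_transform(X)                 # dense (4, 2) array
print(X_lsi.shape, svd.explained_variance_ratio_.sum())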
'Perform dimensionality reduction on X.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.'
def transform(self, X):
    X = check_array(X, accept_sparse='csr')
    return safe_sparse_dot(X, self.components_.T)
|
'Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
X : array-like, shape (n_samples, n_components)
New data.
Returns
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.'
def inverse_transform(self, X):
    X = check_array(X)
    return np.dot(X, self.components_)
|
'Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
X_new : array, shape (n_samples, n_components)
Transformed data'
def transform(self, X):
    check_is_fitted(self, 'components_')

    X = check_array(X)
    n_samples, n_features = X.shape

    code = sparse_encode(
        X, self.components_, algorithm=self.transform_algorithm,
        n_nonzero_coefs=self.transform_n_nonzero_coefs,
        alpha=self.transform_alpha, n_jobs=self.n_jobs)

    if self.split_sign:
        # feature vector is split into a positive and a negative side
        n_samples, n_features = code.shape
        split_code = np.empty((n_samples, 2 * n_features))
        split_code[:, :n_features] = np.maximum(code, 0)
        split_code[:, n_features:] = -np.minimum(code, 0)
        code = split_code

    return code
|
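The split_sign branch above doubles the number of output features by separating positive and negative coefficients; a minimal sketch, assuming scikit-learn's SparseCoder (the dictionary and sizes below are made up for illustration):

import numpy as np
from sklearn.decomposition import SparseCoder

rng = np.random.RandomState(0)
D = rng.randn(10, 8)                           # 10 atoms, 8 features
D /= np.linalg.norm(D, axis=1, keepdims=True)  # unit-norm atoms
X = rng.randn(5, 8)

plain = SparseCoder(D, transform_algorithm='omp',
                    transform_n_nonzero_coefs=3)
split = SparseCoder(D, transform_algorithm='omp',
                    transform_n_nonzero_coefs=3, split_sign=True)
print(plain.transform(X).shape)  # (5, 10)
print(split.transform(X).shape)  # (5, 20): positive and negative parts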
'Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
self : object
Returns the object itself'
def fit(self, X, y=None):
    return self
|
'Fit the model from data in X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
self : object
Returns the object itself'
def fit(self, X, y=None):
    random_state = check_random_state(self.random_state)
    X = check_array(X)
    if self.n_components is None:
        n_components = X.shape[1]
    else:
        n_components = self.n_components

    V, U, E, self.n_iter_ = dict_learning(
        X, n_components, self.alpha, tol=self.tol, max_iter=self.max_iter,
        method=self.fit_algorithm, n_jobs=self.n_jobs,
        code_init=self.code_init, dict_init=self.dict_init,
        verbose=self.verbose, random_state=random_state,
        return_n_iter=True)
    self.components_ = U
    self.error_ = E
    return self
|
'Fit the model from data in X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
def fit(self, X, y=None):
    random_state = check_random_state(self.random_state)
    X = check_array(X)
    U, (A, B), self.n_iter_ = dict_learning_online(
        X, self.n_components, self.alpha, n_iter=self.n_iter,
        return_code=False, method=self.fit_algorithm, n_jobs=self.n_jobs,
        dict_init=self.dict_init, batch_size=self.batch_size,
        shuffle=self.shuffle, verbose=self.verbose,
        random_state=random_state, return_inner_stats=True,
        return_n_iter=True)
    self.components_ = U
    self.inner_stats_ = (A, B)
    self.iter_offset_ = self.n_iter
    return self
|
'Updates the model using the data in X as a mini-batch.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset : integer, optional
The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
self : object
Returns the instance itself.'
def partial_fit(self, X, y=None, iter_offset=None):
    if not hasattr(self, 'random_state_'):
        self.random_state_ = check_random_state(self.random_state)
    X = check_array(X)
    if hasattr(self, 'components_'):
        dict_init = self.components_
    else:
        dict_init = self.dict_init
    inner_stats = getattr(self, 'inner_stats_', None)
    if iter_offset is None:
        iter_offset = getattr(self, 'iter_offset_', 0)
    U, (A, B) = dict_learning_online(
        X, self.n_components, self.alpha, n_iter=self.n_iter,
        method=self.fit_algorithm, n_jobs=self.n_jobs,
        dict_init=dict_init, batch_size=len(X), shuffle=False,
        verbose=self.verbose, return_code=False, iter_offset=iter_offset,
        random_state=self.random_state_, return_inner_stats=True,
        inner_stats=inner_stats)
    self.components_ = U

    # keep track of the algorithm state for subsequent partial_fit calls
    self.inner_stats_ = (A, B)
    self.iter_offset_ = iter_offset + self.n_iter
    return self
|
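Because partial_fit keeps the sufficient statistics (A, B) and an iteration offset between calls, the dictionary can be refined one chunk of data at a time; a small streaming sketch, assuming scikit-learn's MiniBatchDictionaryLearning (sizes and chunking are arbitrary):

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

rng = np.random.RandomState(0)
X = rng.randn(300, 16)

dico = MiniBatchDictionaryLearning(n_components=8, batch_size=10,
                                   random_state=0)
for start in range(0, X.shape[0], 50):  # feed 50-sample chunks
    dico.partial_fit(X[start:start + 50])
print(dico.components_.shape)           # (8, 16)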
'Fit the FactorAnalysis model to X using EM
Parameters
X : array-like, shape (n_samples, n_features)
Training data.
Returns
self'
def fit(self, X, y=None):
    X = check_array(X, copy=self.copy, dtype=np.float64)

    n_samples, n_features = X.shape
    n_components = self.n_components
    if n_components is None:
        n_components = n_features
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_

    # some constant terms
    nsqrt = sqrt(n_samples)
    llconst = n_features * log(2. * np.pi) + n_components
    var = np.var(X, axis=0)

    if self.noise_variance_init is None:
        psi = np.ones(n_features, dtype=X.dtype)
    else:
        if len(self.noise_variance_init) != n_features:
            raise ValueError("noise_variance_init dimension does not "
                             "match the number of features : %d != %d" %
                             (len(self.noise_variance_init), n_features))
        psi = np.array(self.noise_variance_init)

    loglike = []
    old_ll = -np.inf
    SMALL = 1e-12

    # the SVD outputs are modified to also return the unexplained
    # variance, to allow a unified computation of the log-likelihood
    if self.svd_method == 'lapack':
        def my_svd(X):
            _, s, V = linalg.svd(X, full_matrices=False)
            return (s[:n_components], V[:n_components],
                    squared_norm(s[n_components:]))
    elif self.svd_method == 'randomized':
        random_state = check_random_state(self.random_state)

        def my_svd(X):
            _, s, V = randomized_svd(X, n_components,
                                     random_state=random_state,
                                     n_iter=self.iterated_power)
            return s, V, squared_norm(X) - squared_norm(s)
    else:
        raise ValueError('SVD method %s is not supported. Please consider'
                         ' the documentation' % self.svd_method)

    for i in xrange(self.max_iter):
        # SMALL helps numerics
        sqrt_psi = np.sqrt(psi) + SMALL
        s, V, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
        s **= 2
        # use 'maximum' here to avoid sqrt problems
        W = np.sqrt(np.maximum(s - 1., 0.))[:, np.newaxis] * V
        del V
        W *= sqrt_psi

        # log-likelihood
        ll = llconst + np.sum(np.log(s))
        ll += unexp_var + np.sum(np.log(psi))
        ll *= -n_samples / 2.
        loglike.append(ll)
        if (ll - old_ll) < self.tol:
            break
        old_ll = ll

        psi = np.maximum(var - np.sum(W ** 2, axis=0), SMALL)
    else:
        warnings.warn('FactorAnalysis did not converge. You might want'
                      ' to increase the number of iterations.',
                      ConvergenceWarning)

    self.components_ = W
    self.noise_variance_ = psi
    self.loglike_ = loglike
    self.n_iter_ = i + 1
    return self
|
'Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
X : array-like, shape (n_samples, n_features)
Training data.
Returns
X_new : array-like, shape (n_samples, n_components)
The latent variables of X.'
def transform(self, X):
    check_is_fitted(self, 'components_')

    X = check_array(X)
    Ih = np.eye(len(self.components_))

    X_transformed = X - self.mean_

    Wpsi = self.components_ / self.noise_variance_
    cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
    tmp = np.dot(X_transformed, Wpsi.T)
    X_transformed = np.dot(tmp, cov_z)

    return X_transformed
|
'Compute data covariance with the FactorAnalysis model.
``cov = components_.T * components_ + diag(noise_variance)``
Returns
cov : array, shape (n_features, n_features)
Estimated covariance of data.'
def get_covariance(self):
    check_is_fitted(self, 'components_')

    cov = np.dot(self.components_.T, self.components_)
    cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
    return cov
|
'Compute data precision matrix with the FactorAnalysis model.
Returns
precision : array, shape (n_features, n_features)
Estimated precision of data.'
def get_precision(self):
    check_is_fitted(self, 'components_')

    n_features = self.components_.shape[1]

    # handle corner cases first
    if self.n_components == 0:
        return np.diag(1. / self.noise_variance_)
    if self.n_components == n_features:
        return linalg.inv(self.get_covariance())

    # get precision using the matrix inversion lemma
    components_ = self.components_
    precision = np.dot(components_ / self.noise_variance_, components_.T)
    precision.flat[::len(precision) + 1] += 1.
    precision = np.dot(components_.T,
                       np.dot(linalg.inv(precision), components_))
    precision /= self.noise_variance_[:, np.newaxis]
    precision /= -self.noise_variance_[np.newaxis, :]
    precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
    return precision
|
'Compute the log-likelihood of each sample
Parameters
X : array, shape (n_samples, n_features)
The data
Returns
ll : array, shape (n_samples,)
Log-likelihood of each sample under the current model'
def score_samples(self, X):
    check_is_fitted(self, 'components_')

    Xr = X - self.mean_
    precision = self.get_precision()
    n_features = X.shape[1]
    log_like = np.zeros(X.shape[0])
    log_like = -.5 * (Xr * np.dot(Xr, precision)).sum(axis=1)
    log_like -= .5 * (n_features * log(2. * np.pi)
                      - fast_logdet(precision))
    return log_like
|
'Compute the average log-likelihood of the samples
Parameters
X : array, shape (n_samples, n_features)
The data
Returns
ll : float
Average log-likelihood of the samples under the current model'
def score(self, X, y=None):
    return np.mean(self.score_samples(X))
|
'Fit the model using the precomputed kernel matrix K.'
def _fit_transform(self, K):
    # center the kernel matrix
    K = self._centerer.fit_transform(K)

    if self.n_components is None:
        n_components = K.shape[0]
    else:
        n_components = min(K.shape[0], self.n_components)

    # choose the eigensolver
    if self.eigen_solver == 'auto':
        if K.shape[0] > 200 and n_components < 10:
            eigen_solver = 'arpack'
        else:
            eigen_solver = 'dense'
    else:
        eigen_solver = self.eigen_solver

    if eigen_solver == 'dense':
        self.lambdas_, self.alphas_ = linalg.eigh(
            K, eigvals=(K.shape[0] - n_components, K.shape[0] - 1))
    elif eigen_solver == 'arpack':
        random_state = check_random_state(self.random_state)
        # initialize with [-1, 1] as in ARPACK
        v0 = random_state.uniform(-1, 1, K.shape[0])
        self.lambdas_, self.alphas_ = eigsh(K, n_components, which='LA',
                                            tol=self.tol,
                                            maxiter=self.max_iter, v0=v0)

    # sort eigenvectors in descending order of eigenvalue
    indices = self.lambdas_.argsort()[::-1]
    self.lambdas_ = self.lambdas_[indices]
    self.alphas_ = self.alphas_[:, indices]

    # remove eigenvectors with a zero eigenvalue
    if self.remove_zero_eig or self.n_components is None:
        self.alphas_ = self.alphas_[:, self.lambdas_ > 0]
        self.lambdas_ = self.lambdas_[self.lambdas_ > 0]

    return K
|
'Fit the model from data in X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
def fit(self, X, y=None):
    X = check_array(X, accept_sparse='csr', copy=self.copy_X)
    K = self._get_kernel(X)
    self._fit_transform(K)

    if self.fit_inverse_transform:
        sqrt_lambdas = np.diag(np.sqrt(self.lambdas_))
        X_transformed = np.dot(self.alphas_, sqrt_lambdas)
        self._fit_inverse_transform(X_transformed, X)

    self.X_fit_ = X
    return self
|
'Fit the model from data in X and transform X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
X_new : array-like, shape (n_samples, n_components)'
def fit_transform(self, X, y=None, **params):
    self.fit(X, **params)

    X_transformed = self.alphas_ * np.sqrt(self.lambdas_)

    if self.fit_inverse_transform:
        self._fit_inverse_transform(X_transformed, X)

    return X_transformed
|
'Transform X.
Parameters
X : array-like, shape (n_samples, n_features)
Returns
X_new : array-like, shape (n_samples, n_components)'
def transform(self, X):
    check_is_fitted(self, 'X_fit_')

    K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
    return np.dot(K, self.alphas_ / np.sqrt(self.lambdas_))
|
'Transform X back to original space.
Parameters
X : array-like, shape (n_samples, n_components)
Returns
X_new : array-like, shape (n_samples, n_features)
References
"Learning to Find Pre-Images", G BakIr et al, 2004.'
def inverse_transform(self, X):
    if not self.fit_inverse_transform:
        raise NotFittedError("The fit_inverse_transform parameter was not"
                             " set to True when instantiating and hence "
                             "the inverse transform is not available.")

    K = self._get_kernel(X, self.X_transformed_fit_)
    return np.dot(K, self.dual_coef_)
|
'Fit the model from data in X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
def fit(self, X, y=None):
    random_state = check_random_state(self.random_state)
    X = check_array(X)
    if self.n_components is None:
        n_components = X.shape[1]
    else:
        n_components = self.n_components
    code_init = self.V_init.T if self.V_init is not None else None
    dict_init = self.U_init.T if self.U_init is not None else None
    Vt, _, E, self.n_iter_ = dict_learning(
        X.T, n_components, self.alpha, tol=self.tol,
        max_iter=self.max_iter, method=self.method, n_jobs=self.n_jobs,
        verbose=self.verbose, random_state=random_state,
        code_init=code_init, dict_init=dict_init, return_n_iter=True)
    self.components_ = Vt.T
    self.error_ = E
    return self
|
'Least Squares projection of the data onto the sparse components.
To avoid instability issues in case the system is under-determined,
regularization can be applied (Ridge regression) via the
`ridge_alpha` parameter.
Note that Sparse PCA components orthogonality is not enforced as in PCA
hence one cannot use a simple linear projection.
Parameters
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
ridge_alpha : float, default: 0.01
Amount of ridge shrinkage to apply in order to improve
conditioning.
.. deprecated:: 0.19
This parameter will be removed in 0.21.
Specify ``ridge_alpha`` in the ``SparsePCA`` constructor.
Returns
X_new : array, shape (n_samples, n_components)
Transformed data.'
def transform(self, X, ridge_alpha='deprecated'):
    check_is_fitted(self, 'components_')

    X = check_array(X)
    if ridge_alpha != 'deprecated':
        warnings.warn("The ridge_alpha parameter on transform() is "
                      "deprecated since 0.19 and will be removed in 0.21. "
                      "Specify ridge_alpha in the SparsePCA constructor.",
                      DeprecationWarning)
        if ridge_alpha is None:
            ridge_alpha = self.ridge_alpha
    else:
        ridge_alpha = self.ridge_alpha

    U = ridge_regression(self.components_.T, X.T, ridge_alpha,
                         solver='cholesky')
    s = np.sqrt((U ** 2).sum(axis=0))
    s[s == 0] = 1
    U /= s
    return U
|
'Fit the model from data in X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
def fit(self, X, y=None):
    random_state = check_random_state(self.random_state)
    X = check_array(X)
    if self.n_components is None:
        n_components = X.shape[1]
    else:
        n_components = self.n_components
    Vt, _, self.n_iter_ = dict_learning_online(
        X.T, n_components, alpha=self.alpha, n_iter=self.n_iter,
        return_code=True, dict_init=None, verbose=self.verbose,
        callback=self.callback, batch_size=self.batch_size,
        shuffle=self.shuffle, n_jobs=self.n_jobs, method=self.method,
        random_state=random_state, return_n_iter=True)
    self.components_ = Vt.T
    return self
|
'Fit the model with X, using minibatches of size batch_size.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : Passthrough for ``Pipeline`` compatibility.
Returns
self : object
Returns the instance itself.'
def fit(self, X, y=None):
    self.components_ = None
    self.n_samples_seen_ = 0
    self.mean_ = 0.0
    self.var_ = 0.0
    self.singular_values_ = None
    self.explained_variance_ = None
    self.explained_variance_ratio_ = None
    self.noise_variance_ = None

    X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32])
    n_samples, n_features = X.shape

    if self.batch_size is None:
        self.batch_size_ = 5 * n_features
    else:
        self.batch_size_ = self.batch_size

    for batch in gen_batches(n_samples, self.batch_size_):
        self.partial_fit(X[batch], check_input=False)

    return self
|
'Incremental fit with X. All of X is processed as a single batch.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
check_input : bool
Run check_array on X.
Returns
self : object
Returns the instance itself.'
def partial_fit(self, X, y=None, check_input=True):
    if check_input:
        X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32])
    n_samples, n_features = X.shape
    if not hasattr(self, 'components_'):
        self.components_ = None

    if self.n_components is None:
        self.n_components_ = n_features
    elif not 1 <= self.n_components <= n_features:
        raise ValueError("n_components=%r invalid for n_features=%d, need"
                         " more rows than columns for IncrementalPCA"
                         " processing" % (self.n_components, n_features))
    else:
        self.n_components_ = self.n_components

    if (self.components_ is not None) and (self.components_.shape[0] !=
                                           self.n_components_):
        raise ValueError("Number of input features has changed from %i "
                         "to %i between calls to partial_fit! Try "
                         "setting n_components to a fixed value." %
                         (self.components_.shape[0], self.n_components_))

    # This is the first partial_fit
    if not hasattr(self, 'n_samples_seen_'):
        self.n_samples_seen_ = 0
        self.mean_ = 0.0
        self.var_ = 0.0

    # Update stats - they are 0 if this is the first step
    col_mean, col_var, n_total_samples = _incremental_mean_and_var(
        X, last_mean=self.mean_, last_variance=self.var_,
        last_sample_count=self.n_samples_seen_)

    if self.n_samples_seen_ == 0:
        # If it is the first step, simply center X
        X -= col_mean
    else:
        col_batch_mean = np.mean(X, axis=0)
        X -= col_batch_mean
        # Build matrix of combined previous basis and new data
        mean_correction = \
            np.sqrt((self.n_samples_seen_ * n_samples) /
                    n_total_samples) * (self.mean_ - col_batch_mean)
        X = np.vstack((self.singular_values_.reshape((-1, 1)) *
                       self.components_, X, mean_correction))

    U, S, V = linalg.svd(X, full_matrices=False)
    U, V = svd_flip(U, V, u_based_decision=False)
    explained_variance = S ** 2 / (n_total_samples - 1)
    explained_variance_ratio = S ** 2 / np.sum(col_var * n_total_samples)

    self.n_samples_seen_ = n_total_samples
    self.components_ = V[:self.n_components_]
    self.singular_values_ = S[:self.n_components_]
    self.mean_ = col_mean
    self.var_ = col_var
    self.explained_variance_ = explained_variance[:self.n_components_]
    self.explained_variance_ratio_ = \
        explained_variance_ratio[:self.n_components_]
    if self.n_components_ < n_features:
        self.noise_variance_ = \
            explained_variance[self.n_components_:].mean()
    else:
        self.noise_variance_ = 0.0
    return self
|
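Since fit above simply resets the model and streams partial_fit over gen_batches slices, an explicit loop over the same batches reproduces it exactly; a minimal sketch assuming scikit-learn's IncrementalPCA and sklearn.utils.gen_batches:

import numpy as np
from sklearn.decomposition import IncrementalPCA
from sklearn.utils import gen_batches

rng = np.random.RandomState(0)
X = rng.randn(400, 10)

ipca = IncrementalPCA(n_components=3, batch_size=100).fit(X)

manual = IncrementalPCA(n_components=3, batch_size=100)
for batch in gen_batches(X.shape[0], 100):  # same batching as fit()
    manual.partial_fit(X[batch])

print(np.allclose(ipca.components_, manual.components_))  # expected: True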
'Fit the model with X.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
def fit(self, X, y=None):
    self._fit(X)
    return self
|
'Fit the model with X and apply the dimensionality reduction on X.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
X_new : array-like, shape (n_samples, n_components)'
def fit_transform(self, X, y=None):
    U, S, V = self._fit(X)
    U = U[:, :self.n_components_]

    if self.whiten:
        U *= sqrt(X.shape[0] - 1)
    else:
        U *= S[:self.n_components_]

    return U
|
'Dispatch to the right submethod depending on the chosen solver.'
def _fit(self, X):
    # Raise an error for sparse input: this is more informative than
    # the generic error raised by check_array.
    if issparse(X):
        raise TypeError('PCA does not support sparse input. See '
                        'TruncatedSVD for a possible alternative.')

    X = check_array(X, dtype=[np.float64, np.float32], ensure_2d=True,
                    copy=self.copy)

    # Handle n_components==None
    if self.n_components is None:
        n_components = X.shape[1]
    else:
        n_components = self.n_components

    # Handle svd_solver
    svd_solver = self.svd_solver
    if svd_solver == 'auto':
        # Small problem: just call full PCA
        if max(X.shape) <= 500:
            svd_solver = 'full'
        elif n_components >= 1 and n_components < .8 * min(X.shape):
            svd_solver = 'randomized'
        # This is also the case of n_components in (0, 1)
        else:
            svd_solver = 'full'

    # Call different fits for either full or truncated SVD
    if svd_solver == 'full':
        return self._fit_full(X, n_components)
    elif svd_solver in ['arpack', 'randomized']:
        return self._fit_truncated(X, n_components, svd_solver)
    else:
        raise ValueError("Unrecognized svd_solver='{0}'"
                         "".format(svd_solver))
|
'Fit the model by computing full SVD on X'
def _fit_full(self, X, n_components):
    n_samples, n_features = X.shape

    if n_components == 'mle':
        if n_samples < n_features:
            raise ValueError("n_components='mle' is only supported "
                             "if n_samples >= n_features")
    elif not 0 <= n_components <= n_features:
        raise ValueError("n_components=%r must be between 0 and "
                         "n_features=%r with svd_solver='full'"
                         % (n_components, n_features))

    # Center data
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_

    U, S, V = linalg.svd(X, full_matrices=False)
    # flip eigenvectors' sign to enforce deterministic output
    U, V = svd_flip(U, V)

    components_ = V

    # Get variance explained by singular values
    explained_variance_ = (S ** 2) / (n_samples - 1)
    total_var = explained_variance_.sum()
    explained_variance_ratio_ = explained_variance_ / total_var
    singular_values_ = S.copy()

    # Postprocess the number of components required
    if n_components == 'mle':
        n_components = \
            _infer_dimension_(explained_variance_, n_samples, n_features)
    elif 0 < n_components < 1.0:
        # smallest number of components whose cumulative explained
        # variance ratio reaches the requested fraction
        ratio_cumsum = stable_cumsum(explained_variance_ratio_)
        n_components = np.searchsorted(ratio_cumsum, n_components) + 1

    # Compute noise covariance using the probabilistic PCA model
    if n_components < min(n_features, n_samples):
        self.noise_variance_ = explained_variance_[n_components:].mean()
    else:
        self.noise_variance_ = 0.

    self.n_samples_, self.n_features_ = n_samples, n_features
    self.components_ = components_[:n_components]
    self.n_components_ = n_components
    self.explained_variance_ = explained_variance_[:n_components]
    self.explained_variance_ratio_ = \
        explained_variance_ratio_[:n_components]
    self.singular_values_ = singular_values_[:n_components]

    return U, S, V
|
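One detail worth making concrete in _fit_full: a fractional n_components in (0, 1) is converted into the smallest number of components whose cumulative explained-variance ratio reaches that fraction, via searchsorted on the cumulative sum. A small illustrative sketch of the equivalent computation, assuming scikit-learn's PCA:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(300, 8) @ rng.randn(8, 8)  # correlated features

pca = PCA(n_components=0.90, svd_solver='full').fit(X)

full = PCA(svd_solver='full').fit(X)
cumulative = np.cumsum(full.explained_variance_ratio_)
expected = np.searchsorted(cumulative, 0.90) + 1
print(pca.n_components_ == expected)     # expected: True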
'Fit the model by computing truncated SVD (by ARPACK or randomized)
on X'
def _fit_truncated(self, X, n_components, svd_solver):
    n_samples, n_features = X.shape

    if isinstance(n_components, six.string_types):
        raise ValueError("n_components=%r cannot be a string "
                         "with svd_solver='%s'"
                         % (n_components, svd_solver))
    elif not 1 <= n_components <= n_features:
        raise ValueError("n_components=%r must be between 1 and "
                         "n_features=%r with svd_solver='%s'"
                         % (n_components, n_features, svd_solver))
    elif svd_solver == 'arpack' and n_components == n_features:
        raise ValueError("n_components=%r must be strictly less than "
                         "n_features=%r with svd_solver='%s'"
                         % (n_components, n_features, svd_solver))

    random_state = check_random_state(self.random_state)

    # Center data
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_

    if svd_solver == 'arpack':
        # random init solution, as ARPACK does it internally
        v0 = random_state.uniform(-1, 1, size=min(X.shape))
        U, S, V = svds(X, k=n_components, tol=self.tol, v0=v0)
        # svds doesn't follow the scipy.linalg.svd/randomized_svd
        # conventions, so reverse its outputs.
        S = S[::-1]
        # flip eigenvectors' sign to enforce deterministic output
        U, V = svd_flip(U[:, ::-1], V[::-1])
    elif svd_solver == 'randomized':
        # sign flipping is done inside
        U, S, V = randomized_svd(X, n_components=n_components,
                                 n_iter=self.iterated_power,
                                 flip_sign=True,
                                 random_state=random_state)

    self.n_samples_, self.n_features_ = n_samples, n_features
    self.components_ = V
    self.n_components_ = n_components

    # Get variance explained by singular values
    self.explained_variance_ = (S ** 2) / (n_samples - 1)
    total_var = np.var(X, ddof=1, axis=0)
    self.explained_variance_ratio_ = \
        self.explained_variance_ / total_var.sum()
    self.singular_values_ = S.copy()
    if self.n_components_ < min(n_features, n_samples):
        self.noise_variance_ = (total_var.sum() -
                                self.explained_variance_.sum())
        self.noise_variance_ /= min(n_features, n_samples) - n_components
    else:
        self.noise_variance_ = 0.

    return U, S, V
|
'Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
X : array, shape(n_samples, n_features)
The data.
Returns
ll : array, shape (n_samples,)
Log-likelihood of each sample under the current model'
def score_samples(self, X):
    check_is_fitted(self, 'mean_')

    X = check_array(X)
    Xr = X - self.mean_
    n_features = X.shape[1]
    log_like = np.zeros(X.shape[0])
    precision = self.get_precision()
    log_like = -.5 * (Xr * np.dot(Xr, precision)).sum(axis=1)
    log_like -= .5 * (n_features * log(2. * np.pi)
                      - fast_logdet(precision))
    return log_like
|
'Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
X : array, shape(n_samples, n_features)
The data.
Returns
ll : float
Average log-likelihood of the samples under the current model'
def score(self, X, y=None):
    return np.mean(self.score_samples(X))
|
'Fit the model with X by extracting the first principal components.
Parameters
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
self : object
Returns the instance itself.'
def fit(self, X, y=None):
    self._fit(check_array(X))
    return self
|
'Fit the model to the data X.
Parameters
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.'
def _fit(self, X):
    random_state = check_random_state(self.random_state)
    X = np.atleast_2d(as_float_array(X, copy=self.copy))

    n_samples = X.shape[0]

    # Center data
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_
    if self.n_components is None:
        n_components = X.shape[1]
    else:
        n_components = self.n_components

    U, S, V = randomized_svd(X, n_components,
                             n_iter=self.iterated_power,
                             random_state=random_state)

    self.explained_variance_ = exp_var = (S ** 2) / (n_samples - 1)
    full_var = np.var(X, ddof=1, axis=0).sum()
    self.explained_variance_ratio_ = exp_var / full_var
    self.singular_values_ = S  # store the singular values

    if self.whiten:
        self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
    else:
        self.components_ = V

    return X
|
'Apply dimensionality reduction on X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
X_new : array-like, shape (n_samples, n_components)'
def transform(self, X):
    check_is_fitted(self, 'mean_')

    X = check_array(X)
    if self.mean_ is not None:
        X = X - self.mean_

    X = np.dot(X, self.components_.T)
    return X
|
'Fit the model with X and apply the dimensionality reduction on X.
Parameters
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
X_new : array-like, shape (n_samples, n_components)'
def fit_transform(self, X, y=None):
    X = check_array(X)
    X = self._fit(X)
    return np.dot(X, self.components_.T)
|
'Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
X_original : array-like, shape (n_samples, n_features)
Notes
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.'
def inverse_transform(self, X):
    check_is_fitted(self, 'mean_')

    X_original = np.dot(X, self.components_)
    if self.mean_ is not None:
        X_original = X_original + self.mean_
    return X_original
|
'Check model parameters.'
def _check_params(self):
    if self.n_topics is not None:
        self._n_components = self.n_topics
        warnings.warn("n_topics has been renamed to n_components in "
                      "version 0.19 and will be removed in 0.21",
                      DeprecationWarning)
    else:
        self._n_components = self.n_components

    if self._n_components <= 0:
        raise ValueError("Invalid 'n_components' parameter: %r"
                         % self._n_components)

    if self.total_samples <= 0:
        raise ValueError("Invalid 'total_samples' parameter: %r"
                         % self.total_samples)

    if self.learning_offset < 0:
        raise ValueError("Invalid 'learning_offset' parameter: %r"
                         % self.learning_offset)

    if self.learning_method not in ('batch', 'online', None):
        raise ValueError("Invalid 'learning_method' parameter: %r"
                         % self.learning_method)
|
'Initialize latent variables.'
def _init_latent_vars(self, n_features):
    self.random_state_ = check_random_state(self.random_state)
    self.n_batch_iter_ = 1
    self.n_iter_ = 0

    if self.doc_topic_prior is None:
        self.doc_topic_prior_ = 1. / self._n_components
    else:
        self.doc_topic_prior_ = self.doc_topic_prior

    if self.topic_word_prior is None:
        self.topic_word_prior_ = 1. / self._n_components
    else:
        self.topic_word_prior_ = self.topic_word_prior

    init_gamma = 100.
    init_var = 1. / init_gamma
    # In the literature, this is called `lambda`
    self.components_ = self.random_state_.gamma(
        init_gamma, init_var, (self._n_components, n_features))
    # In the literature, this is `exp(E[log(beta)])`
    self.exp_dirichlet_component_ = np.exp(
        _dirichlet_expectation_2d(self.components_))
|
'E-step in EM update.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
Parameter that indicates whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run the M-step.
random_init : boolean
Parameter that indicates whether to initialize the document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel (optional)
Pre-initialized instance of joblib.Parallel.
Returns
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.'
def _e_step(self, X, cal_sstats, random_init, parallel=None):
    # Run the E-step in parallel
    random_state = self.random_state_ if random_init else None

    n_jobs = _get_n_jobs(self.n_jobs)
    if parallel is None:
        parallel = Parallel(n_jobs=n_jobs,
                            verbose=max(0, self.verbose - 1))
    results = parallel(
        delayed(_update_doc_distribution)(X[idx_slice, :],
                                          self.exp_dirichlet_component_,
                                          self.doc_topic_prior_,
                                          self.max_doc_update_iter,
                                          self.mean_change_tol,
                                          cal_sstats, random_state)
        for idx_slice in gen_even_slices(X.shape[0], n_jobs))

    # merge the per-slice results
    doc_topics, sstats_list = zip(*results)
    doc_topic_distr = np.vstack(doc_topics)

    if cal_sstats:
        # finish computing the sufficient statistics for the M-step
        suff_stats = np.zeros(self.components_.shape)
        for sstats in sstats_list:
            suff_stats += sstats
        suff_stats *= self.exp_dirichlet_component_
    else:
        suff_stats = None

    return (doc_topic_distr, suff_stats)
|
'EM update for 1 iteration.
Update ``components_`` by batch VB or online VB.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel
Pre-initialized instance of joblib.Parallel
Returns
doc_topic_distr : array, shape=(n_samples, n_components)
Unnormalized document topic distribution.'
def _em_step(self, X, total_samples, batch_update, parallel=None):
    # E-step
    _, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
                                 parallel=parallel)

    # M-step
    if batch_update:
        self.components_ = self.topic_word_prior_ + suff_stats
    else:
        # online update; in the literature, the weight is `rho`
        weight = np.power(self.learning_offset + self.n_batch_iter_,
                          -self.learning_decay)
        doc_ratio = float(total_samples) / X.shape[0]
        self.components_ *= (1 - weight)
        self.components_ += (weight * (self.topic_word_prior_
                                       + doc_ratio * suff_stats))

    # update variables derived from `components_`
    self.exp_dirichlet_component_ = np.exp(
        _dirichlet_expectation_2d(self.components_))
    self.n_batch_iter_ += 1
    return
|
'check X format
check X format and make sure no negative value in X.
Parameters
X : array-like or sparse matrix'
def _check_non_neg_array(self, X, whom):
    X = check_array(X, accept_sparse='csr')
    check_non_negative(X, whom)
    return X
|
'Online VB with Mini-Batch update.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
self'
def partial_fit(self, X, y=None):
    self._check_params()
    X = self._check_non_neg_array(X,
                                  'LatentDirichletAllocation.partial_fit')
    n_samples, n_features = X.shape
    batch_size = self.batch_size

    # initialize parameters or check consistency with the fitted model
    if not hasattr(self, 'components_'):
        self._init_latent_vars(n_features)

    if n_features != self.components_.shape[1]:
        raise ValueError('The provided data has %d dimensions while the '
                         'model was trained with feature size %d.'
                         % (n_features, self.components_.shape[1]))

    n_jobs = _get_n_jobs(self.n_jobs)
    with Parallel(n_jobs=n_jobs,
                  verbose=max(0, self.verbose - 1)) as parallel:
        for idx_slice in gen_batches(n_samples, batch_size):
            self._em_step(X[idx_slice, :],
                          total_samples=self.total_samples,
                          batch_update=False, parallel=parallel)
    return self
|
'Learn model for the data X with variational Bayes method.
When `learning_method` is \'online\', use mini-batch update.
Otherwise, use batch update.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
self'
def fit(self, X, y=None):
    self._check_params()
    X = self._check_non_neg_array(X, 'LatentDirichletAllocation.fit')
    n_samples, n_features = X.shape
    max_iter = self.max_iter
    evaluate_every = self.evaluate_every
    learning_method = self.learning_method
    if learning_method is None:
        warnings.warn("The default value for 'learning_method' will be "
                      "changed from 'online' to 'batch' in the release "
                      "0.20. This warning was introduced in 0.18.",
                      DeprecationWarning)
        learning_method = 'online'

    batch_size = self.batch_size

    # initialize parameters
    self._init_latent_vars(n_features)

    last_bound = None
    n_jobs = _get_n_jobs(self.n_jobs)
    with Parallel(n_jobs=n_jobs,
                  verbose=max(0, self.verbose - 1)) as parallel:
        for i in xrange(max_iter):
            if learning_method == 'online':
                for idx_slice in gen_batches(n_samples, batch_size):
                    self._em_step(X[idx_slice, :],
                                  total_samples=n_samples,
                                  batch_update=False, parallel=parallel)
            else:
                # batch update
                self._em_step(X, total_samples=n_samples,
                              batch_update=True, parallel=parallel)

            # check perplexity
            if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
                doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
                                                   random_init=False,
                                                   parallel=parallel)
                bound = self._perplexity_precomp_distr(
                    X, doc_topics_distr, sub_sampling=False)
                if self.verbose:
                    print('iteration: %d of max_iter: %d, perplexity: %.4f'
                          % (i + 1, max_iter, bound))

                if last_bound and abs(last_bound - bound) < self.perp_tol:
                    break
                last_bound = bound

            elif self.verbose:
                print('iteration: %d of max_iter: %d' % (i + 1, max_iter))
            self.n_iter_ += 1

        # compute the final perplexity value on the training set
        doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
                                           random_init=False,
                                           parallel=parallel)
        self.bound_ = self._perplexity_precomp_distr(X, doc_topics_distr,
                                                     sub_sampling=False)

    return self
|
'Transform data X according to fitted model.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
doc_topic_distr : shape=(n_samples, n_components)
Document topic distribution for X.'
def _unnormalized_transform(self, X):
    if not hasattr(self, 'components_'):
        raise NotFittedError("no 'components_' attribute in model."
                             " Please fit model first.")

    # make sure the feature size matches the fitted model
    X = self._check_non_neg_array(X, 'LatentDirichletAllocation.transform')
    n_samples, n_features = X.shape
    if n_features != self.components_.shape[1]:
        raise ValueError('The provided data has %d dimensions while the '
                         'model was trained with feature size %d.'
                         % (n_features, self.components_.shape[1]))

    doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
                                      random_init=False)

    return doc_topic_distr
|
'Transform data X according to the fitted model.
.. versionchanged:: 0.18
*doc_topic_distr* is now normalized
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
doc_topic_distr : shape=(n_samples, n_components)
Document topic distribution for X.'
def transform(self, X):
    doc_topic_distr = self._unnormalized_transform(X)
    doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
    return doc_topic_distr
|
'Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_components)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
It is used when calculating the bound in online learning.
Returns
score : float'
| def _approx_bound(self, X, doc_topic_distr, sub_sampling):
| def _loglikelihood(prior, distr, dirichlet_distr, size):
score = np.sum(((prior - distr) * dirichlet_distr))
score += np.sum((gammaln(distr) - gammaln(prior)))
score += np.sum((gammaln((prior * size)) - gammaln(np.sum(distr, 1))))
return score
is_sparse_x = sp.issparse(X)
(n_samples, n_components) = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in range(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[(idx_d + 1)]]
cnts = X_data[X_indptr[idx_d]:X_indptr[(idx_d + 1)]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[(idx_d, ids)]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids])
norm_phi = logsumexp(temp, axis=0)
score += np.dot(cnts, norm_phi)
score += _loglikelihood(doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self._n_components)
if sub_sampling:
doc_ratio = (float(self.total_samples) / n_samples)
score *= doc_ratio
score += _loglikelihood(topic_word_prior, self.components_, dirichlet_component_, n_features)
return score
|
'Calculate approximate log-likelihood as score.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
score : float
Use approximate bound as score.'
| def score(self, X, y=None):
| X = self._check_non_neg_array(X, 'LatentDirichletAllocation.score')
doc_topic_distr = self._unnormalized_transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
|
'Calculate approximate perplexity for data X with the ability to accept a
precomputed doc_topic_distr.
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_components)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
score : float
Perplexity score.'
| def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False):
| if (not hasattr(self, 'components_')):
raise NotFittedError("no 'components_' attribute in model. Please fit model first.")
X = self._check_non_neg_array(X, 'LatentDirichletAllocation.perplexity')
if (doc_topic_distr is None):
doc_topic_distr = self._unnormalized_transform(X)
else:
(n_samples, n_components) = doc_topic_distr.shape
if (n_samples != X.shape[0]):
raise ValueError('Number of samples in X and doc_topic_distr do not match.')
if (n_components != self._n_components):
raise ValueError('Number of topics does not match.')
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = (X.sum() * (float(self.total_samples) / current_samples))
else:
word_cnt = X.sum()
perword_bound = (bound / word_cnt)
return np.exp(((-1.0) * perword_bound))
|
'Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
.. versionchanged:: 0.19
*doc_topic_distr* argument has been deprecated and is ignored
because the user no longer has access to the unnormalized distribution
Parameters
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_components)
Document topic distribution.
This argument is deprecated and is currently being ignored.
.. deprecated:: 0.19
sub_sampling : bool
Do sub-sampling or not.
Returns
score : float
Perplexity score.'
| def perplexity(self, X, doc_topic_distr='deprecated', sub_sampling=False):
| if (doc_topic_distr != 'deprecated'):
warnings.warn("Argument 'doc_topic_distr' is deprecated and is being ignored as of 0.19. Support for this argument will be removed in 0.21.", DeprecationWarning)
return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)
|
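For illustration, a hedged sketch of how score and perplexity relate according to the bodies above; the synthetic count matrix and n_components value are assumptions, and exact numbers depend on the fit:
# score() returns the approximate variational bound on the log-likelihood;
# perplexity() turns the same bound into exp(-bound / total word count).
import numpy as np
from sklearn.decomposition import LatentDirichletAllocation
X = np.random.RandomState(0).randint(0, 5, size=(20, 30))   # synthetic counts (assumption)
lda = LatentDirichletAllocation(n_components=3, random_state=0).fit(X)
bound = lda.score(X)                    # approximate variational bound
print(lda.perplexity(X))                # per the body above: exp(-bound / X.sum())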
'Learn an NMF model for the data X and return the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init=\'custom\', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init=\'custom\', it is used as initial guess for the solution.
Returns
W : array, shape (n_samples, n_components)
Transformed data.'
| def fit_transform(self, X, y=None, W=None, H=None):
| X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
(W, H, n_iter_) = non_negative_factorization(X=X, W=W, H=H, n_components=self.n_components, init=self.init, update_H=True, solver=self.solver, beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio, regularization='both', random_state=self.random_state, verbose=self.verbose, shuffle=self.shuffle)
self.reconstruction_err_ = _beta_divergence(X, W, H, self.beta_loss, square_root=True)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
|
'Learn an NMF model for the data X.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Returns
self'
| def fit(self, X, y=None, **params):
| self.fit_transform(X, **params)
return self
|
'Transform the data X according to the fitted NMF model
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Returns
W : array, shape (n_samples, n_components)
Transformed data'
| def transform(self, X):
| check_is_fitted(self, 'n_components_')
(W, _, n_iter_) = non_negative_factorization(X=X, W=None, H=self.components_, n_components=self.n_components_, init=self.init, update_H=False, solver=self.solver, beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter, alpha=self.alpha, l1_ratio=self.l1_ratio, regularization='both', random_state=self.random_state, verbose=self.verbose, shuffle=self.shuffle)
return W
|
'Transform data back to its original space.
Parameters
W : {array-like, sparse matrix}, shape (n_samples, n_components)
Transformed data matrix
Returns
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix of original shape
.. versionadded:: 0.18'
| def inverse_transform(self, W):
| check_is_fitted(self, 'n_components_')
return np.dot(W, self.components_)
|
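A hedged round-trip sketch of the NMF methods above; the non-negative toy data and n_components value are illustrative:
import numpy as np
from sklearn.decomposition import NMF
X = np.abs(np.random.RandomState(0).randn(6, 4))   # non-negative toy data (assumption)
nmf = NMF(n_components=2, random_state=0)
W = nmf.fit_transform(X)                           # shape (6, 2); H is stored in components_
X_hat = nmf.inverse_transform(W)                   # np.dot(W, nmf.components_), shape (6, 4)
print(np.linalg.norm(X - X_hat))                   # Frobenius reconstruction error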
'Fit Kernel Ridge regression model
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
self : returns an instance of self.'
| def fit(self, X, y=None, sample_weight=None):
| (X, y) = check_X_y(X, y, accept_sparse=('csr', 'csc'), multi_output=True, y_numeric=True)
if ((sample_weight is not None) and (not isinstance(sample_weight, float))):
sample_weight = check_array(sample_weight, ensure_2d=False)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if (len(y.shape) == 1):
y = y.reshape((-1), 1)
ravel = True
copy = (self.kernel == 'precomputed')
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
|
'Predict using the kernel ridge model
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.'
| def predict(self, X):
| check_is_fitted(self, ['X_fit_', 'dual_coef_'])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
|
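A hedged usage sketch of the KernelRidge fit/predict pair above; the synthetic regression target, kernel choice, and hyperparameter values are assumptions:
import numpy as np
from sklearn.kernel_ridge import KernelRidge
rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X[:, 0] + 0.1 * rng.randn(50)                  # noisy linear target (assumption)
model = KernelRidge(alpha=1.0, kernel='rbf', gamma=0.5).fit(X, y)
print(model.predict(X[:5]))                        # np.dot(K(X[:5], X_fit_), dual_coef_)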
'Perform DBSCAN clustering from features or distance matrix.
Parameters
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric=\'precomputed\'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.'
| def fit(self, X, y=None, sample_weight=None):
| X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
(self.core_sample_indices_, self.labels_) = clust
if len(self.core_sample_indices_):
self.components_ = X[self.core_sample_indices_].copy()
else:
self.components_ = np.empty((0, X.shape[1]))
return self
|
'Performs clustering on X and returns cluster labels.
Parameters
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric=\'precomputed\'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
y : ndarray, shape (n_samples,)
cluster labels'
| def fit_predict(self, X, y=None, sample_weight=None):
| self.fit(X, sample_weight=sample_weight)
return self.labels_
|
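A hedged sketch of the DBSCAN API above; the point coordinates and the eps/min_samples values are made up for illustration:
import numpy as np
from sklearn.cluster import DBSCAN
X = np.array([[1.0, 1.0], [1.1, 1.0], [0.9, 1.1],
              [8.0, 8.0], [8.1, 7.9], [25.0, 25.0]])   # two tight groups plus an outlier
db = DBSCAN(eps=0.5, min_samples=2)
labels = db.fit_predict(X)                             # -1 marks noise points
print(labels)                                          # e.g. [ 0  0  0  1  1 -1]
print(db.core_sample_indices_)                         # indices of the core samples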
'Perform clustering.
Parameters
X : array-like, shape=[n_samples, n_features]
Samples to cluster.'
| def fit(self, X, y=None):
| X = check_array(X)
(self.cluster_centers_, self.labels_) = mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds, min_bin_freq=self.min_bin_freq, bin_seeding=self.bin_seeding, cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
|
'Predict the closest cluster each sample in X belongs to.
Parameters
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.'
| def predict(self, X):
| check_is_fitted(self, 'cluster_centers_')
return pairwise_distances_argmin(X, self.cluster_centers_)
|
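A hedged sketch of MeanShift fit/predict as documented above; the two synthetic blobs and the bandwidth value are assumptions:
import numpy as np
from sklearn.cluster import MeanShift
rng = np.random.RandomState(0)
X = np.vstack([rng.randn(30, 2), rng.randn(30, 2) + 10.0])   # two blobs (assumption)
ms = MeanShift(bandwidth=2.0).fit(X)
print(ms.cluster_centers_)                                   # one center per found cluster
print(ms.predict(np.array([[0.0, 0.0], [10.0, 10.0]])))      # nearest-center assignment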
'Verify that the number of samples given is at least the number of clusters'
| def _check_fit_data(self, X):
| X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
if (X.shape[0] < self.n_clusters):
raise ValueError(('n_samples=%d should be >= n_clusters=%d' % (X.shape[0], self.n_clusters)))
return X
|
'Compute k-means clustering.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.'
| def fit(self, X, y=None):
| random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_) = k_means(X, n_clusters=self.n_clusters, init=self.init, n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose, precompute_distances=self.precompute_distances, tol=self.tol, random_state=random_state, copy_x=self.copy_x, n_jobs=self.n_jobs, algorithm=self.algorithm, return_n_iter=True)
return self
|
'Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.'
| def fit_predict(self, X, y=None):
| return self.fit(X).labels_
|
'Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
X_new : array, shape [n_samples, k]
X transformed in the new space.'
| def fit_transform(self, X, y=None):
| X = self._check_fit_data(X)
return self.fit(X)._transform(X)
|
'Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
X_new : array, shape [n_samples, k]
X transformed in the new space.'
| def transform(self, X):
| check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
|
'guts of transform method; no input validation'
| def _transform(self, X):
| return euclidean_distances(X, self.cluster_centers_)
|
'Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.'
| def predict(self, X):
| check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
|
'Opposite of the value of X on the K-means objective.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
score : float
Opposite of the value of X on the K-means objective.'
| def score(self, X, y=None):
| check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return (- _labels_inertia(X, x_squared_norms, self.cluster_centers_)[1])
|
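A hedged sketch exercising the KMeans methods above: predict, transform to cluster-distance space, and score (the negative inertia); the data and n_clusters are illustrative:
import numpy as np
from sklearn.cluster import KMeans
X = np.random.RandomState(0).randn(100, 2)
km = KMeans(n_clusters=3, random_state=0).fit(X)
print(km.predict(X[:5]))                 # index of the closest code-book center
print(km.transform(X[:5]).shape)         # (5, 3): Euclidean distance to each center
print(km.score(X))                       # opposite of the K-means objective on X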
'Compute the centroids on X by chunking it into mini-batches.
Parameters
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.'
| def fit(self, X, y=None):
| random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse='csr', order='C', dtype=[np.float64, np.float32])
(n_samples, n_features) = X.shape
if (n_samples < self.n_clusters):
raise ValueError('Number of samples smaller than number of clusters.')
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if (n_init != 1):
warnings.warn(('Explicit initial center position passed: performing only one init in MiniBatchKMeans instead of n_init=%d' % self.n_init), RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if (self.tol > 0.0):
tol = _tolerance(X, self.tol)
old_center_buffer = np.zeros(n_features, dtype=X.dtype)
else:
tol = 0.0
old_center_buffer = np.zeros(0, dtype=X.dtype)
distances = np.zeros(self.batch_size, dtype=X.dtype)
n_batches = int(np.ceil((float(n_samples) / self.batch_size)))
n_iter = int((self.max_iter * n_batches))
init_size = self.init_size
if (init_size is None):
init_size = (3 * self.batch_size)
if (init_size > n_samples):
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.randint(0, n_samples, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print ('Init %d/%d with method: %s' % ((init_idx + 1), n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
cluster_centers = _init_centroids(X, self.n_clusters, self.init, random_state=random_state, x_squared_norms=x_squared_norms, init_size=init_size)
(batch_inertia, centers_squared_diff) = _mini_batch_step(X_valid, x_squared_norms[validation_indices], cluster_centers, counts, old_center_buffer, False, distances=None, verbose=self.verbose)
(_, inertia) = _labels_inertia(X_valid, x_squared_norms_valid, cluster_centers)
if self.verbose:
print ('Inertia for init %d/%d: %f' % ((init_idx + 1), n_init, inertia))
if ((best_inertia is None) or (inertia < best_inertia)):
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
convergence_context = {}
for iteration_idx in range(n_iter):
minibatch_indices = random_state.randint(0, n_samples, self.batch_size)
(batch_inertia, centers_squared_diff) = _mini_batch_step(X[minibatch_indices], x_squared_norms[minibatch_indices], self.cluster_centers_, self.counts_, old_center_buffer, (tol > 0.0), distances=distances, random_reassign=(((iteration_idx + 1) % (10 + self.counts_.min())) == 0), random_state=random_state, reassignment_ratio=self.reassignment_ratio, verbose=self.verbose)
if _mini_batch_convergence(self, iteration_idx, n_iter, tol, n_samples, centers_squared_diff, batch_inertia, convergence_context, verbose=self.verbose):
break
self.n_iter_ = (iteration_idx + 1)
if self.compute_labels:
(self.labels_, self.inertia_) = self._labels_inertia_minibatch(X)
return self
|
'Compute labels and inertia using mini batches.
This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
X : array-like, shape (n_samples, n_features)
Input data.
Returns
labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.'
| def _labels_inertia_minibatch(self, X):
| if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s], self.cluster_centers_) for s in slices]
(labels, inertia) = zip(*results)
return (np.hstack(labels), np.sum(inertia))
|
'Update k means estimate on a single mini-batch X.
Parameters
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.'
| def partial_fit(self, X, y=None):
| X = check_array(X, accept_sparse='csr')
(n_samples, n_features) = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if (n_samples == 0):
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, 'random_state_', check_random_state(self.random_state))
if ((not hasattr(self, 'counts_')) or (not hasattr(self, 'cluster_centers_'))):
self.cluster_centers_ = _init_centroids(X, self.n_clusters, self.init, random_state=self.random_state_, x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
random_reassign = (self.random_state_.randint((10 * (1 + self.counts_.min()))) == 0)
distances = np.zeros(X.shape[0], dtype=X.dtype)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_, self.counts_, np.zeros(0, dtype=X.dtype), 0, random_reassign=random_reassign, distances=distances, random_state=self.random_state_, reassignment_ratio=self.reassignment_ratio, verbose=self.verbose)
if self.compute_labels:
(self.labels_, self.inertia_) = _labels_inertia(X, x_squared_norms, self.cluster_centers_)
return self
|
'Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.'
| def predict(self, X):
| check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
|
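A hedged sketch of the incremental path above: partial_fit updates the centers one mini-batch at a time, and predict routes new data through _labels_inertia_minibatch; the streaming loop is a stand-in for a real data source:
import numpy as np
from sklearn.cluster import MiniBatchKMeans
rng = np.random.RandomState(0)
mbk = MiniBatchKMeans(n_clusters=3, random_state=0)
for _ in range(10):                      # simulated stream of mini-batches (assumption)
    mbk.partial_fit(rng.randn(50, 2))
print(mbk.predict(rng.randn(5, 2)))      # labels from the current cluster_centers_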
'Fit the hierarchical clustering on the data
Parameters
X : array-like, shape = [n_samples, n_features]
The samples a.k.a. observations.
Returns
self'
| def fit(self, X, y=None):
| X = check_array(X, ensure_min_samples=2, estimator=self)
memory = self.memory
if (memory is None):
memory = Memory(cachedir=None, verbose=0)
elif isinstance(memory, six.string_types):
memory = Memory(cachedir=memory, verbose=0)
elif (not isinstance(memory, Memory)):
raise ValueError("'memory' should either be a string or a sklearn.externals.joblib.Memory instance, got 'memory={!r}' instead.".format(type(memory)))
if (self.n_clusters <= 0):
raise ValueError(('n_clusters should be an integer greater than 0. %s was provided.' % str(self.n_clusters)))
if ((self.linkage == 'ward') and (self.affinity != 'euclidean')):
raise ValueError(('%s was provided as affinity. Ward can only work with euclidean distances.' % (self.affinity,)))
if (self.linkage not in _TREE_BUILDERS):
raise ValueError(('Unknown linkage type %s. Valid options are %s' % (self.linkage, _TREE_BUILDERS.keys())))
tree_builder = _TREE_BUILDERS[self.linkage]
connectivity = self.connectivity
if (self.connectivity is not None):
if callable(self.connectivity):
connectivity = self.connectivity(X)
connectivity = check_array(connectivity, accept_sparse=['csr', 'coo', 'lil'])
n_samples = len(X)
compute_full_tree = self.compute_full_tree
if (self.connectivity is None):
compute_full_tree = True
if (compute_full_tree == 'auto'):
compute_full_tree = (self.n_clusters < max(100, (0.02 * n_samples)))
n_clusters = self.n_clusters
if compute_full_tree:
n_clusters = None
kwargs = {}
if (self.linkage != 'ward'):
kwargs['linkage'] = self.linkage
kwargs['affinity'] = self.affinity
(self.children_, self.n_components_, self.n_leaves_, parents) = memory.cache(tree_builder)(X, connectivity, n_clusters=n_clusters, **kwargs)
if compute_full_tree:
self.labels_ = _hc_cut(self.n_clusters, self.children_, self.n_leaves_)
else:
labels = _hierarchical.hc_get_heads(parents, copy=False)
labels = np.copy(labels[:n_samples])
self.labels_ = np.searchsorted(np.unique(labels), labels)
return self
|
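A hedged sketch of the hierarchical fit above; the toy data, n_clusters, and linkage choice are illustrative:
import numpy as np
from sklearn.cluster import AgglomerativeClustering
X = np.random.RandomState(0).randn(20, 3)
agg = AgglomerativeClustering(n_clusters=4, linkage='ward').fit(X)
print(agg.labels_)                       # one of 4 cluster ids per sample
print(agg.n_leaves_)                     # 20 leaves, one per observation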
'Fit the hierarchical clustering on the data
Parameters
X : array-like, shape = [n_samples, n_features]
The data
Returns
self'
| def fit(self, X, y=None, **params):
| X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_min_features=2, estimator=self)
return AgglomerativeClustering.fit(self, X.T, **params)
|
'Transform a new matrix using the built clustering
Parameters
X : array-like, shape = [n_samples, n_features] or [n_features]
An M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
Y : array, shape = [n_samples, n_clusters] or [n_clusters]
The pooled values for each feature cluster.'
| def transform(self, X):
| check_is_fitted(self, 'labels_')
pooling_func = self.pooling_func
X = check_array(X)
nX = []
if (len(self.labels_) != X.shape[1]):
raise ValueError('X has a different number of features than during fitting.')
for l in np.unique(self.labels_):
nX.append(pooling_func(X[:, (self.labels_ == l)], axis=1))
return np.array(nX).T
|
'Invert the transformation.
Return an array of size n_features with the values of Xred assigned
to each group of features.
Parameters
Xred : array-like, shape=[n_samples, n_clusters] or [n_clusters,]
The values to be assigned to each cluster of features
Returns
X : array, shape=[n_samples, n_features] or [n_features]
An array with the values of Xred assigned to each of the
clusters of features.'
| def inverse_transform(self, Xred):
| check_is_fitted(self, 'labels_')
(unil, inverse) = np.unique(self.labels_, return_inverse=True)
return Xred[..., inverse]
|
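A hedged sketch of the feature-space variant above: transform pools the features of each cluster and inverse_transform broadcasts the pooled values back; the shapes and n_clusters are illustrative:
import numpy as np
from sklearn.cluster import FeatureAgglomeration
X = np.random.RandomState(0).randn(10, 6)
fa = FeatureAgglomeration(n_clusters=2).fit(X)
X_reduced = fa.transform(X)                        # shape (10, 2), one column per feature cluster
X_back = fa.inverse_transform(X_reduced)           # shape (10, 6), cluster value repeated per feature
print(X_reduced.shape, X_back.shape)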
'Remove a subcluster from a node and update it with the
split subclusters.'
| def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2):
| ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
|
'Insert a new subcluster into the node.'
| def insert_cf_subcluster(self, subcluster):
| if (not self.subclusters_):
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= (-2.0)
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
if (closest_subcluster.child_ is not None):
split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster)
if (not split_child):
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = self.subclusters_[closest_index].sq_norm_
return False
else:
(new_subcluster1, new_subcluster2) = _split_node(closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(closest_subcluster, new_subcluster1, new_subcluster2)
if (len(self.subclusters_) > self.branching_factor):
return True
return False
else:
merged = closest_subcluster.merge_subcluster(subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = closest_subcluster.sq_norm_
return False
elif (len(self.subclusters_) < self.branching_factor):
self.append_subcluster(subcluster)
return False
else:
self.append_subcluster(subcluster)
return True
|
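The subcluster insertion and splitting above is internal to Birch's CF-tree; a hedged sketch of how it is exercised through the public estimator, with illustrative threshold and branching_factor values:
import numpy as np
from sklearn.cluster import Birch
X = np.random.RandomState(0).randn(200, 2)
brc = Birch(threshold=0.5, branching_factor=50, n_clusters=3).fit(X)
print(brc.subcluster_centers_.shape)     # centroids of the leaf subclusters
print(brc.predict(X[:5]))                # global cluster labels after the final clustering step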