desc | decl | bodies
---|---|---|
'Return the kernel k(X, Y) and optionally its gradient.
Parameters
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.'
| def __call__(self, X, Y=None, eval_gradient=False):
| X = np.atleast_2d(X)
if (Y is None):
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = (dists / ((2 * self.alpha) * (self.length_scale ** 2)))
base = (1 + tmp)
K = (base ** (- self.alpha))
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError('Gradient can only be evaluated when Y is None.')
dists = cdist(X, Y, metric='sqeuclidean')
K = ((1 + (dists / ((2 * self.alpha) * (self.length_scale ** 2)))) ** (- self.alpha))
if eval_gradient:
if (not self.hyperparameter_length_scale.fixed):
length_scale_gradient = ((dists * K) / ((self.length_scale ** 2) * base))
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else:
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
if (not self.hyperparameter_alpha.fixed):
alpha_gradient = (K * (((- self.alpha) * np.log(base)) + (dists / ((2 * (self.length_scale ** 2)) * base))))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else:
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return (K, np.dstack((alpha_gradient, length_scale_gradient)))
else:
return K
|
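A minimal usage sketch of the call signature documented above, assuming the class is scikit-learn's RationalQuadratic kernel; the data here is hypothetical toy input.

import numpy as np
from sklearn.gaussian_process.kernels import RationalQuadratic

X = np.random.RandomState(0).rand(5, 2)
Y = np.random.RandomState(1).rand(3, 2)
kernel = RationalQuadratic(length_scale=1.0, alpha=0.5)

# With Y=None the square kernel matrix and its gradient w.r.t. the
# log-hyperparameters (alpha, length_scale) are returned.
K, K_gradient = kernel(X, eval_gradient=True)
print(K.shape, K_gradient.shape)   # (5, 5) (5, 5, 2)

# With an explicit Y only the cross-kernel is available; eval_gradient
# must stay False, as the code above enforces.
K_cross = kernel(X, Y)
print(K_cross.shape)               # (5, 3)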
'Return the kernel k(X, Y) and optionally its gradient.
Parameters
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.'
| def __call__(self, X, Y=None, eval_gradient=False):
| X = np.atleast_2d(X)
if (Y is None):
dists = squareform(pdist(X, metric='euclidean'))
arg = ((np.pi * dists) / self.periodicity)
sin_of_arg = np.sin(arg)
K = np.exp(((-2) * ((sin_of_arg / self.length_scale) ** 2)))
else:
if eval_gradient:
raise ValueError('Gradient can only be evaluated when Y is None.')
dists = cdist(X, Y, metric='euclidean')
K = np.exp(((-2) * ((np.sin(((np.pi / self.periodicity) * dists)) / self.length_scale) ** 2)))
if eval_gradient:
cos_of_arg = np.cos(arg)
if (not self.hyperparameter_length_scale.fixed):
length_scale_gradient = (((4 / (self.length_scale ** 2)) * (sin_of_arg ** 2)) * K)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else:
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
if (not self.hyperparameter_periodicity.fixed):
periodicity_gradient = (((((4 * arg) / (self.length_scale ** 2)) * cos_of_arg) * sin_of_arg) * K)
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else:
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return (K, np.dstack((length_scale_gradient, periodicity_gradient)))
else:
return K
|
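As a sanity check on the analytic gradients above, a small finite-difference sketch, assuming scikit-learn's ExpSineSquared kernel and assuming the gradient dimensions follow the dstack order in the code (length_scale, then periodicity); the data is made up.

import numpy as np
from sklearn.gaussian_process.kernels import ExpSineSquared

rng = np.random.RandomState(0)
X = rng.rand(4, 1)
kernel = ExpSineSquared(length_scale=1.2, periodicity=2.0)
K, K_grad = kernel(X, eval_gradient=True)

# theta holds the log-transformed hyperparameters, which is what the
# returned gradient is taken with respect to.
eps = 1e-7
for i in range(len(kernel.theta)):
    theta = kernel.theta.copy()
    theta[i] += eps
    fd = (kernel.clone_with_theta(theta)(X) - K) / eps
    print(np.allclose(fd, K_grad[..., i], atol=1e-4))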
'Return the kernel k(X, Y) and optionally its gradient.
Parameters
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.'
| def __call__(self, X, Y=None, eval_gradient=False):
| X = np.atleast_2d(X)
if (Y is None):
K = (np.inner(X, X) + (self.sigma_0 ** 2))
else:
if eval_gradient:
raise ValueError('Gradient can only be evaluated when Y is None.')
K = (np.inner(X, Y) + (self.sigma_0 ** 2))
if eval_gradient:
if (not self.hyperparameter_sigma_0.fixed):
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = (2 * (self.sigma_0 ** 2))
return (K, K_gradient)
else:
return (K, np.empty((X.shape[0], X.shape[0], 0)))
else:
return K
|
'Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)'
| def diag(self, X):
| return (np.einsum('ij,ij->i', X, X) + (self.sigma_0 ** 2))
|
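A short check of the property stated in the diag docstring, assuming scikit-learn's DotProduct kernel and hypothetical input data.

import numpy as np
from sklearn.gaussian_process.kernels import DotProduct

X = np.random.RandomState(0).rand(6, 3)
kernel = DotProduct(sigma_0=1.0)

# diag(X) matches np.diag(kernel(X)) but never builds the full
# (n_samples, n_samples) matrix.
print(np.allclose(kernel.diag(X), np.diag(kernel(X))))   # True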
'Returns whether the kernel is stationary.'
| def is_stationary(self):
| return False
|
'Return the kernel k(X, Y) and optionally its gradient.
Parameters
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.'
| def __call__(self, X, Y=None, eval_gradient=False):
| pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
if (self.pairwise_kernels_kwargs is None):
pairwise_kernels_kwargs = {}
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma, filter_params=True, **pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return (K, np.empty((X.shape[0], X.shape[0], 0)))
else:
def f(gamma):
return pairwise_kernels(X, Y, metric=self.metric, gamma=np.exp(gamma), filter_params=True, **pairwise_kernels_kwargs)
return (K, _approx_fprime(self.theta, f, 1e-10))
else:
return K
|
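The gradient here comes from a forward finite difference on the log of gamma (via the private _approx_fprime helper). Below is a standalone sketch of the same idea, using a hypothetical approx_kernel_grad helper built on scikit-learn's public pairwise_kernels; the data is made up.

import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels

def approx_kernel_grad(X, log_gamma, eps=1e-8):
    # Hypothetical helper: forward difference of the RBF kernel matrix
    # w.r.t. log(gamma), mirroring the eval_gradient branch above.
    def k(lg):
        return pairwise_kernels(X, metric='rbf', gamma=np.exp(lg), filter_params=True)
    K0 = k(log_gamma)
    K1 = k(log_gamma + eps)
    return K0, ((K1 - K0) / eps)[..., np.newaxis]

X = np.random.RandomState(0).rand(5, 2)
K, K_grad = approx_kernel_grad(X, log_gamma=0.0)
print(K.shape, K_grad.shape)   # (5, 5) (5, 5, 1)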
'Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)'
| def diag(self, X):
| return np.apply_along_axis(self, 1, X).ravel()
|
'Returns whether the kernel is stationary.'
| def is_stationary(self):
| return (self.metric in ['rbf'])
|
'Check values of the basic parameters.
Parameters
X : array-like, shape (n_samples, n_features)'
| def _check_initial_parameters(self, X):
| if (self.n_components < 1):
raise ValueError(("Invalid value for 'n_components': %d Estimation requires at least one component" % self.n_components))
if (self.tol < 0.0):
raise ValueError(("Invalid value for 'tol': %.5f Tolerance used by the EM must be non-negative" % self.tol))
if (self.n_init < 1):
raise ValueError(("Invalid value for 'n_init': %d Estimation requires at least one run" % self.n_init))
if (self.max_iter < 1):
raise ValueError(("Invalid value for 'max_iter': %d Estimation requires at least one iteration" % self.max_iter))
if (self.reg_covar < 0.0):
raise ValueError(("Invalid value for 'reg_covar': %.5f regularization on covariance must be non-negative" % self.reg_covar))
self._check_parameters(X)
|
'Check initial parameters of the derived class.
Parameters
X : array-like, shape (n_samples, n_features)'
| @abstractmethod
def _check_parameters(self, X):
| pass
|
'Initialize the model parameters.
Parameters
X : array-like, shape (n_samples, n_features)
random_state : RandomState
A random number generator instance.'
| def _initialize_parameters(self, X, random_state):
| (n_samples, _) = X.shape
if (self.init_params == 'kmeans'):
resp = np.zeros((n_samples, self.n_components))
label = cluster.KMeans(n_clusters=self.n_components, n_init=1, random_state=random_state).fit(X).labels_
resp[(np.arange(n_samples), label)] = 1
elif (self.init_params == 'random'):
resp = random_state.rand(n_samples, self.n_components)
resp /= resp.sum(axis=1)[:, np.newaxis]
else:
raise ValueError(("Unimplemented initialization method '%s'" % self.init_params))
self._initialize(X, resp)
|
'Initialize the model parameters of the derived class.
Parameters
X : array-like, shape (n_samples, n_features)
resp : array-like, shape (n_samples, n_components)'
| @abstractmethod
def _initialize(self, X, resp):
| pass
|
'Estimate model parameters with the EM algorithm.
The method fits the model `n_init` times and sets the parameters with
which the model has the largest likelihood or lower bound. Within each
trial, the method iterates between E-step and M-step for `max_iter`
times until the change of likelihood or lower bound is less than
`tol`; otherwise, a `ConvergenceWarning` is raised.
Parameters
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
self'
| def fit(self, X, y=None):
| X = _check_X(X, self.n_components)
self._check_initial_parameters(X)
do_init = (not (self.warm_start and hasattr(self, 'converged_')))
n_init = (self.n_init if do_init else 1)
max_lower_bound = (- np.infty)
self.converged_ = False
random_state = check_random_state(self.random_state)
(n_samples, _) = X.shape
for init in range(n_init):
self._print_verbose_msg_init_beg(init)
if do_init:
self._initialize_parameters(X, random_state)
self.lower_bound_ = (- np.infty)
for n_iter in range(self.max_iter):
prev_lower_bound = self.lower_bound_
(log_prob_norm, log_resp) = self._e_step(X)
self._m_step(X, log_resp)
self.lower_bound_ = self._compute_lower_bound(log_resp, log_prob_norm)
change = (self.lower_bound_ - prev_lower_bound)
self._print_verbose_msg_iter_end(n_iter, change)
if (abs(change) < self.tol):
self.converged_ = True
break
self._print_verbose_msg_init_end(self.lower_bound_)
if (self.lower_bound_ > max_lower_bound):
max_lower_bound = self.lower_bound_
best_params = self._get_parameters()
best_n_iter = n_iter
if (not self.converged_):
warnings.warn(('Initialization %d did not converge. Try different init parameters, or increase max_iter, tol or check for degenerate data.' % (init + 1)), ConvergenceWarning)
self._set_parameters(best_params)
self.n_iter_ = best_n_iter
return self
|
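A brief sketch of how this fit loop is typically driven from user code, assuming scikit-learn's GaussianMixture (which derives from this base class) and hypothetical two-cluster toy data.

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(100, 2) + 5.0, rng.randn(100, 2) - 5.0])

gm = GaussianMixture(n_components=2, n_init=3, max_iter=200, tol=1e-4,
                     random_state=0).fit(X)

# fit keeps the initialization (out of n_init) with the largest lower bound.
print(gm.converged_, gm.n_iter_, gm.lower_bound_)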
'E step.
Parameters
X : array-like, shape (n_samples, n_features)
Returns
log_prob_norm : float
Mean of the logarithms of the probabilities of each sample in X
log_responsibility : array, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
each sample in X.'
| def _e_step(self, X):
| (log_prob_norm, log_resp) = self._estimate_log_prob_resp(X)
return (np.mean(log_prob_norm), log_resp)
|
'M step.
Parameters
X : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
each sample in X.'
| @abstractmethod
def _m_step(self, X, log_resp):
| pass
|
'Compute the weighted log probabilities for each sample.
Parameters
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
log_prob : array, shape (n_samples,)
Log probabilities of each data point in X.'
| def score_samples(self, X):
| self._check_is_fitted()
X = _check_X(X, None, self.means_.shape[1])
return logsumexp(self._estimate_weighted_log_prob(X), axis=1)
|
'Compute the per-sample average log-likelihood of the given data X.
Parameters
X : array-like, shape (n_samples, n_dimensions)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
log_likelihood : float
Log likelihood of the Gaussian mixture given X.'
| def score(self, X, y=None):
| return self.score_samples(X).mean()
|
'Predict the labels for the data samples in X using trained model.
Parameters
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
labels : array, shape (n_samples,)
Component labels.'
| def predict(self, X):
| self._check_is_fitted()
X = _check_X(X, None, self.means_.shape[1])
return self._estimate_weighted_log_prob(X).argmax(axis=1)
|
'Predict posterior probability of each component given the data.
Parameters
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
resp : array, shape (n_samples, n_components)
Returns the probability each Gaussian (state) in
the model given each sample.'
| def predict_proba(self, X):
| self._check_is_fitted()
X = _check_X(X, None, self.means_.shape[1])
(_, log_resp) = self._estimate_log_prob_resp(X)
return np.exp(log_resp)
|
'Generate random samples from the fitted Gaussian distribution.
Parameters
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
X : array, shape (n_samples, n_features)
Randomly generated sample
y : array, shape (n_samples,)
Component labels'
| def sample(self, n_samples=1):
| self._check_is_fitted()
if (n_samples < 1):
raise ValueError(("Invalid value for 'n_samples': %d . The sampling requires at least one sample." % self.n_components))
(_, n_features) = self.means_.shape
rng = check_random_state(self.random_state)
n_samples_comp = rng.multinomial(n_samples, self.weights_)
if (self.covariance_type == 'full'):
X = np.vstack([rng.multivariate_normal(mean, covariance, int(sample)) for (mean, covariance, sample) in zip(self.means_, self.covariances_, n_samples_comp)])
elif (self.covariance_type == 'tied'):
X = np.vstack([rng.multivariate_normal(mean, self.covariances_, int(sample)) for (mean, sample) in zip(self.means_, n_samples_comp)])
else:
X = np.vstack([(mean + (rng.randn(sample, n_features) * np.sqrt(covariance))) for (mean, covariance, sample) in zip(self.means_, self.covariances_, n_samples_comp)])
y = np.concatenate([(j * np.ones(sample, dtype=int)) for (j, sample) in enumerate(n_samples_comp)])
return (X, y)
|
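A short sketch of the sampling flow described above (a multinomial draw over the mixture weights, then per-component Gaussian draws), assuming scikit-learn's GaussianMixture and hypothetical toy data.

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 4.0])
gm = GaussianMixture(n_components=2, random_state=0).fit(X)

# Each returned label indicates which component generated the sample.
X_new, labels = gm.sample(n_samples=10)
print(X_new.shape, np.bincount(labels, minlength=2))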
'Estimate the weighted log-probabilities, log P(X | Z) + log weights.
Parameters
X : array-like, shape (n_samples, n_features)
Returns
weighted_log_prob : array, shape (n_samples, n_components)'
| def _estimate_weighted_log_prob(self, X):
| return (self._estimate_log_prob(X) + self._estimate_log_weights())
|
'Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm.
Returns
log_weight : array, shape (n_components, )'
| @abstractmethod
def _estimate_log_weights(self):
| pass
|
'Estimate the log-probabilities log P(X | Z).
Compute the log-probabilities per each component for each sample.
Parameters
X : array-like, shape (n_samples, n_features)
Returns
log_prob : array, shape (n_samples, n_components)'
| @abstractmethod
def _estimate_log_prob(self, X):
| pass
|
'Estimate log probabilities and responsibilities for each sample.
Compute the log probabilities, weighted log probabilities per
component and responsibilities for each sample in X with respect to
the current state of the model.
Parameters
X : array-like, shape (n_samples, n_features)
Returns
log_prob_norm : array, shape (n_samples,)
log p(X)
log_responsibilities : array, shape (n_samples, n_components)
logarithm of the responsibilities'
| def _estimate_log_prob_resp(self, X):
| weighted_log_prob = self._estimate_weighted_log_prob(X)
log_prob_norm = logsumexp(weighted_log_prob, axis=1)
with np.errstate(under='ignore'):
log_resp = (weighted_log_prob - log_prob_norm[:, np.newaxis])
return (log_prob_norm, log_resp)
|
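A standalone sketch of the normalization performed in _estimate_log_prob_resp, using scipy's logsumexp and made-up weighted log-probabilities.

import numpy as np
from scipy.special import logsumexp

weighted_log_prob = np.log(np.array([[0.20, 0.10],
                                     [0.05, 0.60]]))
log_prob_norm = logsumexp(weighted_log_prob, axis=1)
log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]

# Responsibilities sum to one per sample after exponentiation.
print(np.exp(log_resp).sum(axis=1))   # [1. 1.]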
'Print verbose message on initialization.'
| def _print_verbose_msg_init_beg(self, n_init):
| if (self.verbose == 1):
print(('Initialization %d' % n_init))
elif (self.verbose >= 2):
print(('Initialization %d' % n_init))
self._init_prev_time = time()
self._iter_prev_time = self._init_prev_time
|
'Print verbose message at the end of an iteration.'
| def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
| if ((n_iter % self.verbose_interval) == 0):
if (self.verbose == 1):
print((' Iteration %d' % n_iter))
elif (self.verbose >= 2):
cur_time = time()
print((' Iteration %d\t time lapse %.5fs\t ll change %.5f' % (n_iter, (cur_time - self._iter_prev_time), diff_ll)))
self._iter_prev_time = cur_time
|
'Print verbose message at the end of initialization.'
| def _print_verbose_msg_init_end(self, ll):
| if (self.verbose == 1):
print(('Initialization converged: %s' % self.converged_))
elif (self.verbose >= 2):
print(('Initialization converged: %s\t time lapse %.5fs\t ll %.5f' % (self.converged_, (time() - self._init_prev_time), ll)))
|
'Covariance parameters for each mixture component.
The shape depends on ``covariance_type``::
(n_states, n_features) if \'spherical\',
(n_features, n_features) if \'tied\',
(n_states, n_features) if \'diag\',
(n_states, n_features, n_features) if \'full\''
| def _get_covars(self):
| if (self.covariance_type == 'full'):
return self.covars_
elif (self.covariance_type == 'diag'):
return [np.diag(cov) for cov in self.covars_]
elif (self.covariance_type == 'tied'):
return ([self.covars_] * self.n_components)
elif (self.covariance_type == 'spherical'):
return [np.diag(cov) for cov in self.covars_]
|
'Provide values for covariance.'
| def _set_covars(self, covars):
| covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
|
'Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation'
| def score_samples(self, X):
| check_is_fitted(self, 'means_')
X = check_array(X)
if (X.ndim == 1):
X = X[:, np.newaxis]
if (X.size == 0):
return (np.array([]), np.empty((0, self.n_components)))
if (X.shape[1] != self.means_.shape[1]):
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_, self.covariance_type) + np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp((lpr - logprob[:, np.newaxis]))
return (logprob, responsibilities)
|
'Compute the log probability under the model.
Parameters
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X'
| def score(self, X, y=None):
| (logprob, _) = self.score_samples(X)
return logprob
|
'Predict label for data.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
C : array, shape = (n_samples,) component memberships'
| def predict(self, X):
| (logprob, responsibilities) = self.score_samples(X)
return responsibilities.argmax(axis=1)
|
'Predict posterior probability of data under each Gaussian
in the model.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.'
| def predict_proba(self, X):
| (logprob, responsibilities) = self.score_samples(X)
return responsibilities
|
'Generate random samples from the model.
Parameters
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
X : array_like, shape (n_samples, n_features)
List of samples'
| def sample(self, n_samples=1, random_state=None):
| check_is_fitted(self, 'means_')
if (random_state is None):
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
comps = weight_cdf.searchsorted(rand)
for comp in range(self.n_components):
comp_in_X = (comp == comps)
num_comp_in_X = comp_in_X.sum()
if (num_comp_in_X > 0):
if (self.covariance_type == 'tied'):
cv = self.covars_
elif (self.covariance_type == 'spherical'):
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = _sample_gaussian(self.means_[comp], cv, self.covariance_type, num_comp_in_X, random_state=random_state).T
return X
|
'Fit and then predict labels for data.
Warning: Due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
X : array-like, shape = [n_samples, n_features]
Returns
C : array, shape = (n_samples,) component memberships'
| def fit_predict(self, X, y=None):
| return self._fit(X, y).argmax(axis=1)
|
'Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string \'\' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.'
| def _fit(self, X, y=None, do_prediction=False):
| X = check_array(X, dtype=np.float64, ensure_min_samples=2, estimator=self)
if (X.shape[0] < self.n_components):
raise ValueError(('GMM estimation with %s components, but got only %s samples' % (self.n_components, X.shape[0])))
max_log_prob = (- np.infty)
if (self.verbose > 0):
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if (self.verbose > 0):
print('Initialization ' + str((init + 1)))
start_init_time = time()
if (('m' in self.init_params) or (not hasattr(self, 'means_'))):
self.means_ = cluster.KMeans(n_clusters=self.n_components, random_state=self.random_state).fit(X).cluster_centers_
if (self.verbose > 1):
print('\tMeans have been initialized.')
if (('w' in self.init_params) or (not hasattr(self, 'weights_'))):
self.weights_ = np.tile((1.0 / self.n_components), self.n_components)
if (self.verbose > 1):
print('\tWeights have been initialized.')
if (('c' in self.init_params) or (not hasattr(self, 'covars_'))):
cv = (np.cov(X.T) + (self.min_covar * np.eye(X.shape[1])))
if (not cv.shape):
cv.shape = (1, 1)
self.covars_ = distribute_covar_matrix_to_match_covariance_type(cv, self.covariance_type, self.n_components)
if (self.verbose > 1):
print('\tCovariance matrices have been initialized.')
current_log_likelihood = None
self.converged_ = False
for i in range(self.n_iter):
if (self.verbose > 0):
print('\tEM iteration ' + str((i + 1)))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
(log_likelihoods, responsibilities) = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
if (prev_log_likelihood is not None):
change = abs((current_log_likelihood - prev_log_likelihood))
if (self.verbose > 1):
print('\t\tChange: ' + str(change))
if (change < self.tol):
self.converged_ = True
if (self.verbose > 0):
print('\t\tEM algorithm converged.')
break
self._do_mstep(X, responsibilities, self.params, self.min_covar)
if (self.verbose > 1):
print(('\t\tEM iteration ' + str((i + 1))) + ' took {0:.5f}s'.format(time() - start_iter_time))
if self.n_iter:
if (current_log_likelihood > max_log_prob):
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_, 'means': self.means_, 'covars': self.covars_}
if (self.verbose > 1):
print('\tBetter parameters were found.')
if (self.verbose > 1):
print(('\tInitialization ' + str((init + 1))) + ' took {0:.5f}s'.format(time() - start_init_time))
if (np.isneginf(max_log_prob) and self.n_iter):
raise RuntimeError((('EM algorithm was never able to compute a valid likelihood ' + 'given initial parameters. Try different init parameters ') + '(or increasing n_init) or check for degenerate data.'))
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else:
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
|
'Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string \'\' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
self'
| def fit(self, X, y=None):
| self._fit(X, y)
return self
|
'Perform the Mstep of the EM algorithm and return the cluster weights.'
| def _do_mstep(self, X, responsibilities, params, min_covar=0):
| weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = (1.0 / (weights[:, np.newaxis] + (10 * EPS)))
if ('w' in params):
self.weights_ = ((weights / (weights.sum() + (10 * EPS))) + EPS)
if ('m' in params):
self.means_ = (weighted_X_sum * inverse_weights)
if ('c' in params):
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(self, X, responsibilities, weighted_X_sum, inverse_weights, min_covar)
return weights
|
'Return the number of free parameters in the model.'
| def _n_parameters(self):
| ndim = self.means_.shape[1]
if (self.covariance_type == 'full'):
cov_params = (((self.n_components * ndim) * (ndim + 1)) / 2.0)
elif (self.covariance_type == 'diag'):
cov_params = (self.n_components * ndim)
elif (self.covariance_type == 'tied'):
cov_params = ((ndim * (ndim + 1)) / 2.0)
elif (self.covariance_type == 'spherical'):
cov_params = self.n_components
mean_params = (ndim * self.n_components)
return int((((cov_params + mean_params) + self.n_components) - 1))
|
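A worked check of this count for a hypothetical model with 3 components, 2 features and 'full' covariances, following the branches above.

n_components, ndim = 3, 2
cov_params = n_components * ndim * (ndim + 1) / 2.0   # 9 covariance entries
mean_params = ndim * n_components                     # 6 mean entries
# Mixture weights contribute n_components - 1 free parameters.
print(int(cov_params + mean_params + n_components - 1))   # 17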
'Bayesian information criterion for the current model fit
and the proposed data.
Parameters
X : array of shape(n_samples, n_dimensions)
Returns
bic : float (the lower the better)'
| def bic(self, X):
| return (((-2) * self.score(X).sum()) + (self._n_parameters() * np.log(X.shape[0])))
|
'Akaike information criterion for the current model fit
and the proposed data.
Parameters
X : array of shape(n_samples, n_dimensions)
Returns
aic : float (the lower the better)'
| def aic(self, X):
| return (((-2) * self.score(X).sum()) + (2 * self._n_parameters()))
|
'Check that the parameters are well defined.
Parameters
X : array-like, shape (n_samples, n_features)'
| def _check_parameters(self, X):
| if (self.covariance_type not in ['spherical', 'tied', 'diag', 'full']):
raise ValueError(("Invalid value for 'covariance_type': %s 'covariance_type' should be in ['spherical', 'tied', 'diag', 'full']" % self.covariance_type))
if (self.weight_concentration_prior_type not in ['dirichlet_process', 'dirichlet_distribution']):
raise ValueError(("Invalid value for 'weight_concentration_prior_type': %s 'weight_concentration_prior_type' should be in ['dirichlet_process', 'dirichlet_distribution']" % self.weight_concentration_prior_type))
self._check_weights_parameters()
self._check_means_parameters(X)
self._check_precision_parameters(X)
self._checkcovariance_prior_parameter(X)
|
'Check the parameter of the Dirichlet distribution.'
| def _check_weights_parameters(self):
| if (self.weight_concentration_prior is None):
self.weight_concentration_prior_ = (1.0 / self.n_components)
elif (self.weight_concentration_prior > 0.0):
self.weight_concentration_prior_ = self.weight_concentration_prior
else:
raise ValueError(("The parameter 'weight_concentration_prior' should be greater than 0., but got %.3f." % self.weight_concentration_prior))
|
'Check the parameters of the Gaussian distribution.
Parameters
X : array-like, shape (n_samples, n_features)'
| def _check_means_parameters(self, X):
| (_, n_features) = X.shape
if (self.mean_precision_prior is None):
self.mean_precision_prior_ = 1.0
elif (self.mean_precision_prior > 0.0):
self.mean_precision_prior_ = self.mean_precision_prior
else:
raise ValueError(("The parameter 'mean_precision_prior' should be greater than 0., but got %.3f." % self.mean_precision_prior))
if (self.mean_prior is None):
self.mean_prior_ = X.mean(axis=0)
else:
self.mean_prior_ = check_array(self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False)
_check_shape(self.mean_prior_, (n_features,), 'means')
|
'Check the prior parameters of the precision distribution.
Parameters
X : array-like, shape (n_samples, n_features)'
| def _check_precision_parameters(self, X):
| (_, n_features) = X.shape
if (self.degrees_of_freedom_prior is None):
self.degrees_of_freedom_prior_ = n_features
elif (self.degrees_of_freedom_prior > (n_features - 1.0)):
self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior
else:
raise ValueError(("The parameter 'degrees_of_freedom_prior' should be greater than %d, but got %.3f." % ((n_features - 1), self.degrees_of_freedom_prior)))
|
'Check the `covariance_prior_`.
Parameters
X : array-like, shape (n_samples, n_features)'
| def _checkcovariance_prior_parameter(self, X):
| (_, n_features) = X.shape
if (self.covariance_prior is None):
self.covariance_prior_ = {'full': np.atleast_2d(np.cov(X.T)), 'tied': np.atleast_2d(np.cov(X.T)), 'diag': np.var(X, axis=0, ddof=1), 'spherical': np.var(X, axis=0, ddof=1).mean()}[self.covariance_type]
elif (self.covariance_type in ['full', 'tied']):
self.covariance_prior_ = check_array(self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False)
_check_shape(self.covariance_prior_, (n_features, n_features), ('%s covariance_prior' % self.covariance_type))
_check_precision_matrix(self.covariance_prior_, self.covariance_type)
elif (self.covariance_type == 'diag'):
self.covariance_prior_ = check_array(self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False)
_check_shape(self.covariance_prior_, (n_features,), ('%s covariance_prior' % self.covariance_type))
_check_precision_positivity(self.covariance_prior_, self.covariance_type)
elif (self.covariance_prior > 0.0):
self.covariance_prior_ = self.covariance_prior
else:
raise ValueError(("The parameter 'spherical covariance_prior' should be greater than 0., but got %.3f." % self.covariance_prior))
|
'Initialization of the mixture parameters.
Parameters
X : array-like, shape (n_samples, n_features)
resp : array-like, shape (n_samples, n_components)'
| def _initialize(self, X, resp):
| (nk, xk, sk) = _estimate_gaussian_parameters(X, resp, self.reg_covar, self.covariance_type)
self._estimate_weights(nk)
self._estimate_means(nk, xk)
self._estimate_precisions(nk, xk, sk)
|
'Estimate the parameters of the Dirichlet distribution.
Parameters
nk : array-like, shape (n_components,)'
| def _estimate_weights(self, nk):
| if (self.weight_concentration_prior_type == 'dirichlet_process'):
self.weight_concentration_ = ((1.0 + nk), (self.weight_concentration_prior_ + np.hstack((np.cumsum(nk[::(-1)])[(-2)::(-1)], 0))))
else:
self.weight_concentration_ = (self.weight_concentration_prior_ + nk)
|
'Estimate the parameters of the Gaussian distribution.
Parameters
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)'
| def _estimate_means(self, nk, xk):
| self.mean_precision_ = (self.mean_precision_prior_ + nk)
self.means_ = (((self.mean_precision_prior_ * self.mean_prior_) + (nk[:, np.newaxis] * xk)) / self.mean_precision_[:, np.newaxis])
|
'Estimate the precisions parameters of the precision distribution.
Parameters
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
sk : array-like
The shape depends of `covariance_type`:
\'full\' : (n_components, n_features, n_features)
\'tied\' : (n_features, n_features)
\'diag\' : (n_components, n_features)
\'spherical\' : (n_components,)'
| def _estimate_precisions(self, nk, xk, sk):
| {'full': self._estimate_wishart_full, 'tied': self._estimate_wishart_tied, 'diag': self._estimate_wishart_diag, 'spherical': self._estimate_wishart_spherical}[self.covariance_type](nk, xk, sk)
self.precisions_cholesky_ = _compute_precision_cholesky(self.covariances_, self.covariance_type)
|
'Estimate the full Wishart distribution parameters.
Parameters
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
sk : array-like, shape (n_components, n_features, n_features)'
| def _estimate_wishart_full(self, nk, xk, sk):
| (_, n_features) = xk.shape
self.degrees_of_freedom_ = (self.degrees_of_freedom_prior_ + nk)
self.covariances_ = np.empty((self.n_components, n_features, n_features))
for k in range(self.n_components):
diff = (xk[k] - self.mean_prior_)
self.covariances_[k] = ((self.covariance_prior_ + (nk[k] * sk[k])) + (((nk[k] * self.mean_precision_prior_) / self.mean_precision_[k]) * np.outer(diff, diff)))
self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis, np.newaxis]
|
'Estimate the tied Wishart distribution parameters.
Parameters
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
sk : array-like, shape (n_features, n_features)'
| def _estimate_wishart_tied(self, nk, xk, sk):
| (_, n_features) = xk.shape
self.degrees_of_freedom_ = (self.degrees_of_freedom_prior_ + (nk.sum() / self.n_components))
diff = (xk - self.mean_prior_)
self.covariances_ = ((self.covariance_prior_ + ((sk * nk.sum()) / self.n_components)) + ((self.mean_precision_prior_ / self.n_components) * np.dot(((nk / self.mean_precision_) * diff.T), diff)))
self.covariances_ /= self.degrees_of_freedom_
|
'Estimate the diag Wishart distribution parameters.
Parameters
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
sk : array-like, shape (n_components, n_features)'
| def _estimate_wishart_diag(self, nk, xk, sk):
| (_, n_features) = xk.shape
self.degrees_of_freedom_ = (self.degrees_of_freedom_prior_ + nk)
diff = (xk - self.mean_prior_)
self.covariances_ = (self.covariance_prior_ + (nk[:, np.newaxis] * (sk + ((self.mean_precision_prior_ / self.mean_precision_)[:, np.newaxis] * np.square(diff)))))
self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis]
|
'Estimate the spherical Wishart distribution parameters.
Parameters
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
sk : array-like, shape (n_components,)'
| def _estimate_wishart_spherical(self, nk, xk, sk):
| (_, n_features) = xk.shape
self.degrees_of_freedom_ = (self.degrees_of_freedom_prior_ + nk)
diff = (xk - self.mean_prior_)
self.covariances_ = (self.covariance_prior_ + (nk * (sk + ((self.mean_precision_prior_ / self.mean_precision_) * np.mean(np.square(diff), 1)))))
self.covariances_ /= self.degrees_of_freedom_
|
'M step.
Parameters
X : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
each sample in X.'
| def _m_step(self, X, log_resp):
| (n_samples, _) = X.shape
(nk, xk, sk) = _estimate_gaussian_parameters(X, np.exp(log_resp), self.reg_covar, self.covariance_type)
self._estimate_weights(nk)
self._estimate_means(nk, xk)
self._estimate_precisions(nk, xk, sk)
|
'Estimate the lower bound of the model.
The lower bound on the likelihood (of the training data with respect to
the model) is used to detect convergence and has to increase at
each iteration.
Parameters
X : array-like, shape (n_samples, n_features)
log_resp : array, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
each sample in X.
log_prob_norm : float
Logarithm of the probability of each sample in X.
Returns
lower_bound : float'
| def _compute_lower_bound(self, log_resp, log_prob_norm):
| (n_features,) = self.mean_prior_.shape
log_det_precisions_chol = (_compute_log_det_cholesky(self.precisions_cholesky_, self.covariance_type, n_features) - ((0.5 * n_features) * np.log(self.degrees_of_freedom_)))
if (self.covariance_type == 'tied'):
log_wishart = (self.n_components * np.float64(_log_wishart_norm(self.degrees_of_freedom_, log_det_precisions_chol, n_features)))
else:
log_wishart = np.sum(_log_wishart_norm(self.degrees_of_freedom_, log_det_precisions_chol, n_features))
if (self.weight_concentration_prior_type == 'dirichlet_process'):
log_norm_weight = (- np.sum(betaln(self.weight_concentration_[0], self.weight_concentration_[1])))
else:
log_norm_weight = _log_dirichlet_norm(self.weight_concentration_)
return ((((- np.sum((np.exp(log_resp) * log_resp))) - log_wishart) - log_norm_weight) - ((0.5 * n_features) * np.sum(np.log(self.mean_precision_))))
|
'Check the Gaussian mixture parameters are well defined.'
| def _check_parameters(self, X):
| (_, n_features) = X.shape
if (self.covariance_type not in ['spherical', 'tied', 'diag', 'full']):
raise ValueError(("Invalid value for 'covariance_type': %s 'covariance_type' should be in ['spherical', 'tied', 'diag', 'full']" % self.covariance_type))
if (self.weights_init is not None):
self.weights_init = _check_weights(self.weights_init, self.n_components)
if (self.means_init is not None):
self.means_init = _check_means(self.means_init, self.n_components, n_features)
if (self.precisions_init is not None):
self.precisions_init = _check_precisions(self.precisions_init, self.covariance_type, self.n_components, n_features)
|
'Initialization of the Gaussian mixture parameters.
Parameters
X : array-like, shape (n_samples, n_features)
resp : array-like, shape (n_samples, n_components)'
| def _initialize(self, X, resp):
| (n_samples, _) = X.shape
(weights, means, covariances) = _estimate_gaussian_parameters(X, resp, self.reg_covar, self.covariance_type)
weights /= n_samples
self.weights_ = (weights if (self.weights_init is None) else self.weights_init)
self.means_ = (means if (self.means_init is None) else self.means_init)
if (self.precisions_init is None):
self.covariances_ = covariances
self.precisions_cholesky_ = _compute_precision_cholesky(covariances, self.covariance_type)
elif (self.covariance_type == 'full'):
self.precisions_cholesky_ = np.array([linalg.cholesky(prec_init, lower=True) for prec_init in self.precisions_init])
elif (self.covariance_type == 'tied'):
self.precisions_cholesky_ = linalg.cholesky(self.precisions_init, lower=True)
else:
self.precisions_cholesky_ = self.precisions_init
|
'M step.
Parameters
X : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
each sample in X.'
| def _m_step(self, X, log_resp):
| (n_samples, _) = X.shape
(self.weights_, self.means_, self.covariances_) = _estimate_gaussian_parameters(X, np.exp(log_resp), self.reg_covar, self.covariance_type)
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(self.covariances_, self.covariance_type)
|
'Return the number of free parameters in the model.'
| def _n_parameters(self):
| (_, n_features) = self.means_.shape
if (self.covariance_type == 'full'):
cov_params = (((self.n_components * n_features) * (n_features + 1)) / 2.0)
elif (self.covariance_type == 'diag'):
cov_params = (self.n_components * n_features)
elif (self.covariance_type == 'tied'):
cov_params = ((n_features * (n_features + 1)) / 2.0)
elif (self.covariance_type == 'spherical'):
cov_params = self.n_components
mean_params = (n_features * self.n_components)
return int((((cov_params + mean_params) + self.n_components) - 1))
|
'Bayesian information criterion for the current model on the input X.
Parameters
X : array of shape (n_samples, n_dimensions)
Returns
bic : float
The lower the better.'
| def bic(self, X):
| return ((((-2) * self.score(X)) * X.shape[0]) + (self._n_parameters() * np.log(X.shape[0])))
|
'Akaike information criterion for the current model on the input X.
Parameters
X : array of shape (n_samples, n_dimensions)
Returns
aic : float
The lower the better.'
| def aic(self, X):
| return ((((-2) * self.score(X)) * X.shape[0]) + (2 * self._n_parameters()))
|
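bic and aic are typically used to select n_components; a brief sketch assuming scikit-learn's GaussianMixture on hypothetical two-cluster data.

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(80, 2) - 3.0, rng.randn(80, 2) + 3.0])

# The lower the criterion the better; two components should win here.
bic = {k: GaussianMixture(n_components=k, random_state=0).fit(X).bic(X)
       for k in range(1, 5)}
print(min(bic, key=bic.get))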
'Return precisions as a full matrix.'
| def _get_precisions(self):
| if (self.covariance_type == 'full'):
return self.precs_
elif (self.covariance_type in ['diag', 'spherical']):
return [np.diag(cov) for cov in self.precs_]
elif (self.covariance_type == 'tied'):
return ([self.precs_] * self.n_components)
|
'Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation'
| def score_samples(self, X):
| check_is_fitted(self, 'gamma_')
X = check_array(X)
if (X.ndim == 1):
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma((self.gamma_.T[1] + self.gamma_.T[2]))
dgamma1 = (digamma(self.gamma_.T[1]) - sd)
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = (digamma(self.gamma_[(0, 2)]) - digamma((self.gamma_[(0, 1)] + self.gamma_[(0, 2)])))
for j in range(1, self.n_components):
dgamma2[j] = (dgamma2[(j - 1)] + digamma(self.gamma_[((j - 1), 2)]))
dgamma2[j] -= sd[(j - 1)]
dgamma = (dgamma1 + dgamma2)
del dgamma1, dgamma2, sd
if (self.covariance_type not in ['full', 'tied', 'diag', 'spherical']):
raise NotImplementedError(('This ctype is not implemented: %s' % self.covariance_type))
p = _bound_state_log_lik(X, (self._initial_bound + self.bound_prec_), self.precs_, self.means_, self.covariance_type)
z = (p + dgamma)
z = log_normalize(z, axis=(-1))
bound = np.sum((z * p), axis=(-1))
return (bound, z)
|
'Update the concentration parameters for each cluster'
| def _update_concentration(self, z):
| sz = np.sum(z, axis=0)
self.gamma_.T[1] = (1.0 + sz)
self.gamma_.T[2].fill(0)
for i in range((self.n_components - 2), (-1), (-1)):
self.gamma_[(i, 2)] = (self.gamma_[((i + 1), 2)] + sz[i])
self.gamma_.T[2] += self.alpha
|
'Update the variational distributions for the means'
| def _update_means(self, X, z):
| n_features = X.shape[1]
for k in range(self.n_components):
if (self.covariance_type in ['spherical', 'diag']):
num = np.sum((z.T[k].reshape(((-1), 1)) * X), axis=0)
num *= self.precs_[k]
den = (1.0 + (self.precs_[k] * np.sum(z.T[k])))
self.means_[k] = (num / den)
elif (self.covariance_type in ['tied', 'full']):
if (self.covariance_type == 'tied'):
cov = self.precs_
else:
cov = self.precs_[k]
den = (np.identity(n_features) + (cov * np.sum(z.T[k])))
num = np.sum((z.T[k].reshape(((-1), 1)) * X), axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
|
'Update the variational distributions for the precisions'
| def _update_precisions(self, X, z):
| n_features = X.shape[1]
if (self.covariance_type == 'spherical'):
self.dof_ = ((0.5 * n_features) * np.sum(z, axis=0))
for k in range(self.n_components):
sq_diff = np.sum(((X - self.means_[k]) ** 2), axis=1)
self.scale_[k] = 1.0
self.scale_[k] += (0.5 * np.sum((z.T[k] * (sq_diff + n_features))))
self.bound_prec_[k] = ((0.5 * n_features) * (digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile((self.dof_ / self.scale_), [n_features, 1]).T
elif (self.covariance_type == 'diag'):
for k in range(self.n_components):
self.dof_[k].fill((1.0 + (0.5 * np.sum(z.T[k], axis=0))))
sq_diff = ((X - self.means_[k]) ** 2)
self.scale_[k] = (np.ones(n_features) + (0.5 * np.dot(z.T[k], (sq_diff + 1))))
self.precs_[k] = (self.dof_[k] / self.scale_[k])
self.bound_prec_[k] = (0.5 * np.sum((digamma(self.dof_[k]) - np.log(self.scale_[k]))))
self.bound_prec_[k] -= (0.5 * np.sum(self.precs_[k]))
elif (self.covariance_type == 'tied'):
self.dof_ = ((2 + X.shape[0]) + n_features)
self.scale_ = ((X.shape[0] + 1) * np.identity(n_features))
for k in range(self.n_components):
diff = (X - self.means_[k])
self.scale_ += np.dot(diff.T, (z[:, k:(k + 1)] * diff))
self.scale_ = pinvh(self.scale_)
self.precs_ = (self.dof_ * self.scale_)
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = (0.5 * wishart_log_det(self.dof_, self.scale_, self.det_scale_, n_features))
self.bound_prec_ -= ((0.5 * self.dof_) * np.trace(self.scale_))
elif (self.covariance_type == 'full'):
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = ((2 + sum_resp) + n_features)
self.scale_[k] = ((sum_resp + 1) * np.identity(n_features))
diff = (X - self.means_[k])
self.scale_[k] += np.dot(diff.T, (z[:, k:(k + 1)] * diff))
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = (self.dof_[k] * self.scale_[k])
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = (0.5 * wishart_log_det(self.dof_[k], self.scale_[k], self.det_scale_[k], n_features))
self.bound_prec_[k] -= ((0.5 * self.dof_[k]) * np.trace(self.scale_[k]))
|
'Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default.'
| def _monitor(self, X, z, n, end=False):
| if (self.verbose > 0):
print(('Bound after updating %8s: %f' % (n, self.lower_bound(X, z))))
if end:
print('Cluster proportions:', self.gamma_.T[1])
print('covariance_type:', self.covariance_type)
|
'Maximize the variational lower bound
Update each of the parameters to maximize the lower bound.'
| def _do_mstep(self, X, z, params):
| self._monitor(X, z, 'z')
self._update_concentration(z)
self._monitor(X, z, 'gamma')
if ('m' in params):
self._update_means(X, z)
self._monitor(X, z, 'mu')
if ('c' in params):
self._update_precisions(X, z)
self._monitor(X, z, 'a and b', end=True)
|
'Initializes the concentration parameters'
| def _initialize_gamma(self):
| self.gamma_ = (self.alpha * np.ones((self.n_components, 3)))
|
'The variational lower bound for the concentration parameter.'
| def _bound_concentration(self):
| logprior = (gammaln(self.alpha) * self.n_components)
logprior += np.sum(((self.alpha - 1) * (digamma(self.gamma_.T[2]) - digamma((self.gamma_.T[1] + self.gamma_.T[2])))))
logprior += np.sum((- gammaln((self.gamma_.T[1] + self.gamma_.T[2]))))
logprior += np.sum((gammaln(self.gamma_.T[1]) + gammaln(self.gamma_.T[2])))
logprior -= np.sum(((self.gamma_.T[1] - 1) * (digamma(self.gamma_.T[1]) - digamma((self.gamma_.T[1] + self.gamma_.T[2])))))
logprior -= np.sum(((self.gamma_.T[2] - 1) * (digamma(self.gamma_.T[2]) - digamma((self.gamma_.T[1] + self.gamma_.T[2])))))
return logprior
|
'The variational lower bound for the mean parameters'
| def _bound_means(self):
| logprior = 0.0
logprior -= (0.5 * squared_norm(self.means_))
logprior -= ((0.5 * self.means_.shape[1]) * self.n_components)
return logprior
|
'Returns the bound term related to precisions'
| def _bound_precisions(self):
| logprior = 0.0
if (self.covariance_type == 'spherical'):
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(((self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_))))
logprior += np.sum((((- np.log(self.scale_)) + self.dof_) - self.precs_[:, 0]))
elif (self.covariance_type == 'diag'):
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(((self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_))))
logprior += np.sum((((- np.log(self.scale_)) + self.dof_) - self.precs_))
elif (self.covariance_type == 'tied'):
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif (self.covariance_type == 'full'):
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k], self.scale_[k], self.det_scale_[k])
return logprior
|
'Returns the bound term related to proportions'
| def _bound_proportions(self, z):
| dg12 = digamma((self.gamma_.T[1] + self.gamma_.T[2]))
dg1 = (digamma(self.gamma_.T[1]) - dg12)
dg2 = (digamma(self.gamma_.T[2]) - dg12)
cz = stable_cumsum(z[:, ::(-1)], axis=(-1))[:, (-2)::(-1)]
logprior = (np.sum((cz * dg2[:(-1)])) + np.sum((z * dg1)))
del cz
z_non_zeros = z[(z > np.finfo(np.float32).eps)]
logprior -= np.sum((z_non_zeros * np.log(z_non_zeros)))
return logprior
|
'returns a lower bound on model evidence based on X and membership'
| def lower_bound(self, X, z):
| check_is_fitted(self, 'means_')
if (self.covariance_type not in ['full', 'tied', 'diag', 'spherical']):
raise NotImplementedError(('This ctype is not implemented: %s' % self.covariance_type))
X = np.asarray(X)
if (X.ndim == 1):
X = X[:, np.newaxis]
c = np.sum((z * _bound_state_log_lik(X, (self._initial_bound + self.bound_prec_), self.precs_, self.means_, self.covariance_type)))
return (c + self._logprior(z))
|
'Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string \'\' when creating
the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.'
| def _fit(self, X, y=None):
| self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if (X.ndim == 1):
X = X[:, np.newaxis]
(n_samples, n_features) = X.shape
z = np.ones((n_samples, self.n_components))
z /= self.n_components
self._initial_bound = (((-0.5) * n_features) * np.log((2 * np.pi)))
self._initial_bound -= np.log(((2 * np.pi) * np.e))
if ((self.init_params != '') or (not hasattr(self, 'gamma_'))):
self._initialize_gamma()
if (('m' in self.init_params) or (not hasattr(self, 'means_'))):
self.means_ = cluster.KMeans(n_clusters=self.n_components, random_state=self.random_state_).fit(X).cluster_centers_[::(-1)]
if (('w' in self.init_params) or (not hasattr(self, 'weights_'))):
self.weights_ = np.tile((1.0 / self.n_components), self.n_components)
if (('c' in self.init_params) or (not hasattr(self, 'precs_'))):
if (self.covariance_type == 'spherical'):
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = ((0.5 * n_features) * (digamma(self.dof_) - np.log(self.scale_)))
elif (self.covariance_type == 'diag'):
self.dof_ = (1 + (0.5 * n_features))
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = (0.5 * np.sum((digamma(self.dof_) - np.log(self.scale_)), 1))
self.bound_prec_ -= (0.5 * np.sum(self.precs_, 1))
elif (self.covariance_type == 'tied'):
self.dof_ = 1.0
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.0
self.bound_prec_ = (0.5 * wishart_log_det(self.dof_, self.scale_, self.det_scale_, n_features))
self.bound_prec_ -= ((0.5 * self.dof_) * np.trace(self.scale_))
elif (self.covariance_type == 'full'):
self.dof_ = ((1 + self.n_components) + n_samples)
self.dof_ *= np.ones(self.n_components)
self.scale_ = [(2 * np.identity(n_features)) for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features) for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(self.dof_[k], self.scale_[k], self.det_scale_[k], n_features)
self.bound_prec_[k] -= (self.dof_[k] * np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
current_log_likelihood = None
self.converged_ = False
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
(curr_logprob, z) = self.score_samples(X)
current_log_likelihood = (curr_logprob.mean() + (self._logprior(z) / n_samples))
if (prev_log_likelihood is not None):
change = abs((current_log_likelihood - prev_log_likelihood))
if (change < self.tol):
self.converged_ = True
break
self._do_mstep(X, z, self.params)
if (self.n_iter == 0):
z = np.zeros((X.shape[0], self.n_components))
self._set_weights()
return z
|
'Estimate model parameters with the variational algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string \'\' when creating
the object. Likewise, if you just would like to do an
initialization, set n_iter=0.
Parameters
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.'
| def _fit(self, X, y=None):
| self.alpha_ = (float(self.alpha) / self.n_components)
return super(VBGMM, self)._fit(X, y)
|
'Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation'
| def score_samples(self, X):
| check_is_fitted(self, 'gamma_')
X = check_array(X)
if (X.ndim == 1):
X = X[:, np.newaxis]
dg = (digamma(self.gamma_) - digamma(np.sum(self.gamma_)))
if (self.covariance_type not in ['full', 'tied', 'diag', 'spherical']):
raise NotImplementedError(('This ctype is not implemented: %s' % self.covariance_type))
p = _bound_state_log_lik(X, (self._initial_bound + self.bound_prec_), self.precs_, self.means_, self.covariance_type)
z = (p + dg)
z = log_normalize(z, axis=(-1))
bound = np.sum((z * p), axis=(-1))
return (bound, z)
|
'Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default.'
| def _monitor(self, X, z, n, end=False):
| if (self.verbose > 0):
print(('Bound after updating %8s: %f' % (n, self.lower_bound(X, z))))
if end:
print('Cluster proportions:', self.gamma_)
print('covariance_type:', self.covariance_type)
|
'Build the f_ interp1d function.'
| def _build_f(self, X, y):
| if (self.out_of_bounds not in ['raise', 'nan', 'clip']):
raise ValueError("The argument ``out_of_bounds`` must be in 'nan', 'clip', 'raise'; got {0}".format(self.out_of_bounds))
bounds_error = (self.out_of_bounds == 'raise')
if (len(y) == 1):
self.f_ = (lambda x: y.repeat(x.shape))
else:
self.f_ = interpolate.interp1d(X, y, kind='linear', bounds_error=bounds_error)
|
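`_build_f` wraps the fitted points in a linear `scipy.interpolate.interp1d`, except in the degenerate single-point case where a constant function is used. A minimal standalone sketch of the same construction, on toy data:

import numpy as np
from scipy import interpolate

def build_f(X, y, out_of_bounds='nan'):
    bounds_error = (out_of_bounds == 'raise')
    if len(y) == 1:
        # A single training point: predict that value everywhere.
        return lambda x: y.repeat(np.shape(x))
    return interpolate.interp1d(X, y, kind='linear', bounds_error=bounds_error)

f = build_f(np.array([0.0, 1.0, 2.0]), np.array([0.0, 0.5, 2.0]))
print(f([0.5, 1.5]))   # linear interpolation between the fitted points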
'Build the y_ IsotonicRegression.'
| def _build_y(self, X, y, sample_weight, trim_duplicates=True):
| check_consistent_length(X, y, sample_weight)
(X, y) = [check_array(x, ensure_2d=False) for x in [X, y]]
y = as_float_array(y)
self._check_fit_data(X, y, sample_weight)
if (self.increasing == 'auto'):
self.increasing_ = check_increasing(X, y)
else:
self.increasing_ = self.increasing
if (sample_weight is not None):
sample_weight = check_array(sample_weight, ensure_2d=False)
mask = (sample_weight > 0)
(X, y, sample_weight) = (X[mask], y[mask], sample_weight[mask])
else:
sample_weight = np.ones(len(y))
order = np.lexsort((y, X))
(X, y, sample_weight) = [array[order].astype(np.float64, copy=False) for array in [X, y, sample_weight]]
(unique_X, unique_y, unique_sample_weight) = _make_unique(X, y, sample_weight)
self._X_ = X = unique_X
self._y_ = y = isotonic_regression(unique_y, unique_sample_weight, self.y_min, self.y_max, increasing=self.increasing_)
(self.X_min_, self.X_max_) = (np.min(X), np.max(X))
if trim_duplicates:
keep_data = np.ones((len(y),), dtype=bool)
keep_data[1:(-1)] = np.logical_or(np.not_equal(y[1:(-1)], y[:(-2)]), np.not_equal(y[1:(-1)], y[2:]))
return (X[keep_data], y[keep_data])
else:
return (X, y)
|
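`_build_y` sorts the samples primarily by X (breaking ties by y) before pooling duplicates and running the isotonic fit; `np.lexsort((y, X))` treats the last key, X, as most significant. A small sketch of that ordering step on toy data:

import numpy as np

X = np.array([2.0, 1.0, 2.0, 1.0])
y = np.array([5.0, 3.0, 4.0, 1.0])

order = np.lexsort((y, X))      # sort by X, break ties by y
print(X[order])                 # [1. 1. 2. 2.]
print(y[order])                 # [1. 3. 4. 5.]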
'Fit the model using X, y as training data.
Parameters
X : array-like, shape=(n_samples,)
Training data.
y : array-like, shape=(n_samples,)
Training target.
sample_weight : array-like, shape=(n_samples,), optional, default: None
Weights. If set to None, all weights will be set to 1 (equal
weights).
Returns
self : object
Returns an instance of self.
Notes
X is stored for future use, as `transform` needs X to interpolate
new input data.'
| def fit(self, X, y, sample_weight=None):
| (X, y) = self._build_y(X, y, sample_weight)
(self._necessary_X_, self._necessary_y_) = (X, y)
self._build_f(X, y)
return self
|
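A minimal end-to-end usage sketch of the public API documented above, on toy data:

import numpy as np
from sklearn.isotonic import IsotonicRegression

X = np.arange(10, dtype=float)
y = np.array([1.0, 2.0, 1.5, 3.0, 2.5, 4.0, 5.0, 4.5, 6.0, 7.0])

ir = IsotonicRegression(out_of_bounds='clip')
y_fit = ir.fit_transform(X, y)   # non-decreasing approximation of y
y_new = ir.predict([2.5, 11.0])  # 11.0 is clipped to the training range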
'Transform new data by linear interpolation
Parameters
T : array-like, shape=(n_samples,)
Data to transform.
Returns
T_ : array, shape=(n_samples,)
The transformed data'
| def transform(self, T):
| T = as_float_array(T)
if (len(T.shape) != 1):
raise ValueError('Isotonic regression input should be a 1d array')
if (self.out_of_bounds not in ['raise', 'nan', 'clip']):
raise ValueError("The argument ``out_of_bounds`` must be in 'nan', 'clip', 'raise'; got {0}".format(self.out_of_bounds))
if (self.out_of_bounds == 'clip'):
T = np.clip(T, self.X_min_, self.X_max_)
return self.f_(T)
|
'Predict new data by linear interpolation.
Parameters
T : array-like, shape=(n_samples,)
Data to transform.
Returns
T_ : array, shape=(n_samples,)
Transformed data.'
| def predict(self, T):
| return self.transform(T)
|
'Pickle-protocol - return state of the estimator.'
| def __getstate__(self):
| state = super(IsotonicRegression, self).__getstate__()
state.pop('f_', None)
return state
|
'Pickle-protocol - set state of the estimator.
We need to rebuild the interpolation function.'
| def __setstate__(self, state):
| super(IsotonicRegression, self).__setstate__(state)
if (hasattr(self, '_necessary_X_') and hasattr(self, '_necessary_y_')):
self._build_f(self._necessary_X_, self._necessary_y_)
|
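The interpolation function `f_` is not pickled; it is dropped in `__getstate__` and rebuilt from the stored `_necessary_X_` / `_necessary_y_` in `__setstate__`. A quick roundtrip sketch:

import pickle
import numpy as np
from sklearn.isotonic import IsotonicRegression

ir = IsotonicRegression().fit(np.arange(5.0), np.array([0.0, 1.0, 1.0, 3.0, 4.0]))
restored = pickle.loads(pickle.dumps(ir))
print(restored.predict([1.5, 2.5]))   # f_ was rebuilt inside __setstate__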
'Fit Ridge regression model
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
self : returns an instance of self.'
| def fit(self, X, y, sample_weight=None):
| return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
|
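A minimal usage sketch for the `fit` signature above, including per-sample weights, on toy data:

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(50)
w = rng.uniform(0.5, 1.5, size=50)

reg = Ridge(alpha=1.0).fit(X, y, sample_weight=w)
print(reg.coef_, reg.intercept_)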
'Fit Ridge regression model.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
self : returns an instance of self.'
| def fit(self, X, y, sample_weight=None):
| self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=(-1))
Y = self._label_binarizer.fit_transform(y)
if (not self._label_binarizer.y_type_.startswith('multilabel')):
y = column_or_1d(y, warn=True)
else:
raise ValueError(("%s doesn't support multi-label classification" % self.__class__.__name__))
if self.class_weight:
if (sample_weight is None):
sample_weight = 1.0
sample_weight = (sample_weight * compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
|
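The classifier binarizes targets to {-1, 1} with `LabelBinarizer` and, when `class_weight` is set, folds the class weights into `sample_weight` before delegating to the ridge solver. A small usage sketch on toy, imbalanced data:

import numpy as np
from sklearn.linear_model import RidgeClassifier

X = np.array([[0.0], [1.0], [2.0], [3.0], [4.0], [5.0]])
y = np.array([0, 0, 0, 0, 1, 1])

clf = RidgeClassifier(class_weight='balanced').fit(X, y)
print(clf.predict([[3.5]]))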
'Helper function to avoid code duplication between self._errors and
self._values.
Notes
We don\'t construct matrix G; instead we compute its action on y and its diagonal.'
| def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
| w = (1.0 / (v + alpha))
constant_column = (np.var(Q, 0) < 1e-12)
w[constant_column] = 0
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
if (len(y.shape) != 1):
G_diag = G_diag[:, np.newaxis]
return (G_diag, c)
|
'Helper function to avoid code duplication between self._errors_svd
and self._values_svd.'
| def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
| constant_column = (np.var(U, 0) < 1e-12)
w = (((v + alpha) ** (-1)) - (alpha ** (-1)))
w[constant_column] = (- (alpha ** (-1)))
c = (np.dot(U, self._diag_dot(w, UT_y)) + ((alpha ** (-1)) * y))
G_diag = (self._decomp_diag(w, U) + (alpha ** (-1)))
if (len(y.shape) != 1):
G_diag = G_diag[:, np.newaxis]
return (G_diag, c)
|
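Both helpers avoid forming G = (K + alpha*I)^-1 explicitly: with an eigendecomposition K = Q diag(v) Q^T, the product c = G y and the diagonal of G fall out of elementwise operations on 1/(v + alpha). A numpy sketch of the eigen variant on toy data, ignoring the constant-column correction used when the kernel has been centred:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(6, 3)
y = rng.randn(6)
alpha = 0.7

K = X @ X.T
v, Q = np.linalg.eigh(K)                  # K = Q diag(v) Q^T
w = 1.0 / (v + alpha)

c = Q @ (w * (Q.T @ y))                   # G @ y without forming G
G_diag = (Q ** 2) @ w                     # diag(G), elementwise

G = np.linalg.inv(K + alpha * np.eye(6))  # direct check
assert np.allclose(c, G @ y)
assert np.allclose(G_diag, np.diag(G))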
'Fit Ridge regression model
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values. Will be cast to X\'s dtype if necessary
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
self : Returns self.'
| def fit(self, X, y, sample_weight=None):
| (X, y) = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64, multi_output=True, y_numeric=True)
if ((sample_weight is not None) and (not isinstance(sample_weight, float))):
sample_weight = check_array(sample_weight, ensure_2d=False)
(n_samples, n_features) = X.shape
(X, y, X_offset, y_offset, X_scale) = LinearModel._preprocess_data(X, y, self.fit_intercept, self.normalize, self.copy_X, sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if ((gcv_mode is None) or (gcv_mode == 'auto')):
if (sparse.issparse(X) or (n_features > n_samples) or with_sw):
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif ((gcv_mode == 'svd') and with_sw):
warnings.warn('non-uniform sample weights unsupported for svd, forcing usage of eigen')
gcv_mode = 'eigen'
if (gcv_mode == 'eigen'):
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif (gcv_mode == 'svd'):
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError(('bad gcv_mode "%s"' % gcv_mode))
if (sample_weight is not None):
(X, y) = _rescale_data(X, y, sample_weight)
centered_kernel = ((not sparse.issparse(X)) and self.fit_intercept)
(v, Q, QT_y) = _pre_compute(X, y, centered_kernel)
n_y = (1 if (len(y.shape) == 1) else y.shape[1])
cv_values = np.zeros(((n_samples * n_y), len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = (scorer is None)
for (i, alpha) in enumerate(self.alphas):
if error:
(out, c) = _errors(alpha, y, v, Q, QT_y)
else:
(out, c) = _values(alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
def identity_estimator():
pass
identity_estimator.decision_function = (lambda y_predict: y_predict)
identity_estimator.predict = (lambda y_predict: y_predict)
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i]) for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if (len(y.shape) == 1):
cv_values_shape = (n_samples, len(self.alphas))
else:
cv_values_shape = (n_samples, n_y, len(self.alphas))
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
|
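A usage sketch of the GCV path above through the public `RidgeCV` estimator; with `cv=None` the leave-one-out values per alpha can be kept via `store_cv_values` (toy data):

import numpy as np
from sklearn.linear_model import RidgeCV

rng = np.random.RandomState(0)
X = rng.randn(40, 4)
y = X @ np.array([1.0, 0.0, -1.0, 2.0]) + 0.1 * rng.randn(40)

reg = RidgeCV(alphas=[0.1, 1.0, 10.0], store_cv_values=True).fit(X, y)
print(reg.alpha_)                 # alpha with the lowest mean LOO error
print(reg.cv_values_.shape)       # (n_samples, n_alphas) = (40, 3)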
'Fit Ridge regression model
Parameters
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values. Will be cast to X\'s dtype if necessary
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
self : Returns self.'
| def fit(self, X, y, sample_weight=None):
| if (self.cv is None):
estimator = _RidgeGCV(self.alphas, fit_intercept=self.fit_intercept, normalize=self.normalize, scoring=self.scoring, gcv_mode=self.gcv_mode, store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError('cv!=None and store_cv_values=True are incompatible')
parameters = {'alpha': self.alphas}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept, normalize=self.normalize), parameters, cv=self.cv, scoring=self.scoring)
gs.fit(X, y, sample_weight=sample_weight)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
|
'Fit the ridge classifier.
Parameters
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values. Will be cast to X\'s dtype if necessary
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
self : object
Returns self.'
| def fit(self, X, y, sample_weight=None):
| self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=(-1))
Y = self._label_binarizer.fit_transform(y)
if (not self._label_binarizer.y_type_.startswith('multilabel')):
y = column_or_1d(y, warn=True)
if self.class_weight:
if (sample_weight is None):
sample_weight = 1.0
sample_weight = (sample_weight * compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
|
'Fit model with coordinate descent.
Parameters
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target. Will be cast to X\'s dtype if necessary
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don\'t use this parameter unless you know what you are doing.
Notes
Coordinate descent is an algorithm that considers each column of
data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.'
| def fit(self, X, y, check_input=True):
| if (self.alpha == 0):
warnings.warn('With alpha=0, this algorithm does not converge well. You are advised to use the LinearRegression estimator', stacklevel=2)
if isinstance(self.precompute, six.string_types):
raise ValueError(('precompute should be one of True, False or array-like. Got %r' % self.precompute))
if check_input:
(X, y) = check_X_y(X, y, accept_sparse='csc', order='F', dtype=[np.float64, np.float32], copy=(self.copy_X and self.fit_intercept), multi_output=True, y_numeric=True)
y = check_array(y, order='F', copy=False, dtype=X.dtype.type, ensure_2d=False)
(X, y, X_offset, y_offset, X_scale, precompute, Xy) = _pre_fit(X, y, None, self.precompute, self.normalize, self.fit_intercept, copy=False)
if (y.ndim == 1):
y = y[:, np.newaxis]
if ((Xy is not None) and (Xy.ndim == 1)):
Xy = Xy[:, np.newaxis]
(n_samples, n_features) = X.shape
n_targets = y.shape[1]
if (self.selection not in ['cyclic', 'random']):
raise ValueError('selection should be either random or cyclic.')
if ((not self.warm_start) or (not hasattr(self, 'coef_'))):
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype, order='F')
else:
coef_ = self.coef_
if (coef_.ndim == 1):
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
for k in xrange(n_targets):
if (Xy is not None):
this_Xy = Xy[:, k]
else:
this_Xy = None
(_, this_coef, this_dual_gap, this_iter) = self.path(X, y[:, k], l1_ratio=self.l1_ratio, eps=None, n_alphas=None, alphas=[self.alpha], precompute=precompute, Xy=this_Xy, fit_intercept=False, normalize=False, copy_X=True, verbose=False, tol=self.tol, positive=self.positive, X_offset=X_offset, X_scale=X_scale, return_n_iter=True, coef_init=coef_[k], max_iter=self.max_iter, random_state=self.random_state, selection=self.selection, check_input=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if (n_targets == 1):
self.n_iter_ = self.n_iter_[0]
(self.coef_, self.dual_gap_) = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_offset, y_offset, X_scale)
self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
return self
|
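A minimal usage sketch of the coordinate-descent fit above, on toy data; `sparse_coef_` (defined just below) exposes the same coefficients as a CSR row:

import numpy as np
from sklearn.linear_model import ElasticNet

rng = np.random.RandomState(0)
X = rng.randn(100, 10)
y = X[:, 0] - 2.0 * X[:, 3] + 0.1 * rng.randn(100)

enet = ElasticNet(alpha=0.1, l1_ratio=0.7).fit(X, y)
print(enet.coef_)          # dense coefficients, many driven to exactly 0
print(enet.sparse_coef_)   # same coefficients as a scipy.sparse CSR matrix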
'sparse representation of the fitted ``coef_``'
| @property
def sparse_coef_(self):
| return sparse.csr_matrix(self.coef_)
|
'Decision function of the linear model
Parameters
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
T : array, shape (n_samples,)
The predicted decision function'
| def _decision_function(self, X):
| check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return (safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
|
'Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values'
| def fit(self, X, y):
| y = check_array(y, copy=False, dtype=[np.float64, np.float32], ensure_2d=False)
if (y.shape[0] == 0):
raise ValueError(('y has 0 samples: %r' % y))
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if (isinstance(self, ElasticNetCV) or isinstance(self, LassoCV)):
if (model_str == 'ElasticNet'):
model = ElasticNet()
else:
model = Lasso()
if ((y.ndim > 1) and (y.shape[1] > 1)):
raise ValueError(('For multi-task outputs, use MultiTask%sCV' % model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError('X should be dense but a sparse matrix was passed')
elif (y.ndim == 1):
raise ValueError(('For mono-task outputs, use %sCV' % model_str))
if (model_str == 'ElasticNet'):
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if (self.selection not in ['random', 'cyclic']):
raise ValueError('selection should be either random or cyclic.')
copy_X = (self.copy_X and self.fit_intercept)
if (isinstance(X, np.ndarray) or sparse.isspmatrix(X)):
reference_to_old_X = X
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, 'data') and (not np.may_share_memory(reference_to_old_X.data, X.data))):
copy_X = False
elif (not np.may_share_memory(reference_to_old_X, X)):
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=[np.float64, np.float32], order='F', copy=copy_X)
copy_X = False
if (X.shape[0] != y.shape[0]):
raise ValueError(('X and y have inconsistent dimensions (%d != %d)' % (X.shape[0], y.shape[0])))
path_params = self.get_params()
if ('l1_ratio' in path_params):
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if (alphas is None):
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(X, y, l1_ratio=l1_ratio, fit_intercept=self.fit_intercept, eps=self.eps, n_alphas=self.n_alphas, normalize=self.normalize, copy_X=self.copy_X))
else:
alphas = np.tile(np.sort(alphas)[::(-1)], (n_l1_ratio, 1))
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
if (not ((self.n_jobs == 1) or (self.n_jobs is None))):
path_params['copy_X'] = False
cv = check_cv(self.cv)
folds = list(cv.split(X, y))
best_mse = np.inf
jobs = (delayed(_path_residuals)(X, y, train, test, self.path, path_params, alphas=this_alphas, l1_ratio=this_l1_ratio, X_order='F', dtype=X.dtype.type) for (this_l1_ratio, this_alphas) in zip(l1_ratios, alphas) for (train, test) in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend='threading')(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), (-1)))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for (l1_ratio, l1_alphas, mse_alphas) in zip(l1_ratios, alphas, mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if (this_best_mse < best_mse):
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if (self.alphas is None):
self.alphas_ = np.asarray(alphas)
if (n_l1_ratio == 1):
self.alphas_ = self.alphas_[0]
else:
self.alphas_ = np.asarray(alphas[0])
common_params = dict(((name, value) for (name, value) in self.get_params().items() if (name in model.get_params())))
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if (not hasattr(self, 'l1_ratio')):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
|
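A usage sketch of the cross-validated path fit above, on toy data; the grid is built over every (l1_ratio, alpha) pair and the winning pair is refit on the full data:

import numpy as np
from sklearn.linear_model import ElasticNetCV

rng = np.random.RandomState(0)
X = rng.randn(80, 8)
y = 3.0 * X[:, 1] - X[:, 5] + 0.1 * rng.randn(80)

cv_model = ElasticNetCV(l1_ratio=[0.1, 0.5, 0.9], n_alphas=50, cv=3).fit(X, y)
print(cv_model.l1_ratio_, cv_model.alpha_)   # selected by lowest mean CV MSE
print(cv_model.mse_path_.shape)              # (n_l1_ratio, n_alphas, n_folds)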
'Fit MultiTaskElasticNet model with coordinate descent
Parameters
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target. Will be cast to X\'s dtype if necessary
Notes
Coordinate descent is an algorithm that considers each column of
data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.'
| def fit(self, X, y):
| X = check_array(X, dtype=[np.float64, np.float32], order='F', copy=(self.copy_X and self.fit_intercept))
y = check_array(y, dtype=X.dtype.type, ensure_2d=False)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if (y.ndim == 1):
raise ValueError(('For mono-task outputs, use %s' % model_str))
(n_samples, n_features) = X.shape
(_, n_tasks) = y.shape
if (n_samples != y.shape[0]):
raise ValueError(('X and y have inconsistent dimensions (%d != %d)' % (n_samples, y.shape[0])))
(X, y, X_offset, y_offset, X_scale) = _preprocess_data(X, y, self.fit_intercept, self.normalize, copy=False)
if ((not self.warm_start) or (self.coef_ is None)):
self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type, order='F')
l1_reg = ((self.alpha * self.l1_ratio) * n_samples)
l2_reg = ((self.alpha * (1.0 - self.l1_ratio)) * n_samples)
self.coef_ = np.asfortranarray(self.coef_)
if (self.selection not in ['random', 'cyclic']):
raise ValueError('selection should be either random or cyclic.')
random = (self.selection == 'random')
(self.coef_, self.dual_gap_, self.eps_, self.n_iter_) = cd_fast.enet_coordinate_descent_multi_task(self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol, check_random_state(self.random_state), random)
self._set_intercept(X_offset, y_offset, X_scale)
if (self.dual_gap_ > self.eps_):
warnings.warn('Objective did not converge, you might want to increase the number of iterations', ConvergenceWarning)
return self
|
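A small usage sketch of the multi-task fit above, on toy data; Y must be 2-D with one column per task, and the penalty couples the rows of `coef_` across tasks:

import numpy as np
from sklearn.linear_model import MultiTaskElasticNet

rng = np.random.RandomState(0)
X = rng.randn(60, 5)
W = np.array([[1.0, 1.0], [0.0, 0.0], [-2.0, -1.5], [0.0, 0.0], [0.5, 0.7]])
Y = X @ W + 0.1 * rng.randn(60, 2)

mt = MultiTaskElasticNet(alpha=0.1, l1_ratio=0.5).fit(X, Y)
print(mt.coef_.shape)   # (n_tasks, n_features) = (2, 5)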
'Predict using the linear model
Parameters
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
Returns
C : array, shape = (n_samples,)
Returns predicted values.'
| def predict(self, X):
| return self._decision_function(X)
|
'Set the intercept_'
| def _set_intercept(self, X_offset, y_offset, X_scale):
| if self.fit_intercept:
self.coef_ = (self.coef_ / X_scale)
self.intercept_ = (y_offset - np.dot(X_offset, self.coef_.T))
else:
self.intercept_ = 0.0
|
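When the data were centred (and optionally scaled) during preprocessing, the intercept is recovered so that predictions on the raw X match the fit on the centred data: the coefficients are rescaled by 1/X_scale and intercept = y_offset - X_offset . coef^T. A small numpy check of that identity, assuming simple mean-centring and no scaling:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(30, 3) + 5.0
y = X @ np.array([1.0, -1.0, 2.0]) + 3.0

X_offset, y_offset = X.mean(axis=0), y.mean()
X_scale = np.ones(3)                     # no scaling in this sketch
coef = np.linalg.lstsq(X - X_offset, y - y_offset, rcond=None)[0]

coef = coef / X_scale
intercept = y_offset - X_offset @ coef   # same formula as _set_intercept
assert np.allclose(X @ coef + intercept, y)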