'Predict confidence scores for samples. The confidence score for a sample is the signed distance of that sample to the hyperplane. Parameters X : {array-like, sparse matrix}, shape = (n_samples, n_features) Samples. Returns array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes) Confidence scores per (sample, class) combination. In the binary case, confidence score for self.classes_[1] where >0 means this class would be predicted.'
def decision_function(self, X):
    if not hasattr(self, 'coef_') or self.coef_ is None:
        raise NotFittedError('This %(name)s instance is not fitted yet'
                             % {'name': type(self).__name__})
    X = check_array(X, accept_sparse='csr')
    n_features = self.coef_.shape[1]
    if X.shape[1] != n_features:
        raise ValueError('X has %d features per sample; expecting %d'
                         % (X.shape[1], n_features))
    scores = safe_sparse_dot(X, self.coef_.T,
                             dense_output=True) + self.intercept_
    return scores.ravel() if scores.shape[1] == 1 else scores
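As a quick illustration of the docstring above (not part of the original source), the sketch below fits a small binary linear classifier and checks that `predict` is just the sign test on `decision_function`; the toy data and the choice of `LogisticRegression` are assumptions made for the example.

    # Hypothetical usage sketch: decision_function returns one signed distance
    # per sample in the binary case, and predict() thresholds it at zero.
    import numpy as np
    from sklearn.linear_model import LogisticRegression

    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])

    clf = LogisticRegression().fit(X, y)
    scores = clf.decision_function(X)          # shape (n_samples,) for 2 classes
    assert np.array_equal(clf.classes_[(scores > 0).astype(int)], clf.predict(X))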
'Predict class labels for samples in X. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Samples. Returns C : array, shape = [n_samples] Predicted class label per sample.'
def predict(self, X):
    scores = self.decision_function(X)
    if len(scores.shape) == 1:
        indices = (scores > 0).astype(np.int)
    else:
        indices = scores.argmax(axis=1)
    return self.classes_[indices]
'Probability estimation for OvR logistic regression. Positive class probabilities are computed as 1. / (1. + np.exp(-self.decision_function(X))); multiclass is handled by normalizing that over all classes.'
def _predict_proba_lr(self, X):
    prob = self.decision_function(X)
    prob *= -1
    np.exp(prob, prob)
    prob += 1
    np.reciprocal(prob, prob)
    if prob.ndim == 1:
        return np.vstack([1 - prob, prob]).T
    else:
        prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
        return prob
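A minimal numeric check of that OvR recipe (an illustrative sketch, assuming a plain binary `LogisticRegression` that takes the one-vs-rest path): the positive-class probability is the logistic sigmoid of the decision score.

    # Sketch: recompute predict_proba for a binary logistic model from its
    # decision scores, following the formula quoted in the docstring above.
    import numpy as np
    from sklearn.linear_model import LogisticRegression

    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])
    clf = LogisticRegression().fit(X, y)

    p_pos = 1.0 / (1.0 + np.exp(-clf.decision_function(X)))
    manual = np.column_stack([1.0 - p_pos, p_pos])
    assert np.allclose(manual, clf.predict_proba(X))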
'Convert coefficient matrix to dense array format. Converts the ``coef_`` member (back) to a numpy.ndarray. This is the default format of ``coef_`` and is required for fitting, so calling this method is only required on models that have previously been sparsified; otherwise, it is a no-op. Returns self : estimator'
def densify(self):
    msg = 'Estimator, %(name)s, must be fitted before densifying.'
    check_is_fitted(self, 'coef_', msg=msg)
    if sp.issparse(self.coef_):
        self.coef_ = self.coef_.toarray()
    return self
'Convert coefficient matrix to sparse format. Converts the ``coef_`` member to a scipy.sparse matrix, which for L1-regularized models can be much more memory- and storage-efficient than the usual numpy.ndarray representation. The ``intercept_`` member is not converted. Notes For non-sparse models, i.e. when there are not many zeros in ``coef_``, this may actually *increase* memory usage, so use this method with care. A rule of thumb is that the number of zero elements, which can be computed with ``(coef_ == 0).sum()``, must be more than 50% for this to provide significant benefits. After calling this method, further fitting with the partial_fit method (if any) will not work until you call densify. Returns self : estimator'
def sparsify(self):
    msg = 'Estimator, %(name)s, must be fitted before sparsifying.'
    check_is_fitted(self, 'coef_', msg=msg)
    self.coef_ = sp.csr_matrix(self.coef_)
    return self
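To make the rule of thumb above concrete, here is a small sketch (not from the source; the dataset and hyperparameters are arbitrary) that checks the fraction of zero coefficients before calling `sparsify()`, then densifies again before any further fitting.

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X, y = make_classification(n_samples=200, n_features=100, n_informative=5,
                               random_state=0)
    # Strong L1 regularization drives most coefficients to exactly zero.
    clf = LogisticRegression(penalty='l1', solver='liblinear', C=0.1).fit(X, y)

    zero_fraction = float((clf.coef_ == 0).mean())
    if zero_fraction > 0.5:      # the >50% guideline from the docstring
        clf.sparsify()           # coef_ becomes a scipy.sparse CSR matrix
    clf.densify()                # back to a dense ndarray (no-op if never sparsified)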
'Fit linear model. Parameters X : numpy array or sparse matrix of shape [n_samples,n_features] Training data y : numpy array of shape [n_samples, n_targets] Target values. Will be cast to X\'s dtype if necessary sample_weight : numpy array of shape [n_samples] Individual weights for each sample .. versionadded:: 0.17 parameter *sample_weight* support to LinearRegression. Returns self : returns an instance of self.'
def fit(self, X, y, sample_weight=None):
    n_jobs_ = self.n_jobs
    X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                     y_numeric=True, multi_output=True)
    if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1:
        raise ValueError('Sample weights must be 1D array or scalar')
    X, y, X_offset, y_offset, X_scale = self._preprocess_data(
        X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
        copy=self.copy_X, sample_weight=sample_weight)
    if sample_weight is not None:
        X, y = _rescale_data(X, y, sample_weight)
    if sp.issparse(X):
        if y.ndim < 2:
            out = sparse_lsqr(X, y)
            self.coef_ = out[0]
            self._residues = out[3]
        else:
            outs = Parallel(n_jobs=n_jobs_)(
                delayed(sparse_lsqr)(X, y[:, j].ravel())
                for j in range(y.shape[1]))
            self.coef_ = np.vstack((out[0] for out in outs))
            self._residues = np.vstack((out[3] for out in outs))
    else:
        self.coef_, self._residues, self.rank_, self.singular_ = \
            linalg.lstsq(X, y)
        self.coef_ = self.coef_.T
    if y.ndim == 1:
        self.coef_ = np.ravel(self.coef_)
    self._set_intercept(X_offset, y_offset, X_scale)
    return self
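A minimal usage sketch for the ordinary least-squares fit above (illustrative only; the toy data are an assumption):

    import numpy as np
    from sklearn.linear_model import LinearRegression

    X = np.array([[1.0], [2.0], [3.0], [4.0]])
    y = 2.0 * X.ravel() + 1.0          # exact line: slope 2, intercept 1

    reg = LinearRegression().fit(X, y)
    print(reg.coef_, reg.intercept_)   # approximately [2.0] and 1.0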
'Fit the model using X, y as training data. Parameters X : array-like, shape = [n_samples, n_features] Training data. y : array-like, shape = [n_samples] Target values. Will be cast to X\'s dtype if necessary Returns self : object Returns an instance of self.'
def fit(self, X, y):
(X, y) = check_X_y(X, y, ['csr', 'csc'], y_numeric=True, ensure_min_samples=2, estimator=self) X = as_float_array(X, copy=False) (n_samples, n_features) = X.shape (X, y, X_offset, y_offset, X_scale) = self._preprocess_data(X, y, self.fit_intercept, self.normalize) (estimator_func, params) = self._make_estimator_and_params(X, y) memory = self.memory if (memory is None): memory = Memory(cachedir=None, verbose=0) elif isinstance(memory, six.string_types): memory = Memory(cachedir=memory, verbose=0) elif (not isinstance(memory, Memory)): raise ValueError("'memory' should either be a string or a sklearn.externals.joblib.Memory instance, got 'memory={!r}' instead.".format(type(memory))) scores_ = memory.cache(_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch'])(estimator_func, X, y, scaling=self.scaling, n_resampling=self.n_resampling, n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=self.pre_dispatch, random_state=self.random_state, sample_fraction=self.sample_fraction, **params) if (scores_.ndim == 1): scores_ = scores_[:, np.newaxis] self.all_scores_ = scores_ self.scores_ = np.max(self.all_scores_, axis=1) return self
'Return the parameters passed to the estimator'
def _make_estimator_and_params(self, X, y):
raise NotImplementedError
'Get the boolean mask indicating which features are selected. Returns support : boolean array of shape [# input features] An element is True iff its corresponding feature is selected for retention.'
def _get_support_mask(self):
    check_is_fitted(self, 'scores_')
    return self.scores_ > self.selection_threshold
'Center the data in X but not in y'
def _preprocess_data(self, X, y, fit_intercept, normalize=False):
    X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
                                                  normalize=normalize)
    return X, y, X_offset, y, X_scale
'Fit the model using X, y as training data. Parameters X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X\'s dtype if necessary Returns self : object returns an instance of self.'
def fit(self, X, y):
    X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
    n_features = X.shape[1]
    X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit(
        X, y, None, self.precompute, self.normalize, self.fit_intercept,
        copy=True)
    if y.ndim == 1:
        y = y[:, np.newaxis]
    if self.n_nonzero_coefs is None and self.tol is None:
        self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
    else:
        self.n_nonzero_coefs_ = self.n_nonzero_coefs
    if Gram is False:
        coef_, self.n_iter_ = orthogonal_mp(
            X, y, self.n_nonzero_coefs_, self.tol, precompute=False,
            copy_X=True, return_n_iter=True)
    else:
        norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
        coef_, self.n_iter_ = orthogonal_mp_gram(
            Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol,
            norms_squared=norms_sq, copy_Gram=True, copy_Xy=True,
            return_n_iter=True)
    self.coef_ = coef_.T
    self._set_intercept(X_offset, y_offset, X_scale)
    return self
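The default sparsity rule in the body above (keep `max(int(0.1 * n_features), 1)` coefficients when neither `n_nonzero_coefs` nor `tol` is given) can be seen directly; this is an illustrative sketch with made-up data.

    import numpy as np
    from sklearn.datasets import make_regression
    from sklearn.linear_model import OrthogonalMatchingPursuit

    X, y = make_regression(n_samples=100, n_features=50, n_informative=5,
                           random_state=0)
    omp = OrthogonalMatchingPursuit().fit(X, y)   # no n_nonzero_coefs, no tol
    print(omp.n_nonzero_coefs_)                   # max(int(0.1 * 50), 1) == 5
    print(np.count_nonzero(omp.coef_))            # at most 5 non-zero coefficients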
'Fit the model using X, y as training data. Parameters X : array-like, shape [n_samples, n_features] Training data. y : array-like, shape [n_samples] Target values. Will be cast to X\'s dtype if necessary Returns self : object returns an instance of self.'
def fit(self, X, y):
    X, y = check_X_y(X, y, y_numeric=True, ensure_min_features=2,
                     estimator=self)
    X = as_float_array(X, copy=False, force_all_finite=False)
    cv = check_cv(self.cv, classifier=False)
    max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
                if not self.max_iter else self.max_iter)
    cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
        delayed(_omp_path_residues)(X[train], y[train], X[test], y[test],
                                    self.copy, self.fit_intercept,
                                    self.normalize, max_iter)
        for train, test in cv.split(X))
    min_early_stop = min(fold.shape[0] for fold in cv_paths)
    mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
                          for fold in cv_paths])
    best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
    self.n_nonzero_coefs_ = best_n_nonzero_coefs
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
                                    fit_intercept=self.fit_intercept,
                                    normalize=self.normalize)
    omp.fit(X, y)
    self.coef_ = omp.coef_
    self.intercept_ = omp.intercept_
    self.n_iter_ = omp.n_iter_
    return self
'Fit the model according to the given training data. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target vector relative to X. sample_weight : array-like, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. .. versionadded:: 0.17 *sample_weight* support to LogisticRegression. Returns self : object Returns self.'
def fit(self, X, y, sample_weight=None):
if ((not isinstance(self.C, numbers.Number)) or (self.C < 0)): raise ValueError(('Penalty term must be positive; got (C=%r)' % self.C)) if ((not isinstance(self.max_iter, numbers.Number)) or (self.max_iter < 0)): raise ValueError(('Maximum number of iteration must be positive; got (max_iter=%r)' % self.max_iter)) if ((not isinstance(self.tol, numbers.Number)) or (self.tol < 0)): raise ValueError(('Tolerance for stopping criteria must be positive; got (tol=%r)' % self.tol)) if (self.solver in ['newton-cg']): _dtype = [np.float64, np.float32] else: _dtype = np.float64 (X, y) = check_X_y(X, y, accept_sparse='csr', dtype=_dtype, order='C') check_classification_targets(y) self.classes_ = np.unique(y) (n_samples, n_features) = X.shape _check_solver_option(self.solver, self.multi_class, self.penalty, self.dual) if (self.solver == 'liblinear'): if (self.n_jobs != 1): warnings.warn("'n_jobs' > 1 does not have any effect when 'solver' is set to 'liblinear'. Got 'n_jobs' = {}.".format(self.n_jobs)) (self.coef_, self.intercept_, n_iter_) = _fit_liblinear(X, y, self.C, self.fit_intercept, self.intercept_scaling, self.class_weight, self.penalty, self.dual, self.verbose, self.max_iter, self.tol, self.random_state, sample_weight=sample_weight) self.n_iter_ = np.array([n_iter_]) return self if (self.solver in ['sag', 'saga']): max_squared_sum = row_norms(X, squared=True).max() else: max_squared_sum = None n_classes = len(self.classes_) classes_ = self.classes_ if (n_classes < 2): raise ValueError(('This solver needs samples of at least 2 classes in the data, but the data contains only one class: %r' % classes_[0])) if (len(self.classes_) == 2): n_classes = 1 classes_ = classes_[1:] if self.warm_start: warm_start_coef = getattr(self, 'coef_', None) else: warm_start_coef = None if ((warm_start_coef is not None) and self.fit_intercept): warm_start_coef = np.append(warm_start_coef, self.intercept_[:, np.newaxis], axis=1) self.coef_ = list() self.intercept_ = np.zeros(n_classes) if (self.multi_class == 'multinomial'): classes_ = [None] warm_start_coef = [warm_start_coef] if (warm_start_coef is None): warm_start_coef = ([None] * n_classes) path_func = delayed(logistic_regression_path) if (self.solver in ['sag', 'saga']): backend = 'threading' else: backend = 'multiprocessing' fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend=backend)((path_func(X, y, pos_class=class_, Cs=[self.C], fit_intercept=self.fit_intercept, tol=self.tol, verbose=self.verbose, solver=self.solver, multi_class=self.multi_class, max_iter=self.max_iter, class_weight=self.class_weight, check_input=False, random_state=self.random_state, coef=warm_start_coef_, penalty=self.penalty, max_squared_sum=max_squared_sum, sample_weight=sample_weight) for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))) (fold_coefs_, _, n_iter_) = zip(*fold_coefs_) self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0] if (self.multi_class == 'multinomial'): self.coef_ = fold_coefs_[0][0] else: self.coef_ = np.asarray(fold_coefs_) self.coef_ = self.coef_.reshape(n_classes, (n_features + int(self.fit_intercept))) if self.fit_intercept: self.intercept_ = self.coef_[:, (-1)] self.coef_ = self.coef_[:, :(-1)] return self
'Probability estimates. The returned estimates for all classes are ordered by the label of classes. For a multi_class problem, if multi_class is set to "multinomial" the softmax function is used to find the predicted probability of each class. Otherwise a one-vs-rest approach is used, i.e. the probability of each class is computed with the logistic function, assuming that class to be positive, and these values are normalized across all the classes. Parameters X : array-like, shape = [n_samples, n_features] Returns T : array-like, shape = [n_samples, n_classes] Returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``.'
def predict_proba(self, X):
    if not hasattr(self, 'coef_'):
        raise NotFittedError('Call fit before prediction')
    calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == 'ovr'
    if calculate_ovr:
        return super(LogisticRegression, self)._predict_proba_lr(X)
    else:
        return softmax(self.decision_function(X), copy=False)
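For the multinomial branch, `predict_proba` is the softmax of the decision scores; the sketch below verifies that on iris (illustrative only; it assumes a scikit-learn version where `multi_class='multinomial'` is still an accepted parameter).

    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    X, y = load_iris(return_X_y=True)
    clf = LogisticRegression(multi_class='multinomial', solver='lbfgs',
                             max_iter=1000).fit(X, y)

    scores = clf.decision_function(X)                       # shape (n_samples, 3)
    exp = np.exp(scores - scores.max(axis=1, keepdims=True))
    assert np.allclose(exp / exp.sum(axis=1, keepdims=True), clf.predict_proba(X))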
'Log of probability estimates. The returned estimates for all classes are ordered by the label of classes. Parameters X : array-like, shape = [n_samples, n_features] Returns T : array-like, shape = [n_samples, n_classes] Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``.'
def predict_log_proba(self, X):
return np.log(self.predict_proba(X))
'Fit the model according to the given training data. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target vector relative to X. sample_weight : array-like, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns self : object Returns self.'
def fit(self, X, y, sample_weight=None):
_check_solver_option(self.solver, self.multi_class, self.penalty, self.dual) if ((not isinstance(self.max_iter, numbers.Number)) or (self.max_iter < 0)): raise ValueError(('Maximum number of iteration must be positive; got (max_iter=%r)' % self.max_iter)) if ((not isinstance(self.tol, numbers.Number)) or (self.tol < 0)): raise ValueError(('Tolerance for stopping criteria must be positive; got (tol=%r)' % self.tol)) (X, y) = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order='C') check_classification_targets(y) class_weight = self.class_weight label_encoder = LabelEncoder().fit(y) y = label_encoder.transform(y) if isinstance(class_weight, dict): class_weight = dict(((label_encoder.transform([cls])[0], v) for (cls, v) in class_weight.items())) classes = self.classes_ = label_encoder.classes_ encoded_labels = label_encoder.transform(label_encoder.classes_) if (self.solver in ['sag', 'saga']): max_squared_sum = row_norms(X, squared=True).max() else: max_squared_sum = None cv = check_cv(self.cv, y, classifier=True) folds = list(cv.split(X, y)) n_classes = len(encoded_labels) if (n_classes < 2): raise ValueError(('This solver needs samples of at least 2 classes in the data, but the data contains only one class: %r' % classes[0])) if (n_classes == 2): n_classes = 1 encoded_labels = encoded_labels[1:] classes = classes[1:] if (self.multi_class == 'multinomial'): iter_encoded_labels = iter_classes = [None] else: iter_encoded_labels = encoded_labels iter_classes = classes if (class_weight == 'balanced'): class_weight = compute_class_weight(class_weight, np.arange(len(self.classes_)), y) class_weight = dict(enumerate(class_weight)) path_func = delayed(_log_reg_scoring_path) if (self.solver in ['sag', 'saga']): backend = 'threading' else: backend = 'multiprocessing' fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend=backend)((path_func(X, y, train, test, pos_class=label, Cs=self.Cs, fit_intercept=self.fit_intercept, penalty=self.penalty, dual=self.dual, solver=self.solver, tol=self.tol, max_iter=self.max_iter, verbose=self.verbose, class_weight=class_weight, scoring=self.scoring, multi_class=self.multi_class, intercept_scaling=self.intercept_scaling, random_state=self.random_state, max_squared_sum=max_squared_sum, sample_weight=sample_weight) for label in iter_encoded_labels for (train, test) in folds)) if (self.multi_class == 'multinomial'): (multi_coefs_paths, Cs, multi_scores, n_iter_) = zip(*fold_coefs_) multi_coefs_paths = np.asarray(multi_coefs_paths) multi_scores = np.asarray(multi_scores) coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0) scores = np.tile(multi_scores, (n_classes, 1, 1)) self.Cs_ = Cs[0] self.n_iter_ = np.reshape(n_iter_, (1, len(folds), len(self.Cs_))) else: (coefs_paths, Cs, scores, n_iter_) = zip(*fold_coefs_) self.Cs_ = Cs[0] coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds), len(self.Cs_), (-1))) self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds), len(self.Cs_))) self.coefs_paths_ = dict(zip(classes, coefs_paths)) scores = np.reshape(scores, (n_classes, len(folds), (-1))) self.scores_ = dict(zip(classes, scores)) self.C_ = list() self.coef_ = np.empty((n_classes, X.shape[1])) self.intercept_ = np.zeros(n_classes) if (self.multi_class == 'multinomial'): scores = multi_scores coefs_paths = multi_coefs_paths for (index, (cls, encoded_label)) in enumerate(zip(iter_classes, iter_encoded_labels)): if (self.multi_class == 'ovr'): scores = self.scores_[cls] coefs_paths = self.coefs_paths_[cls] if self.refit: best_index = 
scores.sum(axis=0).argmax() C_ = self.Cs_[best_index] self.C_.append(C_) if (self.multi_class == 'multinomial'): coef_init = np.mean(coefs_paths[:, best_index, :, :], axis=0) else: coef_init = np.mean(coefs_paths[:, best_index, :], axis=0) (w, _, _) = logistic_regression_path(X, y, pos_class=encoded_label, Cs=[C_], solver=self.solver, fit_intercept=self.fit_intercept, coef=coef_init, max_iter=self.max_iter, tol=self.tol, penalty=self.penalty, class_weight=class_weight, multi_class=self.multi_class, verbose=max(0, (self.verbose - 1)), random_state=self.random_state, check_input=False, max_squared_sum=max_squared_sum, sample_weight=sample_weight) w = w[0] else: best_indices = np.argmax(scores, axis=1) w = np.mean([coefs_paths[i][best_indices[i]] for i in range(len(folds))], axis=0) self.C_.append(np.mean(self.Cs_[best_indices])) if (self.multi_class == 'multinomial'): self.C_ = np.tile(self.C_, n_classes) self.coef_ = w[:, :X.shape[1]] if self.fit_intercept: self.intercept_ = w[:, (-1)] else: self.coef_[index] = w[:X.shape[1]] if self.fit_intercept: self.intercept_[index] = w[(-1)] self.C_ = np.asarray(self.C_) return self
'Fit the model Parameters X : numpy array of shape [n_samples,n_features] Training data y : numpy array of shape [n_samples] Target values. Will be cast to X\'s dtype if necessary Returns self : returns an instance of self.'
def fit(self, X, y):
(X, y) = check_X_y(X, y, dtype=np.float64, y_numeric=True) (X, y, X_offset_, y_offset_, X_scale_) = self._preprocess_data(X, y, self.fit_intercept, self.normalize, self.copy_X) self.X_offset_ = X_offset_ self.X_scale_ = X_scale_ (n_samples, n_features) = X.shape alpha_ = (1.0 / np.var(y)) lambda_ = 1.0 verbose = self.verbose lambda_1 = self.lambda_1 lambda_2 = self.lambda_2 alpha_1 = self.alpha_1 alpha_2 = self.alpha_2 self.scores_ = list() coef_old_ = None XT_y = np.dot(X.T, y) (U, S, Vh) = linalg.svd(X, full_matrices=False) eigen_vals_ = (S ** 2) for iter_ in range(self.n_iter): if (n_samples > n_features): coef_ = np.dot(Vh.T, (Vh / (eigen_vals_ + (lambda_ / alpha_))[:, np.newaxis])) coef_ = np.dot(coef_, XT_y) if self.compute_score: logdet_sigma_ = (- np.sum(np.log((lambda_ + (alpha_ * eigen_vals_))))) else: coef_ = np.dot(X.T, np.dot((U / (eigen_vals_ + (lambda_ / alpha_))[None, :]), U.T)) coef_ = np.dot(coef_, y) if self.compute_score: logdet_sigma_ = (lambda_ * np.ones(n_features)) logdet_sigma_[:n_samples] += (alpha_ * eigen_vals_) logdet_sigma_ = (- np.sum(np.log(logdet_sigma_))) self.alpha_ = alpha_ self.lambda_ = lambda_ rmse_ = np.sum(((y - np.dot(X, coef_)) ** 2)) gamma_ = np.sum(((alpha_ * eigen_vals_) / (lambda_ + (alpha_ * eigen_vals_)))) lambda_ = ((gamma_ + (2 * lambda_1)) / (np.sum((coef_ ** 2)) + (2 * lambda_2))) alpha_ = (((n_samples - gamma_) + (2 * alpha_1)) / (rmse_ + (2 * alpha_2))) if self.compute_score: s = ((lambda_1 * log(lambda_)) - (lambda_2 * lambda_)) s += ((alpha_1 * log(alpha_)) - (alpha_2 * alpha_)) s += (0.5 * ((((((n_features * log(lambda_)) + (n_samples * log(alpha_))) - (alpha_ * rmse_)) - (lambda_ * np.sum((coef_ ** 2)))) - logdet_sigma_) - (n_samples * log((2 * np.pi))))) self.scores_.append(s) if ((iter_ != 0) and (np.sum(np.abs((coef_old_ - coef_))) < self.tol)): if verbose: print('Convergence after ', str(iter_), ' iterations') break coef_old_ = np.copy(coef_) self.coef_ = coef_ sigma_ = np.dot(Vh.T, (Vh / (eigen_vals_ + (lambda_ / alpha_))[:, np.newaxis])) self.sigma_ = ((1.0 / alpha_) * sigma_) self._set_intercept(X_offset_, y_offset_, X_scale_) return self
'Predict using the linear model. In addition to the mean of the predictive distribution, also its standard deviation can be returned. Parameters X : {array-like, sparse matrix}, shape = (n_samples, n_features) Samples. return_std : boolean, optional Whether to return the standard deviation of posterior prediction. Returns y_mean : array, shape = (n_samples,) Mean of predictive distribution of query points. y_std : array, shape = (n_samples,) Standard deviation of predictive distribution of query points.'
def predict(self, X, return_std=False):
    y_mean = self._decision_function(X)
    if return_std is False:
        return y_mean
    else:
        if self.normalize:
            X = (X - self.X_offset_) / self.X_scale_
        sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
        y_std = np.sqrt(sigmas_squared_data + 1.0 / self.alpha_)
        return y_mean, y_std
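A short sketch of the `return_std` option above (illustrative; the data and shapes are assumptions): BayesianRidge can return the posterior predictive standard deviation along with the mean.

    import numpy as np
    from sklearn.linear_model import BayesianRidge

    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = X @ np.array([1.0, 2.0, -1.0]) + rng.normal(scale=0.1, size=50)

    model = BayesianRidge().fit(X, y)
    y_mean, y_std = model.predict(X[:5], return_std=True)
    print(y_mean.shape, y_std.shape)   # (5,) (5,)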
'Fit the ARDRegression model according to the given training data and parameters. Iterative procedure to maximize the evidence. Parameters X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array, shape = [n_samples] Target values. Will be cast to X\'s dtype if necessary Returns self : returns an instance of self.'
def fit(self, X, y):
(X, y) = check_X_y(X, y, dtype=np.float64, y_numeric=True) (n_samples, n_features) = X.shape coef_ = np.zeros(n_features) (X, y, X_offset_, y_offset_, X_scale_) = self._preprocess_data(X, y, self.fit_intercept, self.normalize, self.copy_X) keep_lambda = np.ones(n_features, dtype=bool) lambda_1 = self.lambda_1 lambda_2 = self.lambda_2 alpha_1 = self.alpha_1 alpha_2 = self.alpha_2 verbose = self.verbose alpha_ = (1.0 / np.var(y)) lambda_ = np.ones(n_features) self.scores_ = list() coef_old_ = None for iter_ in range(self.n_iter): sigma_ = pinvh(((np.eye(n_samples) / alpha_) + np.dot((X[:, keep_lambda] * np.reshape((1.0 / lambda_[keep_lambda]), [1, (-1)])), X[:, keep_lambda].T))) sigma_ = np.dot(sigma_, (X[:, keep_lambda] * np.reshape((1.0 / lambda_[keep_lambda]), [1, (-1)]))) sigma_ = (- np.dot((np.reshape((1.0 / lambda_[keep_lambda]), [(-1), 1]) * X[:, keep_lambda].T), sigma_)) sigma_.flat[::(sigma_.shape[1] + 1)] += (1.0 / lambda_[keep_lambda]) coef_[keep_lambda] = (alpha_ * np.dot(sigma_, np.dot(X[:, keep_lambda].T, y))) rmse_ = np.sum(((y - np.dot(X, coef_)) ** 2)) gamma_ = (1.0 - (lambda_[keep_lambda] * np.diag(sigma_))) lambda_[keep_lambda] = ((gamma_ + (2.0 * lambda_1)) / ((coef_[keep_lambda] ** 2) + (2.0 * lambda_2))) alpha_ = (((n_samples - gamma_.sum()) + (2.0 * alpha_1)) / (rmse_ + (2.0 * alpha_2))) keep_lambda = (lambda_ < self.threshold_lambda) coef_[(~ keep_lambda)] = 0 if self.compute_score: s = ((lambda_1 * np.log(lambda_)) - (lambda_2 * lambda_)).sum() s += ((alpha_1 * log(alpha_)) - (alpha_2 * alpha_)) s += (0.5 * ((fast_logdet(sigma_) + (n_samples * log(alpha_))) + np.sum(np.log(lambda_)))) s -= (0.5 * ((alpha_ * rmse_) + (lambda_ * (coef_ ** 2)).sum())) self.scores_.append(s) if ((iter_ > 0) and (np.sum(np.abs((coef_old_ - coef_))) < self.tol)): if verbose: print(('Converged after %s iterations' % iter_)) break coef_old_ = np.copy(coef_) self.coef_ = coef_ self.alpha_ = alpha_ self.sigma_ = sigma_ self.lambda_ = lambda_ self._set_intercept(X_offset_, y_offset_, X_scale_) return self
'Predict using the linear model. In addition to the mean of the predictive distribution, also its standard deviation can be returned. Parameters X : {array-like, sparse matrix}, shape = (n_samples, n_features) Samples. return_std : boolean, optional Whether to return the standard deviation of posterior prediction. Returns y_mean : array, shape = (n_samples,) Mean of predictive distribution of query points. y_std : array, shape = (n_samples,) Standard deviation of predictive distribution of query points.'
def predict(self, X, return_std=False):
    y_mean = self._decision_function(X)
    if return_std is False:
        return y_mean
    else:
        if self.normalize:
            X = (X - self.X_offset_) / self.X_scale_
        X = X[:, self.lambda_ < self.threshold_lambda]
        sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
        y_std = np.sqrt(sigmas_squared_data + 1.0 / self.alpha_)
        return y_mean, y_std
'Validate input params.'
def _validate_params(self):
    if not isinstance(self.shuffle, bool):
        raise ValueError('shuffle must be either True or False')
    if self.max_iter <= 0:
        raise ValueError('max_iter must be > zero. Got %f' % self.max_iter)
    if not (0.0 <= self.l1_ratio <= 1.0):
        raise ValueError('l1_ratio must be in [0, 1]')
    if self.alpha < 0.0:
        raise ValueError('alpha must be >= 0')
    if self.learning_rate in ('constant', 'invscaling'):
        if self.eta0 <= 0.0:
            raise ValueError('eta0 must be > 0')
    if self.learning_rate == 'optimal' and self.alpha == 0:
        raise ValueError("alpha must be > 0 since learning_rate is 'optimal'. "
                         "alpha is used to compute the optimal learning rate.")
    self._get_penalty_type(self.penalty)
    self._get_learning_rate_type(self.learning_rate)
    if self.loss not in self.loss_functions:
        raise ValueError('The loss %s is not supported. ' % self.loss)
'Get concrete ``LossFunction`` object for str ``loss``.'
def _get_loss_function(self, loss):
    try:
        loss_ = self.loss_functions[loss]
        loss_class, args = loss_[0], loss_[1:]
        if loss in ('huber', 'epsilon_insensitive',
                    'squared_epsilon_insensitive'):
            args = (self.epsilon,)
        return loss_class(*args)
    except KeyError:
        raise ValueError('The loss %s is not supported. ' % loss)
'Set the sample weight array.'
def _validate_sample_weight(self, sample_weight, n_samples):
    if sample_weight is None:
        sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
    else:
        sample_weight = np.asarray(sample_weight, dtype=np.float64, order='C')
    if sample_weight.shape[0] != n_samples:
        raise ValueError('Shapes of X and sample_weight do not match.')
    return sample_weight
'Allocate mem for parameters; initialize if provided.'
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None, intercept_init=None):
if (n_classes > 2): if (coef_init is not None): coef_init = np.asarray(coef_init, order='C') if (coef_init.shape != (n_classes, n_features)): raise ValueError('Provided ``coef_`` does not match dataset. ') self.coef_ = coef_init else: self.coef_ = np.zeros((n_classes, n_features), dtype=np.float64, order='C') if (intercept_init is not None): intercept_init = np.asarray(intercept_init, order='C') if (intercept_init.shape != (n_classes,)): raise ValueError('Provided intercept_init does not match dataset.') self.intercept_ = intercept_init else: self.intercept_ = np.zeros(n_classes, dtype=np.float64, order='C') else: if (coef_init is not None): coef_init = np.asarray(coef_init, dtype=np.float64, order='C') coef_init = coef_init.ravel() if (coef_init.shape != (n_features,)): raise ValueError('Provided coef_init does not match dataset.') self.coef_ = coef_init else: self.coef_ = np.zeros(n_features, dtype=np.float64, order='C') if (intercept_init is not None): intercept_init = np.asarray(intercept_init, dtype=np.float64) if ((intercept_init.shape != (1,)) and (intercept_init.shape != ())): raise ValueError('Provided intercept_init does not match dataset.') self.intercept_ = intercept_init.reshape(1) else: self.intercept_ = np.zeros(1, dtype=np.float64, order='C') if (self.average > 0): self.standard_coef_ = self.coef_ self.standard_intercept_ = self.intercept_ self.average_coef_ = np.zeros(self.coef_.shape, dtype=np.float64, order='C') self.average_intercept_ = np.zeros(self.standard_intercept_.shape, dtype=np.float64, order='C')
'Fit a binary classifier on X and y.'
def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, max_iter):
    coef, intercept, n_iter_ = fit_binary(
        self, 1, X, y, alpha, C, learning_rate, max_iter,
        self._expanded_class_weight[1], self._expanded_class_weight[0],
        sample_weight)
    self.t_ += n_iter_ * X.shape[0]
    self.n_iter_ = n_iter_
    if self.average > 0:
        if self.average <= self.t_ - 1:
            self.coef_ = self.average_coef_.reshape(1, -1)
            self.intercept_ = self.average_intercept_
        else:
            self.coef_ = self.standard_coef_.reshape(1, -1)
            self.standard_intercept_ = np.atleast_1d(intercept)
            self.intercept_ = self.standard_intercept_
    else:
        self.coef_ = coef.reshape(1, -1)
        self.intercept_ = np.atleast_1d(intercept)
'Fit a multi-class classifier by combining binary classifiers Each binary classifier predicts one class versus all others. This strategy is called OVA: One Versus All.'
def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter):
    result = Parallel(n_jobs=self.n_jobs, backend='threading',
                      verbose=self.verbose)(
        delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate, max_iter,
                            self._expanded_class_weight[i], 1.0, sample_weight)
        for i in range(len(self.classes_)))
    n_iter_ = 0.0
    for i, (_, intercept, n_iter_i) in enumerate(result):
        self.intercept_[i] = intercept
        n_iter_ = max(n_iter_, n_iter_i)
    self.t_ += n_iter_ * X.shape[0]
    self.n_iter_ = n_iter_
    if self.average > 0:
        if self.average <= self.t_ - 1.0:
            self.coef_ = self.average_coef_
            self.intercept_ = self.average_intercept_
        else:
            self.coef_ = self.standard_coef_
            self.standard_intercept_ = np.atleast_1d(self.intercept_)
            self.intercept_ = self.standard_intercept_
'Fit linear model with Stochastic Gradient Descent. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of the training data y : numpy array, shape (n_samples,) Subset of the target values classes : array, shape (n_classes,) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn\'t need to contain all labels in `classes`. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns self : returns an instance of self.'
def partial_fit(self, X, y, classes=None, sample_weight=None):
    if self.class_weight in ['balanced']:
        raise ValueError(
            "class_weight '{0}' is not supported for partial_fit. "
            "In order to use 'balanced' weights, use "
            "compute_class_weight('{0}', classes, y). In place of y you can "
            "use a large enough sample of the full training set target to "
            "properly estimate the class frequency distributions. Pass the "
            "resulting weights as the class_weight "
            "parameter.".format(self.class_weight))
    return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
                             learning_rate=self.learning_rate, max_iter=1,
                             classes=classes, sample_weight=sample_weight,
                             coef_init=None, intercept_init=None)
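A hedged sketch of the incremental-learning contract described in the docstring (toy streaming data and the default hinge loss are assumptions): the first `partial_fit` call must see the full set of classes, later calls may omit it.

    import numpy as np
    from sklearn.linear_model import SGDClassifier

    rng = np.random.RandomState(0)
    classes = np.array([0, 1, 2])
    clf = SGDClassifier(random_state=0)

    for batch in range(5):                          # stream mini-batches
        X_batch = rng.randn(20, 4)
        y_batch = rng.choice(classes, size=20)
        if batch == 0:
            clf.partial_fit(X_batch, y_batch, classes=classes)
        else:
            clf.partial_fit(X_batch, y_batch)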
'Fit linear model with Stochastic Gradient Descent. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_classes, n_features) The initial coefficients to warm-start the optimization. intercept_init : array, shape (n_classes,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. These weights will be multiplied with class_weight (passed through the constructor) if class_weight is specified Returns self : returns an instance of self.'
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight)
'Probability estimates. This method is only available for log loss and modified Huber loss. Multiclass probability estimates are derived from binary (one-vs.-rest) estimates by simple normalization, as recommended by Zadrozny and Elkan. Binary probability estimates for loss="modified_huber" are given by (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions it is necessary to perform proper probability calibration by wrapping the classifier with :class:`sklearn.calibration.CalibratedClassifierCV` instead. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns array, shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. References Zadrozny and Elkan, "Transforming classifier scores into multiclass probability estimates", SIGKDD\'02, http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf The justification for the formula in the loss="modified_huber" case is in the appendix B in: http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf'
@property def predict_proba(self):
    self._check_proba()
    return self._predict_proba
'Log of probability estimates. This method is only available for log loss and modified Huber loss. When loss="modified_huber", probability estimates may be hard zeros and ones, so taking the logarithm is not possible. See ``predict_proba`` for details. Parameters X : array-like, shape (n_samples, n_features) Returns T : array-like, shape (n_samples, n_classes) Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`.'
@property def predict_log_proba(self):
    self._check_proba()
    return self._predict_log_proba
'Fit linear model with Stochastic Gradient Descent. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of training data y : numpy array of shape (n_samples,) Subset of target values sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns self : returns an instance of self.'
def partial_fit(self, X, y, sample_weight=None):
return self._partial_fit(X, y, self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, sample_weight=sample_weight, coef_init=None, intercept_init=None)
'Fit linear model with Stochastic Gradient Descent. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_features,) The initial coefficients to warm-start the optimization. intercept_init : array, shape (1,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns self : returns an instance of self.'
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight)
'Predict using the linear model Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns array, shape (n_samples,) Predicted target values per element in X.'
def _decision_function(self, X):
    check_is_fitted(self, ['t_', 'coef_', 'intercept_'], all_or_any=all)
    X = check_array(X, accept_sparse='csr')
    scores = safe_sparse_dot(X, self.coef_.T,
                             dense_output=True) + self.intercept_
    return scores.ravel()
'Predict using the linear model Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns array, shape (n_samples,) Predicted target values per element in X.'
def predict(self, X):
return self._decision_function(X)
'Fit linear model. Parameters X : numpy array of shape [n_samples, n_features] Training data y : numpy array of shape [n_samples] Target values Returns self : returns an instance of self.'
def fit(self, X, y):
    random_state = check_random_state(self.random_state)
    X, y = check_X_y(X, y, y_numeric=True)
    n_samples, n_features = X.shape
    n_subsamples, self.n_subpopulation_ = self._check_subparams(n_samples,
                                                                n_features)
    self.breakdown_ = _breakdown_point(n_samples, n_subsamples)
    if self.verbose:
        print('Breakdown point: {0}'.format(self.breakdown_))
        print('Number of samples: {0}'.format(n_samples))
        tol_outliers = int(self.breakdown_ * n_samples)
        print('Tolerable outliers: {0}'.format(tol_outliers))
        print('Number of subpopulations: {0}'.format(self.n_subpopulation_))
    if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation:
        indices = list(combinations(range(n_samples), n_subsamples))
    else:
        indices = [random_state.choice(n_samples, size=n_subsamples,
                                       replace=False)
                   for _ in range(self.n_subpopulation_)]
    n_jobs = _get_n_jobs(self.n_jobs)
    index_list = np.array_split(indices, n_jobs)
    weights = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
        delayed(_lstsq)(X, y, index_list[job], self.fit_intercept)
        for job in range(n_jobs))
    weights = np.vstack(weights)
    self.n_iter_, coefs = _spatial_median(weights, max_iter=self.max_iter,
                                          tol=self.tol)
    if self.fit_intercept:
        self.intercept_ = coefs[0]
        self.coef_ = coefs[1:]
    else:
        self.intercept_ = 0.0
        self.coef_ = coefs
    return self
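Usage sketch for the Theil-Sen fit above (illustrative; the corrupted toy data are an assumption): because the estimate is a spatial median over many subpopulation fits, a few bad targets barely move the slope.

    import numpy as np
    from sklearn.linear_model import TheilSenRegressor

    rng = np.random.RandomState(0)
    X = rng.uniform(size=(100, 1))
    y = 5.0 * X.ravel() + rng.normal(scale=0.1, size=100)
    y[:5] += 20.0                                   # corrupt a few targets

    ts = TheilSenRegressor(random_state=0).fit(X, y)
    print(ts.coef_)                                 # still close to [5.0]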
'Fit estimator using RANSAC algorithm. Parameters X : array-like or sparse matrix, shape [n_samples, n_features] Training data. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. sample_weight : array-like, shape = [n_samples] Individual weights for each sample. Raises an error if sample_weight is passed and the fit method of base_estimator does not support it. Raises ValueError If no valid consensus set could be found. This occurs if `is_data_valid` and `is_model_valid` return False for all `max_trials` randomly chosen sub-samples.'
def fit(self, X, y, sample_weight=None):
X = check_array(X, accept_sparse='csr') y = check_array(y, ensure_2d=False) check_consistent_length(X, y) if (self.base_estimator is not None): base_estimator = clone(self.base_estimator) else: base_estimator = LinearRegression() if (self.min_samples is None): min_samples = (X.shape[1] + 1) elif (0 < self.min_samples < 1): min_samples = np.ceil((self.min_samples * X.shape[0])) elif (self.min_samples >= 1): if ((self.min_samples % 1) != 0): raise ValueError('Absolute number of samples must be an integer value.') min_samples = self.min_samples else: raise ValueError('Value for `min_samples` must be scalar and positive.') if (min_samples > X.shape[0]): raise ValueError('`min_samples` may not be larger than number of samples ``X.shape[0]``.') if ((self.stop_probability < 0) or (self.stop_probability > 1)): raise ValueError('`stop_probability` must be in range [0, 1].') if (self.residual_threshold is None): residual_threshold = np.median(np.abs((y - np.median(y)))) else: residual_threshold = self.residual_threshold if (self.residual_metric is not None): warnings.warn("'residual_metric' was deprecated in version 0.18 and will be removed in version 0.20. Use 'loss' instead.", DeprecationWarning) if (self.loss == 'absolute_loss'): if (y.ndim == 1): loss_function = (lambda y_true, y_pred: np.abs((y_true - y_pred))) else: loss_function = (lambda y_true, y_pred: np.sum(np.abs((y_true - y_pred)), axis=1)) elif (self.loss == 'squared_loss'): if (y.ndim == 1): loss_function = (lambda y_true, y_pred: ((y_true - y_pred) ** 2)) else: loss_function = (lambda y_true, y_pred: np.sum(((y_true - y_pred) ** 2), axis=1)) elif callable(self.loss): loss_function = self.loss else: raise ValueError(("loss should be 'absolute_loss', 'squared_loss' or a callable.Got %s. " % self.loss)) random_state = check_random_state(self.random_state) try: base_estimator.set_params(random_state=random_state) except ValueError: pass estimator_fit_has_sample_weight = has_fit_parameter(base_estimator, 'sample_weight') estimator_name = type(base_estimator).__name__ if ((sample_weight is not None) and (not estimator_fit_has_sample_weight)): raise ValueError(('%s does not support sample_weight. Samples weights are only used for the calibration itself.' 
% estimator_name)) if (sample_weight is not None): sample_weight = np.asarray(sample_weight) n_inliers_best = 1 score_best = (- np.inf) inlier_mask_best = None X_inlier_best = None y_inlier_best = None self.n_skips_no_inliers_ = 0 self.n_skips_invalid_data_ = 0 self.n_skips_invalid_model_ = 0 n_samples = X.shape[0] sample_idxs = np.arange(n_samples) (n_samples, _) = X.shape self.n_trials_ = 0 max_trials = self.max_trials while (self.n_trials_ < max_trials): self.n_trials_ += 1 if (((self.n_skips_no_inliers_ + self.n_skips_invalid_data_) + self.n_skips_invalid_model_) > self.max_skips): break subset_idxs = sample_without_replacement(n_samples, min_samples, random_state=random_state) X_subset = X[subset_idxs] y_subset = y[subset_idxs] if ((self.is_data_valid is not None) and (not self.is_data_valid(X_subset, y_subset))): self.n_skips_invalid_data_ += 1 continue if (sample_weight is None): base_estimator.fit(X_subset, y_subset) else: base_estimator.fit(X_subset, y_subset, sample_weight=sample_weight[subset_idxs]) if ((self.is_model_valid is not None) and (not self.is_model_valid(base_estimator, X_subset, y_subset))): self.n_skips_invalid_model_ += 1 continue y_pred = base_estimator.predict(X) if (self.residual_metric is not None): diff = (y_pred - y) if (diff.ndim == 1): diff = diff.reshape((-1), 1) residuals_subset = self.residual_metric(diff) else: residuals_subset = loss_function(y, y_pred) inlier_mask_subset = (residuals_subset < residual_threshold) n_inliers_subset = np.sum(inlier_mask_subset) if (n_inliers_subset < n_inliers_best): self.n_skips_no_inliers_ += 1 continue inlier_idxs_subset = sample_idxs[inlier_mask_subset] X_inlier_subset = X[inlier_idxs_subset] y_inlier_subset = y[inlier_idxs_subset] score_subset = base_estimator.score(X_inlier_subset, y_inlier_subset) if ((n_inliers_subset == n_inliers_best) and (score_subset < score_best)): continue n_inliers_best = n_inliers_subset score_best = score_subset inlier_mask_best = inlier_mask_subset X_inlier_best = X_inlier_subset y_inlier_best = y_inlier_subset max_trials = min(max_trials, _dynamic_max_trials(n_inliers_best, n_samples, min_samples, self.stop_probability)) if ((n_inliers_best >= self.stop_n_inliers) or (score_best >= self.stop_score)): break if (inlier_mask_best is None): if (((self.n_skips_no_inliers_ + self.n_skips_invalid_data_) + self.n_skips_invalid_model_) > self.max_skips): raise ValueError('RANSAC skipped more iterations than `max_skips` without finding a valid consensus set. Iterations were skipped because each randomly chosen sub-sample failed the passing criteria. See estimator attributes for diagnostics (n_skips*).') else: raise ValueError('RANSAC could not find a valid consensus set. All `max_trials` iterations were skipped because each randomly chosen sub-sample failed the passing criteria. See estimator attributes for diagnostics (n_skips*).') elif (((self.n_skips_no_inliers_ + self.n_skips_invalid_data_) + self.n_skips_invalid_model_) > self.max_skips): warnings.warn('RANSAC found a valid consensus set but exited early due to skipping more iterations than `max_skips`. See estimator attributes for diagnostics (n_skips*).', UserWarning) base_estimator.fit(X_inlier_best, y_inlier_best) self.estimator_ = base_estimator self.inlier_mask_ = inlier_mask_best return self
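A minimal usage sketch for the RANSAC loop above (illustrative; the planted outliers and the default LinearRegression base estimator are assumptions): after fitting, `estimator_` holds the final inlier fit and `inlier_mask_` flags which samples survived.

    import numpy as np
    from sklearn.linear_model import RANSACRegressor

    rng = np.random.RandomState(0)
    X = rng.uniform(0, 10, size=(100, 1))
    y = 3.0 * X.ravel() + rng.normal(scale=0.5, size=100)
    y[:10] += 30.0                                  # plant gross outliers

    ransac = RANSACRegressor(random_state=0).fit(X, y)
    print(ransac.estimator_.coef_)                  # close to [3.0]
    print(int((~ransac.inlier_mask_).sum()))        # roughly the 10 planted outliers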
'Predict using the estimated model. This is a wrapper for `estimator_.predict(X)`. Parameters X : numpy array of shape [n_samples, n_features] Returns y : array, shape = [n_samples] or [n_samples, n_targets] Returns predicted values.'
def predict(self, X):
    check_is_fitted(self, 'estimator_')
    return self.estimator_.predict(X)
'Returns the score of the prediction. This is a wrapper for `estimator_.score(X, y)`. Parameters X : numpy array or sparse matrix of shape [n_samples, n_features] Training data. y : array, shape = [n_samples] or [n_samples, n_targets] Target values. Returns z : float Score of the prediction.'
def score(self, X, y):
    check_is_fitted(self, 'estimator_')
    return self.estimator_.score(X, y)
'Auxiliary method to fit the model using X, y as training data'
def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None):
n_features = X.shape[1] (X, y, X_offset, y_offset, X_scale) = self._preprocess_data(X, y, self.fit_intercept, self.normalize, self.copy_X) if (y.ndim == 1): y = y[:, np.newaxis] n_targets = y.shape[1] Gram = self._get_gram(self.precompute, X, y) self.alphas_ = [] self.n_iter_ = [] self.coef_ = np.empty((n_targets, n_features)) if fit_path: self.active_ = [] self.coef_path_ = [] for k in xrange(n_targets): this_Xy = (None if (Xy is None) else Xy[:, k]) (alphas, active, coef_path, n_iter_) = lars_path(X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X, copy_Gram=True, alpha_min=alpha, method=self.method, verbose=max(0, (self.verbose - 1)), max_iter=max_iter, eps=self.eps, return_path=True, return_n_iter=True, positive=self.positive) self.alphas_.append(alphas) self.active_.append(active) self.n_iter_.append(n_iter_) self.coef_path_.append(coef_path) self.coef_[k] = coef_path[:, (-1)] if (n_targets == 1): (self.alphas_, self.active_, self.coef_path_, self.coef_) = [a[0] for a in (self.alphas_, self.active_, self.coef_path_, self.coef_)] self.n_iter_ = self.n_iter_[0] else: for k in xrange(n_targets): this_Xy = (None if (Xy is None) else Xy[:, k]) (alphas, _, self.coef_[k], n_iter_) = lars_path(X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X, copy_Gram=True, alpha_min=alpha, method=self.method, verbose=max(0, (self.verbose - 1)), max_iter=max_iter, eps=self.eps, return_path=False, return_n_iter=True, positive=self.positive) self.alphas_.append(alphas) self.n_iter_.append(n_iter_) if (n_targets == 1): self.alphas_ = self.alphas_[0] self.n_iter_ = self.n_iter_[0] self._set_intercept(X_offset, y_offset, X_scale) return self
'Fit the model using X, y as training data. Parameters X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Xy : array-like, shape (n_samples,) or (n_samples, n_targets), optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. Returns self : object returns an instance of self.'
def fit(self, X, y, Xy=None):
    X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
    alpha = getattr(self, 'alpha', 0.0)
    if hasattr(self, 'n_nonzero_coefs'):
        alpha = 0.0
        max_iter = self.n_nonzero_coefs
    else:
        max_iter = self.max_iter
    self._fit(X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path,
              Xy=Xy)
    return self
'Fit the model using X, y as training data. Parameters X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) Target values. Returns self : object returns an instance of self.'
def fit(self, X, y):
(X, y) = check_X_y(X, y, y_numeric=True) X = as_float_array(X, copy=self.copy_X) y = as_float_array(y, copy=self.copy_X) cv = check_cv(self.cv, classifier=False) Gram = self.precompute if hasattr(Gram, '__array__'): warnings.warn(("Parameter 'precompute' cannot be an array in %s. Automatically switch to 'auto' instead." % self.__class__.__name__)) Gram = 'auto' cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)((delayed(_lars_path_residues)(X[train], y[train], X[test], y[test], Gram=Gram, copy=False, method=self.method, verbose=max(0, (self.verbose - 1)), normalize=self.normalize, fit_intercept=self.fit_intercept, max_iter=self.max_iter, eps=self.eps, positive=self.positive) for (train, test) in cv.split(X, y))) all_alphas = np.concatenate(list(zip(*cv_paths))[0]) all_alphas = np.unique(all_alphas) stride = int(max(1, int((len(all_alphas) / float(self.max_n_alphas))))) all_alphas = all_alphas[::stride] mse_path = np.empty((len(all_alphas), len(cv_paths))) for (index, (alphas, active, coefs, residues)) in enumerate(cv_paths): alphas = alphas[::(-1)] residues = residues[::(-1)] if (alphas[0] != 0): alphas = np.r_[(0, alphas)] residues = np.r_[(residues[(0, np.newaxis)], residues)] if (alphas[(-1)] != all_alphas[(-1)]): alphas = np.r_[(alphas, all_alphas[(-1)])] residues = np.r_[(residues, residues[((-1), np.newaxis)])] this_residues = interpolate.interp1d(alphas, residues, axis=0)(all_alphas) this_residues **= 2 mse_path[:, index] = np.mean(this_residues, axis=(-1)) mask = np.all(np.isfinite(mse_path), axis=(-1)) all_alphas = all_alphas[mask] mse_path = mse_path[mask] i_best_alpha = np.argmin(mse_path.mean(axis=(-1))) best_alpha = all_alphas[i_best_alpha] self.alpha_ = best_alpha self.cv_alphas_ = all_alphas self.mse_path_ = mse_path self._fit(X, y, max_iter=self.max_iter, alpha=best_alpha, Xy=None, fit_path=True) return self
'Fit the model using X, y as training data. Parameters X : array-like, shape (n_samples, n_features) training data. y : array-like, shape (n_samples,) target values. Will be cast to X\'s dtype if necessary copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. Returns self : object returns an instance of self.'
def fit(self, X, y, copy_X=True):
(X, y) = check_X_y(X, y, y_numeric=True) (X, y, Xmean, ymean, Xstd) = LinearModel._preprocess_data(X, y, self.fit_intercept, self.normalize, self.copy_X) max_iter = self.max_iter Gram = self.precompute (alphas_, active_, coef_path_, self.n_iter_) = lars_path(X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0, method='lasso', verbose=self.verbose, max_iter=max_iter, eps=self.eps, return_n_iter=True, positive=self.positive) n_samples = X.shape[0] if (self.criterion == 'aic'): K = 2 elif (self.criterion == 'bic'): K = log(n_samples) else: raise ValueError('criterion should be either bic or aic') R = (y[:, np.newaxis] - np.dot(X, coef_path_)) mean_squared_error = np.mean((R ** 2), axis=0) sigma2 = np.var(y) df = np.zeros(coef_path_.shape[1], dtype=np.int) for (k, coef) in enumerate(coef_path_.T): mask = (np.abs(coef) > np.finfo(coef.dtype).eps) if (not np.any(mask)): continue df[k] = np.sum(mask) self.alphas_ = alphas_ eps64 = np.finfo('float64').eps self.criterion_ = (((n_samples * mean_squared_error) / (sigma2 + eps64)) + (K * df)) n_best = np.argmin(self.criterion_) self.alpha_ = alphas_[n_best] self.coef_ = coef_path_[:, n_best] self._set_intercept(Xmean, ymean, Xstd) return self
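A short sketch of the information-criterion selection above (illustrative; synthetic data assumed): `criterion='aic'` uses K = 2, `'bic'` uses K = log(n_samples), and the alpha minimizing `criterion_` along the LARS path is kept.

    import numpy as np
    from sklearn.datasets import make_regression
    from sklearn.linear_model import LassoLarsIC

    X, y = make_regression(n_samples=100, n_features=20, n_informative=3,
                           noise=1.0, random_state=0)
    model = LassoLarsIC(criterion='bic').fit(X, y)
    print(model.alpha_)                        # alpha chosen by the BIC criterion
    print(int(np.argmin(model.criterion_)))    # its index along the path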
'Fit linear model with Passive Aggressive algorithm. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Subset of the training data y : numpy array of shape [n_samples] Subset of the target values classes : array, shape = [n_classes] Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn\'t need to contain all labels in `classes`. Returns self : returns an instance of self.'
def partial_fit(self, X, y, classes=None):
    if self.class_weight == 'balanced':
        raise ValueError(
            "class_weight 'balanced' is not supported for partial_fit. "
            "For 'balanced' weights, use `sklearn.utils.compute_class_weight` "
            "with `class_weight='balanced'`. In place of y you can use a "
            "large enough subset of the full training set target to properly "
            "estimate the class frequency distributions. Pass the resulting "
            "weights as the class_weight parameter.")
    lr = 'pa1' if self.loss == 'hinge' else 'pa2'
    return self._partial_fit(X, y, alpha=1.0, C=self.C, loss='hinge',
                             learning_rate=lr, max_iter=1, classes=classes,
                             sample_weight=None, coef_init=None,
                             intercept_init=None)
'Fit linear model with Passive Aggressive algorithm. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : numpy array of shape [n_samples] Target values coef_init : array, shape = [n_classes,n_features] The initial coefficients to warm-start the optimization. intercept_init : array, shape = [n_classes] The initial intercept to warm-start the optimization. Returns self : returns an instance of self.'
def fit(self, X, y, coef_init=None, intercept_init=None):
    lr = 'pa1' if self.loss == 'hinge' else 'pa2'
    return self._fit(X, y, alpha=1.0, C=self.C, loss='hinge',
                     learning_rate=lr, coef_init=coef_init,
                     intercept_init=intercept_init)
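Usage sketch (illustrative toy data): the classifier variant maps `loss='hinge'` to the PA-I update (`'pa1'`) and `'squared_hinge'` to PA-II (`'pa2'`), as in the two-line body above.

    import numpy as np
    from sklearn.linear_model import PassiveAggressiveClassifier

    X = np.array([[0.0, 1.0], [1.0, 1.0], [2.0, 0.0], [3.0, 0.0]])
    y = np.array([0, 0, 1, 1])

    pa = PassiveAggressiveClassifier(C=1.0, random_state=0).fit(X, y)
    print(pa.predict([[2.5, 0.0]]))   # likely [1] on this toy data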
'Fit linear model with Passive Aggressive algorithm. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Subset of training data y : numpy array of shape [n_samples] Subset of target values Returns self : returns an instance of self.'
def partial_fit(self, X, y):
    lr = 'pa1' if self.loss == 'epsilon_insensitive' else 'pa2'
    return self._partial_fit(X, y, alpha=1.0, C=self.C,
                             loss='epsilon_insensitive', learning_rate=lr,
                             max_iter=1, sample_weight=None, coef_init=None,
                             intercept_init=None)
'Fit linear model with Passive Aggressive algorithm. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : numpy array of shape [n_samples] Target values coef_init : array, shape = [n_features] The initial coefficients to warm-start the optimization. intercept_init : array, shape = [1] The initial intercept to warm-start the optimization. Returns self : returns an instance of self.'
def fit(self, X, y, coef_init=None, intercept_init=None):
    lr = 'pa1' if self.loss == 'epsilon_insensitive' else 'pa2'
    return self._fit(X, y, alpha=1.0, C=self.C, loss='epsilon_insensitive',
                     learning_rate=lr, coef_init=coef_init,
                     intercept_init=intercept_init)
'Fit the model according to the given training data. Parameters X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target vector relative to X. sample_weight : array-like, shape (n_samples,) Weight given to each sample. Returns self : object Returns self.'
def fit(self, X, y, sample_weight=None):
(X, y) = check_X_y(X, y, copy=False, accept_sparse=['csr'], y_numeric=True) if (sample_weight is not None): sample_weight = np.array(sample_weight) check_consistent_length(y, sample_weight) else: sample_weight = np.ones_like(y) if (self.epsilon < 1.0): raise ValueError(('epsilon should be greater than or equal to 1.0, got %f' % self.epsilon)) if (self.warm_start and hasattr(self, 'coef_')): parameters = np.concatenate((self.coef_, [self.intercept_, self.scale_])) else: if self.fit_intercept: parameters = np.zeros((X.shape[1] + 2)) else: parameters = np.zeros((X.shape[1] + 1)) parameters[(-1)] = 1 bounds = np.tile([(- np.inf), np.inf], (parameters.shape[0], 1)) bounds[(-1)][0] = (np.finfo(np.float64).eps * 10) try: (parameters, f, dict_) = optimize.fmin_l_bfgs_b(_huber_loss_and_gradient, parameters, args=(X, y, self.epsilon, self.alpha, sample_weight), maxiter=self.max_iter, pgtol=self.tol, bounds=bounds, iprint=0) except TypeError: (parameters, f, dict_) = optimize.fmin_l_bfgs_b(_huber_loss_and_gradient, parameters, args=(X, y, self.epsilon, self.alpha, sample_weight), bounds=bounds) if (dict_['warnflag'] == 2): raise ValueError(('HuberRegressor convergence failed: l-BFGS-b solver terminated with %s' % dict_['task'].decode('ascii'))) self.n_iter_ = dict_.get('nit', None) self.scale_ = parameters[(-1)] if self.fit_intercept: self.intercept_ = parameters[(-2)] else: self.intercept_ = 0.0 self.coef_ = parameters[:X.shape[1]] residual = np.abs(((y - safe_sparse_dot(X, self.coef_)) - self.intercept_)) self.outliers_ = (residual > (self.scale_ * self.epsilon)) return self
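Usage sketch for the Huber fit above (illustrative; corrupted toy data assumed): samples whose absolute residual exceeds `epsilon * scale_` end up flagged in `outliers_`.

    import numpy as np
    from sklearn.linear_model import HuberRegressor

    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 1))
    y = 4.0 * X.ravel() + rng.normal(scale=0.5, size=100)
    y[:3] += 50.0                                # gross outliers

    huber = HuberRegressor().fit(X, y)
    print(huber.coef_)                           # close to [4.0]
    print(np.where(huber.outliers_)[0][:5])      # the corrupted samples appear here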
'Learn a list of feature name -> indices mappings. Parameters X : Mapping or iterable over Mappings Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). y : (ignored) Returns self'
def fit(self, X, y=None):
feature_names = [] vocab = {} for x in X: for (f, v) in six.iteritems(x): if isinstance(v, six.string_types): f = ('%s%s%s' % (f, self.separator, v)) if (f not in vocab): feature_names.append(f) vocab[f] = len(vocab) if self.sort: feature_names.sort() vocab = dict(((f, i) for (i, f) in enumerate(feature_names))) self.feature_names_ = feature_names self.vocabulary_ = vocab return self
'Learn a list of feature name -> indices mappings and transform X. Like fit(X) followed by transform(X), but does not require materializing X in memory. Parameters X : Mapping or iterable over Mappings Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). y : (ignored) Returns Xa : {array, sparse matrix} Feature vectors; always 2-d.'
def fit_transform(self, X, y=None):
return self._transform(X, fitting=True)
'Transform array or sparse matrix X back to feature mappings. X must have been produced by this DictVectorizer\'s transform or fit_transform method; it may only have passed through transformers that preserve the number of features and their order. In the case of one-hot/one-of-K coding, the constructed feature names and values are returned rather than the original ones. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Sample matrix. dict_type : callable, optional Constructor for feature mappings. Must conform to the collections.Mapping API. Returns D : list of dict_type objects, length = n_samples Feature mappings for the samples in X.'
def inverse_transform(self, X, dict_type=dict):
X = check_array(X, accept_sparse=['csr', 'csc']) n_samples = X.shape[0] names = self.feature_names_ dicts = [dict_type() for _ in xrange(n_samples)] if sp.issparse(X): for (i, j) in zip(*X.nonzero()): dicts[i][names[j]] = X[(i, j)] else: for (i, d) in enumerate(dicts): for (j, v) in enumerate(X[i, :]): if (v != 0): d[names[j]] = X[(i, j)] return dicts
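A round-trip sketch tying fit_transform, get_feature_names and inverse_transform together (toy dictionaries, not from the source):
from sklearn.feature_extraction import DictVectorizer

v = DictVectorizer(sparse=False)
D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
X = v.fit_transform(D)          # string values are one-of-K encoded, numbers pass through
print(v.get_feature_names())    # ['bar', 'baz', 'foo']
print(v.inverse_transform(X))   # [{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]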
'Transform feature->value dicts to array or sparse matrix. Named features not encountered during fit or fit_transform will be silently ignored. Parameters X : Mapping or iterable over Mappings, length = n_samples Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). Returns Xa : {array, sparse matrix} Feature vectors; always 2-d.'
def transform(self, X):
if self.sparse: return self._transform(X, fitting=False) else: dtype = self.dtype vocab = self.vocabulary_ X = _tosequence(X) Xa = np.zeros((len(X), len(vocab)), dtype=dtype) for (i, x) in enumerate(X): for (f, v) in six.iteritems(x): if isinstance(v, six.string_types): f = ('%s%s%s' % (f, self.separator, v)) v = 1 try: Xa[(i, vocab[f])] = dtype(v) except KeyError: pass return Xa
'Returns a list of feature names, ordered by their indices. If one-of-K coding is applied to categorical features, this will include the constructed feature names but not the original ones.'
def get_feature_names(self):
return self.feature_names_
'Restrict the features to those in support using feature selection. This function modifies the estimator in-place. Parameters support : array-like Boolean mask or list of indices (as returned by the get_support member of feature selectors). indices : boolean, optional Whether support is a list of indices. Returns self Examples >>> from sklearn.feature_extraction import DictVectorizer >>> from sklearn.feature_selection import SelectKBest, chi2 >>> v = DictVectorizer() >>> D = [{\'foo\': 1, \'bar\': 2}, {\'foo\': 3, \'baz\': 1}] >>> X = v.fit_transform(D) >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1]) >>> v.get_feature_names() [\'bar\', \'baz\', \'foo\'] >>> v.restrict(support.get_support()) # doctest: +ELLIPSIS DictVectorizer(dtype=..., separator=\'=\', sort=True, sparse=True) >>> v.get_feature_names() [\'bar\', \'foo\']'
def restrict(self, support, indices=False):
if (not indices): support = np.where(support)[0] names = self.feature_names_ new_vocab = {} for i in support: new_vocab[names[i]] = len(new_vocab) self.vocabulary_ = new_vocab self.feature_names_ = [f for (f, i) in sorted(six.iteritems(new_vocab), key=itemgetter(1))] return self
'No-op. This method doesn\'t do anything. It exists purely for compatibility with the scikit-learn transformer API. Parameters X : array-like Returns self : FeatureHasher'
def fit(self, X=None, y=None):
self._validate_params(self.n_features, self.input_type) return self
'Transform a sequence of instances to a scipy.sparse matrix. Parameters raw_X : iterable over iterable over raw features, length = n_samples Samples. Each sample must be an iterable (e.g., a list or tuple) containing/generating feature names (and optionally values, see the input_type constructor argument) which will be hashed. raw_X need not support the len function, so it can be the result of a generator; n_samples is determined on the fly. Returns X : scipy.sparse matrix, shape = (n_samples, self.n_features) Feature matrix, for use with estimators or further transformers.'
def transform(self, raw_X):
raw_X = iter(raw_X) if (self.input_type == 'dict'): raw_X = (_iteritems(d) for d in raw_X) elif (self.input_type == 'string'): raw_X = (((f, 1) for f in x) for x in raw_X) (indices, indptr, values) = _hashing.transform(raw_X, self.n_features, self.dtype, self.alternate_sign) n_samples = (indptr.shape[0] - 1) if (n_samples == 0): raise ValueError('Cannot vectorize empty sequence.') X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype, shape=(n_samples, self.n_features)) X.sum_duplicates() if self.non_negative: np.abs(X.data, X.data) return X
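Minimal FeatureHasher sketch; the n_features value and the example dicts are arbitrary:
from sklearn.feature_extraction import FeatureHasher

h = FeatureHasher(n_features=16, input_type='dict')
X = h.transform([{'dog': 1, 'cat': 2}, {'dog': 2, 'run': 5}])
print(X.shape)  # (2, 16), a scipy.sparse CSR matrix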
'Decode the input into a string of unicode symbols The decoding strategy depends on the vectorizer parameters.'
def decode(self, doc):
if (self.input == u'filename'): with open(doc, u'rb') as fh: doc = fh.read() elif (self.input == u'file'): doc = doc.read() if isinstance(doc, bytes): doc = doc.decode(self.encoding, self.decode_error) if (doc is np.nan): raise ValueError(u'np.nan is an invalid document, expected byte or unicode string.') return doc
'Turn tokens into a sequence of n-grams after stop words filtering'
def _word_ngrams(self, tokens, stop_words=None):
if (stop_words is not None): tokens = [w for w in tokens if (w not in stop_words)] (min_n, max_n) = self.ngram_range if (max_n != 1): original_tokens = tokens if (min_n == 1): tokens = list(original_tokens) min_n += 1 else: tokens = [] n_original_tokens = len(original_tokens) tokens_append = tokens.append space_join = u' '.join for n in xrange(min_n, min((max_n + 1), (n_original_tokens + 1))): for i in xrange(((n_original_tokens - n) + 1)): tokens_append(space_join(original_tokens[i:(i + n)])) return tokens
'Tokenize text_document into a sequence of character n-grams'
def _char_ngrams(self, text_document):
text_document = self._white_spaces.sub(u' ', text_document) text_len = len(text_document) (min_n, max_n) = self.ngram_range if (min_n == 1): ngrams = list(text_document) min_n += 1 else: ngrams = [] ngrams_append = ngrams.append for n in xrange(min_n, min((max_n + 1), (text_len + 1))): for i in xrange(((text_len - n) + 1)): ngrams_append(text_document[i:(i + n)]) return ngrams
'Whitespace sensitive char-n-gram tokenization. Tokenize text_document into a sequence of character n-grams operating only inside word boundaries. n-grams at the edges of words are padded with space.'
def _char_wb_ngrams(self, text_document):
text_document = self._white_spaces.sub(u' ', text_document) (min_n, max_n) = self.ngram_range ngrams = [] ngrams_append = ngrams.append for w in text_document.split(): w = ((u' ' + w) + u' ') w_len = len(w) for n in xrange(min_n, (max_n + 1)): offset = 0 ngrams_append(w[offset:(offset + n)]) while ((offset + n) < w_len): offset += 1 ngrams_append(w[offset:(offset + n)]) if (offset == 0): break return ngrams
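To see what these n-gram helpers produce, the analyzers can be built directly from a vectorizer; the sample strings and ngram_range values are illustrative:
from sklearn.feature_extraction.text import CountVectorizer

word = CountVectorizer(analyzer='word', ngram_range=(1, 2)).build_analyzer()
char_wb = CountVectorizer(analyzer='char_wb', ngram_range=(3, 3)).build_analyzer()
print(word('to be or not'))  # unigrams plus bigrams such as 'to be'
print(char_wb('word'))       # [' wo', 'wor', 'ord', 'rd '], padded at word edges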
'Return a function to preprocess the text before tokenization'
def build_preprocessor(self):
if (self.preprocessor is not None): return self.preprocessor noop = (lambda x: x) if (not self.strip_accents): strip_accents = noop elif callable(self.strip_accents): strip_accents = self.strip_accents elif (self.strip_accents == u'ascii'): strip_accents = strip_accents_ascii elif (self.strip_accents == u'unicode'): strip_accents = strip_accents_unicode else: raise ValueError((u'Invalid value for "strip_accents": %s' % self.strip_accents)) if self.lowercase: return (lambda x: strip_accents(x.lower())) else: return strip_accents
'Return a function that splits a string into a sequence of tokens'
def build_tokenizer(self):
if (self.tokenizer is not None): return self.tokenizer token_pattern = re.compile(self.token_pattern) return (lambda doc: token_pattern.findall(doc))
'Build or fetch the effective stop words list'
def get_stop_words(self):
return _check_stop_list(self.stop_words)
'Return a callable that handles preprocessing and tokenization'
def build_analyzer(self):
if callable(self.analyzer): return self.analyzer preprocess = self.build_preprocessor() if (self.analyzer == u'char'): return (lambda doc: self._char_ngrams(preprocess(self.decode(doc)))) elif (self.analyzer == u'char_wb'): return (lambda doc: self._char_wb_ngrams(preprocess(self.decode(doc)))) elif (self.analyzer == u'word'): stop_words = self.get_stop_words() tokenize = self.build_tokenizer() return (lambda doc: self._word_ngrams(tokenize(preprocess(self.decode(doc))), stop_words)) else: raise ValueError((u'%s is not a valid tokenization scheme/analyzer' % self.analyzer))
'Check if vocabulary is empty or missing (not fit-ed)'
def _check_vocabulary(self):
msg = u"%(name)s - Vocabulary wasn't fitted." check_is_fitted(self, u'vocabulary_', msg=msg) if (len(self.vocabulary_) == 0): raise ValueError(u'Vocabulary is empty')
'Does nothing: this transformer is stateless. This method is just there to mark the fact that this transformer can work in a streaming setup.'
def partial_fit(self, X, y=None):
return self
'Does nothing: this transformer is stateless.'
def fit(self, X, y=None):
if isinstance(X, six.string_types): raise ValueError(u'Iterable over raw text documents expected, string object received.') self._get_hasher().fit(X, y=y) return self
'Transform a sequence of documents to a document-term matrix. Parameters X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. Returns X : scipy.sparse matrix, shape = (n_samples, self.n_features) Document-term matrix.'
def transform(self, X):
if isinstance(X, six.string_types): raise ValueError(u'Iterable over raw text documents expected, string object received.') analyzer = self.build_analyzer() X = self._get_hasher().transform((analyzer(doc) for doc in X)) if self.binary: X.data.fill(1) if (self.norm is not None): X = normalize(X, norm=self.norm, copy=False) return X
'Sort features by name Returns a reordered matrix and modifies the vocabulary in place'
def _sort_features(self, X, vocabulary):
sorted_features = sorted(six.iteritems(vocabulary)) map_index = np.empty(len(sorted_features), dtype=np.int32) for (new_val, (term, old_val)) in enumerate(sorted_features): vocabulary[term] = new_val map_index[old_val] = new_val X.indices = map_index.take(X.indices, mode=u'clip') return X
'Remove too rare or too common features. Prune features that are nonzero in more documents than high or in fewer documents than low, modifying the vocabulary, and restricting it to at most the limit most frequent. This does not prune samples with zero features.'
def _limit_features(self, X, vocabulary, high=None, low=None, limit=None):
if ((high is None) and (low is None) and (limit is None)): return (X, set()) dfs = _document_frequency(X) tfs = np.asarray(X.sum(axis=0)).ravel() mask = np.ones(len(dfs), dtype=bool) if (high is not None): mask &= (dfs <= high) if (low is not None): mask &= (dfs >= low) if ((limit is not None) and (mask.sum() > limit)): mask_inds = (- tfs[mask]).argsort()[:limit] new_mask = np.zeros(len(dfs), dtype=bool) new_mask[np.where(mask)[0][mask_inds]] = True mask = new_mask new_indices = (np.cumsum(mask) - 1) removed_terms = set() for (term, old_index) in list(six.iteritems(vocabulary)): if mask[old_index]: vocabulary[term] = new_indices[old_index] else: del vocabulary[term] removed_terms.add(term) kept_indices = np.where(mask)[0] if (len(kept_indices) == 0): raise ValueError(u'After pruning, no terms remain. Try a lower min_df or a higher max_df.') return (X[:, kept_indices], removed_terms)
'Create sparse feature matrix, and vocabulary where fixed_vocab=False'
def _count_vocab(self, raw_documents, fixed_vocab):
if fixed_vocab: vocabulary = self.vocabulary_ else: vocabulary = defaultdict() vocabulary.default_factory = vocabulary.__len__ analyze = self.build_analyzer() j_indices = [] indptr = _make_int_array() values = _make_int_array() indptr.append(0) for doc in raw_documents: feature_counter = {} for feature in analyze(doc): try: feature_idx = vocabulary[feature] if (feature_idx not in feature_counter): feature_counter[feature_idx] = 1 else: feature_counter[feature_idx] += 1 except KeyError: continue j_indices.extend(feature_counter.keys()) values.extend(feature_counter.values()) indptr.append(len(j_indices)) if (not fixed_vocab): vocabulary = dict(vocabulary) if (not vocabulary): raise ValueError(u'empty vocabulary; perhaps the documents only contain stop words') j_indices = np.asarray(j_indices, dtype=np.intc) indptr = np.frombuffer(indptr, dtype=np.intc) values = np.frombuffer(values, dtype=np.intc) X = sp.csr_matrix((values, j_indices, indptr), shape=((len(indptr) - 1), len(vocabulary)), dtype=self.dtype) X.sort_indices() return (vocabulary, X)
'Learn a vocabulary dictionary of all tokens in the raw documents. Parameters raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns self'
def fit(self, raw_documents, y=None):
self.fit_transform(raw_documents) return self
'Learn the vocabulary dictionary and return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns X : array, [n_samples, n_features] Document-term matrix.'
def fit_transform(self, raw_documents, y=None):
if isinstance(raw_documents, six.string_types): raise ValueError(u'Iterable over raw text documents expected, string object received.') self._validate_vocabulary() max_df = self.max_df min_df = self.min_df max_features = self.max_features (vocabulary, X) = self._count_vocab(raw_documents, self.fixed_vocabulary_) if self.binary: X.data.fill(1) if (not self.fixed_vocabulary_): X = self._sort_features(X, vocabulary) n_doc = X.shape[0] max_doc_count = (max_df if isinstance(max_df, numbers.Integral) else (max_df * n_doc)) min_doc_count = (min_df if isinstance(min_df, numbers.Integral) else (min_df * n_doc)) if (max_doc_count < min_doc_count): raise ValueError(u'max_df corresponds to < documents than min_df') (X, self.stop_words_) = self._limit_features(X, vocabulary, max_doc_count, min_doc_count, max_features) self.vocabulary_ = vocabulary return X
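A hedged example of the document-frequency pruning path (_limit_features) exercised by fit_transform; the corpus and thresholds are made up:
from sklearn.feature_extraction.text import CountVectorizer

docs = ['the cat sat', 'the dog sat', 'the bird flew']
cv = CountVectorizer(min_df=2)
X = cv.fit_transform(docs)
print(cv.get_feature_names())  # only terms appearing in at least 2 documents survive
print(sorted(cv.stop_words_))  # terms removed by the pruning step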
'Transform documents to document-term matrix. Extract token counts out of raw text documents using the vocabulary fitted with fit or the one provided to the constructor. Parameters raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns X : sparse matrix, [n_samples, n_features] Document-term matrix.'
def transform(self, raw_documents):
if isinstance(raw_documents, six.string_types): raise ValueError(u'Iterable over raw text documents expected, string object received.') if (not hasattr(self, u'vocabulary_')): self._validate_vocabulary() self._check_vocabulary() (_, X) = self._count_vocab(raw_documents, fixed_vocab=True) if self.binary: X.data.fill(1) return X
'Return terms per document with nonzero entries in X. Parameters X : {array, sparse matrix}, shape = [n_samples, n_features] Returns X_inv : list of arrays, len = n_samples List of arrays of terms.'
def inverse_transform(self, X):
self._check_vocabulary() if sp.issparse(X): X = X.tocsr() else: X = np.asmatrix(X) n_samples = X.shape[0] terms = np.array(list(self.vocabulary_.keys())) indices = np.array(list(self.vocabulary_.values())) inverse_vocabulary = terms[np.argsort(indices)] return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel() for i in range(n_samples)]
'Array mapping from feature integer indices to feature name'
def get_feature_names(self):
self._check_vocabulary() return [t for (t, i) in sorted(six.iteritems(self.vocabulary_), key=itemgetter(1))]
'Learn the idf vector (global term weights) Parameters X : sparse matrix, [n_samples, n_features] a matrix of term/token counts'
def fit(self, X, y=None):
if (not sp.issparse(X)): X = sp.csc_matrix(X) if self.use_idf: (n_samples, n_features) = X.shape df = _document_frequency(X) df += int(self.smooth_idf) n_samples += int(self.smooth_idf) idf = (np.log((float(n_samples) / df)) + 1.0) self._idf_diag = sp.spdiags(idf, diags=0, m=n_features, n=n_features, format=u'csr') return self
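A small numeric check of the smoothed idf formula used above; the count matrix is a toy example:
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer

counts = np.array([[3, 0, 1],
                   [2, 0, 0],
                   [3, 0, 0],
                   [4, 0, 0]])
tfidf = TfidfTransformer(smooth_idf=True).fit(counts)
# with smoothing, idf = log((1 + n_samples) / (1 + df)) + 1
print(tfidf.idf_)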
'Transform a count matrix to a tf or tf-idf representation Parameters X : sparse matrix, [n_samples, n_features] a matrix of term/token counts copy : boolean, default True Whether to copy X and operate on the copy or perform in-place operations. Returns vectors : sparse matrix, [n_samples, n_features]'
def transform(self, X, copy=True):
if (hasattr(X, u'dtype') and np.issubdtype(X.dtype, np.float)): X = sp.csr_matrix(X, copy=copy) else: X = sp.csr_matrix(X, dtype=np.float64, copy=copy) (n_samples, n_features) = X.shape if self.sublinear_tf: np.log(X.data, X.data) X.data += 1 if self.use_idf: check_is_fitted(self, u'_idf_diag', u'idf vector is not fitted') expected_n_features = self._idf_diag.shape[0] if (n_features != expected_n_features): raise ValueError((u'Input has n_features=%d while the model has been trained with n_features=%d' % (n_features, expected_n_features))) X = (X * self._idf_diag) if self.norm: X = normalize(X, norm=self.norm, copy=False) return X
'Learn vocabulary and idf from training set. Parameters raw_documents : iterable an iterable which yields either str, unicode or file objects Returns self : TfidfVectorizer'
def fit(self, raw_documents, y=None):
X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) return self
'Learn vocabulary and idf, return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters raw_documents : iterable an iterable which yields either str, unicode or file objects Returns X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix.'
def fit_transform(self, raw_documents, y=None):
X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) return self._tfidf.transform(X, copy=False)
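End-to-end TfidfVectorizer sketch (toy corpus, default parameters assumed):
from sklearn.feature_extraction.text import TfidfVectorizer

corpus = ['the cat sat on the mat', 'the dog sat on the log']
vec = TfidfVectorizer()
X = vec.fit_transform(corpus)  # equivalent to fit followed by transform, done in one pass
print(X.shape)
print(vec.get_feature_names())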
'Transform documents to document-term matrix. Uses the vocabulary and document frequencies (df) learned by fit (or fit_transform). Parameters raw_documents : iterable an iterable which yields either str, unicode or file objects copy : boolean, default True Whether to copy X and operate on the copy or perform in-place operations. Returns X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix.'
def transform(self, raw_documents, copy=True):
check_is_fitted(self, u'_tfidf', u'The tfidf vector is not fitted') X = super(TfidfVectorizer, self).transform(raw_documents) return self._tfidf.transform(X, copy=False)
'Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines.'
def fit(self, X, y=None):
return self
'Transforms the image samples in X into a matrix of patch data. Parameters X : array, shape = (n_samples, image_height, image_width) or (n_samples, image_height, image_width, n_channels) Array of images from which to extract patches. For color images, the last dimension specifies the channel: a RGB image would have `n_channels=3`. Returns patches : array, shape = (n_patches, patch_height, patch_width) or (n_patches, patch_height, patch_width, n_channels) The collection of patches extracted from the images, where `n_patches` is either `n_samples * max_patches` or the total number of patches that can be extracted.'
def transform(self, X):
self.random_state = check_random_state(self.random_state) (n_images, i_h, i_w) = X.shape[:3] X = np.reshape(X, (n_images, i_h, i_w, (-1))) n_channels = X.shape[(-1)] if (self.patch_size is None): patch_size = ((i_h // 10), (i_w // 10)) else: patch_size = self.patch_size (p_h, p_w) = patch_size n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches) patches_shape = (((n_images * n_patches),) + patch_size) if (n_channels > 1): patches_shape += (n_channels,) patches = np.empty(patches_shape) for (ii, image) in enumerate(X): patches[(ii * n_patches):((ii + 1) * n_patches)] = extract_patches_2d(image, patch_size, self.max_patches, self.random_state) return patches
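A hypothetical patch-extraction run on random images; the image size, patch_size and max_patches are arbitrary:
import numpy as np
from sklearn.feature_extraction.image import PatchExtractor

images = np.random.RandomState(0).rand(2, 16, 16)  # (n_samples, image_height, image_width)
pe = PatchExtractor(patch_size=(4, 4), max_patches=10, random_state=0)
patches = pe.transform(images)
print(patches.shape)  # (20, 4, 4): 2 images x 10 patches each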
'The dummy arguments are to test that this fit function can accept non-array arguments through cross-validation, such as: - int - str (this is actually array-like) - object - function'
def fit(self, X, Y=None, sample_weight=None, class_prior=None, sparse_sample_weight=None, sparse_param=None, dummy_int=None, dummy_str=None, dummy_obj=None, callback=None):
self.dummy_int = dummy_int self.dummy_str = dummy_str self.dummy_obj = dummy_obj if (callback is not None): callback(self) if self.allow_nd: X = X.reshape(len(X), (-1)) if ((X.ndim >= 3) and (not self.allow_nd)): raise ValueError('X cannot be d') if (sample_weight is not None): assert_true((sample_weight.shape[0] == X.shape[0]), 'MockClassifier extra fit_param sample_weight.shape[0] is {0}, should be {1}'.format(sample_weight.shape[0], X.shape[0])) if (class_prior is not None): assert_true((class_prior.shape[0] == len(np.unique(y))), 'MockClassifier extra fit_param class_prior.shape[0] is {0}, should be {1}'.format(class_prior.shape[0], len(np.unique(y)))) if (sparse_sample_weight is not None): fmt = 'MockClassifier extra fit_param sparse_sample_weight.shape[0] is {0}, should be {1}' assert_true((sparse_sample_weight.shape[0] == X.shape[0]), fmt.format(sparse_sample_weight.shape[0], X.shape[0])) if (sparse_param is not None): fmt = 'MockClassifier extra fit_param sparse_param.shape is ({0}, {1}), should be ({2}, {3})' assert_true((sparse_param.shape == P_sparse.shape), fmt.format(sparse_param.shape[0], sparse_param.shape[1], P_sparse.shape[0], P_sparse.shape[1])) return self
'Fit the SVM model according to the given training data. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. For kernel="precomputed", the expected shape of X is (n_samples, n_samples). y : array-like, shape (n_samples,) Target values (class labels in classification, real numbers in regression) sample_weight : array-like, shape (n_samples,) Per-sample weights. Rescale C per sample. Higher weights force the classifier to put more emphasis on these points. Returns self : object Returns self. Notes If X and y are not C-ordered and contiguous arrays of np.float64 and X is not a scipy.sparse.csr_matrix, X and/or y may be copied. If X is a dense array, then the other methods will not support sparse matrices as input.'
def fit(self, X, y, sample_weight=None):
rnd = check_random_state(self.random_state) sparse = sp.isspmatrix(X) if (sparse and (self.kernel == 'precomputed')): raise TypeError('Sparse precomputed kernels are not supported.') self._sparse = (sparse and (not callable(self.kernel))) (X, y) = check_X_y(X, y, dtype=np.float64, order='C', accept_sparse='csr') y = self._validate_targets(y) sample_weight = np.asarray(([] if (sample_weight is None) else sample_weight), dtype=np.float64) solver_type = LIBSVM_IMPL.index(self._impl) if ((solver_type != 2) and (X.shape[0] != y.shape[0])): raise ValueError(('X and y have incompatible shapes.\n' + ('X has %s samples, but y has %s.' % (X.shape[0], y.shape[0])))) if ((self.kernel == 'precomputed') and (X.shape[0] != X.shape[1])): raise ValueError('X.shape[0] should be equal to X.shape[1]') if ((sample_weight.shape[0] > 0) and (sample_weight.shape[0] != X.shape[0])): raise ValueError(('sample_weight and X have incompatible shapes: %r vs %r\nNote: Sparse matrices cannot be indexed w/boolean masks (use `indices=True` in CV).' % (sample_weight.shape, X.shape))) if (self.gamma == 'auto'): self._gamma = (1.0 / X.shape[1]) else: self._gamma = self.gamma kernel = self.kernel if callable(kernel): kernel = 'precomputed' fit = (self._sparse_fit if self._sparse else self._dense_fit) if self.verbose: print('[LibSVM]', end='') seed = rnd.randint(np.iinfo('i').max) fit(X, y, sample_weight, solver_type, kernel, random_seed=seed) self.shape_fit_ = X.shape self._intercept_ = self.intercept_.copy() self._dual_coef_ = self.dual_coef_ if ((self._impl in ['c_svc', 'nu_svc']) and (len(self.classes_) == 2)): self.intercept_ *= (-1) self.dual_coef_ = (- self.dual_coef_) return self
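A minimal SVC fit/predict sketch on invented data, to show the call pattern of this fit method:
import numpy as np
from sklearn.svm import SVC

X = np.array([[-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]])
y = np.array([0, 0, 1, 1])
clf = SVC(kernel='rbf', C=1.0).fit(X, y)
print(clf.predict([[0.8, 1.0]]))  # expected to be [1] on this toy problem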
'Validation of y and class_weight. Default implementation for SVR and one-class; overridden in BaseSVC.'
def _validate_targets(self, y):
self.class_weight_ = np.empty(0) return column_or_1d(y, warn=True).astype(np.float64)
'Perform regression on samples in X. For a one-class model, +1 (inlier) or -1 (outlier) is returned. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) For kernel="precomputed", the expected shape of X is (n_samples_test, n_samples_train). Returns y_pred : array, shape (n_samples,)'
def predict(self, X):
X = self._validate_for_predict(X) predict = (self._sparse_predict if self._sparse else self._dense_predict) return predict(X)
'Return the data transformed by a callable kernel'
def _compute_kernel(self, X):
if callable(self.kernel): kernel = self.kernel(X, self.__Xfit) if sp.issparse(kernel): kernel = kernel.toarray() X = np.asarray(kernel, dtype=np.float64, order='C') return X
'Distance of the samples X to the separating hyperplane. Parameters X : array-like, shape (n_samples, n_features) Returns X : array-like, shape (n_samples, n_class * (n_class-1) / 2) Returns the decision function of the sample for each class in the model.'
def _decision_function(self, X):
X = self._validate_for_predict(X) X = self._compute_kernel(X) if self._sparse: dec_func = self._sparse_decision_function(X) else: dec_func = self._dense_decision_function(X) if ((self._impl in ['c_svc', 'nu_svc']) and (len(self.classes_) == 2)): return (- dec_func.ravel()) return dec_func
'Distance of the samples X to the separating hyperplane. Parameters X : array-like, shape (n_samples, n_features) Returns X : array-like, shape (n_samples, n_classes * (n_classes-1) / 2) Returns the decision function of the sample for each class in the model. If decision_function_shape=\'ovr\', the shape is (n_samples, n_classes)'
def decision_function(self, X):
dec = self._decision_function(X) if ((self.decision_function_shape == 'ovr') and (len(self.classes_) > 2)): return _ovr_decision_function((dec < 0), (- dec), len(self.classes_)) return dec
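A sketch of how decision_function_shape='ovr' changes the returned shape on a 3-class toy problem (data invented):
import numpy as np
from sklearn.svm import SVC

X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [0.0, 1.0], [1.0, 2.0], [2.0, 0.0]])
y = np.array([0, 0, 1, 1, 2, 2])
clf = SVC(decision_function_shape='ovr').fit(X, y)
print(clf.decision_function(X[:2]).shape)  # (2, 3) rather than (2, n_classes*(n_classes-1)/2)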
'Perform classification on samples in X. For an one-class model, +1 or -1 is returned. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) For kernel="precomputed", the expected shape of X is [n_samples_test, n_samples_train] Returns y_pred : array, shape (n_samples,) Class labels for samples in X.'
def predict(self, X):
y = super(BaseSVC, self).predict(X) return self.classes_.take(np.asarray(y, dtype=np.intp))
'Compute probabilities of possible outcomes for samples in X. The model needs to have probability information computed at training time: fit with attribute `probability` set to True. Parameters X : array-like, shape (n_samples, n_features) For kernel="precomputed", the expected shape of X is [n_samples_test, n_samples_train] Returns T : array-like, shape (n_samples, n_classes) Returns the probability of the sample for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. Notes The probability model is created using cross validation, so the results can be slightly different than those obtained by predict. Also, it will produce meaningless results on very small datasets.'
@property def predict_proba(self):
self._check_proba() return self._predict_proba
'Compute log probabilities of possible outcomes for samples in X. The model needs to have probability information computed at training time: fit with attribute `probability` set to True. Parameters X : array-like, shape (n_samples, n_features) For kernel="precomputed", the expected shape of X is [n_samples_test, n_samples_train] Returns T : array-like, shape (n_samples, n_classes) Returns the log-probabilities of the sample for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. Notes The probability model is created using cross validation, so the results can be slightly different than those obtained by predict. Also, it will produce meaningless results on very small datasets.'
@property def predict_log_proba(self):
self._check_proba() return self._predict_log_proba
'Fit the model according to the given training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target vector relative to X. sample_weight : array-like, shape = [n_samples], optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns self : object Returns self.'
def fit(self, X, y, sample_weight=None):
msg = "loss='%s' has been deprecated in favor of loss='%s' as of 0.16. Backward compatibility for the loss='%s' will be removed in %s" if (self.loss in ('l1', 'l2')): old_loss = self.loss self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(self.loss) warnings.warn((msg % (old_loss, self.loss, old_loss, '1.0')), DeprecationWarning) if (self.C < 0): raise ValueError(('Penalty term must be positive; got (C=%r)' % self.C)) (X, y) = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order='C') check_classification_targets(y) self.classes_ = np.unique(y) (self.coef_, self.intercept_, self.n_iter_) = _fit_liblinear(X, y, self.C, self.fit_intercept, self.intercept_scaling, self.class_weight, self.penalty, self.dual, self.verbose, self.max_iter, self.tol, self.random_state, self.multi_class, self.loss, sample_weight=sample_weight) if ((self.multi_class == 'crammer_singer') and (len(self.classes_) == 2)): self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, (-1)) if self.fit_intercept: intercept = (self.intercept_[1] - self.intercept_[0]) self.intercept_ = np.array([intercept]) return self
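Usage sketch for this LinearSVC fit (toy data; note the crammer_singer reshaping above only triggers for 2 classes):
import numpy as np
from sklearn.svm import LinearSVC

X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.5], [3.0, 2.0]])
y = np.array([0, 0, 1, 1])
clf = LinearSVC(loss='squared_hinge', C=1.0, random_state=0).fit(X, y)
print(clf.coef_.shape, clf.intercept_.shape)  # (1, 2) and (1,) for a binary problem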
'Fit the model according to the given training data. Parameters X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target vector relative to X. sample_weight : array-like, shape = [n_samples], optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns self : object Returns self.'
def fit(self, X, y, sample_weight=None):
msg = "loss='%s' has been deprecated in favor of loss='%s' as of 0.16. Backward compatibility for the loss='%s' will be removed in %s" if (self.loss in ('l1', 'l2')): old_loss = self.loss self.loss = {'l1': 'epsilon_insensitive', 'l2': 'squared_epsilon_insensitive'}.get(self.loss) warnings.warn((msg % (old_loss, self.loss, old_loss, '1.0')), DeprecationWarning) if (self.C < 0): raise ValueError(('Penalty term must be positive; got (C=%r)' % self.C)) (X, y) = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order='C') penalty = 'l2' (self.coef_, self.intercept_, self.n_iter_) = _fit_liblinear(X, y, self.C, self.fit_intercept, self.intercept_scaling, None, penalty, self.dual, self.verbose, self.max_iter, self.tol, self.random_state, loss=self.loss, epsilon=self.epsilon, sample_weight=sample_weight) self.coef_ = self.coef_.ravel() return self
'Detects the soft boundary of the set of samples X. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) Set of samples, where n_samples is the number of samples and n_features is the number of features. sample_weight : array-like, shape (n_samples,) Per-sample weights. Rescale C per sample. Higher weights force the classifier to put more emphasis on these points. Returns self : object Returns self. Notes If X is not a C-ordered contiguous array it is copied.'
def fit(self, X, y=None, sample_weight=None, **params):
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight, **params) return self
'Signed distance to the separating hyperplane. Signed distance is positive for an inlier and negative for an outlier. Parameters X : array-like, shape (n_samples, n_features) Returns X : array-like, shape (n_samples,) Returns the decision function of the samples.'
def decision_function(self, X):
dec = self._decision_function(X) return dec
'Perform classification on samples in X. For a one-class model, +1 or -1 is returned. Parameters X : {array-like, sparse matrix}, shape (n_samples, n_features) For kernel="precomputed", the expected shape of X is [n_samples_test, n_samples_train] Returns y_pred : array, shape (n_samples,) Class labels for samples in X.'
def predict(self, X):
y = super(OneClassSVM, self).predict(X) return np.asarray(y, dtype=np.intp)
'Return non-default make_scorer arguments for repr.'
def _factory_args(self):
return ''
'Evaluate predicted target values for X relative to y_true. Parameters estimator : object Trained estimator to use for scoring. Must have a predict method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to estimator.predict. y_true : array-like Gold standard target values for X. sample_weight : array-like, optional (default=None) Sample weights. Returns score : float Score function applied to prediction of estimator on X.'
def __call__(self, estimator, X, y_true, sample_weight=None):
super(_PredictScorer, self).__call__(estimator, X, y_true, sample_weight=sample_weight) y_pred = estimator.predict(X) if (sample_weight is not None): return (self._sign * self._score_func(y_true, y_pred, sample_weight=sample_weight, **self._kwargs)) else: return (self._sign * self._score_func(y_true, y_pred, **self._kwargs))
'Evaluate predicted probabilities for X relative to y_true. Parameters clf : object Trained classifier to use for scoring. Must have a predict_proba method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to clf.predict_proba. y : array-like Gold standard target values for X. These must be class labels, not probabilities. sample_weight : array-like, optional (default=None) Sample weights. Returns score : float Score function applied to prediction of estimator on X.'
def __call__(self, clf, X, y, sample_weight=None):
super(_ProbaScorer, self).__call__(clf, X, y, sample_weight=sample_weight) y_pred = clf.predict_proba(X) if (sample_weight is not None): return (self._sign * self._score_func(y, y_pred, sample_weight=sample_weight, **self._kwargs)) else: return (self._sign * self._score_func(y, y_pred, **self._kwargs))
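These scorer objects are usually constructed through make_scorer rather than instantiated directly; a hedged sketch of how the two __call__ variants above end up being invoked:
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import make_scorer, accuracy_score, log_loss

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
clf = LogisticRegression().fit(X, y)
acc = make_scorer(accuracy_score)  # wraps a _PredictScorer, calls estimator.predict
nll = make_scorer(log_loss, greater_is_better=False, needs_proba=True)  # wraps a _ProbaScorer
print(acc(clf, X, y), nll(clf, X, y))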