desc | decl | bodies
---|---|---|
'Evaluate decision function output for X relative to y_true.
Parameters
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
score : float
Score function applied to prediction of estimator on X.'
| def __call__(self, clf, X, y, sample_weight=None):
| super(_ThresholdScorer, self).__call__(clf, X, y, sample_weight=sample_weight)
y_type = type_of_target(y)
if (y_type not in ('binary', 'multilabel-indicator')):
raise ValueError('{0} format is not supported'.format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
if isinstance(y_pred, list):
y_pred = np.vstack([p for p in y_pred]).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if (y_type == 'binary'):
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, (-1)] for p in y_pred]).T
if (sample_weight is not None):
return (self._sign * self._score_func(y, y_pred, sample_weight=sample_weight, **self._kwargs))
else:
return (self._sign * self._score_func(y, y_pred, **self._kwargs))
|
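A brief usage sketch for the threshold scorer above. It assumes this is scikit-learn's `_ThresholdScorer` and that such a scorer is obtained via `make_scorer(..., needs_threshold=True)`; the metric, dataset, and estimator below are illustrative only.

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import make_scorer, roc_auc_score

X, y = make_classification(n_samples=200, random_state=0)
clf = LogisticRegression().fit(X, y)

# needs_threshold=True builds a scorer that feeds decision_function output
# (falling back to predict_proba) into the metric, as in __call__ above.
auc_scorer = make_scorer(roc_auc_score, needs_threshold=True)
print(auc_scorer(clf, X, y))
```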
'Fit model to data.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.'
| def fit(self, X, Y):
| check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if (Y.ndim == 1):
Y = Y.reshape((-1), 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if ((self.n_components < 1) or (self.n_components > p)):
raise ValueError(('Invalid number of components: %d' % self.n_components))
if (self.algorithm not in ('svd', 'nipals')):
raise ValueError(("Got algorithm %s when only 'svd' and 'nipals' are known" % self.algorithm))
if ((self.algorithm == 'svd') and (self.mode == 'B')):
raise ValueError('Incompatible configuration: mode B is not implemented with svd algorithm')
if (self.deflation_mode not in ['canonical', 'regression']):
raise ValueError('The deflation mode is unknown')
(X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_) = _center_scale_xy(X, Y, self.scale)
Xk = X
Yk = Y
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
for k in range(self.n_components):
if np.all((np.dot(Yk.T, Yk) < np.finfo(np.double).eps)):
warnings.warn(('Y residual constant at iteration %s' % k))
break
if (self.algorithm == 'nipals'):
(x_weights, y_weights, n_iter_) = _nipals_twoblocks_inner_loop(X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter, tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif (self.algorithm == 'svd'):
(x_weights, y_weights) = _svd_cross_product(X=Xk, Y=Yk)
(x_weights, y_weights) = svd_flip(x_weights, y_weights.T)
y_weights = y_weights.T
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = (np.dot(Yk, y_weights) / y_ss)
if (np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps):
warnings.warn(('X scores are null at iteration %s' % k))
break
x_loadings = (np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores))
Xk -= np.dot(x_scores, x_loadings.T)
if (self.deflation_mode == 'canonical'):
y_loadings = (np.dot(Yk.T, y_scores) / np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if (self.deflation_mode == 'regression'):
y_loadings = (np.dot(Yk.T, x_scores) / np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
self.x_scores_[:, k] = x_scores.ravel()
self.y_scores_[:, k] = y_scores.ravel()
self.x_weights_[:, k] = x_weights.ravel()
self.y_weights_[:, k] = y_weights.ravel()
self.x_loadings_[:, k] = x_loadings.ravel()
self.y_loadings_[:, k] = y_loadings.ravel()
self.x_rotations_ = np.dot(self.x_weights_, pinv2(np.dot(self.x_loadings_.T, self.x_weights_), check_finite=False))
if (Y.shape[1] > 1):
self.y_rotations_ = np.dot(self.y_weights_, pinv2(np.dot(self.y_loadings_.T, self.y_weights_), check_finite=False))
else:
self.y_rotations_ = np.ones(1)
if (True or (self.deflation_mode == 'regression')):
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (self.coef_ * self.y_std_)
return self
|
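A small end-to-end sketch of fitting the PLS model above, assuming it is exposed as scikit-learn's `PLSRegression` (i.e. `deflation_mode='regression'`); the synthetic data and shapes are illustrative.

```python
import numpy as np
from sklearn.cross_decomposition import PLSRegression

rng = np.random.RandomState(0)
X = rng.normal(size=(50, 5))
# Y depends linearly on the first two columns of X plus noise
Y = X[:, :2].dot(rng.normal(size=(2, 3))) + 0.1 * rng.normal(size=(50, 3))

pls = PLSRegression(n_components=2).fit(X, Y)
print(pls.transform(X).shape)  # (50, 2): one score per sample and component
print(pls.predict(X).shape)    # (50, 3)
```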
'Apply the dimension reduction learned on the train data.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
x_scores if Y is not given, (x_scores, y_scores) otherwise.'
| def transform(self, X, Y=None, copy=True):
| check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
X -= self.x_mean_
X /= self.x_std_
x_scores = np.dot(X, self.x_rotations_)
if (Y is not None):
Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)
if (Y.ndim == 1):
Y = Y.reshape((-1), 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return (x_scores, y_scores)
return x_scores
|
'Apply the dimension reduction learned on the train data.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.'
| def predict(self, X, copy=True):
| check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return (Ypred + self.y_mean_)
|
'Learn and apply the dimension reduction on the train data.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
Returns
x_scores if Y is not given, (x_scores, y_scores) otherwise.'
| def fit_transform(self, X, y=None):
| return self.fit(X, y).transform(X, y)
|
'Fit model to data.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.'
| def fit(self, X, Y):
| check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if (Y.ndim == 1):
Y = Y.reshape((-1), 1)
if (self.n_components > max(Y.shape[1], X.shape[1])):
raise ValueError(('Invalid number of components n_components=%d with X of shape %s and Y of shape %s.' % (self.n_components, str(X.shape), str(Y.shape))))
(X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_) = _center_scale_xy(X, Y, self.scale)
C = np.dot(X.T, Y)
if (self.n_components >= np.min(C.shape)):
(U, s, V) = svd(C, full_matrices=False)
else:
(U, s, V) = svds(C, k=self.n_components)
(U, V) = svd_flip(U, V)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
|
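The fit above reduces to an SVD of the cross-covariance matrix C = X.T @ Y once X and Y are centered. A minimal NumPy sketch of that core step (ignoring the optional per-feature scaling handled by `_center_scale_xy`):

```python
import numpy as np

rng = np.random.RandomState(0)
X = rng.normal(size=(30, 4)); X -= X.mean(axis=0)
Y = rng.normal(size=(30, 3)); Y -= Y.mean(axis=0)

C = X.T.dot(Y)                             # cross-covariance, shape (4, 3)
U, s, Vt = np.linalg.svd(C, full_matrices=False)
x_scores = X.dot(U)                        # analogous to x_scores_
y_scores = Y.dot(Vt.T)                     # analogous to y_scores_
print(x_scores.shape, y_scores.shape)      # (30, 3) (30, 3)
```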
'Apply the dimension reduction learned on the train data.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.'
| def transform(self, X, Y=None):
| check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = ((X - self.x_mean_) / self.x_std_)
x_scores = np.dot(Xr, self.x_weights_)
if (Y is not None):
if (Y.ndim == 1):
Y = Y.reshape((-1), 1)
Yr = ((Y - self.y_mean_) / self.y_std_)
y_scores = np.dot(Yr, self.y_weights_)
return (x_scores, y_scores)
return x_scores
|
'Learn and apply the dimension reduction on the train data.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
y : array-like, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
Returns
x_scores if Y is not given, (x_scores, y_scores) otherwise.'
| def fit_transform(self, X, y=None):
| return self.fit(X, y).transform(X, y)
|
'Fit the imputer on X.
Parameters
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
self : object
Returns self.'
| def fit(self, X, y=None):
| allowed_strategies = ['mean', 'median', 'most_frequent']
if (self.strategy not in allowed_strategies):
raise ValueError('Can only use these strategies: {0} got strategy={1}'.format(allowed_strategies, self.strategy))
if (self.axis not in [0, 1]):
raise ValueError('Can only impute missing values on axis 0 and 1, got axis={0}'.format(self.axis))
if (self.axis == 0):
X = check_array(X, accept_sparse='csc', dtype=np.float64, force_all_finite=False)
if sparse.issparse(X):
self.statistics_ = self._sparse_fit(X, self.strategy, self.missing_values, self.axis)
else:
self.statistics_ = self._dense_fit(X, self.strategy, self.missing_values, self.axis)
return self
|
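A hedged usage example for the imputer above, assuming it is the `Imputer` class shipped in scikit-learn releases of this era (later replaced by `SimpleImputer`); the toy matrix is illustrative.

```python
import numpy as np
from sklearn.preprocessing import Imputer  # removed in later scikit-learn releases

X = np.array([[1.0, 2.0],
              [np.nan, 3.0],
              [7.0, 6.0]])
imp = Imputer(strategy='mean', axis=0).fit(X)
print(imp.statistics_)   # per-column means ignoring NaN: [4.  3.666...]
print(imp.transform(X))  # NaN in column 0 replaced by 4.0
```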
'Fit the transformer on sparse data.'
| def _sparse_fit(self, X, strategy, missing_values, axis):
| if (axis == 1):
X = X.tocsr()
else:
X = X.tocsc()
if (missing_values == 0):
n_zeros_axis = np.zeros(X.shape[(not axis)], dtype=int)
else:
n_zeros_axis = (X.shape[axis] - np.diff(X.indptr))
if (strategy == 'mean'):
if (missing_values != 0):
n_non_missing = n_zeros_axis
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.logical_not(mask_missing_values)
new_data = X.data.copy()
new_data[mask_missing_values] = 0
X = sparse.csc_matrix((new_data, X.indices, X.indptr), copy=False)
sums = X.sum(axis=0)
mask_non_zeros = sparse.csc_matrix((mask_valids.astype(np.float64), X.indices, X.indptr), copy=False)
s = mask_non_zeros.sum(axis=0)
n_non_missing = np.add(n_non_missing, s)
else:
sums = X.sum(axis=axis)
n_non_missing = np.diff(X.indptr)
with np.errstate(all='ignore'):
return (np.ravel(sums) / np.ravel(n_non_missing))
else:
columns_all = np.hsplit(X.data, X.indptr[1:(-1)])
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.hsplit(np.logical_not(mask_missing_values), X.indptr[1:(-1)])
columns = [col[mask.astype(bool, copy=False)] for (col, mask) in zip(columns_all, mask_valids)]
if (strategy == 'median'):
median = np.empty(len(columns))
for (i, column) in enumerate(columns):
median[i] = _get_median(column, n_zeros_axis[i])
return median
elif (strategy == 'most_frequent'):
most_frequent = np.empty(len(columns))
for (i, column) in enumerate(columns):
most_frequent[i] = _most_frequent(column, 0, n_zeros_axis[i])
return most_frequent
|
'Fit the transformer on dense data.'
| def _dense_fit(self, X, strategy, missing_values, axis):
| X = check_array(X, force_all_finite=False)
mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=mask)
if (strategy == 'mean'):
mean_masked = np.ma.mean(masked_X, axis=axis)
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
elif (strategy == 'median'):
if (tuple((int(v) for v in np.__version__.split('.')[:2])) < (1, 5)):
masked_X.mask = np.logical_or(masked_X.mask, np.isnan(X))
median_masked = np.ma.median(masked_X, axis=axis)
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
elif (strategy == 'most_frequent'):
if (axis == 0):
X = X.transpose()
mask = mask.transpose()
most_frequent = np.empty(X.shape[0])
for (i, (row, row_mask)) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(np.bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
|
'Impute all missing values in X.
Parameters
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The input data to complete.'
| def transform(self, X):
| if (self.axis == 0):
check_is_fitted(self, 'statistics_')
X = check_array(X, accept_sparse='csc', dtype=FLOAT_DTYPES, force_all_finite=False, copy=self.copy)
statistics = self.statistics_
if (X.shape[1] != statistics.shape[0]):
raise ValueError(('X has %d features per sample, expected %d' % (X.shape[1], self.statistics_.shape[0])))
else:
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES, force_all_finite=False, copy=self.copy)
if sparse.issparse(X):
statistics = self._sparse_fit(X, self.strategy, self.missing_values, self.axis)
else:
statistics = self._dense_fit(X, self.strategy, self.missing_values, self.axis)
invalid_mask = np.isnan(statistics)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.where(valid_mask)[0]
missing = np.arange(X.shape[(not self.axis)])[invalid_mask]
if ((self.axis == 0) and invalid_mask.any()):
if self.verbose:
warnings.warn(('Deleting features without observed values: %s' % missing))
X = X[:, valid_statistics_indexes]
elif ((self.axis == 1) and invalid_mask.any()):
raise ValueError(('Some rows only contain missing values: %s' % missing))
if (sparse.issparse(X) and (self.missing_values != 0)):
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(np.arange((len(X.indptr) - 1), dtype=np.int), np.diff(X.indptr))[mask]
X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
else:
if sparse.issparse(X):
X = X.toarray()
mask = _get_mask(X, self.missing_values)
n_missing = np.sum(mask, axis=self.axis)
values = np.repeat(valid_statistics, n_missing)
if (self.axis == 0):
coordinates = np.where(mask.transpose())[::(-1)]
else:
coordinates = mask
X[coordinates] = values
return X
|
'Fit transformer by checking X.
If ``validate`` is ``True``, ``X`` will be checked.
Parameters
X : array-like, shape (n_samples, n_features)
Input array.
Returns
self'
| def fit(self, X, y=None):
| if self.validate:
check_array(X, self.accept_sparse)
return self
|
'Transform X using the forward function.
Parameters
X : array-like, shape (n_samples, n_features)
Input array.
y : (ignored)
.. deprecated:: 0.19
Returns
X_out : array-like, shape (n_samples, n_features)
Transformed input.'
| def transform(self, X, y='deprecated'):
| if ((not isinstance(y, string_types)) or (y != 'deprecated')):
warnings.warn('The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning)
return self._transform(X, y=y, func=self.func, kw_args=self.kw_args)
|
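A short usage sketch for the transformer above, assuming it is scikit-learn's `FunctionTransformer`; `np.log1p`/`np.expm1` are just an illustrative forward/inverse pair.

```python
import numpy as np
from sklearn.preprocessing import FunctionTransformer

log_tf = FunctionTransformer(func=np.log1p, inverse_func=np.expm1)
X = np.array([[0.0, 1.0], [2.0, 3.0]])
Xt = log_tf.fit_transform(X)
print(np.allclose(log_tf.inverse_transform(Xt), X))  # True
```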
'Transform X using the inverse function.
Parameters
X : array-like, shape (n_samples, n_features)
Input array.
y : (ignored)
.. deprecated:: 0.19
Returns
X_out : array-like, shape (n_samples, n_features)
Transformed input.'
| def inverse_transform(self, X, y='deprecated'):
| if ((not isinstance(y, string_types)) or (y != 'deprecated')):
warnings.warn('The parameter y on inverse_transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning)
return self._transform(X, y=y, func=self.inverse_func, kw_args=self.inv_kw_args)
|
'Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.'
| def _reset(self):
| if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
|
'Compute the minimum and maximum to be used for later scaling.
Parameters
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.'
| def fit(self, X, y=None):
| self._reset()
return self.partial_fit(X, y)
|
'Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.'
| def partial_fit(self, X, y=None):
| feature_range = self.feature_range
if (feature_range[0] >= feature_range[1]):
raise ValueError(('Minimum of desired feature range must be smaller than maximum. Got %s.' % str(feature_range)))
if sparse.issparse(X):
raise TypeError('MinMaxScaler does not support sparse input. Consider using MaxAbsScaler instead.')
X = check_array(X, copy=self.copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
if (not hasattr(self, 'n_samples_seen_')):
self.n_samples_seen_ = X.shape[0]
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = (data_max - data_min)
self.scale_ = ((feature_range[1] - feature_range[0]) / _handle_zeros_in_scale(data_range))
self.min_ = (feature_range[0] - (data_min * self.scale_))
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
|
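A worked example of the quantities computed above, assuming the class is scikit-learn's `MinMaxScaler`: `scale_` and `min_` are derived from the per-feature data range, and `transform` is then `X * scale_ + min_`.

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler

X = np.array([[1.0, 10.0],
              [3.0, 30.0]])
scaler = MinMaxScaler(feature_range=(0, 1)).fit(X)
print(scaler.scale_)        # [0.5  0.05]  == (1 - 0) / (data_max_ - data_min_)
print(scaler.min_)          # [-0.5 -0.5]  == 0 - data_min_ * scale_
print(scaler.transform(X))  # [[0. 0.] [1. 1.]]
```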
'Scale features of X according to feature_range.
Parameters
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.'
| def transform(self, X):
| check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X *= self.scale_
X += self.min_
return X
|
'Undo the scaling of X according to feature_range.
Parameters
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.'
| def inverse_transform(self, X):
| check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X -= self.min_
X /= self.scale_
return X
|
'Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.'
| def _reset(self):
| if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
|
'Compute the mean and std to be used for later scaling.
Parameters
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.'
| def fit(self, X, y=None):
| self._reset()
return self.partial_fit(X, y)
|
'Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.'
| def partial_fit(self, X, y=None):
| X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError('Cannot center sparse matrices: pass `with_mean=False` instead. See docstring for motivation and alternatives.')
if self.with_std:
if (not hasattr(self, 'n_samples_seen_')):
(self.mean_, self.var_) = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
else:
(self.mean_, self.var_, self.n_samples_seen_) = incr_mean_variance_axis(X, axis=0, last_mean=self.mean_, last_var=self.var_, last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
if (not hasattr(self, 'n_samples_seen_')):
self.mean_ = 0.0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = 0.0
else:
self.var_ = None
(self.mean_, self.var_, self.n_samples_seen_) = _incremental_mean_and_var(X, self.mean_, self.var_, self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
|
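A sketch illustrating the incremental mean/variance update above, assuming the class is scikit-learn's `StandardScaler`: streaming the data through `partial_fit` in batches should match a single `fit` on the full array.

```python
import numpy as np
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 3))

full = StandardScaler().fit(X)
incremental = StandardScaler()
for batch in np.array_split(X, 5):      # five mini-batches
    incremental.partial_fit(batch)

print(np.allclose(full.mean_, incremental.mean_))  # True
print(np.allclose(full.var_, incremental.var_))    # True
```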
'Perform standardization by centering and scaling
Parameters
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.'
| def transform(self, X, y='deprecated', copy=None):
| if ((not isinstance(y, string_types)) or (y != 'deprecated')):
warnings.warn('The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning)
check_is_fitted(self, 'scale_')
copy = (copy if (copy is not None) else self.copy)
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError('Cannot center sparse matrices: pass `with_mean=False` instead. See docstring for motivation and alternatives.')
if (self.scale_ is not None):
inplace_column_scale(X, (1 / self.scale_))
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
|
'Scale back the data to the original representation
Parameters
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
copy : bool, optional (default: None)
Copy the input X or not.
Returns
X_tr : array-like, shape [n_samples, n_features]
Transformed array.'
| def inverse_transform(self, X, copy=None):
| check_is_fitted(self, 'scale_')
copy = (copy if (copy is not None) else self.copy)
if sparse.issparse(X):
if self.with_mean:
raise ValueError('Cannot uncenter sparse matrices: pass `with_mean=False` instead. See docstring for motivation and alternatives.')
if (not sparse.isspmatrix_csr(X)):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if (self.scale_ is not None):
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
|
'Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.'
| def _reset(self):
| if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
|
'Compute the maximum absolute value to be used for later scaling.
Parameters
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.'
| def fit(self, X, y=None):
| self._reset()
return self.partial_fit(X, y)
|
'Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.'
| def partial_fit(self, X, y=None):
| X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
(mins, maxs) = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
if (not hasattr(self, 'n_samples_seen_')):
self.n_samples_seen_ = X.shape[0]
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
|
'Scale the data
Parameters
X : {array-like, sparse matrix}
The data that should be scaled.'
| def transform(self, X):
| check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, (1.0 / self.scale_))
else:
X /= self.scale_
return X
|
'Scale back the data to the original representation
Parameters
X : {array-like, sparse matrix}
The data that should be transformed back.'
| def inverse_transform(self, X):
| check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
|
'Makes sure centering is not enabled for sparse matrices.'
| def _check_array(self, X, copy):
| X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError('Cannot center sparse matrices: use `with_centering=False` instead. See docstring for motivation and alternatives.')
return X
|
'Compute the median and quantiles to be used for scaling.
Parameters
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.'
| def fit(self, X, y=None):
| if sparse.issparse(X):
raise TypeError('RobustScaler cannot be fitted on sparse inputs')
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
(q_min, q_max) = self.quantile_range
if (not (0 <= q_min <= q_max <= 100)):
raise ValueError(('Invalid quantile range: %s' % str(self.quantile_range)))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
|
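A small numeric example of the statistics computed above, assuming the class is scikit-learn's `RobustScaler`: `center_` is the per-feature median and `scale_` the width of the requested quantile range, so a single outlier barely moves them.

```python
import numpy as np
from sklearn.preprocessing import RobustScaler

X = np.array([[1.0], [2.0], [3.0], [4.0], [100.0]])
scaler = RobustScaler(quantile_range=(25.0, 75.0)).fit(X)
print(scaler.center_)  # [3.]  median, unaffected by the outlier 100
print(scaler.scale_)   # [2.]  75th percentile (4.0) minus 25th percentile (2.0)
```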
'Center and scale the data.
Can be called on sparse input, provided that ``RobustScaler`` has been
fitted to dense input and ``with_centering=False``.
Parameters
X : {array-like, sparse matrix}
The data used to scale along the specified axis.'
| def transform(self, X):
| if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, (1.0 / self.scale_))
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
|
'Scale back the data to the original representation
Parameters
X : array-like
The data used to scale along the specified axis.'
| def inverse_transform(self, X):
| if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
|
'Return feature names for output features
Parameters
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
output_feature_names : list of string, length n_output_features'
| def get_feature_names(self, input_features=None):
| powers = self.powers_
if (input_features is None):
input_features = [('x%d' % i) for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = ' '.join(((('%s^%d' % (input_features[ind], exp)) if (exp != 1) else input_features[ind]) for (ind, exp) in zip(inds, row[inds])))
else:
name = '1'
feature_names.append(name)
return feature_names
|
'Compute number of output features.
Parameters
X : array-like, shape (n_samples, n_features)
The data.
Returns
self : instance'
| def fit(self, X, y=None):
| (n_samples, n_features) = check_array(X).shape
combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum((1 for _ in combinations))
return self
|
'Transform data to polynomial features
Parameters
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.'
| def transform(self, X):
| check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
(n_samples, n_features) = X.shape
if (n_features != self.n_input_features_):
raise ValueError('X shape does not match training shape')
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias)
for (i, c) in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
|
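A quick illustration of the feature expansion implemented above, assuming the class is scikit-learn's `PolynomialFeatures` (in this era `get_feature_names` takes optional input feature names, as documented earlier).

```python
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.array([[2.0, 3.0]])
poly = PolynomialFeatures(degree=2).fit(X)
print(poly.get_feature_names())  # ['1', 'x0', 'x1', 'x0^2', 'x0 x1', 'x1^2']
print(poly.transform(X))         # [[1. 2. 3. 4. 6. 9.]]
```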
'Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
X : array-like'
| def fit(self, X, y=None):
| X = check_array(X, accept_sparse='csr')
return self
|
'Scale each non zero row of X to unit norm
Parameters
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an unnecessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.'
| def transform(self, X, y='deprecated', copy=None):
| if ((not isinstance(y, string_types)) or (y != 'deprecated')):
warnings.warn('The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning)
copy = (copy if (copy is not None) else self.copy)
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
|
'Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
X : array-like'
| def fit(self, X, y=None):
| check_array(X, accept_sparse='csr')
return self
|
'Binarize each element of X
Parameters
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
unnecessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool
Copy the input X or not.'
| def transform(self, X, y='deprecated', copy=None):
| if ((not isinstance(y, string_types)) or (y != 'deprecated')):
warnings.warn('The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning)
copy = (copy if (copy is not None) else self.copy)
return binarize(X, threshold=self.threshold, copy=copy)
|
'Fit KernelCenterer
Parameters
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
self : returns an instance of self.'
| def fit(self, K, y=None):
| K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = (np.sum(K, axis=0) / n_samples)
self.K_fit_all_ = (self.K_fit_rows_.sum() / n_samples)
return self
|
'Center kernel matrix.
Parameters
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
K_new : numpy array of shape [n_samples1, n_samples2]'
| def transform(self, K, y='deprecated', copy=True):
| if ((not isinstance(y, string_types)) or (y != 'deprecated')):
warnings.warn('The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21', DeprecationWarning)
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
|
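A sketch of what the centering above achieves, assuming the class is scikit-learn's `KernelCenterer`: for a linear kernel, centering K is equivalent to computing the kernel of column-centered data.

```python
import numpy as np
from sklearn.preprocessing import KernelCenterer

rng = np.random.RandomState(0)
X = rng.normal(size=(6, 3))
K = X.dot(X.T)                                  # linear kernel

K_centered = KernelCenterer().fit(K).transform(K)
Xc = X - X.mean(axis=0)
print(np.allclose(K_centered, Xc.dot(Xc.T)))    # True
```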
'Fit OneHotEncoder to X.
Parameters
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
self'
| def fit(self, X, y=None):
| self.fit_transform(X)
return self
|
'Assumes X contains only categorical features.'
| def _fit_transform(self, X):
| X = check_array(X, dtype=np.int)
if np.any((X < 0)):
raise ValueError('X needs to contain only non-negative integers.')
(n_samples, n_features) = X.shape
if (isinstance(self.n_values, six.string_types) and (self.n_values == 'auto')):
n_values = (np.max(X, axis=0) + 1)
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError(('Feature out of bounds for n_values=%d' % self.n_values))
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError(("Wrong type for parameter `n_values`. Expected 'auto', int or array of ints, got %r" % type(X)))
if ((n_values.ndim < 1) or (n_values.shape[0] != X.shape[1])):
raise ValueError('Shape mismatch: if n_values is an array, it has to be of shape (n_features,).')
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:(-1)]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)
data = np.ones((n_samples * n_features))
out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[(-1)]), dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and (self.n_values == 'auto')):
mask = (np.array(out.sum(axis=0)).ravel() != 0)
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return (out if self.sparse else out.toarray())
|
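A usage sketch for the encoder above, assuming it is the integer-input `OneHotEncoder` of this scikit-learn era (with `n_values_`, `feature_indices_`, and a `sparse` constructor flag); newer releases changed this API.

```python
import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([[0, 1],
              [1, 0],
              [2, 1]])
enc = OneHotEncoder(sparse=False).fit(X)
print(enc.n_values_)         # [3 2]   max per column + 1 under n_values='auto'
print(enc.feature_indices_)  # [0 3 5] column offsets into the encoded matrix
print(enc.transform(X))      # 3 columns for feature 0, then 2 for feature 1
```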
'Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
Parameters
X : array-like, shape [n_samples, n_feature]
Input array of type int.'
| def fit_transform(self, X, y=None):
| return _transform_selected(X, self._fit_transform, self.categorical_features, copy=True)
|
'Assumes X contains only categorical features.'
| def _transform(self, X):
| X = check_array(X, dtype=np.int)
if np.any((X < 0)):
raise ValueError('X needs to contain only non-negative integers.')
(n_samples, n_features) = X.shape
indices = self.feature_indices_
if (n_features != (indices.shape[0] - 1)):
raise ValueError(('X has different shape than during fitting. Expected %d, got %d.' % ((indices.shape[0] - 1), n_features)))
mask = (X < self.n_values_).ravel()
if np.any((~ mask)):
if (self.handle_unknown not in ['error', 'ignore']):
raise ValueError(("handle_unknown should be either 'error' or 'ignore', got %s" % self.handle_unknown))
if (self.handle_unknown == 'error'):
raise ValueError(('unknown categorical feature present %s during transform.' % X.ravel()[(~ mask)]))
column_indices = (X + indices[:(-1)]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[(-1)]), dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and (self.n_values == 'auto')):
out = out[:, self.active_features_]
return (out if self.sparse else out.toarray())
|
'Transform X using one-hot encoding.
Parameters
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.'
| def transform(self, X):
| return _transform_selected(X, self._transform, self.categorical_features, copy=True)
|
'Compute percentiles for dense matrices.
Parameters
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.'
| def _dense_fit(self, X, random_state):
| if self.ignore_implicit_zeros:
warnings.warn("'ignore_implicit_zeros' takes effect only with sparse matrix. This parameter has no effect.")
(n_samples, n_features) = X.shape
references = (self.references_ * 100).tolist()
self.quantiles_ = []
for col in X.T:
if (self.subsample < n_samples):
subsample_idx = random_state.choice(n_samples, size=self.subsample, replace=False)
col = col.take(subsample_idx, mode='clip')
self.quantiles_.append(np.percentile(col, references))
self.quantiles_ = np.transpose(self.quantiles_)
|
'Compute percentiles for sparse matrices.
Parameters
X : sparse matrix CSC, shape (n_samples, n_features)
The data used to scale along the features axis. The sparse matrix
needs to be nonnegative.'
| def _sparse_fit(self, X, random_state):
| (n_samples, n_features) = X.shape
references = list(map((lambda x: (x * 100)), self.references_))
self.quantiles_ = []
for feature_idx in range(n_features):
column_nnz_data = X.data[X.indptr[feature_idx]:X.indptr[(feature_idx + 1)]]
if (len(column_nnz_data) > self.subsample):
column_subsample = ((self.subsample * len(column_nnz_data)) // n_samples)
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=column_subsample, dtype=X.dtype)
else:
column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
column_data[:column_subsample] = random_state.choice(column_nnz_data, size=column_subsample, replace=False)
else:
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=len(column_nnz_data), dtype=X.dtype)
else:
column_data = np.zeros(shape=n_samples, dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
if (not column_data.size):
self.quantiles_.append(([0] * len(references)))
else:
self.quantiles_.append(np.percentile(column_data, references))
self.quantiles_ = np.transpose(self.quantiles_)
|
'Compute the quantiles used for transforming.
Parameters
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
self : object
Returns self'
| def fit(self, X, y=None):
| if (self.n_quantiles <= 0):
raise ValueError(("Invalid value for 'n_quantiles': %d. The number of quantiles must be at least one." % self.n_quantiles))
if (self.subsample <= 0):
raise ValueError(("Invalid value for 'subsample': %d. The number of subsamples must be at least one." % self.subsample))
if (self.n_quantiles > self.subsample):
raise ValueError('The number of quantiles cannot be greater than the number of samples used. Got {} quantiles and {} samples.'.format(self.n_quantiles, self.subsample))
X = self._check_inputs(X)
rng = check_random_state(self.random_state)
self.references_ = np.linspace(0, 1, self.n_quantiles, endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
|
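A small usage example for the quantile transformer above, assuming it is scikit-learn's `QuantileTransformer`; the skewed input and parameter choices are illustrative.

```python
import numpy as np
from sklearn.preprocessing import QuantileTransformer

rng = np.random.RandomState(0)
X = rng.lognormal(size=(1000, 1))              # heavily right-skewed feature

qt = QuantileTransformer(n_quantiles=100, output_distribution='uniform',
                         random_state=0)
Xt = qt.fit_transform(X)
print(qt.quantiles_.shape)                     # (100, 1), one column per feature
print(round(Xt.min(), 3), round(Xt.max(), 3))  # close to 0.0 and 1.0
```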
'Private function to transform a single feature'
| def _transform_col(self, X_col, quantiles, inverse):
| if (self.output_distribution == 'normal'):
output_distribution = 'norm'
else:
output_distribution = self.output_distribution
output_distribution = getattr(stats, output_distribution)
if (not inverse):
lower_bound_x = quantiles[0]
upper_bound_x = quantiles[(-1)]
lower_bound_y = 0
upper_bound_y = 1
else:
lower_bound_x = 0
upper_bound_x = 1
lower_bound_y = quantiles[0]
upper_bound_y = quantiles[(-1)]
X_col = output_distribution.cdf(X_col)
lower_bounds_idx = ((X_col - BOUNDS_THRESHOLD) < lower_bound_x)
upper_bounds_idx = ((X_col + BOUNDS_THRESHOLD) > upper_bound_x)
if (not inverse):
X_col = (0.5 * (np.interp(X_col, quantiles, self.references_) - np.interp((- X_col), (- quantiles[::(-1)]), (- self.references_[::(-1)]))))
else:
X_col = np.interp(X_col, self.references_, quantiles)
X_col[upper_bounds_idx] = upper_bound_y
X_col[lower_bounds_idx] = lower_bound_y
if (not inverse):
X_col = output_distribution.ppf(X_col)
clip_min = output_distribution.ppf((BOUNDS_THRESHOLD - np.spacing(1)))
clip_max = output_distribution.ppf((1 - (BOUNDS_THRESHOLD - np.spacing(1))))
X_col = np.clip(X_col, clip_min, clip_max)
return X_col
|
'Check inputs before fit and transform'
| def _check_inputs(self, X, accept_sparse_negative=False):
| X = check_array(X, accept_sparse='csc', copy=self.copy, dtype=[np.float64, np.float32])
if ((not accept_sparse_negative) and (not self.ignore_implicit_zeros) and (sparse.issparse(X) and np.any((X.data < 0)))):
raise ValueError('QuantileTransformer only accepts non-negative sparse matrices.')
if (self.output_distribution not in ('normal', 'uniform')):
raise ValueError("'output_distribution' has to be either 'normal' or 'uniform'. Got '{}' instead.".format(self.output_distribution))
return X
|
'Check the inputs before transforming'
| def _check_is_fitted(self, X):
| check_is_fitted(self, 'quantiles_')
if (X.shape[1] != self.quantiles_.shape[1]):
raise ValueError('X does not have the same number of features as the previously fitted data. Got {} instead of {}.'.format(X.shape[1], self.quantiles_.shape[1]))
|
'Forward and inverse transform.
Parameters
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
inverse : bool, optional (default=False)
If False, apply forward transform. If True, apply
inverse transform.
Returns
X : ndarray, shape (n_samples, n_features)
Projected data'
| def _transform(self, X, inverse=False):
| if sparse.issparse(X):
for feature_idx in range(X.shape[1]):
column_slice = slice(X.indptr[feature_idx], X.indptr[(feature_idx + 1)])
X.data[column_slice] = self._transform_col(X.data[column_slice], self.quantiles_[:, feature_idx], inverse)
else:
for feature_idx in range(X.shape[1]):
X[:, feature_idx] = self._transform_col(X[:, feature_idx], self.quantiles_[:, feature_idx], inverse)
return X
|
'Feature-wise transformation of the data.
Parameters
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.'
| def transform(self, X):
| X = self._check_inputs(X)
self._check_is_fitted(X)
return self._transform(X, inverse=False)
|
'Back-projection to the original space.
Parameters
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.'
| def inverse_transform(self, X):
| X = self._check_inputs(X, accept_sparse_negative=True)
self._check_is_fitted(X)
return self._transform(X, inverse=True)
|
'Fit label encoder
Parameters
y : array-like of shape (n_samples,)
Target values.
Returns
self : returns an instance of self.'
| def fit(self, y):
| y = column_or_1d(y, warn=True)
self.classes_ = np.unique(y)
return self
|
'Fit label encoder and return encoded labels
Parameters
y : array-like of shape [n_samples]
Target values.
Returns
y : array-like of shape [n_samples]'
| def fit_transform(self, y):
| y = column_or_1d(y, warn=True)
(self.classes_, y) = np.unique(y, return_inverse=True)
return y
|
'Transform labels to normalized encoding.
Parameters
y : array-like of shape [n_samples]
Target values.
Returns
y : array-like of shape [n_samples]'
| def transform(self, y):
| check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
classes = np.unique(y)
if (len(np.intersect1d(classes, self.classes_)) < len(classes)):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError(('y contains new labels: %s' % str(diff)))
return np.searchsorted(self.classes_, y)
|
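A usage sketch for the label encoder above, assuming it is scikit-learn's `LabelEncoder`; the city names are illustrative.

```python
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder().fit(['paris', 'tokyo', 'amsterdam'])
print(list(le.classes_))                 # ['amsterdam', 'paris', 'tokyo']
print(le.transform(['tokyo', 'paris']))  # [2 1]
print(le.inverse_transform([0, 0, 2]))   # ['amsterdam' 'amsterdam' 'tokyo']
```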
'Transform labels back to original encoding.
Parameters
y : numpy array of shape [n_samples]
Target values.
Returns
y : numpy array of shape [n_samples]'
| def inverse_transform(self, y):
| check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if (len(diff) > 0):
raise ValueError(('y contains new labels: %s' % str(diff)))
y = np.asarray(y)
return self.classes_[y]
|
'Fit label binarizer
Parameters
y : array of shape [n_samples,] or [n_samples, n_classes]
Target values. The 2-d matrix should contain only 0 and 1 and
represents a multilabel classification problem.
Returns
self : returns an instance of self.'
| def fit(self, y):
| self.y_type_ = type_of_target(y)
if ('multioutput' in self.y_type_):
raise ValueError('Multioutput target data is not supported with label binarization')
if (_num_samples(y) == 0):
raise ValueError(('y has 0 samples: %r' % y))
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
|
'Fit label binarizer and transform multi-class labels to binary
labels.
The output of transform is sometimes referred to as
the 1-of-K coding scheme.
Parameters
y : array or sparse matrix of shape [n_samples,] or [n_samples, n_classes]
Target values. The 2-d matrix should contain only 0 and 1 and
represents a multilabel classification problem. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
Y : array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.'
| def fit_transform(self, y):
| return self.fit(y).transform(y)
|
'Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
y : array or sparse matrix of shape [n_samples,] or [n_samples, n_classes]
Target values. The 2-d matrix should contain only 0 and 1 and
represents a multilabel classification problem. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.'
| def transform(self, y):
| check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if (y_is_multilabel and (not self.y_type_.startswith('multilabel'))):
raise ValueError('The object was not fitted with multilabel input.')
return label_binarize(y, self.classes_, pos_label=self.pos_label, neg_label=self.neg_label, sparse_output=self.sparse_output)
|
'Transform binary labels back to multi-class labels
Parameters
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when ``Y`` contains the output of decision_function
(classifier).
Use 0.5 when ``Y`` contains the output of predict_proba.
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows using the output of a
linear model\'s decision_function method directly as the input
of inverse_transform.'
| def inverse_transform(self, Y, threshold=None):
| check_is_fitted(self, 'classes_')
if (threshold is None):
threshold = ((self.pos_label + self.neg_label) / 2.0)
if (self.y_type_ == 'multiclass'):
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_, self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
|
'Fit the label sets binarizer, storing `classes_`
Parameters
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
self : returns this MultiLabelBinarizer instance'
| def fit(self, y):
| if (self.classes is None):
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = (np.int if all((isinstance(c, int) for c in classes)) else object)
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
|
'Fit the label sets binarizer and transform the given label sets
Parameters
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.'
| def fit_transform(self, y):
| if (self.classes is not None):
return self.fit(y).transform(y)
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
tmp = sorted(class_mapping, key=class_mapping.get)
dtype = (np.int if all((isinstance(c, int) for c in tmp)) else object)
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
(self.classes_, inverse) = np.unique(class_mapping, return_inverse=True)
yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype, copy=False)
if (not self.sparse_output):
yt = yt.toarray()
return yt
|
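A usage sketch for the label-set binarizer above, assuming it is scikit-learn's `MultiLabelBinarizer`; the label sets are illustrative.

```python
from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
yt = mlb.fit_transform([(1, 2), (3,)])
print(yt)                         # [[1 1 0]
                                  #  [0 0 1]]
print(mlb.classes_)               # [1 2 3]
print(mlb.inverse_transform(yt))  # [(1, 2), (3,)]
```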
'Transform the given label sets
Parameters
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.'
| def transform(self, y):
| check_is_fitted(self, 'classes_')
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if (not self.sparse_output):
yt = yt.toarray()
return yt
|
'Transforms the label sets with a given mapping
Parameters
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix'
| def _transform(self, y, class_mapping):
| indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set((class_mapping[label] for label in labels)))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr), shape=((len(indptr) - 1), len(class_mapping)))
|
'Transform the given indicator matrix into label sets
Parameters
yt : array or sparse matrix of shape (n_samples, n_classes)
A matrix containing only 1s and 0s.
Returns
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.'
| def inverse_transform(self, yt):
| check_is_fitted(self, 'classes_')
if (yt.shape[1] != len(self.classes_)):
raise ValueError('Expected indicator for {0} classes, but got {1}'.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if ((len(yt.data) != 0) and (len(np.setdiff1d(yt.data, [0, 1])) > 0)):
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end])) for (start, end) in zip(yt.indptr[:(-1)], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if (len(unexpected) > 0):
raise ValueError('Expected only 0s and 1s in label indicator. Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators in yt]
|
'Check the estimator and the n_estimators attribute, set the
`base_estimator_` attribute.'
| def _validate_estimator(self, default=None):
| if (not isinstance(self.n_estimators, (numbers.Integral, np.integer))):
raise ValueError('n_estimators must be an integer, got {0}.'.format(type(self.n_estimators)))
if (self.n_estimators <= 0):
raise ValueError('n_estimators must be greater than zero, got {0}.'.format(self.n_estimators))
if (self.base_estimator is not None):
self.base_estimator_ = self.base_estimator
else:
self.base_estimator_ = default
if (self.base_estimator_ is None):
raise ValueError('base_estimator cannot be None')
|
'Make and configure a copy of the `base_estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.'
| def _make_estimator(self, append=True, random_state=None):
| estimator = clone(self.base_estimator_)
estimator.set_params(**dict(((p, getattr(self, p)) for p in self.estimator_params)))
if (random_state is not None):
_set_random_states(estimator, random_state)
if append:
self.estimators_.append(estimator)
return estimator
|
'Returns the number of estimators in the ensemble.'
| def __len__(self):
| return len(self.estimators_)
|
'Returns the index\'th estimator in the ensemble.'
| def __getitem__(self, index):
| return self.estimators_[index]
|
'Returns iterator over estimators in the ensemble.'
| def __iter__(self):
| return iter(self.estimators_)
|
'Default ``init`` estimator for loss function.'
| def init_estimator(self):
| raise NotImplementedError()
|
'Update the terminal regions (=leaves) of the given tree and
update the current predictions of the model. Traverses the tree
and invokes the template method `_update_terminal_region`.
Parameters
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.'
| def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0):
| terminal_regions = tree.apply(X)
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[(~ sample_mask)] = (-1)
for leaf in np.where((tree.children_left == TREE_LEAF))[0]:
self._update_terminal_region(tree, masked_terminal_regions, leaf, X, y, residual, y_pred[:, k], sample_weight)
y_pred[:, k] += (learning_rate * tree.value[:, 0, 0].take(terminal_regions, axis=0))
|
'Least squares does not need to update terminal regions.
But it has to update the predictions.'
| def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0):
| y_pred[:, k] += (learning_rate * tree.predict(X).ravel())
|
'1.0 if y - pred > 0.0 else -1.0'
| def negative_gradient(self, y, pred, **kargs):
| pred = pred.ravel()
return ((2.0 * ((y - pred) > 0.0)) - 1.0)
|
'LAD updates terminal regions to median estimates.'
| def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight):
| terminal_region = np.where((terminal_regions == leaf))[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = (y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0))
tree.value[(leaf, 0, 0)] = _weighted_percentile(diff, sample_weight, percentile=50)
|
'Template method to convert scores to probabilities.
Raises TypeError if the loss does not support probabilities.'
| def _score_to_proba(self, score):
| raise TypeError(('%s does not support predict_proba' % type(self).__name__))
|
'Compute the deviance (= 2 * negative log-likelihood).'
| def __call__(self, y, pred, sample_weight=None):
| pred = pred.ravel()
if (sample_weight is None):
return ((-2.0) * np.mean(((y * pred) - np.logaddexp(0.0, pred))))
else:
return (((-2.0) / sample_weight.sum()) * np.sum((sample_weight * ((y * pred) - np.logaddexp(0.0, pred)))))
|
'Compute the residual (= negative gradient).'
| def negative_gradient(self, y, pred, **kargs):
| return (y - expit(pred.ravel()))
|
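A tiny numeric check of the binomial-deviance pieces above (the deviance in `__call__` and the residual in `negative_gradient`), assuming `expit` comes from `scipy.special` as in scikit-learn's gradient boosting losses; the toy labels and raw scores are illustrative.

```python
import numpy as np
from scipy.special import expit

y = np.array([0.0, 1.0, 1.0])          # binary labels
pred = np.array([-1.0, 0.0, 2.0])      # raw (log-odds) scores

residual = y - expit(pred)             # negative gradient, as above
print(np.round(residual, 4))           # [-0.2689  0.5     0.1192]

deviance = -2.0 * np.mean(y * pred - np.logaddexp(0.0, pred))
print(round(deviance, 4))              # 2 * negative log-likelihood per sample
```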
'Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage of the fact that y - prob = residual'
| def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight):
| terminal_region = np.where((terminal_regions == leaf))[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum((sample_weight * residual))
denominator = np.sum(((sample_weight * (y - residual)) * ((1 - y) + residual)))
if (abs(denominator) < 1e-150):
tree.value[(leaf, 0, 0)] = 0.0
else:
tree.value[(leaf, 0, 0)] = (numerator / denominator)
|
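A quick check, with made-up numbers, that the denominator written above really is the weighted sum of prob * (1 - prob): substituting residual = y - prob gives (y - residual) = prob and (1 - y + residual) = 1 - prob.

import numpy as np
from scipy.special import expit

y = np.array([1., 0., 1.])
pred = np.array([0.3, -1.2, 2.0])
w = np.array([1., 2., 1.])

prob = expit(pred)
residual = y - prob
denom_as_coded = np.sum(w * (y - residual) * (1 - y + residual))
denom_direct = np.sum(w * prob * (1 - prob))
assert np.isclose(denom_as_coded, denom_direct)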
'Compute negative gradient for the ``k``-th class.'
| def negative_gradient(self, y, pred, k=0, **kwargs):
| return (y - np.nan_to_num(np.exp((pred[:, k] - logsumexp(pred, axis=1)))))
|
'Make a single Newton-Raphson step.'
| def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight):
| terminal_region = np.where((terminal_regions == leaf))[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum((sample_weight * residual))
numerator *= ((self.K - 1) / self.K)
denominator = np.sum(((sample_weight * (y - residual)) * ((1.0 - y) + residual)))
if (abs(denominator) < 1e-150):
tree.value[(leaf, 0, 0)] = 0.0
else:
tree.value[(leaf, 0, 0)] = (numerator / denominator)
|
'Update reporter with new iteration.'
| def update(self, j, est):
| do_oob = (est.subsample < 1)
i = (j - self.begin_at_stage)
if (((i + 1) % self.verbose_mod) == 0):
oob_impr = (est.oob_improvement_[j] if do_oob else 0)
remaining_time = (((est.n_estimators - (j + 1)) * (time() - self.start_time)) / float((i + 1)))
if (remaining_time > 60):
remaining_time = '{0:.2f}m'.format((remaining_time / 60.0))
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=(j + 1), train_score=est.train_score_[j], oob_impr=oob_impr, remaining_time=remaining_time))
if ((self.verbose == 1) and (((i + 1) // (self.verbose_mod * 10)) > 0)):
self.verbose_mod *= 10
|
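The remaining-time string above is a simple extrapolation, sketched here with hypothetical values: average time per finished stage times the number of stages still to run, printed in seconds or minutes. The final branch coarsens the reporting interval tenfold once enough lines have been printed.

from time import time

start_time = time() - 12.0                   # pretend 12 seconds have elapsed
n_estimators, j, begin_at_stage = 100, 24, 0
i = j - begin_at_stage
remaining = (n_estimators - (j + 1)) * (time() - start_time) / float(i + 1)
print('{0:.2f}m'.format(remaining / 60.0) if remaining > 60 else
      '{0:.2f}s'.format(remaining))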
'Fit another stage of ``n_classes_`` trees to the boosting model.'
| def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask, random_state, X_idx_sorted, X_csc=None, X_csr=None):
| assert (sample_mask.dtype == np.bool)
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array((original_y == k), dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k, sample_weight=sample_weight)
tree = DecisionTreeRegressor(criterion=self.criterion, splitter='best', max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, min_weight_fraction_leaf=self.min_weight_fraction_leaf, min_impurity_decrease=self.min_impurity_decrease, min_impurity_split=self.min_impurity_split, max_features=self.max_features, max_leaf_nodes=self.max_leaf_nodes, random_state=random_state, presort=self.presort)
if (self.subsample < 1.0):
sample_weight = (sample_weight * sample_mask.astype(np.float64))
if (X_csc is not None):
tree.fit(X_csc, residual, sample_weight=sample_weight, check_input=False, X_idx_sorted=X_idx_sorted)
else:
tree.fit(X, residual, sample_weight=sample_weight, check_input=False, X_idx_sorted=X_idx_sorted)
if (X_csr is not None):
loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred, sample_weight, sample_mask, self.learning_rate, k=k)
else:
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred, sample_weight, sample_mask, self.learning_rate, k=k)
self.estimators_[(i, k)] = tree
return y_pred
|
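For the multi-class case in the stage above, one regression tree is fit per class; class k's tree is trained on the gradient of a binarised "is class k" target. A tiny illustration of that encoding (labels are made up):

import numpy as np

original_y = np.array([0, 2, 1, 2, 0])
K = 3
for k in range(K):
    y_k = np.array(original_y == k, dtype=np.float64)
    print(k, y_k)
# 0 [1. 0. 0. 0. 1.]
# 1 [0. 0. 1. 0. 0.]
# 2 [0. 1. 0. 1. 0.]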
'Check validity of parameters and raise ValueError if not valid.'
| def _check_params(self):
| if (self.n_estimators <= 0):
raise ValueError(('n_estimators must be greater than 0 but was %r' % self.n_estimators))
if (self.learning_rate <= 0.0):
raise ValueError(('learning_rate must be greater than 0 but was %r' % self.learning_rate))
if ((self.loss not in self._SUPPORTED_LOSS) or (self.loss not in LOSS_FUNCTIONS)):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if (self.loss == 'deviance'):
loss_class = (MultinomialDeviance if (len(self.classes_) > 2) else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if (self.loss in ('huber', 'quantile')):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if (not (0.0 < self.subsample <= 1.0)):
raise ValueError(('subsample must be in (0,1] but was %r' % self.subsample))
if (self.init is not None):
if isinstance(self.init, six.string_types):
if (self.init not in INIT_ESTIMATORS):
raise ValueError(('init="%s" is not supported' % self.init))
elif ((not hasattr(self.init, 'fit')) or (not hasattr(self.init, 'predict'))):
raise ValueError(('init=%r must be valid BaseEstimator and support both fit and predict' % self.init))
if (not (0.0 < self.alpha < 1.0)):
raise ValueError(('alpha must be in (0.0, 1.0) but was %r' % self.alpha))
if isinstance(self.max_features, six.string_types):
if (self.max_features == 'auto'):
if (self.n_classes_ > 1):
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif (self.max_features == 'sqrt'):
max_features = max(1, int(np.sqrt(self.n_features_)))
elif (self.max_features == 'log2'):
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(("Invalid value for max_features: %r. Allowed string values are 'auto', 'sqrt' or 'log2'." % self.max_features))
elif (self.max_features is None):
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
elif (0.0 < self.max_features <= 1.0):
max_features = max(int((self.max_features * self.n_features_)), 1)
else:
raise ValueError('max_features must be in (0, n_features]')
self.max_features_ = max_features
if (not isinstance(self.n_iter_no_change, (numbers.Integral, np.integer, type(None)))):
raise ValueError(('n_iter_no_change should either be None or an integer. %r was passed' % self.n_iter_no_change))
|
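As a quick illustration of the ``max_features`` resolution above, for ``n_features_ = 100``: 'sqrt' (and 'auto' in classification) gives 10, 'log2' gives 6, ``None`` gives 100, and a float f in (0, 1] gives ``max(int(f * 100), 1)``.

import numpy as np

n_features = 100
print(max(1, int(np.sqrt(n_features))))   # 'sqrt' / classification 'auto' -> 10
print(max(1, int(np.log2(n_features))))   # 'log2' -> 6
print(max(int(0.25 * n_features), 1))     # float 0.25 -> 25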
'Initialize model state and allocate model state data structures.'
| def _init_state(self):
| if (self.init is None):
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K), dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
if (self.subsample < 1.0):
self.oob_improvement_ = np.zeros(self.n_estimators, dtype=np.float64)
|
'Clear the state of the gradient boosting model.'
| def _clear_state(self):
| if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
if hasattr(self, '_rng'):
del self._rng
|
'Add additional ``n_estimators`` entries to all attributes.'
| def _resize_state(self):
| total_n_estimators = self.n_estimators
if (total_n_estimators < self.estimators_.shape[0]):
raise ValueError(('resize with smaller n_estimators %d < %d' % (total_n_estimators, self.estimators_.shape[0])))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if ((self.subsample < 1) or hasattr(self, 'oob_improvement_')):
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,), dtype=np.float64)
|
'Check that the estimator is initialized, raising an error if not.'
| def _check_initialized(self):
| check_is_fitted(self, 'estimators_')
|
'Fit the gradient boosting model.
Parameters
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspection, and
snapshotting.
Returns
self : object
Returns self.'
| def fit(self, X, y, sample_weight=None, monitor=None):
| if (not self.warm_start):
self._clear_state()
(X, y) = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
(n_samples, self.n_features_) = X.shape
if (sample_weight is None):
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
if (self.n_iter_no_change is not None):
(X, X_val, y, y_val, sample_weight, sample_weight_val) = train_test_split(X, y, sample_weight, random_state=self.random_state, test_size=self.validation_fraction)
else:
X_val = y_val = sample_weight_val = None
self._check_params()
if (not self._is_initialized()):
self._init_state()
self.init_.fit(X, y, sample_weight)
y_pred = self.init_.predict(X)
begin_at_stage = 0
self._rng = check_random_state(self.random_state)
else:
if (self.n_estimators < self.estimators_.shape[0]):
raise ValueError(('n_estimators=%d must be larger or equal to estimators_.shape[0]=%d when warm_start==True' % (self.n_estimators, self.estimators_.shape[0])))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
X_idx_sorted = None
presort = self.presort
if ((presort == 'auto') and issparse(X)):
presort = False
elif (presort == 'auto'):
presort = True
if (presort == True):
if issparse(X):
raise ValueError('Presorting is not supported for sparse matrices.')
else:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0), dtype=np.int32)
n_stages = self._fit_stages(X, y, y_pred, sample_weight, self._rng, X_val, y_val, sample_weight_val, begin_at_stage, monitor, X_idx_sorted)
if (n_stages != self.estimators_.shape[0]):
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
self.n_estimators_ = n_stages
return self
|
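A hedged sketch of the ``monitor`` callable described above (dataset and threshold are illustrative): it is invoked after every stage with the stage index, the estimator, and the locals of ``_fit_stages``, and returning True ends training early, here once the out-of-bag improvement turns negative.

from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor

def oob_monitor(i, est, locals_):
    # Stop once a subsampled model stops improving on its out-of-bag samples.
    return i > 10 and est.oob_improvement_[i] < 0

X, y = make_regression(n_samples=300, random_state=0)
est = GradientBoostingRegressor(n_estimators=500, subsample=0.5, random_state=0)
est.fit(X, y, monitor=oob_monitor)
print(est.estimators_.shape[0], 'stages actually fit')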
'Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.'
| def _fit_stages(self, X, y, y_pred, sample_weight, random_state, X_val, y_val, sample_weight_val, begin_at_stage=0, monitor=None, X_idx_sorted=None):
| n_samples = X.shape[0]
do_oob = (self.subsample < 1.0)
sample_mask = np.ones((n_samples,), dtype=np.bool)
n_inbag = max(1, int((self.subsample * n_samples)))
loss_ = self.loss_
if ((self.min_weight_fraction_leaf != 0.0) and (sample_weight is not None)):
min_weight_leaf = (self.min_weight_fraction_leaf * np.sum(sample_weight))
else:
min_weight_leaf = 0.0
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = (csc_matrix(X) if issparse(X) else None)
X_csr = (csr_matrix(X) if issparse(X) else None)
if (self.n_iter_no_change is not None):
loss_history = (np.ones(self.n_iter_no_change) * np.inf)
y_val_pred_iter = self._staged_decision_function(X_val)
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag, random_state)
old_oob_score = loss_(y[(~ sample_mask)], y_pred[(~ sample_mask)], sample_weight[(~ sample_mask)])
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight, sample_mask, random_state, X_idx_sorted, X_csc, X_csr)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask], y_pred[sample_mask], sample_weight[sample_mask])
self.oob_improvement_[i] = (old_oob_score - loss_(y[(~ sample_mask)], y_pred[(~ sample_mask)], sample_weight[(~ sample_mask)]))
else:
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if (self.verbose > 0):
verbose_reporter.update(i, self)
if (monitor is not None):
early_stopping = monitor(i, self, locals())
if early_stopping:
break
if (self.n_iter_no_change is not None):
validation_loss = loss_(y_val, next(y_val_pred_iter), sample_weight_val)
if np.any(((validation_loss + self.tol) < loss_history)):
loss_history[(i % len(loss_history))] = validation_loss
else:
break
return (i + 1)
|
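The ``n_iter_no_change`` bookkeeping above keeps a ring buffer of the last few validation losses and stops as soon as the current loss fails to beat every stored value by at least ``tol``. A toy trace (losses are invented):

import numpy as np

n_iter_no_change, tol = 2, 1e-4
loss_history = np.full(n_iter_no_change, np.inf)
for i, validation_loss in enumerate([0.9, 0.8, 0.8, 0.8]):
    if np.any(validation_loss + tol < loss_history):
        loss_history[i % len(loss_history)] = validation_loss
    else:
        print('stopping after iteration', i)     # stops at i = 3
        break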
'Check input and compute prediction of ``init``.'
| def _init_decision_function(self, X):
| self._check_initialized()
X = self.estimators_[(0, 0)]._validate_X_predict(X, check_input=True)
if (X.shape[1] != self.n_features_):
raise ValueError('X.shape[1] should be {0:d}, not {1:d}.'.format(self.n_features_, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
|
'Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.'
| def _staged_decision_function(self, X):
| X = check_array(X, dtype=DTYPE, order='C', accept_sparse='csr')
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
(yield score.copy())
|
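A hedged usage sketch built on the public ``staged_decision_function`` wrapper of this generator (data and model settings are illustrative): because one score array is yielded per stage, held-out error can be tracked per iteration and the best number of stages picked afterwards.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
clf = GradientBoostingClassifier(n_estimators=100, random_state=0).fit(X_tr, y_tr)

# One decision-score array per boosting stage; positive score -> class 1.
test_err = [np.mean((score.ravel() > 0) != y_te)
            for score in clf.staged_decision_function(X_te)]
best_n_estimators = int(np.argmin(test_err)) + 1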
'Return the feature importances (the higher, the more important the
feature).
Returns
feature_importances_ : array, shape = [n_features]'
| @property
def feature_importances_(self):
| self._check_initialized()
total_sum = np.zeros((self.n_features_,), dtype=np.float64)
for stage in self.estimators_:
stage_sum = (sum((tree.feature_importances_ for tree in stage)) / len(stage))
total_sum += stage_sum
importances = (total_sum / len(self.estimators_))
return importances
|
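A short usage sketch with synthetic data: because the per-tree importances are averaged over all fitted stages, the resulting vector sums to roughly one and can be ranked directly.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor

X, y = make_regression(n_samples=200, n_features=8, random_state=0)
est = GradientBoostingRegressor(n_estimators=50, random_state=0).fit(X, y)

ranking = np.argsort(est.feature_importances_)[::-1]
for idx in ranking[:3]:
    print('feature %d: %.3f' % (idx, est.feature_importances_[idx]))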
'Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in, for each estimator.
In the case of binary classification, n_classes is 1.'
| def apply(self, X):
| self._check_initialized()
X = self.estimators_[(0, 0)]._validate_X_predict(X, check_input=True)
(n_estimators, n_classes) = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[(i, j)]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
|
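A common use of the leaf indices returned above, sketched with illustrative data: flatten them to one column per tree and one-hot encode, turning the ensemble into a feature transformer (e.g. as input to a linear model).

from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import OneHotEncoder

X, y = make_classification(n_samples=300, random_state=0)
gbt = GradientBoostingClassifier(n_estimators=20, random_state=0).fit(X, y)

leaves = gbt.apply(X)                      # (n_samples, n_estimators, n_classes)
leaves = leaves.reshape(X.shape[0], -1)    # one column per fitted tree
X_leaf_features = OneHotEncoder().fit_transform(leaves)   # sparse indicator matrix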
'Compute the decision function of ``X``.
Parameters
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].'
| def decision_function(self, X):
| X = check_array(X, dtype=DTYPE, order='C', accept_sparse='csr')
score = self._decision_function(X)
if (score.shape[1] == 1):
return score.ravel()
return score
|