repository | repo_id | target_module_path | prompt | relavent_test_path | full_function | function_name
---|---|---|---|---|---|---
scikit-learn | 222 | sklearn/mixture/_gaussian_mixture.py | def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like of shape (n_components,)
The determinant of the precision matrix for each component.
"""
| /usr/src/app/target_test_cases/failed_tests__compute_log_det_cholesky.txt | def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like of shape (n_components,)
The determinant of the precision matrix for each component.
"""
if covariance_type == "full":
n_components, _, _ = matrix_chol.shape
log_det_chol = np.sum(
np.log(matrix_chol.reshape(n_components, -1)[:, :: n_features + 1]), 1
)
elif covariance_type == "tied":
log_det_chol = np.sum(np.log(np.diag(matrix_chol)))
elif covariance_type == "diag":
log_det_chol = np.sum(np.log(matrix_chol), axis=1)
else:
log_det_chol = n_features * (np.log(matrix_chol))
return log_det_chol
| _compute_log_det_cholesky |
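A minimal usage sketch for `_compute_log_det_cholesky`, assuming the private module path listed in `target_module_path` above is importable:

```python
# Illustrative sketch: log-determinants of diagonal precision-Cholesky factors.
import numpy as np
from sklearn.mixture._gaussian_mixture import _compute_log_det_cholesky

# Two components, three features, every Cholesky entry equal to 2.0.
chol = np.full((2, 3), 2.0)
log_det = _compute_log_det_cholesky(chol, "diag", n_features=3)
print(log_det)  # [2.079... 2.079...] == 3 * log(2) per component
```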
scikit-learn | 223 | sklearn/mixture/_gaussian_mixture.py | def _compute_precision_cholesky(covariances, covariance_type):
"""Compute the Cholesky decomposition of the precisions.
Parameters
----------
covariances : array-like
The covariance matrix of the current components.
The shape depends on the covariance_type.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
precisions_cholesky : array-like
The cholesky decomposition of sample precisions of the current
components. The shape depends on the covariance_type.
"""
| /usr/src/app/target_test_cases/failed_tests__compute_precision_cholesky.txt | def _compute_precision_cholesky(covariances, covariance_type):
"""Compute the Cholesky decomposition of the precisions.
Parameters
----------
covariances : array-like
The covariance matrix of the current components.
The shape depends on the covariance_type.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
precisions_cholesky : array-like
The cholesky decomposition of sample precisions of the current
components. The shape depends on the covariance_type.
"""
estimate_precision_error_message = (
"Fitting the mixture model failed because some components have "
"ill-defined empirical covariance (for instance caused by singleton "
"or collapsed samples). Try to decrease the number of components, "
"or increase reg_covar."
)
if covariance_type == "full":
n_components, n_features, _ = covariances.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for k, covariance in enumerate(covariances):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(
cov_chol, np.eye(n_features), lower=True
).T
elif covariance_type == "tied":
_, n_features = covariances.shape
try:
cov_chol = linalg.cholesky(covariances, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol = linalg.solve_triangular(
cov_chol, np.eye(n_features), lower=True
).T
else:
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(estimate_precision_error_message)
precisions_chol = 1.0 / np.sqrt(covariances)
return precisions_chol
| _compute_precision_cholesky |
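A hedged usage sketch for `_compute_precision_cholesky` on well-conditioned "full" covariances, again assuming the private module path above:

```python
# Illustrative sketch: for a covariance c * I, the precision Cholesky is I / sqrt(c).
import numpy as np
from sklearn.mixture._gaussian_mixture import _compute_precision_cholesky

covariances = np.stack([4.0 * np.eye(2), 0.25 * np.eye(2)])
prec_chol = _compute_precision_cholesky(covariances, "full")
print(np.diagonal(prec_chol, axis1=1, axis2=2))  # [[0.5 0.5] [2.  2. ]]
```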
scikit-learn | 224 | sklearn/utils/_testing.py | def _convert_container(
container,
constructor_name,
columns_name=None,
dtype=None,
minversion=None,
categorical_feature_names=None,
):
"""Convert a given container to a specific array-like with a dtype.
Parameters
----------
container : array-like
The container to convert.
constructor_name : {"list", "tuple", "array", "sparse", "dataframe", \
"series", "index", "slice", "sparse_csr", "sparse_csc", \
"sparse_csr_array", "sparse_csc_array", "pyarrow", "polars", \
"polars_series"}
The type of the returned container.
columns_name : index or array-like, default=None
For pandas container supporting `columns_names`, it will affect
specific names.
dtype : dtype, default=None
Force the dtype of the container. Does not apply to `"slice"`
container.
minversion : str, default=None
Minimum version for package to install.
categorical_feature_names : list of str, default=None
List of column names to cast to categorical dtype.
Returns
-------
converted_container
"""
| /usr/src/app/target_test_cases/failed_tests__convert_container.txt | def _convert_container(
container,
constructor_name,
columns_name=None,
dtype=None,
minversion=None,
categorical_feature_names=None,
):
"""Convert a given container to a specific array-like with a dtype.
Parameters
----------
container : array-like
The container to convert.
constructor_name : {"list", "tuple", "array", "sparse", "dataframe", \
"series", "index", "slice", "sparse_csr", "sparse_csc", \
"sparse_csr_array", "sparse_csc_array", "pyarrow", "polars", \
"polars_series"}
The type of the returned container.
columns_name : index or array-like, default=None
For pandas container supporting `columns_names`, it will affect
specific names.
dtype : dtype, default=None
Force the dtype of the container. Does not apply to `"slice"`
container.
minversion : str, default=None
Minimum version for package to install.
categorical_feature_names : list of str, default=None
List of column names to cast to categorical dtype.
Returns
-------
converted_container
"""
if constructor_name == "list":
if dtype is None:
return list(container)
else:
return np.asarray(container, dtype=dtype).tolist()
elif constructor_name == "tuple":
if dtype is None:
return tuple(container)
else:
return tuple(np.asarray(container, dtype=dtype).tolist())
elif constructor_name == "array":
return np.asarray(container, dtype=dtype)
elif constructor_name in ("pandas", "dataframe"):
pd = pytest.importorskip("pandas", minversion=minversion)
result = pd.DataFrame(container, columns=columns_name, dtype=dtype, copy=False)
if categorical_feature_names is not None:
for col_name in categorical_feature_names:
result[col_name] = result[col_name].astype("category")
return result
elif constructor_name == "pyarrow":
pa = pytest.importorskip("pyarrow", minversion=minversion)
array = np.asarray(container)
if columns_name is None:
columns_name = [f"col{i}" for i in range(array.shape[1])]
data = {name: array[:, i] for i, name in enumerate(columns_name)}
result = pa.Table.from_pydict(data)
if categorical_feature_names is not None:
for col_idx, col_name in enumerate(result.column_names):
if col_name in categorical_feature_names:
result = result.set_column(
col_idx, col_name, result.column(col_name).dictionary_encode()
)
return result
elif constructor_name == "polars":
pl = pytest.importorskip("polars", minversion=minversion)
result = pl.DataFrame(container, schema=columns_name, orient="row")
if categorical_feature_names is not None:
for col_name in categorical_feature_names:
result = result.with_columns(pl.col(col_name).cast(pl.Categorical))
return result
elif constructor_name == "series":
pd = pytest.importorskip("pandas", minversion=minversion)
return pd.Series(container, dtype=dtype)
elif constructor_name == "polars_series":
pl = pytest.importorskip("polars", minversion=minversion)
return pl.Series(values=container)
elif constructor_name == "index":
pd = pytest.importorskip("pandas", minversion=minversion)
return pd.Index(container, dtype=dtype)
elif constructor_name == "slice":
return slice(container[0], container[1])
elif "sparse" in constructor_name:
if not sp.sparse.issparse(container):
# For scipy >= 1.13, sparse array constructed from 1d array may be
# 1d or raise an exception. To avoid this, we make sure that the
# input container is 2d. For more details, see
# https://github.com/scipy/scipy/pull/18530#issuecomment-1878005149
container = np.atleast_2d(container)
if "array" in constructor_name and sp_version < parse_version("1.8"):
raise ValueError(
f"{constructor_name} is only available with scipy>=1.8.0, got "
f"{sp_version}"
)
if constructor_name in ("sparse", "sparse_csr"):
# sparse and sparse_csr are equivalent for legacy reasons
return sp.sparse.csr_matrix(container, dtype=dtype)
elif constructor_name == "sparse_csr_array":
return sp.sparse.csr_array(container, dtype=dtype)
elif constructor_name == "sparse_csc":
return sp.sparse.csc_matrix(container, dtype=dtype)
elif constructor_name == "sparse_csc_array":
return sp.sparse.csc_array(container, dtype=dtype)
| _convert_container |
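A hedged sketch for `_convert_container`; since it lives in a test-utility module and the pandas/polars/pyarrow branches call `pytest.importorskip`, it is intended for a pytest environment. Only the NumPy/SciPy constructors are exercised here:

```python
# Illustrative sketch (test-utility module, so pytest is expected to be installed).
import numpy as np
from sklearn.utils._testing import _convert_container

arr = _convert_container([[1, 2], [3, 4]], "array", dtype=np.float64)
csr = _convert_container([[1, 0], [0, 2]], "sparse_csr")
print(arr.dtype, csr.format)  # float64 csr
```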
scikit-learn | 225 | sklearn/discriminant_analysis.py | def _cov(X, shrinkage=None, covariance_estimator=None):
"""Estimate covariance matrix (using optional covariance_estimator).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
shrinkage : {'empirical', 'auto'} or float, default=None
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Shrinkage parameter is ignored if `covariance_estimator`
is not None.
covariance_estimator : estimator, default=None
If not None, `covariance_estimator` is used to estimate
the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
like the estimators in :mod:`sklearn.covariance`.
If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Returns
-------
s : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
"""
| /usr/src/app/target_test_cases/failed_tests__cov.txt | def _cov(X, shrinkage=None, covariance_estimator=None):
"""Estimate covariance matrix (using optional covariance_estimator).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
shrinkage : {'empirical', 'auto'} or float, default=None
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Shrinkage parameter is ignored if `covariance_estimator`
is not None.
covariance_estimator : estimator, default=None
If not None, `covariance_estimator` is used to estimate
the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
like the estimators in :mod:`sklearn.covariance`.
If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Returns
-------
s : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
"""
if covariance_estimator is None:
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, str):
if shrinkage == "auto":
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == "empirical":
s = empirical_covariance(X)
elif isinstance(shrinkage, Real):
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
if shrinkage is not None and shrinkage != 0:
raise ValueError(
"covariance_estimator and shrinkage parameters "
"are not None. Only one of the two can be set."
)
covariance_estimator.fit(X)
if not hasattr(covariance_estimator, "covariance_"):
raise ValueError(
"%s does not have a covariance_ attribute"
% covariance_estimator.__class__.__name__
)
s = covariance_estimator.covariance_
return s
| _cov |
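A minimal sketch for `_cov`, comparing the empirical estimate with a fixed-shrinkage estimate (assuming the helper is importable from `sklearn.discriminant_analysis` as listed above):

```python
# Illustrative sketch: empirical vs. shrunk covariance of random data.
import numpy as np
from sklearn.discriminant_analysis import _cov

rng = np.random.RandomState(0)
X = rng.randn(200, 3)
s_empirical = _cov(X)              # shrinkage=None -> empirical covariance
s_shrunk = _cov(X, shrinkage=0.5)  # blend towards a scaled identity matrix
print(s_empirical.shape, s_shrunk.shape)  # (3, 3) (3, 3)
```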
scikit-learn | 226 | sklearn/metrics/_ranking.py | def _dcg_sample_scores(y_true, y_score, k=None, log_base=2, ignore_ties=False):
"""Compute Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount.
This ranking metric yields a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If `None`, use all
outputs.
log_base : float, default=2
Base of the logarithm used for the discount. A low value means a
sharper discount (top results are more important).
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
discounted_cumulative_gain : ndarray of shape (n_samples,)
The DCG score for each sample.
See Also
--------
ndcg_score : The Discounted Cumulative Gain divided by the Ideal Discounted
Cumulative Gain (the DCG obtained for a perfect ranking), in order to
have a score between 0 and 1.
"""
| /usr/src/app/target_test_cases/failed_tests__dcg_sample_scores.txt | def _dcg_sample_scores(y_true, y_score, k=None, log_base=2, ignore_ties=False):
"""Compute Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount.
This ranking metric yields a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If `None`, use all
outputs.
log_base : float, default=2
Base of the logarithm used for the discount. A low value means a
sharper discount (top results are more important).
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
discounted_cumulative_gain : ndarray of shape (n_samples,)
The DCG score for each sample.
See Also
--------
ndcg_score : The Discounted Cumulative Gain divided by the Ideal Discounted
Cumulative Gain (the DCG obtained for a perfect ranking), in order to
have a score between 0 and 1.
"""
discount = 1 / (np.log(np.arange(y_true.shape[1]) + 2) / np.log(log_base))
if k is not None:
discount[k:] = 0
if ignore_ties:
ranking = np.argsort(y_score)[:, ::-1]
ranked = y_true[np.arange(ranking.shape[0])[:, np.newaxis], ranking]
cumulative_gains = discount.dot(ranked.T)
else:
discount_cumsum = np.cumsum(discount)
cumulative_gains = [
_tie_averaged_dcg(y_t, y_s, discount_cumsum)
for y_t, y_s in zip(y_true, y_score)
]
cumulative_gains = np.asarray(cumulative_gains)
return cumulative_gains
| _dcg_sample_scores |
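A small numeric sketch for `_dcg_sample_scores`, assuming the private import path from `sklearn/metrics/_ranking.py`:

```python
# Illustrative sketch: DCG when the predicted order matches the true relevances.
import numpy as np
from sklearn.metrics._ranking import _dcg_sample_scores

y_true = np.array([[3, 2, 1]])
y_score = np.array([[0.9, 0.5, 0.1]])
dcg = _dcg_sample_scores(y_true, y_score, ignore_ties=True)
print(dcg)  # ~[4.762]  ==  3/log2(2) + 2/log2(3) + 1/log2(4)
```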
scikit-learn | 227 | sklearn/linear_model/_ransac.py | def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
| /usr/src/app/target_test_cases/failed_tests__dynamic_max_trials.txt | def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio**min_samples)
if nom == 1:
return 0
if denom == 1:
return float("inf")
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
| _dynamic_max_trials |
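A worked sketch of the RANSAC trial-count formula implemented above:

```python
# Illustrative sketch: 50% inliers, 2-point samples, 99% confidence
# -> ceil(log(0.01) / log(1 - 0.5**2)) = 17 trials.
from sklearn.linear_model._ransac import _dynamic_max_trials

n_trials = _dynamic_max_trials(n_inliers=50, n_samples=100,
                               min_samples=2, probability=0.99)
print(n_trials)  # 17.0
```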
scikit-learn | 228 | sklearn/utils/_encode.py | def _encode(values, *, uniques, check_unknown=True):
"""Helper function to encode values into [0, n_uniques - 1].
Uses pure python method for object dtype, and numpy method for
all other dtypes.
The numpy method has the limitation that the `uniques` need to
be sorted. Importantly, this is not checked but assumed to already be
the case. The calling method needs to ensure this for all non-object
values.
Parameters
----------
values : ndarray
Values to encode.
uniques : ndarray
The unique values in `values`. If the dtype is not object, then
`uniques` needs to be sorted.
check_unknown : bool, default=True
If True, check for values in `values` that are not in `uniques`
and raise an error. This is ignored for object dtype, and treated as
True in this case. This parameter is useful for
_BaseEncoder._transform() to avoid calling _check_unknown()
twice.
Returns
-------
encoded : ndarray
Encoded values
"""
| /usr/src/app/target_test_cases/failed_tests__encode.txt | def _encode(values, *, uniques, check_unknown=True):
"""Helper function to encode values into [0, n_uniques - 1].
Uses pure python method for object dtype, and numpy method for
all other dtypes.
The numpy method has the limitation that the `uniques` need to
be sorted. Importantly, this is not checked but assumed to already be
the case. The calling method needs to ensure this for all non-object
values.
Parameters
----------
values : ndarray
Values to encode.
uniques : ndarray
The unique values in `values`. If the dtype is not object, then
`uniques` needs to be sorted.
check_unknown : bool, default=True
If True, check for values in `values` that are not in `uniques`
and raise an error. This is ignored for object dtype, and treated as
True in this case. This parameter is useful for
_BaseEncoder._transform() to avoid calling _check_unknown()
twice.
Returns
-------
encoded : ndarray
Encoded values
"""
xp, _ = get_namespace(values, uniques)
if not xp.isdtype(values.dtype, "numeric"):
try:
return _map_to_integer(values, uniques)
except KeyError as e:
raise ValueError(f"y contains previously unseen labels: {str(e)}")
else:
if check_unknown:
diff = _check_unknown(values, uniques)
if diff:
raise ValueError(f"y contains previously unseen labels: {str(diff)}")
return _searchsorted(xp, uniques, values)
| _encode |
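A minimal sketch for `_encode` with object-dtype labels, assuming the private module path above:

```python
# Illustrative sketch: object-dtype values are mapped through a dict built from
# `uniques`; unseen labels raise a ValueError.
import numpy as np
from sklearn.utils._encode import _encode

uniques = np.array(["a", "b", "c"], dtype=object)
codes = _encode(np.array(["b", "a", "c", "a"], dtype=object), uniques=uniques)
print(codes)  # [1 0 2 0]
```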
scikit-learn | 229 | sklearn/mixture/_gaussian_mixture.py | def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
"""Estimate the full covariance matrices.
Parameters
----------
resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features, n_features)
The covariance matrix of the current components.
"""
| /usr/src/app/target_test_cases/failed_tests__estimate_gaussian_covariances_full.txt | def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
"""Estimate the full covariance matrices.
Parameters
----------
resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features, n_features)
The covariance matrix of the current components.
"""
n_components, n_features = means.shape
covariances = np.empty((n_components, n_features, n_features))
for k in range(n_components):
diff = X - means[k]
covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]
covariances[k].flat[:: n_features + 1] += reg_covar
return covariances
| _estimate_gaussian_covariances_full |
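A hedged sketch for `_estimate_gaussian_covariances_full` with hard responsibilities:

```python
# Illustrative sketch: with hard (0/1) responsibilities the "full" covariance
# update reduces to per-cluster empirical covariances plus reg_covar on the diagonal.
import numpy as np
from sklearn.mixture._gaussian_mixture import _estimate_gaussian_covariances_full

X = np.array([[0.0, 0.0], [2.0, 0.0], [10.0, 10.0], [10.0, 12.0]])
resp = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
nk = resp.sum(axis=0)
means = resp.T @ X / nk[:, np.newaxis]
covariances = _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar=1e-6)
print(covariances.shape)  # (2, 2, 2)
```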
scikit-learn | 230 | sklearn/mixture/_gaussian_mixture.py | def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):
"""Estimate the Gaussian distribution parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data array.
resp : array-like of shape (n_samples, n_components)
The responsibilities for each data sample in X.
reg_covar : float
The regularization added to the diagonal of the covariance matrices.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
nk : array-like of shape (n_components,)
The numbers of data samples in the current components.
means : array-like of shape (n_components, n_features)
The centers of the current components.
covariances : array-like
The covariance matrix of the current components.
The shape depends on the covariance_type.
"""
| /usr/src/app/target_test_cases/failed_tests__estimate_gaussian_parameters.txt | def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):
"""Estimate the Gaussian distribution parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data array.
resp : array-like of shape (n_samples, n_components)
The responsibilities for each data sample in X.
reg_covar : float
The regularization added to the diagonal of the covariance matrices.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
nk : array-like of shape (n_components,)
The numbers of data samples in the current components.
means : array-like of shape (n_components, n_features)
The centers of the current components.
covariances : array-like
The covariance matrix of the current components.
The shape depends on the covariance_type.
"""
nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
means = np.dot(resp.T, X) / nk[:, np.newaxis]
covariances = {
"full": _estimate_gaussian_covariances_full,
"tied": _estimate_gaussian_covariances_tied,
"diag": _estimate_gaussian_covariances_diag,
"spherical": _estimate_gaussian_covariances_spherical,
}[covariance_type](resp, X, nk, means, reg_covar)
return nk, means, covariances
| _estimate_gaussian_parameters |
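A hedged sketch of the combined M-step helper `_estimate_gaussian_parameters`:

```python
# Illustrative sketch: hard responsibilities on two well-separated pairs of
# points recover the pair means.
import numpy as np
from sklearn.mixture._gaussian_mixture import _estimate_gaussian_parameters

X = np.array([[0.0, 0.0], [0.0, 2.0], [10.0, 10.0], [10.0, 12.0]])
resp = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
nk, means, covariances = _estimate_gaussian_parameters(
    X, resp, reg_covar=1e-6, covariance_type="full"
)
print(nk.round(3))  # [2. 2.]
print(means)        # approximately [[0, 1], [10, 11]]
```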
scikit-learn | 231 | sklearn/feature_extraction/image.py | def _extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : int or tuple of length arr.ndim, default=8
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : int or tuple of length arr.ndim, default=1
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
| /usr/src/app/target_test_cases/failed_tests__extract_patches.txt | def _extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : int or tuple of length arr.ndim, default=8
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : int or tuple of length arr.ndim, default=1
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = (
(np.array(arr.shape) - np.array(patch_shape)) // np.array(extraction_step)
) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
| _extract_patches |
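A minimal sketch for `_extract_patches`, assuming the import path given in `target_module_path`:

```python
# Illustrative sketch: all 2x2 patches of a 3x3 array as a strided (copy-free) view.
import numpy as np
from sklearn.feature_extraction.image import _extract_patches

arr = np.arange(9).reshape(3, 3)
patches = _extract_patches(arr, patch_shape=2, extraction_step=1)
print(patches.shape)  # (2, 2, 2, 2)
print(patches[0, 0])  # [[0 1] [3 4]]
```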
scikit-learn | 232 | sklearn/datasets/_base.py | def _fetch_remote(remote, dirname=None, n_retries=3, delay=1):
"""Helper function to download a remote dataset.
Fetch a dataset pointed by remote's url, save into path using remote's
filename and ensure its integrity based on the SHA256 checksum of the
downloaded file.
.. versionchanged:: 1.6
If the file already exists locally and the SHA256 checksums match, the
path to the local file is returned without re-downloading.
Parameters
----------
remote : RemoteFileMetadata
Named tuple containing remote dataset meta information: url, filename
and checksum.
dirname : str or Path, default=None
Directory to save the file to. If None, the current working directory
is used.
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : int, default=1
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
file_path: Path
Full path of the created file.
"""
| /usr/src/app/target_test_cases/failed_tests__fetch_remote.txt | def _fetch_remote(remote, dirname=None, n_retries=3, delay=1):
"""Helper function to download a remote dataset.
Fetch a dataset pointed by remote's url, save into path using remote's
filename and ensure its integrity based on the SHA256 checksum of the
downloaded file.
.. versionchanged:: 1.6
If the file already exists locally and the SHA256 checksums match, the
path to the local file is returned without re-downloading.
Parameters
----------
remote : RemoteFileMetadata
Named tuple containing remote dataset meta information: url, filename
and checksum.
dirname : str or Path, default=None
Directory to save the file to. If None, the current working directory
is used.
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : int, default=1
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
file_path: Path
Full path of the created file.
"""
if dirname is None:
folder_path = Path(".")
else:
folder_path = Path(dirname)
file_path = folder_path / remote.filename
if file_path.exists():
if remote.checksum is None:
return file_path
checksum = _sha256(file_path)
if checksum == remote.checksum:
return file_path
else:
warnings.warn(
f"SHA256 checksum of existing local file {file_path.name} "
f"({checksum}) differs from expected ({remote.checksum}): "
f"re-downloading from {remote.url} ."
)
# We create a temporary file dedicated to this particular download to avoid
# conflicts with parallel downloads. If the download is successful, the
# temporary file is atomically renamed to the final file path (with
# `shutil.move`). We therefore pass `delete=False` to `NamedTemporaryFile`.
# Otherwise, garbage collecting temp_file would raise an error when
# attempting to delete a file that was already renamed. If the download
# fails or the result does not match the expected SHA256 digest, the
# temporary file is removed manually in the except block.
temp_file = NamedTemporaryFile(
prefix=remote.filename + ".part_", dir=folder_path, delete=False
)
# Note that Python 3.12's `delete_on_close=True` is ignored as we set
# `delete=False` explicitly. So after this line the empty temporary file still
# exists on disk to make sure that it's uniquely reserved for this specific call of
# `_fetch_remote` and therefore it protects against any corruption by parallel
# calls.
temp_file.close()
try:
temp_file_path = Path(temp_file.name)
while True:
try:
urlretrieve(remote.url, temp_file_path)
break
except (URLError, TimeoutError):
if n_retries == 0:
# If no more retries are left, re-raise the caught exception.
raise
warnings.warn(f"Retry downloading from url: {remote.url}")
n_retries -= 1
time.sleep(delay)
checksum = _sha256(temp_file_path)
if remote.checksum is not None and remote.checksum != checksum:
raise OSError(
f"The SHA256 checksum of {remote.filename} ({checksum}) "
f"differs from expected ({remote.checksum})."
)
except (Exception, KeyboardInterrupt):
os.unlink(temp_file.name)
raise
# The following renaming is atomic whenever temp_file_path and
# file_path are on the same filesystem. This should be the case most of
# the time, but we still use shutil.move instead of os.rename in case
# they are not.
shutil.move(temp_file_path, file_path)
return file_path
| _fetch_remote |
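A hedged, non-executed sketch of how a caller describes a file for `_fetch_remote`; the URL and checksum below are hypothetical placeholders, not real resources:

```python
# Illustrative sketch only; the URL and checksum are hypothetical placeholders.
from sklearn.datasets._base import RemoteFileMetadata, _fetch_remote

remote = RemoteFileMetadata(
    filename="data.csv.gz",
    url="https://example.com/data.csv.gz",  # hypothetical URL
    checksum="0" * 64,                      # hypothetical SHA256 digest
)
# Would download into the given directory, retrying up to 3 times on HTTP errors:
# path = _fetch_remote(remote, dirname="/tmp/sklearn_data", n_retries=3, delay=1)
```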
scikit-learn | 233 | sklearn/ensemble/_hist_gradient_boosting/binning.py | def _find_binning_thresholds(col_data, max_bins):
"""Extract quantiles from a continuous feature.
Missing values are ignored for finding the thresholds.
Parameters
----------
col_data : array-like, shape (n_samples,)
The continuous feature to bin.
max_bins: int
The maximum number of bins to use for non-missing values. If for a
given feature the number of unique values is less than ``max_bins``,
then those unique values will be used to compute the bin thresholds,
instead of the quantiles
Return
------
binning_thresholds : ndarray of shape(min(max_bins, n_unique_values) - 1,)
The increasing numeric values that can be used to separate the bins.
A given value x will be mapped into bin value i iff
binning_thresholds[i - 1] < x <= binning_thresholds[i]
"""
| /usr/src/app/target_test_cases/failed_tests__find_binning_thresholds.txt | def _find_binning_thresholds(col_data, max_bins):
"""Extract quantiles from a continuous feature.
Missing values are ignored for finding the thresholds.
Parameters
----------
col_data : array-like, shape (n_samples,)
The continuous feature to bin.
max_bins: int
The maximum number of bins to use for non-missing values. If for a
given feature the number of unique values is less than ``max_bins``,
then those unique values will be used to compute the bin thresholds,
instead of the quantiles
Return
------
binning_thresholds : ndarray of shape(min(max_bins, n_unique_values) - 1,)
The increasing numeric values that can be used to separate the bins.
A given value x will be mapped into bin value i iff
binning_thresholds[i - 1] < x <= binning_thresholds[i]
"""
# ignore missing values when computing bin thresholds
missing_mask = np.isnan(col_data)
if missing_mask.any():
col_data = col_data[~missing_mask]
# The data will be sorted anyway in np.unique and again in percentile, so we do it
# here. Sorting also returns a contiguous array.
col_data = np.sort(col_data)
distinct_values = np.unique(col_data).astype(X_DTYPE)
if len(distinct_values) <= max_bins:
midpoints = distinct_values[:-1] + distinct_values[1:]
midpoints *= 0.5
else:
# We could compute approximate midpoint percentiles using the output of
# np.unique(col_data, return_counts) instead but this is more
# work and the performance benefit will be limited because we
# work on a fixed-size subsample of the full data.
percentiles = np.linspace(0, 100, num=max_bins + 1)
percentiles = percentiles[1:-1]
midpoints = percentile(col_data, percentiles, method="midpoint").astype(X_DTYPE)
assert midpoints.shape[0] == max_bins - 1
# We avoid having +inf thresholds: +inf thresholds are only allowed in
# a "split on nan" situation.
np.clip(midpoints, a_min=None, a_max=ALMOST_INF, out=midpoints)
return midpoints
| _find_binning_thresholds |
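A small sketch for `_find_binning_thresholds`, assuming the private histogram-GBDT module path above:

```python
# Illustrative sketch: fewer unique values than max_bins -> thresholds are the
# midpoints between consecutive distinct values; NaNs are ignored.
import numpy as np
from sklearn.ensemble._hist_gradient_boosting.binning import _find_binning_thresholds

col_data = np.array([0.0, 1.0, 1.0, 3.0, np.nan], dtype=np.float64)
print(_find_binning_thresholds(col_data, max_bins=255))  # [0.5 2. ]
```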
scikit-learn | 234 | sklearn/model_selection/_validation.py | def _fit_and_score(
estimator,
X,
y,
*,
scorer,
train,
test,
verbose,
parameters,
fit_params,
score_params,
return_train_score=False,
return_parameters=False,
return_n_test_samples=False,
return_times=False,
return_estimator=False,
split_progress=None,
candidate_progress=None,
error_score=np.nan,
):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
score_params : dict or None
Parameters that will be passed to the scorer.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
Return the parameters that have been used for the estimator.
split_progress : {list, tuple} of int, default=None
A list or tuple of format (<current_split_id>, <total_num_of_splits>).
candidate_progress : {list, tuple} of int, default=None
A list or tuple of format
(<current_candidate_id>, <total_number_of_candidates>).
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``.
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
fit_error : str or None
Traceback str if the fit failed, None if the fit succeeded.
"""
| /usr/src/app/target_test_cases/failed_tests__fit_and_score.txt | def _fit_and_score(
estimator,
X,
y,
*,
scorer,
train,
test,
verbose,
parameters,
fit_params,
score_params,
return_train_score=False,
return_parameters=False,
return_n_test_samples=False,
return_times=False,
return_estimator=False,
split_progress=None,
candidate_progress=None,
error_score=np.nan,
):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
score_params : dict or None
Parameters that will be passed to the scorer.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
Return the parameters that have been used for the estimator.
split_progress : {list, tuple} of int, default=None
A list or tuple of format (<current_split_id>, <total_num_of_splits>).
candidate_progress : {list, tuple} of int, default=None
A list or tuple of format
(<current_candidate_id>, <total_number_of_candidates>).
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``.
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
fit_error : str or None
Traceback str if the fit failed, None if the fit succeeded.
"""
xp, _ = get_namespace(X)
X_device = device(X)
# Make sure that we can fancy index X even if train and test are provided
# as NumPy arrays by NumPy only cross-validation splitters.
train, test = xp.asarray(train, device=X_device), xp.asarray(test, device=X_device)
if not isinstance(error_score, numbers.Number) and error_score != "raise":
raise ValueError(
"error_score must be the string 'raise' or a numeric value. "
"(Hint: if using 'raise', please make sure that it has been "
"spelled correctly.)"
)
progress_msg = ""
if verbose > 2:
if split_progress is not None:
progress_msg = f" {split_progress[0]+1}/{split_progress[1]}"
if candidate_progress and verbose > 9:
progress_msg += f"; {candidate_progress[0]+1}/{candidate_progress[1]}"
if verbose > 1:
if parameters is None:
params_msg = ""
else:
sorted_keys = sorted(parameters) # Ensure deterministic o/p
params_msg = ", ".join(f"{k}={parameters[k]}" for k in sorted_keys)
if verbose > 9:
start_msg = f"[CV{progress_msg}] START {params_msg}"
print(f"{start_msg}{(80 - len(start_msg)) * '.'}")
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_method_params(X, params=fit_params, indices=train)
score_params = score_params if score_params is not None else {}
score_params_train = _check_method_params(X, params=score_params, indices=train)
score_params_test = _check_method_params(X, params=score_params, indices=test)
if parameters is not None:
# here we clone the parameters, since sometimes the parameters
# themselves might be estimators, e.g. when we search over different
# estimators in a pipeline.
# ref: https://github.com/scikit-learn/scikit-learn/pull/26786
estimator = estimator.set_params(**clone(parameters, safe=False))
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
result = {}
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == "raise":
raise
elif isinstance(error_score, numbers.Number):
if isinstance(scorer, _MultimetricScorer):
test_scores = {name: error_score for name in scorer._scorers}
if return_train_score:
train_scores = test_scores.copy()
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
result["fit_error"] = format_exc()
else:
result["fit_error"] = None
fit_time = time.time() - start_time
test_scores = _score(
estimator, X_test, y_test, scorer, score_params_test, error_score
)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = _score(
estimator, X_train, y_train, scorer, score_params_train, error_score
)
if verbose > 1:
total_time = score_time + fit_time
end_msg = f"[CV{progress_msg}] END "
result_msg = params_msg + (";" if params_msg else "")
if verbose > 2:
if isinstance(test_scores, dict):
for scorer_name in sorted(test_scores):
result_msg += f" {scorer_name}: ("
if return_train_score:
scorer_scores = train_scores[scorer_name]
result_msg += f"train={scorer_scores:.3f}, "
result_msg += f"test={test_scores[scorer_name]:.3f})"
else:
result_msg += ", score="
if return_train_score:
result_msg += f"(train={train_scores:.3f}, test={test_scores:.3f})"
else:
result_msg += f"{test_scores:.3f}"
result_msg += f" total time={logger.short_format_time(total_time)}"
# Right align the result_msg
end_msg += "." * (80 - len(end_msg) - len(result_msg))
end_msg += result_msg
print(end_msg)
result["test_scores"] = test_scores
if return_train_score:
result["train_scores"] = train_scores
if return_n_test_samples:
result["n_test_samples"] = _num_samples(X_test)
if return_times:
result["fit_time"] = fit_time
result["score_time"] = score_time
if return_parameters:
result["parameters"] = parameters
if return_estimator:
result["estimator"] = estimator
return result
| _fit_and_score |
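A hedged sketch of a single manual train/test split evaluated with `_fit_and_score`; the public `cross_validate` API performs essentially this call once per CV split:

```python
# Illustrative sketch: one split, one scorer, default error_score.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import check_scoring
from sklearn.model_selection._validation import _fit_and_score

X, y = load_iris(return_X_y=True)
estimator = LogisticRegression(max_iter=500)
scorer = check_scoring(estimator, scoring="accuracy")
train, test = np.arange(0, 150, 2), np.arange(1, 150, 2)
result = _fit_and_score(
    estimator, X, y,
    scorer=scorer, train=train, test=test, verbose=0,
    parameters=None, fit_params=None, score_params=None,
    return_times=True,
)
print(result["test_scores"], result["fit_time"] >= 0.0)
```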
scikit-learn | 235 | sklearn/model_selection/_classification_threshold.py | def _fit_and_score_over_thresholds(
classifier,
X,
y,
*,
fit_params,
train_idx,
val_idx,
curve_scorer,
score_params,
):
"""Fit a classifier and compute the scores for different decision thresholds.
Parameters
----------
classifier : estimator instance
The classifier to fit and use for scoring. If `classifier` is already fitted,
it will be used as is.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The entire dataset.
y : array-like of shape (n_samples,)
The entire target vector.
fit_params : dict
Parameters to pass to the `fit` method of the underlying classifier.
train_idx : ndarray of shape (n_train_samples,) or None
The indices of the training set. If `None`, `classifier` is expected to be
already fitted.
val_idx : ndarray of shape (n_val_samples,)
The indices of the validation set used to score `classifier`. If `train_idx`
is `None`, the entire set will be used.
curve_scorer : scorer instance
The scorer taking `classifier` and the validation set as input and outputting
decision thresholds and scores as a curve. Note that this is different from
the usual scorer that outputs a single score value:
* when `score_method` is one of the four constraint metrics, the curve scorer
will output a curve of two scores parametrized by the decision threshold, e.g.
TPR/TNR or precision/recall curves for each threshold;
* otherwise, the curve scorer will output a single score value for each
threshold.
score_params : dict
Parameters to pass to the `score` method of the underlying scorer.
Returns
-------
scores : ndarray of shape (thresholds,) or tuple of such arrays
The scores computed for each decision threshold. When TPR/TNR or precision/
recall are computed, `scores` is a tuple of two arrays.
potential_thresholds : ndarray of shape (thresholds,)
The decision thresholds used to compute the scores. They are returned in
ascending order.
"""
| /usr/src/app/target_test_cases/failed_tests__fit_and_score_over_thresholds.txt | def _fit_and_score_over_thresholds(
classifier,
X,
y,
*,
fit_params,
train_idx,
val_idx,
curve_scorer,
score_params,
):
"""Fit a classifier and compute the scores for different decision thresholds.
Parameters
----------
classifier : estimator instance
The classifier to fit and use for scoring. If `classifier` is already fitted,
it will be used as is.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The entire dataset.
y : array-like of shape (n_samples,)
The entire target vector.
fit_params : dict
Parameters to pass to the `fit` method of the underlying classifier.
train_idx : ndarray of shape (n_train_samples,) or None
The indices of the training set. If `None`, `classifier` is expected to be
already fitted.
val_idx : ndarray of shape (n_val_samples,)
The indices of the validation set used to score `classifier`. If `train_idx`
is `None`, the entire set will be used.
curve_scorer : scorer instance
The scorer taking `classifier` and the validation set as input and outputting
decision thresholds and scores as a curve. Note that this is different from
the usual scorer that outputs a single score value:
* when `score_method` is one of the four constraint metrics, the curve scorer
will output a curve of two scores parametrized by the decision threshold, e.g.
TPR/TNR or precision/recall curves for each threshold;
* otherwise, the curve scorer will output a single score value for each
threshold.
score_params : dict
Parameters to pass to the `score` method of the underlying scorer.
Returns
-------
scores : ndarray of shape (thresholds,) or tuple of such arrays
The scores computed for each decision threshold. When TPR/TNR or precision/
recall are computed, `scores` is a tuple of two arrays.
potential_thresholds : ndarray of shape (thresholds,)
The decision thresholds used to compute the scores. They are returned in
ascending order.
"""
if train_idx is not None:
X_train, X_val = _safe_indexing(X, train_idx), _safe_indexing(X, val_idx)
y_train, y_val = _safe_indexing(y, train_idx), _safe_indexing(y, val_idx)
fit_params_train = _check_method_params(X, fit_params, indices=train_idx)
score_params_val = _check_method_params(X, score_params, indices=val_idx)
classifier.fit(X_train, y_train, **fit_params_train)
else: # prefit estimator, only a validation set is provided
X_val, y_val, score_params_val = X, y, score_params
return curve_scorer(classifier, X_val, y_val, **score_params_val)
| _fit_and_score_over_thresholds |
scikit-learn | 236 | sklearn/utils/graph.py | def _fix_connected_components(
X,
graph,
n_connected_components,
component_labels,
mode="distance",
metric="euclidean",
**kwargs,
):
"""Add connections to sparse graph to connect unconnected components.
For each pair of unconnected components, compute all pairwise distances
from one component to the other, and add a connection on the closest pair
of samples. This is a hacky way to get a graph with a single connected
component, which is necessary for example to compute a shortest path
between all pairs of samples in the graph.
Parameters
----------
X : array of shape (n_samples, n_features) or (n_samples, n_samples)
Features to compute the pairwise distances. If `metric =
"precomputed"`, X is the matrix of pairwise distances.
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples.
n_connected_components : int
Number of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
component_labels : array of shape (n_samples)
Labels of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
mode : {'connectivity', 'distance'}, default='distance'
Type of graph matrix: 'connectivity' corresponds to the connectivity
matrix with ones and zeros, and 'distance' corresponds to the distances
between neighbors according to the given metric.
metric : str
Metric used in `sklearn.metrics.pairwise.pairwise_distances`.
kwargs : kwargs
Keyword arguments passed to
`sklearn.metrics.pairwise.pairwise_distances`.
Returns
-------
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples, with a single connected component.
"""
| /usr/src/app/target_test_cases/failed_tests__fix_connected_components.txt | def _fix_connected_components(
X,
graph,
n_connected_components,
component_labels,
mode="distance",
metric="euclidean",
**kwargs,
):
"""Add connections to sparse graph to connect unconnected components.
For each pair of unconnected components, compute all pairwise distances
from one component to the other, and add a connection on the closest pair
of samples. This is a hacky way to get a graph with a single connected
component, which is necessary for example to compute a shortest path
between all pairs of samples in the graph.
Parameters
----------
X : array of shape (n_samples, n_features) or (n_samples, n_samples)
Features to compute the pairwise distances. If `metric =
"precomputed"`, X is the matrix of pairwise distances.
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples.
n_connected_components : int
Number of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
component_labels : array of shape (n_samples)
Labels of connected components, as computed by
`scipy.sparse.csgraph.connected_components`.
mode : {'connectivity', 'distance'}, default='distance'
Type of graph matrix: 'connectivity' corresponds to the connectivity
matrix with ones and zeros, and 'distance' corresponds to the distances
between neighbors according to the given metric.
metric : str
Metric used in `sklearn.metrics.pairwise.pairwise_distances`.
kwargs : kwargs
Keyword arguments passed to
`sklearn.metrics.pairwise.pairwise_distances`.
Returns
-------
graph : sparse matrix of shape (n_samples, n_samples)
Graph of connection between samples, with a single connected component.
"""
if metric == "precomputed" and sparse.issparse(X):
raise RuntimeError(
"_fix_connected_components with metric='precomputed' requires the "
"full distance matrix in X, and does not work with a sparse "
"neighbors graph."
)
for i in range(n_connected_components):
idx_i = np.flatnonzero(component_labels == i)
Xi = X[idx_i]
for j in range(i):
idx_j = np.flatnonzero(component_labels == j)
Xj = X[idx_j]
if metric == "precomputed":
D = X[np.ix_(idx_i, idx_j)]
else:
D = pairwise_distances(Xi, Xj, metric=metric, **kwargs)
ii, jj = np.unravel_index(D.argmin(axis=None), D.shape)
if mode == "connectivity":
graph[idx_i[ii], idx_j[jj]] = 1
graph[idx_j[jj], idx_i[ii]] = 1
elif mode == "distance":
graph[idx_i[ii], idx_j[jj]] = D[ii, jj]
graph[idx_j[jj], idx_i[ii]] = D[ii, jj]
else:
raise ValueError(
"Unknown mode=%r, should be one of ['connectivity', 'distance']."
% mode
)
return graph
| _fix_connected_components |
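A hedged sketch for `_fix_connected_components`: a 1-nearest-neighbor graph with two disconnected pairs gets a single bridging edge between the closest cross-component samples.

```python
# Illustrative sketch: bridge a 1-NN graph that has two disconnected components.
import numpy as np
from scipy.sparse.csgraph import connected_components
from sklearn.neighbors import kneighbors_graph
from sklearn.utils.graph import _fix_connected_components

X = np.array([[0.0], [0.1], [10.0], [10.1]])
graph = kneighbors_graph(X, n_neighbors=1, mode="distance").tolil()
n_components, labels = connected_components(graph)
print(n_components)  # 2
graph = _fix_connected_components(
    X, graph, n_components, labels, mode="distance", metric="euclidean"
)
print(connected_components(graph)[0])  # 1
```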
scikit-learn | 237 | sklearn/cluster/_agglomerative.py | def _fix_connectivity(X, connectivity, affinity):
"""
Fixes the connectivity matrix.
The different steps are:
- copies it
- makes it symmetric
- converts it to LIL if necessary
- completes it if necessary.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : sparse matrix, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is `None`, i.e, the Ward algorithm is unstructured.
affinity : {"euclidean", "precomputed"}, default="euclidean"
Which affinity to use. At the moment `precomputed` and
``euclidean`` are supported. `euclidean` uses the
negative squared Euclidean distance between points.
Returns
-------
connectivity : sparse matrix
The fixed connectivity matrix.
n_connected_components : int
The number of connected components in the graph.
"""
| /usr/src/app/target_test_cases/failed_tests__fix_connectivity.txt | def _fix_connectivity(X, connectivity, affinity):
"""
Fixes the connectivity matrix.
The different steps are:
- copies it
- makes it symmetric
- converts it to LIL if necessary
- completes it if necessary.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : sparse matrix, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is `None`, i.e, the Ward algorithm is unstructured.
affinity : {"euclidean", "precomputed"}, default="euclidean"
Which affinity to use. At the moment `precomputed` and
``euclidean`` are supported. `euclidean` uses the
negative squared Euclidean distance between points.
Returns
-------
connectivity : sparse matrix
The fixed connectivity matrix.
n_connected_components : int
The number of connected components in the graph.
"""
n_samples = X.shape[0]
if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples:
raise ValueError(
"Wrong shape for connectivity matrix: %s when X is %s"
% (connectivity.shape, X.shape)
)
# Make the connectivity matrix symmetric:
connectivity = connectivity + connectivity.T
# Convert connectivity matrix to LIL
if not sparse.issparse(connectivity):
connectivity = sparse.lil_matrix(connectivity)
# `connectivity` is a sparse matrix at this point
if connectivity.format != "lil":
connectivity = connectivity.tolil()
# Compute the number of nodes
n_connected_components, labels = connected_components(connectivity)
if n_connected_components > 1:
warnings.warn(
"the number of connected components of the "
"connectivity matrix is %d > 1. Completing it to avoid "
"stopping the tree early." % n_connected_components,
stacklevel=2,
)
# XXX: Can we do without completing the matrix?
connectivity = _fix_connected_components(
X=X,
graph=connectivity,
n_connected_components=n_connected_components,
component_labels=labels,
metric=affinity,
mode="connectivity",
)
return connectivity, n_connected_components
| _fix_connectivity |
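A hedged sketch for `_fix_connectivity`, completing a disconnected connectivity matrix before structured agglomerative clustering (the UserWarning about 2 connected components is expected here):

```python
# Illustrative sketch: symmetrize, convert to LIL and complete the graph.
import numpy as np
from scipy import sparse
from sklearn.cluster._agglomerative import _fix_connectivity

X = np.array([[0.0], [0.1], [10.0], [10.1]])
connectivity = sparse.lil_matrix((4, 4))
connectivity[0, 1] = 1
connectivity[2, 3] = 1
connectivity, n_components = _fix_connectivity(X, connectivity, affinity="euclidean")
print(n_components)  # 2 (count before completion); the returned graph is connected
```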
scikit-learn | 238 | sklearn/inspection/_pd_utils.py | def _get_feature_index(fx, feature_names=None):
"""Get feature index.
Parameters
----------
fx : int or str
Feature index or name.
feature_names : list of str, default=None
All feature names from which to search the indices.
Returns
-------
idx : int
Feature index.
"""
| /usr/src/app/target_test_cases/failed_tests__get_feature_index.txt | def _get_feature_index(fx, feature_names=None):
"""Get feature index.
Parameters
----------
fx : int or str
Feature index or name.
feature_names : list of str, default=None
All feature names from which to search the indices.
Returns
-------
idx : int
Feature index.
"""
if isinstance(fx, str):
if feature_names is None:
raise ValueError(
f"Cannot plot partial dependence for feature {fx!r} since "
"the list of feature names was not provided, neither as "
"column names of a pandas data-frame nor via the feature_names "
"parameter."
)
try:
return feature_names.index(fx)
except ValueError as e:
raise ValueError(f"Feature {fx!r} not in feature_names") from e
return fx
| _get_feature_index |
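A short sketch of the lookup behaviour (private helper; import path assumed from the module path above):

from sklearn.inspection._pd_utils import _get_feature_index

feature_names = ["age", "income", "height"]
print(_get_feature_index("income", feature_names=feature_names))  # 1
print(_get_feature_index(2, feature_names=feature_names))         # 2, integers pass through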
scikit-learn | 239 | sklearn/ensemble/_forest.py | def _get_n_samples_bootstrap(n_samples, max_samples):
"""
Get the number of samples in a bootstrap sample.
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : int or float
The maximum number of samples to draw from the total available:
- if float, this indicates a fraction of the total and should be
in the interval `(0.0, 1.0]`;
- if int, this indicates the exact number of samples;
- if None, this indicates the total number of samples.
Returns
-------
n_samples_bootstrap : int
The total number of samples to draw for the bootstrap sample.
"""
| /usr/src/app/target_test_cases/failed_tests__get_n_samples_bootstrap.txt | def _get_n_samples_bootstrap(n_samples, max_samples):
"""
Get the number of samples in a bootstrap sample.
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : int or float
The maximum number of samples to draw from the total available:
- if float, this indicates a fraction of the total and should be
in the interval `(0.0, 1.0]`;
- if int, this indicates the exact number of samples;
- if None, this indicates the total number of samples.
Returns
-------
n_samples_bootstrap : int
The total number of samples to draw for the bootstrap sample.
"""
if max_samples is None:
return n_samples
if isinstance(max_samples, Integral):
if max_samples > n_samples:
msg = "`max_samples` must be <= n_samples={} but got value {}"
raise ValueError(msg.format(n_samples, max_samples))
return max_samples
if isinstance(max_samples, Real):
return max(round(n_samples * max_samples), 1)
| _get_n_samples_bootstrap |
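A sketch of the three accepted forms of `max_samples` (private helper; import path assumed from the module path above):

from sklearn.ensemble._forest import _get_n_samples_bootstrap

print(_get_n_samples_bootstrap(n_samples=100, max_samples=None))  # 100 (use everything)
print(_get_n_samples_bootstrap(n_samples=100, max_samples=0.25))  # 25 (fraction)
print(_get_n_samples_bootstrap(n_samples=100, max_samples=42))    # 42 (exact count)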
scikit-learn | 240 | sklearn/utils/_response.py | def _get_response_values(
estimator,
X,
response_method,
pos_label=None,
return_response_method_used=False,
):
"""Compute the response values of a classifier, an outlier detector, or a regressor.
The response values are predictions whose shape depends on the problem type:
- for binary classification, it is a 1d array of shape `(n_samples,)`;
- for multiclass classification, it is a 2d array of shape `(n_samples, n_classes)`;
- for multilabel classification, it is a 2d array of shape `(n_samples, n_outputs)`;
- for outlier detection, it is a 1d array of shape `(n_samples,)`;
- for regression, it is a 1d array of shape `(n_samples,)`.
If `estimator` is a binary classifier, also return the label for the
effective positive class.
This utility is used primarily in the displays and the scikit-learn scorers.
.. versionadded:: 1.3
Parameters
----------
estimator : estimator instance
Fitted classifier, outlier detector, or regressor or a
fitted :class:`~sklearn.pipeline.Pipeline` in which the last estimator is a
classifier, an outlier detector, or a regressor.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
response_method : {"predict_proba", "predict_log_proba", "decision_function", \
"predict"} or list of such str
Specifies the response method to use to get a prediction from an estimator
(i.e. :term:`predict_proba`, :term:`predict_log_proba`,
:term:`decision_function` or :term:`predict`). Possible choices are:
- if `str`, it corresponds to the name to the method to return;
- if a list of `str`, it provides the method names in order of
preference. The method returned corresponds to the first method in
the list and which is implemented by `estimator`.
pos_label : int, float, bool or str, default=None
The class considered as the positive class when computing
the metrics. If `None` and target is 'binary', `estimator.classes_[1]` is
considered as the positive class.
return_response_method_used : bool, default=False
Whether to return the response method used to compute the response
values.
.. versionadded:: 1.4
Returns
-------
y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
(n_samples, n_outputs)
Target scores calculated from the provided `response_method`
and `pos_label`.
pos_label : int, float, bool, str or None
The class considered as the positive class when computing
the metrics. Returns `None` if `estimator` is a regressor or an outlier
detector.
response_method_used : str
The response method used to compute the response values. Only returned
if `return_response_method_used` is `True`.
.. versionadded:: 1.4
Raises
------
ValueError
If `pos_label` is not a valid label.
If the shape of `y_pred` is not consistent for binary classifier.
If the response method can be applied to a classifier only and
`estimator` is a regressor.
"""
| /usr/src/app/target_test_cases/failed_tests__get_response_values.txt | def _get_response_values(
estimator,
X,
response_method,
pos_label=None,
return_response_method_used=False,
):
"""Compute the response values of a classifier, an outlier detector, or a regressor.
The response values are predictions whose shape depends on the problem type:
- for binary classification, it is a 1d array of shape `(n_samples,)`;
- for multiclass classification, it is a 2d array of shape `(n_samples, n_classes)`;
- for multilabel classification, it is a 2d array of shape `(n_samples, n_outputs)`;
- for outlier detection, it is a 1d array of shape `(n_samples,)`;
- for regression, it is a 1d array of shape `(n_samples,)`.
If `estimator` is a binary classifier, also return the label for the
effective positive class.
This utility is used primarily in the displays and the scikit-learn scorers.
.. versionadded:: 1.3
Parameters
----------
estimator : estimator instance
Fitted classifier, outlier detector, or regressor or a
fitted :class:`~sklearn.pipeline.Pipeline` in which the last estimator is a
classifier, an outlier detector, or a regressor.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
response_method : {"predict_proba", "predict_log_proba", "decision_function", \
"predict"} or list of such str
Specifies the response method to use to get a prediction from an estimator
(i.e. :term:`predict_proba`, :term:`predict_log_proba`,
:term:`decision_function` or :term:`predict`). Possible choices are:
- if `str`, it corresponds to the name to the method to return;
- if a list of `str`, it provides the method names in order of
preference. The method returned corresponds to the first method in
the list and which is implemented by `estimator`.
pos_label : int, float, bool or str, default=None
The class considered as the positive class when computing
the metrics. If `None` and target is 'binary', `estimator.classes_[1]` is
considered as the positive class.
return_response_method_used : bool, default=False
Whether to return the response method used to compute the response
values.
.. versionadded:: 1.4
Returns
-------
y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or \
(n_samples, n_outputs)
Target scores calculated from the provided `response_method`
and `pos_label`.
pos_label : int, float, bool, str or None
The class considered as the positive class when computing
the metrics. Returns `None` if `estimator` is a regressor or an outlier
detector.
response_method_used : str
The response method used to compute the response values. Only returned
if `return_response_method_used` is `True`.
.. versionadded:: 1.4
Raises
------
ValueError
If `pos_label` is not a valid label.
If the shape of `y_pred` is not consistent for binary classifier.
If the response method can be applied to a classifier only and
`estimator` is a regressor.
"""
from sklearn.base import is_classifier, is_outlier_detector # noqa
if is_classifier(estimator):
prediction_method = _check_response_method(estimator, response_method)
classes = estimator.classes_
target_type = type_of_target(classes)
if target_type in ("binary", "multiclass"):
if pos_label is not None and pos_label not in classes.tolist():
raise ValueError(
f"pos_label={pos_label} is not a valid label: It should be "
f"one of {classes}"
)
elif pos_label is None and target_type == "binary":
pos_label = classes[-1]
y_pred = prediction_method(X)
if prediction_method.__name__ in ("predict_proba", "predict_log_proba"):
y_pred = _process_predict_proba(
y_pred=y_pred,
target_type=target_type,
classes=classes,
pos_label=pos_label,
)
elif prediction_method.__name__ == "decision_function":
y_pred = _process_decision_function(
y_pred=y_pred,
target_type=target_type,
classes=classes,
pos_label=pos_label,
)
elif is_outlier_detector(estimator):
prediction_method = _check_response_method(estimator, response_method)
y_pred, pos_label = prediction_method(X), None
else: # estimator is a regressor
if response_method != "predict":
raise ValueError(
f"{estimator.__class__.__name__} should either be a classifier to be "
f"used with response_method={response_method} or the response_method "
"should be 'predict'. Got a regressor with response_method="
f"{response_method} instead."
)
prediction_method = estimator.predict
y_pred, pos_label = prediction_method(X), None
if return_response_method_used:
return y_pred, pos_label, prediction_method.__name__
return y_pred, pos_label
| _get_response_values |
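A sketch on a binary classifier (private utility; import path assumed from the module path above, and the values in comments are expected shapes/labels, not exact probabilities):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.utils._response import _get_response_values

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
clf = LogisticRegression().fit(X, y)
# Binary case: a 1d array of positive-class probabilities and the pos_label used.
y_pred, pos_label = _get_response_values(clf, X, response_method="predict_proba")
print(y_pred.shape, pos_label)  # (4,) 1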
scikit-learn | 241 | sklearn/utils/_response.py | def _get_response_values_binary(
estimator, X, response_method, pos_label=None, return_response_method_used=False
):
"""Compute the response values of a binary classifier.
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a binary classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
response_method : {'auto', 'predict_proba', 'decision_function'}
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
pos_label : int, float, bool or str, default=None
The class considered as the positive class when computing
the metrics. By default, `estimator.classes_[1]` is
considered as the positive class.
return_response_method_used : bool, default=False
Whether to return the response method used to compute the response
values.
.. versionadded:: 1.5
Returns
-------
y_pred : ndarray of shape (n_samples,)
Target scores calculated from the provided response_method
and pos_label.
pos_label : int, float, bool or str
The class considered as the positive class when computing
the metrics.
response_method_used : str
The response method used to compute the response values. Only returned
if `return_response_method_used` is `True`.
.. versionadded:: 1.5
"""
| /usr/src/app/target_test_cases/failed_tests__get_response_values_binary.txt | def _get_response_values_binary(
estimator, X, response_method, pos_label=None, return_response_method_used=False
):
"""Compute the response values of a binary classifier.
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a binary classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
response_method : {'auto', 'predict_proba', 'decision_function'}
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
pos_label : int, float, bool or str, default=None
The class considered as the positive class when computing
the metrics. By default, `estimator.classes_[1]` is
considered as the positive class.
return_response_method_used : bool, default=False
Whether to return the response method used to compute the response
values.
.. versionadded:: 1.5
Returns
-------
y_pred : ndarray of shape (n_samples,)
Target scores calculated from the provided response_method
and pos_label.
pos_label : int, float, bool or str
The class considered as the positive class when computing
the metrics.
response_method_used : str
The response method used to compute the response values. Only returned
if `return_response_method_used` is `True`.
.. versionadded:: 1.5
"""
classification_error = "Expected 'estimator' to be a binary classifier."
check_is_fitted(estimator)
if not is_classifier(estimator):
raise ValueError(
classification_error + f" Got {estimator.__class__.__name__} instead."
)
elif len(estimator.classes_) != 2:
raise ValueError(
classification_error + f" Got {len(estimator.classes_)} classes instead."
)
if response_method == "auto":
response_method = ["predict_proba", "decision_function"]
return _get_response_values(
estimator,
X,
response_method,
pos_label=pos_label,
return_response_method_used=return_response_method_used,
)
| _get_response_values_binary |
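A sketch of the binary-only wrapper with `response_method="auto"` (private utility; same import-path assumption as above):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.utils._response import _get_response_values_binary

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array(["neg", "neg", "pos", "pos"])
clf = LogisticRegression().fit(X, y)
scores, pos_label, method = _get_response_values_binary(
    clf, X, response_method="auto", return_response_method_used=True
)
print(pos_label, method)  # 'pos' 'predict_proba' (tried before decision_function)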
scikit-learn | 242 | sklearn/manifold/_t_sne.py | def _gradient_descent(
objective,
p0,
it,
max_iter,
n_iter_check=1,
n_iter_without_progress=300,
momentum=0.8,
learning_rate=200.0,
min_gain=0.01,
min_grad_norm=1e-7,
verbose=0,
args=None,
kwargs=None,
):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost may be returned as NaN:
the objective is called with ``compute_error=False`` except every
n_iter_check steps and on the final iteration, where the error is needed.
p0 : array-like of shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
max_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int, default=1
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization.
momentum : float within (0.0, 1.0), default=0.8
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, default=200.0
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, default=0.01
Minimum individual gain for each parameter.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, default=0
Verbosity level.
args : sequence, default=None
Arguments to pass to objective function.
kwargs : dict, default=None
Keyword arguments to pass to objective function.
Returns
-------
p : ndarray of shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
| /usr/src/app/target_test_cases/failed_tests__gradient_descent.txt | def _gradient_descent(
objective,
p0,
it,
max_iter,
n_iter_check=1,
n_iter_without_progress=300,
momentum=0.8,
learning_rate=200.0,
min_gain=0.01,
min_grad_norm=1e-7,
verbose=0,
args=None,
kwargs=None,
):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost may be returned as NaN:
the objective is called with ``compute_error=False`` except every
n_iter_check steps and on the final iteration, where the error is needed.
p0 : array-like of shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
max_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int, default=1
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization.
momentum : float within (0.0, 1.0), default=0.8
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, default=200.0
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, default=0.01
Minimum individual gain for each parameter.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, default=0
Verbosity level.
args : sequence, default=None
Arguments to pass to objective function.
kwargs : dict, default=None
Keyword arguments to pass to objective function.
Returns
-------
p : ndarray of shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(float).max
best_error = np.finfo(float).max
best_iter = i = it
tic = time()
for i in range(it, max_iter):
check_convergence = (i + 1) % n_iter_check == 0
# only compute the error when needed
kwargs["compute_error"] = check_convergence or i == max_iter - 1
error, grad = objective(p, *args, **kwargs)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if check_convergence:
toc = time()
duration = toc - tic
tic = toc
grad_norm = linalg.norm(grad)
if verbose >= 2:
print(
"[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration)
)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print(
"[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress)
)
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print(
"[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm)
)
break
return p, error, i
| _gradient_descent |
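A toy sketch of the objective contract only, not how t-SNE itself drives the optimizer: the callable must return (cost, gradient) and accept the compute_error keyword this function injects. The quadratic objective and learning rate here are illustrative assumptions.

import numpy as np
from sklearn.manifold._t_sne import _gradient_descent

def quadratic(p, compute_error=True):
    # cost 0.5 * ||p||^2, gradient p; the cost may be skipped when not requested
    cost = 0.5 * np.sum(p ** 2) if compute_error else np.nan
    return cost, p.copy()

p0 = np.array([3.0, -2.0, 1.0])
p, error, last_iter = _gradient_descent(
    quadratic, p0, it=0, max_iter=200, learning_rate=0.1, verbose=0
)
print(np.round(p, 3), last_iter)  # parameters driven towards zero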
scikit-learn | 243 | sklearn/manifold/_spectral_embedding.py | def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node.
Parameters
----------
graph : array-like of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
node_id : int
The index of the query node of the graph.
Returns
-------
connected_components_matrix : array-like of shape (n_samples,)
An array of bool values indicating the indices of the nodes
belonging to the connected component of the given query
node.
"""
| /usr/src/app/target_test_cases/failed_tests__graph_connected_component.txt | def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node.
Parameters
----------
graph : array-like of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
node_id : int
The index of the query node of the graph.
Returns
-------
connected_components_matrix : array-like of shape (n_samples,)
An array of bool values indicating the indices of the nodes
belonging to the connected component of the given query
node.
"""
n_node = graph.shape[0]
if sparse.issparse(graph):
# speed up row-wise access to boolean connection mask
graph = graph.tocsr()
connected_nodes = np.zeros(n_node, dtype=bool)
nodes_to_explore = np.zeros(n_node, dtype=bool)
nodes_to_explore[node_id] = True
for _ in range(n_node):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
if sparse.issparse(graph):
# scipy not yet implemented 1D sparse slices; can be changed back to
# `neighbors = graph[i].toarray().ravel()` once implemented
neighbors = graph[[i], :].toarray().ravel()
else:
neighbors = graph[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
| _graph_connected_component |
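A sketch on a tiny dense adjacency matrix (private helper; import path assumed from the module path above):

import numpy as np
from sklearn.manifold._spectral_embedding import _graph_connected_component

# Adjacency of a 3-node graph with components {0, 1} and {2}.
graph = np.array([[0, 1, 0],
                  [1, 0, 0],
                  [0, 0, 0]])
print(_graph_connected_component(graph, 0))  # [ True  True False]
print(_graph_connected_component(graph, 2))  # [False False  True]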
scikit-learn | 244 | sklearn/manifold/_spectral_embedding.py | def _graph_is_connected(graph):
"""Return whether the graph is connected (True) or Not (False).
Parameters
----------
graph : {array-like, sparse matrix} of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not.
"""
| /usr/src/app/target_test_cases/failed_tests__graph_is_connected.txt | def _graph_is_connected(graph):
"""Return whether the graph is connected (True) or Not (False).
Parameters
----------
graph : {array-like, sparse matrix} of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not.
"""
if sparse.issparse(graph):
# Before Scipy 1.11.3, `connected_components` only supports 32-bit indices.
# PR: https://github.com/scipy/scipy/pull/18913
# First integration in 1.11.3: https://github.com/scipy/scipy/pull/19279
# TODO(jjerphan): Once SciPy 1.11.3 is the minimum supported version, use
# `accept_large_sparse=True`.
accept_large_sparse = sp_version >= parse_version("1.11.3")
graph = check_array(
graph, accept_sparse=True, accept_large_sparse=accept_large_sparse
)
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
| _graph_is_connected |
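A sketch exercising both the dense and the sparse code paths (same import-path assumption as above):

import numpy as np
from scipy import sparse
from sklearn.manifold._spectral_embedding import _graph_is_connected

triangle = np.array([[0, 1, 1],
                     [1, 0, 1],
                     [1, 1, 0]])
print(_graph_is_connected(triangle))                      # True (dense path)
print(_graph_is_connected(sparse.csr_matrix(triangle)))   # True (sparse path)
print(_graph_is_connected(np.array([[0, 1, 0],
                                    [1, 0, 0],
                                    [0, 0, 0]])))         # False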
scikit-learn | 245 | sklearn/inspection/_partial_dependence.py | def _grid_from_X(X, percentiles, is_categorical, grid_resolution):
"""Generate a grid of points based on the percentiles of X.
The grid is a Cartesian product between the columns of ``values``. The
j-th column of ``values`` consists of ``grid_resolution`` equally-spaced
points between the percentiles of the j-th column of X.
If ``grid_resolution`` is bigger than the number of unique values in the
j-th column of X, or if the feature is a categorical feature (by inspecting
`is_categorical`), then those unique values will be used instead.
Parameters
----------
X : array-like of shape (n_samples, n_target_features)
The data.
percentiles : tuple of float
The percentiles which are used to construct the extreme values of
the grid. Must be in [0, 1].
is_categorical : list of bool
For each feature, tells whether it is categorical or not. If a feature
is categorical, then the values used will be the unique ones
(i.e. categories) instead of the percentiles.
grid_resolution : int
The number of equally spaced points to be placed on the grid for each
feature.
Returns
-------
grid : ndarray of shape (n_points, n_target_features)
A value for each feature at each point in the grid. ``n_points`` is
always ``<= grid_resolution ** X.shape[1]``.
values : list of 1d ndarrays
The values with which the grid has been created. The size of each
array ``values[j]`` is either ``grid_resolution``, or the number of
unique values in ``X[:, j]``, whichever is smaller.
"""
| /usr/src/app/target_test_cases/failed_tests__grid_from_X.txt | def _grid_from_X(X, percentiles, is_categorical, grid_resolution):
"""Generate a grid of points based on the percentiles of X.
The grid is a Cartesian product between the columns of ``values``. The
j-th column of ``values`` consists of ``grid_resolution`` equally-spaced
points between the percentiles of the j-th column of X.
If ``grid_resolution`` is bigger than the number of unique values in the
j-th column of X, or if the feature is a categorical feature (by inspecting
`is_categorical`), then those unique values will be used instead.
Parameters
----------
X : array-like of shape (n_samples, n_target_features)
The data.
percentiles : tuple of float
The percentiles which are used to construct the extreme values of
the grid. Must be in [0, 1].
is_categorical : list of bool
For each feature, tells whether it is categorical or not. If a feature
is categorical, then the values used will be the unique ones
(i.e. categories) instead of the percentiles.
grid_resolution : int
The number of equally spaced points to be placed on the grid for each
feature.
Returns
-------
grid : ndarray of shape (n_points, n_target_features)
A value for each feature at each point in the grid. ``n_points`` is
always ``<= grid_resolution ** X.shape[1]``.
values : list of 1d ndarrays
The values with which the grid has been created. The size of each
array ``values[j]`` is either ``grid_resolution``, or the number of
unique values in ``X[:, j]``, whichever is smaller.
"""
if not isinstance(percentiles, Iterable) or len(percentiles) != 2:
raise ValueError("'percentiles' must be a sequence of 2 elements.")
if not all(0 <= x <= 1 for x in percentiles):
raise ValueError("'percentiles' values must be in [0, 1].")
if percentiles[0] >= percentiles[1]:
raise ValueError("percentiles[0] must be strictly less than percentiles[1].")
if grid_resolution <= 1:
raise ValueError("'grid_resolution' must be strictly greater than 1.")
values = []
# TODO: we should handle missing values (i.e. `np.nan`) specifically and store them
# in a different Bunch attribute.
for feature, is_cat in enumerate(is_categorical):
try:
uniques = np.unique(_safe_indexing(X, feature, axis=1))
except TypeError as exc:
# `np.unique` will fail in the presence of `np.nan` and `str` categories
# due to sorting. Temporary, we reraise an error explaining the problem.
raise ValueError(
f"The column #{feature} contains mixed data types. Finding unique "
"categories fail due to sorting. It usually means that the column "
"contains `np.nan` values together with `str` categories. Such use "
"case is not yet supported in scikit-learn."
) from exc
if is_cat or uniques.shape[0] < grid_resolution:
# Use the unique values either because:
# - feature has low resolution use unique values
# - feature is categorical
axis = uniques
else:
# create axis based on percentiles and grid resolution
emp_percentiles = mquantiles(
_safe_indexing(X, feature, axis=1), prob=percentiles, axis=0
)
if np.allclose(emp_percentiles[0], emp_percentiles[1]):
raise ValueError(
"percentiles are too close to each other, "
"unable to build the grid. Please choose percentiles "
"that are further apart."
)
axis = np.linspace(
emp_percentiles[0],
emp_percentiles[1],
num=grid_resolution,
endpoint=True,
)
values.append(axis)
return cartesian(values), values
| _grid_from_X |
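A sketch with one numeric and one categorical column (private helper; import path assumed from the module path above):

import numpy as np
from sklearn.inspection._partial_dependence import _grid_from_X

# Column 0 is numeric, column 1 is flagged as categorical via is_categorical.
X = np.array([[0.0, 10.0], [1.0, 10.0], [2.0, 20.0], [3.0, 20.0]])
grid, values = _grid_from_X(
    X, percentiles=(0.05, 0.95), is_categorical=[False, True], grid_resolution=3
)
print([v.shape for v in values])  # [(3,), (2,)]: 3 grid points, then 2 categories
print(grid.shape)                 # (6, 2), the Cartesian product of the axes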
scikit-learn | 246 | sklearn/cluster/_agglomerative.py | def _hc_cut(n_clusters, children, n_leaves):
"""Function cutting the ward tree for a given number of clusters.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to form.
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_leaves : int
Number of leaves of the tree.
Returns
-------
labels : array [n_samples]
Cluster labels for each point.
"""
| /usr/src/app/target_test_cases/failed_tests__hc_cut.txt | def _hc_cut(n_clusters, children, n_leaves):
"""Function cutting the ward tree for a given number of clusters.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to form.
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_leaves : int
Number of leaves of the tree.
Returns
-------
labels : array [n_samples]
Cluster labels for each point.
"""
if n_clusters > n_leaves:
raise ValueError(
"Cannot extract more clusters than samples: "
f"{n_clusters} clusters were given for a tree with {n_leaves} leaves."
)
# In this function, we store nodes as a heap to avoid recomputing
# the max of the nodes: the first element is always the smallest
# We use negated indices as heaps work on smallest elements, and we
# are interested in largest elements
# children[-1] is the root of the tree
nodes = [-(max(children[-1]) + 1)]
for _ in range(n_clusters - 1):
# As we have a heap, nodes[0] is the smallest element
these_children = children[-nodes[0] - n_leaves]
# Insert the 2 children and remove the largest node
heappush(nodes, -these_children[0])
heappushpop(nodes, -these_children[1])
label = np.zeros(n_leaves, dtype=np.intp)
for i, node in enumerate(nodes):
label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
return label
| _hc_cut |
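A sketch cutting a Ward tree built on toy data into two flat clusters; ward_tree is likewise a low-level API, and the exact label numbering is arbitrary.

import numpy as np
from sklearn.cluster._agglomerative import ward_tree, _hc_cut

X = np.array([[0.0], [0.1], [0.2], [10.0], [10.1], [10.2]])
children, _, n_leaves, _ = ward_tree(X)
labels = _hc_cut(n_clusters=2, children=children, n_leaves=n_leaves)
print(labels)  # two flat clusters, e.g. [1 1 1 0 0 0]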
scikit-learn | 247 | sklearn/linear_model/_huber.py | def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
"""Returns the Huber loss and the gradient.
Parameters
----------
w : ndarray, shape (n_features + 1,) or (n_features + 2,)
Feature vector.
w[:n_features] gives the coefficients
w[-1] gives the scale factor and if the intercept is fit w[-2]
gives the intercept factor.
X : ndarray of shape (n_samples, n_features)
Input data.
y : ndarray of shape (n_samples,)
Target vector.
epsilon : float
Robustness of the Huber estimator.
alpha : float
Regularization parameter.
sample_weight : ndarray of shape (n_samples,), default=None
Weight assigned to each sample.
Returns
-------
loss : float
Huber loss.
gradient : ndarray, shape (len(w))
Returns the derivative of the Huber loss with respect to each
coefficient, intercept and the scale as a vector.
"""
| /usr/src/app/target_test_cases/failed_tests__huber_loss_and_gradient.txt | def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
"""Returns the Huber loss and the gradient.
Parameters
----------
w : ndarray, shape (n_features + 1,) or (n_features + 2,)
Feature vector.
w[:n_features] gives the coefficients
w[-1] gives the scale factor and if the intercept is fit w[-2]
gives the intercept factor.
X : ndarray of shape (n_samples, n_features)
Input data.
y : ndarray of shape (n_samples,)
Target vector.
epsilon : float
Robustness of the Huber estimator.
alpha : float
Regularization parameter.
sample_weight : ndarray of shape (n_samples,), default=None
Weight assigned to each sample.
Returns
-------
loss : float
Huber loss.
gradient : ndarray, shape (len(w))
Returns the derivative of the Huber loss with respect to each
coefficient, intercept and the scale as a vector.
"""
_, n_features = X.shape
fit_intercept = n_features + 2 == w.shape[0]
if fit_intercept:
intercept = w[-2]
sigma = w[-1]
w = w[:n_features]
n_samples = np.sum(sample_weight)
# Calculate the values where |(y - X'w - c) / sigma| > epsilon
# The values above this threshold are outliers.
linear_loss = y - safe_sparse_dot(X, w)
if fit_intercept:
linear_loss -= intercept
abs_linear_loss = np.abs(linear_loss)
outliers_mask = abs_linear_loss > epsilon * sigma
# Calculate the linear loss due to the outliers.
# This is equal to (2 * M * |(y - X'w - c) / sigma| - M**2) * sigma
outliers = abs_linear_loss[outliers_mask]
num_outliers = np.count_nonzero(outliers_mask)
n_non_outliers = X.shape[0] - num_outliers
# n_sw_outliers includes the weight given to the outliers while
# num_outliers is just the number of outliers.
outliers_sw = sample_weight[outliers_mask]
n_sw_outliers = np.sum(outliers_sw)
outlier_loss = (
2.0 * epsilon * np.sum(outliers_sw * outliers)
- sigma * n_sw_outliers * epsilon**2
)
# Calculate the quadratic loss due to the non-outliers.
# This is equal to |(y - X'w - c)**2 / sigma**2| * sigma
non_outliers = linear_loss[~outliers_mask]
weighted_non_outliers = sample_weight[~outliers_mask] * non_outliers
weighted_loss = np.dot(weighted_non_outliers.T, non_outliers)
squared_loss = weighted_loss / sigma
if fit_intercept:
grad = np.zeros(n_features + 2)
else:
grad = np.zeros(n_features + 1)
# Gradient due to the squared loss.
X_non_outliers = -axis0_safe_slice(X, ~outliers_mask, n_non_outliers)
grad[:n_features] = (
2.0 / sigma * safe_sparse_dot(weighted_non_outliers, X_non_outliers)
)
# Gradient due to the linear loss.
signed_outliers = np.ones_like(outliers)
signed_outliers_mask = linear_loss[outliers_mask] < 0
signed_outliers[signed_outliers_mask] = -1.0
X_outliers = axis0_safe_slice(X, outliers_mask, num_outliers)
sw_outliers = sample_weight[outliers_mask] * signed_outliers
grad[:n_features] -= 2.0 * epsilon * (safe_sparse_dot(sw_outliers, X_outliers))
# Gradient due to the penalty.
grad[:n_features] += alpha * 2.0 * w
# Gradient due to sigma.
grad[-1] = n_samples
grad[-1] -= n_sw_outliers * epsilon**2
grad[-1] -= squared_loss / sigma
# Gradient due to the intercept.
if fit_intercept:
grad[-2] = -2.0 * np.sum(weighted_non_outliers) / sigma
grad[-2] -= 2.0 * epsilon * np.sum(sw_outliers)
loss = n_samples * sigma + squared_loss + outlier_loss
loss += alpha * np.dot(w, w)
return loss, grad
| _huber_loss_and_gradient |
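A sketch evaluating the objective at a hand-built parameter vector; the layout of w ([coefficients, intercept, sigma]) follows the docstring above, and sample_weight is passed explicitly (as HuberRegressor does). Import path assumed from the module path above.

import numpy as np
from sklearn.linear_model._huber import _huber_loss_and_gradient

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(50)
# w packs [coef (3), intercept, sigma]; sigma must be positive.
w = np.zeros(5)
w[-1] = 1.0
loss, grad = _huber_loss_and_gradient(
    w, X, y, epsilon=1.35, alpha=1e-4, sample_weight=np.ones(50)
)
print(round(loss, 3), grad.shape)  # scalar loss and a gradient of length 5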
scikit-learn | 248 | sklearn/manifold/_t_sne.py | def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : ndarray of shape (n_samples, n_samples)
Squared pairwise distances between samples, passed as a full square
matrix (the returned joint probabilities, in contrast, are condensed).
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : ndarray of shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
| /usr/src/app/target_test_cases/failed_tests__joint_probabilities.txt | def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : ndarray of shape (n_samples, n_samples)
Squared pairwise distances between samples, passed as a full square
matrix (the returned joint probabilities, in contrast, are condensed).
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : ndarray of shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = distances.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose
)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
| _joint_probabilities |
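A sketch matching how TSNE's exact method calls this helper: the input is the full matrix of squared Euclidean distances, and the returned P is condensed (each pair stored once, so it sums to about 0.5). Import path assumed from the module path above.

import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.manifold._t_sne import _joint_probabilities

X = np.random.RandomState(0).randn(30, 4)
distances = squareform(pdist(X, "sqeuclidean"))   # full (30, 30) squared distances
P = _joint_probabilities(distances, desired_perplexity=10.0, verbose=0)
print(P.shape, np.isclose(2 * P.sum(), 1.0))      # (435,) True: each pair stored once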
scikit-learn | 249 | sklearn/manifold/_t_sne.py | def _joint_probabilities_nn(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
is O(N^2), but limiting the joint probability to nearest neighbors improves
this substantially to O(uN).
Parameters
----------
distances : sparse matrix of shape (n_samples, n_samples)
Distances of samples to its n_neighbors nearest neighbors. All other
distances are left to zero (and are not materialized in memory).
Matrix should be of CSR format.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : sparse matrix of shape (n_samples, n_samples)
Condensed joint probability matrix with only nearest neighbors. Matrix
will be of CSR format.
"""
| /usr/src/app/target_test_cases/failed_tests__joint_probabilities_nn.txt | def _joint_probabilities_nn(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
is O(N^2), but limiting the joint probability to nearest neighbors improves
this substantially to O(uN).
Parameters
----------
distances : sparse matrix of shape (n_samples, n_samples)
Distances of samples to its n_neighbors nearest neighbors. All other
distances are left to zero (and are not materialized in memory).
Matrix should be of CSR format.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : sparse matrix of shape (n_samples, n_samples)
Condensed joint probability matrix with only nearest neighbors. Matrix
will be of CSR format.
"""
t0 = time()
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances.sort_indices()
n_samples = distances.shape[0]
distances_data = distances.data.reshape(n_samples, -1)
distances_data = distances_data.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances_data, desired_perplexity, verbose
)
assert np.all(np.isfinite(conditional_P)), "All probabilities should be finite"
# Symmetrize the joint probability distribution using sparse operations
P = csr_matrix(
(conditional_P.ravel(), distances.indices, distances.indptr),
shape=(n_samples, n_samples),
)
P = P + P.T
# Normalize the joint probability distribution
sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
P /= sum_P
assert np.all(np.abs(P.data) <= 1.0)
if verbose >= 2:
duration = time() - t0
print("[t-SNE] Computed conditional probabilities in {:.3f}s".format(duration))
return P
| _joint_probabilities_nn |
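A sketch building the CSR input the way TSNE(method="barnes_hut") does: squared distances to a fixed number of nearest neighbors per row. The neighbor count and perplexity here are illustrative.

import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.manifold._t_sne import _joint_probabilities_nn

X = np.random.RandomState(0).randn(50, 4)
nn = NearestNeighbors(n_neighbors=15).fit(X)
D = nn.kneighbors_graph(mode="distance")   # CSR, 15 stored distances per row
D.data **= 2                               # squared Euclidean, as TSNE uses
P = _joint_probabilities_nn(D, desired_perplexity=5.0, verbose=0)
print(P.shape, P.nnz)                      # (50, 50) sparse CSR joint probabilities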
scikit-learn | 250 | sklearn/manifold/_t_sne.py | def _kl_divergence(
params,
P,
degrees_of_freedom,
n_samples,
n_components,
skip_num_points=0,
compute_error=True,
):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
P : ndarray of shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
compute_error: bool, default=True
If False, the kl_divergence is not computed and returns NaN.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
| /usr/src/app/target_test_cases/failed_tests__kl_divergence.txt | def _kl_divergence(
params,
P,
degrees_of_freedom,
n_samples,
n_components,
skip_num_points=0,
compute_error=True,
):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
P : ndarray of shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
compute_error: bool, default=True
If False, the kl_divergence is not computed and returns NaN.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
dist = pdist(X_embedded, "sqeuclidean")
dist /= degrees_of_freedom
dist += 1.0
dist **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if compute_error:
kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
else:
kl_divergence = np.nan
# Gradient: dC/dY
# pdist always returns double precision distances. Thus we need to take
grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
PQd = squareform((P - Q) * dist)
for i in range(skip_num_points, n_samples):
grad[i] = np.dot(np.ravel(PQd[i], order="K"), X_embedded[i] - X_embedded)
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
| _kl_divergence |
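A sketch evaluating the exact objective for a random flattened 2-D embedding; degrees_of_freedom=1 matches what TSNE uses for 2 output components. Import path assumed from the module path above.

import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.manifold._t_sne import _joint_probabilities, _kl_divergence

rng = np.random.RandomState(0)
X = rng.randn(30, 4)
P = _joint_probabilities(squareform(pdist(X, "sqeuclidean")), 10.0, verbose=0)
params = 1e-4 * rng.randn(30 * 2)          # flattened random 2-D embedding
kl, grad = _kl_divergence(params, P, degrees_of_freedom=1,
                          n_samples=30, n_components=2)
print(round(kl, 3), grad.shape)            # scalar KL value and a (60,) gradient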
scikit-learn | 251 | sklearn/manifold/_t_sne.py | def _kl_divergence_bh(
params,
P,
degrees_of_freedom,
n_samples,
n_components,
angle=0.5,
skip_num_points=0,
verbose=False,
compute_error=True,
num_threads=1,
):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2).
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
P : sparse matrix of shape (n_samples, n_sample)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized. Matrix should be of CSR format.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float, default=0.5
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int, default=False
Verbosity level.
compute_error: bool, default=True
If False, the kl_divergence is not computed and returns NaN.
num_threads : int, default=1
Number of threads used to compute the gradient. This is set here to
avoid calling _openmp_effective_n_threads for each gradient step.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
| /usr/src/app/target_test_cases/failed_tests__kl_divergence_bh.txt | def _kl_divergence_bh(
params,
P,
degrees_of_freedom,
n_samples,
n_components,
angle=0.5,
skip_num_points=0,
verbose=False,
compute_error=True,
num_threads=1,
):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2).
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
P : sparse matrix of shape (n_samples, n_sample)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized. Matrix should be of CSR format.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float, default=0.5
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int, default=False
Verbosity level.
compute_error: bool, default=True
If False, the kl_divergence is not computed and returns NaN.
num_threads : int, default=1
Number of threads used to compute the gradient. This is set here to
avoid calling _openmp_effective_n_threads for each gradient step.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = params.astype(np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
val_P = P.data.astype(np.float32, copy=False)
neighbors = P.indices.astype(np.int64, copy=False)
indptr = P.indptr.astype(np.int64, copy=False)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(
val_P,
X_embedded,
neighbors,
indptr,
grad,
angle,
n_components,
verbose,
dof=degrees_of_freedom,
compute_error=compute_error,
num_threads=num_threads,
)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
| _kl_divergence_bh |
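The same idea with the sparse neighbor-based P; the function casts params to float32 before calling the compiled Barnes-Hut gradient. This is a sketch only, with the neighbor count and scale chosen for illustration.

import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.manifold._t_sne import _joint_probabilities_nn, _kl_divergence_bh

rng = np.random.RandomState(0)
X = rng.randn(50, 4)
D = NearestNeighbors(n_neighbors=15).fit(X).kneighbors_graph(mode="distance")
D.data **= 2
P = _joint_probabilities_nn(D, desired_perplexity=5.0, verbose=0)
params = 1e-4 * rng.randn(50 * 2).astype(np.float32)
kl, grad = _kl_divergence_bh(params, P, degrees_of_freedom=1,
                             n_samples=50, n_components=2, verbose=0)
print(grad.shape)  # (100,) single-precision gradient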
scikit-learn | 252 | sklearn/cluster/_kmeans.py | def _labels_inertia(X, sample_weight, centers, n_threads=1, return_inertia=True):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples to assign to the labels. If sparse matrix, must
be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers.
n_threads : int, default=1
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
return_inertia : bool, default=True
Whether to compute and return the inertia.
Returns
-------
labels : ndarray of shape (n_samples,)
The resulting assignment.
inertia : float
Sum of squared distances of samples to their closest cluster center.
Inertia is only returned if return_inertia is True.
"""
| /usr/src/app/target_test_cases/failed_tests__labels_inertia.txt | def _labels_inertia(X, sample_weight, centers, n_threads=1, return_inertia=True):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples to assign to the labels. If sparse matrix, must
be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers.
n_threads : int, default=1
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
return_inertia : bool, default=True
Whether to compute and return the inertia.
Returns
-------
labels : ndarray of shape (n_samples,)
The resulting assignment.
inertia : float
Sum of squared distances of samples to their closest cluster center.
Inertia is only returned if return_inertia is True.
"""
n_samples = X.shape[0]
n_clusters = centers.shape[0]
labels = np.full(n_samples, -1, dtype=np.int32)
center_shift = np.zeros(n_clusters, dtype=centers.dtype)
if sp.issparse(X):
_labels = lloyd_iter_chunked_sparse
_inertia = _inertia_sparse
else:
_labels = lloyd_iter_chunked_dense
_inertia = _inertia_dense
_labels(
X,
sample_weight,
centers,
centers_new=None,
weight_in_clusters=None,
labels=labels,
center_shift=center_shift,
n_threads=n_threads,
update_centers=False,
)
if return_inertia:
inertia = _inertia(X, sample_weight, centers, labels, n_threads)
return labels, inertia
return labels
| _labels_inertia |
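A sketch of the E-step on fixed centers (private helper; import path assumed from the module path above). Arrays are plain C-contiguous float64 so the underlying Cython routines accept them.

import numpy as np
from sklearn.cluster._kmeans import _labels_inertia

X = np.array([[0.0, 0.0], [0.2, 0.1], [5.0, 5.0], [5.1, 4.9]])
sample_weight = np.ones(X.shape[0])
centers = np.array([[0.0, 0.0], [5.0, 5.0]])
labels, inertia = _labels_inertia(X, sample_weight, centers, n_threads=1)
print(labels, round(float(inertia), 3))  # [0 0 1 1] 0.07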
scikit-learn | 253 | sklearn/linear_model/_least_angle.py | def _lars_path_residues(
X_train,
y_train,
X_test,
y_test,
Gram=None,
copy=True,
method="lar",
verbose=False,
fit_intercept=True,
max_iter=500,
eps=np.finfo(float).eps,
positive=False,
):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array-like of shape (n_samples, n_features)
The data to fit the LARS on
y_train : array-like of shape (n_samples,)
The target variable to fit LARS on
X_test : array-like of shape (n_samples, n_features)
The data to compute the residues on
y_test : array-like of shape (n_samples,)
The target variable to compute the residues on
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : bool, default=True
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : {'lar' , 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : bool or int, default=False
Sets the amount of verbosity
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array-like of shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas)
Coefficients along the path
residues : array-like of shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
| /usr/src/app/target_test_cases/failed_tests__lars_path_residues.txt | def _lars_path_residues(
X_train,
y_train,
X_test,
y_test,
Gram=None,
copy=True,
method="lar",
verbose=False,
fit_intercept=True,
max_iter=500,
eps=np.finfo(float).eps,
positive=False,
):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array-like of shape (n_samples, n_features)
The data to fit the LARS on
y_train : array-like of shape (n_samples,)
The target variable to fit LARS on
X_test : array-like of shape (n_samples, n_features)
The data to compute the residues on
y_test : array-like of shape (n_samples,)
The target variable to compute the residues on
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : bool, default=True
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : {'lar' , 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : bool or int, default=False
Sets the amount of verbosity
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array-like of shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas)
Coefficients along the path
residues : array-like of shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
alphas, active, coefs = lars_path(
X_train,
y_train,
Gram=Gram,
copy_X=False,
copy_Gram=False,
method=method,
verbose=max(0, verbose - 1),
max_iter=max_iter,
eps=eps,
positive=positive,
)
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
| _lars_path_residues |
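A sketch on synthetic data (private helper used internally by the LARS cross-validation estimators; import path assumed from the module path above):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.linear_model._least_angle import _lars_path_residues

X, y = make_regression(n_samples=60, n_features=5, noise=1.0, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
alphas, active, coefs, residues = _lars_path_residues(
    X_tr, y_tr, X_te, y_te, method="lasso", max_iter=50
)
print(coefs.shape, residues.shape)  # (5, n_alphas) and (n_alphas, n_test)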
scikit-learn | 254 | sklearn/linear_model/_logistic.py | def _log_reg_scoring_path(
X,
y,
train,
test,
*,
pos_class,
Cs,
scoring,
fit_intercept,
max_iter,
tol,
class_weight,
verbose,
solver,
penalty,
dual,
intercept_scaling,
multi_class,
random_state,
max_squared_sum,
sample_weight,
l1_ratio,
score_params,
):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or list of floats
Each of the values in Cs describes the inverse of
regularization strength. If Cs is as an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
scoring : callable
A string (see :ref:`scoring_parameter`) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced'
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}
Decides which solver to use.
penalty : {'l1', 'l2', 'elasticnet'}
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
max_squared_sum : float
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
score_params : dict
Parameters to pass to the `score` method of the underlying scorer.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray of shape (n_cs,)
Scores obtained for each Cs.
n_iter : ndarray of shape (n_cs,)
Actual number of iterations for each Cs.
"""
| /usr/src/app/target_test_cases/failed_tests__log_reg_scoring_path.txt | def _log_reg_scoring_path(
X,
y,
train,
test,
*,
pos_class,
Cs,
scoring,
fit_intercept,
max_iter,
tol,
class_weight,
verbose,
solver,
penalty,
dual,
intercept_scaling,
multi_class,
random_state,
max_squared_sum,
sample_weight,
l1_ratio,
score_params,
):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or list of floats
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
scoring : callable
A string (see :ref:`scoring_parameter`) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced'
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}
Decides which solver to use.
penalty : {'l1', 'l2', 'elasticnet'}
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
max_squared_sum : float
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
score_params : dict
Parameters to pass to the `score` method of the underlying scorer.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray of shape (n_cs,)
Scores obtained for each Cs.
n_iter : ndarray of shape (n_cs,)
Actual number of iterations for each Cs.
"""
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
sw_train, sw_test = None, None
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sw_train = sample_weight[train]
sw_test = sample_weight[test]
coefs, Cs, n_iter = _logistic_regression_path(
X_train,
y_train,
Cs=Cs,
l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
pos_class=pos_class,
multi_class=multi_class,
tol=tol,
verbose=verbose,
dual=dual,
penalty=penalty,
intercept_scaling=intercept_scaling,
random_state=random_state,
check_input=False,
max_squared_sum=max_squared_sum,
sample_weight=sw_train,
)
log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == "ovr":
log_reg.classes_ = np.array([-1, 1])
elif multi_class == "multinomial":
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError(
"multi_class should be either multinomial or ovr, got %d" % multi_class
)
if pos_class is not None:
mask = y_test == pos_class
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.0
scores = list()
scoring = get_scorer(scoring)
for w in coefs:
if multi_class == "ovr":
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.0
if scoring is None:
scores.append(log_reg.score(X_test, y_test, sample_weight=sw_test))
else:
score_params = score_params or {}
score_params = _check_method_params(X=X, params=score_params, indices=test)
scores.append(scoring(log_reg, X_test, y_test, **score_params))
return coefs, Cs, np.array(scores), n_iter
| _log_reg_scoring_path |
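Conceptually, this helper fits a regularization path on the training fold and scores each C on the held-out fold. A simplified sketch using only the public estimator (synthetic data, no warm starting or custom scorer):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

Cs = np.logspace(-4, 4, 10)
scores = []
for C in Cs:
    clf = LogisticRegression(C=C, solver="lbfgs", max_iter=1000).fit(X_tr, y_tr)
    scores.append(clf.score(X_te, y_te))   # accuracy on the held-out fold
print(np.round(scores, 3))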
scikit-learn | 255 | sklearn/mixture/_bayesian_mixture.py | def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features):
"""Compute the log of the Wishart distribution normalization term.
Parameters
----------
degrees_of_freedom : array-like of shape (n_components,)
The number of degrees of freedom on the covariance Wishart
distributions.
log_det_precisions_chol : array-like of shape (n_components,)
The determinant of the precision matrix for each component.
n_features : int
The number of features.
Returns
-------
log_wishart_norm : array-like of shape (n_components,)
The log normalization of the Wishart distribution.
"""
| /usr/src/app/target_test_cases/failed_tests__log_wishart_norm.txt | def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features):
"""Compute the log of the Wishart distribution normalization term.
Parameters
----------
degrees_of_freedom : array-like of shape (n_components,)
The number of degrees of freedom on the covariance Wishart
distributions.
log_det_precisions_chol : array-like of shape (n_components,)
The determinant of the precision matrix for each component.
n_features : int
The number of features.
Returns
-------
log_wishart_norm : array-like of shape (n_components,)
The log normalization of the Wishart distribution.
"""
# To simplify the computation we have removed the np.log(np.pi) term
return -(
degrees_of_freedom * log_det_precisions_chol
+ degrees_of_freedom * n_features * 0.5 * math.log(2.0)
+ np.sum(
gammaln(0.5 * (degrees_of_freedom - np.arange(n_features)[:, np.newaxis])),
0,
)
)
| _log_wishart_norm |
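For a single component, the quantity computed above is -(nu * log|chol(precision)| + nu * p / 2 * log 2 + sum over i of ln Gamma((nu - i) / 2)), with the log(pi) term of the multivariate gamma function dropped. A standalone numpy/scipy sketch with toy values:

import numpy as np
from scipy.special import gammaln

def log_wishart_norm_single(nu, log_det_precision_chol, p):
    # log normalization of one Wishart factor; the log(pi) term of the
    # multivariate gamma function is dropped, matching the helper above
    return -(
        nu * log_det_precision_chol
        + nu * p * 0.5 * np.log(2.0)
        + gammaln(0.5 * (nu - np.arange(p))).sum()
    )

p, nu = 3, 7.0
precision_chol = np.linalg.cholesky(2.0 * np.eye(p))   # toy Cholesky factor
log_det = np.log(np.diag(precision_chol)).sum()
print(log_wishart_norm_single(nu, log_det, p))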
scikit-learn | 256 | sklearn/linear_model/_logistic.py | def _logistic_regression_path(
X,
y,
pos_class=None,
Cs=10,
fit_intercept=True,
max_iter=100,
tol=1e-4,
verbose=0,
solver="lbfgs",
coef=None,
class_weight=None,
dual=False,
penalty="l2",
intercept_scaling=1.0,
multi_class="auto",
random_state=None,
check_input=True,
max_squared_sum=None,
sample_weight=None,
l1_ratio=None,
n_threads=1,
):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or array-like of shape (n_cs,), default=10
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool, default=True
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
default='lbfgs'
Numerical solver to use.
coef : array-like of shape (n_features,), default=None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multi_class='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array of shape (n_cs,)
Actual number of iterations for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
| /usr/src/app/target_test_cases/failed_tests__logistic_regression_path.txt | def _logistic_regression_path(
X,
y,
pos_class=None,
Cs=10,
fit_intercept=True,
max_iter=100,
tol=1e-4,
verbose=0,
solver="lbfgs",
coef=None,
class_weight=None,
dual=False,
penalty="l2",
intercept_scaling=1.0,
multi_class="auto",
random_state=None,
check_input=True,
max_squared_sum=None,
sample_weight=None,
l1_ratio=None,
n_threads=1,
):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or array-like of shape (n_cs,), default=10
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool, default=True
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \
default='lbfgs'
Numerical solver to use.
coef : array-like of shape (n_features,), default=None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multi_class='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array of shape (n_cs,)
Actual number of iterations for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(
X,
accept_sparse="csr",
dtype=np.float64,
accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
)
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
n_samples, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != "multinomial":
if classes.size > 2:
raise ValueError("To fit OvR, use the pos_class argument")
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
if sample_weight is not None or class_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype, copy=True)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or (
multi_class == "multinomial" and class_weight is not None
):
class_weight_ = compute_class_weight(class_weight, classes=classes, y=y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. For the
# multinomial case this is not necessary.
if multi_class == "ovr":
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask = y == pos_class
y_bin = np.ones(y.shape, dtype=X.dtype)
if solver == "liblinear":
mask_classes = np.array([-1, 1])
y_bin[~mask] = -1.0
else:
# HalfBinomialLoss, used for those solvers, represents y in [0, 1] instead
# of in [-1, 1].
mask_classes = np.array([0, 1])
y_bin[~mask] = 0.0
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(
class_weight, classes=mask_classes, y=y_bin
)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver in ["sag", "saga", "lbfgs", "newton-cg"]:
# SAG, lbfgs and newton-cg multinomial solvers need LabelEncoder,
# not LabelBinarizer, i.e. y as a 1d-array of integers.
# LabelEncoder also saves memory compared to LabelBinarizer, especially
# when n_classes is large.
le = LabelEncoder()
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
else:
# For liblinear solver, apply LabelBinarizer, i.e. y is one-hot encoded.
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
w0 = np.zeros(
(classes.size, n_features + int(fit_intercept)), order="F", dtype=X.dtype
)
# IMPORTANT NOTE:
# All solvers relying on LinearModelLoss need to scale the penalty with n_samples
# or the sum of sample weights because the implemented logistic regression
# objective here is (unfortunately)
# C * sum(pointwise_loss) + penalty
# instead of (as LinearModelLoss does)
# mean(pointwise_loss) + 1/C * penalty
if solver in ["lbfgs", "newton-cg", "newton-cholesky"]:
# This needs to be calculated after sample_weight is multiplied by
# class_weight. It is even tested that passing class_weight is equivalent to
# passing sample_weights according to class_weight.
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == "ovr":
if coef.size not in (n_features, w0.size):
raise ValueError(
"Initialization coef is of shape %d, expected shape %d or %d"
% (coef.size, n_features, w0.size)
)
w0[: coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if coef.shape[0] != n_classes or coef.shape[1] not in (
n_features,
n_features + 1,
):
raise ValueError(
"Initialization coef is of shape (%d, %d), expected "
"shape (%d, %d) or (%d, %d)"
% (
coef.shape[0],
coef.shape[1],
classes.size,
n_features,
classes.size,
n_features + 1,
)
)
if n_classes == 1:
w0[0, : coef.shape[1]] = -coef
w0[1, : coef.shape[1]] = coef
else:
w0[:, : coef.shape[1]] = coef
if multi_class == "multinomial":
if solver in ["lbfgs", "newton-cg"]:
# scipy.optimize.minimize and newton-cg accept only ravelled parameters,
# i.e. 1d-arrays. LinearModelLoss expects classes to be contiguous and
# reconstructs the 2d-array via w0.reshape((n_classes, -1), order="F").
# As w0 is F-contiguous, ravel(order="F") also avoids a copy.
w0 = w0.ravel(order="F")
loss = LinearModelLoss(
base_loss=HalfMultinomialLoss(n_classes=classes.size),
fit_intercept=fit_intercept,
)
target = Y_multi
if solver == "lbfgs":
func = loss.loss_gradient
elif solver == "newton-cg":
func = loss.loss
grad = loss.gradient
hess = loss.gradient_hessian_product # hess = [gradient, hessp]
warm_start_sag = {"coef": w0.T}
else:
target = y_bin
if solver == "lbfgs":
loss = LinearModelLoss(
base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
)
func = loss.loss_gradient
elif solver == "newton-cg":
loss = LinearModelLoss(
base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
)
func = loss.loss
grad = loss.gradient
hess = loss.gradient_hessian_product # hess = [gradient, hessp]
elif solver == "newton-cholesky":
loss = LinearModelLoss(
base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept
)
warm_start_sag = {"coef": np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == "lbfgs":
l2_reg_strength = 1.0 / (C * sw_sum)
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)
]
opt_res = optimize.minimize(
func,
w0,
method="L-BFGS-B",
jac=True,
args=(X, target, sample_weight, l2_reg_strength, n_threads),
options={
"maxiter": max_iter,
"maxls": 50, # default is 20
"iprint": iprint,
"gtol": tol,
"ftol": 64 * np.finfo(float).eps,
},
)
n_iter_i = _check_optimize_result(
solver,
opt_res,
max_iter,
extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,
)
w0, loss = opt_res.x, opt_res.fun
elif solver == "newton-cg":
l2_reg_strength = 1.0 / (C * sw_sum)
args = (X, target, sample_weight, l2_reg_strength, n_threads)
w0, n_iter_i = _newton_cg(
grad_hess=hess,
func=func,
grad=grad,
x0=w0,
args=args,
maxiter=max_iter,
tol=tol,
verbose=verbose,
)
elif solver == "newton-cholesky":
l2_reg_strength = 1.0 / (C * sw_sum)
sol = NewtonCholeskySolver(
coef=w0,
linear_loss=loss,
l2_reg_strength=l2_reg_strength,
tol=tol,
max_iter=max_iter,
n_threads=n_threads,
verbose=verbose,
)
w0 = sol.solve(X=X, y=target, sample_weight=sample_weight)
n_iter_i = sol.iteration
elif solver == "liblinear":
(
coef_,
intercept_,
n_iter_i,
) = _fit_liblinear(
X,
target,
C,
fit_intercept,
intercept_scaling,
None,
penalty,
dual,
verbose,
max_iter,
tol,
random_state,
sample_weight=sample_weight,
)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
# n_iter_i is an array for each class. However, `target` is always encoded
# in {-1, 1}, so we only take the first element of n_iter_i.
n_iter_i = n_iter_i.item()
elif solver in ["sag", "saga"]:
if multi_class == "multinomial":
target = target.astype(X.dtype, copy=False)
loss = "multinomial"
else:
loss = "log"
# alpha is for L2-norm, beta is for L1-norm
if penalty == "l1":
alpha = 0.0
beta = 1.0 / C
elif penalty == "l2":
alpha = 1.0 / C
beta = 0.0
else: # Elastic-Net penalty
alpha = (1.0 / C) * (1 - l1_ratio)
beta = (1.0 / C) * l1_ratio
w0, n_iter_i, warm_start_sag = sag_solver(
X,
target,
sample_weight,
loss,
alpha,
beta,
max_iter,
tol,
verbose,
random_state,
False,
max_squared_sum,
warm_start_sag,
is_saga=(solver == "saga"),
)
else:
raise ValueError(
"solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver
)
if multi_class == "multinomial":
n_classes = max(2, classes.size)
if solver in ["lbfgs", "newton-cg"]:
multi_w0 = np.reshape(w0, (n_classes, -1), order="F")
else:
multi_w0 = w0
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0.copy())
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return np.array(coefs), np.array(Cs), n_iter
| _logistic_regression_path |
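The warm-starting idea behind the path computation can be illustrated with the public estimator: reuse the previous solution as initialization while moving along the grid of C values. A simplified sketch on synthetic data, not the internal path function itself:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=300, n_features=20, random_state=0)
Cs = np.logspace(-4, 4, 10)

clf = LogisticRegression(solver="lbfgs", warm_start=True, max_iter=1000)
coefs, n_iters = [], []
for C in Cs:
    clf.set_params(C=C)
    clf.fit(X, y)                        # restarts from the previous coef_
    coefs.append(clf.coef_.ravel().copy())
    n_iters.append(int(clf.n_iter_[0]))
print(n_iters)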
scikit-learn | 257 | sklearn/cluster/_kmeans.py | def _mini_batch_step(
X,
sample_weight,
centers,
centers_new,
weight_sums,
random_state,
random_reassign=False,
reassignment_ratio=0.01,
verbose=False,
n_threads=1,
):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The original data array. If sparse, must be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers before the current iteration
centers_new : ndarray of shape (n_clusters, n_features)
The cluster centers after the current iteration. Modified in-place.
weight_sums : ndarray of shape (n_clusters,)
The vector in which we keep track of the numbers of points in a
cluster. This array is modified in place.
random_state : RandomState instance
Determines random number generation for low count centers reassignment.
See :term:`Glossary <random_state>`.
random_reassign : boolean, default=False
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, default=0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, default=False
Controls the verbosity.
n_threads : int, default=1
The number of OpenMP threads to use for the computation.
Returns
-------
inertia : float
Sum of squared distances of samples to their closest cluster center.
The inertia is computed after finding the labels and before updating
the centers.
"""
| /usr/src/app/target_test_cases/failed_tests__mini_batch_step.txt | def _mini_batch_step(
X,
sample_weight,
centers,
centers_new,
weight_sums,
random_state,
random_reassign=False,
reassignment_ratio=0.01,
verbose=False,
n_threads=1,
):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The original data array. If sparse, must be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers before the current iteration
centers_new : ndarray of shape (n_clusters, n_features)
The cluster centers after the current iteration. Modified in-place.
weight_sums : ndarray of shape (n_clusters,)
The vector in which we keep track of the numbers of points in a
cluster. This array is modified in place.
random_state : RandomState instance
Determines random number generation for low count centers reassignment.
See :term:`Glossary <random_state>`.
random_reassign : boolean, default=False
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, default=0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, default=False
Controls the verbosity.
n_threads : int, default=1
The number of OpenMP threads to use for the computation.
Returns
-------
inertia : float
Sum of squared distances of samples to their closest cluster center.
The inertia is computed after finding the labels and before updating
the centers.
"""
# Perform label assignment to nearest centers
# For better efficiency, it's better to run _mini_batch_step in a
# threadpool_limit context than using _labels_inertia_threadpool_limit here
labels, inertia = _labels_inertia(X, sample_weight, centers, n_threads=n_threads)
# Update centers according to the labels
if sp.issparse(X):
_minibatch_update_sparse(
X, sample_weight, centers, centers_new, weight_sums, labels, n_threads
)
else:
_minibatch_update_dense(
X,
sample_weight,
centers,
centers_new,
weight_sums,
labels,
n_threads,
)
# Reassign clusters that have very low weight
if random_reassign and reassignment_ratio > 0:
to_reassign = weight_sums < reassignment_ratio * weight_sums.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > 0.5 * X.shape[0]:
indices_dont_reassign = np.argsort(weight_sums)[int(0.5 * X.shape[0]) :]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = random_state.choice(
X.shape[0], replace=False, size=n_reassigns
)
if verbose:
print(f"[MiniBatchKMeans] Reassigning {n_reassigns} cluster centers.")
if sp.issparse(X):
assign_rows_csr(
X,
new_centers.astype(np.intp, copy=False),
np.where(to_reassign)[0].astype(np.intp, copy=False),
centers_new,
)
else:
centers_new[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
weight_sums[to_reassign] = np.min(weight_sums[~to_reassign])
return inertia
| _mini_batch_step |
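A dense-only, unit-sample-weight sketch of the same incremental update: assign each mini-batch sample to its nearest center, then move each touched center toward its samples with a per-center step size of 1 / running count. Reassignment of low-weight centers is omitted; the data are synthetic.

import numpy as np

rng = np.random.RandomState(0)
X_batch = rng.randn(64, 2)          # one mini-batch of samples
centers = rng.randn(3, 2)           # current cluster centers
weight_sums = np.zeros(3)           # running per-center counts

# 1) assign each sample to its nearest center (inertia would be d2.min(1).sum())
d2 = ((X_batch[:, None, :] - centers[None, :, :]) ** 2).sum(axis=-1)
labels = d2.argmin(axis=1)

# 2) move each touched center with a per-center step size of 1 / count
for x, c in zip(X_batch, labels):
    weight_sums[c] += 1
    centers[c] += (x - centers[c]) / weight_sums[c]
print(centers)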
scikit-learn | 258 | sklearn/linear_model/_theil_sen.py | def _modified_weiszfeld_step(X, x_old):
"""Modified Weiszfeld step.
This function defines one iteration step in order to approximate the
spatial median (L1 median). It is a form of an iteratively re-weighted
least squares method.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
x_old : ndarray of shape (n_features,)
Current start vector.
Returns
-------
x_new : ndarray of shape (n_features,)
New iteration step.
References
----------
- On Computation of Spatial Median for Robust Data Mining, 2005
T. Kärkkäinen and S. Äyrämö
http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
"""
| /usr/src/app/target_test_cases/failed_tests__modified_weiszfeld_step.txt | def _modified_weiszfeld_step(X, x_old):
"""Modified Weiszfeld step.
This function defines one iteration step in order to approximate the
spatial median (L1 median). It is a form of an iteratively re-weighted
least squares method.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
x_old : ndarray of shape (n_features,)
Current start vector.
Returns
-------
x_new : ndarray of shape (n_features,)
New iteration step.
References
----------
- On Computation of Spatial Median for Robust Data Mining, 2005
T. Kärkkäinen and S. Äyrämö
http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
"""
diff = X - x_old
diff_norm = np.sqrt(np.sum(diff**2, axis=1))
mask = diff_norm >= _EPSILON
# x_old equals one of our samples
is_x_old_in_X = int(mask.sum() < X.shape[0])
diff = diff[mask]
diff_norm = diff_norm[mask][:, np.newaxis]
quotient_norm = linalg.norm(np.sum(diff / diff_norm, axis=0))
if quotient_norm > _EPSILON: # to avoid division by zero
new_direction = np.sum(X[mask, :] / diff_norm, axis=0) / np.sum(
1 / diff_norm, axis=0
)
else:
new_direction = 1.0
quotient_norm = 1.0
return (
max(0.0, 1.0 - is_x_old_in_X / quotient_norm) * new_direction
+ min(1.0, is_x_old_in_X / quotient_norm) * x_old
)
| _modified_weiszfeld_step |
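Iterating plain Weiszfeld steps (the unmodified variant, without the special handling of an iterate that coincides with a data point) converges to the spatial median on well-behaved data; a small numpy sketch on synthetic data:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(100, 3) + np.array([1.0, -2.0, 0.5])

x = X.mean(axis=0)                       # start from the coordinate-wise mean
for _ in range(100):
    norms = np.sqrt(((X - x) ** 2).sum(axis=1))
    w = 1.0 / np.maximum(norms, 1e-12)   # guard against division by zero
    x_new = (w[:, None] * X).sum(axis=0) / w.sum()
    if np.linalg.norm(x_new - x) < 1e-9:
        break
    x = x_new
print(x)                                 # approximate spatial (L1) median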
scikit-learn | 259 | sklearn/utils/extmath.py | def _nanaverage(a, weights=None):
"""Compute the weighted average, ignoring NaNs.
Parameters
----------
a : ndarray
Array containing data to be averaged.
weights : array-like, default=None
An array of weights associated with the values in a. Each value in a
contributes to the average according to its associated weight. The
weights array must be 1-D, with the same shape as `a`. If `weights=None`,
then all data in a are assumed to have a weight equal to one.
Returns
-------
weighted_average : float
The weighted average.
Notes
-----
This is a wrapper combining :func:`numpy.average` and :func:`numpy.nanmean`, so
that :func:`np.nan` values are ignored in the average and weights can
be passed. Note that when possible, we delegate to the underlying NumPy methods.
"""
| /usr/src/app/target_test_cases/failed_tests__nanaverage.txt | def _nanaverage(a, weights=None):
"""Compute the weighted average, ignoring NaNs.
Parameters
----------
a : ndarray
Array containing data to be averaged.
weights : array-like, default=None
An array of weights associated with the values in a. Each value in a
contributes to the average according to its associated weight. The
weights array must be 1-D, with the same shape as `a`. If `weights=None`,
then all data in a are assumed to have a weight equal to one.
Returns
-------
weighted_average : float
The weighted average.
Notes
-----
This is a wrapper combining :func:`numpy.average` and :func:`numpy.nanmean`, so
that :func:`np.nan` values are ignored in the average and weights can
be passed. Note that when possible, we delegate to the underlying NumPy methods.
"""
if len(a) == 0:
return np.nan
mask = np.isnan(a)
if mask.all():
return np.nan
if weights is None:
return np.nanmean(a)
weights = np.asarray(weights)
a, weights = a[~mask], weights[~mask]
try:
return np.average(a, weights=weights)
except ZeroDivisionError:
# this is when all weights are zero, then ignore them
return np.average(a)
| _nanaverage |
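A standalone re-implementation sketch of the same behaviour (NaNs dropped before averaging, with an unweighted fallback when all remaining weights are zero), for illustration only:

import numpy as np

def nanaverage(a, weights=None):
    a = np.asarray(a, dtype=float)
    if a.size == 0 or np.isnan(a).all():
        return np.nan
    if weights is None:
        return np.nanmean(a)
    mask = ~np.isnan(a)
    a, w = a[mask], np.asarray(weights, dtype=float)[mask]
    if w.sum() == 0:                     # all weights zero: fall back to plain mean
        return a.mean()
    return np.average(a, weights=w)

print(nanaverage([1.0, np.nan, 3.0], weights=[1.0, 5.0, 3.0]))  # -> 2.5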
scikit-learn | 260 | sklearn/utils/optimize.py | def _newton_cg(
grad_hess,
func,
grad,
x0,
args=(),
tol=1e-4,
maxiter=100,
maxinner=200,
line_search=True,
warn=True,
verbose=0,
):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Parameters
----------
grad_hess : callable
Should return the gradient and a callable returning the matvec product
of the Hessian.
func : callable
Should return the value of the function.
grad : callable
Should return the function value and the gradient. This is used
by the linesearch functions.
x0 : array of float
Initial guess.
args : tuple, default=()
Arguments passed to func_grad_hess, func and grad.
tol : float, default=1e-4
Stopping criterion. The iteration will stop when
``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
maxiter : int, default=100
Number of Newton iterations.
maxinner : int, default=200
Number of CG iterations.
line_search : bool, default=True
Whether to use a line search or not.
warn : bool, default=True
Whether to warn when the algorithm didn't converge.
Returns
-------
xk : ndarray of float
Estimated minimum.
k : int
Number of Newton iterations run.
"""
| /usr/src/app/target_test_cases/failed_tests__newton_cg.txt | def _newton_cg(
grad_hess,
func,
grad,
x0,
args=(),
tol=1e-4,
maxiter=100,
maxinner=200,
line_search=True,
warn=True,
verbose=0,
):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Parameters
----------
grad_hess : callable
Should return the gradient and a callable returning the matvec product
of the Hessian.
func : callable
Should return the value of the function.
grad : callable
Should return the function value and the gradient. This is used
by the linesearch functions.
x0 : array of float
Initial guess.
args : tuple, default=()
Arguments passed to func_grad_hess, func and grad.
tol : float, default=1e-4
Stopping criterion. The iteration will stop when
``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
maxiter : int, default=100
Number of Newton iterations.
maxinner : int, default=200
Number of CG iterations.
line_search : bool, default=True
Whether to use a line search or not.
warn : bool, default=True
Whether to warn when the algorithm didn't converge.
Returns
-------
xk : ndarray of float
Estimated minimum.
k : int
Number of Newton iterations run.
"""
x0 = np.asarray(x0).flatten()
xk = np.copy(x0)
k = 0
if line_search:
old_fval = func(x0, *args)
old_old_fval = None
else:
old_fval = 0
is_verbose = verbose > 0
# Outer loop: our Newton iteration
while k < maxiter:
# Compute a search direction pk by applying the CG method to
# del2 f(xk) p = - fgrad f(xk) starting from 0.
fgrad, fhess_p = grad_hess(xk, *args)
absgrad = np.abs(fgrad)
max_absgrad = np.max(absgrad)
check = max_absgrad <= tol
if is_verbose:
print(f"Newton-CG iter = {k}")
print(" Check Convergence")
print(f" max |gradient| <= tol: {max_absgrad} <= {tol} {check}")
if check:
break
maggrad = np.sum(absgrad)
eta = min([0.5, np.sqrt(maggrad)])
termcond = eta * maggrad
# Inner loop: solve the Newton update by conjugate gradient, to
# avoid inverting the Hessian
xsupi = _cg(fhess_p, fgrad, maxiter=maxinner, tol=termcond, verbose=verbose)
alphak = 1.0
if line_search:
try:
alphak, fc, gc, old_fval, old_old_fval, gfkp1 = _line_search_wolfe12(
func,
grad,
xk,
xsupi,
fgrad,
old_fval,
old_old_fval,
verbose=verbose,
args=args,
)
except _LineSearchError:
warnings.warn("Line Search failed")
break
xk += alphak * xsupi # upcast if necessary
k += 1
if warn and k >= maxiter:
warnings.warn(
(
f"newton-cg failed to converge at loss = {old_fval}. Increase the"
" number of iterations."
),
ConvergenceWarning,
)
elif is_verbose:
print(f" Solver did converge at loss = {old_fval}.")
return xk, k
| _newton_cg |
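The essential structure, an outer Newton loop whose linear system H p = -g is solved approximately by conjugate gradient instead of a direct factorization, shown as a minimal sketch for a convex quadratic (hypothetical objective, full steps, no line search):

import numpy as np
from scipy.sparse.linalg import cg

rng = np.random.RandomState(0)
A = rng.randn(20, 5)
H = A.T @ A + np.eye(5)            # constant, positive-definite Hessian
b = rng.randn(5)

def grad(x):                       # gradient of f(x) = 0.5 x'Hx - b'x
    return H @ x - b

x = np.zeros(5)
for k in range(25):
    g = grad(x)
    if np.abs(g).max() <= 1e-8:    # same stopping rule: max |g_i| <= tol
        break
    p, _ = cg(H, -g, maxiter=200)  # inner CG solve of H p = -g
    x = x + p                      # full Newton step (alpha = 1)
print(k, np.abs(grad(x)).max())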
scikit-learn | 261 | sklearn/datasets/_openml.py | def _open_openml_url(
openml_path: str, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0
):
"""
Returns a resource from OpenML.org. Caches it to data_home if required.
Parameters
----------
openml_path : str
OpenML URL that will be accessed. This will be prefixed with
_OPENML_PREFIX.
data_home : str
Directory to which the files will be cached. If None, no caching will
be applied.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Errors with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
Returns
-------
result : stream
A stream to the OpenML resource.
"""
| /usr/src/app/target_test_cases/failed_tests__open_openml_url.txt | def _open_openml_url(
openml_path: str, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0
):
"""
Returns a resource from OpenML.org. Caches it to data_home if required.
Parameters
----------
openml_path : str
OpenML URL that will be accessed. This will be prefixed with
_OPENML_PREFIX.
data_home : str
Directory to which the files will be cached. If None, no caching will
be applied.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Errors with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
Returns
-------
result : stream
A stream to the OpenML resource.
"""
def is_gzip_encoded(_fsrc):
return _fsrc.info().get("Content-Encoding", "") == "gzip"
req = Request(_OPENML_PREFIX + openml_path)
req.add_header("Accept-encoding", "gzip")
if data_home is None:
fsrc = _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req)
if is_gzip_encoded(fsrc):
return gzip.GzipFile(fileobj=fsrc, mode="rb")
return fsrc
local_path = _get_local_path(openml_path, data_home)
dir_name, file_name = os.path.split(local_path)
if not os.path.exists(local_path):
os.makedirs(dir_name, exist_ok=True)
try:
# Create a tmpdir as a subfolder of dir_name where the final file will
# be moved to if the download is successful. This guarantees that the
# renaming operation to the final location is atomic to ensure the
# concurrence safety of the dataset caching mechanism.
with TemporaryDirectory(dir=dir_name) as tmpdir:
with closing(
_retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(
req
)
) as fsrc:
opener: Callable
if is_gzip_encoded(fsrc):
opener = open
else:
opener = gzip.GzipFile
with opener(os.path.join(tmpdir, file_name), "wb") as fdst:
shutil.copyfileobj(fsrc, fdst)
shutil.move(fdst.name, local_path)
except Exception:
if os.path.exists(local_path):
os.unlink(local_path)
raise
# XXX: First time, decompression will not be necessary (by using fsrc), but
# it will happen nonetheless
return gzip.GzipFile(local_path, "rb")
| _open_openml_url |
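The caching scheme above (write into a temporary directory inside the target folder, then move atomically into place) can be sketched without any network access; fetch_bytes below is a hypothetical stand-in for the real HTTP download, and the cache path is arbitrary.

import gzip
import os
import shutil
from tempfile import TemporaryDirectory

def fetch_bytes():
    # hypothetical stand-in for the real HTTP download
    return b"arff or json payload"

def cached_fetch(local_path):
    if not os.path.exists(local_path):
        dir_name, file_name = os.path.split(local_path)
        os.makedirs(dir_name, exist_ok=True)
        # Download into a tmpdir inside the target directory so the final
        # rename is atomic and concurrent readers never see a partial file.
        with TemporaryDirectory(dir=dir_name) as tmpdir:
            tmp_file = os.path.join(tmpdir, file_name)
            with gzip.open(tmp_file, "wb") as fdst:
                fdst.write(fetch_bytes())
            shutil.move(tmp_file, local_path)
    with gzip.open(local_path, "rb") as fsrc:
        return fsrc.read()

print(cached_fetch(os.path.join("openml_cache", "api", "data.gz")))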
scikit-learn | 262 | sklearn/utils/multiclass.py | def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking OvR decision function from OvO.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like of shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like of shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1) / 2``.
"""
| /usr/src/app/target_test_cases/failed_tests__ovr_decision_function.txt | def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking OvR decision function from OvO.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like of shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like of shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1) / 2``.
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
# Monotonically transform the sum_of_confidences to (-1/3, 1/3)
# and add it with votes. The monotonic transformation is
# f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2
# to ensure that we won't reach the limits and change vote order.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
transformed_confidences = sum_of_confidences / (
3 * (np.abs(sum_of_confidences) + 1)
)
return votes + transformed_confidences
| _ovr_decision_function |
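A tiny worked example with three classes and three pairwise classifiers, showing that the votes set the ranking while the bounded transform of the summed confidences only breaks ties (the confidence values are made up):

import numpy as np

n_classes = 3                                 # OvO pairs: (0, 1), (0, 2), (1, 2)
predictions = np.array([[1, 0, 0]])           # pair winners: class 1, class 0, class 1
confidences = np.array([[0.2, -0.4, 0.1]])

votes = np.zeros((1, n_classes))
sum_conf = np.zeros((1, n_classes))
k = 0
for i in range(n_classes):
    for j in range(i + 1, n_classes):
        sum_conf[:, i] -= confidences[:, k]
        sum_conf[:, j] += confidences[:, k]
        votes[predictions[:, k] == 0, i] += 1
        votes[predictions[:, k] == 1, j] += 1
        k += 1
# confidences are squashed into (-1/3, 1/3) so they can never overturn a vote
decision = votes + sum_conf / (3 * (np.abs(sum_conf) + 1))
print(votes, decision)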
scikit-learn | 263 | sklearn/inspection/_partial_dependence.py | def _partial_dependence_brute(
est, grid, features, X, response_method, sample_weight=None
):
"""Calculate partial dependence via the brute force method.
The brute method explicitly averages the predictions of an estimator over a
grid of feature values.
For each `grid` value, all the samples from `X` have their variables of
interest replaced by that specific `grid` value. The predictions are then made
and averaged across the samples.
This method is slower than the `'recursion'`
(:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`)
version for estimators with this second option. However, with the `'brute'`
force method, the average will be done with the given `X` and not the `X`
used during training, as it is done in the `'recursion'` version. Therefore
the average can always accept `sample_weight` (even when the estimator was
fitted without).
Parameters
----------
est : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
grid : array-like of shape (n_points, n_target_features)
The grid of feature values for which the partial dependence is calculated.
Note that `n_points` is the number of points in the grid and `n_target_features`
is the number of features you are doing partial dependence at.
features : array-like of {int, str}
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
X : array-like of shape (n_samples, n_features)
`X` is used to generate values for the complement features. That is, for
each value in `grid`, the method will average the prediction of each
sample from `X` having that grid value for `features`.
response_method : {'auto', 'predict_proba', 'decision_function'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. Note that
`sample_weight` does not change the individual predictions.
Returns
-------
averaged_predictions : array-like of shape (n_targets, n_points)
The averaged predictions for the given `grid` of features values.
Note that `n_targets` is the number of targets (e.g. 1 for binary
classification, `n_tasks` for multi-output regression, and `n_classes` for
multiclass classification) and `n_points` is the number of points in the `grid`.
predictions : array-like
The predictions for the given `grid` of features values over the samples
from `X`. For non-multioutput regression and binary classification the
shape is `(n_instances, n_points)` and for multi-output regression and
multiclass classification the shape is `(n_targets, n_instances, n_points)`,
where `n_targets` is the number of targets (`n_tasks` for multi-output
regression, and `n_classes` for multiclass classification), `n_instances`
is the number of instances in `X`, and `n_points` is the number of points
in the `grid`.
"""
| /usr/src/app/target_test_cases/failed_tests__partial_dependence_brute.txt | def _partial_dependence_brute(
est, grid, features, X, response_method, sample_weight=None
):
"""Calculate partial dependence via the brute force method.
The brute method explicitly averages the predictions of an estimator over a
grid of feature values.
For each `grid` value, all the samples from `X` have their variables of
interest replaced by that specific `grid` value. The predictions are then made
and averaged across the samples.
This method is slower than the `'recursion'`
(:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`)
version for estimators with this second option. However, with the `'brute'`
force method, the average will be done with the given `X` and not the `X`
used during training, as it is done in the `'recursion'` version. Therefore
the average can always accept `sample_weight` (even when the estimator was
fitted without).
Parameters
----------
est : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
grid : array-like of shape (n_points, n_target_features)
The grid of feature values for which the partial dependence is calculated.
Note that `n_points` is the number of points in the grid and `n_target_features`
is the number of features you are doing partial dependence at.
features : array-like of {int, str}
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
X : array-like of shape (n_samples, n_features)
`X` is used to generate values for the complement features. That is, for
each value in `grid`, the method will average the prediction of each
sample from `X` having that grid value for `features`.
response_method : {'auto', 'predict_proba', 'decision_function'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. Note that
`sample_weight` does not change the individual predictions.
Returns
-------
averaged_predictions : array-like of shape (n_targets, n_points)
The averaged predictions for the given `grid` of features values.
Note that `n_targets` is the number of targets (e.g. 1 for binary
classification, `n_tasks` for multi-output regression, and `n_classes` for
multiclass classification) and `n_points` is the number of points in the `grid`.
predictions : array-like
The predictions for the given `grid` of features values over the samples
from `X`. For non-multioutput regression and binary classification the
shape is `(n_instances, n_points)` and for multi-output regression and
multiclass classification the shape is `(n_targets, n_instances, n_points)`,
where `n_targets` is the number of targets (`n_tasks` for multi-output
regression, and `n_classes` for multiclass classification), `n_instances`
is the number of instances in `X`, and `n_points` is the number of points
in the `grid`.
"""
predictions = []
averaged_predictions = []
# define the prediction_method (predict, predict_proba, decision_function).
if is_regressor(est):
prediction_method = est.predict
else:
predict_proba = getattr(est, "predict_proba", None)
decision_function = getattr(est, "decision_function", None)
if response_method == "auto":
# try predict_proba, then decision_function if it doesn't exist
prediction_method = predict_proba or decision_function
else:
prediction_method = (
predict_proba
if response_method == "predict_proba"
else decision_function
)
if prediction_method is None:
if response_method == "auto":
raise ValueError(
"The estimator has no predict_proba and no "
"decision_function method."
)
elif response_method == "predict_proba":
raise ValueError("The estimator has no predict_proba method.")
else:
raise ValueError("The estimator has no decision_function method.")
X_eval = X.copy()
for new_values in grid:
for i, variable in enumerate(features):
_safe_assign(X_eval, new_values[i], column_indexer=variable)
try:
# Note: predictions is of shape
# (n_points,) for non-multioutput regressors
# (n_points, n_tasks) for multioutput regressors
# (n_points, 1) for the regressors in cross_decomposition (I think)
# (n_points, 2) for binary classification
# (n_points, n_classes) for multiclass classification
pred = prediction_method(X_eval)
predictions.append(pred)
# average over samples
averaged_predictions.append(np.average(pred, axis=0, weights=sample_weight))
except NotFittedError as e:
raise ValueError("'estimator' parameter must be a fitted estimator") from e
n_samples = X.shape[0]
# reshape to (n_targets, n_instances, n_points) where n_targets is:
# - 1 for non-multioutput regression and binary classification (shape is
# already correct in those cases)
# - n_tasks for multi-output regression
# - n_classes for multiclass classification.
predictions = np.array(predictions).T
if is_regressor(est) and predictions.ndim == 2:
# non-multioutput regression, shape is (n_instances, n_points,)
predictions = predictions.reshape(n_samples, -1)
elif is_classifier(est) and predictions.shape[0] == 2:
# Binary classification, shape is (2, n_instances, n_points).
# we output the effect of **positive** class
predictions = predictions[1]
predictions = predictions.reshape(n_samples, -1)
# reshape averaged_predictions to (n_targets, n_points) where n_targets is:
# - 1 for non-multioutput regression and binary classification (shape is
# already correct in those cases)
# - n_tasks for multi-output regression
# - n_classes for multiclass classification.
averaged_predictions = np.array(averaged_predictions).T
if is_regressor(est) and averaged_predictions.ndim == 1:
# non-multioutput regression, shape is (n_points,)
averaged_predictions = averaged_predictions.reshape(1, -1)
elif is_classifier(est) and averaged_predictions.shape[0] == 2:
# Binary classification, shape is (2, n_points).
# we output the effect of **positive** class
averaged_predictions = averaged_predictions[1]
averaged_predictions = averaged_predictions.reshape(1, -1)
return averaged_predictions, predictions
| _partial_dependence_brute |
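The row above targets the private brute-force helper. As a hint of what it computes, here is a minimal sketch of the same averaging idea using only the public scikit-learn API and a one-feature grid; the dataset and grid size are illustrative only.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression

X, y = make_regression(n_samples=50, n_features=3, random_state=0)
est = LinearRegression().fit(X, y)

feature = 0
grid = np.linspace(X[:, feature].min(), X[:, feature].max(), num=5)
averaged = []
for value in grid:
    X_eval = X.copy()
    X_eval[:, feature] = value                   # overwrite the feature of interest
    averaged.append(est.predict(X_eval).mean())  # average predictions over samples
print(np.round(averaged, 2))                     # one averaged prediction per grid point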
scikit-learn | 264 | sklearn/datasets/_arff_parser.py | def _post_process_frame(frame, feature_names, target_names):
"""Post process a dataframe to select the desired columns in `X` and `y`.
Parameters
----------
frame : dataframe
The dataframe to split into `X` and `y`.
feature_names : list of str
The list of feature names to populate `X`.
target_names : list of str
The list of target names to populate `y`.
Returns
-------
X : dataframe
The dataframe containing the features.
y : {series, dataframe} or None
The series or dataframe containing the target.
"""
| /usr/src/app/target_test_cases/failed_tests__post_process_frame.txt | def _post_process_frame(frame, feature_names, target_names):
"""Post process a dataframe to select the desired columns in `X` and `y`.
Parameters
----------
frame : dataframe
The dataframe to split into `X` and `y`.
feature_names : list of str
The list of feature names to populate `X`.
target_names : list of str
The list of target names to populate `y`.
Returns
-------
X : dataframe
The dataframe containing the features.
y : {series, dataframe} or None
The series or dataframe containing the target.
"""
X = frame[feature_names]
if len(target_names) >= 2:
y = frame[target_names]
elif len(target_names) == 1:
y = frame[target_names[0]]
else:
y = None
return X, y
| _post_process_frame |
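A hypothetical usage sketch for the row above; it assumes the helper can be imported from the private module listed in the row (sklearn.datasets._arff_parser), which may change between releases.

import pandas as pd
from sklearn.datasets._arff_parser import _post_process_frame  # private, assumed importable

frame = pd.DataFrame({"f1": [1, 2], "f2": [3, 4], "target": [0, 1]})
X, y = _post_process_frame(frame, feature_names=["f1", "f2"], target_names=["target"])
print(X.columns.tolist())  # ['f1', 'f2'] -> dataframe of features
print(y.name)              # 'target'    -> a Series because a single target was requested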
scikit-learn | 265 | sklearn/utils/random.py | def _random_choice_csc(n_samples, classes, class_probability=None, random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of \
shape (n_classes,), default=None
Class distribution of each column. If None, uniform distribution is
assumed.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the sampled classes.
See :term:`Glossary <random_state>`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
| /usr/src/app/target_test_cases/failed_tests__random_choice_csc.txt | def _random_choice_csc(n_samples, classes, class_probability=None, random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of \
shape (n_classes,), default=None
Class distribution of each column. If None, uniform distribution is
assumed.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the sampled classes.
See :term:`Glossary <random_state>`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array("i")
indices = array.array("i")
indptr = array.array("i", [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != "i":
raise ValueError("class dtype %s is not supported" % classes[j].dtype)
classes[j] = classes[j].astype(np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if not np.isclose(np.sum(class_prob_j), 1.0):
raise ValueError(
"Probability array at index {0} does not sum to one".format(j)
)
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError(
"classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(
j, classes[j].shape[0], class_prob_j.shape[0]
)
)
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
index_class_0 = np.flatnonzero(classes[j] == 0).item()
p_nonzero = 1 - class_prob_j[index_class_0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(
n_population=n_samples, n_samples=nnz, random_state=random_state
)
indices.extend(ind_sample)
# Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = class_probability_nz / np.sum(
class_probability_nz
)
classes_ind = np.searchsorted(
class_probability_nz_norm.cumsum(), rng.uniform(size=nnz)
)
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr), (n_samples, len(classes)), dtype=int)
| _random_choice_csc |
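A hypothetical usage sketch for the row above, assuming the private helper is importable from sklearn.utils.random as listed; each output column is sampled from its own class distribution.

import numpy as np
from sklearn.utils.random import _random_choice_csc  # private, assumed importable

classes = [np.array([0, 1]), np.array([0, 2, 3])]
probas = [np.array([0.5, 0.5]), np.array([0.6, 0.3, 0.1])]
out = _random_choice_csc(10, classes, class_probability=probas, random_state=0)
print(out.shape)          # (10, 2) sparse CSC matrix; zeros are not stored explicitly
print(out.toarray()[:3])  # first three sampled rows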
scikit-learn | 266 | sklearn/utils/extmath.py | def _randomized_eigsh(
M,
n_components,
*,
n_oversamples=10,
n_iter="auto",
power_iteration_normalizer="auto",
selection="module",
random_state=None,
):
"""Computes a truncated eigendecomposition using randomized methods
This method solves the fixed-rank approximation problem described in the
Halko et al paper.
The choice of which components to select can be tuned with the `selection`
parameter.
.. versionadded:: 0.24
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose, it should be real symmetric square or complex
hermitian
n_components : int
Number of eigenvalues and vectors to extract.
n_oversamples : int, default=10
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of eigenvectors and eigenvalues. Users might wish
to increase this parameter up to `2*k - n_components` where k is the
effective rank, for large matrices, noisy problems, matrices with
slowly decaying spectrums, or to increase precision accuracy. See Halko
et al (pages 5, 23 and 26).
n_iter : int or 'auto', default='auto'
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)) in which case `n_iter` is set to 7.
This improves precision with few components. Note that in general
users should rather increase `n_oversamples` before increasing `n_iter`
as the principle of the randomized method is to avoid usage of these
more costly power iterations steps. When `n_components` is equal
or greater to the effective matrix rank and the spectrum does not
present a slow decay, `n_iter=0` or `1` should even work fine in theory
(see Halko et al paper, page 9).
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
selection : {'value', 'module'}, default='module'
Strategy used to select the n components. When `selection` is `'value'`
(not yet implemented, will become the default when implemented), the
components corresponding to the n largest eigenvalues are returned.
When `selection` is `'module'`, the components corresponding to the n
eigenvalues with largest modules are returned.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator to use when shuffling
the data, i.e. getting the random vectors to initialize the algorithm.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
eigendecomposition using randomized methods to speed up the computations.
This method is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision). To increase the precision it is recommended to
increase `n_oversamples`, up to `2*k-n_components` where k is the
effective rank. Usually, `n_components` is chosen to be greater than k
so increasing `n_oversamples` up to `n_components` should be enough.
Strategy 'value': not implemented yet.
Algorithms 5.3, 5.4 and 5.5 in the Halko et al paper should provide good
candidates for a future implementation.
Strategy 'module':
The principle is that for diagonalizable matrices, the singular values and
eigenvalues are related: if t is an eigenvalue of A, then :math:`|t|` is a
singular value of A. This method relies on a randomized SVD to find the n
singular components corresponding to the n singular values with largest
modules, and then uses the signs of the singular vectors to find the true
sign of t: if the sign of left and right singular vectors are different
then the corresponding eigenvalue is negative.
Returns
-------
eigvals : 1D array of shape (n_components,) containing the `n_components`
eigenvalues selected (see ``selection`` parameter).
eigvecs : 2D array of shape (M.shape[0], n_components) containing the
`n_components` eigenvectors corresponding to the `eigvals`, in the
corresponding order. Note that this follows the `scipy.linalg.eigh`
convention.
See Also
--------
:func:`randomized_svd`
References
----------
* :arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions"
(Algorithm 4.3 for strategy 'module') <0909.4061>`
Halko, et al. (2009)
"""
| /usr/src/app/target_test_cases/failed_tests__randomized_eigsh.txt | def _randomized_eigsh(
M,
n_components,
*,
n_oversamples=10,
n_iter="auto",
power_iteration_normalizer="auto",
selection="module",
random_state=None,
):
"""Computes a truncated eigendecomposition using randomized methods
This method solves the fixed-rank approximation problem described in the
Halko et al paper.
The choice of which components to select can be tuned with the `selection`
parameter.
.. versionadded:: 0.24
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose, it should be real symmetric square or complex
hermitian
n_components : int
Number of eigenvalues and vectors to extract.
n_oversamples : int, default=10
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of eigenvectors and eigenvalues. Users might wish
to increase this parameter up to `2*k - n_components` where k is the
effective rank, for large matrices, noisy problems, matrices with
slowly decaying spectrums, or to increase precision accuracy. See Halko
et al (pages 5, 23 and 26).
n_iter : int or 'auto', default='auto'
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)) in which case `n_iter` is set to 7.
This improves precision with few components. Note that in general
users should rather increase `n_oversamples` before increasing `n_iter`
as the principle of the randomized method is to avoid usage of these
more costly power iterations steps. When `n_components` is equal
or greater to the effective matrix rank and the spectrum does not
present a slow decay, `n_iter=0` or `1` should even work fine in theory
(see Halko et al paper, page 9).
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
selection : {'value', 'module'}, default='module'
Strategy used to select the n components. When `selection` is `'value'`
(not yet implemented, will become the default when implemented), the
components corresponding to the n largest eigenvalues are returned.
When `selection` is `'module'`, the components corresponding to the n
eigenvalues with largest modules are returned.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator to use when shuffling
the data, i.e. getting the random vectors to initialize the algorithm.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
eigendecomposition using randomized methods to speed up the computations.
This method is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision). To increase the precision it is recommended to
increase `n_oversamples`, up to `2*k-n_components` where k is the
effective rank. Usually, `n_components` is chosen to be greater than k
so increasing `n_oversamples` up to `n_components` should be enough.
Strategy 'value': not implemented yet.
Algorithms 5.3, 5.4 and 5.5 in the Halko et al paper should provide good
candidates for a future implementation.
Strategy 'module':
The principle is that for diagonalizable matrices, the singular values and
eigenvalues are related: if t is an eigenvalue of A, then :math:`|t|` is a
singular value of A. This method relies on a randomized SVD to find the n
singular components corresponding to the n singular values with largest
modules, and then uses the signs of the singular vectors to find the true
sign of t: if the sign of left and right singular vectors are different
then the corresponding eigenvalue is negative.
Returns
-------
eigvals : 1D array of shape (n_components,) containing the `n_components`
eigenvalues selected (see ``selection`` parameter).
eigvecs : 2D array of shape (M.shape[0], n_components) containing the
`n_components` eigenvectors corresponding to the `eigvals`, in the
corresponding order. Note that this follows the `scipy.linalg.eigh`
convention.
See Also
--------
:func:`randomized_svd`
References
----------
* :arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions"
(Algorithm 4.3 for strategy 'module') <0909.4061>`
Halko, et al. (2009)
"""
if selection == "value": # pragma: no cover
# to do : an algorithm can be found in the Halko et al reference
raise NotImplementedError()
elif selection == "module":
# Note: no need for deterministic U and Vt (flip_sign=True),
# as we only use the dot product UVt afterwards
U, S, Vt = randomized_svd(
M,
n_components=n_components,
n_oversamples=n_oversamples,
n_iter=n_iter,
power_iteration_normalizer=power_iteration_normalizer,
flip_sign=False,
random_state=random_state,
)
eigvecs = U[:, :n_components]
eigvals = S[:n_components]
# Conversion of Singular values into Eigenvalues:
# For any eigenvalue t, the corresponding singular value is |t|.
# So if there is a negative eigenvalue t, the corresponding singular
# value will be -t, and the left (U) and right (V) singular vectors
# will have opposite signs.
# Fastest way: see <https://stackoverflow.com/a/61974002/7262247>
diag_VtU = np.einsum("ji,ij->j", Vt[:n_components, :], U[:, :n_components])
signs = np.sign(diag_VtU)
eigvals = eigvals * signs
else: # pragma: no cover
raise ValueError("Invalid `selection`: %r" % selection)
return eigvals, eigvecs
| _randomized_eigsh |
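A hypothetical usage sketch for the row above, assuming the private helper is importable from sklearn.utils.extmath; a low-rank symmetric matrix keeps the approximation error small.

import numpy as np
from sklearn.utils.extmath import _randomized_eigsh  # private, assumed importable

rng = np.random.RandomState(0)
A = rng.randn(100, 10)
M = A @ A.T  # symmetric positive semi-definite, rank <= 10
eigvals, eigvecs = _randomized_eigsh(M, n_components=3, random_state=0)
print(eigvals.shape, eigvecs.shape)  # (3,) (100, 3), scipy.linalg.eigh-style convention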
scikit-learn | 267 | sklearn/linear_model/_base.py | def _rescale_data(X, y, sample_weight, inplace=False):
"""Rescale data sample-wise by square root of sample_weight.
For many linear models, this enables easy support for sample_weight because
(y - X w)' S (y - X w)
with S = diag(sample_weight) becomes
||y_rescaled - X_rescaled w||_2^2
when setting
y_rescaled = sqrt(S) y
X_rescaled = sqrt(S) X
Returns
-------
X_rescaled : {array-like, sparse matrix}
y_rescaled : {array-like, sparse matrix}
"""
| /usr/src/app/target_test_cases/failed_tests__rescale_data.txt | def _rescale_data(X, y, sample_weight, inplace=False):
"""Rescale data sample-wise by square root of sample_weight.
For many linear models, this enables easy support for sample_weight because
(y - X w)' S (y - X w)
with S = diag(sample_weight) becomes
||y_rescaled - X_rescaled w||_2^2
when setting
y_rescaled = sqrt(S) y
X_rescaled = sqrt(S) X
Returns
-------
X_rescaled : {array-like, sparse matrix}
y_rescaled : {array-like, sparse matrix}
"""
# Assume that _validate_data and _check_sample_weight have been called by
# the caller.
xp, _ = get_namespace(X, y, sample_weight)
n_samples = X.shape[0]
sample_weight_sqrt = xp.sqrt(sample_weight)
if sp.issparse(X) or sp.issparse(y):
sw_matrix = sparse.dia_matrix(
(sample_weight_sqrt, 0), shape=(n_samples, n_samples)
)
if sp.issparse(X):
X = safe_sparse_dot(sw_matrix, X)
else:
if inplace:
X *= sample_weight_sqrt[:, None]
else:
X = X * sample_weight_sqrt[:, None]
if sp.issparse(y):
y = safe_sparse_dot(sw_matrix, y)
else:
if inplace:
if y.ndim == 1:
y *= sample_weight_sqrt
else:
y *= sample_weight_sqrt[:, None]
else:
if y.ndim == 1:
y = y * sample_weight_sqrt
else:
y = y * sample_weight_sqrt[:, None]
return X, y, sample_weight_sqrt
| _rescale_data |
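A small NumPy check of the identity stated in the docstring, independent of scikit-learn internals: weighted least squares on (X, y) gives the same coefficients as ordinary least squares on the sqrt(sample_weight)-rescaled data.

import numpy as np

rng = np.random.RandomState(0)
X, y = rng.randn(20, 3), rng.randn(20)
w = rng.uniform(0.5, 2.0, size=20)

sw_sqrt = np.sqrt(w)
coef_rescaled, *_ = np.linalg.lstsq(X * sw_sqrt[:, None], y * sw_sqrt, rcond=None)
coef_weighted = np.linalg.solve(X.T @ (w[:, None] * X), X.T @ (w * y))  # weighted normal equations
print(np.allclose(coef_rescaled, coef_weighted))  # True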
scikit-learn | 268 | sklearn/utils/_indexing.py | def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):
"""Safe assignment to a numpy array, sparse matrix, or pandas dataframe.
Parameters
----------
X : {ndarray, sparse-matrix, dataframe}
Array to be modified. It is expected to be 2-dimensional.
values : ndarray
The values to be assigned to `X`.
row_indexer : array-like, dtype={int, bool}, default=None
A 1-dimensional array to select the rows of interest. If `None`, all
rows are selected.
column_indexer : array-like, dtype={int, bool}, default=None
A 1-dimensional array to select the columns of interest. If `None`, all
columns are selected.
"""
| /usr/src/app/target_test_cases/failed_tests__safe_assign.txt | def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):
"""Safe assignment to a numpy array, sparse matrix, or pandas dataframe.
Parameters
----------
X : {ndarray, sparse-matrix, dataframe}
Array to be modified. It is expected to be 2-dimensional.
values : ndarray
The values to be assigned to `X`.
row_indexer : array-like, dtype={int, bool}, default=None
A 1-dimensional array to select the rows of interest. If `None`, all
rows are selected.
column_indexer : array-like, dtype={int, bool}, default=None
A 1-dimensional array to select the columns of interest. If `None`, all
columns are selected.
"""
row_indexer = slice(None, None, None) if row_indexer is None else row_indexer
column_indexer = (
slice(None, None, None) if column_indexer is None else column_indexer
)
if hasattr(X, "iloc"): # pandas dataframe
with warnings.catch_warnings():
# pandas >= 1.5 raises a warning when using iloc to set values in a column
# that does not have the same type as the column being set. It happens
# for instance when setting a categorical column with a string.
# In the future the behavior won't change and the warning should disappear.
# TODO(1.3): check if the warning is still raised or remove the filter.
warnings.simplefilter("ignore", FutureWarning)
X.iloc[row_indexer, column_indexer] = values
else: # numpy array or sparse matrix
X[row_indexer, column_indexer] = values
| _safe_assign |
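A hypothetical usage sketch for the row above, assuming the private helper is importable from sklearn.utils._indexing; the same call works for a NumPy array and a pandas dataframe.

import numpy as np
import pandas as pd
from sklearn.utils._indexing import _safe_assign  # private, assumed importable

X_np = np.zeros((3, 2))
_safe_assign(X_np, np.array([7.0, 7.0, 7.0]), column_indexer=0)

X_df = pd.DataFrame({"a": [0.0, 0.0, 0.0], "b": [1.0, 1.0, 1.0]})
_safe_assign(X_df, np.array([7.0, 7.0, 7.0]), column_indexer=0)
print(X_np[:, 0], X_df["a"].to_numpy())  # both first columns are now 7.0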
scikit-learn | 269 | sklearn/utils/_set_output.py | def _safe_set_output(estimator, *, transform=None):
"""Safely call estimator.set_output and error if it not available.
This is used by meta-estimators to set the output for child estimators.
Parameters
----------
estimator : estimator instance
Estimator instance.
transform : {"default", "pandas", "polars"}, default=None
Configure output of the following estimator's methods:
- `"transform"`
- `"fit_transform"`
If `None`, this operation is a no-op.
Returns
-------
estimator : estimator instance
Estimator instance.
"""
| /usr/src/app/target_test_cases/failed_tests__safe_set_output.txt | def _safe_set_output(estimator, *, transform=None):
"""Safely call estimator.set_output and error if it not available.
This is used by meta-estimators to set the output for child estimators.
Parameters
----------
estimator : estimator instance
Estimator instance.
transform : {"default", "pandas", "polars"}, default=None
Configure output of the following estimator's methods:
- `"transform"`
- `"fit_transform"`
If `None`, this operation is a no-op.
Returns
-------
estimator : estimator instance
Estimator instance.
"""
set_output_for_transform = (
hasattr(estimator, "transform")
or hasattr(estimator, "fit_transform")
and transform is not None
)
if not set_output_for_transform:
# If estimator can not transform, then `set_output` does not need to be
# called.
return
if not hasattr(estimator, "set_output"):
raise ValueError(
f"Unable to configure output for {estimator} because `set_output` "
"is not available."
)
return estimator.set_output(transform=transform)
| _safe_set_output |
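A hypothetical usage sketch for the row above, assuming the private helper is importable from sklearn.utils._set_output; it simply forwards to set_output when the child estimator can transform.

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.utils._set_output import _safe_set_output  # private, assumed importable

scaler = _safe_set_output(StandardScaler(), transform="pandas")
X_out = scaler.fit_transform(np.array([[0.0, 1.0], [2.0, 3.0]]))
print(type(X_out).__name__)  # 'DataFrame' when pandas is installed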
scikit-learn | 270 | sklearn/utils/metaestimators.py | def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels.
Slice X, y according to indices for cross-validation, but take care of
precomputed kernel-matrices or pairwise affinities / distances.
If ``estimator._pairwise is True``, X needs to be square and
we slice rows and columns. If ``train_indices`` is not None,
we slice rows using ``indices`` (assumed the test set) and columns
using ``train_indices``, indicating the training set.
Labels y will always be indexed only along the first axis.
Parameters
----------
estimator : object
Estimator to determine whether we should slice only rows or rows and
columns.
X : array-like, sparse matrix or iterable
Data to be indexed. If ``estimator._pairwise is True``,
this needs to be a square array-like or sparse matrix.
y : array-like, sparse matrix or iterable
Targets to be indexed.
indices : array of int
Rows to select from X and y.
If ``estimator._pairwise is True`` and ``train_indices is None``
then ``indices`` will also be used to slice columns.
train_indices : array of int or None, default=None
If ``estimator._pairwise is True`` and ``train_indices is not None``,
        then ``train_indices`` will be used to slice the columns of X.
Returns
-------
X_subset : array-like, sparse matrix or list
Indexed data.
y_subset : array-like, sparse matrix or list
Indexed targets.
"""
| /usr/src/app/target_test_cases/failed_tests__safe_split.txt | def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels.
Slice X, y according to indices for cross-validation, but take care of
precomputed kernel-matrices or pairwise affinities / distances.
If ``estimator._pairwise is True``, X needs to be square and
we slice rows and columns. If ``train_indices`` is not None,
we slice rows using ``indices`` (assumed the test set) and columns
using ``train_indices``, indicating the training set.
Labels y will always be indexed only along the first axis.
Parameters
----------
estimator : object
Estimator to determine whether we should slice only rows or rows and
columns.
X : array-like, sparse matrix or iterable
Data to be indexed. If ``estimator._pairwise is True``,
this needs to be a square array-like or sparse matrix.
y : array-like, sparse matrix or iterable
Targets to be indexed.
indices : array of int
Rows to select from X and y.
If ``estimator._pairwise is True`` and ``train_indices is None``
then ``indices`` will also be used to slice columns.
train_indices : array of int or None, default=None
If ``estimator._pairwise is True`` and ``train_indices is not None``,
        then ``train_indices`` will be used to slice the columns of X.
Returns
-------
X_subset : array-like, sparse matrix or list
Indexed data.
y_subset : array-like, sparse matrix or list
Indexed targets.
"""
if get_tags(estimator).input_tags.pairwise:
if not hasattr(X, "shape"):
raise ValueError(
"Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices."
)
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = _safe_indexing(X, indices)
if y is not None:
y_subset = _safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
| _safe_split |
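A hypothetical usage sketch for the row above with a precomputed kernel, assuming the private helper is importable from sklearn.utils.metaestimators; rows are sliced with the test indices and columns with the training indices, as the docstring describes.

import numpy as np
from sklearn.svm import SVC
from sklearn.utils.metaestimators import _safe_split  # private, assumed importable

X = np.random.RandomState(0).randn(6, 2)
K = X @ X.T                                    # precomputed linear kernel, shape (6, 6)
y = np.array([0, 0, 0, 1, 1, 1])
est = SVC(kernel="precomputed")                # pairwise estimator

train, test = np.array([0, 1, 3, 4]), np.array([2, 5])
K_train, y_train = _safe_split(est, K, y, train)
K_test, y_test = _safe_split(est, K, y, test, train_indices=train)
print(K_train.shape, K_test.shape)             # (4, 4) (2, 4)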
scikit-learn | 271 | sklearn/linear_model/_coordinate_descent.py | def _set_order(X, y, order="C"):
"""Change the order of X and y if necessary.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
order : {None, 'C', 'F'}
If 'C', dense arrays are returned as C-ordered, sparse matrices in csr
        format. If 'F', dense arrays are returned as F-ordered, sparse matrices
in csc format.
Returns
-------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data with guaranteed order.
y : ndarray of shape (n_samples,)
Target values with guaranteed order.
"""
| /usr/src/app/target_test_cases/failed_tests__set_order.txt | def _set_order(X, y, order="C"):
"""Change the order of X and y if necessary.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
order : {None, 'C', 'F'}
If 'C', dense arrays are returned as C-ordered, sparse matrices in csr
        format. If 'F', dense arrays are returned as F-ordered, sparse matrices
in csc format.
Returns
-------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data with guaranteed order.
y : ndarray of shape (n_samples,)
Target values with guaranteed order.
"""
if order not in [None, "C", "F"]:
raise ValueError(
"Unknown value for order. Got {} instead of None, 'C' or 'F'.".format(order)
)
sparse_X = sparse.issparse(X)
sparse_y = sparse.issparse(y)
if order is not None:
sparse_format = "csc" if order == "F" else "csr"
if sparse_X:
X = X.asformat(sparse_format, copy=False)
else:
X = np.asarray(X, order=order)
if sparse_y:
y = y.asformat(sparse_format)
else:
y = np.asarray(y, order=order)
return X, y
| _set_order |
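A hypothetical usage sketch for the row above, assuming the private helper is importable from sklearn.linear_model._coordinate_descent; 'F' requests Fortran-ordered dense arrays (CSC for sparse inputs), the layout the coordinate-descent solver prefers.

import numpy as np
from sklearn.linear_model._coordinate_descent import _set_order  # private, assumed importable

X = np.arange(6, dtype=float).reshape(3, 2)  # C-ordered by default
y = np.array([1.0, 2.0, 3.0])
X_f, y_f = _set_order(X, y, order="F")
print(X_f.flags["F_CONTIGUOUS"])  # True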
scikit-learn | 272 | sklearn/ensemble/_base.py | def _set_random_states(estimator, random_state=None):
"""Set fixed random_state parameters for an estimator.
Finds all parameters ending ``random_state`` and sets them to integers
derived from ``random_state``.
Parameters
----------
estimator : estimator supporting get/set_params
Estimator with potential randomness managed by random_state
parameters.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the generation of the random
integers. Pass an int for reproducible output across multiple function
calls.
See :term:`Glossary <random_state>`.
Notes
-----
This does not necessarily set *all* ``random_state`` attributes that
control an estimator's randomness, only those accessible through
``estimator.get_params()``. ``random_state``s not controlled include
those belonging to:
* cross-validation splitters
* ``scipy.stats`` rvs
"""
| /usr/src/app/target_test_cases/failed_tests__set_random_states.txt | def _set_random_states(estimator, random_state=None):
"""Set fixed random_state parameters for an estimator.
Finds all parameters ending ``random_state`` and sets them to integers
derived from ``random_state``.
Parameters
----------
estimator : estimator supporting get/set_params
Estimator with potential randomness managed by random_state
parameters.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the generation of the random
integers. Pass an int for reproducible output across multiple function
calls.
See :term:`Glossary <random_state>`.
Notes
-----
This does not necessarily set *all* ``random_state`` attributes that
control an estimator's randomness, only those accessible through
``estimator.get_params()``. ``random_state``s not controlled include
those belonging to:
* cross-validation splitters
* ``scipy.stats`` rvs
"""
random_state = check_random_state(random_state)
to_set = {}
for key in sorted(estimator.get_params(deep=True)):
if key == "random_state" or key.endswith("__random_state"):
to_set[key] = random_state.randint(np.iinfo(np.int32).max)
if to_set:
estimator.set_params(**to_set)
| _set_random_states |
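A hypothetical usage sketch for the row above, assuming the private helper is importable from sklearn.ensemble._base; every random_state parameter reachable through get_params, including nested ones, is replaced by a fixed integer seed.

from sklearn.ensemble._base import _set_random_states  # private, assumed importable
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

pipe = make_pipeline(StandardScaler(), SGDClassifier(random_state=None))
_set_random_states(pipe, random_state=42)
print(pipe.get_params()["sgdclassifier__random_state"])  # now a fixed integer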
scikit-learn | 273 | sklearn/calibration.py | def _sigmoid_calibration(
predictions, y, sample_weight=None, max_abs_prediction_threshold=30
):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
predictions : ndarray of shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray of shape (n_samples,)
The targets.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
| /usr/src/app/target_test_cases/failed_tests__sigmoid_calibration.txt | def _sigmoid_calibration(
predictions, y, sample_weight=None, max_abs_prediction_threshold=30
):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
predictions : ndarray of shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray of shape (n_samples,)
The targets.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
predictions = column_or_1d(predictions)
y = column_or_1d(y)
F = predictions # F follows Platt's notations
scale_constant = 1.0
max_prediction = np.max(np.abs(F))
# If the predictions have large values we scale them in order to bring
# them within a suitable range. This has no effect on the final
    # (prediction) result because linear models like Logistic Regression
# without a penalty are invariant to multiplying the features by a
# constant.
if max_prediction >= max_abs_prediction_threshold:
scale_constant = max_prediction
# We rescale the features in a copy: inplace rescaling could confuse
# the caller and make the code harder to reason about.
F = F / scale_constant
# Bayesian priors (see Platt end of section 2.2):
# It corresponds to the number of samples, taking into account the
# `sample_weight`.
mask_negative_samples = y <= 0
if sample_weight is not None:
prior0 = (sample_weight[mask_negative_samples]).sum()
prior1 = (sample_weight[~mask_negative_samples]).sum()
else:
prior0 = float(np.sum(mask_negative_samples))
prior1 = y.shape[0] - prior0
T = np.zeros_like(y, dtype=predictions.dtype)
T[y > 0] = (prior1 + 1.0) / (prior1 + 2.0)
T[y <= 0] = 1.0 / (prior0 + 2.0)
bin_loss = HalfBinomialLoss()
def loss_grad(AB):
# .astype below is needed to ensure y_true and raw_prediction have the
# same dtype. With result = np.float64(0) * np.array([1, 2], dtype=np.float32)
# - in Numpy 2, result.dtype is float64
# - in Numpy<2, result.dtype is float32
raw_prediction = -(AB[0] * F + AB[1]).astype(dtype=predictions.dtype)
l, g = bin_loss.loss_gradient(
y_true=T,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
)
loss = l.sum()
# TODO: Remove casting to np.float64 when minimum supported SciPy is 1.11.2
# With SciPy >= 1.11.2, the LBFGS implementation will cast to float64
# https://github.com/scipy/scipy/pull/18825.
# Here we cast to float64 to support SciPy < 1.11.2
grad = np.asarray([-g @ F, -g.sum()], dtype=np.float64)
return loss, grad
AB0 = np.array([0.0, log((prior0 + 1.0) / (prior1 + 1.0))])
opt_result = minimize(
loss_grad,
AB0,
method="L-BFGS-B",
jac=True,
options={
"gtol": 1e-6,
"ftol": 64 * np.finfo(float).eps,
},
)
AB_ = opt_result.x
# The tuned multiplicative parameter is converted back to the original
# input feature scale. The offset parameter does not need rescaling since
# we did not rescale the outcome variable.
return AB_[0] / scale_constant, AB_[1]
| _sigmoid_calibration |
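A hypothetical usage sketch for the row above, assuming the private helper is importable from sklearn.calibration; the returned slope and intercept are applied as p = expit(-(a * f + b)), which is how the calibration module uses them.

import numpy as np
from scipy.special import expit
from sklearn.calibration import _sigmoid_calibration  # private, assumed importable

rng = np.random.RandomState(0)
scores = 2.0 * rng.randn(200)
y = (scores + rng.randn(200) > 0).astype(int)  # labels loosely tied to the scores
a, b = _sigmoid_calibration(scores, y)
proba = expit(-(a * scores + b))
print(proba.min() >= 0.0 and proba.max() <= 1.0)  # True: valid probabilities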
scikit-learn | 274 | sklearn/metrics/cluster/_unsupervised.py | def _silhouette_reduce(D_chunk, start, labels, label_freqs):
"""Accumulate silhouette statistics for vertical chunk of X.
Parameters
----------
D_chunk : {array-like, sparse matrix} of shape (n_chunk_samples, n_samples)
Precomputed distances for a chunk. If a sparse matrix is provided,
only CSR format is accepted.
start : int
First index in the chunk.
labels : array-like of shape (n_samples,)
Corresponding cluster labels, encoded as {0, ..., n_clusters-1}.
label_freqs : array-like
Distribution of cluster labels in ``labels``.
"""
| /usr/src/app/target_test_cases/failed_tests__silhouette_reduce.txt | def _silhouette_reduce(D_chunk, start, labels, label_freqs):
"""Accumulate silhouette statistics for vertical chunk of X.
Parameters
----------
D_chunk : {array-like, sparse matrix} of shape (n_chunk_samples, n_samples)
Precomputed distances for a chunk. If a sparse matrix is provided,
only CSR format is accepted.
start : int
First index in the chunk.
labels : array-like of shape (n_samples,)
Corresponding cluster labels, encoded as {0, ..., n_clusters-1}.
label_freqs : array-like
Distribution of cluster labels in ``labels``.
"""
n_chunk_samples = D_chunk.shape[0]
# accumulate distances from each sample to each cluster
cluster_distances = np.zeros(
(n_chunk_samples, len(label_freqs)), dtype=D_chunk.dtype
)
if issparse(D_chunk):
if D_chunk.format != "csr":
raise TypeError(
"Expected CSR matrix. Please pass sparse matrix in CSR format."
)
for i in range(n_chunk_samples):
indptr = D_chunk.indptr
indices = D_chunk.indices[indptr[i] : indptr[i + 1]]
sample_weights = D_chunk.data[indptr[i] : indptr[i + 1]]
sample_labels = np.take(labels, indices)
cluster_distances[i] += np.bincount(
sample_labels, weights=sample_weights, minlength=len(label_freqs)
)
else:
for i in range(n_chunk_samples):
sample_weights = D_chunk[i]
sample_labels = labels
cluster_distances[i] += np.bincount(
sample_labels, weights=sample_weights, minlength=len(label_freqs)
)
# intra_index selects intra-cluster distances within cluster_distances
end = start + n_chunk_samples
intra_index = (np.arange(n_chunk_samples), labels[start:end])
# intra_cluster_distances are averaged over cluster size outside this function
intra_cluster_distances = cluster_distances[intra_index]
# of the remaining distances we normalise and extract the minimum
cluster_distances[intra_index] = np.inf
cluster_distances /= label_freqs
inter_cluster_distances = cluster_distances.min(axis=1)
return intra_cluster_distances, inter_cluster_distances
| _silhouette_reduce |
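A hypothetical usage sketch for the row above on a single full-size chunk, assuming the private helper is importable from sklearn.metrics.cluster._unsupervised; the two returned arrays are the building blocks of the silhouette formula (b - a) / max(a, b).

import numpy as np
from sklearn.metrics import pairwise_distances
from sklearn.metrics.cluster._unsupervised import _silhouette_reduce  # private, assumed importable

X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
labels = np.array([0, 0, 1, 1])
D = pairwise_distances(X)
intra, inter = _silhouette_reduce(D, start=0, labels=labels, label_freqs=np.bincount(labels))
print(intra)  # summed intra-cluster distances (averaged over cluster size outside this helper)
print(inter)  # mean distance to the nearest other cluster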
scikit-learn | 275 | sklearn/utils/fixes.py | def _smallest_admissible_index_dtype(arrays=(), maxval=None, check_contents=False):
"""Based on input (integer) arrays `a`, determine a suitable index data
type that can hold the data in the arrays.
    This function returns `np.int64` if it is either required by `maxval`, by the
    largest precision of the dtypes of the arrays passed as argument, or by their
    contents (when `check_contents is True`). If none of these conditions requires
    `np.int64`, then this function returns `np.int32`.
Parameters
----------
arrays : ndarray or tuple of ndarrays, default=()
Input arrays whose types/contents to check.
maxval : float, default=None
Maximum value needed.
check_contents : bool, default=False
Whether to check the values in the arrays and not just their types.
By default, check only the types.
Returns
-------
dtype : {np.int32, np.int64}
Suitable index data type (int32 or int64).
"""
| /usr/src/app/target_test_cases/failed_tests__smallest_admissible_index_dtype.txt | def _smallest_admissible_index_dtype(arrays=(), maxval=None, check_contents=False):
"""Based on input (integer) arrays `a`, determine a suitable index data
type that can hold the data in the arrays.
    This function returns `np.int64` if it is either required by `maxval`, by the
    largest precision of the dtypes of the arrays passed as argument, or by their
    contents (when `check_contents is True`). If none of these conditions requires
    `np.int64`, then this function returns `np.int32`.
Parameters
----------
arrays : ndarray or tuple of ndarrays, default=()
Input arrays whose types/contents to check.
maxval : float, default=None
Maximum value needed.
check_contents : bool, default=False
Whether to check the values in the arrays and not just their types.
By default, check only the types.
Returns
-------
dtype : {np.int32, np.int64}
Suitable index data type (int32 or int64).
"""
int32min = np.int32(np.iinfo(np.int32).min)
int32max = np.int32(np.iinfo(np.int32).max)
if maxval is not None:
if maxval > np.iinfo(np.int64).max:
raise ValueError(
f"maxval={maxval} is to large to be represented as np.int64."
)
if maxval > int32max:
return np.int64
if isinstance(arrays, np.ndarray):
arrays = (arrays,)
for arr in arrays:
if not isinstance(arr, np.ndarray):
raise TypeError(
f"Arrays should be of type np.ndarray, got {type(arr)} instead."
)
if not np.issubdtype(arr.dtype, np.integer):
raise ValueError(
f"Array dtype {arr.dtype} is not supported for index dtype. We expect "
"integral values."
)
if not np.can_cast(arr.dtype, np.int32):
if not check_contents:
# when `check_contents` is False, we stay on the safe side and return
# np.int64.
return np.int64
if arr.size == 0:
# a bigger type not needed yet, let's look at the next array
continue
else:
maxval = arr.max()
minval = arr.min()
if minval < int32min or maxval > int32max:
# a big index type is actually needed
return np.int64
return np.int32
| _smallest_admissible_index_dtype |
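A hypothetical usage sketch for the row above, assuming the private helper is importable from sklearn.utils.fixes; int32 is chosen whenever the indices fit, int64 otherwise.

import numpy as np
from sklearn.utils.fixes import _smallest_admissible_index_dtype  # private, assumed importable

small = np.array([0, 5, 10], dtype=np.int64)  # int64 dtype but small contents
print(_smallest_admissible_index_dtype(small, check_contents=True) is np.int32)  # True
print(_smallest_admissible_index_dtype(maxval=2**40) is np.int64)                # True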
scikit-learn | 276 | sklearn/decomposition/_dict_learning.py | def _update_dict(
dictionary,
Y,
code,
A=None,
B=None,
verbose=False,
random_state=None,
positive=False,
):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary : ndarray of shape (n_components, n_features)
Value of the dictionary at the previous iteration.
Y : ndarray of shape (n_samples, n_features)
Data matrix.
code : ndarray of shape (n_samples, n_components)
Sparse coding of the data against which to optimize the dictionary.
A : ndarray of shape (n_components, n_components), default=None
Together with `B`, sufficient stats of the online model to update the
dictionary.
B : ndarray of shape (n_features, n_components), default=None
Together with `A`, sufficient stats of the online model to update the
dictionary.
verbose: bool, default=False
Degree of output the procedure will print.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
positive : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
"""
| /usr/src/app/target_test_cases/failed_tests__update_dict.txt | def _update_dict(
dictionary,
Y,
code,
A=None,
B=None,
verbose=False,
random_state=None,
positive=False,
):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary : ndarray of shape (n_components, n_features)
Value of the dictionary at the previous iteration.
Y : ndarray of shape (n_samples, n_features)
Data matrix.
code : ndarray of shape (n_samples, n_components)
Sparse coding of the data against which to optimize the dictionary.
A : ndarray of shape (n_components, n_components), default=None
Together with `B`, sufficient stats of the online model to update the
dictionary.
B : ndarray of shape (n_features, n_components), default=None
Together with `A`, sufficient stats of the online model to update the
dictionary.
verbose: bool, default=False
Degree of output the procedure will print.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
positive : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
"""
n_samples, n_components = code.shape
random_state = check_random_state(random_state)
if A is None:
A = code.T @ code
if B is None:
B = Y.T @ code
n_unused = 0
for k in range(n_components):
if A[k, k] > 1e-6:
# 1e-6 is arbitrary but consistent with the spams implementation
dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k]
else:
# kth atom is almost never used -> sample a new one from the data
newd = Y[random_state.choice(n_samples)]
# add small noise to avoid making the sparse coding ill conditioned
noise_level = 0.01 * (newd.std() or 1) # avoid 0 std
noise = random_state.normal(0, noise_level, size=len(newd))
dictionary[k] = newd + noise
code[:, k] = 0
n_unused += 1
if positive:
np.clip(dictionary[k], 0, None, out=dictionary[k])
# Projection on the constraint set ||V_k|| <= 1
dictionary[k] /= max(linalg.norm(dictionary[k]), 1)
if verbose and n_unused > 0:
print(f"{n_unused} unused atoms resampled.")
| _update_dict |
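A hypothetical usage sketch for the row above, assuming the private helper is importable from sklearn.decomposition._dict_learning; starting from unit-norm atoms, one pass of in-place updates should not increase the reconstruction error.

import numpy as np
from sklearn.decomposition._dict_learning import _update_dict  # private, assumed importable

rng = np.random.RandomState(0)
Y = rng.randn(30, 8)      # data
code = rng.randn(30, 4)   # current codes
dictionary = rng.randn(4, 8)
dictionary /= np.linalg.norm(dictionary, axis=1, keepdims=True)  # feasible start: unit atoms

before = np.linalg.norm(Y - code @ dictionary)
_update_dict(dictionary, Y, code, random_state=0)  # modifies `dictionary` in place
after = np.linalg.norm(Y - code @ dictionary)
print(after <= before)  # True in this setup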
scikit-learn | 277 | sklearn/utils/stats.py | def _weighted_percentile(array, sample_weight, percentile=50):
"""Compute weighted percentile
Computes lower weighted percentile. If `array` is a 2D array, the
`percentile` is computed along the axis 0.
.. versionchanged:: 0.24
Accepts 2D `array`.
Parameters
----------
array : 1D or 2D array
Values to take the weighted percentile of.
sample_weight: 1D or 2D array
Weights for each value in `array`. Must be same shape as `array` or
of shape `(array.shape[0],)`.
percentile: int or float, default=50
Percentile to compute. Must be value between 0 and 100.
Returns
-------
percentile : int if `array` 1D, ndarray if `array` 2D
Weighted percentile.
"""
| /usr/src/app/target_test_cases/failed_tests__weighted_percentile.txt | def _weighted_percentile(array, sample_weight, percentile=50):
"""Compute weighted percentile
Computes lower weighted percentile. If `array` is a 2D array, the
`percentile` is computed along the axis 0.
.. versionchanged:: 0.24
Accepts 2D `array`.
Parameters
----------
array : 1D or 2D array
Values to take the weighted percentile of.
sample_weight: 1D or 2D array
Weights for each value in `array`. Must be same shape as `array` or
of shape `(array.shape[0],)`.
percentile: int or float, default=50
Percentile to compute. Must be value between 0 and 100.
Returns
-------
percentile : int if `array` 1D, ndarray if `array` 2D
Weighted percentile.
"""
n_dim = array.ndim
if n_dim == 0:
return array[()]
if array.ndim == 1:
array = array.reshape((-1, 1))
# When sample_weight 1D, repeat for each array.shape[1]
if array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]:
sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T
sorted_idx = np.argsort(array, axis=0)
sorted_weights = np.take_along_axis(sample_weight, sorted_idx, axis=0)
# Find index of median prediction for each sample
weight_cdf = stable_cumsum(sorted_weights, axis=0)
adjusted_percentile = percentile / 100 * weight_cdf[-1]
# For percentile=0, ignore leading observations with sample_weight=0. GH20528
mask = adjusted_percentile == 0
adjusted_percentile[mask] = np.nextafter(
adjusted_percentile[mask], adjusted_percentile[mask] + 1
)
percentile_idx = np.array(
[
np.searchsorted(weight_cdf[:, i], adjusted_percentile[i])
for i in range(weight_cdf.shape[1])
]
)
percentile_idx = np.array(percentile_idx)
# In rare cases, percentile_idx equals to sorted_idx.shape[0]
max_idx = sorted_idx.shape[0] - 1
percentile_idx = np.apply_along_axis(
lambda x: np.clip(x, 0, max_idx), axis=0, arr=percentile_idx
)
col_index = np.arange(array.shape[1])
percentile_in_sorted = sorted_idx[percentile_idx, col_index]
percentile = array[percentile_in_sorted, col_index]
return percentile[0] if n_dim == 1 else percentile
| _weighted_percentile |
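A hypothetical usage sketch for the row above, assuming the private helper is importable from sklearn.utils.stats; this is the lower weighted percentile, so there is no interpolation between values.

import numpy as np
from sklearn.utils.stats import _weighted_percentile  # private, assumed importable

values = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.array([1.0, 1.0, 1.0, 3.0])
# Total weight is 6, so the 50th percentile is the first value whose cumulative
# weight reaches 3, namely 3.0.
print(_weighted_percentile(values, weights, percentile=50))  # 3.0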
scikit-learn | 278 | sklearn/utils/_estimator_html_repr.py | def _write_label_html(
out,
name,
name_details,
name_caption=None,
doc_link_label=None,
outer_class="sk-label-container",
inner_class="sk-label",
checked=False,
doc_link="",
is_fitted_css_class="",
is_fitted_icon="",
):
"""Write labeled html with or without a dropdown with named details.
Parameters
----------
out : file-like object
The file to write the HTML representation to.
name : str
The label for the estimator. It corresponds either to the estimator class name
for a simple estimator or in the case of a `Pipeline` and `ColumnTransformer`,
it corresponds to the name of the step.
name_details : str
The details to show as content in the dropdown part of the toggleable label. It
can contain information such as non-default parameters or column information for
`ColumnTransformer`.
name_caption : str, default=None
The caption below the name. If `None`, no caption will be created.
doc_link_label : str, default=None
The label for the documentation link. If provided, the label would be
"Documentation for {doc_link_label}". Otherwise it will look for `name`.
outer_class : {"sk-label-container", "sk-item"}, default="sk-label-container"
The CSS class for the outer container.
inner_class : {"sk-label", "sk-estimator"}, default="sk-label"
The CSS class for the inner container.
checked : bool, default=False
Whether the dropdown is folded or not. With a single estimator, we intend to
unfold the content.
doc_link : str, default=""
The link to the documentation for the estimator. If an empty string, no link is
added to the diagram. This can be generated for an estimator if it uses the
`_HTMLDocumentationLinkMixin`.
is_fitted_css_class : {"", "fitted"}
The CSS class to indicate whether or not the estimator is fitted. The
empty string means that the estimator is not fitted and "fitted" means that the
estimator is fitted.
is_fitted_icon : str, default=""
The HTML representation to show the fitted information in the diagram. An empty
string means that no information is shown.
"""
| /usr/src/app/target_test_cases/failed_tests__write_label_html.txt | def _write_label_html(
out,
name,
name_details,
name_caption=None,
doc_link_label=None,
outer_class="sk-label-container",
inner_class="sk-label",
checked=False,
doc_link="",
is_fitted_css_class="",
is_fitted_icon="",
):
"""Write labeled html with or without a dropdown with named details.
Parameters
----------
out : file-like object
The file to write the HTML representation to.
name : str
The label for the estimator. It corresponds either to the estimator class name
for a simple estimator or in the case of a `Pipeline` and `ColumnTransformer`,
it corresponds to the name of the step.
name_details : str
The details to show as content in the dropdown part of the toggleable label. It
can contain information such as non-default parameters or column information for
`ColumnTransformer`.
name_caption : str, default=None
The caption below the name. If `None`, no caption will be created.
doc_link_label : str, default=None
The label for the documentation link. If provided, the label would be
"Documentation for {doc_link_label}". Otherwise it will look for `name`.
outer_class : {"sk-label-container", "sk-item"}, default="sk-label-container"
The CSS class for the outer container.
inner_class : {"sk-label", "sk-estimator"}, default="sk-label"
The CSS class for the inner container.
checked : bool, default=False
Whether the dropdown is folded or not. With a single estimator, we intend to
unfold the content.
doc_link : str, default=""
The link to the documentation for the estimator. If an empty string, no link is
added to the diagram. This can be generated for an estimator if it uses the
`_HTMLDocumentationLinkMixin`.
is_fitted_css_class : {"", "fitted"}
The CSS class to indicate whether or not the estimator is fitted. The
empty string means that the estimator is not fitted and "fitted" means that the
estimator is fitted.
is_fitted_icon : str, default=""
The HTML representation to show the fitted information in the diagram. An empty
string means that no information is shown.
"""
out.write(
f'<div class="{outer_class}"><div'
f' class="{inner_class} {is_fitted_css_class} sk-toggleable">'
)
name = html.escape(name)
if name_details is not None:
name_details = html.escape(str(name_details))
checked_str = "checked" if checked else ""
est_id = _ESTIMATOR_ID_COUNTER.get_id()
if doc_link:
doc_label = "<span>Online documentation</span>"
if doc_link_label is not None:
doc_label = f"<span>Documentation for {doc_link_label}</span>"
elif name is not None:
doc_label = f"<span>Documentation for {name}</span>"
doc_link = (
f'<a class="sk-estimator-doc-link {is_fitted_css_class}"'
f' rel="noreferrer" target="_blank" href="{doc_link}">?{doc_label}</a>'
)
name_caption_div = (
""
if name_caption is None
else f'<div class="caption">{html.escape(name_caption)}</div>'
)
name_caption_div = f"<div><div>{name}</div>{name_caption_div}</div>"
links_div = (
f"<div>{doc_link}{is_fitted_icon}</div>"
if doc_link or is_fitted_icon
else ""
)
label_html = (
f'<label for="{est_id}" class="sk-toggleable__label {is_fitted_css_class} '
f'sk-toggleable__label-arrow">{name_caption_div}{links_div}</label>'
)
fmt_str = (
f'<input class="sk-toggleable__control sk-hidden--visually" id="{est_id}" '
f'type="checkbox" {checked_str}>{label_html}<div '
f'class="sk-toggleable__content {is_fitted_css_class}"><pre>{name_details}'
"</pre></div> "
)
out.write(fmt_str)
else:
out.write(f"<label>{name}</label>")
out.write("</div></div>") # outer_class inner_class
| _write_label_html |
scikit-learn | 279 | sklearn/model_selection/_search.py | def _yield_masked_array_for_each_param(candidate_params):
"""
Yield a masked array for each candidate param.
`candidate_params` is a sequence of params which were used in
a `GridSearchCV`. We use masked arrays for the results, as not
all params are necessarily present in each element of
`candidate_params`. For example, if using `GridSearchCV` with
a `SVC` model, then one might search over params like:
- kernel=["rbf"], gamma=[0.1, 1]
- kernel=["poly"], degree=[1, 2]
and then param `'gamma'` would not be present in entries of
`candidate_params` corresponding to `kernel='poly'`.
"""
| /usr/src/app/target_test_cases/failed_tests__yield_masked_array_for_each_param.txt | def _yield_masked_array_for_each_param(candidate_params):
"""
Yield a masked array for each candidate param.
`candidate_params` is a sequence of params which were used in
a `GridSearchCV`. We use masked arrays for the results, as not
all params are necessarily present in each element of
`candidate_params`. For example, if using `GridSearchCV` with
a `SVC` model, then one might search over params like:
- kernel=["rbf"], gamma=[0.1, 1]
- kernel=["poly"], degree=[1, 2]
and then param `'gamma'` would not be present in entries of
`candidate_params` corresponding to `kernel='poly'`.
"""
n_candidates = len(candidate_params)
param_results = defaultdict(dict)
for cand_idx, params in enumerate(candidate_params):
for name, value in params.items():
param_results["param_%s" % name][cand_idx] = value
for key, param_result in param_results.items():
param_list = list(param_result.values())
try:
arr = np.array(param_list)
except ValueError:
# This can happen when param_list contains lists of different
# lengths, for example:
# param_list=[[1], [2, 3]]
arr_dtype = np.dtype(object)
else:
# There are two cases when we don't use the automatically inferred
# dtype when creating the array and we use object instead:
# - string dtype
# - when array.ndim > 1, that means that param_list was something
# like a list of same-size sequences, which gets turned into a
# multi-dimensional array but we want a 1d array
arr_dtype = arr.dtype if arr.dtype.kind != "U" and arr.ndim == 1 else object
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate (which may not contain all the params).
ma = MaskedArray(np.empty(n_candidates), mask=True, dtype=arr_dtype)
for index, value in param_result.items():
# Setting the value at an index unmasks that index
ma[index] = value
yield (key, ma)
| _yield_masked_array_for_each_param |
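A hypothetical usage sketch for the row above, assuming the private helper is importable from sklearn.model_selection._search; parameters missing from a candidate stay masked, which is how cv_results_ marks non-applicable entries.

from sklearn.model_selection._search import _yield_masked_array_for_each_param  # private, assumed importable

candidates = [{"kernel": "rbf", "gamma": 0.1}, {"kernel": "poly", "degree": 2}]
for key, ma in _yield_masked_array_for_each_param(candidates):
    print(key, ma.mask.tolist())
# param_kernel [False, False]
# param_gamma [False, True]
# param_degree [True, False]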
scikit-learn | 280 | sklearn/utils/_testing.py | def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=""):
"""Assert allclose for sparse and dense data.
Both x and y need to be either sparse or dense, they
can't be mixed.
Parameters
----------
x : {array-like, sparse matrix}
First array to compare.
y : {array-like, sparse matrix}
Second array to compare.
rtol : float, default=1e-07
relative tolerance; see numpy.allclose.
atol : float, default=1e-9
absolute tolerance; see numpy.allclose. Note that the default here is
more tolerant than the default for numpy.testing.assert_allclose, where
atol=0.
err_msg : str, default=''
Error message to raise.
"""
| /usr/src/app/target_test_cases/failed_tests_assert_allclose_dense_sparse.txt | def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=""):
"""Assert allclose for sparse and dense data.
Both x and y need to be either sparse or dense, they
can't be mixed.
Parameters
----------
x : {array-like, sparse matrix}
First array to compare.
y : {array-like, sparse matrix}
Second array to compare.
rtol : float, default=1e-07
relative tolerance; see numpy.allclose.
atol : float, default=1e-9
absolute tolerance; see numpy.allclose. Note that the default here is
more tolerant than the default for numpy.testing.assert_allclose, where
atol=0.
err_msg : str, default=''
Error message to raise.
"""
if sp.sparse.issparse(x) and sp.sparse.issparse(y):
x = x.tocsr()
y = y.tocsr()
x.sum_duplicates()
y.sum_duplicates()
assert_array_equal(x.indices, y.indices, err_msg=err_msg)
assert_array_equal(x.indptr, y.indptr, err_msg=err_msg)
assert_allclose(x.data, y.data, rtol=rtol, atol=atol, err_msg=err_msg)
elif not sp.sparse.issparse(x) and not sp.sparse.issparse(y):
# both dense
assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg)
else:
raise ValueError(
"Can only compare two sparse matrices, not a sparse matrix and an array."
)
| assert_allclose_dense_sparse |
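A minimal usage sketch for assert_allclose_dense_sparse, assuming it is imported from the private module sklearn.utils._testing (internal and subject to change):

import numpy as np
import scipy.sparse as sp
from sklearn.utils._testing import assert_allclose_dense_sparse

a = sp.csr_matrix(np.array([[0.0, 1.0], [2.0, 0.0]]))
b = sp.csr_matrix(np.array([[0.0, 1.0 + 1e-12], [2.0, 0.0]]))

# Passes: same sparsity structure, values equal within rtol/atol.
assert_allclose_dense_sparse(a, b)

# Mixing sparse and dense raises ValueError.
try:
    assert_allclose_dense_sparse(a, b.toarray())
except ValueError as exc:
    print(exc)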
scikit-learn | 281 | sklearn/metrics/tests/test_pairwise_distances_reduction.py | def assert_compatible_argkmin_results(
neighbors_dists_a,
neighbors_dists_b,
neighbors_indices_a,
neighbors_indices_b,
rtol=1e-5,
atol=1e-6,
):
"""Assert that argkmin results are valid up to rounding errors.
This function asserts that the results of argkmin queries are valid up to:
- rounding error tolerance on distance values;
- permutations of indices for distances values that differ up to the
expected precision level.
Furthermore, the distances must be sorted.
To be used for testing neighbors queries on float32 datasets: we accept
neighbors rank swaps only if they are caused by small rounding errors on
the distance computations.
"""
| /usr/src/app/target_test_cases/failed_tests_assert_compatible_argkmin_results.txt | def assert_compatible_argkmin_results(
neighbors_dists_a,
neighbors_dists_b,
neighbors_indices_a,
neighbors_indices_b,
rtol=1e-5,
atol=1e-6,
):
"""Assert that argkmin results are valid up to rounding errors.
This function asserts that the results of argkmin queries are valid up to:
- rounding error tolerance on distance values;
- permutations of indices for distances values that differ up to the
expected precision level.
Furthermore, the distances must be sorted.
To be used for testing neighbors queries on float32 datasets: we accept
neighbors rank swaps only if they are caused by small rounding errors on
the distance computations.
"""
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
assert (
neighbors_dists_a.shape
== neighbors_dists_b.shape
== neighbors_indices_a.shape
== neighbors_indices_b.shape
), "Arrays of results have incompatible shapes."
n_queries, _ = neighbors_dists_a.shape
# Asserting equality results one row at a time
for query_idx in range(n_queries):
dist_row_a = neighbors_dists_a[query_idx]
dist_row_b = neighbors_dists_b[query_idx]
indices_row_a = neighbors_indices_a[query_idx]
indices_row_b = neighbors_indices_b[query_idx]
assert is_sorted(dist_row_a), f"Distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row_b), f"Distances aren't sorted on row {query_idx}"
assert_same_distances_for_common_neighbors(
query_idx,
dist_row_a,
dist_row_b,
indices_row_a,
indices_row_b,
rtol,
atol,
)
# Check that any neighbor with distances below the rounding error
# threshold have matching indices. The threshold is the distance to the
# k-th neighbors minus the expected precision level:
#
# (1 - rtol) * dist_k - atol
#
# Where dist_k is defined as the maximum distance to the kth-neighbor
# among the two result sets. This way of defining the threshold is
# stricter than taking the minimum of the two.
threshold = (1 - rtol) * np.maximum(
np.max(dist_row_a), np.max(dist_row_b)
) - atol
assert_no_missing_neighbors(
query_idx,
dist_row_a,
dist_row_b,
indices_row_a,
indices_row_b,
threshold,
)
| assert_compatible_argkmin_results |
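The helper above lives inside a test module, so instead of importing it, here is a small plain-numpy sketch of the rounding-error threshold it uses to decide which neighbors must match exactly (an illustration of the check, not the test helper itself; the distances are made up):

import numpy as np

rtol, atol = 1e-5, 1e-6
dist_row_a = np.array([0.10, 0.20, 0.300000])   # k nearest distances, sorted
dist_row_b = np.array([0.10, 0.20, 0.300001])   # same query, other backend

# Distances to the k-th neighbor may differ slightly; take the larger one.
dist_k = max(dist_row_a[-1], dist_row_b[-1])

# Any neighbor strictly below this threshold must appear in both results.
threshold = (1 - rtol) * dist_k - atol
print(threshold)                     # ~0.299997

unambiguous = dist_row_a < threshold
print(unambiguous)                   # [ True  True False]: the last rank may swap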
scikit-learn | 282 | sklearn/metrics/tests/test_pairwise_distances_reduction.py | def assert_compatible_radius_results(
neighbors_dists_a,
neighbors_dists_b,
neighbors_indices_a,
neighbors_indices_b,
radius,
check_sorted=True,
rtol=1e-5,
atol=1e-6,
):
"""Assert that radius neighborhood results are valid up to:
- relative and absolute tolerance on computed distance values
- permutations of indices for distances values that differ up to
a precision level
- missing or extra last elements if their distance is
close to the radius
To be used for testing neighbors queries on float32 datasets: we
accept neighbors rank swaps only if they are caused by small
rounding errors on the distance computations.
Input arrays must be sorted w.r.t distances.
"""
| /usr/src/app/target_test_cases/failed_tests_assert_compatible_radius_results.txt | def assert_compatible_radius_results(
neighbors_dists_a,
neighbors_dists_b,
neighbors_indices_a,
neighbors_indices_b,
radius,
check_sorted=True,
rtol=1e-5,
atol=1e-6,
):
"""Assert that radius neighborhood results are valid up to:
- relative and absolute tolerance on computed distance values
- permutations of indices for distances values that differ up to
a precision level
- missing or extra last elements if their distance is
close to the radius
To be used for testing neighbors queries on float32 datasets: we
accept neighbors rank swaps only if they are caused by small
rounding errors on the distance computations.
Input arrays must be sorted w.r.t distances.
"""
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
assert (
len(neighbors_dists_a)
== len(neighbors_dists_b)
== len(neighbors_indices_a)
== len(neighbors_indices_b)
)
n_queries = len(neighbors_dists_a)
# Asserting equality of results one vector at a time
for query_idx in range(n_queries):
dist_row_a = neighbors_dists_a[query_idx]
dist_row_b = neighbors_dists_b[query_idx]
indices_row_a = neighbors_indices_a[query_idx]
indices_row_b = neighbors_indices_b[query_idx]
if check_sorted:
assert is_sorted(dist_row_a), f"Distances aren't sorted on row {query_idx}"
assert is_sorted(dist_row_b), f"Distances aren't sorted on row {query_idx}"
assert len(dist_row_a) == len(indices_row_a)
assert len(dist_row_b) == len(indices_row_b)
# Check that all distances are within the requested radius
if len(dist_row_a) > 0:
max_dist_a = np.max(dist_row_a)
assert max_dist_a <= radius, (
f"Largest returned distance {max_dist_a} not within requested"
f" radius {radius} on row {query_idx}"
)
if len(dist_row_b) > 0:
max_dist_b = np.max(dist_row_b)
assert max_dist_b <= radius, (
f"Largest returned distance {max_dist_b} not within requested"
f" radius {radius} on row {query_idx}"
)
assert_same_distances_for_common_neighbors(
query_idx,
dist_row_a,
dist_row_b,
indices_row_a,
indices_row_b,
rtol,
atol,
)
threshold = (1 - rtol) * radius - atol
assert_no_missing_neighbors(
query_idx,
dist_row_a,
dist_row_b,
indices_row_a,
indices_row_b,
threshold,
)
| assert_compatible_radius_results |
scikit-learn | 283 | sklearn/utils/_testing.py | def assert_run_python_script_without_output(source_code, pattern=".+", timeout=60):
"""Utility to check assertions in an independent Python subprocess.
The script provided in the source code should return 0 and the stdout +
stderr should not match the pattern `pattern`.
This is a port from cloudpickle https://github.com/cloudpipe/cloudpickle
Parameters
----------
source_code : str
The Python source code to execute.
pattern : str
Pattern that the stdout + stderr should not match. By default, unless
stdout + stderr are both empty, an error will be raised.
timeout : int, default=60
Time in seconds before timeout.
"""
| /usr/src/app/target_test_cases/failed_tests_assert_run_python_script_without_output.txt | def assert_run_python_script_without_output(source_code, pattern=".+", timeout=60):
"""Utility to check assertions in an independent Python subprocess.
The script provided in the source code should return 0 and the stdout +
stderr should not match the pattern `pattern`.
This is a port from cloudpickle https://github.com/cloudpipe/cloudpickle
Parameters
----------
source_code : str
The Python source code to execute.
pattern : str
Pattern that the stdout + stderr should not match. By default, unless
stdout + stderr are both empty, an error will be raised.
timeout : int, default=60
Time in seconds before timeout.
"""
fd, source_file = tempfile.mkstemp(suffix="_src_test_sklearn.py")
os.close(fd)
try:
with open(source_file, "wb") as f:
f.write(source_code.encode("utf-8"))
cmd = [sys.executable, source_file]
cwd = op.normpath(op.join(op.dirname(sklearn.__file__), ".."))
env = os.environ.copy()
try:
env["PYTHONPATH"] = os.pathsep.join([cwd, env["PYTHONPATH"]])
except KeyError:
env["PYTHONPATH"] = cwd
kwargs = {"cwd": cwd, "stderr": STDOUT, "env": env}
# If coverage is running, pass the config file to the subprocess
coverage_rc = os.environ.get("COVERAGE_PROCESS_START")
if coverage_rc:
kwargs["env"]["COVERAGE_PROCESS_START"] = coverage_rc
kwargs["timeout"] = timeout
try:
try:
out = check_output(cmd, **kwargs)
except CalledProcessError as e:
raise RuntimeError(
"script errored with output:\n%s" % e.output.decode("utf-8")
)
out = out.decode("utf-8")
if re.search(pattern, out):
if pattern == ".+":
expectation = "Expected no output"
else:
expectation = f"The output was not supposed to match {pattern!r}"
message = f"{expectation}, got the following output instead: {out!r}"
raise AssertionError(message)
except TimeoutExpired as e:
raise RuntimeError(
"script timeout, output so far:\n%s" % e.output.decode("utf-8")
)
finally:
os.unlink(source_file)
| assert_run_python_script_without_output |
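A minimal usage sketch, assuming the private helper is importable from sklearn.utils._testing and the environment is quiet; with the default pattern '.+' the subprocess must produce no output at all:

from sklearn.utils._testing import assert_run_python_script_without_output

# Passes: the script exits with 0 and prints nothing.
assert_run_python_script_without_output("import sklearn")

# Fails: any stdout/stderr matches the default pattern '.+'.
try:
    assert_run_python_script_without_output("print('hello')")
except AssertionError as exc:
    print(exc)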
scikit-learn | 284 | sklearn/manifold/_locally_linear.py | def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=None):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array or a NearestNeighbors object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, default=1e-3
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
n_jobs : int or None, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See Also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
| /usr/src/app/target_test_cases/failed_tests_barycenter_kneighbors_graph.txt | def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=None):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array or a NearestNeighbors object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, default=1e-3
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
n_jobs : int or None, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See Also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs).fit(X)
X = knn._fit_X
n_samples = knn.n_samples_fit_
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X, ind, reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples))
| barycenter_kneighbors_graph |
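A small usage sketch, assuming the helper is imported from the private module sklearn.manifold._locally_linear (the path may differ across versions); each row of the returned CSR graph holds barycenter weights that sum to 1:

import numpy as np
from sklearn.manifold._locally_linear import barycenter_kneighbors_graph

rng = np.random.RandomState(0)
X = rng.rand(10, 3)

A = barycenter_kneighbors_graph(X, n_neighbors=3, reg=1e-3)
print(A.shape)                  # (10, 10), CSR format
print(A.toarray().sum(axis=1))  # each row of weights sums to ~1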
scikit-learn | 285 | sklearn/utils/validation.py | def check_X_y(
X,
y,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_writeable=False,
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_2d=True,
allow_nd=False,
multi_output=False,
ensure_min_samples=1,
ensure_min_features=1,
y_numeric=False,
estimator=None,
):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X to be 2D and y 1D. By
default, X is checked to be non-empty and containing only finite values.
Standard input checks are also applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2D and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : {ndarray, list, sparse matrix}
Input data.
y : {ndarray, list, sparse matrix}
Labels.
accept_sparse : str, bool or list of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse will cause it to be accepted only
if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'}, default=None
Whether an array will be forced to be fortran or c-style. If
`None`, then the input data's order is preserved when possible.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_2d : bool, default=True
Whether to raise a value error if X is not 2D.
allow_nd : bool, default=False
Whether to allow X.ndim > 2.
multi_output : bool, default=False
Whether to allow 2D y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int, default=1
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : bool, default=False
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
Examples
--------
>>> from sklearn.utils.validation import check_X_y
>>> X = [[1, 2], [3, 4], [5, 6]]
>>> y = [1, 2, 3]
>>> X, y = check_X_y(X, y)
>>> X
array([[1, 2],
[3, 4],
[5, 6]])
>>> y
array([1, 2, 3])
"""
| /usr/src/app/target_test_cases/failed_tests_check_X_y.txt | def check_X_y(
X,
y,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_writeable=False,
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_2d=True,
allow_nd=False,
multi_output=False,
ensure_min_samples=1,
ensure_min_features=1,
y_numeric=False,
estimator=None,
):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X to be 2D and y 1D. By
default, X is checked to be non-empty and containing only finite values.
Standard input checks are also applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2D and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : {ndarray, list, sparse matrix}
Input data.
y : {ndarray, list, sparse matrix}
Labels.
accept_sparse : str, bool or list of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse will cause it to be accepted only
if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'}, default=None
Whether an array will be forced to be fortran or c-style. If
`None`, then the input data's order is preserved when possible.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_2d : bool, default=True
Whether to raise a value error if X is not 2D.
allow_nd : bool, default=False
Whether to allow X.ndim > 2.
multi_output : bool, default=False
Whether to allow 2D y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int, default=1
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : bool, default=False
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
Examples
--------
>>> from sklearn.utils.validation import check_X_y
>>> X = [[1, 2], [3, 4], [5, 6]]
>>> y = [1, 2, 3]
>>> X, y = check_X_y(X, y)
>>> X
array([[1, 2],
[3, 4],
[5, 6]])
>>> y
array([1, 2, 3])
"""
if y is None:
if estimator is None:
estimator_name = "estimator"
else:
estimator_name = _check_estimator_name(estimator)
raise ValueError(
f"{estimator_name} requires y to be passed, but the target y is None"
)
ensure_all_finite = _deprecate_force_all_finite(force_all_finite, ensure_all_finite)
X = check_array(
X,
accept_sparse=accept_sparse,
accept_large_sparse=accept_large_sparse,
dtype=dtype,
order=order,
copy=copy,
force_writeable=force_writeable,
ensure_all_finite=ensure_all_finite,
ensure_2d=ensure_2d,
allow_nd=allow_nd,
ensure_min_samples=ensure_min_samples,
ensure_min_features=ensure_min_features,
estimator=estimator,
input_name="X",
)
y = _check_y(y, multi_output=multi_output, y_numeric=y_numeric, estimator=estimator)
check_consistent_length(X, y)
return X, y
| check_X_y |
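Beyond the docstring example, a short hedged sketch of the multi_output switch: 2D targets are only accepted when multi_output=True.

import numpy as np
from sklearn.utils.validation import check_X_y

X = np.arange(6).reshape(3, 2)
Y = np.array([[1, 0], [0, 1], [1, 1]])   # one row of labels per sample

X_checked, Y_checked = check_X_y(X, Y, multi_output=True)
print(Y_checked.shape)  # (3, 2)

try:
    check_X_y(X, Y)  # default multi_output=False expects a 1D y
except ValueError as exc:
    print(exc)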
scikit-learn | 286 | sklearn/utils/validation.py | def check_array(
array,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_writeable=False,
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_non_negative=False,
ensure_2d=True,
allow_nd=False,
ensure_min_samples=1,
ensure_min_features=1,
estimator=None,
input_name="",
):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is checked to be a non-empty 2D array containing
only finite values. If the dtype of the array is object, attempt
converting to float, raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : str, bool or list/tuple of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse=False will cause it to be accepted
only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'} or None, default=None
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_non_negative : bool, default=False
Make sure the array has only non-negative values. If True, an array that
contains negative values will raise a ValueError.
.. versionadded:: 1.6
ensure_2d : bool, default=True
Whether to raise a value error if array is not 2D.
allow_nd : bool, default=False
Whether to allow array.ndim > 2.
ensure_min_samples : int, default=1
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
.. versionadded:: 1.1.0
Returns
-------
array_converted : object
The converted and validated array.
Examples
--------
>>> from sklearn.utils.validation import check_array
>>> X = [[1, 2, 3], [4, 5, 6]]
>>> X_checked = check_array(X)
>>> X_checked
array([[1, 2, 3], [4, 5, 6]])
"""
| /usr/src/app/target_test_cases/failed_tests_check_array.txt | def check_array(
array,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_writeable=False,
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_non_negative=False,
ensure_2d=True,
allow_nd=False,
ensure_min_samples=1,
ensure_min_features=1,
estimator=None,
input_name="",
):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is checked to be a non-empty 2D array containing
only finite values. If the dtype of the array is object, attempt
converting to float, raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : str, bool or list/tuple of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse=False will cause it to be accepted
only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'} or None, default=None
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_writeable : bool, default=False
Whether to force the output array to be writeable. If True, the returned array
is guaranteed to be writeable, which may require a copy. Otherwise the
writeability of the input array is preserved.
.. versionadded:: 1.6
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_non_negative : bool, default=False
Make sure the array has only non-negative values. If True, an array that
contains negative values will raise a ValueError.
.. versionadded:: 1.6
ensure_2d : bool, default=True
Whether to raise a value error if array is not 2D.
allow_nd : bool, default=False
Whether to allow array.ndim > 2.
ensure_min_samples : int, default=1
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
.. versionadded:: 1.1.0
Returns
-------
array_converted : object
The converted and validated array.
Examples
--------
>>> from sklearn.utils.validation import check_array
>>> X = [[1, 2, 3], [4, 5, 6]]
>>> X_checked = check_array(X)
>>> X_checked
array([[1, 2, 3], [4, 5, 6]])
"""
ensure_all_finite = _deprecate_force_all_finite(force_all_finite, ensure_all_finite)
if isinstance(array, np.matrix):
raise TypeError(
"np.matrix is not supported. Please convert to a numpy array with "
"np.asarray. For more information see: "
"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html"
)
xp, is_array_api_compliant = get_namespace(array)
# store reference to original array to check if copy is needed when
# function returns
array_orig = array
# store whether originally we wanted numeric dtype
dtype_numeric = isinstance(dtype, str) and dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not is_array_api_compliant and not hasattr(dtype_orig, "kind"):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
# check if the object contains several dtypes (typically a pandas
# DataFrame), and store them. If not, store None.
dtypes_orig = None
pandas_requires_conversion = False
# track if we have a Series-like object to raise a better error message
type_if_series = None
if hasattr(array, "dtypes") and hasattr(array.dtypes, "__array__"):
# throw warning if columns are sparse. If all columns are sparse, then
# array.sparse exists and sparsity will be preserved (later).
with suppress(ImportError):
from pandas import SparseDtype
def is_sparse(dtype):
return isinstance(dtype, SparseDtype)
if not hasattr(array, "sparse") and array.dtypes.apply(is_sparse).any():
warnings.warn(
"pandas.DataFrame with sparse columns found."
"It will be converted to a dense numpy array."
)
dtypes_orig = list(array.dtypes)
pandas_requires_conversion = any(
_pandas_dtype_needs_early_conversion(i) for i in dtypes_orig
)
if all(isinstance(dtype_iter, np.dtype) for dtype_iter in dtypes_orig):
dtype_orig = np.result_type(*dtypes_orig)
elif pandas_requires_conversion and any(d == object for d in dtypes_orig):
# Force object if any of the dtypes is an object
dtype_orig = object
elif (_is_extension_array_dtype(array) or hasattr(array, "iloc")) and hasattr(
array, "dtype"
):
# array is a pandas series
type_if_series = type(array)
pandas_requires_conversion = _pandas_dtype_needs_early_conversion(array.dtype)
if isinstance(array.dtype, np.dtype):
dtype_orig = array.dtype
else:
# Set to None to let array.astype work out the best dtype
dtype_orig = None
if dtype_numeric:
if (
dtype_orig is not None
and hasattr(dtype_orig, "kind")
and dtype_orig.kind == "O"
):
# if input is object, convert to float.
dtype = xp.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if pandas_requires_conversion:
# pandas dataframe requires conversion earlier to handle extension dtypes with
# nans
# Use the original dtype for conversion if dtype is None
new_dtype = dtype_orig if dtype is None else dtype
array = array.astype(new_dtype)
# Since we converted here, we do not need to convert again later
dtype = None
if ensure_all_finite not in (True, False, "allow-nan"):
raise ValueError(
"ensure_all_finite should be a bool or 'allow-nan'. Got "
f"{ensure_all_finite!r} instead."
)
if dtype is not None and _is_numpy_namespace(xp):
# convert to dtype object to conform to Array API to be use `xp.isdtype` later
dtype = np.dtype(dtype)
estimator_name = _check_estimator_name(estimator)
context = " by %s" % estimator_name if estimator is not None else ""
# When all dataframe columns are sparse, convert to a sparse array
if hasattr(array, "sparse") and array.ndim > 1:
with suppress(ImportError):
from pandas import SparseDtype # noqa: F811
def is_sparse(dtype):
return isinstance(dtype, SparseDtype)
if array.dtypes.apply(is_sparse).all():
# DataFrame.sparse only supports `to_coo`
array = array.sparse.to_coo()
if array.dtype == np.dtype("object"):
unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])
if len(unique_dtypes) > 1:
raise ValueError(
"Pandas DataFrame with mixed sparse extension arrays "
"generated a sparse matrix with object dtype which "
"can not be converted to a scipy sparse matrix."
"Sparse extension arrays should all have the same "
"numeric type."
)
if sp.issparse(array):
_ensure_no_complex_data(array)
array = _ensure_sparse_format(
array,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
ensure_all_finite=ensure_all_finite,
accept_large_sparse=accept_large_sparse,
estimator_name=estimator_name,
input_name=input_name,
)
if ensure_2d and array.ndim < 2:
raise ValueError(
f"Expected 2D input, got input with shape {array.shape}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample."
)
else:
# If np.array(..) gives ComplexWarning, then we convert the warning
# to an error. This is needed because specifying a non complex
# dtype to the function converts complex to real dtype,
# thereby passing the test made in the lines following the scope
# of warnings context manager.
with warnings.catch_warnings():
try:
warnings.simplefilter("error", ComplexWarning)
if dtype is not None and xp.isdtype(dtype, "integral"):
# Conversion float -> int should not contain NaN or
# inf (numpy#14412). We cannot use casting='safe' because
# then conversion float -> int would be disallowed.
array = _asarray_with_order(array, order=order, xp=xp)
if xp.isdtype(array.dtype, ("real floating", "complex floating")):
_assert_all_finite(
array,
allow_nan=False,
msg_dtype=dtype,
estimator_name=estimator_name,
input_name=input_name,
)
array = xp.astype(array, dtype, copy=False)
else:
array = _asarray_with_order(array, order=order, dtype=dtype, xp=xp)
except ComplexWarning as complex_warning:
raise ValueError(
"Complex data not supported\n{}\n".format(array)
) from complex_warning
# It is possible that the np.array(..) gave no warning. This happens
# when no dtype conversion happened, for example dtype = None. The
# result is that np.array(..) produces an array of complex dtype
# and we need to catch and raise exception for such cases.
_ensure_no_complex_data(array)
if ensure_2d:
# If input is scalar raise error
if array.ndim == 0:
raise ValueError(
"Expected 2D array, got scalar array instead:\narray={}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample.".format(array)
)
# If input is 1D raise error
if array.ndim == 1:
# If input is a Series-like object (eg. pandas Series or polars Series)
if type_if_series is not None:
msg = (
f"Expected a 2-dimensional container but got {type_if_series} "
"instead. Pass a DataFrame containing a single row (i.e. "
"single sample) or a single column (i.e. single feature) "
"instead."
)
else:
msg = (
f"Expected 2D array, got 1D array instead:\narray={array}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample."
)
raise ValueError(msg)
if dtype_numeric and hasattr(array.dtype, "kind") and array.dtype.kind in "USV":
raise ValueError(
"dtype='numeric' is not compatible with arrays of bytes/strings."
"Convert your data to numeric values explicitly instead."
)
if not allow_nd and array.ndim >= 3:
raise ValueError(
"Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name)
)
if ensure_all_finite:
_assert_all_finite(
array,
input_name=input_name,
estimator_name=estimator_name,
allow_nan=ensure_all_finite == "allow-nan",
)
if copy:
if _is_numpy_namespace(xp):
# only make a copy if `array` and `array_orig` may share memory
if np.may_share_memory(array, array_orig):
array = _asarray_with_order(
array, dtype=dtype, order=order, copy=True, xp=xp
)
else:
# always make a copy for non-numpy arrays
array = _asarray_with_order(
array, dtype=dtype, order=order, copy=True, xp=xp
)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError(
"Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, array.shape, ensure_min_samples, context)
)
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError(
"Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, array.shape, ensure_min_features, context)
)
if ensure_non_negative:
whom = input_name
if estimator_name:
whom += f" in {estimator_name}"
check_non_negative(array, whom)
if force_writeable:
# By default, array.copy() creates a C-ordered copy. We set order=K to
# preserve the order of the array.
copy_params = {"order": "K"} if not sp.issparse(array) else {}
array_data = array.data if sp.issparse(array) else array
flags = getattr(array_data, "flags", None)
if not getattr(flags, "writeable", True):
# This situation can only happen when copy=False, the array is read-only and
# a writeable output is requested. This is an ambiguous setting so we chose
# to always (except for one specific setting, see below) make a copy to
# ensure that the output is writeable, even if avoidable, to not overwrite
# the user's data by surprise.
if _is_pandas_df_or_series(array_orig):
try:
# In pandas >= 3, np.asarray(df), called earlier in check_array,
# returns a read-only intermediate array. It can be made writeable
# safely without copy because if the original DataFrame was backed
# by a read-only array, trying to change the flag would raise an
# error, in which case we make a copy.
array_data.flags.writeable = True
except ValueError:
array = array.copy(**copy_params)
else:
array = array.copy(**copy_params)
return array
| check_array |
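A short complementary sketch (hedged, assuming scikit-learn >= 1.6 for the ensure_all_finite keyword) showing the sparse and missing-value switches:

import numpy as np
import scipy.sparse as sp
from sklearn.utils.validation import check_array

X_sparse = sp.csr_matrix([[1.0, 0.0], [0.0, 2.0]])
checked = check_array(X_sparse, accept_sparse="csr")
print(checked.format)  # 'csr': an accepted sparse format is passed through

X_nan = np.array([[1.0, np.nan], [3.0, 4.0]])
print(check_array(X_nan, ensure_all_finite="allow-nan"))  # NaN allowed

try:
    check_array(X_nan)  # default: NaN raises ValueError
except ValueError as exc:
    print(exc)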
scikit-learn | 287 | sklearn/metrics/pairwise.py | def check_paired_arrays(X, Y):
"""Set X and Y appropriately and checks inputs for paired distances.
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
| /usr/src/app/target_test_cases/failed_tests_check_paired_arrays.txt | def check_paired_arrays(X, Y):
"""Set X and Y appropriately and checks inputs for paired distances.
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError(
"X and Y should be of same shape. They were respectively %r and %r long."
% (X.shape, Y.shape)
)
return X, Y
| check_paired_arrays |
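A minimal usage sketch, assuming check_paired_arrays is imported from sklearn.metrics.pairwise:

import numpy as np
from sklearn.metrics.pairwise import check_paired_arrays

X = np.array([[1.0, 2.0], [3.0, 4.0]])
Y = np.array([[0.0, 1.0], [5.0, 6.0]])

X_safe, Y_safe = check_paired_arrays(X, Y)   # same shape: accepted
print(X_safe.shape, Y_safe.shape)            # (2, 2) (2, 2)

try:
    check_paired_arrays(X, Y[:1])            # shape mismatch raises ValueError
except ValueError as exc:
    print(exc)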
scikit-learn | 288 | sklearn/metrics/pairwise.py | def check_pairwise_arrays(
X,
Y,
*,
precomputed=False,
dtype="infer_float",
accept_sparse="csr",
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_2d=True,
copy=False,
):
"""Set X and Y appropriately and checks inputs.
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
precomputed : bool, default=False
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : str, type, list of type or None default="infer_float"
Data type required for X and Y. If "infer_float", the dtype will be an
appropriate float type selected by _return_float_dtype. If None, the
dtype of the input is preserved.
.. versionadded:: 0.18
accept_sparse : str, bool or list/tuple of str, default='csr'
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_2d : bool, default=True
Whether to raise an error when the input arrays are not 2-dimensional. Setting
this to `False` is necessary when using a custom metric with certain
non-numerical inputs (e.g. a list of strings).
.. versionadded:: 1.5
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
.. versionadded:: 0.22
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
| /usr/src/app/target_test_cases/failed_tests_check_pairwise_arrays.txt | def check_pairwise_arrays(
X,
Y,
*,
precomputed=False,
dtype="infer_float",
accept_sparse="csr",
force_all_finite="deprecated",
ensure_all_finite=None,
ensure_2d=True,
copy=False,
):
"""Set X and Y appropriately and checks inputs.
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
precomputed : bool, default=False
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : str, type, list of type or None default="infer_float"
Data type required for X and Y. If "infer_float", the dtype will be an
appropriate float type selected by _return_float_dtype. If None, the
dtype of the input is preserved.
.. versionadded:: 0.18
accept_sparse : str, bool or list/tuple of str, default='csr'
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
.. deprecated:: 1.6
`force_all_finite` was renamed to `ensure_all_finite` and will be removed
in 1.8.
ensure_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 1.6
`force_all_finite` was renamed to `ensure_all_finite`.
ensure_2d : bool, default=True
Whether to raise an error when the input arrays are not 2-dimensional. Setting
this to `False` is necessary when using a custom metric with certain
non-numerical inputs (e.g. a list of strings).
.. versionadded:: 1.5
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
.. versionadded:: 0.22
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
ensure_all_finite = _deprecate_force_all_finite(force_all_finite, ensure_all_finite)
xp, _ = get_namespace(X, Y)
if any([issparse(X), issparse(Y)]) or _is_numpy_namespace(xp):
X, Y, dtype_float = _return_float_dtype(X, Y)
else:
dtype_float = _find_matching_floating_dtype(X, Y, xp=xp)
estimator = "check_pairwise_arrays"
if dtype == "infer_float":
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(
X,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
ensure_all_finite=ensure_all_finite,
estimator=estimator,
ensure_2d=ensure_2d,
)
else:
X = check_array(
X,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
ensure_all_finite=ensure_all_finite,
estimator=estimator,
ensure_2d=ensure_2d,
)
Y = check_array(
Y,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
ensure_all_finite=ensure_all_finite,
estimator=estimator,
ensure_2d=ensure_2d,
)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError(
"Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." % (X.shape[0], X.shape[1], Y.shape[0])
)
elif ensure_2d and X.shape[1] != Y.shape[1]:
# Only check the number of features if 2d arrays are enforced. Otherwise,
# validation is left to the user for custom metrics.
raise ValueError(
"Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (X.shape[1], Y.shape[1])
)
return X, Y
| check_pairwise_arrays |
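A minimal sketch (assuming import from sklearn.metrics.pairwise): when Y is None, the same validated array is returned for both inputs.

import numpy as np
from sklearn.metrics.pairwise import check_pairwise_arrays

X = np.array([[1, 2], [3, 4]], dtype=np.int64)

X_safe, Y_safe = check_pairwise_arrays(X, None)
print(X_safe is Y_safe)   # True: Y defaults to a pointer to X
print(X_safe.dtype)       # float64 via the "infer_float" default

try:
    check_pairwise_arrays(X, np.array([[1.0, 2.0, 3.0]]))
except ValueError as exc:
    print(exc)            # incompatible number of features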
scikit-learn | 289 | sklearn/tests/metadata_routing_common.py | def check_recorded_metadata(obj, method, parent, split_params=tuple(), **kwargs):
"""Check whether the expected metadata is passed to the object's method.
Parameters
----------
obj : estimator object
sub-estimator to check routed params for
method : str
sub-estimator's method where metadata is routed to, or otherwise in
the context of metadata routing referred to as 'callee'
parent : str
the parent method which should have called `method`, or otherwise in
the context of metadata routing referred to as 'caller'
split_params : tuple, default=empty
specifies any parameters which are to be checked as being a subset
of the original values
**kwargs : dict
passed metadata
"""
| /usr/src/app/target_test_cases/failed_tests_check_recorded_metadata.txt | def check_recorded_metadata(obj, method, parent, split_params=tuple(), **kwargs):
"""Check whether the expected metadata is passed to the object's method.
Parameters
----------
obj : estimator object
sub-estimator to check routed params for
method : str
sub-estimator's method where metadata is routed to, or otherwise in
the context of metadata routing referred to as 'callee'
parent : str
the parent method which should have called `method`, or otherwise in
the context of metadata routing referred to as 'caller'
split_params : tuple, default=empty
specifies any parameters which are to be checked as being a subset
of the original values
**kwargs : dict
passed metadata
"""
all_records = (
getattr(obj, "_records", dict()).get(method, dict()).get(parent, list())
)
for record in all_records:
# first check that the names of the metadata passed are the same as
# expected. The names are stored as keys in `record`.
assert set(kwargs.keys()) == set(
record.keys()
), f"Expected {kwargs.keys()} vs {record.keys()}"
for key, value in kwargs.items():
recorded_value = record[key]
# The following condition is used to check for any specified parameters
# being a subset of the original values
if key in split_params and recorded_value is not None:
assert np.isin(recorded_value, value).all()
else:
if isinstance(recorded_value, np.ndarray):
assert_array_equal(recorded_value, value)
else:
assert (
recorded_value is value
), f"Expected {recorded_value} vs {value}. Method: {method}"
| check_recorded_metadata |
scikit-learn | 290 | sklearn/utils/multiclass.py | def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data.
Parameters
----------
y : {array-like, sparse matrix} of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
classes : list of size n_outputs of ndarray of size (n_classes,)
List of classes for each column.
n_classes : list of int of size n_outputs
Number of classes in each column.
class_prior : list of size n_outputs of ndarray of size (n_classes,)
Class distribution of each column.
"""
| /usr/src/app/target_test_cases/failed_tests_class_distribution.txt | def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data.
Parameters
----------
y : {array-like, sparse matrix} of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
classes : list of size n_outputs of ndarray of size (n_classes,)
List of classes for each column.
n_classes : list of int of size n_outputs
Number of classes in each column.
class_prior : list of size n_outputs of ndarray of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k] : y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = sample_weight[col_nonzero]
zeros_samp_weight_sum = np.sum(sample_weight) - np.sum(nz_samp_weight)
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(
y.data[y.indptr[k] : y.indptr[k + 1]], return_inverse=True
)
class_prior_k = np.bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0, zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = np.bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
| class_distribution |
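Usage sketch for class_distribution (an illustrative addition, not part of the scikit-learn source above; the small target matrix is made up):
import numpy as np
from sklearn.utils.multiclass import class_distribution

# Two-output multiclass target: column 0 has classes {1, 2}, column 1 has {0, 3}.
y = np.array([[1, 0],
              [2, 0],
              [1, 3]])
classes, n_classes, class_prior = class_distribution(y)
# classes     -> [array([1, 2]), array([0, 3])]
# n_classes   -> [2, 2]
# class_prior -> [array([0.667, 0.333]), array([0.667, 0.333])] (approximately)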
scikit-learn | 291 | sklearn/utils/sparsefuncs.py | def count_nonzero(X, axis=None, sample_weight=None):
"""A variant of X.getnnz() with extension to weighting on axis 0.
Useful in efficiently calculating multilabel metrics.
Parameters
----------
X : sparse matrix of shape (n_samples, n_labels)
Input data. It should be of CSR format.
axis : {0, 1}, default=None
The axis on which the data is aggregated.
sample_weight : array-like of shape (n_samples,), default=None
Weight for each row of X.
Returns
-------
nnz : int, float, ndarray of shape (n_samples,) or ndarray of shape (n_features,)
Number of non-zero values in the array along a given axis. Otherwise,
the total number of non-zero values in the array is returned.
"""
| /usr/src/app/target_test_cases/failed_tests_count_nonzero.txt | def count_nonzero(X, axis=None, sample_weight=None):
"""A variant of X.getnnz() with extension to weighting on axis 0.
Useful in efficiently calculating multilabel metrics.
Parameters
----------
X : sparse matrix of shape (n_samples, n_labels)
Input data. It should be of CSR format.
axis : {0, 1}, default=None
The axis on which the data is aggregated.
sample_weight : array-like of shape (n_samples,), default=None
Weight for each row of X.
Returns
-------
nnz : int, float, ndarray of shape (n_samples,) or ndarray of shape (n_features,)
Number of non-zero values in the array along a given axis. Otherwise,
the total number of non-zero values in the array is returned.
"""
if axis == -1:
axis = 1
elif axis == -2:
axis = 0
elif X.format != "csr":
raise TypeError("Expected CSR sparse format, got {0}".format(X.format))
# We rely here on the fact that np.diff(X.indptr) for a CSR
# will return the number of nonzero entries in each row.
# A bincount over X.indices will return the number of nonzeros
# in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
if axis is None:
if sample_weight is None:
return X.nnz
else:
return np.dot(np.diff(X.indptr), sample_weight)
elif axis == 1:
out = np.diff(X.indptr)
if sample_weight is None:
# astype here is for consistency with axis=0 dtype
return out.astype("intp")
return out * sample_weight
elif axis == 0:
if sample_weight is None:
return np.bincount(X.indices, minlength=X.shape[1])
else:
weights = np.repeat(sample_weight, np.diff(X.indptr))
return np.bincount(X.indices, minlength=X.shape[1], weights=weights)
else:
raise ValueError("Unsupported axis: {0}".format(axis))
| count_nonzero |
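Usage sketch for count_nonzero (illustrative only, not part of the source above; the input matrix and weights are made up):
import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import count_nonzero

X = sparse.csr_matrix(np.array([[1, 0, 2],
                                [0, 0, 3],
                                [4, 5, 0]]))
count_nonzero(X)                          # 5: total number of stored non-zeros
count_nonzero(X, axis=0)                  # array([2, 1, 2]): per column
count_nonzero(X, axis=1)                  # array([2, 1, 2]): per row
count_nonzero(X, axis=0, sample_weight=np.array([1.0, 2.0, 3.0]))
# array([4., 3., 3.]): each row's non-zeros weighted by its sample weight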
scikit-learn | 292 | sklearn/utils/sparsefuncs.py | def csc_median_axis_0(X):
"""Find the median across axis 0 of a CSC matrix.
It is equivalent to doing np.median(X, axis=0).
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It should be of CSC format.
Returns
-------
median : ndarray of shape (n_features,)
Median.
"""
| /usr/src/app/target_test_cases/failed_tests_csc_median_axis_0.txt | def csc_median_axis_0(X):
"""Find the median across axis 0 of a CSC matrix.
It is equivalent to doing np.median(X, axis=0).
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It should be of CSC format.
Returns
-------
median : ndarray of shape (n_features,)
Median.
"""
if not (sp.issparse(X) and X.format == "csc"):
raise TypeError("Expected matrix of CSC format, got %s" % X.format)
indptr = X.indptr
n_samples, n_features = X.shape
median = np.zeros(n_features)
for f_ind, (start, end) in enumerate(zip(indptr[:-1], indptr[1:])):
# Prevent modifying X in place
data = np.copy(X.data[start:end])
nz = n_samples - data.size
median[f_ind] = _get_median(data, nz)
return median
| csc_median_axis_0 |
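Usage sketch for csc_median_axis_0 (illustrative only, not part of the source above; the input matrix is made up):
import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import csc_median_axis_0

X = sparse.csc_matrix(np.array([[0, 3, 4],
                                [1, 2, 0],
                                [0, 0, 5]]))
csc_median_axis_0(X)   # array([0., 2., 4.]), same as np.median(X.toarray(), axis=0)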
scikit-learn | 293 | sklearn/cluster/_spectral.py | def discretize(
vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None
):
"""Search for a partition matrix which is closest to the eigenvector embedding.
This implementation was proposed in [1]_.
Parameters
----------
vectors : array-like of shape (n_samples, n_clusters)
The embedding space of the samples.
copy : bool, default=True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, default=30
Maximum number of attempts to restart SVD if convergence fails.
n_iter_max : int, default=20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached.
random_state : int, RandomState instance, default=None
Determines random number generation for rotation matrix initialization.
Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
.. [1] `Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
<https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
| /usr/src/app/target_test_cases/failed_tests_discretize.txt | def discretize(
vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None
):
"""Search for a partition matrix which is closest to the eigenvector embedding.
This implementation was proposed in [1]_.
Parameters
----------
vectors : array-like of shape (n_samples, n_clusters)
The embedding space of the samples.
copy : bool, default=True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, default=30
Maximum number of attempts to restart SVD if convergence fails.
n_iter_max : int, default=20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached.
random_state : int, RandomState instance, default=None
Determines random number generation for rotation matrix initialization.
Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
.. [1] `Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
<https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) * norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors**2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components),
)
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
except LinAlgError:
svd_restarts += 1
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if (abs(ncut_value - last_objective_value) < eps) or (n_iter > n_iter_max):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError("SVD did not converge")
return labels
| discretize |
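Usage sketch for discretize (illustrative only; imported from the private module named in the row above, and the toy embedding below is made up):
import numpy as np
from sklearn.cluster._spectral import discretize  # private module, as in the row above

rng = np.random.RandomState(0)
# A slightly noisy two-cluster indicator embedding: 20 samples, 2 columns.
true_labels = np.repeat([0, 1], 10)
vectors = np.eye(2)[true_labels] + 0.05 * rng.randn(20, 2)
labels = discretize(vectors, random_state=0)
# `labels` has length 20 and, up to a permutation of the cluster ids, should
# recover `true_labels` for an embedding this clean.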
scikit-learn | 294 | sklearn/feature_selection/_univariate_selection.py | def f_oneway(*args):
"""Perform a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
*args : {array-like, sparse matrix}
Sample1, sample2... The sample measurements should be given as
arguments.
Returns
-------
f_statistic : float
The computed F-value of the test.
p_value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway``, which should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://vassarstats.net/textbook
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
| /usr/src/app/target_test_cases/failed_tests_f_oneway.txt | def f_oneway(*args):
"""Perform a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
*args : {array-like, sparse matrix}
Sample1, sample2... The sample measurements should be given as
arguments.
Returns
-------
f_statistic : float
The computed F-value of the test.
p_value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway``, which should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://vassarstats.net/textbook
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s**2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.0
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.0)[0]
if np.nonzero(msb)[0].size != msb.size and constant_features_idx.size:
warnings.warn("Features %s are constant." % constant_features_idx, UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
| f_oneway |
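Usage sketch for f_oneway (illustrative only; imported from the private module named in the row above, and the group data is made up). Unlike scipy's version, this variant is vectorized over features:
import numpy as np
from sklearn.feature_selection._univariate_selection import f_oneway  # module from the row above

rng = np.random.RandomState(0)
# Three groups with 4 features each; only the last feature differs in mean.
g1, g2, g3 = rng.randn(30, 4), rng.randn(30, 4), rng.randn(30, 4)
g3[:, -1] += 3.0
f_statistic, p_value = f_oneway(g1, g2, g3)
# Both outputs have shape (4,); the last entry should show a large F-value
# and a p-value near zero, while the other features should not.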
scikit-learn | 295 | sklearn/covariance/_robust_covariance.py | def fast_mcd(
X,
support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None,
):
"""Estimate the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. Default is `None`, which implies that the minimum
value of `support_fraction` will be used within the algorithm:
`(n_samples + n_features + 1) / (2 * n_samples)`. This parameter must be
in the range (0, 1).
cov_computation_method : callable, \
default=:func:`sklearn.covariance.empirical_covariance`
The function which will be used to compute the covariance.
Must return an array of shape (n_features, n_features).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
location : ndarray of shape (n_features,)
Robust location of the data.
covariance : ndarray of shape (n_features, n_features)
Robust covariance of the features.
support : ndarray of shape (n_samples,), dtype=bool
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
Notes
-----
The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets before
pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [RouseeuwVan]_,
see the MinCovDet object.
References
----------
.. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
"""
| /usr/src/app/target_test_cases/failed_tests_fast_mcd.txt | def fast_mcd(
X,
support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None,
):
"""Estimate the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. Default is `None`, which implies that the minimum
value of `support_fraction` will be used within the algorithm:
`(n_samples + n_features + 1) / (2 * n_samples)`. This parameter must be
in the range (0, 1).
cov_computation_method : callable, \
default=:func:`sklearn.covariance.empirical_covariance`
The function which will be used to compute the covariance.
Must return an array of shape (n_features, n_features).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
location : ndarray of shape (n_features,)
Robust location of the data.
covariance : ndarray of shape (n_features, n_features)
Robust covariance of the features.
support : ndarray of shape (n_samples,), dtype=bool
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
Notes
-----
The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets before
pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [RouseeuwVan]_,
see the MinCovDet object.
References
----------
.. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator="fast_mcd")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = (
0.5
* (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean()
)
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
except MemoryError:
# The above is too big. Let's try with something much smaller
# (and less optimal)
n_best_tot = 10
all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset,
h_subset,
n_trials,
select=n_best_sub,
n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = select_candidates(
X[selection],
h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = select_candidates(
X,
n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X,
n_support,
n_trials=n_trials,
select=n_best,
n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X,
n_support,
n_trials=(locations_best, covariances_best),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
| fast_mcd |
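Usage sketch for fast_mcd (illustrative only, not part of the source above; the contaminated data set is made up). Note that the four-value unpacking follows the return statement of the implementation above, which also returns the Mahalanobis distances even though the docstring lists only three values:
import numpy as np
from sklearn.covariance import fast_mcd

rng = np.random.RandomState(42)
X = rng.multivariate_normal([0.0, 0.0], [[1.0, 0.3], [0.3, 1.0]], size=200)
X[:10] += 8.0  # contaminate a few rows with gross outliers
location, covariance, support, dist = fast_mcd(X, random_state=42)
# `support` is a boolean mask of the rows used for the raw estimates; the
# contaminated rows should mostly be excluded, keeping `location` near [0, 0].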
scikit-learn | 296 | sklearn/linear_model/_sag.py | def get_auto_step_size(
max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False
):
"""Compute automatic step size for SAG solver.
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
the max sum of squares over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : {'log', 'squared', 'multinomial'}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
n_samples : int, default=None
Number of rows in X. Useful if is_saga=True.
is_saga : bool, default=False
Whether to return step size for the SAGA algorithm or the SAG
algorithm.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
:arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`
"""
| /usr/src/app/target_test_cases/failed_tests_get_auto_step_size.txt | def get_auto_step_size(
max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False
):
"""Compute automatic step size for SAG solver.
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
the max sum of squares over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : {'log', 'squared', 'multinomial'}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
n_samples : int, default=None
Number of rows in X. Useful if is_saga=True.
is_saga : bool, default=False
Whether to return step size for the SAGA algorithm or the SAG
algorithm.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
:arxiv:`Defazio, A., Bach F. & Lacoste-Julien S. (2014).
"SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives" <1407.0202>`
"""
if loss in ("log", "multinomial"):
L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
elif loss == "squared":
# Lipschitz constant for the squared loss (the step below is its inverse)
L = max_squared_sum + int(fit_intercept) + alpha_scaled
else:
raise ValueError(
"Unknown loss function for SAG solver, got %s instead of 'log' or 'squared'"
% loss
)
if is_saga:
# SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
# See Defazio et al. 2014
mun = min(2 * n_samples * alpha_scaled, L)
step = 1.0 / (2 * L + mun)
else:
# SAG theoretical step size is 1/16L but it is recommended to use 1 / L
# see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
# slide 65
step = 1.0 / L
return step
| get_auto_step_size |
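Usage sketch for get_auto_step_size (illustrative only; imported from the private module named in the row above, and the design matrix is made up):
import numpy as np
from sklearn.linear_model._sag import get_auto_step_size  # module from the row above

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
max_squared_sum = np.max(np.sum(X ** 2, axis=1))
alpha_scaled = 1.0 / X.shape[0]
step_sag = get_auto_step_size(max_squared_sum, alpha_scaled, "log", fit_intercept=True)
step_saga = get_auto_step_size(max_squared_sum, alpha_scaled, "log", fit_intercept=True,
                               n_samples=X.shape[0], is_saga=True)
# step_sag equals 1 / (0.25 * (max_squared_sum + 1) + alpha_scaled); the SAGA
# step is smaller because its denominator 2*L + min(2*n*alpha_scaled, L)
# exceeds L.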
scikit-learn | 297 | sklearn/cluster/_mean_shift.py | def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Find seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : int, default=1
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like of shape (n_samples, n_features)
Points used as initial kernel positions in clustering.mean_shift.
"""
| /usr/src/app/target_test_cases/failed_tests_get_bin_seeds.txt | def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Find seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : int, default=1
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like of shape (n_samples, n_features)
Points used as initial kernel positions in clustering.mean_shift.
"""
if bin_size == 0:
return X
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array(
[point for point, freq in bin_sizes.items() if freq >= min_bin_freq],
dtype=np.float32,
)
if len(bin_seeds) == len(X):
warnings.warn(
"Binning data failed with provided bin_size=%f, using data points as seeds."
% bin_size
)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
| get_bin_seeds |
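Usage sketch for get_bin_seeds (illustrative only; imported from the private module named in the row above, and the point cloud is made up):
import numpy as np
from sklearn.cluster._mean_shift import get_bin_seeds  # module from the row above

X = np.array([[1.0, 1.0], [1.2, 1.4], [0.8, 1.2],
              [5.0, 5.1], [5.2, 4.9]])
seeds = get_bin_seeds(X, bin_size=1.0, min_bin_freq=1)
# The five points are snapped to a grid with spacing 1.0, so they collapse
# onto two seeds, [1., 1.] and [5., 5.], one per dense region of the data.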
scikit-learn | 298 | sklearn/utils/_chunking.py | def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):
"""Calculate how many rows can be processed within `working_memory`.
Parameters
----------
row_bytes : int
The expected number of bytes of memory that will be consumed
during the processing of each row.
max_n_rows : int, default=None
The maximum return value.
working_memory : int or float, default=None
The number of rows to fit inside this number of MiB will be
returned. When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
Returns
-------
int
The number of rows which can be processed within `working_memory`.
Warns
-----
Issues a UserWarning if `row_bytes` exceeds `working_memory` MiB.
"""
| /usr/src/app/target_test_cases/failed_tests_get_chunk_n_rows.txt | def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):
"""Calculate how many rows can be processed within `working_memory`.
Parameters
----------
row_bytes : int
The expected number of bytes of memory that will be consumed
during the processing of each row.
max_n_rows : int, default=None
The maximum return value.
working_memory : int or float, default=None
The number of rows to fit inside this number of MiB will be
returned. When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
Returns
-------
int
The number of rows which can be processed within `working_memory`.
Warns
-----
Issues a UserWarning if `row_bytes` exceeds `working_memory` MiB.
"""
if working_memory is None:
working_memory = get_config()["working_memory"]
chunk_n_rows = int(working_memory * (2**20) // row_bytes)
if max_n_rows is not None:
chunk_n_rows = min(chunk_n_rows, max_n_rows)
if chunk_n_rows < 1:
warnings.warn(
"Could not adhere to working_memory config. "
"Currently %.0fMiB, %.0fMiB required."
% (working_memory, np.ceil(row_bytes * 2**-20))
)
chunk_n_rows = 1
return chunk_n_rows
| get_chunk_n_rows |
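Usage sketch for get_chunk_n_rows (illustrative only; imported from the private module named in the row above, and the byte budget is made up):
from sklearn.utils._chunking import get_chunk_n_rows  # module from the row above

# Suppose each processed row needs 10_000 float64 entries, i.e. 80_000 bytes.
n_rows = get_chunk_n_rows(row_bytes=80_000, max_n_rows=5_000, working_memory=64)
# 64 MiB // 80_000 bytes == 838 rows per chunk (the max_n_rows cap does not
# bind here); with working_memory=None the global sklearn config value is used.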
scikit-learn | 299 | sklearn/utils/sparsefuncs.py | def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None):
"""Compute incremental mean and variance along an axis on a CSR or CSC matrix.
last_mean, last_var are the statistics computed at the last step by this
function. Both must be initialized to 0-arrays of the proper size, i.e.
the number of features in X. last_n is the number of samples encountered
until now.
Parameters
----------
X : CSR or CSC sparse matrix of shape (n_samples, n_features)
Input data.
axis : {0, 1}
Axis along which the statistics should be computed.
last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of means to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of variances to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_n : float or ndarray of shape (n_features,) or (n_samples,), \
dtype=floating
Sum of the weights seen so far, excluding the current weights.
If not float, it should be of shape (n_features,) if
axis=0 or (n_samples,) if axis=1. If float, it corresponds to
having the same weights for all samples (or features).
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise means if axis = 0 or
sample-wise means if axis = 1.
variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise variances if axis = 0 or
sample-wise variances if axis = 1.
n : ndarray of shape (n_features,) or (n_samples,), dtype=integral
Updated number of seen samples per feature if axis=0
or number of seen features per sample if axis=1.
If weights is not None, n is a sum of the weights of the seen
samples or features instead of the actual number of seen
samples or features.
Notes
-----
NaNs are ignored in the algorithm.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.incr_mean_variance_axis(
... csr, axis=0, last_mean=np.zeros(3), last_var=np.zeros(3), last_n=2
... )
(array([1.3..., 0.1..., 1.1...]), array([8.8..., 0.1..., 3.4...]),
array([6., 6., 6.]))
"""
| /usr/src/app/target_test_cases/failed_tests_incr_mean_variance_axis.txt | def incr_mean_variance_axis(X, *, axis, last_mean, last_var, last_n, weights=None):
"""Compute incremental mean and variance along an axis on a CSR or CSC matrix.
last_mean, last_var are the statistics computed at the last step by this
function. Both must be initialized to 0-arrays of the proper size, i.e.
the number of features in X. last_n is the number of samples encountered
until now.
Parameters
----------
X : CSR or CSC sparse matrix of shape (n_samples, n_features)
Input data.
axis : {0, 1}
Axis along which the statistics should be computed.
last_mean : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of means to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_var : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Array of variances to update with the new data X.
Should be of shape (n_features,) if axis=0 or (n_samples,) if axis=1.
last_n : float or ndarray of shape (n_features,) or (n_samples,), \
dtype=floating
Sum of the weights seen so far, excluding the current weights.
If not float, it should be of shape (n_features,) if
axis=0 or (n_samples,) if axis=1. If float, it corresponds to
having the same weights for all samples (or features).
weights : ndarray of shape (n_samples,) or (n_features,), default=None
If axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
.. versionadded:: 0.24
Returns
-------
means : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise means if axis = 0 or
sample-wise means if axis = 1.
variances : ndarray of shape (n_features,) or (n_samples,), dtype=floating
Updated feature-wise variances if axis = 0 or
sample-wise variances if axis = 1.
n : ndarray of shape (n_features,) or (n_samples,), dtype=integral
Updated number of seen samples per feature if axis=0
or number of seen features per sample if axis=1.
If weights is not None, n is a sum of the weights of the seen
samples or features instead of the actual number of seen
samples or features.
Notes
-----
NaNs are ignored in the algorithm.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.incr_mean_variance_axis(
... csr, axis=0, last_mean=np.zeros(3), last_var=np.zeros(3), last_n=2
... )
(array([1.3..., 0.1..., 1.1...]), array([8.8..., 0.1..., 3.4...]),
array([6., 6., 6.]))
"""
_raise_error_wrong_axis(axis)
if not (sp.issparse(X) and X.format in ("csc", "csr")):
_raise_typeerror(X)
if np.size(last_n) == 1:
last_n = np.full(last_mean.shape, last_n, dtype=last_mean.dtype)
if not (np.size(last_mean) == np.size(last_var) == np.size(last_n)):
raise ValueError("last_mean, last_var, last_n do not have the same shapes.")
if axis == 1:
if np.size(last_mean) != X.shape[0]:
raise ValueError(
"If axis=1, then last_mean, last_n, last_var should be of "
f"size n_samples {X.shape[0]} (Got {np.size(last_mean)})."
)
else: # axis == 0
if np.size(last_mean) != X.shape[1]:
raise ValueError(
"If axis=0, then last_mean, last_n, last_var should be of "
f"size n_features {X.shape[1]} (Got {np.size(last_mean)})."
)
X = X.T if axis == 1 else X
if weights is not None:
weights = _check_sample_weight(weights, X, dtype=X.dtype)
return _incr_mean_var_axis0(
X, last_mean=last_mean, last_var=last_var, last_n=last_n, weights=weights
)
| incr_mean_variance_axis |
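A complementary sketch for incr_mean_variance_axis showing the incremental two-batch update pattern that the docstring example does not cover (illustrative only; the random data is made up):
import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import incr_mean_variance_axis

rng = np.random.RandomState(0)
X = sparse.csr_matrix(rng.poisson(1.0, size=(100, 3)).astype(np.float64))
mean, var, n = np.zeros(3), np.zeros(3), np.zeros(3)
for batch in (X[:40], X[40:]):  # stream the data in two chunks
    mean, var, n = incr_mean_variance_axis(
        batch, axis=0, last_mean=mean, last_var=var, last_n=n
    )
# After both updates, `mean` matches np.asarray(X.mean(axis=0)).ravel() up to
# rounding, and `n` counts the 100 samples seen per feature.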
scikit-learn | 300 | sklearn/neighbors/_graph.py | def kneighbors_graph(
X,
n_neighbors,
*,
mode="connectivity",
metric="minkowski",
p=2,
metric_params=None,
include_self=False,
n_jobs=None,
):
"""Compute the (weighted) graph of k-Neighbors for points in X.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample data.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : str, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
p : float, default=2
Power parameter for the Minkowski metric. When p = 1, this is equivalent
to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.
For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected
to be positive.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
include_self : bool or 'auto', default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If 'auto', then True is used for mode='connectivity' and False
for mode='distance'.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix of shape (n_samples, n_samples)
Graph where A[i, j] is assigned the weight of edge that
connects i to j. The matrix is of CSR format.
See Also
--------
radius_neighbors_graph: Compute the (weighted) graph of Neighbors for points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 1.],
[1., 0., 1.]])
"""
| /usr/src/app/target_test_cases/failed_tests_kneighbors_graph.txt | def kneighbors_graph(
X,
n_neighbors,
*,
mode="connectivity",
metric="minkowski",
p=2,
metric_params=None,
include_self=False,
n_jobs=None,
):
"""Compute the (weighted) graph of k-Neighbors for points in X.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample data.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : str, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
p : float, default=2
Power parameter for the Minkowski metric. When p = 1, this is equivalent
to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.
For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected
to be positive.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
include_self : bool or 'auto', default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If 'auto', then True is used for mode='connectivity' and False
for mode='distance'.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix of shape (n_samples, n_samples)
Graph where A[i, j] is assigned the weight of edge that
connects i to j. The matrix is of CSR format.
See Also
--------
radius_neighbors_graph: Compute the (weighted) graph of Neighbors for points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 1.],
[1., 0., 1.]])
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(
n_neighbors=n_neighbors,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X._fit_X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
| kneighbors_graph |
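A complementary sketch for kneighbors_graph using mode='distance', which the docstring example does not show (illustrative only; same toy data as the docstring):
from sklearn.neighbors import kneighbors_graph

X = [[0], [3], [1]]
D = kneighbors_graph(X, 2, mode='distance')   # include_self=False by default
D.toarray()
# array([[0., 3., 1.],
#        [3., 0., 2.],
#        [1., 2., 0.]])
# Each row stores the distances to that sample's two nearest neighbours,
# the sample itself being excluded.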
scikit-learn | 301 | sklearn/cluster/_agglomerative.py | def linkage_tree(
X,
connectivity=None,
n_clusters=None,
linkage="complete",
affinity="euclidean",
return_distance=False,
):
"""Linkage agglomerative clustering based on a Feature matrix.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : sparse matrix, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is `None`, i.e, the Ward algorithm is unstructured.
n_clusters : int, default=None
Stop early the construction of the tree at `n_clusters`. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
linkage : {"average", "complete", "single"}, default="complete"
Which linkage criteria to use. The linkage criterion determines which
distance to use between sets of observations.
- "average" uses the average of the distances of each observation of
the two sets.
- "complete" or maximum linkage uses the maximum distances between
all observations of the two sets.
- "single" uses the minimum of the distances between all
observations of the two sets.
affinity : str or callable, default='euclidean'
Which metric to use. Can be 'euclidean', 'manhattan', or any
distance known to paired distance (see metric.pairwise).
return_distance : bool, default=False
Whether or not to return the distances between the clusters.
Returns
-------
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_connected_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : ndarray of shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, elsewhere 'None' is returned.
distances : ndarray of shape (n_nodes-1,)
Returned when `return_distance` is set to `True`.
distances[i] refers to the distance between children[i][0] and
children[i][1] when they are merged.
See Also
--------
ward_tree : Hierarchical clustering with ward linkage.
"""
| /usr/src/app/target_test_cases/failed_tests_linkage_tree.txt | def linkage_tree(
X,
connectivity=None,
n_clusters=None,
linkage="complete",
affinity="euclidean",
return_distance=False,
):
"""Linkage agglomerative clustering based on a Feature matrix.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : sparse matrix, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is `None`, i.e, the Ward algorithm is unstructured.
n_clusters : int, default=None
Stop early the construction of the tree at `n_clusters`. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
linkage : {"average", "complete", "single"}, default="complete"
Which linkage criteria to use. The linkage criterion determines which
distance to use between sets of observations.
- "average" uses the average of the distances of each observation of
the two sets.
- "complete" or maximum linkage uses the maximum distances between
all observations of the two sets.
- "single" uses the minimum of the distances between all
observations of the two sets.
affinity : str or callable, default='euclidean'
Which metric to use. Can be 'euclidean', 'manhattan', or any
distance known to paired distance (see metric.pairwise).
return_distance : bool, default=False
Whether or not to return the distances between the clusters.
Returns
-------
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_connected_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : ndarray of shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, elsewhere 'None' is returned.
distances : ndarray of shape (n_nodes-1,)
Returned when `return_distance` is set to `True`.
distances[i] refers to the distance between children[i][0] and
children[i][1] when they are merged.
See Also
--------
ward_tree : Hierarchical clustering with ward linkage.
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
linkage_choices = {
"complete": _hierarchical.max_merge,
"average": _hierarchical.average_merge,
"single": None,
} # Single linkage is handled differently
try:
join_func = linkage_choices[linkage]
except KeyError as e:
raise ValueError(
"Unknown linkage option, linkage should be one of %s, but %s was given"
% (linkage_choices.keys(), linkage)
) from e
if affinity == "cosine" and np.any(~np.any(X, axis=1)):
raise ValueError("Cosine affinity cannot be used when X contains zero vectors")
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn(
(
"Partial build of the tree is implemented "
"only for structured clustering (i.e. with "
"explicit connectivity). The algorithm "
"will build the full tree and only "
"retain the lower branches required "
"for the specified number of clusters"
),
stacklevel=2,
)
if affinity == "precomputed":
# for the linkage function of hierarchy to work on precomputed
# data, provide as first argument an ndarray of the shape returned
# by sklearn.metrics.pairwise_distances.
if X.shape[0] != X.shape[1]:
raise ValueError(
f"Distance matrix should be square, got matrix of shape {X.shape}"
)
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
elif affinity == "l2":
# Translate to something understood by scipy
affinity = "euclidean"
elif affinity in ("l1", "manhattan"):
affinity = "cityblock"
elif callable(affinity):
X = affinity(X)
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
if (
linkage == "single"
and affinity != "precomputed"
and not callable(affinity)
and affinity in METRIC_MAPPING64
):
# We need the fast cythonized metric from neighbors
dist_metric = DistanceMetric.get_metric(affinity)
# The Cython routines used require contiguous arrays
X = np.ascontiguousarray(X, dtype=np.double)
mst = _hierarchical.mst_linkage_core(X, dist_metric)
# Sort edges of the min_spanning_tree by weight
mst = mst[np.argsort(mst.T[2], kind="mergesort"), :]
# Convert edge list into standard hierarchical clustering format
out = _hierarchical.single_linkage_label(mst)
else:
out = hierarchy.linkage(X, method=linkage, metric=affinity)
children_ = out[:, :2].astype(int, copy=False)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
return children_, 1, n_samples, None
connectivity, n_connected_components = _fix_connectivity(
X, connectivity, affinity=affinity
)
connectivity = connectivity.tocoo()
# Put the diagonal to zero
diag_mask = connectivity.row != connectivity.col
connectivity.row = connectivity.row[diag_mask]
connectivity.col = connectivity.col[diag_mask]
connectivity.data = connectivity.data[diag_mask]
del diag_mask
if affinity == "precomputed":
distances = X[connectivity.row, connectivity.col].astype(np.float64, copy=False)
else:
# FIXME We compute all the distances, while we could have only computed
# the "interesting" distances
distances = paired_distances(
X[connectivity.row], X[connectivity.col], metric=affinity
)
connectivity.data = distances
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
assert n_clusters <= n_samples
n_nodes = 2 * n_samples - n_clusters
if linkage == "single":
return _single_linkage_tree(
connectivity,
n_samples,
n_nodes,
n_clusters,
n_connected_components,
return_distance,
)
if return_distance:
distances = np.empty(n_nodes - n_samples)
# create inertia heap and connection matrix
A = np.empty(n_nodes, dtype=object)
inertia = list()
# LIL seems to the best format to access the rows quickly,
# without the numpy overhead of slicing CSR indices and data.
connectivity = connectivity.tolil()
# We are storing the graph in a list of IntFloatDict
for ind, (data, row) in enumerate(zip(connectivity.data, connectivity.rows)):
A[ind] = IntFloatDict(
np.asarray(row, dtype=np.intp), np.asarray(data, dtype=np.float64)
)
# We keep only the upper triangular for the heap
# Generator expressions are faster than arrays on the following
inertia.extend(
_hierarchical.WeightedEdge(d, ind, r) for r, d in zip(row, data) if r < ind
)
del connectivity
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=np.intp)
children = []
# recursive merge loop
for k in range(n_samples, n_nodes):
# identify the merge
while True:
edge = heappop(inertia)
if used_node[edge.a] and used_node[edge.b]:
break
i = edge.a
j = edge.b
if return_distance:
# store distances
distances[k - n_samples] = edge.weight
parent[i] = parent[j] = k
children.append((i, j))
# Keep track of the number of elements per cluster
n_i = used_node[i]
n_j = used_node[j]
used_node[k] = n_i + n_j
used_node[i] = used_node[j] = False
# update the structure matrix A and the inertia matrix
# a clever 'min', or 'max' operation between A[i] and A[j]
coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
for col, d in coord_col:
A[col].append(k, d)
# Here we use the information from coord_col (containing the
# distances) to update the heap
heappush(inertia, _hierarchical.WeightedEdge(d, k, col))
A[k] = coord_col
# Clear A[i] and A[j] to save memory
A[i] = A[j] = 0
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
# return numpy array for efficient caching
children = np.array(children)[:, ::-1]
if return_distance:
return children, n_connected_components, n_leaves, parent, distances
return children, n_connected_components, n_leaves, parent
| linkage_tree |
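Usage sketch for linkage_tree (illustrative only; imported from the private module named in the row above, and the two-blob data is made up):
import numpy as np
from sklearn.cluster._agglomerative import linkage_tree  # module from the row above

rng = np.random.RandomState(0)
X = np.concatenate([rng.randn(10, 2), rng.randn(10, 2) + 5.0])
# Unstructured case (connectivity=None): scipy's hierarchy routines are used.
children, n_components, n_leaves, parents = linkage_tree(X, linkage="average")
# children.shape == (19, 2), n_components == 1, n_leaves == 20 and parents is
# None; pass a sparse connectivity matrix to obtain a structured tree instead.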
scikit-learn | 302 | sklearn/datasets/_arff_parser.py | def load_arff_from_gzip_file(
gzip_file,
parser,
output_type,
openml_columns_info,
feature_names_to_select,
target_names_to_select,
shape=None,
read_csv_kwargs=None,
):
"""Load a compressed ARFF file using a given parser.
Parameters
----------
gzip_file : GzipFile instance
The file compressed to be read.
parser : {"pandas", "liac-arff"}
The parser used to parse the ARFF file. "pandas" is recommended
but only supports loading dense datasets.
output_type : {"numpy", "sparse", "pandas"}
The type of the arrays that will be returned. The possibilities are:
- `"numpy"`: both `X` and `y` will be NumPy arrays;
- `"sparse"`: `X` will be a sparse matrix and `y` will be a NumPy array;
- `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
pandas Series or DataFrame.
openml_columns_info : dict
The information provided by OpenML regarding the columns of the ARFF
file.
feature_names_to_select : list of str
A list of the feature names to be selected.
target_names_to_select : list of str
A list of the target names to be selected.
read_csv_kwargs : dict, default=None
Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite
the default options.
Returns
-------
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_array_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_array_type == "pandas"`.
"""
| /usr/src/app/target_test_cases/failed_tests_load_arff_from_gzip_file.txt | def load_arff_from_gzip_file(
gzip_file,
parser,
output_type,
openml_columns_info,
feature_names_to_select,
target_names_to_select,
shape=None,
read_csv_kwargs=None,
):
"""Load a compressed ARFF file using a given parser.
Parameters
----------
gzip_file : GzipFile instance
        The compressed file to be read.
parser : {"pandas", "liac-arff"}
The parser used to parse the ARFF file. "pandas" is recommended
but only supports loading dense datasets.
output_type : {"numpy", "sparse", "pandas"}
        The type of the arrays that will be returned. The possibilities are:
- `"numpy"`: both `X` and `y` will be NumPy arrays;
- `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
- `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
pandas Series or DataFrame.
openml_columns_info : dict
The information provided by OpenML regarding the columns of the ARFF
file.
feature_names_to_select : list of str
A list of the feature names to be selected.
target_names_to_select : list of str
A list of the target names to be selected.
read_csv_kwargs : dict, default=None
        Keyword arguments to pass to `pandas.read_csv`. It allows overwriting
the default options.
Returns
-------
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_array_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_array_type == "pandas"`.
"""
if parser == "liac-arff":
return _liac_arff_parser(
gzip_file,
output_type,
openml_columns_info,
feature_names_to_select,
target_names_to_select,
shape,
)
elif parser == "pandas":
return _pandas_arff_parser(
gzip_file,
output_type,
openml_columns_info,
feature_names_to_select,
target_names_to_select,
read_csv_kwargs,
)
else:
raise ValueError(
f"Unknown parser: '{parser}'. Should be 'liac-arff' or 'pandas'."
)
| load_arff_from_gzip_file |
scikit-learn | 303 | sklearn/datasets/_base.py | def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True):
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
.. note::
The meaning of each feature (i.e. `feature_names`) might be unclear
(especially for `ltg`) as the documentation of the original dataset is
        not explicit. We provide information that seems correct with regard to
the scientific literature in this field of research.
Read more in the :ref:`User Guide <diabetes_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
scaled : bool, default=True
If True, the feature variables are mean centered and scaled by the
standard deviation times the square root of `n_samples`.
If False, raw data is returned for the feature variables.
.. versionadded:: 1.1
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (442, 10)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (442,)
The regression target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
frame: DataFrame of shape (442, 11)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
data_filename: str
The path to the location of the data.
target_filename: str
The path to the location of the target.
(data, target) : tuple if ``return_X_y`` is True
Returns a tuple of two ndarray of shape (n_samples, n_features)
A 2D array with each row representing one sample and each column
representing the features and/or target of a given sample.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> diabetes = load_diabetes()
>>> diabetes.target[:3]
array([151., 75., 141.])
>>> diabetes.data.shape
(442, 10)
"""
| /usr/src/app/target_test_cases/failed_tests_load_diabetes.txt | def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True):
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
.. note::
The meaning of each feature (i.e. `feature_names`) might be unclear
(especially for `ltg`) as the documentation of the original dataset is
        not explicit. We provide information that seems correct with regard to
the scientific literature in this field of research.
Read more in the :ref:`User Guide <diabetes_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
scaled : bool, default=True
If True, the feature variables are mean centered and scaled by the
standard deviation times the square root of `n_samples`.
If False, raw data is returned for the feature variables.
.. versionadded:: 1.1
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (442, 10)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (442,)
The regression target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
frame: DataFrame of shape (442, 11)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
data_filename: str
The path to the location of the data.
target_filename: str
The path to the location of the target.
(data, target) : tuple if ``return_X_y`` is True
Returns a tuple of two ndarray of shape (n_samples, n_features)
A 2D array with each row representing one sample and each column
representing the features and/or target of a given sample.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> diabetes = load_diabetes()
>>> diabetes.target[:3]
array([151., 75., 141.])
>>> diabetes.data.shape
(442, 10)
"""
data_filename = "diabetes_data_raw.csv.gz"
target_filename = "diabetes_target.csv.gz"
data = load_gzip_compressed_csv_data(data_filename)
target = load_gzip_compressed_csv_data(target_filename)
if scaled:
data = scale(data, copy=False)
data /= data.shape[0] ** 0.5
fdescr = load_descr("diabetes.rst")
feature_names = ["age", "sex", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"]
frame = None
target_columns = [
"target",
]
if as_frame:
frame, data, target = _convert_data_dataframe(
"load_diabetes", data, target, feature_names, target_columns
)
if return_X_y:
return data, target
return Bunch(
data=data,
target=target,
frame=frame,
DESCR=fdescr,
feature_names=feature_names,
data_filename=data_filename,
target_filename=target_filename,
data_module=DATA_MODULE,
)
| load_diabetes |
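A short usage sketch for the two keyword options documented above; the expected shapes follow directly from the docstring (442 samples, 10 features plus one target column).

from sklearn.datasets import load_diabetes

# Raw (unscaled) features returned as a pandas DataFrame plus Series target.
bunch = load_diabetes(as_frame=True, scaled=False)
print(bunch.frame.shape)         # (442, 11): 10 features + 1 target column
print(bunch.feature_names[:4])   # ['age', 'sex', 'bmi', 'bp']

# Classic scaled ndarray form, returned as an (X, y) tuple.
X, y = load_diabetes(return_X_y=True)
print(X.shape, y.shape)          # (442, 10) (442,)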
scikit-learn | 304 | sklearn/neural_network/_base.py | def log_loss(y_true, y_prob):
"""Compute Logistic loss for classification.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
Returns
-------
loss : float
        The logistic loss averaged over the samples; lower values indicate
        predicted probabilities closer to the true labels.
"""
| /usr/src/app/target_test_cases/failed_tests_log_loss.txt | def log_loss(y_true, y_prob):
"""Compute Logistic loss for classification.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
Returns
-------
loss : float
        The logistic loss averaged over the samples; lower values indicate
        predicted probabilities closer to the true labels.
"""
eps = np.finfo(y_prob.dtype).eps
y_prob = np.clip(y_prob, eps, 1 - eps)
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
if y_true.shape[1] == 1:
y_true = np.append(1 - y_true, y_true, axis=1)
return -xlogy(y_true, y_prob).sum() / y_prob.shape[0]
| log_loss |
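A small self-contained sketch of the computation performed above, written against plain NumPy/SciPy rather than the private sklearn.neural_network._base module; the arrays are illustrative toy values.

import numpy as np
from scipy.special import xlogy

y_true = np.array([[1.0], [0.0], [1.0]])   # binary targets, one column
y_prob = np.array([[0.9], [0.2], [0.8]])   # predicted P(class 1)

eps = np.finfo(y_prob.dtype).eps
y_prob = np.clip(y_prob, eps, 1 - eps)
# Expand single-column inputs to explicit [P(class 0), P(class 1)] form,
# mirroring the np.append steps in the function body.
y_prob2 = np.hstack([1 - y_prob, y_prob])
y_true2 = np.hstack([1 - y_true, y_true])
loss = -xlogy(y_true2, y_prob2).sum() / y_prob2.shape[0]
print(loss)   # ~0.184: mean of -log(0.9), -log(0.8), -log(0.8)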
scikit-learn | 305 | sklearn/datasets/_samples_generator.py | def make_blobs(
n_samples=100,
n_features=2,
*,
centers=None,
cluster_std=1.0,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=None,
return_centers=False,
):
"""Generate isotropic Gaussian blobs for clustering.
For an example of usage, see
:ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or array-like, default=100
If int, it is the total number of points equally divided among
clusters.
If array-like, each element of the sequence indicates
the number of samples per cluster.
.. versionchanged:: v0.20
one can now pass an array-like to the ``n_samples`` parameter
n_features : int, default=2
The number of features for each sample.
centers : int or array-like of shape (n_centers, n_features), default=None
The number of centers to generate, or the fixed center locations.
If n_samples is an int and centers is None, 3 centers are generated.
If n_samples is array-like, centers must be
either None or an array of length equal to the length of n_samples.
cluster_std : float or array-like of float, default=1.0
The standard deviation of the clusters.
center_box : tuple of float (min, max), default=(-10.0, 10.0)
The bounding box for each cluster center when centers are
generated at random.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
return_centers : bool, default=False
If True, then return the centers of each cluster.
.. versionadded:: 0.23
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for cluster membership of each sample.
centers : ndarray of shape (n_centers, n_features)
The centers of each cluster. Only returned if
``return_centers=True``.
See Also
--------
make_classification : A more intricate variant.
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
>>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])
"""
| /usr/src/app/target_test_cases/failed_tests_make_blobs.txt | def make_blobs(
n_samples=100,
n_features=2,
*,
centers=None,
cluster_std=1.0,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=None,
return_centers=False,
):
"""Generate isotropic Gaussian blobs for clustering.
For an example of usage, see
:ref:`sphx_glr_auto_examples_datasets_plot_random_dataset.py`.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or array-like, default=100
If int, it is the total number of points equally divided among
clusters.
If array-like, each element of the sequence indicates
the number of samples per cluster.
.. versionchanged:: v0.20
one can now pass an array-like to the ``n_samples`` parameter
n_features : int, default=2
The number of features for each sample.
centers : int or array-like of shape (n_centers, n_features), default=None
The number of centers to generate, or the fixed center locations.
If n_samples is an int and centers is None, 3 centers are generated.
If n_samples is array-like, centers must be
either None or an array of length equal to the length of n_samples.
cluster_std : float or array-like of float, default=1.0
The standard deviation of the clusters.
center_box : tuple of float (min, max), default=(-10.0, 10.0)
The bounding box for each cluster center when centers are
generated at random.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
return_centers : bool, default=False
If True, then return the centers of each cluster.
.. versionadded:: 0.23
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for cluster membership of each sample.
centers : ndarray of shape (n_centers, n_features)
The centers of each cluster. Only returned if
``return_centers=True``.
See Also
--------
make_classification : A more intricate variant.
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
>>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(n_samples, numbers.Integral):
# Set n_centers by looking at centers arg
if centers is None:
centers = 3
if isinstance(centers, numbers.Integral):
n_centers = centers
centers = generator.uniform(
center_box[0], center_box[1], size=(n_centers, n_features)
)
else:
centers = check_array(centers)
n_features = centers.shape[1]
n_centers = centers.shape[0]
else:
# Set n_centers by looking at [n_samples] arg
n_centers = len(n_samples)
if centers is None:
centers = generator.uniform(
center_box[0], center_box[1], size=(n_centers, n_features)
)
if not isinstance(centers, Iterable):
raise ValueError(
"Parameter `centers` must be array-like. Got {!r} instead".format(
centers
)
)
if len(centers) != n_centers:
raise ValueError(
"Length of `n_samples` not consistent with number of "
f"centers. Got n_samples = {n_samples} and centers = {centers}"
)
centers = check_array(centers)
n_features = centers.shape[1]
# stds: if cluster_std is given as list, it must be consistent
# with the n_centers
if hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers:
raise ValueError(
"Length of `clusters_std` not consistent with "
"number of centers. Got centers = {} "
"and cluster_std = {}".format(centers, cluster_std)
)
if isinstance(cluster_std, numbers.Real):
cluster_std = np.full(len(centers), cluster_std)
if isinstance(n_samples, Iterable):
n_samples_per_center = n_samples
else:
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
cum_sum_n_samples = np.cumsum(n_samples_per_center)
X = np.empty(shape=(sum(n_samples_per_center), n_features), dtype=np.float64)
y = np.empty(shape=(sum(n_samples_per_center),), dtype=int)
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
start_idx = cum_sum_n_samples[i - 1] if i > 0 else 0
end_idx = cum_sum_n_samples[i]
X[start_idx:end_idx] = generator.normal(
loc=centers[i], scale=std, size=(n, n_features)
)
y[start_idx:end_idx] = i
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if return_centers:
return X, y, centers
else:
return X, y
| make_blobs |
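A brief sketch of the array-like n_samples and return_centers options described above; the per-cluster sizes and standard deviations are toy choices.

from sklearn.datasets import make_blobs

X, y, centers = make_blobs(
    n_samples=[10, 20],          # 10 points in cluster 0, 20 in cluster 1
    cluster_std=[0.5, 1.5],      # per-cluster standard deviations
    random_state=0,
    return_centers=True,
)
print(X.shape, y.shape, centers.shape)   # (30, 2) (30,) (2, 2)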
scikit-learn | 306 | sklearn/compose/_column_transformer.py | def make_column_transformer(
*transformers,
remainder="drop",
sparse_threshold=0.3,
n_jobs=None,
verbose=False,
verbose_feature_names_out=True,
force_int_remainder_cols=True,
):
"""Construct a ColumnTransformer from the given transformers.
This is a shorthand for the ColumnTransformer constructor; it does not
require, and does not permit, naming the transformers. Instead, they will
be given names automatically based on their types. It also does not allow
weighting with ``transformer_weights``.
Read more in the :ref:`User Guide <make_column_transformer>`.
Parameters
----------
*transformers : tuples
Tuples of the form (transformer, columns) specifying the
transformer objects to be applied to subsets of the data.
transformer : {'drop', 'passthrough'} or estimator
Estimator must support :term:`fit` and :term:`transform`.
Special-cased strings 'drop' and 'passthrough' are accepted as
well, to indicate to drop the columns or to pass them through
untransformed, respectively.
columns : str, array-like of str, int, array-like of int, slice, \
array-like of bool or callable
Indexes the data on its second axis. Integers are interpreted as
positional columns, while strings can reference DataFrame columns
by name. A scalar string or int should be used where
``transformer`` expects X to be a 1d array-like (vector),
otherwise a 2d array will be passed to the transformer.
A callable is passed the input data `X` and can return any of the
above. To select multiple columns by name or dtype, you can use
:obj:`make_column_selector`.
remainder : {'drop', 'passthrough'} or estimator, default='drop'
By default, only the specified columns in `transformers` are
transformed and combined in the output, and the non-specified
columns are dropped. (default of ``'drop'``).
By specifying ``remainder='passthrough'``, all remaining columns that
were not specified in `transformers` will be automatically passed
through. This subset of columns is concatenated with the output of
the transformers.
By setting ``remainder`` to be an estimator, the remaining
non-specified columns will use the ``remainder`` estimator. The
estimator must support :term:`fit` and :term:`transform`.
sparse_threshold : float, default=0.3
If the transformed output consists of a mix of sparse and dense data,
it will be stacked as a sparse matrix if the density is lower than this
value. Use ``sparse_threshold=0`` to always return dense.
When the transformed output consists of all sparse or all dense data,
the stacked result will be sparse or dense, respectively, and this
keyword will be ignored.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
verbose_feature_names_out : bool, default=True
If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix
all feature names with the name of the transformer that generated that
feature.
If False, :meth:`ColumnTransformer.get_feature_names_out` will not
prefix any feature names and will error if feature names are not
unique.
.. versionadded:: 1.0
force_int_remainder_cols : bool, default=True
Force the columns of the last entry of `transformers_`, which
corresponds to the "remainder" transformer, to always be stored as
indices (int) rather than column names (str). See description of the
:attr:`ColumnTransformer.transformers_` attribute for details.
.. note::
If you do not access the list of columns for the remainder columns
in the :attr:`ColumnTransformer.transformers_` fitted attribute,
you do not need to set this parameter.
.. versionadded:: 1.5
.. versionchanged:: 1.7
The default value for `force_int_remainder_cols` will change from
`True` to `False` in version 1.7.
Returns
-------
ct : ColumnTransformer
Returns a :class:`ColumnTransformer` object.
See Also
--------
ColumnTransformer : Class that allows combining the
outputs of multiple transformer objects used on column subsets
of the data into a single feature space.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
>>> from sklearn.compose import make_column_transformer
>>> make_column_transformer(
... (StandardScaler(), ['numerical_column']),
... (OneHotEncoder(), ['categorical_column']))
ColumnTransformer(transformers=[('standardscaler', StandardScaler(...),
['numerical_column']),
('onehotencoder', OneHotEncoder(...),
['categorical_column'])])
"""
| /usr/src/app/target_test_cases/failed_tests_make_column_transformer.txt | def make_column_transformer(
*transformers,
remainder="drop",
sparse_threshold=0.3,
n_jobs=None,
verbose=False,
verbose_feature_names_out=True,
force_int_remainder_cols=True,
):
"""Construct a ColumnTransformer from the given transformers.
This is a shorthand for the ColumnTransformer constructor; it does not
require, and does not permit, naming the transformers. Instead, they will
be given names automatically based on their types. It also does not allow
weighting with ``transformer_weights``.
Read more in the :ref:`User Guide <make_column_transformer>`.
Parameters
----------
*transformers : tuples
Tuples of the form (transformer, columns) specifying the
transformer objects to be applied to subsets of the data.
transformer : {'drop', 'passthrough'} or estimator
Estimator must support :term:`fit` and :term:`transform`.
Special-cased strings 'drop' and 'passthrough' are accepted as
well, to indicate to drop the columns or to pass them through
untransformed, respectively.
columns : str, array-like of str, int, array-like of int, slice, \
array-like of bool or callable
Indexes the data on its second axis. Integers are interpreted as
positional columns, while strings can reference DataFrame columns
by name. A scalar string or int should be used where
``transformer`` expects X to be a 1d array-like (vector),
otherwise a 2d array will be passed to the transformer.
A callable is passed the input data `X` and can return any of the
above. To select multiple columns by name or dtype, you can use
:obj:`make_column_selector`.
remainder : {'drop', 'passthrough'} or estimator, default='drop'
By default, only the specified columns in `transformers` are
transformed and combined in the output, and the non-specified
columns are dropped. (default of ``'drop'``).
By specifying ``remainder='passthrough'``, all remaining columns that
were not specified in `transformers` will be automatically passed
through. This subset of columns is concatenated with the output of
the transformers.
By setting ``remainder`` to be an estimator, the remaining
non-specified columns will use the ``remainder`` estimator. The
estimator must support :term:`fit` and :term:`transform`.
sparse_threshold : float, default=0.3
If the transformed output consists of a mix of sparse and dense data,
it will be stacked as a sparse matrix if the density is lower than this
value. Use ``sparse_threshold=0`` to always return dense.
When the transformed output consists of all sparse or all dense data,
the stacked result will be sparse or dense, respectively, and this
keyword will be ignored.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
verbose_feature_names_out : bool, default=True
If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix
all feature names with the name of the transformer that generated that
feature.
If False, :meth:`ColumnTransformer.get_feature_names_out` will not
prefix any feature names and will error if feature names are not
unique.
.. versionadded:: 1.0
force_int_remainder_cols : bool, default=True
Force the columns of the last entry of `transformers_`, which
corresponds to the "remainder" transformer, to always be stored as
indices (int) rather than column names (str). See description of the
:attr:`ColumnTransformer.transformers_` attribute for details.
.. note::
If you do not access the list of columns for the remainder columns
in the :attr:`ColumnTransformer.transformers_` fitted attribute,
you do not need to set this parameter.
.. versionadded:: 1.5
.. versionchanged:: 1.7
The default value for `force_int_remainder_cols` will change from
`True` to `False` in version 1.7.
Returns
-------
ct : ColumnTransformer
Returns a :class:`ColumnTransformer` object.
See Also
--------
ColumnTransformer : Class that allows combining the
outputs of multiple transformer objects used on column subsets
of the data into a single feature space.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
>>> from sklearn.compose import make_column_transformer
>>> make_column_transformer(
... (StandardScaler(), ['numerical_column']),
... (OneHotEncoder(), ['categorical_column']))
ColumnTransformer(transformers=[('standardscaler', StandardScaler(...),
['numerical_column']),
('onehotencoder', OneHotEncoder(...),
['categorical_column'])])
"""
# transformer_weights keyword is not passed through because the user
# would need to know the automatically generated names of the transformers
transformer_list = _get_transformer_list(transformers)
return ColumnTransformer(
transformer_list,
n_jobs=n_jobs,
remainder=remainder,
sparse_threshold=sparse_threshold,
verbose=verbose,
verbose_feature_names_out=verbose_feature_names_out,
force_int_remainder_cols=force_int_remainder_cols,
)
| make_column_transformer |
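A minimal end-to-end sketch of fitting the returned ColumnTransformer on a small DataFrame; the column names and the remainder choice are illustrative only.

import pandas as pd
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

df = pd.DataFrame(
    {"num": [0.0, 1.0, 2.0], "cat": ["a", "b", "a"], "extra": [10, 20, 30]}
)
ct = make_column_transformer(
    (StandardScaler(), ["num"]),
    (OneHotEncoder(), ["cat"]),
    remainder="passthrough",        # keep the 'extra' column untouched
)
Xt = ct.fit_transform(df)
print(Xt.shape)                      # (3, 4): 1 scaled + 2 one-hot + 1 passthrough
print(ct.get_feature_names_out())    # prefixed names (verbose_feature_names_out=True)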
scikit-learn | 307 | sklearn/linear_model/_base.py | def make_dataset(X, y, sample_weight, random_state=None):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data
y : array-like, shape (n_samples, )
Target values.
sample_weight : numpy array of shape (n_samples,)
The weight of each sample
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset random sampling. It is not
used for dataset shuffling.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
dataset
The ``Dataset`` abstraction
intercept_decay
The intercept decay
"""
| /usr/src/app/target_test_cases/failed_tests_make_dataset.txt | def make_dataset(X, y, sample_weight, random_state=None):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data
y : array-like, shape (n_samples, )
Target values.
sample_weight : numpy array of shape (n_samples,)
The weight of each sample
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset random sampling. It is not
used for dataset shuffling.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
dataset
The ``Dataset`` abstraction
intercept_decay
The intercept decay
"""
rng = check_random_state(random_state)
# seed should never be 0 in SequentialDataset64
seed = rng.randint(1, np.iinfo(np.int32).max)
if X.dtype == np.float32:
CSRData = CSRDataset32
ArrayData = ArrayDataset32
else:
CSRData = CSRDataset64
ArrayData = ArrayDataset64
if sp.issparse(X):
dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight, seed=seed)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
X = np.ascontiguousarray(X)
dataset = ArrayData(X, y, sample_weight, seed=seed)
intercept_decay = 1.0
return dataset, intercept_decay
| make_dataset |
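A cautious sketch of calling this private helper directly, importing it from the module path given in this record; the toy arrays are illustrative, and in normal use the SGD/SAG estimators call it for you.

import numpy as np
from sklearn.linear_model._base import make_dataset

rng = np.random.RandomState(0)
X = np.ascontiguousarray(rng.randn(10, 3))      # dense float64, C-contiguous
y = rng.randn(10)
sample_weight = np.ones(10)

dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state=0)
print(type(dataset).__name__)   # ArrayDataset64 for dense float64 input
print(intercept_decay)          # 1.0 for dense data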
scikit-learn | 308 | sklearn/utils/sparsefuncs.py | def min_max_axis(X, axis, ignore_nan=False):
"""Compute minimum and maximum along an axis on a CSR or CSC matrix.
Optionally ignore NaN values.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It should be of CSR or CSC format.
axis : {0, 1}
        Axis along which the minima and maxima are computed.
ignore_nan : bool, default=False
        Ignore or pass through NaN values.
.. versionadded:: 0.20
Returns
-------
mins : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Feature-wise minima.
maxs : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Feature-wise maxima.
"""
| /usr/src/app/target_test_cases/failed_tests_min_max_axis.txt | def min_max_axis(X, axis, ignore_nan=False):
"""Compute minimum and maximum along an axis on a CSR or CSC matrix.
Optionally ignore NaN values.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It should be of CSR or CSC format.
axis : {0, 1}
        Axis along which the minima and maxima are computed.
ignore_nan : bool, default=False
        Ignore or pass through NaN values.
.. versionadded:: 0.20
Returns
-------
mins : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Feature-wise minima.
maxs : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Feature-wise maxima.
"""
if sp.issparse(X) and X.format in ("csr", "csc"):
if ignore_nan:
return _sparse_nan_min_max(X, axis=axis)
else:
return _sparse_min_max(X, axis=axis)
else:
_raise_typeerror(X)
| min_max_axis |
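A short sketch showing that the reductions include the implicit zeros of the sparse matrix; the toy matrix is illustrative.

import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import min_max_axis

X = sparse.csr_matrix(np.array([[1.0, 0.0, 3.0],
                                [0.0, -2.0, 5.0]]))
mins, maxs = min_max_axis(X, axis=0)
print(mins)   # [ 0. -2.  3.]  (implicit zeros count towards the minimum)
print(maxs)   # [ 1.  0.  5.]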
scikit-learn | 309 | sklearn/utils/extmath.py | def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X : array-like of float of shape (M, N)
        Argument to the softmax function.
copy : bool, default=True
Copy X or not.
Returns
-------
out : ndarray of shape (M, N)
Softmax function evaluated at every point in x.
"""
| /usr/src/app/target_test_cases/failed_tests_softmax.txt | def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X : array-like of float of shape (M, N)
        Argument to the softmax function.
copy : bool, default=True
Copy X or not.
Returns
-------
out : ndarray of shape (M, N)
Softmax function evaluated at every point in x.
"""
xp, is_array_api_compliant = get_namespace(X)
if copy:
X = xp.asarray(X, copy=True)
max_prob = xp.reshape(xp.max(X, axis=1), (-1, 1))
X -= max_prob
if _is_numpy_namespace(xp):
# optimization for NumPy arrays
np.exp(X, out=np.asarray(X))
else:
# array_api does not have `out=`
X = xp.exp(X)
sum_prob = xp.reshape(xp.sum(X, axis=1), (-1, 1))
X /= sum_prob
return X
| softmax |
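A quick numerical-stability sketch: without the row-wise max subtraction described above, exponentiating these values would overflow to inf.

import numpy as np
from sklearn.utils.extmath import softmax

X = np.array([[1000.0, 1001.0, 1002.0]])
print(softmax(X))   # approx [[0.090, 0.245, 0.665]], no overflow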
scikit-learn | 310 | sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py | def sqeuclidean_row_norms(X, num_threads):
"""Compute the squared euclidean norm of the rows of X in parallel.
Parameters
----------
X : ndarray or CSR matrix of shape (n_samples, n_features)
Input data. Must be c-contiguous.
num_threads : int
The number of OpenMP threads to use.
Returns
-------
sqeuclidean_row_norms : ndarray of shape (n_samples,)
Arrays containing the squared euclidean norm of each row of X.
"""
| /usr/src/app/target_test_cases/failed_tests_sqeuclidean_row_norms.txt | def sqeuclidean_row_norms(X, num_threads):
"""Compute the squared euclidean norm of the rows of X in parallel.
Parameters
----------
X : ndarray or CSR matrix of shape (n_samples, n_features)
Input data. Must be c-contiguous.
num_threads : int
The number of OpenMP threads to use.
Returns
-------
sqeuclidean_row_norms : ndarray of shape (n_samples,)
Arrays containing the squared euclidean norm of each row of X.
"""
if X.dtype == np.float64:
return np.asarray(_sqeuclidean_row_norms64(X, num_threads))
if X.dtype == np.float32:
return np.asarray(_sqeuclidean_row_norms32(X, num_threads))
raise ValueError(
"Only float64 or float32 datasets are supported at this time, "
f"got: X.dtype={X.dtype}."
)
| sqeuclidean_row_norms |
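An equivalence sketch in plain NumPy: the parallel helper above computes the same quantity as a row-wise squared-norm reduction; the random array is illustrative and the einsum form is just one way to express it.

import numpy as np

rng = np.random.RandomState(0)
X = np.ascontiguousarray(rng.randn(5, 3))

# Row-wise squared Euclidean norms, matching the definition above.
reference = np.einsum("ij,ij->i", X, X)
print(np.allclose(reference, (X ** 2).sum(axis=1)))   # True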
scikit-learn | 311 | sklearn/utils/extmath.py | def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum.
Warns if the final cumulative sum does not match the sum (up to the chosen
tolerance).
Parameters
----------
arr : array-like
        Array to be cumulatively summed (flattened if ``axis`` is None).
axis : int, default=None
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float, default=1e-05
Relative tolerance, see ``np.allclose``.
atol : float, default=1e-08
Absolute tolerance, see ``np.allclose``.
Returns
-------
out : ndarray
Array with the cumulative sums along the chosen axis.
"""
| /usr/src/app/target_test_cases/failed_tests_stable_cumsum.txt | def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum.
Warns if the final cumulative sum does not match the sum (up to the chosen
tolerance).
Parameters
----------
arr : array-like
        Array to be cumulatively summed (flattened if ``axis`` is None).
axis : int, default=None
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float, default=1e-05
Relative tolerance, see ``np.allclose``.
atol : float, default=1e-08
Absolute tolerance, see ``np.allclose``.
Returns
-------
out : ndarray
Array with the cumulative sums along the chosen axis.
"""
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.allclose(
out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True
):
warnings.warn(
(
"cumsum was found to be unstable: "
"its last element does not correspond to sum"
),
RuntimeWarning,
)
return out
| stable_cumsum |
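A tiny sketch of the accumulation and the consistency check described above; the float32 input is deliberate, since the cumulative sum is still carried out in float64.

import numpy as np
from sklearn.utils.extmath import stable_cumsum

arr = np.ones(5, dtype=np.float32)
out = stable_cumsum(arr)
print(out)         # [1. 2. 3. 4. 5.]
print(out.dtype)   # float64: accumulation is done in double precision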
scikit-learn | 312 | sklearn/utils/extmath.py | def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
    If u_based_decision is False, then the same sign correction is applied
so that the rows in v that are largest in absolute value are always
positive.
Parameters
----------
u : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`.
u can be None if `u_based_decision` is False.
v : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`. The input v should
really be called vt to be consistent with scipy's output.
v can be None if `u_based_decision` is True.
u_based_decision : bool, default=True
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted : ndarray
Array u with adjusted columns and the same dimensions as u.
v_adjusted : ndarray
Array v with adjusted rows and the same dimensions as v.
"""
| /usr/src/app/target_test_cases/failed_tests_svd_flip.txt | def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
    If u_based_decision is False, then the same sign correction is applied
so that the rows in v that are largest in absolute value are always
positive.
Parameters
----------
u : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`.
u can be None if `u_based_decision` is False.
v : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`. The input v should
really be called vt to be consistent with scipy's output.
v can be None if `u_based_decision` is True.
u_based_decision : bool, default=True
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted : ndarray
Array u with adjusted columns and the same dimensions as u.
v_adjusted : ndarray
Array v with adjusted rows and the same dimensions as v.
"""
xp, _ = get_namespace(*[a for a in [u, v] if a is not None])
if u_based_decision:
# columns of u, rows of v, or equivalently rows of u.T and v
max_abs_u_cols = xp.argmax(xp.abs(u.T), axis=1)
shift = xp.arange(u.T.shape[0], device=device(u))
indices = max_abs_u_cols + shift * u.T.shape[1]
signs = xp.sign(xp.take(xp.reshape(u.T, (-1,)), indices, axis=0))
u *= signs[np.newaxis, :]
if v is not None:
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_v_rows = xp.argmax(xp.abs(v), axis=1)
shift = xp.arange(v.shape[0], device=device(v))
indices = max_abs_v_rows + shift * v.shape[1]
signs = xp.sign(xp.take(xp.reshape(v, (-1,)), indices, axis=0))
if u is not None:
u *= signs[np.newaxis, :]
v *= signs[:, np.newaxis]
return u, v
| svd_flip |
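A short sketch showing that the sign flip only changes the signs of matched (column of u, row of v) pairs, so the reconstruction is unaffected; the random matrix is illustrative.

import numpy as np
from sklearn.utils.extmath import svd_flip

rng = np.random.RandomState(0)
A = rng.randn(5, 3)
U, s, Vt = np.linalg.svd(A, full_matrices=False)

U2, Vt2 = svd_flip(U.copy(), Vt.copy())   # copies: svd_flip modifies its inputs in place
# Each column of U2 now has its largest-magnitude entry positive, and the
# matching row of Vt2 was flipped with it, so the product is unchanged.
print(np.allclose((U2 * s) @ Vt2, A))     # True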
scikit-learn | 313 | sklearn/utils/multiclass.py | def type_of_target(y, input_name=""):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with ``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : {array-like, sparse matrix}
Target values. If a sparse matrix, `y` is expected to be a
CSR/CSC matrix.
input_name : str, default=""
The data name used to construct the error message.
.. versionadded:: 1.1.0
Returns
-------
target_type : str
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> from sklearn.utils.multiclass import type_of_target
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
| /usr/src/app/target_test_cases/failed_tests_type_of_target.txt | def type_of_target(y, input_name=""):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with ``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : {array-like, sparse matrix}
Target values. If a sparse matrix, `y` is expected to be a
CSR/CSC matrix.
input_name : str, default=""
The data name used to construct the error message.
.. versionadded:: 1.1.0
Returns
-------
target_type : str
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> from sklearn.utils.multiclass import type_of_target
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
xp, is_array_api_compliant = get_namespace(y)
valid = (
(isinstance(y, Sequence) or issparse(y) or hasattr(y, "__array__"))
and not isinstance(y, str)
or is_array_api_compliant
)
if not valid:
raise ValueError(
"Expected array-like (array or non-string sequence), got %r" % y
)
sparse_pandas = y.__class__.__name__ in ["SparseSeries", "SparseArray"]
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return "multilabel-indicator"
# DeprecationWarning will be replaced by ValueError, see NEP 34
# https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
# We therefore catch both deprecation (NumPy < 1.24) warning and
# value error (NumPy >= 1.24).
check_y_kwargs = dict(
accept_sparse=True,
allow_nd=True,
ensure_all_finite=False,
ensure_2d=False,
ensure_min_samples=0,
ensure_min_features=0,
)
with warnings.catch_warnings():
warnings.simplefilter("error", VisibleDeprecationWarning)
if not issparse(y):
try:
y = check_array(y, dtype=None, **check_y_kwargs)
except (VisibleDeprecationWarning, ValueError) as e:
if str(e).startswith("Complex data not supported"):
raise
# dtype=object should be provided explicitly for ragged arrays,
# see NEP 34
y = check_array(y, dtype=object, **check_y_kwargs)
try:
# TODO(1.7): Change to ValueError when byte labels is deprecated.
# labels in bytes format
first_row_or_val = y[[0], :] if issparse(y) else y[0]
if isinstance(first_row_or_val, bytes):
warnings.warn(
(
"Support for labels represented as bytes is deprecated in v1.5 and"
" will error in v1.7. Convert the labels to a string or integer"
" format."
),
FutureWarning,
)
# The old sequence of sequences format
if (
not hasattr(first_row_or_val, "__array__")
and isinstance(first_row_or_val, Sequence)
and not isinstance(first_row_or_val, str)
):
raise ValueError(
"You appear to be using a legacy multi-label data"
" representation. Sequence of sequences are no"
" longer supported; use a binary array or sparse"
" matrix instead - the MultiLabelBinarizer"
" transformer can convert to this format."
)
except IndexError:
pass
# Invalid inputs
if y.ndim not in (1, 2):
# Number of dimension greater than 2: [[[1, 2]]]
return "unknown"
if not min(y.shape):
# Empty ndarray: []/[[]]
if y.ndim == 1:
# 1-D empty array: []
return "binary" # []
# 2-D empty array: [[]]
return "unknown"
if not issparse(y) and y.dtype == object and not isinstance(y.flat[0], str):
# [obj_1] and not ["label_1"]
return "unknown"
# Check if multioutput
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# Check float and contains non-integer float values
if xp.isdtype(y.dtype, "real floating"):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
data = y.data if issparse(y) else y
if xp.any(data != xp.astype(data, int)):
_assert_all_finite(data, input_name=input_name)
return "continuous" + suffix
# Check multiclass
if issparse(first_row_or_val):
first_row_or_val = first_row_or_val.data
if cached_unique(y).shape[0] > 2 or (y.ndim == 2 and len(first_row_or_val) > 1):
# [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
return "multiclass" + suffix
else:
return "binary" # [1, 2] or [["a"], ["b"]]
| type_of_target |
astropy | 0 | astropy/modeling/physical_models.py | def evaluate(self, x, temperature, scale):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency']
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz (or AA if `scale` was initialized with units
equivalent to erg / (cm ** 2 * s * AA * sr)).
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless']
Desired scale for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``scale``.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
| /usr/src/app/target_test_cases/failed_tests_BlackBody.evaluate.txt | def evaluate(self, x, temperature, scale):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency']
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz (or AA if `scale` was initialized with units
equivalent to erg / (cm ** 2 * s * AA * sr)).
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless']
Desired scale for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``scale``.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
if not isinstance(temperature, u.Quantity):
in_temp = u.Quantity(temperature, u.K)
else:
in_temp = temperature
if not isinstance(x, u.Quantity):
# then we assume it has input_units which depends on the
# requested output units (either Hz or AA)
in_x = u.Quantity(x, self.input_units["x"])
else:
in_x = x
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
temp = u.Quantity(in_temp, u.K)
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError(f"Temperature should be positive: {temp}")
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn(
"Input contains invalid wavelength/frequency value(s)",
AstropyUserWarning,
)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
# Calculate blackbody flux
bb_nu = 2.0 * const.h * freq**3 / (const.c**2 * boltzm1) / u.sr
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
if not hasattr(scale, "unit"):
# during fitting, scale will be passed without units
# but we still need to convert from the input dimensionless
# to dimensionless unscaled
scale = scale * self.scale.unit
scale = scale.to(u.dimensionless_unscaled).value
# NOTE: scale is already stripped of any input units
y = scale * bb_nu.to(self._output_units, u.spectral_density(freq))
# If the temperature parameter has no unit, we should return a unitless
# value. This occurs for instance during fitting, since we drop the
# units temporarily.
if hasattr(temperature, "unit"):
return y
return y.value
| BlackBody.evaluate |
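A compact usage sketch with explicit astropy units; the temperature and wavelength values are arbitrary examples, and the output carries the model's default surface-brightness units handled in the scale logic above.

import astropy.units as u
from astropy.modeling.models import BlackBody

bb = BlackBody(temperature=5000 * u.K)
# Wavelength input is converted to frequency via the spectral equivalency.
flux = bb(500 * u.nm)
print(flux)              # spectral radiance, in erg / (cm2 Hz s sr) by default
print(bb(6e14 * u.Hz))   # frequency input works directly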
astropy | 1 | astropy/timeseries/periodograms/bls/core.py | def autoperiod(
self,
duration,
minimum_period=None,
maximum_period=None,
minimum_n_transit=3,
frequency_factor=1.0,
):
"""Determine a suitable grid of periods.
This method uses a set of heuristics to select a conservative period
grid that is uniform in frequency. This grid might be too fine for
some user's needs depending on the precision requirements or the
sampling of the data. The grid can be made coarser by increasing
``frequency_factor``.
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
minimum_period, maximum_period : float or `~astropy.units.Quantity` ['time'], optional
The minimum/maximum periods to search. If not provided, these will
be computed as described in the notes below.
minimum_n_transit : int, optional
If ``maximum_period`` is not provided, this is used to compute the
maximum period to search by asserting that any systems with at
least ``minimum_n_transits`` will be within the range of searched
periods. Note that this is not the same as requiring that
``minimum_n_transits`` be required for detection. The default
value is ``3``.
frequency_factor : float, optional
A factor to control the frequency spacing as described in the
notes below. The default value is ``1.0``.
Returns
-------
period : array-like or `~astropy.units.Quantity` ['time']
The set of periods computed using these heuristics with the same
units as ``t``.
Notes
-----
The default minimum period is chosen to be twice the maximum duration
because there won't be much sensitivity to periods shorter than that.
The default maximum period is computed as
.. code-block:: python
maximum_period = (max(t) - min(t)) / minimum_n_transits
ensuring that any systems with at least ``minimum_n_transits`` are
within the range of searched periods.
The frequency spacing is given by
.. code-block:: python
df = frequency_factor * min(duration) / (max(t) - min(t))**2
so the grid can be made finer by decreasing ``frequency_factor`` or
coarser by increasing ``frequency_factor``.
"""
| /usr/src/app/target_test_cases/failed_tests_BoxLeastSquares.autoperiod.txt | def autoperiod(
self,
duration,
minimum_period=None,
maximum_period=None,
minimum_n_transit=3,
frequency_factor=1.0,
):
"""Determine a suitable grid of periods.
This method uses a set of heuristics to select a conservative period
grid that is uniform in frequency. This grid might be too fine for
some user's needs depending on the precision requirements or the
sampling of the data. The grid can be made coarser by increasing
``frequency_factor``.
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
minimum_period, maximum_period : float or `~astropy.units.Quantity` ['time'], optional
The minimum/maximum periods to search. If not provided, these will
be computed as described in the notes below.
minimum_n_transit : int, optional
If ``maximum_period`` is not provided, this is used to compute the
maximum period to search by asserting that any systems with at
least ``minimum_n_transits`` will be within the range of searched
periods. Note that this is not the same as requiring that
``minimum_n_transits`` be required for detection. The default
value is ``3``.
frequency_factor : float, optional
A factor to control the frequency spacing as described in the
notes below. The default value is ``1.0``.
Returns
-------
period : array-like or `~astropy.units.Quantity` ['time']
The set of periods computed using these heuristics with the same
units as ``t``.
Notes
-----
The default minimum period is chosen to be twice the maximum duration
because there won't be much sensitivity to periods shorter than that.
The default maximum period is computed as
.. code-block:: python
maximum_period = (max(t) - min(t)) / minimum_n_transits
ensuring that any systems with at least ``minimum_n_transits`` are
within the range of searched periods.
The frequency spacing is given by
.. code-block:: python
df = frequency_factor * min(duration) / (max(t) - min(t))**2
so the grid can be made finer by decreasing ``frequency_factor`` or
coarser by increasing ``frequency_factor``.
"""
duration = self._validate_duration(duration)
baseline = strip_units(self._trel.max() - self._trel.min())
min_duration = strip_units(np.min(duration))
# Estimate the required frequency spacing
# Because of the sparsity of a transit, this must be much finer than
# the frequency resolution for a sinusoidal fit. For a sinusoidal fit,
# df would be 1/baseline (see LombScargle), but here this should be
# scaled proportionally to the duration in units of baseline.
df = frequency_factor * min_duration / baseline**2
# If a minimum period is not provided, choose one that is twice the
# maximum duration because we won't be sensitive to any periods
# shorter than that.
if minimum_period is None:
minimum_period = 2.0 * strip_units(np.max(duration))
else:
minimum_period = validate_unit_consistency(self._trel, minimum_period)
minimum_period = strip_units(minimum_period)
# If no maximum period is provided, choose one by requiring that
# all signals with at least minimum_n_transit should be detectable.
if maximum_period is None:
if minimum_n_transit <= 1:
raise ValueError("minimum_n_transit must be greater than 1")
maximum_period = baseline / (minimum_n_transit - 1)
else:
maximum_period = validate_unit_consistency(self._trel, maximum_period)
maximum_period = strip_units(maximum_period)
if maximum_period < minimum_period:
minimum_period, maximum_period = maximum_period, minimum_period
if minimum_period <= 0.0:
raise ValueError("minimum_period must be positive")
# Convert bounds to frequency
minimum_frequency = 1.0 / strip_units(maximum_period)
maximum_frequency = 1.0 / strip_units(minimum_period)
# Compute the number of frequencies and the frequency grid
nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df))
return 1.0 / (maximum_frequency - df * np.arange(nf)) * self._t_unit()
| BoxLeastSquares.autoperiod |
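A minimal sketch of building the period grid with the heuristics above; the time baseline, duration, and period bounds are arbitrary example values.

import numpy as np
import astropy.units as u
from astropy.timeseries import BoxLeastSquares

t = np.linspace(0, 20, 500) * u.day
y = np.ones(t.shape)
model = BoxLeastSquares(t, y)

periods = model.autoperiod(
    duration=0.2 * u.day,
    minimum_period=1.0 * u.day,
    maximum_period=5.0 * u.day,
)
print(periods.min(), periods.max())   # grid spans roughly the requested bounds
print(len(periods))                   # uniform in frequency; spacing set by frequency_factor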
astropy | 2 | astropy/timeseries/periodograms/bls/core.py | def compute_stats(self, period, duration, transit_time):
"""Compute descriptive statistics for a given transit model.
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``depth_half``: The depth and uncertainty for a model with a
period of half the fiducial period.
- ``depth_phased``: The depth and uncertainty for a model with the
fiducial period and the phase offset by half a period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
"""
| /usr/src/app/target_test_cases/failed_tests_BoxLeastSquares.compute_stats.txt | def compute_stats(self, period, duration, transit_time):
"""Compute descriptive statistics for a given transit model.
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``depth_half``: The depth and uncertainty for a model with a
period of half the fiducial period.
- ``depth_phased``: The depth and uncertainty for a model with the
fiducial period and the phase offset by half a period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time("transit_time", transit_time)
period = float(strip_units(period[0]))
duration = float(strip_units(duration[0]))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = (
1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2
)
# This a helper function that will compute the depth for several
# different hypothesized transit models with different parameters
def _compute_depth(m, y_out=None, var_out=None):
if np.any(m) and (var_out is None or np.isfinite(var_out)):
var_m = 1.0 / np.sum(ivar[m])
y_m = np.sum(y[m] * ivar[m]) * var_m
if y_out is None:
return y_m, var_m
return y_out - y_m, np.sqrt(var_m + var_out)
return 0.0, np.inf
# Compute the depth of the fiducial model and the two models at twice
# the period
hp = 0.5 * period
m_in = np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration
m_out = ~m_in
m_odd = np.abs((t - transit_time) % (2 * period) - period) < 0.5 * duration
m_even = (
np.abs((t - transit_time + period) % (2 * period) - period) < 0.5 * duration
)
y_out, var_out = _compute_depth(m_out)
depth = _compute_depth(m_in, y_out, var_out)
depth_odd = _compute_depth(m_odd, y_out, var_out)
depth_even = _compute_depth(m_even, y_out, var_out)
y_in = y_out - depth[0]
# Compute the depth of the model at a phase of 0.5*period
m_phase = np.abs((t - transit_time) % period - hp) < 0.5 * duration
depth_phase = _compute_depth(m_phase, *_compute_depth((~m_phase) & m_out))
# Compute the depth of a model with a period of 0.5*period
m_half = (
np.abs((t - transit_time + 0.25 * period) % (0.5 * period) - 0.25 * period)
< 0.5 * duration
)
depth_half = _compute_depth(m_half, *_compute_depth(~m_half))
# Compute the number of points in each transit
transit_id = np.round((t[m_in] - transit_time) / period).astype(int)
transit_times = (
period * np.arange(transit_id.min(), transit_id.max() + 1) + transit_time
)
unique_ids, unique_counts = np.unique(transit_id, return_counts=True)
unique_ids -= np.min(transit_id)
transit_id -= np.min(transit_id)
counts = np.zeros(np.max(transit_id) + 1, dtype=int)
counts[unique_ids] = unique_counts
# Compute the per-transit log likelihood
ll = -0.5 * ivar[m_in] * ((y[m_in] - y_in) ** 2 - (y[m_in] - y_out) ** 2)
lls = np.zeros(len(counts))
for i in unique_ids:
lls[i] = np.sum(ll[transit_id == i])
full_ll = -0.5 * np.sum(ivar[m_in] * (y[m_in] - y_in) ** 2)
full_ll -= 0.5 * np.sum(ivar[m_out] * (y[m_out] - y_out) ** 2)
# Compute the log likelihood of a sine model
A = np.vstack(
(
np.sin(2 * np.pi * t / period),
np.cos(2 * np.pi * t / period),
np.ones_like(t),
)
).T
w = np.linalg.solve(np.dot(A.T, A * ivar[:, None]), np.dot(A.T, y * ivar))
mod = np.dot(A, w)
sin_ll = -0.5 * np.sum((y - mod) ** 2 * ivar)
# Format the results
y_unit = self._y_unit()
ll_unit = 1
if self.dy is None:
ll_unit = y_unit * y_unit
return dict(
transit_times=self._as_absolute_time_if_needed(
"transit_times", transit_times * self._t_unit()
),
per_transit_count=counts,
per_transit_log_likelihood=lls * ll_unit,
depth=(depth[0] * y_unit, depth[1] * y_unit),
depth_phased=(depth_phase[0] * y_unit, depth_phase[1] * y_unit),
depth_half=(depth_half[0] * y_unit, depth_half[1] * y_unit),
depth_odd=(depth_odd[0] * y_unit, depth_odd[1] * y_unit),
depth_even=(depth_even[0] * y_unit, depth_even[1] * y_unit),
harmonic_amplitude=np.sqrt(np.sum(w[:2] ** 2)) * y_unit,
harmonic_delta_log_likelihood=(sin_ll - full_ll) * ll_unit,
)
| BoxLeastSquares.compute_stats |
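The depth statistics above all reduce to the same inverse-variance-weighted comparison of in-transit and out-of-transit flux levels. The sketch below (a hypothetical ``weighted_depth`` helper with made-up data, not the astropy implementation) shows that core calculation for the fiducial model only.

import numpy as np

def weighted_depth(t, y, ivar, period, duration, transit_time):
    # Hypothetical sketch of the inverse-variance-weighted depth estimate.
    hp = 0.5 * period
    # In-transit points lie within half a duration of the nearest mid-transit time.
    m_in = np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration
    m_out = ~m_in
    var_in = 1.0 / np.sum(ivar[m_in])
    var_out = 1.0 / np.sum(ivar[m_out])
    y_in = np.sum(y[m_in] * ivar[m_in]) * var_in
    y_out = np.sum(y[m_out] * ivar[m_out]) * var_out
    # Depth is out-of-transit minus in-transit level; variances add for the error.
    return y_out - y_in, np.sqrt(var_in + var_out)

# Illustrative data: a 2% deep transit with period 3.0 and duration 0.2.
rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0.0, 20.0, 2000))
sigma = 0.01
in_transit = np.abs((t - 1.0 + 1.5) % 3.0 - 1.5) < 0.1
y = 1.0 - 0.02 * in_transit + rng.normal(0.0, sigma, t.size)
depth, depth_err = weighted_depth(t, y, np.full(t.size, sigma**-2), 3.0, 0.2, 1.0)
print(depth, depth_err)  # should recover ~0.02 within the quoted uncertainty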
astropy | 3 | astropy/timeseries/periodograms/bls/core.py | def model(self, t_model, period, duration, transit_time):
"""Compute the transit model at the given period, duration, and phase.
Parameters
----------
t_model : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
Times at which to compute the model.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
y_model : array-like or `~astropy.units.Quantity`
The model evaluated at the times ``t_model`` with units of ``y``.
"""
| /usr/src/app/target_test_cases/failed_tests_BoxLeastSquares.model.txt | def model(self, t_model, period, duration, transit_time):
"""Compute the transit model at the given period, duration, and phase.
Parameters
----------
t_model : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
Times at which to compute the model.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
y_model : array-like or `~astropy.units.Quantity`
The model evaluated at the times ``t_model`` with units of ``y``.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time("transit_time", transit_time)
t_model = strip_units(self._as_relative_time("t_model", t_model))
period = float(strip_units(period[0]))
duration = float(strip_units(duration[0]))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = (
1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2
)
# Compute the depth
hp = 0.5 * period
m_in = np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration
m_out = ~m_in
y_in = np.sum(y[m_in] * ivar[m_in]) / np.sum(ivar[m_in])
y_out = np.sum(y[m_out] * ivar[m_out]) / np.sum(ivar[m_out])
# Evaluate the model
y_model = y_out + np.zeros_like(t_model)
m_model = np.abs((t_model - transit_time + hp) % period - hp) < 0.5 * duration
y_model[m_model] = y_in
return y_model * self._y_unit()
| BoxLeastSquares.model |
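A usage sketch for the ``model`` method above, assuming ``BoxLeastSquares`` is imported from ``astropy.timeseries`` and using made-up, unitless data: the model is evaluated on a fine time grid so it can be overplotted on the light curve.

import numpy as np
from astropy.timeseries import BoxLeastSquares

rng = np.random.default_rng(1)
t = np.sort(rng.uniform(0.0, 20.0, 1000))
y = 1.0 + 0.01 * rng.standard_normal(t.size)
y[np.abs((t - 0.5 + 1.5) % 3.0 - 1.5) < 0.1] -= 0.02  # inject a 2% box transit
bls = BoxLeastSquares(t, y, dy=0.01)
t_fine = np.linspace(0.0, 20.0, 5000)
# Step model: the weighted out-of-transit level, dropping to the in-transit level.
y_model = bls.model(t_fine, period=3.0, duration=0.2, transit_time=0.5)
print(y_model.min(), y_model.max())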
astropy | 4 | astropy/timeseries/periodograms/bls/core.py | def power(self, period, duration, objective=None, method=None, oversample=10):
"""Compute the periodogram for a set of periods.
Parameters
----------
period : array-like or `~astropy.units.Quantity` ['time']
The periods where the power should be computed
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations to test
objective : {'likelihood', 'snr'}, optional
The scalar that should be optimized to find the best fit phase,
duration, and depth. This can be either ``'likelihood'`` (default)
to optimize the log-likelihood of the model, or ``'snr'`` to
optimize the signal-to-noise with which the transit depth is
measured.
method : {'fast', 'slow'}, optional
The computational method used to compute the periodogram. This is
mainly included for the purposes of testing and most users will
want to use the optimized ``'fast'`` method (default) that is
implemented in Cython. ``'slow'`` is a brute-force method that is
used to test the results of the ``'fast'`` method.
oversample : int, optional
The number of bins per duration that should be used. This sets the
time resolution of the phase fit with larger values of
``oversample`` yielding a finer grid and higher computational cost.
Returns
-------
results : BoxLeastSquaresResults
The periodogram results as a :class:`BoxLeastSquaresResults`
object.
Raises
------
ValueError
If ``oversample`` is not an integer greater than 0 or if
``objective`` or ``method`` are not valid.
"""
| /usr/src/app/target_test_cases/failed_tests_BoxLeastSquares.power.txt | def power(self, period, duration, objective=None, method=None, oversample=10):
"""Compute the periodogram for a set of periods.
Parameters
----------
period : array-like or `~astropy.units.Quantity` ['time']
The periods where the power should be computed
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations to test
objective : {'likelihood', 'snr'}, optional
The scalar that should be optimized to find the best fit phase,
duration, and depth. This can be either ``'likelihood'`` (default)
to optimize the log-likelihood of the model, or ``'snr'`` to
optimize the signal-to-noise with which the transit depth is
measured.
method : {'fast', 'slow'}, optional
The computational method used to compute the periodogram. This is
mainly included for the purposes of testing and most users will
want to use the optimized ``'fast'`` method (default) that is
implemented in Cython. ``'slow'`` is a brute-force method that is
used to test the results of the ``'fast'`` method.
oversample : int, optional
The number of bins per duration that should be used. This sets the
time resolution of the phase fit with larger values of
``oversample`` yielding a finer grid and higher computational cost.
Returns
-------
results : BoxLeastSquaresResults
The periodogram results as a :class:`BoxLeastSquaresResults`
object.
Raises
------
ValueError
If ``oversample`` is not an integer greater than 0 or if
``objective`` or ``method`` are not valid.
"""
period, duration = self._validate_period_and_duration(period, duration)
# Check for absurdities in the ``oversample`` choice
try:
oversample = int(oversample)
except TypeError:
raise ValueError(f"oversample must be an int, got {oversample}")
if oversample < 1:
raise ValueError("oversample must be greater than or equal to 1")
# Select the periodogram objective
if objective is None:
objective = "likelihood"
allowed_objectives = ["snr", "likelihood"]
if objective not in allowed_objectives:
raise ValueError(
f"Unrecognized method '{objective}'\n"
f"allowed methods are: {allowed_objectives}"
)
use_likelihood = objective == "likelihood"
# Select the computational method
if method is None:
method = "fast"
allowed_methods = ["fast", "slow"]
if method not in allowed_methods:
raise ValueError(
f"Unrecognized method '{method}'\n"
f"allowed methods are: {allowed_methods}"
)
# Format and check the input arrays
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
t_ref = np.min(t)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = (
1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2
)
# Make sure that the period and duration arrays are C-order
period_fmt = np.ascontiguousarray(strip_units(period), dtype=np.float64)
duration = np.ascontiguousarray(strip_units(duration), dtype=np.float64)
# Select the correct implementation for the chosen method
if method == "fast":
bls = methods.bls_fast
else:
bls = methods.bls_slow
# Run the implementation
results = bls(
t - t_ref,
y - np.median(y),
ivar,
period_fmt,
duration,
oversample,
use_likelihood,
)
return self._format_results(t_ref, objective, period, results)
| BoxLeastSquares.power |
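A usage sketch for ``power`` above, with made-up data and illustrative parameter values, again assuming ``BoxLeastSquares`` from ``astropy.timeseries``: build the period grid with ``autoperiod``, run the periodogram, and read the best-fit parameters off the returned results object.

import numpy as np
from astropy.timeseries import BoxLeastSquares

rng = np.random.default_rng(2)
t = np.sort(rng.uniform(0.0, 27.0, 2000))
y = 1.0 + 0.005 * rng.standard_normal(t.size)
y[np.abs((t - 1.0 + 2.0) % 4.0 - 2.0) < 0.08] -= 0.01  # inject a 1% transit
bls = BoxLeastSquares(t, y, dy=0.005)
durations = np.array([0.1, 0.16, 0.24])
periods = bls.autoperiod(durations, minimum_n_transit=3)
results = bls.power(periods, durations, objective="likelihood", oversample=10)
best = np.argmax(results.power)
print(results.period[best], results.duration[best], results.depth[best])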
astropy | 5 | astropy/timeseries/periodograms/bls/core.py | def transit_mask(self, t, period, duration, transit_time):
"""Compute which data points are in transit for a given parameter set.
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times where the mask should be evaluated.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
transit_mask : array-like
A boolean array where ``True`` indicates an in-transit point and
``False`` indicates an out-of-transit point.
"""
| /usr/src/app/target_test_cases/failed_tests_BoxLeastSquares.transit_mask.txt | def transit_mask(self, t, period, duration, transit_time):
"""Compute which data points are in transit for a given parameter set.
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times where the mask should be evaluated.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
transit_mask : array-like
A boolean array where ``True`` indicates an in-transit point and
``False`` indicates an out-of-transit point.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time("transit_time", transit_time)
t = strip_units(self._as_relative_time("t", t))
period = float(strip_units(period[0]))
duration = float(strip_units(duration[0]))
transit_time = float(strip_units(transit_time))
hp = 0.5 * period
return np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration
| BoxLeastSquares.transit_mask |
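A usage sketch for ``transit_mask`` above (made-up data, illustrative parameters): a common application is masking out the detected transits so that a second search can look for additional signals.

import numpy as np
from astropy.timeseries import BoxLeastSquares

rng = np.random.default_rng(3)
t = np.sort(rng.uniform(0.0, 30.0, 3000))
y = 1.0 + 0.004 * rng.standard_normal(t.size)
bls = BoxLeastSquares(t, y, dy=0.004)
in_transit = bls.transit_mask(t, period=5.0, duration=0.2, transit_time=2.0)
# Re-instantiate on the out-of-transit points only for a follow-up search.
bls_clean = BoxLeastSquares(t[~in_transit], y[~in_transit], dy=0.004)
print(in_transit.sum(), (~in_transit).sum())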
astropy | 6 | astropy/nddata/ccddata.py | def to_hdu(
self,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
wcs_relax=True,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` values ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` require
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead
of the default `~astropy.io.fits.PrimaryHDU`.
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but is not an astropy uncertainty type.
- If ``self.uncertainty`` is set but has a different unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
| /usr/src/app/target_test_cases/failed_tests_CCDData.to_hdu.txt | def to_hdu(
self,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
wcs_relax=True,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` values ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` require
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead
of the default `~astropy.io.fits.PrimaryHDU`.
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but is not an astropy uncertainty type.
- If ``self.uncertainty`` is set but has a different unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header["bunit"] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
if as_image_hdu:
hdus = [fits.ImageHDU(self.data, header)]
else:
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, "shape"):
raise ValueError("only a numpy.ndarray mask can be saved.")
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
# We need to save some kind of information which uncertainty was
# used so that loading the HDUList can infer the uncertainty type.
# No idea how this can be done so only allow StdDevUncertainty.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError(
f"only uncertainties of type {_known_uncertainties} can be saved."
)
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
# Assuming uncertainty is an StdDevUncertainty save just the array
# this might be problematic if the Uncertainty has a unit differing
# from the data so abort for different units. This is important for
# astropy > 1.2
if hasattr(self.uncertainty, "unit") and self.uncertainty.unit is not None:
if not _uncertainty_unit_equivalent_to_parent(
uncertainty_cls, self.uncertainty.unit, self.unit
):
raise ValueError(
"saving uncertainties with a unit that is not "
"equivalent to the unit from the data unit is not "
"supported."
)
hduUncert = fits.ImageHDU(
self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty
)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError(
"adding the flags to a HDU is not supported at this time."
)
if hdu_psf and self.psf is not None:
# The PSF is an image, so write it as a separate ImageHDU.
hdu_psf = fits.ImageHDU(self.psf, name=hdu_psf)
hdus.append(hdu_psf)
hdulist = fits.HDUList(hdus)
return hdulist
| CCDData.to_hdu |
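A usage sketch for ``to_hdu`` above, assuming ``CCDData`` and ``StdDevUncertainty`` from ``astropy.nddata`` and made-up image data: the mask and uncertainty are written as extra image extensions alongside the primary HDU.

import numpy as np
from astropy.nddata import CCDData, StdDevUncertainty

data = np.ones((32, 32), dtype=np.float32)
ccd = CCDData(
    data,
    unit="adu",
    mask=np.zeros(data.shape, dtype=bool),          # written as a uint8 MASK extension
    uncertainty=StdDevUncertainty(0.1 * np.ones_like(data)),
)
hdulist = ccd.to_hdu(hdu_mask="MASK", hdu_uncertainty="UNCERT")
print([hdu.name for hdu in hdulist])                # expect ['PRIMARY', 'MASK', 'UNCERT']
# hdulist.writeto("example.fits", overwrite=True)   # hypothetical output path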
astropy | 7 | astropy/uncertainty/core.py | def pdf_histogram(self, **kwargs):
"""
Compute histogram over the samples in the distribution.
Parameters
----------
All keyword arguments are passed into `astropy.stats.histogram`. Note
that some of these options may not be valid for some multidimensional
distributions.
Returns
-------
hist : array
The values of the histogram. Trailing dimension is the histogram
dimension.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``. Trailing dimension is the
bin histogram dimension.
"""
| /usr/src/app/target_test_cases/failed_tests_Distribution.pdf_histogram.txt | def pdf_histogram(self, **kwargs):
"""
Compute histogram over the samples in the distribution.
Parameters
----------
All keyword arguments are passed into `astropy.stats.histogram`. Note
that some of these options may not be valid for some multidimensional
distributions.
Returns
-------
hist : array
The values of the histogram. Trailing dimension is the histogram
dimension.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``. Trailing dimension is the
bin histogram dimension.
"""
distr = self.distribution
raveled_distr = distr.reshape(distr.size // distr.shape[-1], distr.shape[-1])
nhists = []
bin_edges = []
for d in raveled_distr:
nhist, bin_edge = stats.histogram(d, **kwargs)
nhists.append(nhist)
bin_edges.append(bin_edge)
nhists = np.array(nhists)
nh_shape = self.shape + (nhists.size // self.size,)
bin_edges = np.array(bin_edges)
be_shape = self.shape + (bin_edges.size // self.size,)
return nhists.reshape(nh_shape), bin_edges.reshape(be_shape)
| Distribution.pdf_histogram |
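A usage sketch for ``pdf_histogram`` above, assuming the ``normal`` sampler from ``astropy.uncertainty`` and made-up parameters: for an array-valued distribution, the histogram values and bin edges occupy the trailing axis of the returned arrays.

import numpy as np
from astropy import uncertainty as unc

centers = np.array([10.0, 20.0])
distr = unc.normal(centers, std=1.5, n_samples=10000)
hist, bin_edges = distr.pdf_histogram(bins=20)
print(hist.shape, bin_edges.shape)  # (2, 20) and (2, 21)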