index | package | name | docstring | code | signature |
---|---|---|---|---|---|
4,752 | optbinning.binning.binning | _fit_optimizer | null | def _fit_optimizer(self, splits, n_nonevent, n_event):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
if len(n_nonevent) <= 1:
self._status = "OPTIMAL"
self._splits_optimal = splits
self._solution = np.zeros(len(splits), dtype=bool)
if self.verbose:
logger.warning("Optimizer: {} bins after pre-binning."
.format(len(n_nonevent)))
logger.warning("Optimizer: solver not run.")
logger.info("Optimizer terminated. Time: 0s")
return
# Min/max number of bins
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples))
else:
max_bin_size = self.max_bin_size
# Min number of event and nonevent per bin
if (self.divergence in ("hellinger", "triangular") and
self._flag_min_n_event_nonevent):
if self.min_bin_n_nonevent is None:
min_bin_n_nonevent = 1
else:
min_bin_n_nonevent = max(self.min_bin_n_nonevent, 1)
if self.min_bin_n_event is None:
min_bin_n_event = 1
else:
min_bin_n_event = max(self.min_bin_n_event, 1)
else:
min_bin_n_nonevent = self.min_bin_n_nonevent
min_bin_n_event = self.min_bin_n_event
# Monotonic trend
trend_change = None
if self.dtype == "numerical":
auto_monotonic_modes = ("auto", "auto_heuristic", "auto_asc_desc")
if self.monotonic_trend in auto_monotonic_modes:
monotonic = auto_monotonic(n_nonevent, n_event,
self.monotonic_trend)
if self.monotonic_trend == "auto_heuristic":
if monotonic in ("peak", "valley"):
if monotonic == "peak":
monotonic = "peak_heuristic"
else:
monotonic = "valley_heuristic"
event_rate = n_event / (n_nonevent + n_event)
trend_change = peak_valley_trend_change_heuristic(
event_rate, monotonic)
if self.verbose:
logger.info("Optimizer: classifier predicts {} "
"monotonic trend.".format(monotonic))
else:
monotonic = self.monotonic_trend
if monotonic in ("peak_heuristic", "valley_heuristic"):
event_rate = n_event / (n_nonevent + n_event)
trend_change = peak_valley_trend_change_heuristic(
event_rate, monotonic)
if self.verbose:
logger.info("Optimizer: trend change position {}."
.format(trend_change))
else:
monotonic = self.monotonic_trend
if monotonic is not None:
monotonic = "ascending"
if self.verbose:
if monotonic is None:
logger.info("Optimizer: monotonic trend not set.")
else:
logger.info("Optimizer: monotonic trend set to {}."
.format(monotonic))
if self.solver == "cp":
optimizer = BinningCP(monotonic, self.min_n_bins, self.max_n_bins,
min_bin_size, max_bin_size,
min_bin_n_event, self.max_bin_n_event,
min_bin_n_nonevent, self.max_bin_n_nonevent,
self.min_event_rate_diff, self.max_pvalue,
self.max_pvalue_policy, self.gamma,
self.user_splits_fixed, self.time_limit)
elif self.solver == "mip":
optimizer = BinningMIP(monotonic, self.min_n_bins, self.max_n_bins,
min_bin_size, max_bin_size,
min_bin_n_event, self.max_bin_n_event,
min_bin_n_nonevent, self.max_bin_n_nonevent,
self.min_event_rate_diff, self.max_pvalue,
self.max_pvalue_policy, self.gamma,
self.user_splits_fixed, self.mip_solver,
self.time_limit)
elif self.solver == "ls":
optimizer = BinningLS(monotonic, self.min_n_bins, self.max_n_bins,
min_bin_size, max_bin_size,
min_bin_n_event, self.max_bin_n_event,
min_bin_n_nonevent, self.max_bin_n_nonevent,
self.min_event_rate_diff, self.max_pvalue,
self.max_pvalue_policy,
self.user_splits_fixed, self.time_limit)
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(self.divergence, n_nonevent, n_event,
trend_change)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
if self.dtype == "categorical" and self.user_splits is not None:
self._splits_optimal = splits[solution]
else:
self._splits_optimal = splits[solution[:-1]]
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
| (self, splits, n_nonevent, n_event) |
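The optimizer first converts the fractional `min_bin_size` and `max_bin_size` parameters into absolute record counts via `np.ceil`, as in the branch above. A minimal standalone sketch of that conversion, with illustrative numbers that are not taken from the library:

```python
import numpy as np

# Illustrative values only: a 10,000-sample dataset with fractional limits.
n_samples = 10_000
min_bin_size_fraction = 0.05   # each bin must hold at least 5% of records
max_bin_size_fraction = 0.40   # ... and at most 40% of records

min_bin_size = int(np.ceil(min_bin_size_fraction * n_samples))
max_bin_size = int(np.ceil(max_bin_size_fraction * n_samples))
print(min_bin_size, max_bin_size)  # -> 500 4000
```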
4,763 | optbinning.binning.binning | fit | Fit the optimal binning according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : OptimalBinning
Fitted optimal binning.
| def fit(self, x, y, sample_weight=None, check_input=False):
"""Fit the optimal binning according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : OptimalBinning
Fitted optimal binning.
"""
return self._fit(x, y, sample_weight, check_input)
| (self, x, y, sample_weight=None, check_input=False) |
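The docstring above describes the public `fit` API. A short usage sketch, assuming the scikit-learn breast cancer data purely as illustrative input (it is not referenced by this docstring):

```python
from sklearn.datasets import load_breast_cancer
from optbinning import OptimalBinning

data = load_breast_cancer()
x = data.data[:, 0]        # a single numerical feature ("mean radius")
y = data.target            # binary target

optb = OptimalBinning(name="mean radius", dtype="numerical", solver="cp")
optb.fit(x, y)             # returns the fitted OptimalBinning instance

print(optb.status)         # solver status, e.g. "OPTIMAL"
print(optb.splits)         # optimal split points
```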
4,764 | optbinning.binning.binning | fit_transform | Fit the optimal binning according to the given training data, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
| def fit_transform(self, x, y, sample_weight=None, metric="woe",
metric_special=0, metric_missing=0, show_digits=2,
check_input=False):
"""Fit the optimal binning according to the given training data, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, sample_weight, check_input).transform(
x, metric, metric_special, metric_missing, show_digits,
check_input)
| (self, x, y, sample_weight=None, metric='woe', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
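A hedged sketch of `fit_transform`, again using the breast cancer data as an assumed example; `metric="event_rate"` maps each sample to the event rate of its assigned bin instead of its WoE:

```python
from sklearn.datasets import load_breast_cancer
from optbinning import OptimalBinning

data = load_breast_cancer()
x, y = data.data[:, 0], data.target

optb = OptimalBinning(name="mean radius", dtype="numerical")
# Fit the binning and transform x in a single call.
x_event_rate = optb.fit_transform(x, y, metric="event_rate")
print(x_event_rate[:5])
```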
4,773 | optbinning.binning.binning | transform | Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero WoE or event rate.
| def transform(self, x, metric="woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero WoE or event rate.
"""
self._check_is_fitted()
return transform_binary_target(self._splits_optimal, self.dtype, x,
self._n_nonevent, self._n_event,
self.special_codes, self._categories,
self._cat_others, self.cat_unknown,
metric, metric_special, metric_missing,
self.user_splits, show_digits,
check_input)
| (self, x, metric='woe', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
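A small sketch of `transform` on previously unseen values after fitting; the values in `x_new` are made up for illustration, and `metric="bins"` / `metric="indices"` return interval labels and bin indices respectively:

```python
import numpy as np
from sklearn.datasets import load_breast_cancer
from optbinning import OptimalBinning

data = load_breast_cancer()
optb = OptimalBinning(name="mean radius").fit(data.data[:, 0], data.target)

x_new = np.array([10.5, 14.2, 22.7])       # unseen values, illustrative only
print(optb.transform(x_new, metric="bins", show_digits=3))
print(optb.transform(x_new, metric="indices"))
```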
4,774 | optbinning.binning.multidimensional.binning_2d | OptimalBinning2D | Optimal binning of two numerical variables with respect to a binary
target.
Parameters
----------
name_x : str, optional (default="")
The name of variable x.
name_y : str, optional (default="")
The name of variable y.
dtype_x : str, optional (default="numerical")
The data type of variable x. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
dtype_y : str, optional (default="numerical")
The data type of variable y. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "mdlp" for Minimum Description Length Principle (MDLP),
"quantile" to generate prebins with approximately same frequency and
"uniform" to generate prebins with equal width. Method "cart" uses
`sklearn.tree.DecisionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
strategy : str, optional (default="grid")
The strategy used to create the initial prebinning 2D after computing
prebinning splits on the x and y axis. The strategy "grid" creates a
prebinning 2D with n_prebins_x times n_prebins_y elements. The strategy
"cart" (experimental) reduces the number of elements by pruning. The
latter is recommended when the number of prebins is large.
solver : str, optional (default="cp")
The optimizer to solve the optimal binning problem. Supported solvers
are "mip" to choose a mixed-integer programming solver, and "cp" to
choose a constrained programming solver.
divergence : str, optional (default="iv")
The divergence measure in the objective function to be maximized.
Supported divergences are "iv" (Information Value or Jeffrey's
divergence), "js" (Jensen-Shannon), "hellinger" (Hellinger divergence)
and "triangular" (triangular discrimination).
max_n_prebins_x : int (default=5)
The maximum number of bins on variable x after pre-binning (prebins).
max_n_prebins_y : int (default=5)
The maximum number of bins on variable y after pre-binning (prebins).
min_prebin_size_x : float (default=0.05)
The fraction of minimum number of records for each prebin on variable
x.
min_prebin_size_y : float (default=0.05)
The fraction of minimum number of records for each prebin on variable
y.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
min_bin_n_nonevent : int or None, optional (default=None)
The minimum number of non-event records for each bin. If None,
``min_bin_n_nonevent = 1``.
max_bin_n_nonevent : int or None, optional (default=None)
The maximum number of non-event records for each bin. If None, then an
unlimited number of non-event records for each bin.
min_bin_n_event : int or None, optional (default=None)
The minimum number of event records for each bin. If None,
``min_bin_n_event = 1``.
max_bin_n_event : int or None, optional (default=None)
The maximum number of event records for each bin. If None, then an
unlimited number of event records for each bin.
monotonic_trend_x : str or None, optional (default=None)
The **event rate** monotonic trend on the x axis. Supported trends are
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
monotonic_trend_y : str or None, optional (default=None)
The **event rate** monotonic trend on the y axis. Supported trends are
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
min_event_rate_diff_x : float, optional (default=0)
The minimum event rate difference between consecutive bins on the x
axis.
min_event_rate_diff_y : float, optional (default=0)
The minimum event rate difference between consecutive bins on the y
axis.
gamma : float, optional (default=0)
Regularization strength to reduce the number of dominating bins. Larger
values specify stronger regularization.
special_codes_x : array-like or None, optional (default=None)
List of special codes for the variable x. Use special codes to specify
the data values that must be treated separately.
special_codes_y : array-like or None, optional (default=None)
List of special codes for the variable y. Use special codes to specify
the data values that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
n_jobs : int or None, optional (default=None)
Number of cores to run in parallel while binning variables.
``None`` means 1 core. ``-1`` means using all processors.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
| class OptimalBinning2D(OptimalBinning):
"""Optimal binning of two numerical variables with respect to a binary
target.
Parameters
----------
name_x : str, optional (default="")
The name of variable x.
name_y : str, optional (default="")
The name of variable y.
dtype_x : str, optional (default="numerical")
The data type of variable x. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
dtype_y : str, optional (default="numerical")
The data type of variable y. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "mdlp" for Minimum Description Length Principle (MDLP),
"quantile" to generate prebins with approximately same frequency and
"uniform" to generate prebins with equal width. Method "cart" uses
`sklearn.tree.DecisionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
strategy : str, optional (default="grid")
The strategy used to create the initial prebinning 2D after computing
prebinning splits on the x and y axis. The strategy "grid" creates a
prebinning 2D with n_prebins_x times n_prebins_y elements. The strategy
"cart" (experimental) reduces the number of elements by pruning. The
latter is recommended when the number of prebins is large.
solver : str, optional (default="cp")
The optimizer to solve the optimal binning problem. Supported solvers
are "mip" to choose a mixed-integer programming solver, and "cp" to
choose a constrained programming solver.
divergence : str, optional (default="iv")
The divergence measure in the objective function to be maximized.
Supported divergences are "iv" (Information Value or Jeffrey's
divergence), "js" (Jensen-Shannon), "hellinger" (Hellinger divergence)
and "triangular" (triangular discrimination).
max_n_prebins_x : int (default=5)
The maximum number of bins on variable x after pre-binning (prebins).
max_n_prebins_y : int (default=5)
The maximum number of bins on variable y after pre-binning (prebins).
min_prebin_size_x : float (default=0.05)
The fraction of minimum number of records for each prebin on variable
x.
min_prebin_size_y : float (default=0.05)
The fraction of minimum number of records for each prebin on variable
y.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
min_bin_n_nonevent : int or None, optional (default=None)
The minimum number of non-event records for each bin. If None,
``min_bin_n_nonevent = 1``.
max_bin_n_nonevent : int or None, optional (default=None)
The maximum number of non-event records for each bin. If None, then an
unlimited number of non-event records for each bin.
min_bin_n_event : int or None, optional (default=None)
The minimum number of event records for each bin. If None,
``min_bin_n_event = 1``.
max_bin_n_event : int or None, optional (default=None)
The maximum number of event records for each bin. If None, then an
unlimited number of event records for each bin.
monotonic_trend_x : str or None, optional (default=None)
The **event rate** monotonic trend on the x axis. Supported trends are
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
monotonic_trend_y : str or None, optional (default=None)
The **event rate** monotonic trend on the y axis. Supported trends are
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
min_event_rate_diff_x : float, optional (default=0)
The minimum event rate difference between consecutive bins on the x
axis.
min_event_rate_diff_y : float, optional (default=0)
The minimum event rate difference between consecutive bins on the y
axis.
gamma : float, optional (default=0)
Regularization strength to reduce the number of dominating bins. Larger
values specify stronger regularization.
special_codes_x : array-like or None, optional (default=None)
List of special codes for the variable x. Use special codes to specify
the data values that must be treated separately.
special_codes_y : array-like or None, optional (default=None)
List of special codes for the variable y. Use special codes to specify
the data values that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
n_jobs : int or None, optional (default=None)
Number of cores to run in parallel while binning variables.
``None`` means 1 core. ``-1`` means using all processors.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
"""
def __init__(self, name_x="", name_y="", dtype_x="numerical",
dtype_y="numerical", prebinning_method="cart",
strategy="grid", solver="cp", divergence="iv",
max_n_prebins_x=5, max_n_prebins_y=5, min_prebin_size_x=0.05,
min_prebin_size_y=0.05, min_n_bins=None, max_n_bins=None,
min_bin_size=None, max_bin_size=None, min_bin_n_nonevent=None,
max_bin_n_nonevent=None, min_bin_n_event=None,
max_bin_n_event=None, monotonic_trend_x=None,
monotonic_trend_y=None, min_event_rate_diff_x=0,
min_event_rate_diff_y=0, gamma=0, special_codes_x=None,
special_codes_y=None, split_digits=None, n_jobs=1,
time_limit=100, verbose=False):
self.name_x = name_x
self.name_y = name_y
self.dtype_x = dtype_x
self.dtype_y = dtype_y
self.prebinning_method = prebinning_method
self.strategy = strategy
self.solver = solver
self.divergence = divergence
self.max_n_prebins_x = max_n_prebins_x
self.max_n_prebins_y = max_n_prebins_y
self.min_prebin_size_x = min_prebin_size_x
self.min_prebin_size_y = min_prebin_size_y
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.min_bin_n_event = min_bin_n_event
self.max_bin_n_event = max_bin_n_event
self.min_bin_n_nonevent = min_bin_n_nonevent
self.max_bin_n_nonevent = max_bin_n_nonevent
self.monotonic_trend_x = monotonic_trend_x
self.monotonic_trend_y = monotonic_trend_y
self.min_event_rate_diff_x = min_event_rate_diff_x
self.min_event_rate_diff_y = min_event_rate_diff_y
self.gamma = gamma
self.special_codes_x = special_codes_x
self.special_codes_y = special_codes_y
self.split_digits = split_digits
self.n_jobs = n_jobs
self.time_limit = time_limit
self.verbose = verbose
# auxiliary
self._categories_x = None
self._categories_y = None
self._n_event = None
self._n_nonevent = None
self._n_event_special = None
self._n_nonevent_special = None
self._n_event_missing = None
self._n_nonevent_missing = None
self._problem_type = "classification"
# info
self._binning_table = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._solution = None
self._splits_x_optimal = None
self._splits_y_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
def fit(self, x, y, z, check_input=False):
"""Fit the optimal binning 2D according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : OptimalBinning2D
Fitted optimal binning 2D.
"""
return self._fit(x, y, z, check_input)
def fit_transform(self, x, y, z, metric="woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Fit the optimal binning 2D according to the given training data,
then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, z, check_input).transform(
x, y, metric, metric_special, metric_missing, show_digits,
check_input)
def transform(self, x, y, metric="woe", metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal binning 2D.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
"""
self._check_is_fitted()
return transform_binary_target(
self.dtype_x, self.dtype_y, self._splits_x_optimal,
self._splits_y_optimal, x, y, self._n_nonevent, self._n_event,
self.special_codes_x, self.special_codes_y, self._categories_x,
self._categories_y, metric, metric_special, metric_missing,
show_digits, check_input)
def _fit(self, x, y, z, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, z_clean, x_missing, y_missing, z_missing,
x_special, y_special, z_special,
categories_x, categories_y] = split_data_2d(
self.dtype_x, self.dtype_y, x, y, z, self.special_codes_x,
self.special_codes_y, check_input)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.dtype_x == "categorical":
logger.info("Pre-processing: number of categories in x: {}"
.format(len(categories_x)))
if self.dtype_y == "categorical":
logger.info("Pre-processing: number of categories in y: {}"
.format(len(categories_y)))
if self.verbose:
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
splits_x = self._fit_prebinning(self.dtype_x, x_clean, z_clean,
self.max_n_prebins_x,
self.min_prebin_size_x)
splits_y = self._fit_prebinning(self.dtype_y, y_clean, z_clean,
self.max_n_prebins_y,
self.min_prebin_size_y)
NE, E = self._prebinning_matrices(
splits_x, splits_y, x_clean, y_clean, z_clean, x_missing,
y_missing, z_missing, x_special, y_special, z_special)
if self.strategy == "cart":
if self.verbose:
logger.info("Prebinning: applying strategy cart...")
n_splits_x = len(splits_x)
n_splits_y = len(splits_y)
clf_nodes = n_splits_x * n_splits_y
indices_x = np.digitize(x_clean, splits_x, right=False)
n_bins_x = n_splits_x + 1
indices_y = np.digitize(y_clean, splits_y, right=False)
n_bins_y = n_splits_y + 1
xt = np.empty(len(x_clean), dtype=int)
yt = np.empty(len(y_clean), dtype=int)
for i in range(n_bins_x):
xt[(indices_x == i)] = i
for i in range(n_bins_y):
yt[(indices_y == i)] = i
xyt = np.c_[xt, yt]
min_prebin_size = min(self.min_prebin_size_x,
self.min_prebin_size_y) * 0.25
clf = DecisionTreeClassifier(min_samples_leaf=min_prebin_size,
max_leaf_nodes=clf_nodes)
clf.fit(xyt, z_clean)
self._clf = clf
self._categories_x = categories_x
self._categories_y = categories_y
self._time_prebinning = time.perf_counter() - time_prebinning
self._n_prebins = E.size
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
rows, n_nonevent, n_event = self._fit_optimizer(
splits_x, splits_y, NE, E)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
# Refinements
m, n = E.shape
self._n_refinements = (m * n * (m + 1) * (n + 1)) // 4 - len(rows)
# solution matrices
D = np.empty(m * n, dtype=float)
P = np.empty(m * n, dtype=int)
selected_rows = np.array(rows, dtype=object)[self._solution]
self._selected_rows = selected_rows
self._m, self._n = m, n
n_selected_rows = selected_rows.shape[0] + 2
opt_n_nonevent = np.empty(n_selected_rows, dtype=int)
opt_n_event = np.empty(n_selected_rows, dtype=int)
for i, r in enumerate(selected_rows):
_n_nonevent = n_nonevent[self._solution][i]
_n_event = n_event[self._solution][i]
_event_rate = _n_event / (_n_event + _n_nonevent)
P[r] = i
D[r] = _event_rate
opt_n_nonevent[i] = _n_nonevent
opt_n_event[i] = _n_event
opt_n_nonevent[-2] = self._n_nonevent_special
opt_n_event[-2] = self._n_event_special
opt_n_nonevent[-1] = self._n_nonevent_missing
opt_n_event[-1] = self._n_event_missing
self._n_nonevent = opt_n_nonevent
self._n_event = opt_n_event
D = D.reshape((m, n))
P = P.reshape((m, n))
# optimal bins
splits_x_optimal, splits_y_optimal = self._splits_xy_optimal(
selected_rows, splits_x, splits_y, P)
self._splits_x_optimal = splits_x_optimal
self._splits_y_optimal = splits_y_optimal
# instantiate binning table
self._binning_table = BinningTable2D(
self.name_x, self.name_y, self.dtype_x, self.dtype_y,
splits_x_optimal, splits_y_optimal, m, n, opt_n_nonevent,
opt_n_event, D, P, self._categories_x, self._categories_y)
self.name = "-".join((self.name_x, self.name_y))
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
def _fit_prebinning(self, dtype, x, z, max_n_prebins, min_prebin_size):
# Pre-binning algorithm
min_bin_size = int(np.ceil(min_prebin_size * self._n_samples))
prebinning = PreBinning(method=self.prebinning_method,
n_bins=max_n_prebins,
min_bin_size=min_bin_size,
problem_type=self._problem_type).fit(x, z)
return prebinning.splits
def _prebinning_matrices(self, splits_x, splits_y, x_clean, y_clean,
z_clean, x_missing, y_missing, z_missing,
x_special, y_special, z_special):
z0 = z_clean == 0
z1 = ~z0
# Compute n_nonevent and n_event for special and missing
special_target_info = target_info(z_special)
self._n_nonevent_special = special_target_info[0]
self._n_event_special = special_target_info[1]
missing_target_info = target_info(z_missing)
self._n_nonevent_missing = missing_target_info[0]
self._n_event_missing = missing_target_info[1]
n_splits_x = len(splits_x)
n_splits_y = len(splits_y)
indices_x = np.digitize(x_clean, splits_x, right=False)
n_bins_x = n_splits_x + 1
indices_y = np.digitize(y_clean, splits_y, right=False)
n_bins_y = n_splits_y + 1
E = np.empty((n_bins_y, n_bins_x), dtype=int)
NE = np.empty((n_bins_y, n_bins_x), dtype=int)
for i in range(n_bins_y):
mask_y = (indices_y == i)
for j in range(n_bins_x):
mask_x = (indices_x == j)
mask = mask_x & mask_y
NE[i, j] = np.count_nonzero(z0 & mask)
E[i, j] = np.count_nonzero(z1 & mask)
return NE, E
def _fit_optimizer(self, splits_x, splits_y, NE, E):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
# Min/max number of bins (bin size)
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples))
else:
max_bin_size = self.max_bin_size
# Number of threads
n_jobs = effective_n_jobs(self.n_jobs)
if self.verbose:
logger.info("Optimizer: {} jobs.".format(n_jobs))
if self.monotonic_trend_x is None:
logger.info(
"Optimizer: monotonic trend x not set.")
else:
logger.info("Optimizer: monotonic trend x set to {}."
.format(self.monotonic_trend_x))
if self.monotonic_trend_y is None:
logger.info(
"Optimizer: monotonic trend y not set.")
else:
logger.info("Optimizer: monotonic trend y set to {}."
.format(self.monotonic_trend_y))
if self.solver == "cp":
scale = int(1e6)
optimizer = Binning2DCP(
self.monotonic_trend_x, self.monotonic_trend_y,
self.min_n_bins, self.max_n_bins, self.min_event_rate_diff_x,
self.min_event_rate_diff_y, self.gamma, n_jobs,
self.time_limit)
elif self.solver == "mip":
scale = None
optimizer = Binning2DMIP(
self.monotonic_trend_x, self.monotonic_trend_y,
self.min_n_bins, self.max_n_bins, self.min_event_rate_diff_x,
self.min_event_rate_diff_y, self.gamma, n_jobs,
self.time_limit)
if self.verbose:
logger.info("Optimizer: model data...")
time_model_data = time.perf_counter()
if self.strategy == "cart":
[n_grid, n_rectangles, rows, cols, c, d_connected_x, d_connected_y,
event_rate, n_event, n_nonevent, n_records] = model_data_cart(
self._clf, self.divergence, NE, E, self.monotonic_trend_x,
self.monotonic_trend_y, scale, min_bin_size, max_bin_size,
self.min_bin_n_event, self.max_bin_n_event,
self.min_bin_n_nonevent, self.max_bin_n_nonevent)
else:
[n_grid, n_rectangles, rows, cols, c, d_connected_x, d_connected_y,
event_rate, n_event, n_nonevent, n_records] = model_data(
self.divergence, NE, E, self.monotonic_trend_x,
self.monotonic_trend_y, scale, min_bin_size, max_bin_size,
self.min_bin_n_event, self.max_bin_n_event,
self.min_bin_n_nonevent, self.max_bin_n_nonevent)
self._time_model_data = time.perf_counter() - time_model_data
if self.verbose:
logger.info("Optimizer: model data terminated. Time {:.4f}s"
.format(self._time_model_data))
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(n_grid, n_rectangles, cols, c, d_connected_x,
d_connected_y, event_rate, n_records)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
self._cols = cols
self._rows = rows
self._c = c
return rows, n_nonevent, n_event
def _splits_xy_optimal(self, selected_rows, splits_x, splits_y, P):
bins_x = np.concatenate([[-np.inf], splits_x, [np.inf]])
bins_y = np.concatenate([[-np.inf], splits_y, [np.inf]])
bins_str_x = np.array([[bins_x[i], bins_x[i+1]]
for i in range(len(bins_x) - 1)])
bins_str_y = np.array([[bins_y[i], bins_y[i+1]]
for i in range(len(bins_y) - 1)])
splits_x_optimal = []
splits_y_optimal = []
for i in range(len(selected_rows)):
pos_y, pos_x = np.where(P == i)
mask_x = np.arange(pos_x.min(), pos_x.max() + 1)
mask_y = np.arange(pos_y.min(), pos_y.max() + 1)
bin_x = bins_str_x[mask_x]
bin_y = bins_str_y[mask_y]
splits_x_optimal.append([bin_x[0][0], bin_x[-1][1]])
splits_y_optimal.append([bin_y[0][0], bin_y[-1][1]])
return splits_x_optimal, splits_y_optimal
@property
def splits(self):
"""List of optimal split points and bins for axis x and y.
Returns
-------
splits : (numpy.ndarray, numpy.ndarray)
"""
self._check_is_fitted()
if self.dtype_x == "categorical":
splits_x_optimal = bin_categorical(
self._splits_x_optimal, self._categories_x)
else:
splits_x_optimal = self._splits_x_optimal
if self.dtype_y == "categorical":
splits_y_optimal = bin_categorical(
self._splits_y_optimal, self._categories_y)
else:
splits_y_optimal = self._splits_y_optimal
return (splits_x_optimal, splits_y_optimal)
| (name_x='', name_y='', dtype_x='numerical', dtype_y='numerical', prebinning_method='cart', strategy='grid', solver='cp', divergence='iv', max_n_prebins_x=5, max_n_prebins_y=5, min_prebin_size_x=0.05, min_prebin_size_y=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, min_bin_n_nonevent=None, max_bin_n_nonevent=None, min_bin_n_event=None, max_bin_n_event=None, monotonic_trend_x=None, monotonic_trend_y=None, min_event_rate_diff_x=0, min_event_rate_diff_y=0, gamma=0, special_codes_x=None, special_codes_y=None, split_digits=None, n_jobs=1, time_limit=100, verbose=False) |
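The class docstring above lists the `OptimalBinning2D` parameters. A hedged usage sketch, with the breast cancer features chosen only as example inputs:

```python
from sklearn.datasets import load_breast_cancer
from optbinning import OptimalBinning2D

data = load_breast_cancer()
x = data.data[:, 0]    # "mean radius"
y = data.data[:, 1]    # "mean texture"
z = data.target        # binary target

optb2d = OptimalBinning2D(name_x="mean radius", name_y="mean texture",
                          max_n_prebins_x=10, max_n_prebins_y=10)
optb2d.fit(x, y, z)

print(optb2d.status)
splits_x, splits_y = optb2d.splits   # optimal bins along each axis
```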
4,776 | optbinning.binning.multidimensional.binning_2d | __init__ | null | def __init__(self, name_x="", name_y="", dtype_x="numerical",
dtype_y="numerical", prebinning_method="cart",
strategy="grid", solver="cp", divergence="iv",
max_n_prebins_x=5, max_n_prebins_y=5, min_prebin_size_x=0.05,
min_prebin_size_y=0.05, min_n_bins=None, max_n_bins=None,
min_bin_size=None, max_bin_size=None, min_bin_n_nonevent=None,
max_bin_n_nonevent=None, min_bin_n_event=None,
max_bin_n_event=None, monotonic_trend_x=None,
monotonic_trend_y=None, min_event_rate_diff_x=0,
min_event_rate_diff_y=0, gamma=0, special_codes_x=None,
special_codes_y=None, split_digits=None, n_jobs=1,
time_limit=100, verbose=False):
self.name_x = name_x
self.name_y = name_y
self.dtype_x = dtype_x
self.dtype_y = dtype_y
self.prebinning_method = prebinning_method
self.strategy = strategy
self.solver = solver
self.divergence = divergence
self.max_n_prebins_x = max_n_prebins_x
self.max_n_prebins_y = max_n_prebins_y
self.min_prebin_size_x = min_prebin_size_x
self.min_prebin_size_y = min_prebin_size_y
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.min_bin_n_event = min_bin_n_event
self.max_bin_n_event = max_bin_n_event
self.min_bin_n_nonevent = min_bin_n_nonevent
self.max_bin_n_nonevent = max_bin_n_nonevent
self.monotonic_trend_x = monotonic_trend_x
self.monotonic_trend_y = monotonic_trend_y
self.min_event_rate_diff_x = min_event_rate_diff_x
self.min_event_rate_diff_y = min_event_rate_diff_y
self.gamma = gamma
self.special_codes_x = special_codes_x
self.special_codes_y = special_codes_y
self.split_digits = split_digits
self.n_jobs = n_jobs
self.time_limit = time_limit
self.verbose = verbose
# auxiliary
self._categories_x = None
self._categories_y = None
self._n_event = None
self._n_nonevent = None
self._n_event_special = None
self._n_nonevent_special = None
self._n_event_missing = None
self._n_nonevent_missing = None
self._problem_type = "classification"
# info
self._binning_table = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._solution = None
self._splits_x_optimal = None
self._splits_y_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
| (self, name_x='', name_y='', dtype_x='numerical', dtype_y='numerical', prebinning_method='cart', strategy='grid', solver='cp', divergence='iv', max_n_prebins_x=5, max_n_prebins_y=5, min_prebin_size_x=0.05, min_prebin_size_y=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, min_bin_n_nonevent=None, max_bin_n_nonevent=None, min_bin_n_event=None, max_bin_n_event=None, monotonic_trend_x=None, monotonic_trend_y=None, min_event_rate_diff_x=0, min_event_rate_diff_y=0, gamma=0, special_codes_x=None, special_codes_y=None, split_digits=None, n_jobs=1, time_limit=100, verbose=False) |
4,784 | optbinning.binning.multidimensional.binning_2d | _fit | null | def _fit(self, x, y, z, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, z_clean, x_missing, y_missing, z_missing,
x_special, y_special, z_special,
categories_x, categories_y] = split_data_2d(
self.dtype_x, self.dtype_y, x, y, z, self.special_codes_x,
self.special_codes_y, check_input)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.dtype_x == "categorical":
logger.info("Pre-processing: number of categories in x: {}"
.format(len(categories_x)))
if self.dtype_y == "categorical":
logger.info("Pre-processing: number of categories in y: {}"
.format(len(categories_y)))
if self.verbose:
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
splits_x = self._fit_prebinning(self.dtype_x, x_clean, z_clean,
self.max_n_prebins_x,
self.min_prebin_size_x)
splits_y = self._fit_prebinning(self.dtype_y, y_clean, z_clean,
self.max_n_prebins_y,
self.min_prebin_size_y)
NE, E = self._prebinning_matrices(
splits_x, splits_y, x_clean, y_clean, z_clean, x_missing,
y_missing, z_missing, x_special, y_special, z_special)
if self.strategy == "cart":
if self.verbose:
logger.info("Prebinning: applying strategy cart...")
n_splits_x = len(splits_x)
n_splits_y = len(splits_y)
clf_nodes = n_splits_x * n_splits_y
indices_x = np.digitize(x_clean, splits_x, right=False)
n_bins_x = n_splits_x + 1
indices_y = np.digitize(y_clean, splits_y, right=False)
n_bins_y = n_splits_y + 1
xt = np.empty(len(x_clean), dtype=int)
yt = np.empty(len(y_clean), dtype=int)
for i in range(n_bins_x):
xt[(indices_x == i)] = i
for i in range(n_bins_y):
yt[(indices_y == i)] = i
xyt = np.c_[xt, yt]
min_prebin_size = min(self.min_prebin_size_x,
self.min_prebin_size_y) * 0.25
clf = DecisionTreeClassifier(min_samples_leaf=min_prebin_size,
max_leaf_nodes=clf_nodes)
clf.fit(xyt, z_clean)
self._clf = clf
self._categories_x = categories_x
self._categories_y = categories_y
self._time_prebinning = time.perf_counter() - time_prebinning
self._n_prebins = E.size
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
rows, n_nonevent, n_event = self._fit_optimizer(
splits_x, splits_y, NE, E)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
# Refinements
m, n = E.shape
self._n_refinements = (m * n * (m + 1) * (n + 1)) // 4 - len(rows)
# solution matrices
D = np.empty(m * n, dtype=float)
P = np.empty(m * n, dtype=int)
selected_rows = np.array(rows, dtype=object)[self._solution]
self._selected_rows = selected_rows
self._m, self._n = m, n
n_selected_rows = selected_rows.shape[0] + 2
opt_n_nonevent = np.empty(n_selected_rows, dtype=int)
opt_n_event = np.empty(n_selected_rows, dtype=int)
for i, r in enumerate(selected_rows):
_n_nonevent = n_nonevent[self._solution][i]
_n_event = n_event[self._solution][i]
_event_rate = _n_event / (_n_event + _n_nonevent)
P[r] = i
D[r] = _event_rate
opt_n_nonevent[i] = _n_nonevent
opt_n_event[i] = _n_event
opt_n_nonevent[-2] = self._n_nonevent_special
opt_n_event[-2] = self._n_event_special
opt_n_nonevent[-1] = self._n_nonevent_missing
opt_n_event[-1] = self._n_event_missing
self._n_nonevent = opt_n_nonevent
self._n_event = opt_n_event
D = D.reshape((m, n))
P = P.reshape((m, n))
# optimal bins
splits_x_optimal, splits_y_optimal = self._splits_xy_optimal(
selected_rows, splits_x, splits_y, P)
self._splits_x_optimal = splits_x_optimal
self._splits_y_optimal = splits_y_optimal
# instantiate binning table
self._binning_table = BinningTable2D(
self.name_x, self.name_y, self.dtype_x, self.dtype_y,
splits_x_optimal, splits_y_optimal, m, n, opt_n_nonevent,
opt_n_event, D, P, self._categories_x, self._categories_y)
self.name = "-".join((self.name_x, self.name_y))
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, x, y, z, check_input) |
4,785 | optbinning.binning.multidimensional.binning_2d | _fit_optimizer | null | def _fit_optimizer(self, splits_x, splits_y, NE, E):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
# Min/max number of bins (bin size)
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples))
else:
max_bin_size = self.max_bin_size
# Number of threads
n_jobs = effective_n_jobs(self.n_jobs)
if self.verbose:
logger.info("Optimizer: {} jobs.".format(n_jobs))
if self.monotonic_trend_x is None:
logger.info(
"Optimizer: monotonic trend x not set.")
else:
logger.info("Optimizer: monotonic trend x set to {}."
.format(self.monotonic_trend_x))
if self.monotonic_trend_y is None:
logger.info(
"Optimizer: monotonic trend y not set.")
else:
logger.info("Optimizer: monotonic trend y set to {}."
.format(self.monotonic_trend_y))
if self.solver == "cp":
scale = int(1e6)
optimizer = Binning2DCP(
self.monotonic_trend_x, self.monotonic_trend_y,
self.min_n_bins, self.max_n_bins, self.min_event_rate_diff_x,
self.min_event_rate_diff_y, self.gamma, n_jobs,
self.time_limit)
elif self.solver == "mip":
scale = None
optimizer = Binning2DMIP(
self.monotonic_trend_x, self.monotonic_trend_y,
self.min_n_bins, self.max_n_bins, self.min_event_rate_diff_x,
self.min_event_rate_diff_y, self.gamma, n_jobs,
self.time_limit)
if self.verbose:
logger.info("Optimizer: model data...")
time_model_data = time.perf_counter()
if self.strategy == "cart":
[n_grid, n_rectangles, rows, cols, c, d_connected_x, d_connected_y,
event_rate, n_event, n_nonevent, n_records] = model_data_cart(
self._clf, self.divergence, NE, E, self.monotonic_trend_x,
self.monotonic_trend_y, scale, min_bin_size, max_bin_size,
self.min_bin_n_event, self.max_bin_n_event,
self.min_bin_n_nonevent, self.max_bin_n_nonevent)
else:
[n_grid, n_rectangles, rows, cols, c, d_connected_x, d_connected_y,
event_rate, n_event, n_nonevent, n_records] = model_data(
self.divergence, NE, E, self.monotonic_trend_x,
self.monotonic_trend_y, scale, min_bin_size, max_bin_size,
self.min_bin_n_event, self.max_bin_n_event,
self.min_bin_n_nonevent, self.max_bin_n_nonevent)
self._time_model_data = time.perf_counter() - time_model_data
if self.verbose:
logger.info("Optimizer: model data terminated. Time {:.4f}s"
.format(self._time_model_data))
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(n_grid, n_rectangles, cols, c, d_connected_x,
d_connected_y, event_rate, n_records)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
self._cols = cols
self._rows = rows
self._c = c
return rows, n_nonevent, n_event
| (self, splits_x, splits_y, NE, E) |
4,791 | optbinning.binning.multidimensional.binning_2d | _prebinning_matrices | null | def _prebinning_matrices(self, splits_x, splits_y, x_clean, y_clean,
z_clean, x_missing, y_missing, z_missing,
x_special, y_special, z_special):
z0 = z_clean == 0
z1 = ~z0
# Compute n_nonevent and n_event for special and missing
special_target_info = target_info(z_special)
self._n_nonevent_special = special_target_info[0]
self._n_event_special = special_target_info[1]
missing_target_info = target_info(z_missing)
self._n_nonevent_missing = missing_target_info[0]
self._n_event_missing = missing_target_info[1]
n_splits_x = len(splits_x)
n_splits_y = len(splits_y)
indices_x = np.digitize(x_clean, splits_x, right=False)
n_bins_x = n_splits_x + 1
indices_y = np.digitize(y_clean, splits_y, right=False)
n_bins_y = n_splits_y + 1
E = np.empty((n_bins_y, n_bins_x), dtype=int)
NE = np.empty((n_bins_y, n_bins_x), dtype=int)
for i in range(n_bins_y):
mask_y = (indices_y == i)
for j in range(n_bins_x):
mask_x = (indices_x == j)
mask = mask_x & mask_y
NE[i, j] = np.count_nonzero(z0 & mask)
E[i, j] = np.count_nonzero(z1 & mask)
return NE, E
| (self, splits_x, splits_y, x_clean, y_clean, z_clean, x_missing, y_missing, z_missing, x_special, y_special, z_special) |
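`_prebinning_matrices` counts events and non-events per prebin rectangle with `np.digitize`. A tiny standalone sketch of the same double loop on made-up data (not part of optbinning):

```python
import numpy as np

x = np.array([0.2, 1.5, 2.5, 3.5, 0.8, 2.2])
y = np.array([10., 25., 35., 5., 15., 30.])
z = np.array([0, 1, 0, 1, 0, 1])            # binary target

splits_x = np.array([1.0, 3.0])             # 2 splits -> 3 bins on x
splits_y = np.array([20.0])                 # 1 split  -> 2 bins on y

ix = np.digitize(x, splits_x, right=False)
iy = np.digitize(y, splits_y, right=False)

# Event (E) and non-event (NE) counts per (y-bin, x-bin) rectangle.
E = np.zeros((len(splits_y) + 1, len(splits_x) + 1), dtype=int)
NE = np.zeros_like(E)
for i in range(E.shape[0]):
    for j in range(E.shape[1]):
        mask = (iy == i) & (ix == j)
        E[i, j] = np.count_nonzero((z == 1) & mask)
        NE[i, j] = np.count_nonzero((z == 0) & mask)
print(NE)
print(E)
```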
4,798 | optbinning.binning.multidimensional.binning_2d | fit | Fit the optimal binning 2D according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : OptimalBinning2D
Fitted optimal binning 2D.
| def fit(self, x, y, z, check_input=False):
"""Fit the optimal binning 2D according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : OptimalBinning2D
Fitted optimal binning 2D.
"""
return self._fit(x, y, z, check_input)
| (self, x, y, z, check_input=False) |
4,799 | optbinning.binning.multidimensional.binning_2d | fit_transform | Fit the optimal binning 2D according to the given training data,
then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
| def fit_transform(self, x, y, z, metric="woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Fit the optimal binning 2D according to the given training data,
then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, z, check_input).transform(
x, y, metric, metric_special, metric_missing, show_digits,
check_input)
| (self, x, y, z, metric='woe', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
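As a sketch reusing the same synthetic ``x``, ``y`` and ``z`` as above, ``fit_transform`` chains fitting and transformation in a single call:

z_woe = optb2d.fit_transform(x, y, z, metric="woe")
# metric="event_rate", "indices" or "bins" are also supported, as documented above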
4,808 | optbinning.binning.multidimensional.binning_2d | transform | Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal binning 2D.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
| def transform(self, x, y, metric="woe", metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal binning 2D.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
"""
self._check_is_fitted()
return transform_binary_target(
self.dtype_x, self.dtype_y, self._splits_x_optimal,
self._splits_y_optimal, x, y, self._n_nonevent, self._n_event,
self.special_codes_x, self.special_codes_y, self._categories_x,
self._categories_y, metric, metric_special, metric_missing,
show_digits, check_input)
| (self, x, y, metric='woe', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
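Once fitted, ``transform`` maps new (x, y) pairs to the chosen metric; a short sketch reusing the fitted instance above (scoring data is illustrative):

x_new = rng.normal(size=5)
y_new = rng.normal(size=5)

woe = optb2d.transform(x_new, y_new, metric="woe")
rate = optb2d.transform(x_new, y_new, metric="event_rate")
labels = optb2d.transform(x_new, y_new, metric="bins", show_digits=3)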
4,809 | optbinning.binning.distributed.binning_sketch | OptimalBinningSketch | Optimal binning over data streams of a numerical or categorical
variable with respect to a binary target.
Parameters
----------
name : str, optional (default="")
The variable name.
dtype : str, optional (default="numerical")
The variable data type. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
sketch : str, optional (default="gk")
Sketch algorithm. Supported algorithms are "gk" (Greenwald-Khanna's)
and "t-digest" (Ted Dunning) algorithm. Algorithm "t-digest" relies on
`tdigest <https://github.com/CamDavidsonPilon/tdigest>`_.
eps : float, optional (default=1e-4)
Relative error epsilon. For ``sketch="gk"`` this is the absolute
precision, whereas for ``sketch="t-digest"`` it is the relative precision.
K : int, optional (default=25)
Parameter excess growth K to compute compress threshold in t-digest.
solver : str, optional (default="cp")
The optimizer to solve the optimal binning problem. Supported solvers
are "mip" to choose a mixed-integer programming solver, "cp" to choose
a constrained programming solver or "ls" to choose `LocalSolver
<https://www.localsolver.com/>`_.
divergence : str, optional (default="iv")
The divergence measure in the objective function to be maximized.
Supported divergences are "iv" (Information Value or Jeffrey's
divergence), "js" (Jensen-Shannon), "hellinger" (Hellinger divergence)
and "triangular" (triangular discrimination).
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
min_bin_n_nonevent : int or None, optional (default=None)
The minimum number of non-event records for each bin. If None,
``min_bin_n_nonevent = 1``.
max_bin_n_nonevent : int or None, optional (default=None)
The maximum number of non-event records for each bin. If None, then an
unlimited number of non-event records for each bin.
min_bin_n_event : int or None, optional (default=None)
The minimum number of event records for each bin. If None,
``min_bin_n_event = 1``.
max_bin_n_event : int or None, optional (default=None)
The maximum number of event records for each bin. If None, then an
unlimited number of event records for each bin.
monotonic_trend : str or None, optional (default="auto")
The **event rate** monotonic trend. Supported trends are "auto",
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend maximizing IV using a machine learning classifier, "ascending",
"descending", "concave", "convex", "peak" and "peak_heuristic" to allow
a peak change point, and "valley" and "valley_heuristic" to allow a
valley change point. Trends "auto_heuristic", "peak_heuristic" and
"valley_heuristic" use a heuristic to determine the change point,
and are significantly faster for large size instances (``max_n_prebins
> 20``). Trend "auto_asc_desc" is used to automatically select the best
monotonic trend between "ascending" and "descending". If None, then the
monotonic constraint is disabled.
min_event_rate_diff : float, optional (default=0)
The minimum event rate difference between consecutive bins.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
gamma : float, optional (default=0)
Regularization strength to reduce the number of dominating bins. Larger
values specify stronger regularization.
cat_cutoff : float or None, optional (default=None)
Generate bin others with categories in which the fraction of
occurrences is below the ``cat_cutoff`` value. This option is
available when ``dtype`` is "categorical".
cat_heuristic : bool (default=False)
Whether to merge categories to guarantee max_n_prebins. If True,
this option will be triggered when the number of categories >=
max_n_prebins. This option is recommended if the number of categories,
in the long run, can increase considerably, and recurrent calls to
method ``solve`` are required.
special_codes : array-like or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
mip_solver : str, optional (default="bop")
The mixed-integer programming solver. Supported solvers are "bop" to
choose the Google OR-Tools binary optimizer or "cbc" to choose the
COIN-OR Branch-and-Cut solver CBC.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
Notes
-----
The parameter ``sketch`` is neglected when ``dtype=categorical``. The
sketch parameter ``K`` is only applicable when ``sketch=t-digest``.
Both quantile sketch algorithms produce good results, with t-digest being
the more accurate. Note, however, that the t-digest implementation is
significantly slower than the GK implementation; thus, GK is the
recommended algorithm when handling partitions. **Besides, GK is
deterministic and therefore returns reproducible results.**
| class OptimalBinningSketch(BaseSketch, BaseEstimator):
"""Optimal binning over data streams of a numerical or categorical
variable with respect to a binary target.
Parameters
----------
name : str, optional (default="")
The variable name.
dtype : str, optional (default="numerical")
The variable data type. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
sketch : str, optional (default="gk")
Sketch algorithm. Supported algorithms are "gk" (Greenwald-Khanna's)
and "t-digest" (Ted Dunning) algorithm. Algorithm "t-digest" relies on
`tdigest <https://github.com/CamDavidsonPilon/tdigest>`_.
eps : float, optional (default=1e-4)
Relative error epsilon. For ``sketch="gk"`` this is the absolute
precision, whereas for ``sketch="t-digest"`` it is the relative precision.
K : int, optional (default=25)
Parameter excess growth K to compute compress threshold in t-digest.
solver : str, optional (default="cp")
The optimizer to solve the optimal binning problem. Supported solvers
are "mip" to choose a mixed-integer programming solver, "cp" to choose
a constrained programming solver or "ls" to choose `LocalSolver
<https://www.localsolver.com/>`_.
divergence : str, optional (default="iv")
The divergence measure in the objective function to be maximized.
Supported divergences are "iv" (Information Value or Jeffrey's
divergence), "js" (Jensen-Shannon), "hellinger" (Hellinger divergence)
and "triangular" (triangular discrimination).
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
min_bin_n_nonevent : int or None, optional (default=None)
The minimum number of non-event records for each bin. If None,
``min_bin_n_nonevent = 1``.
max_bin_n_nonevent : int or None, optional (default=None)
The maximum number of non-event records for each bin. If None, then an
unlimited number of non-event records for each bin.
min_bin_n_event : int or None, optional (default=None)
The minimum number of event records for each bin. If None,
``min_bin_n_event = 1``.
max_bin_n_event : int or None, optional (default=None)
The maximum number of event records for each bin. If None, then an
unlimited number of event records for each bin.
monotonic_trend : str or None, optional (default="auto")
The **event rate** monotonic trend. Supported trends are "auto",
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend maximizing IV using a machine learning classifier, "ascending",
"descending", "concave", "convex", "peak" and "peak_heuristic" to allow
a peak change point, and "valley" and "valley_heuristic" to allow a
valley change point. Trends "auto_heuristic", "peak_heuristic" and
"valley_heuristic" use a heuristic to determine the change point,
and are significantly faster for large size instances (``max_n_prebins
> 20``). Trend "auto_asc_desc" is used to automatically select the best
monotonic trend between "ascending" and "descending". If None, then the
monotonic constraint is disabled.
min_event_rate_diff : float, optional (default=0)
The minimum event rate difference between consecutive bins.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
gamma : float, optional (default=0)
Regularization strength to reduce the number of dominating bins. Larger
values specify stronger regularization.
cat_cutoff : float or None, optional (default=None)
Generate bin others with categories in which the fraction of
occurrences is below the ``cat_cutoff`` value. This option is
available when ``dtype`` is "categorical".
cat_heuristic : bool (default=False)
Whether to merge categories to guarantee max_n_prebins. If True,
this option will be triggered when the number of categories >=
max_n_prebins. This option is recommended if the number of categories,
in the long run, can increase considerably, and recurrent calls to
method ``solve`` are required.
special_codes : array-like or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
mip_solver : str, optional (default="bop")
The mixed-integer programming solver. Supported solvers are "bop" to
choose the Google OR-Tools binary optimizer or "cbc" to choose the
COIN-OR Branch-and-Cut solver CBC.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
Notes
-----
The parameter ``sketch`` is neglected when ``dtype=categorical``. The
sketch parameter ``K`` is only applicable when ``sketch=t-digest``.
Both quantile sketch algorithms produce good results, with t-digest being
the more accurate. Note, however, that the t-digest implementation is
significantly slower than the GK implementation; thus, GK is the
recommended algorithm when handling partitions. **Besides, GK is
deterministic and therefore returns reproducible results.**
"""
def __init__(self, name="", dtype="numerical", sketch="gk", eps=1e-4, K=25,
solver="cp", divergence="iv", max_n_prebins=20,
min_n_bins=None, max_n_bins=None, min_bin_size=None,
max_bin_size=None, min_bin_n_nonevent=None,
max_bin_n_nonevent=None, min_bin_n_event=None,
max_bin_n_event=None, monotonic_trend="auto",
min_event_rate_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", gamma=0, cat_cutoff=None,
cat_unknown=None, cat_heuristic=False, special_codes=None,
split_digits=None, mip_solver="bop", time_limit=100,
verbose=False):
self.name = name
self.dtype = dtype
self.sketch = sketch
self.eps = eps
self.K = K
self.solver = solver
self.divergence = divergence
self.max_n_prebins = max_n_prebins
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.min_bin_n_event = min_bin_n_event
self.max_bin_n_event = max_bin_n_event
self.min_bin_n_nonevent = min_bin_n_nonevent
self.max_bin_n_nonevent = max_bin_n_nonevent
self.monotonic_trend = monotonic_trend
self.min_event_rate_diff = min_event_rate_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.gamma = gamma
self.cat_cutoff = cat_cutoff
self.cat_unknown = cat_unknown
self.cat_heuristic = cat_heuristic
self.special_codes = special_codes
self.split_digits = split_digits
self.mip_solver = mip_solver
self.time_limit = time_limit
self.verbose = verbose
# auxiliary
self._flag_min_n_event_nonevent = False
self._categories = None
self._cat_others = []
self._n_event = None
self._n_nonevent = None
self._n_nonevent_missing = None
self._n_event_missing = None
self._n_nonevent_special = None
self._n_event_special = None
self._n_nonevent_cat_others = None
self._n_event_cat_others = None
# data storage
self._bsketch = None
# info
self._binning_table = None
self._n_refinements = 0
self._n_prebins = None
# streaming stats
self._n_add = 0
self._n_solve = 0
self._solve_stats = {}
# timing
self._time_streaming_add = 0
self._time_streaming_solve = 0
self._time_total = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_solved = False
# Check parameters
_check_parameters(**self.get_params())
def add(self, x, y, check_input=False):
"""Add new data x, y to the binning sketch.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
check_input : bool (default=False)
Whether to check input arrays.
"""
if self._bsketch is None:
if self.dtype == "numerical":
self._bsketch = BSketch(self.sketch, self.eps, self.K,
self.special_codes)
else:
self._bsketch = BCatSketch(self.cat_cutoff, self.special_codes)
# Add new data stream
time_add = time.perf_counter()
self._bsketch.add(x, y, check_input)
self._n_add += 1
self._time_streaming_add += time.perf_counter() - time_add
if self.verbose:
logger.info("Sketch: added new data.")
def information(self, print_level=1):
"""Print overview information about the options settings, problem
statistics, and the solution of the computation.
Parameters
----------
print_level : int (default=1)
Level of details.
"""
self._check_is_solved()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
binning_type = self.__class__.__name__.lower()
# Optimizer
if self._optimizer is not None:
solver = self._optimizer
time_solver = self._time_solver
else:
solver = None
time_solver = 0
# Sketch memory usage
memory_usage = asizeof.asizeof(self._bsketch) * 1e-6
dict_user_options = self.get_params()
print_binning_information(binning_type, print_level, self.name,
self._status, self.solver, solver,
self._time_total, self._time_prebinning,
time_solver, self._time_optimizer,
self._time_postprocessing, self._n_prebins,
self._n_refinements, self._bsketch.n,
self._n_add, self._time_streaming_add,
self._n_solve, self._time_streaming_solve,
memory_usage, dict_user_options)
def merge(self, optbsketch):
"""Merge current instance with another OptimalBinningSketch instance.
Parameters
----------
optbsketch : object
OptimalBinningSketch instance.
"""
if not self.mergeable(optbsketch):
raise Exception("optbsketch does not share signature.")
self._bsketch.merge(optbsketch._bsketch)
if self.verbose:
logger.info("Sketch: current sketch was merged.")
def mergeable(self, optbsketch):
"""Check whether two OptimalBinningSketch instances can be merged.
Parameters
----------
optbsketch : object
OptimalBinningSketch instance.
Returns
-------
mergeable : bool
"""
return self.get_params() == optbsketch.get_params()
def plot_progress(self):
"""Plot divergence measure progress."""
self._check_is_solved()
df = pd.DataFrame.from_dict(self._solve_stats).T
plot_progress_divergence(df, self.divergence)
def solve(self):
"""Solve optimal binning using added data.
Returns
-------
self : OptimalBinningSketch
Current fitted optimal binning.
"""
time_init = time.perf_counter()
# Check if data was added
if not self._n_add:
raise NotFittedError("No data was added. Add data before solving.")
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
splits, n_nonevent, n_event = self._prebinning_data()
self._n_prebins = len(splits) + 1
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning: number of refinements: {}"
.format(self._n_refinements))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
self._fit_optimizer(splits, n_nonevent, n_event)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
if not len(splits):
n_nonevent = np.array([self._t_n_nonevent])
n_event = np.array([self._t_n_event])
self._n_nonevent, self._n_event = bin_info(
self._solution, n_nonevent, n_event, self._n_nonevent_missing,
self._n_event_missing, self._n_nonevent_special,
self._n_event_special, self._n_nonevent_cat_others,
self._n_event_cat_others, self._cat_others)
self._binning_table = BinningTable(
self.name, self.dtype, self.special_codes, self._splits_optimal,
self._n_nonevent, self._n_event, None, None, self._categories,
self._cat_others, None)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
self._time_streaming_solve += self._time_total
self._n_solve += 1
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_solved = True
self._update_streaming_stats()
return self
def transform(self, x, metric="woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Transform given data to Weight of Evidence (WoE) or event rate using
bins from the current fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero WoE or event rate.
"""
self._check_is_solved()
return transform_binary_target(self._splits_optimal, self.dtype, x,
self._n_nonevent, self._n_event,
self.special_codes, self._categories,
self._cat_others, self.cat_unknown,
metric, metric_special, metric_missing,
None, show_digits, check_input)
def _prebinning_data(self):
self._n_nonevent_missing = self._bsketch._count_missing_ne
self._n_nonevent_special = self._bsketch._count_special_ne
self._n_event_missing = self._bsketch._count_missing_e
self._n_event_special = self._bsketch._count_special_e
self._t_n_nonevent = self._bsketch.n_nonevent
self._t_n_event = self._bsketch.n_event
if self.dtype == "numerical":
sketch_all = self._bsketch.merge_sketches()
if self.sketch == "gk":
percentiles = np.linspace(0, 1, self.max_n_prebins + 1)
splits = np.array([sketch_all.quantile(p)
for p in percentiles[1:-1]])
elif self.sketch == "t-digest":
percentiles = np.linspace(0, 100, self.max_n_prebins + 1)
splits = np.array([sketch_all.percentile(p)
for p in percentiles[1:-1]])
splits, n_nonevent, n_event = self._compute_prebins(splits)
else:
[splits, categories, n_nonevent, n_event, cat_others,
n_nonevent_others, n_event_others] = self._bsketch.bins()
self._categories = categories
self._cat_others = cat_others
self._n_nonevent_cat_others = n_nonevent_others
self._n_event_cat_others = n_event_others
[splits, categories, n_nonevent,
n_event] = self._compute_cat_prebins(splits, categories,
n_nonevent, n_event)
self._splits_prebinning = splits
return splits, n_nonevent, n_event
def _compute_prebins(self, splits):
self._n_refinements = 0
n_event, n_nonevent = self._bsketch.bins(splits)
mask_remove = (n_nonevent == 0) | (n_event == 0)
if np.any(mask_remove):
if self.divergence in ("hellinger", "triangular"):
self._flag_min_n_event_nonevent = True
else:
self._n_refinements += 1
mask_splits = np.concatenate(
[mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
splits = splits[~mask_splits]
splits, n_nonevent, n_event = self._compute_prebins(splits)
return splits, n_nonevent, n_event
def _compute_cat_prebins(self, splits, categories, n_nonevent, n_event):
self._n_refinements = 0
mask_remove = (n_nonevent == 0) | (n_event == 0)
if self.cat_heuristic and len(categories) > self.max_n_prebins:
n_records = n_nonevent + n_event
mask_size = n_records < self._bsketch.n / self.max_n_prebins
mask_remove |= mask_size
if np.any(mask_remove):
if self.divergence in ("hellinger", "triangular"):
self._flag_min_n_event_nonevent = True
if self.cat_heuristic:
mask_remove = mask_size
else:
self._n_refinements += 1
mask_splits = np.concatenate(
[mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
splits = splits[~mask_splits]
splits_int = np.ceil(splits).astype(int)
indices = np.digitize(np.arange(len(categories)), splits_int,
right=False)
n_bins = len(splits) + 1
new_nonevent = np.empty(n_bins, dtype=np.int64)
new_event = np.empty(n_bins, dtype=np.int64)
new_categories = []
for i in range(n_bins):
mask = (indices == i)
new_categories.append(categories[mask])
new_nonevent[i] = n_nonevent[mask].sum()
new_event[i] = n_event[mask].sum()
new_categories = np.array(new_categories, dtype=object)
[splits, categories, n_nonevent,
n_event] = self._compute_cat_prebins(
splits, new_categories, new_nonevent, new_event)
return splits, categories, n_nonevent, n_event
def _fit_optimizer(self, splits, n_nonevent, n_event):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
if len(n_nonevent) <= 1:
self._status = "OPTIMAL"
self._splits_optimal = splits
self._solution = np.zeros(len(splits)).astype(bool)
if self.verbose:
logger.warning("Optimizer: {} bins after pre-binning."
.format(len(n_nonevent)))
logger.warning("Optimizer: solver not run.")
logger.info("Optimizer terminated. Time: 0s")
return
# Min/max number of bins
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._bsketch.n))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._bsketch.n))
else:
max_bin_size = self.max_bin_size
# Min number of event and nonevent per bin
if (self.divergence in ("hellinger", "triangular") and
self._flag_min_n_event_nonevent):
if self.min_bin_n_nonevent is None:
min_bin_n_nonevent = 1
else:
min_bin_n_nonevent = max(self.min_bin_n_nonevent, 1)
if self.min_bin_n_event is None:
min_bin_n_event = 1
else:
min_bin_n_event = max(self.min_bin_n_event, 1)
else:
min_bin_n_nonevent = self.min_bin_n_nonevent
min_bin_n_event = self.min_bin_n_event
# Monotonic trend
trend_change = None
if self.dtype == "numerical":
auto_monotonic_modes = ("auto", "auto_heuristic", "auto_asc_desc")
if self.monotonic_trend in auto_monotonic_modes:
monotonic = auto_monotonic(n_nonevent, n_event,
self.monotonic_trend)
if self.monotonic_trend == "auto_heuristic":
if monotonic in ("peak", "valley"):
if monotonic == "peak":
monotonic = "peak_heuristic"
else:
monotonic = "valley_heuristic"
event_rate = n_event / (n_nonevent + n_event)
trend_change = peak_valley_trend_change_heuristic(
event_rate, monotonic)
if self.verbose:
logger.info("Optimizer: classifier predicts {} "
"monotonic trend.".format(monotonic))
else:
monotonic = self.monotonic_trend
if monotonic in ("peak_heuristic", "valley_heuristic"):
event_rate = n_event / (n_nonevent + n_event)
trend_change = peak_valley_trend_change_heuristic(
event_rate, monotonic)
if self.verbose:
logger.info("Optimizer: trend change position {}."
.format(trend_change))
else:
monotonic = self.monotonic_trend
if monotonic is not None:
monotonic = "ascending"
if self.verbose:
if monotonic is None:
logger.info(
"Optimizer: monotonic trend not set.")
else:
logger.info("Optimizer: monotonic trend set to {}."
.format(monotonic))
if self.solver == "cp":
optimizer = BinningCP(monotonic, self.min_n_bins, self.max_n_bins,
min_bin_size, max_bin_size,
min_bin_n_event, self.max_bin_n_event,
min_bin_n_nonevent, self.max_bin_n_nonevent,
self.min_event_rate_diff, self.max_pvalue,
self.max_pvalue_policy, self.gamma,
None, self.time_limit)
elif self.solver == "mip":
optimizer = BinningMIP(monotonic, self.min_n_bins, self.max_n_bins,
min_bin_size, max_bin_size,
min_bin_n_event, self.max_bin_n_event,
min_bin_n_nonevent, self.max_bin_n_nonevent,
self.min_event_rate_diff, self.max_pvalue,
self.max_pvalue_policy, self.gamma,
None, self.mip_solver, self.time_limit)
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(self.divergence, n_nonevent, n_event,
trend_change)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
self._splits_optimal = splits[solution[:-1]]
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
def _update_streaming_stats(self):
self._binning_table.build()
if self.divergence == "iv":
dv = self._binning_table.iv
elif self.divergence == "js":
dv = self._binning_table.js
elif self.divergence == "hellinger":
dv = self._binning_table.hellinger
elif self.divergence == "triangular":
dv = self._binning_table.triangular
self._solve_stats[self._n_solve] = {
"n_add": self._n_add,
"n_records": self._bsketch.n,
"divergence".format(self.divergence): dv
}
@property
def binning_table(self):
"""Return an instantiated binning table. Please refer to
:ref:`Binning table: binary target`.
Returns
-------
binning_table : BinningTable.
"""
self._check_is_solved()
return self._binning_table
@property
def splits(self):
"""List of optimal split points when ``dtype`` is set to "numerical" or
list of optimal bins when ``dtype`` is set to "categorical".
Returns
-------
splits : numpy.ndarray
"""
self._check_is_solved()
if self.dtype == "numerical":
return self._splits_optimal
else:
return bin_categorical(self._splits_optimal, self._categories,
self._cat_others, None)
@property
def status(self):
"""The status of the underlying optimization solver.
Returns
-------
status : str
"""
self._check_is_solved()
return self._status
| (name='', dtype='numerical', sketch='gk', eps=0.0001, K=25, solver='cp', divergence='iv', max_n_prebins=20, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, min_bin_n_nonevent=None, max_bin_n_nonevent=None, min_bin_n_event=None, max_bin_n_event=None, monotonic_trend='auto', min_event_rate_diff=0, max_pvalue=None, max_pvalue_policy='consecutive', gamma=0, cat_cutoff=None, cat_unknown=None, cat_heuristic=False, special_codes=None, split_digits=None, mip_solver='bop', time_limit=100, verbose=False) |
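A minimal streaming sketch for ``OptimalBinningSketch`` (assuming it is importable from the top-level ``optbinning`` package; data and chunking are illustrative):

import numpy as np
from optbinning import OptimalBinningSketch  # assumed top-level import

rng = np.random.default_rng(42)
optb_sk = OptimalBinningSketch(name="feature", sketch="gk", eps=1e-4)

# stream the data in chunks instead of holding it all in memory
for _ in range(10):
    x_chunk = rng.normal(size=10_000)
    p = 1.0 / (1.0 + np.exp(-x_chunk))
    y_chunk = (rng.random(10_000) < p).astype(int)
    optb_sk.add(x_chunk, y_chunk)

optb_sk.solve()            # run optimal binning on the accumulated sketch
print(optb_sk.status)      # e.g. "OPTIMAL"
print(optb_sk.splits)      # optimal split points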
4,811 | optbinning.binning.distributed.binning_sketch | __init__ | null | def __init__(self, name="", dtype="numerical", sketch="gk", eps=1e-4, K=25,
solver="cp", divergence="iv", max_n_prebins=20,
min_n_bins=None, max_n_bins=None, min_bin_size=None,
max_bin_size=None, min_bin_n_nonevent=None,
max_bin_n_nonevent=None, min_bin_n_event=None,
max_bin_n_event=None, monotonic_trend="auto",
min_event_rate_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", gamma=0, cat_cutoff=None,
cat_unknown=None, cat_heuristic=False, special_codes=None,
split_digits=None, mip_solver="bop", time_limit=100,
verbose=False):
self.name = name
self.dtype = dtype
self.sketch = sketch
self.eps = eps
self.K = K
self.solver = solver
self.divergence = divergence
self.max_n_prebins = max_n_prebins
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.min_bin_n_event = min_bin_n_event
self.max_bin_n_event = max_bin_n_event
self.min_bin_n_nonevent = min_bin_n_nonevent
self.max_bin_n_nonevent = max_bin_n_nonevent
self.monotonic_trend = monotonic_trend
self.min_event_rate_diff = min_event_rate_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.gamma = gamma
self.cat_cutoff = cat_cutoff
self.cat_unknown = cat_unknown
self.cat_heuristic = cat_heuristic
self.special_codes = special_codes
self.split_digits = split_digits
self.mip_solver = mip_solver
self.time_limit = time_limit
self.verbose = verbose
# auxiliary
self._flag_min_n_event_nonevent = False
self._categories = None
self._cat_others = []
self._n_event = None
self._n_nonevent = None
self._n_nonevent_missing = None
self._n_event_missing = None
self._n_nonevent_special = None
self._n_event_special = None
self._n_nonevent_cat_others = None
self._n_event_cat_others = None
# data storage
self._bsketch = None
# info
self._binning_table = None
self._n_refinements = 0
self._n_prebins = None
# streaming stats
self._n_add = 0
self._n_solve = 0
self._solve_stats = {}
# timing
self._time_streaming_add = 0
self._time_streaming_solve = 0
self._time_total = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_solved = False
# Check parameters
_check_parameters(**self.get_params())
| (self, name='', dtype='numerical', sketch='gk', eps=0.0001, K=25, solver='cp', divergence='iv', max_n_prebins=20, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, min_bin_n_nonevent=None, max_bin_n_nonevent=None, min_bin_n_event=None, max_bin_n_event=None, monotonic_trend='auto', min_event_rate_diff=0, max_pvalue=None, max_pvalue_policy='consecutive', gamma=0, cat_cutoff=None, cat_unknown=None, cat_heuristic=False, special_codes=None, split_digits=None, mip_solver='bop', time_limit=100, verbose=False) |
4,818 | optbinning.binning.distributed.binning_sketch | _compute_cat_prebins | null | def _compute_cat_prebins(self, splits, categories, n_nonevent, n_event):
self._n_refinements = 0
mask_remove = (n_nonevent == 0) | (n_event == 0)
if self.cat_heuristic and len(categories) > self.max_n_prebins:
n_records = n_nonevent + n_event
mask_size = n_records < self._bsketch.n / self.max_n_prebins
mask_remove |= mask_size
if np.any(mask_remove):
if self.divergence in ("hellinger", "triangular"):
self._flag_min_n_event_nonevent = True
if self.cat_heuristic:
mask_remove = mask_size
else:
self._n_refinements += 1
mask_splits = np.concatenate(
[mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
splits = splits[~mask_splits]
splits_int = np.ceil(splits).astype(int)
indices = np.digitize(np.arange(len(categories)), splits_int,
right=False)
n_bins = len(splits) + 1
new_nonevent = np.empty(n_bins, dtype=np.int64)
new_event = np.empty(n_bins, dtype=np.int64)
new_categories = []
for i in range(n_bins):
mask = (indices == i)
new_categories.append(categories[mask])
new_nonevent[i] = n_nonevent[mask].sum()
new_event[i] = n_event[mask].sum()
new_categories = np.array(new_categories, dtype=object)
[splits, categories, n_nonevent,
n_event] = self._compute_cat_prebins(
splits, new_categories, new_nonevent, new_event)
return splits, categories, n_nonevent, n_event
| (self, splits, categories, n_nonevent, n_event) |
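The category-merging step above assigns ordered category positions to bins via ``np.digitize``; a toy sketch of that grouping, with hypothetical category labels and split positions:

import numpy as np

categories = np.array(["A", "B", "C", "D", "E"], dtype=object)
splits = np.array([1.5, 3.5])            # split positions over category indices

splits_int = np.ceil(splits).astype(int)                          # -> [2, 4]
indices = np.digitize(np.arange(len(categories)), splits_int, right=False)
groups = [categories[indices == i] for i in range(len(splits) + 1)]
# groups -> [['A', 'B'], ['C', 'D'], ['E']]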
4,819 | optbinning.binning.distributed.binning_sketch | _compute_prebins | null | def _compute_prebins(self, splits):
self._n_refinements = 0
n_event, n_nonevent = self._bsketch.bins(splits)
mask_remove = (n_nonevent == 0) | (n_event == 0)
if np.any(mask_remove):
if self.divergence in ("hellinger", "triangular"):
self._flag_min_n_event_nonevent = True
else:
self._n_refinements += 1
mask_splits = np.concatenate(
[mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
splits = splits[~mask_splits]
splits, n_nonevent, n_event = self._compute_prebins(splits)
return splits, n_nonevent, n_event
| (self, splits) |
4,820 | optbinning.binning.distributed.binning_sketch | _fit_optimizer | null | def _fit_optimizer(self, splits, n_nonevent, n_event):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
if len(n_nonevent) <= 1:
self._status = "OPTIMAL"
self._splits_optimal = splits
self._solution = np.zeros(len(splits)).astype(bool)
if self.verbose:
logger.warning("Optimizer: {} bins after pre-binning."
.format(len(n_nonevent)))
logger.warning("Optimizer: solver not run.")
logger.info("Optimizer terminated. Time: 0s")
return
# Min/max number of bins
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._bsketch.n))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._bsketch.n))
else:
max_bin_size = self.max_bin_size
# Min number of event and nonevent per bin
if (self.divergence in ("hellinger", "triangular") and
self._flag_min_n_event_nonevent):
if self.min_bin_n_nonevent is None:
min_bin_n_nonevent = 1
else:
min_bin_n_nonevent = max(self.min_bin_n_nonevent, 1)
if self.min_bin_n_event is None:
min_bin_n_event = 1
else:
min_bin_n_event = max(self.min_bin_n_event, 1)
else:
min_bin_n_nonevent = self.min_bin_n_nonevent
min_bin_n_event = self.min_bin_n_event
# Monotonic trend
trend_change = None
if self.dtype == "numerical":
auto_monotonic_modes = ("auto", "auto_heuristic", "auto_asc_desc")
if self.monotonic_trend in auto_monotonic_modes:
monotonic = auto_monotonic(n_nonevent, n_event,
self.monotonic_trend)
if self.monotonic_trend == "auto_heuristic":
if monotonic in ("peak", "valley"):
if monotonic == "peak":
monotonic = "peak_heuristic"
else:
monotonic = "valley_heuristic"
event_rate = n_event / (n_nonevent + n_event)
trend_change = peak_valley_trend_change_heuristic(
event_rate, monotonic)
if self.verbose:
logger.info("Optimizer: classifier predicts {} "
"monotonic trend.".format(monotonic))
else:
monotonic = self.monotonic_trend
if monotonic in ("peak_heuristic", "valley_heuristic"):
event_rate = n_event / (n_nonevent + n_event)
trend_change = peak_valley_trend_change_heuristic(
event_rate, monotonic)
if self.verbose:
logger.info("Optimizer: trend change position {}."
.format(trend_change))
else:
monotonic = self.monotonic_trend
if monotonic is not None:
monotonic = "ascending"
if self.verbose:
if monotonic is None:
logger.info(
"Optimizer: monotonic trend not set.")
else:
logger.info("Optimizer: monotonic trend set to {}."
.format(monotonic))
if self.solver == "cp":
optimizer = BinningCP(monotonic, self.min_n_bins, self.max_n_bins,
min_bin_size, max_bin_size,
min_bin_n_event, self.max_bin_n_event,
min_bin_n_nonevent, self.max_bin_n_nonevent,
self.min_event_rate_diff, self.max_pvalue,
self.max_pvalue_policy, self.gamma,
None, self.time_limit)
elif self.solver == "mip":
optimizer = BinningMIP(monotonic, self.min_n_bins, self.max_n_bins,
min_bin_size, max_bin_size,
min_bin_n_event, self.max_bin_n_event,
min_bin_n_nonevent, self.max_bin_n_nonevent,
self.min_event_rate_diff, self.max_pvalue,
self.max_pvalue_policy, self.gamma,
None, self.mip_solver, self.time_limit)
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(self.divergence, n_nonevent, n_event,
trend_change)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
self._splits_optimal = splits[solution[:-1]]
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
| (self, splits, n_nonevent, n_event) |
4,825 | optbinning.binning.distributed.binning_sketch | _prebinning_data | null | def _prebinning_data(self):
self._n_nonevent_missing = self._bsketch._count_missing_ne
self._n_nonevent_special = self._bsketch._count_special_ne
self._n_event_missing = self._bsketch._count_missing_e
self._n_event_special = self._bsketch._count_special_e
self._t_n_nonevent = self._bsketch.n_nonevent
self._t_n_event = self._bsketch.n_event
if self.dtype == "numerical":
sketch_all = self._bsketch.merge_sketches()
if self.sketch == "gk":
percentiles = np.linspace(0, 1, self.max_n_prebins + 1)
splits = np.array([sketch_all.quantile(p)
for p in percentiles[1:-1]])
elif self.sketch == "t-digest":
percentiles = np.linspace(0, 100, self.max_n_prebins + 1)
splits = np.array([sketch_all.percentile(p)
for p in percentiles[1:-1]])
splits, n_nonevent, n_event = self._compute_prebins(splits)
else:
[splits, categories, n_nonevent, n_event, cat_others,
n_nonevent_others, n_event_others] = self._bsketch.bins()
self._categories = categories
self._cat_others = cat_others
self._n_nonevent_cat_others = n_nonevent_others
self._n_event_cat_others = n_event_others
[splits, categories, n_nonevent,
n_event] = self._compute_cat_prebins(splits, categories,
n_nonevent, n_event)
self._splits_prebinning = splits
return splits, n_nonevent, n_event
| (self) |
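For numerical data the pre-binning splits are interior quantiles of the merged sketch; the same idea with plain NumPy quantiles on hypothetical in-memory data (the actual code queries the GK or t-digest sketch instead):

import numpy as np

data = np.random.default_rng(1).normal(size=10_000)
max_n_prebins = 20

probs = np.linspace(0, 1, max_n_prebins + 1)[1:-1]   # interior cut points
splits = np.quantile(data, probs)                    # 19 candidate split points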
4,828 | optbinning.binning.distributed.binning_sketch | _update_streaming_stats | null | def _update_streaming_stats(self):
self._binning_table.build()
if self.divergence == "iv":
dv = self._binning_table.iv
elif self.divergence == "js":
dv = self._binning_table.js
elif self.divergence == "hellinger":
dv = self._binning_table.hellinger
elif self.divergence == "triangular":
dv = self._binning_table.triangular
self._solve_stats[self._n_solve] = {
"n_add": self._n_add,
"n_records": self._bsketch.n,
"divergence".format(self.divergence): dv
}
| (self) |
4,831 | optbinning.binning.distributed.binning_sketch | add | Add new data x, y to the binning sketch.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
check_input : bool (default=False)
Whether to check input arrays.
| def add(self, x, y, check_input=False):
"""Add new data x, y to the binning sketch.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
check_input : bool (default=False)
Whether to check input arrays.
"""
if self._bsketch is None:
if self.dtype == "numerical":
self._bsketch = BSketch(self.sketch, self.eps, self.K,
self.special_codes)
else:
self._bsketch = BCatSketch(self.cat_cutoff, self.special_codes)
# Add new data stream
time_add = time.perf_counter()
self._bsketch.add(x, y, check_input)
self._n_add += 1
self._time_streaming_add += time.perf_counter() - time_add
if self.verbose:
logger.info("Sketch: added new data.")
| (self, x, y, check_input=False) |
4,834 | optbinning.binning.distributed.binning_sketch | information | Print overview information about the options settings, problem
statistics, and the solution of the computation.
Parameters
----------
print_level : int (default=1)
Level of details.
| def information(self, print_level=1):
"""Print overview information about the options settings, problem
statistics, and the solution of the computation.
Parameters
----------
print_level : int (default=1)
Level of details.
"""
self._check_is_solved()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
binning_type = self.__class__.__name__.lower()
# Optimizer
if self._optimizer is not None:
solver = self._optimizer
time_solver = self._time_solver
else:
solver = None
time_solver = 0
# Sketch memory usage
memory_usage = asizeof.asizeof(self._bsketch) * 1e-6
dict_user_options = self.get_params()
print_binning_information(binning_type, print_level, self.name,
self._status, self.solver, solver,
self._time_total, self._time_prebinning,
time_solver, self._time_optimizer,
self._time_postprocessing, self._n_prebins,
self._n_refinements, self._bsketch.n,
self._n_add, self._time_streaming_add,
self._n_solve, self._time_streaming_solve,
memory_usage, dict_user_options)
| (self, print_level=1) |
4,835 | optbinning.binning.distributed.binning_sketch | merge | Merge current instance with another OptimalBinningSketch instance.
Parameters
----------
optbsketch : object
OptimalBinningSketch instance.
| def merge(self, optbsketch):
"""Merge current instance with another OptimalBinningSketch instance.
Parameters
----------
optbsketch : object
OptimalBinningSketch instance.
"""
if not self.mergeable(optbsketch):
raise Exception("optbsketch does not share signature.")
self._bsketch.merge(optbsketch._bsketch)
if self.verbose:
logger.info("Sketch: current sketch was merged.")
| (self, optbsketch) |
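A hedged sketch of the distributed pattern that ``merge`` enables: build one sketch per data partition, merge them, and solve once (partition data is illustrative):

import numpy as np
from optbinning import OptimalBinningSketch  # assumed top-level import

def sketch_partition(seed):
    """Build a sketch on one partition of the data."""
    rng_p = np.random.default_rng(seed)
    x = rng_p.normal(size=50_000)
    y = (rng_p.random(50_000) < 1.0 / (1.0 + np.exp(-x))).astype(int)
    sk = OptimalBinningSketch(name="feature")
    sk.add(x, y)
    return sk

sk_a = sketch_partition(1)
sk_b = sketch_partition(2)

if sk_a.mergeable(sk_b):   # both instances must share identical parameters
    sk_a.merge(sk_b)       # sk_a now summarizes both partitions
    sk_a.solve()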
4,836 | optbinning.binning.distributed.binning_sketch | mergeable | Check whether two OptimalBinningSketch instances can be merged.
Parameters
----------
optbsketch : object
OptimalBinningSketch instance.
Returns
-------
mergeable : bool
| def mergeable(self, optbsketch):
"""Check whether two OptimalBinningSketch instances can be merged.
Parameters
----------
optbsketch : object
OptimalBinningSketch instance.
Returns
-------
mergeable : bool
"""
return self.get_params() == optbsketch.get_params()
| (self, optbsketch) |
4,837 | optbinning.binning.distributed.binning_sketch | plot_progress | Plot divergence measure progress. | def plot_progress(self):
"""Plot divergence measure progress."""
self._check_is_solved()
df = pd.DataFrame.from_dict(self._solve_stats).T
plot_progress_divergence(df, self.divergence)
| (self) |
4,840 | optbinning.binning.distributed.binning_sketch | solve | Solve optimal binning using added data.
Returns
-------
self : OptimalBinningSketch
Current fitted optimal binning.
| def solve(self):
"""Solve optimal binning using added data.
Returns
-------
self : OptimalBinningSketch
Current fitted optimal binning.
"""
time_init = time.perf_counter()
# Check if data was added
if not self._n_add:
raise NotFittedError("No data was added. Add data before solving.")
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
splits, n_nonevent, n_event = self._prebinning_data()
self._n_prebins = len(splits) + 1
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning: number of refinements: {}"
.format(self._n_refinements))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
self._fit_optimizer(splits, n_nonevent, n_event)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
if not len(splits):
n_nonevent = np.array([self._t_n_nonevent])
n_event = np.array([self._t_n_event])
self._n_nonevent, self._n_event = bin_info(
self._solution, n_nonevent, n_event, self._n_nonevent_missing,
self._n_event_missing, self._n_nonevent_special,
self._n_event_special, self._n_nonevent_cat_others,
self._n_event_cat_others, self._cat_others)
self._binning_table = BinningTable(
self.name, self.dtype, self.special_codes, self._splits_optimal,
self._n_nonevent, self._n_event, None, None, self._categories,
self._cat_others, None)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
self._time_streaming_solve += self._time_total
self._n_solve += 1
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_solved = True
self._update_streaming_stats()
return self
| (self) |
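Because ``solve`` can be called repeatedly as data keeps arriving, a typical loop alternates ``add`` and ``solve`` and then inspects the divergence progress; a sketch under the same assumptions as the streaming example above:

for day in range(5):
    x_day = rng.normal(size=20_000)
    y_day = (rng.random(20_000) < 1.0 / (1.0 + np.exp(-x_day))).astype(int)
    optb_sk.add(x_day, y_day)
    optb_sk.solve()            # re-optimize on the updated sketch

optb_sk.plot_progress()        # divergence (e.g. IV) across solve calls
optb_sk.information(print_level=2)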
4,841 | optbinning.binning.distributed.binning_sketch | transform | Transform given data to Weight of Evidence (WoE) or event rate using
bins from the current fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero WoE or event rate.
| def transform(self, x, metric="woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Transform given data to Weight of Evidence (WoE) or event rate using
bins from the current fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero WoE or event rate.
"""
self._check_is_solved()
return transform_binary_target(self._splits_optimal, self.dtype, x,
self._n_nonevent, self._n_event,
self.special_codes, self._categories,
self._cat_others, self.cat_unknown,
metric, metric_special, metric_missing,
None, show_digits, check_input)
| (self, x, metric='woe', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
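After solving, ``transform`` applies the learned bins to new data; a short sketch reusing the solved instance above (scoring data is illustrative):

x_scoring = rng.normal(size=1_000)

woe = optb_sk.transform(x_scoring, metric="woe")
rate = optb_sk.transform(x_scoring, metric="event_rate")
idx = optb_sk.transform(x_scoring, metric="indices")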
4,842 | optbinning.binning.piecewise.binning | OptimalPWBinning | Optimal Piecewise binning of a numerical variable with respect to a
binary target.
Parameters
----------
name : str, optional (default="")
The variable name.
estimator : object or None (default=None)
An estimator to compute probability estimates. If None, it uses
`sklearn.linear_model.LogisticRegression
<https://scikit-learn.org/stable/modules/generated/
sklearn.linear_model.LogisticRegression.html>`_. The estimator must be
an object with method `predict_proba`.
objective : str, optional (default="l2")
The objective function. Supported objectives are "l2", "l1", "huber"
and "quantile". Note that "l1", "huber" and "quantile" are robust
objective functions.
degree : int (default=1)
The degree of the polynomials.
* degree = 0: piecewise constant functions.
* degree = 1: piecewise linear functions.
* degree > 1: piecewise polynomial functions.
continuous : bool (default=True)
Whether to fit a continuous or discontinuous piecewise regression.
continuous_deriv : bool (default=True)
Whether to fit a polynomial with continuous derivatives. This option
fits a smooth degree d-polynomial with d-1 continuity in derivatives
(splines).
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecistionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str or None, optional (default="auto")
The **event rate** monotonic trend. Supported trends are "auto",
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend maximizing IV using a machine learning classifier, "ascending",
"descending", "concave", "convex", "peak" and "peak_heuristic" to allow
a peak change point, and "valley" and "valley_heuristic" to allow a
valley change point. Trends "auto_heuristic", "peak_heuristic" and
"valley_heuristic" use a heuristic to determine the change point,
and are significantly faster for large size instances (``max_n_prebins
> 20``). Trend "auto_asc_desc" is used to automatically select the best
monotonic trend between "ascending" and "descending". If None, then the
monotonic constraint is disabled.
n_subsamples : int or None (default=None)
Number of subsamples to fit the piecewise regression algorithm. If
None, all values are considered.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint. Option supported by solvers
"cp" and "mip".
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
the interquartile range based method, "zscore" to use the modified
Z-score method or "yquantile" to use the y-axis detector over
quantiles.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
solver : str, optional (default="auto")
The optimizer to solve the underlying mathematical optimization
problem. Supported solvers are `"ecos"
<https://github.com/embotech/ecos>`_, `"osqp"
<https://github.com/oxfordcontrol/osqp>`_, "direct", to choose the
direct solver, and "auto", to choose the most appropriate solver for
the problem. Version 0.16.1 added support for solvers
`"scs" <https://github.com/cvxgrp/scs>`_ and `"highs"
<https://github.com/ERGO-Code/HiGHS>`_.
h_epsilon : float (default=1.35)
The parameter h_epsilon, used when ``objective="huber"``, controls the
number of samples that should be classified as outliers.
quantile : float (default=0.5)
The parameter quantile is the q-th quantile to be used when
``objective="quantile"``.
regularization : str or None (default=None)
Type of regularization. Supported regularization types are "l1" (Lasso)
and "l2" (Ridge). If None, no regularization is applied.
reg_l1 : float (default=1.0)
L1 regularization term. Increasing this value will smooth the
regression model. Only applicable if ``regularization="l1"``.
reg_l2 : float (default=1.0)
L2 regularization term. Increasing this value will smooth the
regression model. Only applicable if ``regularization="l2"``.
random_state : int, RandomState instance or None (default=None)
If ``n_subsamples < n_samples``, controls the shuffling applied to the
data before applying the split.
verbose : bool (default=False)
Enable verbose output.
| class OptimalPWBinning(BasePWBinning):
"""Optimal Piecewise binning of a numerical variable with respect to a
binary target.
Parameters
----------
name : str, optional (default="")
The variable name.
estimator : object or None (default=None)
An estimator to compute probability estimates. If None, it uses
`sklearn.linear_model.LogisticRegression
<https://scikit-learn.org/stable/modules/generated/
sklearn.linear_model.LogisticRegression.html>`_. The estimator must be
an object with method `predict_proba`.
objective : str, optional (default="l2")
The objective function. Supported objectives are "l2", "l1", "huber"
and "quantile". Note that "l1", "huber" and "quantile" are robust
objective functions.
degree : int (default=1)
The degree of the polynomials.
* degree = 0: piecewise constant functions.
* degree = 1: piecewise linear functions.
* degree > 1: piecewise polynomial functions.
continuous : bool (default=True)
Whether to fit a continuous or discontinuous piecewise regression.
continuous_deriv : bool (default=True)
Whether to fit a polynomial with continuous derivatives. This option
fits a smooth degree d-polynomial with d-1 continuity in derivatives
(splines).
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecistionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str or None, optional (default="auto")
The **event rate** monotonic trend. Supported trends are "auto",
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend maximizing IV using a machine learning classifier, "ascending",
"descending", "concave", "convex", "peak" and "peak_heuristic" to allow
a peak change point, and "valley" and "valley_heuristic" to allow a
valley change point. Trends "auto_heuristic", "peak_heuristic" and
"valley_heuristic" use a heuristic to determine the change point,
and are significantly faster for large size instances (``max_n_prebins
> 20``). Trend "auto_asc_desc" is used to automatically select the best
monotonic trend between "ascending" and "descending". If None, then the
monotonic constraint is disabled.
n_subsamples : int or None (default=None)
Number of subsamples to fit the piecewise regression algorithm. If
None, all values are considered.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint. Option supported by solvers
"cp" and "mip".
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
the interquartile range based method, "zscore" to use the modified
Z-score method or "yquantile" to use the y-axis detector over
quantiles.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
solver : str, optional (default="auto")
The optimizer to solve the underlying mathematical optimization
problem. Supported solvers are `"ecos"
<https://github.com/embotech/ecos>`_, `"osqp"
<https://github.com/oxfordcontrol/osqp>`_, "direct", to choose the
direct solver, and "auto", to choose the most appropriate solver for
the problem. Version 0.16.1 added support for solvers
`"scs" <https://github.com/cvxgrp/scs>`_ and `"highs"
<https://github.com/ERGO-Code/HiGHS>`_.
h_epsilon : float (default=1.35)
The parameter h_epsilon, used when ``objective="huber"``, controls the
number of samples that should be classified as outliers.
quantile : float (default=0.5)
The parameter quantile is the q-th quantile to be used when
``objective="quantile"``.
regularization : str or None (default=None)
Type of regularization. Supported regularization types are "l1" (Lasso)
and "l2" (Ridge). If None, no regularization is applied.
reg_l1 : float (default=1.0)
L1 regularization term. Increasing this value will smooth the
regression model. Only applicable if ``regularization="l1"``.
reg_l2 : float (default=1.0)
L2 regularization term. Increasing this value will smooth the
regression model. Only applicable if ``regularization="l2"``.
random_state : int, RandomState instance or None (default=None)
If ``n_subsamples < n_samples``, controls the shuffling applied to the
data before applying the split.
verbose : bool (default=False)
Enable verbose output.
"""
def __init__(self, name="", estimator=None, objective="l2", degree=1,
continuous=True, continuous_deriv=True,
prebinning_method="cart", max_n_prebins=20,
min_prebin_size=0.05, min_n_bins=None, max_n_bins=None,
min_bin_size=None, max_bin_size=None, monotonic_trend="auto",
n_subsamples=None, max_pvalue=None,
max_pvalue_policy="consecutive", outlier_detector=None,
outlier_params=None, user_splits=None, user_splits_fixed=None,
special_codes=None, split_digits=None, solver="auto",
h_epsilon=1.35, quantile=0.5, regularization=None, reg_l1=1.0,
reg_l2=1.0, random_state=None, verbose=False):
super().__init__(name, estimator, objective, degree, continuous,
continuous_deriv, prebinning_method, max_n_prebins,
min_prebin_size, min_n_bins, max_n_bins, min_bin_size,
max_bin_size, monotonic_trend, n_subsamples,
max_pvalue, max_pvalue_policy, outlier_detector,
outlier_params, user_splits, user_splits_fixed,
special_codes, split_digits, solver, h_epsilon,
quantile, regularization, reg_l1, reg_l2,
random_state, verbose)
self._problem_type = "classification"
self._n_nonevent_special = None
self._n_nonevent_missing = None
self._n_event_special = None
self._n_event_missing = None
self._t_n_nonevent = None
self._t_n_event = None
def fit_transform(self, x, y, metric="woe", metric_special=0,
metric_missing=0, lb=None, ub=None, check_input=False):
"""Fit the optimal piecewise binning according to the given training
data, then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence and "event_rate" to
choose the event rate.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, lb, ub, check_input).transform(
x, metric, metric_special, metric_missing, lb, ub, check_input)
def transform(self, x, metric="woe", metric_special=0, metric_missing=0,
lb=None, ub=None, check_input=False):
"""Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal piecewise binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence and "event_rate" to
choose the event rate.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
self._check_is_fitted()
return transform_binary_target(
self._optb.splits, x, self._c, lb, ub, self._t_n_nonevent,
self._t_n_event, self._n_nonevent_special, self._n_event_special,
self._n_nonevent_missing, self._n_event_missing,
self.special_codes, metric, metric_special, metric_missing,
check_input)
def _fit(self, x, y, lb, ub, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal piecewise binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params(deep=False),
problem_type=self._problem_type)
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
_, _, _, _, _, sw_special, _] = self._fit_preprocessing(
x, y, check_input)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples - (n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: {}"
.format(n_outlier))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
# Fit estimator and compute event_rate = P[Y=1 | X=x]
time_estimator = time.perf_counter()
if self.estimator is None:
self.estimator = LogisticRegression()
if self.verbose:
logger.info("Pre-binning: set logistic regression as an "
"estimator.")
if self.verbose:
logger.info("Pre-binning: estimator fitting started.")
self.estimator.fit(x_clean.reshape(-1, 1), y_clean)
event_rate = self.estimator.predict_proba(x_clean.reshape(-1, 1))[:, 1]
self._time_estimator = time.perf_counter() - time_estimator
if self.verbose:
logger.info("Pre-binning: estimator terminated. Time {:.4f}s."
.format(self._time_estimator))
# Fit optimal binning algorithm for continuous target. Use optimal
# split points to compute optimal piecewise functions
self._fit_binning(x_clean, y_clean, event_rate, lb, ub)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
# Compute n_nonevent and n_event for special and missing
self._n_nonevent_special, self._n_event_special = target_info_special(
self.special_codes, x_special, y_special, sw_special)
missing_target_info = target_info(y_missing)
self._n_nonevent_missing = missing_target_info[0]
self._n_event_missing = missing_target_info[1]
bt = self._optb.binning_table.build(add_totals=False)
n_nonevent = bt["Non-event"].values[:-2]
n_event = bt["Event"].values[:-2]
n_nonevent = np.r_[n_nonevent, self._n_nonevent_special]
n_event = np.r_[n_event, self._n_event_special]
n_nonevent = np.r_[n_nonevent, self._n_nonevent_missing]
n_event = np.r_[n_event, self._n_event_missing]
self._t_n_nonevent = n_nonevent.sum()
self._t_n_event = n_event.sum()
# Compute metrics
if self.verbose:
logger.info("Post-processing: compute performance metrics.")
d_metrics = binary_metrics(
x_clean, y_clean, self._optb.splits, self._c, self._t_n_nonevent,
self._t_n_event, self._n_nonevent_special, self._n_event_special,
self._n_nonevent_missing, self._n_event_missing,
self.special_codes)
# Binning table
self._binning_table = PWBinningTable(
self.name, self.special_codes, self._optb.splits, self._c,
n_nonevent, n_event, x_clean.min(), x_clean.max(), d_metrics)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal piecewise binning terminated. Status: {}. "
"Time: {:.4f}s".format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (name='', estimator=None, objective='l2', degree=1, continuous=True, continuous_deriv=True, prebinning_method='cart', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend='auto', n_subsamples=None, max_pvalue=None, max_pvalue_policy='consecutive', outlier_detector=None, outlier_params=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, solver='auto', h_epsilon=1.35, quantile=0.5, regularization=None, reg_l1=1.0, reg_l2=1.0, random_state=None, verbose=False) |
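A minimal usage sketch of ``OptimalPWBinning`` may help read the entry above; the dataset, feature choice and printed attributes below are illustrative assumptions, not part of the original entry.

```python
# Hedged sketch: fit a piecewise-linear binning on one feature and inspect it.
from sklearn.datasets import load_breast_cancer
from optbinning import OptimalPWBinning

data = load_breast_cancer()
x = data.data[:, 0]          # first feature, used purely as an example variable
y = data.target

optb = OptimalPWBinning(name="feature_0", degree=1, monotonic_trend="auto")
optb.fit(x, y)

print(optb.status)                     # solver status after fitting
print(optb.binning_table.build())      # binning table as a pandas DataFrame
x_woe = optb.transform(x, metric="woe")
```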
4,844 | optbinning.binning.piecewise.binning | __init__ | null | def __init__(self, name="", estimator=None, objective="l2", degree=1,
continuous=True, continuous_deriv=True,
prebinning_method="cart", max_n_prebins=20,
min_prebin_size=0.05, min_n_bins=None, max_n_bins=None,
min_bin_size=None, max_bin_size=None, monotonic_trend="auto",
n_subsamples=None, max_pvalue=None,
max_pvalue_policy="consecutive", outlier_detector=None,
outlier_params=None, user_splits=None, user_splits_fixed=None,
special_codes=None, split_digits=None, solver="auto",
h_epsilon=1.35, quantile=0.5, regularization=None, reg_l1=1.0,
reg_l2=1.0, random_state=None, verbose=False):
super().__init__(name, estimator, objective, degree, continuous,
continuous_deriv, prebinning_method, max_n_prebins,
min_prebin_size, min_n_bins, max_n_bins, min_bin_size,
max_bin_size, monotonic_trend, n_subsamples,
max_pvalue, max_pvalue_policy, outlier_detector,
outlier_params, user_splits, user_splits_fixed,
special_codes, split_digits, solver, h_epsilon,
quantile, regularization, reg_l1, reg_l2,
random_state, verbose)
self._problem_type = "classification"
self._n_nonevent_special = None
self._n_nonevent_missing = None
self._n_event_special = None
self._n_event_missing = None
self._t_n_nonevent = None
self._t_n_event = None
| (self, name='', estimator=None, objective='l2', degree=1, continuous=True, continuous_deriv=True, prebinning_method='cart', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend='auto', n_subsamples=None, max_pvalue=None, max_pvalue_policy='consecutive', outlier_detector=None, outlier_params=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, solver='auto', h_epsilon=1.35, quantile=0.5, regularization=None, reg_l1=1.0, reg_l2=1.0, random_state=None, verbose=False) |
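As a hedged illustration of the constructor, any estimator exposing ``predict_proba`` can replace the default logistic regression; the gradient-boosting choice and parameter values below are assumptions for demonstration only.

```python
from sklearn.ensemble import GradientBoostingClassifier
from optbinning import OptimalPWBinning

optb = OptimalPWBinning(
    name="feature_0",
    estimator=GradientBoostingClassifier(n_estimators=50),  # must expose predict_proba
    objective="huber",      # robust objective; h_epsilon controls outlier sensitivity
    h_epsilon=1.35,
    degree=1,
    monotonic_trend="auto",
)
```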
4,851 | optbinning.binning.piecewise.binning | _fit | null | def _fit(self, x, y, lb, ub, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal piecewise binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params(deep=False),
problem_type=self._problem_type)
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
_, _, _, _, _, sw_special, _] = self._fit_preprocessing(
x, y, check_input)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples - (n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: {}"
.format(n_outlier))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
# Fit estimator and compute event_rate = P[Y=1 | X=x]
time_estimator = time.perf_counter()
if self.estimator is None:
self.estimator = LogisticRegression()
if self.verbose:
logger.info("Pre-binning: set logistic regression as an "
"estimator.")
if self.verbose:
logger.info("Pre-binning: estimator fitting started.")
self.estimator.fit(x_clean.reshape(-1, 1), y_clean)
event_rate = self.estimator.predict_proba(x_clean.reshape(-1, 1))[:, 1]
self._time_estimator = time.perf_counter() - time_estimator
if self.verbose:
logger.info("Pre-binning: estimator terminated. Time {:.4f}s."
.format(self._time_estimator))
# Fit optimal binning algorithm for continuous target. Use optimal
# split points to compute optimal piecewise functions
self._fit_binning(x_clean, y_clean, event_rate, lb, ub)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
# Compute n_nonevent and n_event for special and missing
self._n_nonevent_special, self._n_event_special = target_info_special(
self.special_codes, x_special, y_special, sw_special)
missing_target_info = target_info(y_missing)
self._n_nonevent_missing = missing_target_info[0]
self._n_event_missing = missing_target_info[1]
bt = self._optb.binning_table.build(add_totals=False)
n_nonevent = bt["Non-event"].values[:-2]
n_event = bt["Event"].values[:-2]
n_nonevent = np.r_[n_nonevent, self._n_nonevent_special]
n_event = np.r_[n_event, self._n_event_special]
n_nonevent = np.r_[n_nonevent, self._n_nonevent_missing]
n_event = np.r_[n_event, self._n_event_missing]
self._t_n_nonevent = n_nonevent.sum()
self._t_n_event = n_event.sum()
# Compute metrics
if self.verbose:
logger.info("Post-processing: compute performance metrics.")
d_metrics = binary_metrics(
x_clean, y_clean, self._optb.splits, self._c, self._t_n_nonevent,
self._t_n_event, self._n_nonevent_special, self._n_event_special,
self._n_nonevent_missing, self._n_event_missing,
self.special_codes)
# Binning table
self._binning_table = PWBinningTable(
self.name, self.special_codes, self._optb.splits, self._c,
n_nonevent, n_event, x_clean.min(), x_clean.max(), d_metrics)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal piecewise binning terminated. Status: {}. "
"Time: {:.4f}s".format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, x, y, lb, ub, check_input) |
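The core of the pre-binning step in ``_fit`` is the event-rate estimate produced by the estimator. The following standalone sketch (synthetic data, simplified; not the library internals verbatim) shows that step in isolation.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
x_clean = rng.normal(size=1000)
# Synthetic binary target whose event rate increases with x
y_clean = (rng.random(1000) < 1.0 / (1.0 + np.exp(-x_clean))).astype(int)

estimator = LogisticRegression()
estimator.fit(x_clean.reshape(-1, 1), y_clean)
# event_rate approximates P[Y=1 | X=x]; _fit then feeds it to the
# continuous-target optimal binning to place the piecewise split points.
event_rate = estimator.predict_proba(x_clean.reshape(-1, 1))[:, 1]
```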
4,863 | optbinning.binning.piecewise.binning | fit_transform | Fit the optimal piecewise binning according to the given training
data, then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence and "event_rate" to
choose the event rate.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
| def fit_transform(self, x, y, metric="woe", metric_special=0,
metric_missing=0, lb=None, ub=None, check_input=False):
"""Fit the optimal piecewise binning according to the given training
data, then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence and "event_rate" to
choose the event rate.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, lb, ub, check_input).transform(
x, metric, metric_special, metric_missing, lb, ub, check_input)
| (self, x, y, metric='woe', metric_special=0, metric_missing=0, lb=None, ub=None, check_input=False) |
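A hedged sketch of ``fit_transform`` on synthetic data, bounding the returned event rates; keyword names follow the docstring above.

```python
import numpy as np
from optbinning import OptimalPWBinning

rng = np.random.default_rng(42)
x = rng.normal(size=500)
y = (rng.random(500) < 1.0 / (1.0 + np.exp(-x))).astype(int)

optb = OptimalPWBinning(name="x")
# Fit and transform in one call; lb/ub clip the piecewise event-rate estimate.
x_event_rate = optb.fit_transform(x, y, metric="event_rate", lb=0.001, ub=0.999)
```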
4,869 | sklearn.utils._metadata_requests | set_transform_request | Request metadata passed to the ``transform`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``transform`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``transform``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``transform``.
lb : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``lb`` parameter in ``transform``.
metric : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric`` parameter in ``transform``.
metric_missing : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric_missing`` parameter in ``transform``.
metric_special : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric_special`` parameter in ``transform``.
ub : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``ub`` parameter in ``transform``.
x : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``x`` parameter in ``transform``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.binning.piecewise.binning.OptimalPWBinning, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', lb: Union[bool, NoneType, str] = '$UNCHANGED$', metric: Union[bool, NoneType, str] = '$UNCHANGED$', metric_missing: Union[bool, NoneType, str] = '$UNCHANGED$', metric_special: Union[bool, NoneType, str] = '$UNCHANGED$', ub: Union[bool, NoneType, str] = '$UNCHANGED$', x: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.binning.piecewise.binning.OptimalPWBinning |
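A sketch of how this generated method is typically used; it assumes scikit-learn >= 1.3 with metadata routing explicitly enabled, and the meta-estimator usage is an assumption rather than something stated in the entry.

```python
import sklearn
from optbinning import OptimalPWBinning

# Metadata routing must be switched on, otherwise the method raises RuntimeError.
sklearn.set_config(enable_metadata_routing=True)

optb = OptimalPWBinning(name="x")
# Request that a meta-estimator forward the `metric` argument to transform.
optb.set_transform_request(metric=True)
```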
4,870 | optbinning.binning.piecewise.binning | transform | Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal piecewise binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence and "event_rate" to
choose the event rate.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
| def transform(self, x, metric="woe", metric_special=0, metric_missing=0,
lb=None, ub=None, check_input=False):
"""Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal piecewise binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence and "event_rate" to
choose the event rate.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
self._check_is_fitted()
return transform_binary_target(
self._optb.splits, x, self._c, lb, ub, self._t_n_nonevent,
self._t_n_event, self._n_nonevent_special, self._n_event_special,
self._n_nonevent_missing, self._n_event_missing,
self.special_codes, metric, metric_special, metric_missing,
check_input)
| (self, x, metric='woe', metric_special=0, metric_missing=0, lb=None, ub=None, check_input=False) |
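A small self-contained sketch of ``transform`` on new data, including a missing value; by default missing values map to ``metric_missing`` (0 here). Data is synthetic.

```python
import numpy as np
from optbinning import OptimalPWBinning

rng = np.random.default_rng(7)
x = rng.normal(size=500)
y = (rng.random(500) < 1.0 / (1.0 + np.exp(-x))).astype(int)

optb = OptimalPWBinning(name="x").fit(x, y)

x_new = np.array([-1.5, 0.0, 2.3, np.nan])
woe = optb.transform(x_new, metric="woe")          # np.nan -> metric_missing (0)
er = optb.transform(x_new, metric="event_rate")
```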
4,871 | optbinning.binning.uncertainty.binning_scenarios | SBOptimalBinning | Scenario-based stochastic optimal binning of a numerical variable with
respect to a binary target.
Extensive form of the stochastic optimal binning given a finite number of
scenarios. The goal is to maximize the expected IV obtaining a solution
feasible for all scenarios.
Parameters
----------
name : str, optional (default="")
The variable name.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecistionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str or None, optional (default=None)
The **event rate** monotonic trend. Supported trends are "ascending",
"descending", "concave", "convex", "peak" and "valley". If None, then
the monotonic constraint is disabled.
min_event_rate_diff : float, optional (default=0)
The minimum event rate difference between consecutive bins.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
class_weight : dict, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. Check
`sklearn.tree.DecisionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
| class SBOptimalBinning(OptimalBinning):
"""Scenario-based stochastic optimal binning of a numerical variable with
respect to a binary target.
Extensive form of the stochastic optimal binning given a finite number of
scenarios. The goal is to maximize the expected IV obtaining a solution
feasible for all scenarios.
Parameters
----------
name : str, optional (default="")
The variable name.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecistionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str or None, optional (default=None)
The **event rate** monotonic trend. Supported trends are "ascending",
"descending", "concave", "convex", "peak" and "valley". If None, then
the monotonic constraint is disabled.
min_event_rate_diff : float, optional (default=0)
The minimum event rate difference between consecutive bins.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
class_weight : dict, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. Check
`sklearn.tree.DecisionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
"""
def __init__(self, name="", prebinning_method="cart", max_n_prebins=20,
min_prebin_size=0.05, min_n_bins=None, max_n_bins=None,
min_bin_size=None, max_bin_size=None, monotonic_trend=None,
min_event_rate_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", class_weight=None,
user_splits=None, user_splits_fixed=None, special_codes=None,
split_digits=None, time_limit=100, verbose=False):
self.name = name
self.dtype = "numerical"
self.prebinning_method = prebinning_method
self.solver = "cp"
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.monotonic_trend = monotonic_trend
self.min_event_rate_diff = min_event_rate_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.class_weight = class_weight
self.user_splits = user_splits
self.user_splits_fixed = user_splits_fixed
self.special_codes = special_codes
self.split_digits = split_digits
self.time_limit = time_limit
self.verbose = verbose
# auxiliary
self._categories = None
self._cat_others = None
self._n_scenarios = None
self._n_event = None
self._n_nonevent = None
self._n_nonevent_missing = None
self._n_event_missing = None
self._n_nonevent_special = None
self._n_event_special = None
self._problem_type = "classification"
self._user_splits = user_splits
self._user_splits_fixed = user_splits_fixed
# info
self._binning_table = None
self._binning_tables = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples_scenario = None
self._n_samples = None
self._optimizer = None
self._splits_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
def fit(self, X, Y, weights=None, check_input=False):
"""Fit the optimal binning given a list of scenarios.
Parameters
----------
X : array-like, shape = (n_scenarios,)
List of training vectors, where n_scenarios is the number of
scenarios.
Y : array-like, shape = (n_scenarios,)
List of target vectors relative to X.
weights : array-like, shape = (n_scenarios,)
Scenarios weights. If None, then scenarios are equally weighted.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : SBOptimalBinning
Fitted optimal binning.
"""
return self._fit(X, Y, weights, check_input)
def fit_transform(self, x, X, Y, weights=None, metric="woe",
metric_special=0, metric_missing=0, show_digits=2,
check_input=False):
"""Fit the optimal binning given a list of scenarios, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
X : array-like, shape = (n_scenarios,)
List of training vectors, where n_scenarios is the number of
scenarios.
Y : array-like, shape = (n_scenarios,)
List of target vectors relative to X.
weights : array-like, shape = (n_scenarios,)
Scenarios weights. If None, then scenarios are equally weighted.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(X, Y, weights, check_input).transform(
x, metric, metric_special, metric_missing, show_digits,
check_input)
def transform(self, x, metric="woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero WoE or event rate.
"""
self._check_is_fitted()
return transform_binary_target(self._splits_optimal, self.dtype, x,
self._n_nonevent, self._n_event,
self.special_codes, self._categories,
self._cat_others, None,
metric, metric_special, metric_missing,
self.user_splits, show_digits,
check_input)
def _fit(self, X, Y, weights, check_input):
time_init = time.perf_counter()
# Check parameters and input arrays
_check_parameters(**self.get_params())
_check_X_Y_weights(X, Y, weights)
self._n_scenarios = len(X)
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
time_preprocessing = time.perf_counter()
self._n_samples_scenario = [len(x) for x in X]
self._n_samples = sum(self._n_samples_scenario)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
w] = split_data_scenarios(X, Y, weights, self.special_codes,
check_input)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
if self.user_splits is not None:
user_splits = check_array(
self.user_splits, ensure_2d=False, dtype=None,
force_all_finite=True)
if len(set(user_splits)) != len(user_splits):
raise ValueError("User splits are not unique.")
sorted_idx = np.argsort(user_splits)
user_splits = user_splits[sorted_idx]
if self.user_splits_fixed is not None:
self.user_splits_fixed = np.asarray(
self.user_splits_fixed)[sorted_idx]
splits, n_nonevent, n_event = self._prebinning_refinement(
user_splits, x_clean, y_clean, y_missing, y_special)
else:
splits, n_nonevent, n_event = self._fit_prebinning(
w, x_clean, y_clean, y_missing, y_special, self.class_weight)
self._n_prebins = len(n_nonevent)
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning: number of refinements: {}"
.format(self._n_refinements))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
self._fit_optimizer(splits, n_nonevent, n_event, weights)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
self._n_nonevent = 0
self._n_event = 0
self._binning_tables = []
min_x = np.inf
max_x = -np.inf
for s in range(self._n_scenarios):
min_xs = x_clean[s].min()
max_xs = x_clean[s].max()
if min_xs < min_x:
min_x = min_xs
if max_xs > max_x:
max_x = max_xs
s_n_nonevent, s_n_event = bin_info(
self._solution, n_nonevent[:, s], n_event[:, s],
self._n_nonevent_missing[s], self._n_event_missing[s],
self._n_nonevent_special[s], self._n_event_special[s], None,
None, [])
self._n_nonevent += s_n_nonevent
self._n_event += s_n_event
binning_table = BinningTable(
self.name, self.dtype, self.special_codes,
self._splits_optimal, s_n_nonevent, s_n_event, min_xs, max_xs,
None, None, self.user_splits)
self._binning_tables.append(binning_table)
self._binning_table = BinningTable(
self.name, self.dtype, self.special_codes, self._splits_optimal,
self._n_nonevent, self._n_event, min_x, max_x, None, None,
self.user_splits)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
def _fit_prebinning(self, weights, x_clean, y_clean, y_missing, y_special,
class_weight=None):
x = []
y = []
for s in range(self._n_scenarios):
x.extend(x_clean[s])
y.extend(y_clean[s])
x = np.array(x)
y = np.array(y)
min_bin_size = int(np.ceil(self.min_prebin_size * self._n_samples))
prebinning = PreBinning(method=self.prebinning_method,
n_bins=self.max_n_prebins,
min_bin_size=min_bin_size,
problem_type=self._problem_type,
class_weight=class_weight).fit(x, y, weights)
return self._prebinning_refinement(prebinning.splits, x_clean, y_clean,
y_missing, y_special)
def _prebinning_refinement(self, splits_prebinning, x, y, y_missing,
y_special):
self._n_nonevent_special = []
self._n_event_special = []
self._n_nonevent_missing = []
self._n_event_missing = []
for s in range(self._n_scenarios):
s_n_nonevent, s_n_event = target_info(y_special[s])
m_n_nonevent, m_n_event = target_info(y_missing[s])
self._n_nonevent_special.append(s_n_nonevent)
self._n_event_special.append(s_n_event)
self._n_nonevent_missing.append(m_n_nonevent)
self._n_event_missing.append(m_n_event)
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
if self.split_digits is not None:
splits_prebinning = np.round(splits_prebinning, self.split_digits)
splits_prebinning, n_nonevent, n_event = self._compute_prebins(
splits_prebinning, x, y)
return splits_prebinning, n_nonevent, n_event
def _compute_prebins(self, splits_prebinning, x, y):
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
n_bins = n_splits + 1
n_nonevent = np.empty((n_bins, self._n_scenarios), dtype=np.int64)
n_event = np.empty((n_bins, self._n_scenarios), dtype=np.int64)
mask_remove = np.zeros(n_bins, dtype=bool)
for s in range(self._n_scenarios):
y0 = (y[s] == 0)
y1 = ~y0
indices = np.digitize(x[s], splits_prebinning, right=False)
for i in range(n_bins):
mask = (indices == i)
n_nonevent[i, s] = np.count_nonzero(y0 & mask)
n_event[i, s] = np.count_nonzero(y1 & mask)
mask_remove |= (n_nonevent[:, s] == 0) | (n_event[:, s] == 0)
if np.any(mask_remove):
self._n_refinements += 1
mask_splits = np.concatenate(
[mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
if self.user_splits_fixed is not None:
user_splits_fixed = np.asarray(self._user_splits_fixed)
user_splits = np.asarray(self._user_splits)
fixed_remove = user_splits_fixed & mask_splits
if any(fixed_remove):
raise ValueError("Fixed user_splits {} are removed "
"because produce pure prebins. Provide "
"different splits to be fixed."
.format(user_splits[fixed_remove]))
# Update boolean array of fixed user splits.
self._user_splits_fixed = user_splits_fixed[~mask_splits]
self._user_splits = user_splits[~mask_splits]
splits = splits_prebinning[~mask_splits]
if self.verbose:
logger.info("Pre-binning: number prebins removed: {}"
.format(np.count_nonzero(mask_remove)))
[splits_prebinning, n_nonevent, n_event] = self._compute_prebins(
splits, x, y)
return splits_prebinning, n_nonevent, n_event
def _fit_optimizer(self, splits, n_nonevent, n_event, weights):
time_init = time.perf_counter()
if not len(n_nonevent):
self._status = "OPTIMAL"
self._splits_optimal = splits
self._solution = np.zeros(len(splits), dtype=bool)
if self.verbose:
logger.warning("Optimizer: no bins after pre-binning.")
logger.warning("Optimizer: solver not run.")
logger.info("Optimizer terminated. Time: 0s")
return
if self.min_bin_size is not None:
min_bin_size = [int(np.ceil(
self.min_bin_size * self._n_samples_scenario[s]))
for s in range(self._n_scenarios)]
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = [int(np.ceil(
self.max_bin_size * self._n_samples_scenario[s]))
for s in range(self._n_scenarios)]
else:
max_bin_size = self.max_bin_size
optimizer = BinningCP(self.monotonic_trend, self.min_n_bins,
self.max_n_bins, min_bin_size, max_bin_size,
None, None, None, None, self.min_event_rate_diff,
self.max_pvalue, self.max_pvalue_policy, None,
self.user_splits_fixed, self.time_limit)
if weights is None:
weights = np.ones(self._n_scenarios, int)
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model_scenarios(n_nonevent, n_event, weights)
status, solution = optimizer.solve()
if self.verbose:
logger.info("Optimizer: solve...")
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
self._splits_optimal = splits[solution[:-1]]
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
def binning_table_scenario(self, scenario_id):
"""Return the instantiated binning table corresponding to
``scenario_id``. Please refer to :ref:`Binning table: binary target`.
Parameters
----------
scenario_id : int
Scenario identifier.
Returns
-------
binning_table : BinningTable
"""
self._check_is_fitted()
if (not isinstance(scenario_id, numbers.Integral) or
not 0 <= scenario_id < self._n_scenarios):
raise ValueError("scenario_id must be < {}; got {}."
.format(self._n_scenarios, scenario_id))
return self._binning_tables[scenario_id]
@property
def splits(self):
"""List of optimal split points.
Returns
-------
splits : numpy.ndarray
"""
self._check_is_fitted()
return self._splits_optimal
| (name='', prebinning_method='cart', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend=None, min_event_rate_diff=0, max_pvalue=None, max_pvalue_policy='consecutive', class_weight=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, time_limit=100, verbose=False) |
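A hedged usage sketch of ``SBOptimalBinning`` over three synthetic scenarios; the top-level import and data are assumptions, and a CP solver backend (e.g. ortools) must be installed for the optimization to run.

```python
import numpy as np
from optbinning import SBOptimalBinning

rng = np.random.default_rng(0)
X, Y = [], []
for shift in (-0.5, 0.0, 0.5):                    # one entry per scenario
    x_s = rng.normal(loc=shift, size=400)
    y_s = (rng.random(400) < 1.0 / (1.0 + np.exp(-x_s))).astype(int)
    X.append(x_s)
    Y.append(y_s)

sb = SBOptimalBinning(name="x", monotonic_trend="ascending")
sb.fit(X, Y)                                      # weights=None -> equal weights
print(sb.splits)                                  # splits shared by all scenarios
bt0 = sb.binning_table_scenario(0).build()        # per-scenario binning table
```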
4,873 | optbinning.binning.uncertainty.binning_scenarios | __init__ | null | def __init__(self, name="", prebinning_method="cart", max_n_prebins=20,
min_prebin_size=0.05, min_n_bins=None, max_n_bins=None,
min_bin_size=None, max_bin_size=None, monotonic_trend=None,
min_event_rate_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", class_weight=None,
user_splits=None, user_splits_fixed=None, special_codes=None,
split_digits=None, time_limit=100, verbose=False):
self.name = name
self.dtype = "numerical"
self.prebinning_method = prebinning_method
self.solver = "cp"
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.monotonic_trend = monotonic_trend
self.min_event_rate_diff = min_event_rate_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.class_weight = class_weight
self.user_splits = user_splits
self.user_splits_fixed = user_splits_fixed
self.special_codes = special_codes
self.split_digits = split_digits
self.time_limit = time_limit
self.verbose = verbose
# auxiliary
self._categories = None
self._cat_others = None
self._n_scenarios = None
self._n_event = None
self._n_nonevent = None
self._n_nonevent_missing = None
self._n_event_missing = None
self._n_nonevent_special = None
self._n_event_special = None
self._problem_type = "classification"
self._user_splits = user_splits
self._user_splits_fixed = user_splits_fixed
# info
self._binning_table = None
self._binning_tables = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples_scenario = None
self._n_samples = None
self._optimizer = None
self._splits_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
| (self, name='', prebinning_method='cart', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend=None, min_event_rate_diff=0, max_pvalue=None, max_pvalue_policy='consecutive', class_weight=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, time_limit=100, verbose=False) |
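Usage sketch (not part of the extracted source): a minimal, hypothetical construction of ``SBOptimalBinning`` using a few of the parameters accepted by this constructor; the variable name and parameter values are illustrative assumptions.

```python
# Illustrative only: construct a scenario-based optimal binning object.
from optbinning.binning.uncertainty.binning_scenarios import SBOptimalBinning

optb = SBOptimalBinning(
    name="x",                      # hypothetical variable name
    max_n_prebins=20,
    min_prebin_size=0.05,
    monotonic_trend="ascending",   # assumed to be a supported trend
    time_limit=100,
)
```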
4,880 | optbinning.binning.uncertainty.binning_scenarios | _compute_prebins | null | def _compute_prebins(self, splits_prebinning, x, y):
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
n_bins = n_splits + 1
n_nonevent = np.empty((n_bins, self._n_scenarios), dtype=np.int64)
n_event = np.empty((n_bins, self._n_scenarios), dtype=np.int64)
mask_remove = np.zeros(n_bins, dtype=bool)
for s in range(self._n_scenarios):
y0 = (y[s] == 0)
y1 = ~y0
indices = np.digitize(x[s], splits_prebinning, right=False)
for i in range(n_bins):
mask = (indices == i)
n_nonevent[i, s] = np.count_nonzero(y0 & mask)
n_event[i, s] = np.count_nonzero(y1 & mask)
mask_remove |= (n_nonevent[:, s] == 0) | (n_event[:, s] == 0)
if np.any(mask_remove):
self._n_refinements += 1
mask_splits = np.concatenate(
[mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
if self.user_splits_fixed is not None:
user_splits_fixed = np.asarray(self._user_splits_fixed)
user_splits = np.asarray(self._user_splits)
fixed_remove = user_splits_fixed & mask_splits
if any(fixed_remove):
raise ValueError("Fixed user_splits {} are removed "
"because produce pure prebins. Provide "
"different splits to be fixed."
.format(user_splits[fixed_remove]))
# Update boolean array of fixed user splits.
self._user_splits_fixed = user_splits_fixed[~mask_splits]
self._user_splits = user_splits[~mask_splits]
splits = splits_prebinning[~mask_splits]
if self.verbose:
logger.info("Pre-binning: number prebins removed: {}"
.format(np.count_nonzero(mask_remove)))
[splits_prebinning, n_nonevent, n_event] = self._compute_prebins(
splits, x, y)
return splits_prebinning, n_nonevent, n_event
| (self, splits_prebinning, x, y) |
4,881 | optbinning.binning.uncertainty.binning_scenarios | _fit | null | def _fit(self, X, Y, weights, check_input):
time_init = time.perf_counter()
# Check parameters and input arrays
_check_parameters(**self.get_params())
_check_X_Y_weights(X, Y, weights)
self._n_scenarios = len(X)
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
time_preprocessing = time.perf_counter()
self._n_samples_scenario = [len(x) for x in X]
self._n_samples = sum(self._n_samples_scenario)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
w] = split_data_scenarios(X, Y, weights, self.special_codes,
check_input)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
if self.user_splits is not None:
user_splits = check_array(
self.user_splits, ensure_2d=False, dtype=None,
force_all_finite=True)
if len(set(user_splits)) != len(user_splits):
raise ValueError("User splits are not unique.")
sorted_idx = np.argsort(user_splits)
user_splits = user_splits[sorted_idx]
if self.user_splits_fixed is not None:
self.user_splits_fixed = np.asarray(
self.user_splits_fixed)[sorted_idx]
splits, n_nonevent, n_event = self._prebinning_refinement(
user_splits, x_clean, y_clean, y_missing, y_special)
else:
splits, n_nonevent, n_event = self._fit_prebinning(
w, x_clean, y_clean, y_missing, y_special, self.class_weight)
self._n_prebins = len(n_nonevent)
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning: number of refinements: {}"
.format(self._n_refinements))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
self._fit_optimizer(splits, n_nonevent, n_event, weights)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
self._n_nonevent = 0
self._n_event = 0
self._binning_tables = []
min_x = np.inf
max_x = -np.inf
for s in range(self._n_scenarios):
min_xs = x_clean[s].min()
max_xs = x_clean[s].max()
if min_xs < min_x:
min_x = min_xs
if max_xs > max_x:
max_x = max_xs
s_n_nonevent, s_n_event = bin_info(
self._solution, n_nonevent[:, s], n_event[:, s],
self._n_nonevent_missing[s], self._n_event_missing[s],
self._n_nonevent_special[s], self._n_event_special[s], None,
None, [])
self._n_nonevent += s_n_nonevent
self._n_event += s_n_event
binning_table = BinningTable(
self.name, self.dtype, self.special_codes,
self._splits_optimal, s_n_nonevent, s_n_event, min_xs, max_xs,
None, None, self.user_splits)
self._binning_tables.append(binning_table)
self._binning_table = BinningTable(
self.name, self.dtype, self.special_codes, self._splits_optimal,
self._n_nonevent, self._n_event, min_x, max_x, None, None,
self.user_splits)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, X, Y, weights, check_input) |
4,882 | optbinning.binning.uncertainty.binning_scenarios | _fit_optimizer | null | def _fit_optimizer(self, splits, n_nonevent, n_event, weights):
time_init = time.perf_counter()
if not len(n_nonevent):
self._status = "OPTIMAL"
self._splits_optimal = splits
self._solution = np.zeros(len(splits), dtype=bool)
if self.verbose:
logger.warning("Optimizer: no bins after pre-binning.")
logger.warning("Optimizer: solver not run.")
logger.info("Optimizer terminated. Time: 0s")
return
if self.min_bin_size is not None:
min_bin_size = [int(np.ceil(
self.min_bin_size * self._n_samples_scenario[s]))
for s in range(self._n_scenarios)]
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = [int(np.ceil(
self.max_bin_size * self._n_samples_scenario[s]))
for s in range(self._n_scenarios)]
else:
max_bin_size = self.max_bin_size
optimizer = BinningCP(self.monotonic_trend, self.min_n_bins,
self.max_n_bins, min_bin_size, max_bin_size,
None, None, None, None, self.min_event_rate_diff,
self.max_pvalue, self.max_pvalue_policy, None,
self.user_splits_fixed, self.time_limit)
if weights is None:
weights = np.ones(self._n_scenarios, int)
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model_scenarios(n_nonevent, n_event, weights)
status, solution = optimizer.solve()
if self.verbose:
logger.info("Optimizer: solve...")
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
self._splits_optimal = splits[solution[:-1]]
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
| (self, splits, n_nonevent, n_event, weights) |
4,883 | optbinning.binning.uncertainty.binning_scenarios | _fit_prebinning | null | def _fit_prebinning(self, weights, x_clean, y_clean, y_missing, y_special,
class_weight=None):
x = []
y = []
for s in range(self._n_scenarios):
x.extend(x_clean[s])
y.extend(y_clean[s])
x = np.array(x)
y = np.array(y)
min_bin_size = int(np.ceil(self.min_prebin_size * self._n_samples))
prebinning = PreBinning(method=self.prebinning_method,
n_bins=self.max_n_prebins,
min_bin_size=min_bin_size,
problem_type=self._problem_type,
class_weight=class_weight).fit(x, y, weights)
return self._prebinning_refinement(prebinning.splits, x_clean, y_clean,
y_missing, y_special)
| (self, weights, x_clean, y_clean, y_missing, y_special, class_weight=None) |
4,888 | optbinning.binning.uncertainty.binning_scenarios | _prebinning_refinement | null | def _prebinning_refinement(self, splits_prebinning, x, y, y_missing,
y_special):
self._n_nonevent_special = []
self._n_event_special = []
self._n_nonevent_missing = []
self._n_event_missing = []
for s in range(self._n_scenarios):
s_n_nonevent, s_n_event = target_info(y_special[s])
m_n_nonevent, m_n_event = target_info(y_missing[s])
self._n_nonevent_special.append(s_n_nonevent)
self._n_event_special.append(s_n_event)
self._n_nonevent_missing.append(m_n_nonevent)
self._n_event_missing.append(m_n_event)
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
if self.split_digits is not None:
splits_prebinning = np.round(splits_prebinning, self.split_digits)
splits_prebinning, n_nonevent, n_event = self._compute_prebins(
splits_prebinning, x, y)
return splits_prebinning, n_nonevent, n_event
| (self, splits_prebinning, x, y, y_missing, y_special) |
4,893 | optbinning.binning.uncertainty.binning_scenarios | binning_table_scenario | Return the instantiated binning table corresponding to
``scenario_id``. Please refer to :ref:`Binning table: binary target`.
Parameters
----------
scenario_id : int
Scenario identifier.
Returns
-------
binning_table : BinningTable
| def binning_table_scenario(self, scenario_id):
"""Return the instantiated binning table corresponding to
``scenario_id``. Please refer to :ref:`Binning table: binary target`.
Parameters
----------
scenario_id : int
Scenario identifier.
Returns
-------
binning_table : BinningTable
"""
self._check_is_fitted()
if (not isinstance(scenario_id, numbers.Integral) or
not 0 <= scenario_id < self._n_scenarios):
raise ValueError("scenario_id must be < {}; got {}."
.format(self._n_scenarios, scenario_id))
return self._binning_tables[scenario_id]
| (self, scenario_id) |
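Usage sketch (assumes ``optb`` is an ``SBOptimalBinning`` instance already fitted on at least two scenarios; see the fit sketch after the next entry):

```python
# Per-scenario binning table for the first scenario.
bt0 = optb.binning_table_scenario(scenario_id=0)
print(bt0.build())  # BinningTable.build() is referenced elsewhere in this module
```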
4,894 | optbinning.binning.uncertainty.binning_scenarios | fit | Fit the optimal binning given a list of scenarios.
Parameters
----------
X : array-like, shape = (n_scenarios,)
List of training vectors, where n_scenarios is the number of
scenarios.
Y : array-like, shape = (n_scenarios,)
List of target vectors relative to X.
weights : array-like, shape = (n_scenarios,)
Scenario weights. If None, then scenarios are equally weighted.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : SBOptimalBinning
Fitted optimal binning.
| def fit(self, X, Y, weights=None, check_input=False):
"""Fit the optimal binning given a list of scenarios.
Parameters
----------
X : array-like, shape = (n_scenarios,)
List of training vectors, where n_scenarios is the number of
scenarios.
Y : array-like, shape = (n_scenarios,)
List of target vectors relative to X.
weights : array-like, shape = (n_scenarios,)
Scenario weights. If None, then scenarios are equally weighted.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : SBOptimalBinning
Fitted optimal binning.
"""
return self._fit(X, Y, weights, check_input)
| (self, X, Y, weights=None, check_input=False) |
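Usage sketch with synthetic data (the scenario distributions and target rule below are assumptions, not taken from the library):

```python
import numpy as np
from optbinning.binning.uncertainty.binning_scenarios import SBOptimalBinning

rng = np.random.RandomState(42)
# Three hypothetical scenarios with shifted feature distributions.
X = [rng.normal(loc=mu, size=2000) for mu in (0.0, 0.5, 1.0)]
# Binary targets loosely correlated with the feature.
Y = [((x + rng.normal(scale=1.0, size=x.size)) > 0.5).astype(int) for x in X]

optb = SBOptimalBinning(name="x", monotonic_trend="ascending")
optb.fit(X, Y)            # weights=None -> scenarios equally weighted
print(optb.splits)        # optimal split points shared across scenarios
```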
4,895 | optbinning.binning.uncertainty.binning_scenarios | fit_transform | Fit the optimal binning given a list of scenarios, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
X : array-like, shape = (n_scenarios,)
List of training vectors, where n_scenarios is the number of
scenarios.
Y : array-like, shape = (n_scenarios,)
List of target vectors relative to X.
weights : array-like, shape = (n_scenarios,)
Scenario weights. If None, then scenarios are equally weighted.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
| def fit_transform(self, x, X, Y, weights=None, metric="woe",
metric_special=0, metric_missing=0, show_digits=2,
check_input=False):
"""Fit the optimal binning given a list of scenarios, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
X : array-like, shape = (n_scenarios,)
List of training vectors, where n_scenarios is the number of
scenarios.
Y : array-like, shape = (n_scenarios,)
List of target vectors relative to X.
weights : array-like, shape = (n_scenarios,)
Scenario weights. If None, then scenarios are equally weighted.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(X, Y, weights, check_input).transform(
x, metric, metric_special, metric_missing, show_digits,
check_input)
| (self, x, X, Y, weights=None, metric='woe', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
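Sketch of the combined call, reusing ``X`` and ``Y`` from the fit sketch above; choosing ``X[0]`` as the vector to transform is an arbitrary assumption.

```python
# Fit on all scenarios, then express the first scenario's data as WoE.
x_woe = optb.fit_transform(x=X[0], X=X, Y=Y, metric="woe")
```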
4,900 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``fit``.
weights : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``weights`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.binning.uncertainty.binning_scenarios.SBOptimalBinning, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', weights: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.binning.uncertainty.binning_scenarios.SBOptimalBinning |
4,904 | optbinning.binning.uncertainty.binning_scenarios | transform | Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero WoE or event rate.
| def transform(self, x, metric="woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero WoE or event rate.
"""
self._check_is_fitted()
return transform_binary_target(self._splits_optimal, self.dtype, x,
self._n_nonevent, self._n_event,
self.special_codes, self._categories,
self._cat_others, None,
metric, metric_special, metric_missing,
self.user_splits, show_digits,
check_input)
| (self, x, metric='woe', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
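Sketch of transforming data with the different supported metrics after fitting (continuing the example above):

```python
woe = optb.transform(X[0], metric="woe")
rate = optb.transform(X[0], metric="event_rate")
labels = optb.transform(X[0], metric="bins", show_digits=3)
```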
4,905 | optbinning.scorecard.scorecard | Scorecard | Scorecard development given a binary or continuous target dtype.
Parameters
----------
binning_process : object
A ``BinningProcess`` instance.
estimator : object
A supervised learning estimator with a ``fit`` and ``predict`` method
that provides information about feature coefficients through a
``coef_`` attribute. For binary classification, the estimator must
include a ``predict_proba`` method.
scaling_method : str or None (default=None)
The scaling method to control the range of the scores. Supported
methods are "pdo_odds" and "min_max". Method "pdo_odds" is only
applicable for binary classification. If None, no scaling is applied.
scaling_method_params : dict or None (default=None)
Dictionary with scaling method parameters. If
``scaling_method="pdo_odds"`` parameters required are: "pdo", "odds",
and "scorecard_points". If ``scaling_method="min_max"`` parameters
required are "min" and "max". If ``scaling_method=None``, this
parameter is not used.
intercept_based : bool (default=False)
Build an intercept-based scorecard. An intercept-based scorecard modifies
the original scorecard by setting the smallest point for each variable
to zero and updating the intercept accordingly.
reverse_scorecard : bool (default=False)
Whether to change the sense of the relationship between predictions and
scorecard points to ascending/descending.
rounding : bool (default=False)
Whether to round scorecard points. If ``scaling_method="min_max"`` a
mixed-integer programming problem is solved to guarantee the
minimum/maximum score after rounding. Otherwise, the scorecard points
are rounded to the nearest integer.
verbose : bool (default=False)
Enable verbose output.
Attributes
----------
binning_process_ : object
The external binning process.
estimator_ : object
The external estimator fit on the reduced dataset.
intercept_ : float
The intercept if ``intercept_based=True``.
| class Scorecard(Base, BaseEstimator):
"""Scorecard development given a binary or continuous target dtype.
Parameters
----------
binning_process : object
A ``BinningProcess`` instance.
estimator : object
A supervised learning estimator with a ``fit`` and ``predict`` method
that provides information about feature coefficients through a
``coef_`` attribute. For binary classification, the estimator must
include a ``predict_proba`` method.
scaling_method : str or None (default=None)
The scaling method to control the range of the scores. Supported
methods are "pdo_odds" and "min_max". Method "pdo_odds" is only
applicable for binary classification. If None, no scaling is applied.
scaling_method_params : dict or None (default=None)
Dictionary with scaling method parameters. If
``scaling_method="pdo_odds"`` parameters required are: "pdo", "odds",
and "scorecard_points". If ``scaling_method="min_max"`` parameters
required are "min" and "max". If ``scaling_method=None``, this
parameter is not used.
intercept_based : bool (default=False)
Build an intercept-based scorecard. An intercept-based scorecard modifies
the original scorecard by setting the smallest point for each variable
to zero and updating the intercept accordingly.
reverse_scorecard : bool (default=False)
Whether to change the sense of the relationship between predictions and
scorecard points to ascending/descending.
rounding : bool (default=False)
Whether to round scorecard points. If ``scaling_method="min_max"`` a
mixed-integer programming problem is solved to guarantee the
minimum/maximum score after rounding. Otherwise, the scorecard points
are rounded to the nearest integer.
verbose : bool (default=False)
Enable verbose output.
Attributes
----------
binning_process_ : object
The external binning process.
estimator_ : object
The external estimator fit on the reduced dataset.
intercept_ : float
The intercept if ``intercept_based=True``.
"""
def __init__(self, binning_process, estimator, scaling_method=None,
scaling_method_params=None, intercept_based=False,
reverse_scorecard=False, rounding=False, verbose=False):
self.binning_process = binning_process
self.estimator = estimator
self.scaling_method = scaling_method
self.scaling_method_params = scaling_method_params
self.intercept_based = intercept_based
self.reverse_scorecard = reverse_scorecard
self.rounding = rounding
self.verbose = verbose
# attributes
self.binning_process_ = None
self.estimator_ = None
self.intercept_ = 0
self._metric_special = None
self._metric_missing = None
# auxiliary
self._target_dtype = None
# timing
self._time_total = None
self._time_binning_process = None
self._time_estimator = None
self._time_build_scorecard = None
self._time_rounding = None
self._is_fitted = False
def fit(self, X, y, sample_weight=None, metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Fit scorecard.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
This option is only available for a binary target.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
check_input : bool (default=False)
Whether to check input arrays.
show_digits : int, optional (default=2)
The number of significant digits of the bin column.
Returns
-------
self : Scorecard
Fitted scorecard.
"""
return self._fit(X, y, sample_weight, metric_special, metric_missing,
show_digits, check_input)
def information(self, print_level=1):
"""Print overview information about the options settings and
statistics.
Parameters
----------
print_level : int (default=1)
Level of details.
"""
self._check_is_fitted()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
n_numerical = list(
self.binning_process_._variable_dtypes.values()).count("numerical")
n_categorical = self.binning_process_._n_variables - n_numerical
n_selected = np.count_nonzero(self.binning_process_._support)
dict_user_options = self.get_params(deep=False)
print_scorecard_information(
print_level, self.binning_process_._n_samples,
self.binning_process_._n_variables, self._target_dtype,
n_numerical, n_categorical, n_selected, self._time_total,
self._time_binning_process, self._time_estimator,
self._time_build_scorecard, self._time_rounding, dict_user_options)
def predict(self, X):
"""Predict using the fitted underlying estimator and the reduced
dataset.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
Returns
-------
pred: array of shape (n_samples)
The predicted target values.
"""
X_t = self._transform(
X=X, metric=None, metric_special=self._metric_special,
metric_missing=self._metric_missing)
return self.estimator_.predict(X_t)
def predict_proba(self, X):
"""Predict class probabilities using the fitted underlying estimator
and the reduced dataset.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
Returns
-------
p: array of shape (n_samples, n_classes)
The class probabilities of the input samples.
"""
X_t = self._transform(
X=X, metric=None, metric_special=self._metric_special,
metric_missing=self._metric_missing)
return self.estimator_.predict_proba(X_t)
def decision_function(self, X):
"""Predict confidence scores for samples.
The confidence score for a sample is proportional to the signed
distance of that sample to the hyperplane.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
The data matrix for which we want to get the confidence scores.
Returns
-------
scores : array of shape (n_samples, n_classes)
Confidence scores per (n_samples, n_classes) combination.
"""
X_t = self._transform(
X=X, metric=None, metric_special=self._metric_special,
metric_missing=self._metric_missing)
return self.estimator_.decision_function(X_t)
def score(self, X):
"""Score of the dataset.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
Returns
-------
score: array of shape (n_samples)
The score of the input samples.
"""
X_t = self._transform(
X=X, metric="indices", metric_special="empirical",
metric_missing="empirical")
score_ = np.zeros(X_t.shape[0])
selected_variables = self.binning_process_.get_support(names=True)
for variable in selected_variables:
mask = self._df_scorecard.Variable == variable
points = self._df_scorecard[mask].Points.values
score_ += points[X_t[variable]]
return score_ + self.intercept_
def table(self, style="summary"):
"""Scorecard table.
Parameters
----------
style : str, optional (default="summary")
Scorecard's style. Supported styles are "summary" and "detailed".
Summary only includes columns variable, bin description and points.
Detailed contains additional columns with bin information and
estimator coefficients.
Returns
-------
table : pandas.DataFrame
The scorecard table.
"""
self._check_is_fitted()
if style not in ("summary", "detailed"):
raise ValueError('Invalid value for style. Allowed string '
'values are "summary" and "detailed".')
if style == "summary":
columns = ["Variable", "Bin", "Points"]
elif style == "detailed":
main_columns = ["Variable", "Bin id", "Bin"]
columns = self._df_scorecard.columns
rest_columns = [col for col in columns if col not in main_columns]
columns = main_columns + rest_columns
return self._df_scorecard[columns]
@classmethod
def load(cls, path):
"""Load scorecard from pickle file.
Parameters
----------
path : str
Pickle file path.
Example
-------
>>> from optbinning import Scorecard
>>> scorecard = Scorecard.load("my_scorecard.pkl")
"""
if not isinstance(path, str):
raise TypeError("path must be a string.")
with open(path, "rb") as f:
return pickle.load(f)
def save(self, path):
"""Save scorecard to pickle file.
Parameters
----------
path : str
Pickle file path.
"""
if not isinstance(path, str):
raise TypeError("path must be a string.")
with open(path, "wb") as f:
pickle.dump(self, f)
def _fit(self, X, y, sample_weight, metric_special, metric_missing,
show_digits, check_input):
# Store the metrics for missing and special bins for predictions
self._metric_special = metric_special
self._metric_missing = metric_missing
time_init = time.perf_counter()
if self.verbose:
logger.info("Scorecard building process started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params(deep=False))
# Check X dtype
if not isinstance(X, pd.DataFrame):
raise TypeError("X must be a pandas.DataFrame.")
# Target type and metric
self._target_dtype = type_of_target(y)
if self._target_dtype not in ("binary", "continuous"):
raise ValueError("Target type {} is not supported."
.format(self._target_dtype))
_check_scorecard_scaling(self.scaling_method,
self.scaling_method_params,
self.rounding,
self._target_dtype)
# Check sample weight
if sample_weight is not None and self._target_dtype != "binary":
raise ValueError("Target type {} does not support sample weight."
.format(self._target_dtype))
if self._target_dtype == "binary":
metric = "woe"
bt_metric = "WoE"
elif self._target_dtype == "continuous":
metric = "mean"
bt_metric = "Mean"
if self.verbose:
logger.info("Dataset: {} target.".format(self._target_dtype))
# Fit binning process
if self.verbose:
logger.info("Binning process started.")
time_binning_process = time.perf_counter()
self.binning_process_ = clone(self.binning_process)
# Suppress binning process verbosity
self.binning_process_.set_params(verbose=False)
X_t = self.binning_process_.fit_transform(
X[self.binning_process.variable_names], y, sample_weight, metric,
metric_special, metric_missing, show_digits, check_input)
self._time_binning_process = time.perf_counter() - time_binning_process
if self.verbose:
logger.info("Binning process terminated. Time: {:.4f}s"
.format(self._time_binning_process))
# Fit estimator
time_estimator = time.perf_counter()
if self.verbose:
logger.info("Fitting estimator.")
self.estimator_ = clone(self.estimator)
if sample_weight is not None:
self.estimator_.fit(X_t, y, sample_weight=sample_weight)
else:
self.estimator_.fit(X_t, y)
self._time_estimator = time.perf_counter() - time_estimator
if self.verbose:
logger.info("Fitting terminated. Time {:.4f}s"
.format(self._time_estimator))
# Get coefs
intercept = 0
if hasattr(self.estimator_, 'coef_'):
coefs = self.estimator_.coef_.flatten()
if hasattr(self.estimator_, 'intercept_'):
intercept = self.estimator_.intercept_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" attribute.')
# Build scorecard
time_build_scorecard = time.perf_counter()
if self.verbose:
logger.info("Scorecard table building started.")
selected_variables = self.binning_process_.get_support(names=True)
binning_tables = []
for i, variable in enumerate(selected_variables):
optb = self.binning_process_.get_binned_variable(variable)
binning_table = optb.binning_table.build(
show_digits=show_digits, add_totals=False)
c = coefs[i]
binning_table.loc[:, "Variable"] = variable
binning_table.loc[:, "Coefficient"] = c
binning_table.loc[:, "Points"] = binning_table[bt_metric] * c
nt = len(binning_table)
if metric_special != 'empirical':
if isinstance(optb.special_codes, dict):
n_specials = len(optb.special_codes)
else:
n_specials = 1
binning_table.loc[
nt-1-n_specials:nt-2, "Points"] = metric_special * c
if metric_missing != 'empirical':
binning_table.loc[nt-1, "Points"] = metric_missing * c
binning_table.index.names = ['Bin id']
binning_table.reset_index(level=0, inplace=True)
binning_tables.append(binning_table)
df_scorecard = pd.concat(binning_tables)
df_scorecard.reset_index()
# Apply score points
if self.scaling_method is not None:
points = df_scorecard["Points"]
scaled_points = _compute_scorecard_points(
points, binning_tables, self.scaling_method,
self.scaling_method_params, intercept, self.reverse_scorecard)
df_scorecard.loc[:, "Points"] = scaled_points
if self.intercept_based:
scaled_points, self.intercept_ = _compute_intercept_based(
df_scorecard)
df_scorecard.loc[:, "Points"] = scaled_points
time_rounding = time.perf_counter()
if self.rounding:
points = df_scorecard["Points"]
if self.scaling_method in ("pdo_odds", None):
round_points = np.rint(points)
if self.intercept_based:
self.intercept_ = np.rint(self.intercept_)
elif self.scaling_method == "min_max":
round_mip = RoundingMIP()
round_mip.build_model(df_scorecard)
status, round_points = round_mip.solve()
if status not in ("OPTIMAL", "FEASIBLE"):
if self.verbose:
logger.warning("MIP rounding failed, method nearest "
"integer used instead.")
# Back-up method
round_points = np.rint(points)
if self.intercept_based:
self.intercept_ = np.rint(self.intercept_)
df_scorecard.loc[:, "Points"] = round_points
self._time_rounding = time.perf_counter() - time_rounding
self._df_scorecard = df_scorecard
self._time_build_scorecard = time.perf_counter() - time_build_scorecard
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Scorecard table terminated. Time: {:.4f}s"
.format(self._time_build_scorecard))
logger.info("Scorecard building process terminated. Time: {:.4f}s"
.format(self._time_total))
# Completed successfully
self._is_fitted = True
return self
def _transform(self, X, metric, metric_special, metric_missing):
self._check_is_fitted()
X_t = self.binning_process_.transform(
X=X[self.binning_process_.variable_names], metric=metric,
metric_special=metric_special, metric_missing=metric_missing)
return X_t
| (binning_process, estimator, scaling_method=None, scaling_method_params=None, intercept_based=False, reverse_scorecard=False, rounding=False, verbose=False) |
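End-to-end sketch (synthetic data; ``BinningProcess`` and the scikit-learn estimator are assumptions consistent with the parameter descriptions above, not something this excerpt defines):

```python
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from optbinning import BinningProcess, Scorecard

rng = np.random.RandomState(0)
X = pd.DataFrame({"age": rng.randint(18, 80, 5000),
                  "income": rng.gamma(2.0, 2000.0, 5000)})
# Hypothetical binary target with some dependence on age.
y = (rng.rand(5000) < 1.0 / (1.0 + np.exp(-(X["age"] - 40) / 20))).astype(int)

scorecard = Scorecard(
    binning_process=BinningProcess(variable_names=list(X.columns)),
    estimator=LogisticRegression(),
    scaling_method="min_max",
    scaling_method_params={"min": 300, "max": 850},
    rounding=True,
)
scorecard.fit(X, y)
print(scorecard.table(style="summary").head())
```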
4,907 | optbinning.scorecard.scorecard | __init__ | null | def __init__(self, binning_process, estimator, scaling_method=None,
scaling_method_params=None, intercept_based=False,
reverse_scorecard=False, rounding=False, verbose=False):
self.binning_process = binning_process
self.estimator = estimator
self.scaling_method = scaling_method
self.scaling_method_params = scaling_method_params
self.intercept_based = intercept_based
self.reverse_scorecard = reverse_scorecard
self.rounding = rounding
self.verbose = verbose
# attributes
self.binning_process_ = None
self.estimator_ = None
self.intercept_ = 0
self._metric_special = None
self._metric_missing = None
# auxiliary
self._target_dtype = None
# timing
self._time_total = None
self._time_binning_process = None
self._time_estimator = None
self._time_build_scorecard = None
self._time_rounding = None
self._is_fitted = False
| (self, binning_process, estimator, scaling_method=None, scaling_method_params=None, intercept_based=False, reverse_scorecard=False, rounding=False, verbose=False) |
4,914 | optbinning.scorecard.scorecard | _fit | null | def _fit(self, X, y, sample_weight, metric_special, metric_missing,
show_digits, check_input):
# Store the metrics for missing and special bins for predictions
self._metric_special = metric_special
self._metric_missing = metric_missing
time_init = time.perf_counter()
if self.verbose:
logger.info("Scorecard building process started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params(deep=False))
# Check X dtype
if not isinstance(X, pd.DataFrame):
raise TypeError("X must be a pandas.DataFrame.")
# Target type and metric
self._target_dtype = type_of_target(y)
if self._target_dtype not in ("binary", "continuous"):
raise ValueError("Target type {} is not supported."
.format(self._target_dtype))
_check_scorecard_scaling(self.scaling_method,
self.scaling_method_params,
self.rounding,
self._target_dtype)
# Check sample weight
if sample_weight is not None and self._target_dtype != "binary":
raise ValueError("Target type {} does not support sample weight."
.format(self._target_dtype))
if self._target_dtype == "binary":
metric = "woe"
bt_metric = "WoE"
elif self._target_dtype == "continuous":
metric = "mean"
bt_metric = "Mean"
if self.verbose:
logger.info("Dataset: {} target.".format(self._target_dtype))
# Fit binning process
if self.verbose:
logger.info("Binning process started.")
time_binning_process = time.perf_counter()
self.binning_process_ = clone(self.binning_process)
# Suppress binning process verbosity
self.binning_process_.set_params(verbose=False)
X_t = self.binning_process_.fit_transform(
X[self.binning_process.variable_names], y, sample_weight, metric,
metric_special, metric_missing, show_digits, check_input)
self._time_binning_process = time.perf_counter() - time_binning_process
if self.verbose:
logger.info("Binning process terminated. Time: {:.4f}s"
.format(self._time_binning_process))
# Fit estimator
time_estimator = time.perf_counter()
if self.verbose:
logger.info("Fitting estimator.")
self.estimator_ = clone(self.estimator)
if sample_weight is not None:
self.estimator_.fit(X_t, y, sample_weight=sample_weight)
else:
self.estimator_.fit(X_t, y)
self._time_estimator = time.perf_counter() - time_estimator
if self.verbose:
logger.info("Fitting terminated. Time {:.4f}s"
.format(self._time_estimator))
# Get coefs
intercept = 0
if hasattr(self.estimator_, 'coef_'):
coefs = self.estimator_.coef_.flatten()
if hasattr(self.estimator_, 'intercept_'):
intercept = self.estimator_.intercept_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" attribute.')
# Build scorecard
time_build_scorecard = time.perf_counter()
if self.verbose:
logger.info("Scorecard table building started.")
selected_variables = self.binning_process_.get_support(names=True)
binning_tables = []
for i, variable in enumerate(selected_variables):
optb = self.binning_process_.get_binned_variable(variable)
binning_table = optb.binning_table.build(
show_digits=show_digits, add_totals=False)
c = coefs[i]
binning_table.loc[:, "Variable"] = variable
binning_table.loc[:, "Coefficient"] = c
binning_table.loc[:, "Points"] = binning_table[bt_metric] * c
nt = len(binning_table)
if metric_special != 'empirical':
if isinstance(optb.special_codes, dict):
n_specials = len(optb.special_codes)
else:
n_specials = 1
binning_table.loc[
nt-1-n_specials:nt-2, "Points"] = metric_special * c
if metric_missing != 'empirical':
binning_table.loc[nt-1, "Points"] = metric_missing * c
binning_table.index.names = ['Bin id']
binning_table.reset_index(level=0, inplace=True)
binning_tables.append(binning_table)
df_scorecard = pd.concat(binning_tables)
df_scorecard.reset_index()
# Apply score points
if self.scaling_method is not None:
points = df_scorecard["Points"]
scaled_points = _compute_scorecard_points(
points, binning_tables, self.scaling_method,
self.scaling_method_params, intercept, self.reverse_scorecard)
df_scorecard.loc[:, "Points"] = scaled_points
if self.intercept_based:
scaled_points, self.intercept_ = _compute_intercept_based(
df_scorecard)
df_scorecard.loc[:, "Points"] = scaled_points
time_rounding = time.perf_counter()
if self.rounding:
points = df_scorecard["Points"]
if self.scaling_method in ("pdo_odds", None):
round_points = np.rint(points)
if self.intercept_based:
self.intercept_ = np.rint(self.intercept_)
elif self.scaling_method == "min_max":
round_mip = RoundingMIP()
round_mip.build_model(df_scorecard)
status, round_points = round_mip.solve()
if status not in ("OPTIMAL", "FEASIBLE"):
if self.verbose:
logger.warning("MIP rounding failed, method nearest "
"integer used instead.")
# Back-up method
round_points = np.rint(points)
if self.intercept_based:
self.intercept_ = np.rint(self.intercept_)
df_scorecard.loc[:, "Points"] = round_points
self._time_rounding = time.perf_counter() - time_rounding
self._df_scorecard = df_scorecard
self._time_build_scorecard = time.perf_counter() - time_build_scorecard
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Scorecard table terminated. Time: {:.4f}s"
.format(self._time_build_scorecard))
logger.info("Scorecard building process terminated. Time: {:.4f}s"
.format(self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, X, y, sample_weight, metric_special, metric_missing, show_digits, check_input) |
4,921 | optbinning.scorecard.scorecard | _transform | null | def _transform(self, X, metric, metric_special, metric_missing):
self._check_is_fitted()
X_t = self.binning_process_.transform(
X=X[self.binning_process_.variable_names], metric=metric,
metric_special=metric_special, metric_missing=metric_missing)
return X_t
| (self, X, metric, metric_special, metric_missing) |
4,924 | optbinning.scorecard.scorecard | decision_function | Predict confidence scores for samples.
The confidence score for a sample is proportional to the signed
distance of that sample to the hyperplane.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
The data matrix for which we want to get the confidence scores.
Returns
-------
scores : array of shape (n_samples, n_classes)
Confidence scores per (n_samples, n_classes) combination.
| def decision_function(self, X):
"""Predict confidence scores for samples.
The confidence score for a sample is proportional to the signed
distance of that sample to the hyperplane.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
The data matrix for which we want to get the confidence scores.
Returns
-------
scores : array of shape (n_samples, n_classes)
Confidence scores per (n_samples, n_classes) combination.
"""
X_t = self._transform(
X=X, metric=None, metric_special=self._metric_special,
metric_missing=self._metric_missing)
return self.estimator_.decision_function(X_t)
| (self, X) |
4,925 | optbinning.scorecard.scorecard | fit | Fit scorecard.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
This option is only available for a binary target.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
check_input : bool (default=False)
Whether to check input arrays.
show_digits : int, optional (default=2)
The number of significant digits of the bin column.
Returns
-------
self : Scorecard
Fitted scorecard.
| def fit(self, X, y, sample_weight=None, metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Fit scorecard.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
This option is only available for a binary target.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
check_input : bool (default=False)
Whether to check input arrays.
show_digits : int, optional (default=2)
The number of significant digits of the bin column.
Returns
-------
self : Scorecard
Fitted scorecard.
"""
return self._fit(X, y, sample_weight, metric_special, metric_missing,
show_digits, check_input)
| (self, X, y, sample_weight=None, metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
4,928 | optbinning.scorecard.scorecard | information | Print overview information about the options settings and
statistics.
Parameters
----------
print_level : int (default=1)
Level of details.
| def information(self, print_level=1):
"""Print overview information about the options settings and
statistics.
Parameters
----------
print_level : int (default=1)
Level of details.
"""
self._check_is_fitted()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
n_numerical = list(
self.binning_process_._variable_dtypes.values()).count("numerical")
n_categorical = self.binning_process_._n_variables - n_numerical
n_selected = np.count_nonzero(self.binning_process_._support)
dict_user_options = self.get_params(deep=False)
print_scorecard_information(
print_level, self.binning_process_._n_samples,
self.binning_process_._n_variables, self._target_dtype,
n_numerical, n_categorical, n_selected, self._time_total,
self._time_binning_process, self._time_estimator,
self._time_build_scorecard, self._time_rounding, dict_user_options)
| (self, print_level=1) |
4,929 | optbinning.scorecard.scorecard | predict | Predict using the fitted underlying estimator and the reduced
dataset.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
Returns
-------
pred: array of shape (n_samples)
The predicted target values.
| def predict(self, X):
"""Predict using the fitted underlying estimator and the reduced
dataset.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
Returns
-------
pred: array of shape (n_samples)
The predicted target values.
"""
X_t = self._transform(
X=X, metric=None, metric_special=self._metric_special,
metric_missing=self._metric_missing)
return self.estimator_.predict(X_t)
| (self, X) |
4,930 | optbinning.scorecard.scorecard | predict_proba | Predict class probabilities using the fitted underlying estimator
and the reduced dataset.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
Returns
-------
p: array of shape (n_samples, n_classes)
The class probabilities of the input samples.
| def predict_proba(self, X):
"""Predict class probabilities using the fitted underlying estimator
and the reduced dataset.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
Returns
-------
p: array of shape (n_samples, n_classes)
The class probabilities of the input samples.
"""
X_t = self._transform(
X=X, metric=None, metric_special=self._metric_special,
metric_missing=self._metric_missing)
return self.estimator_.predict_proba(X_t)
| (self, X) |
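Prediction sketch (assumes the fitted ``scorecard`` and DataFrame ``X`` from the earlier class-level example):

```python
y_pred = scorecard.predict(X)
event_proba = scorecard.predict_proba(X)[:, 1]  # positive-class column for a binary target
```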
4,931 | optbinning.scorecard.scorecard | save | Save scorecard to pickle file.
Parameters
----------
path : str
Pickle file path.
| def save(self, path):
"""Save scorecard to pickle file.
Parameters
----------
path : str
Pickle file path.
"""
if not isinstance(path, str):
raise TypeError("path must be a string.")
with open(path, "wb") as f:
pickle.dump(self, f)
| (self, path) |
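Persistence sketch, mirroring the ``load`` docstring example earlier in this class:

```python
scorecard.save("my_scorecard.pkl")
restored = Scorecard.load("my_scorecard.pkl")
```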
4,932 | optbinning.scorecard.scorecard | score | Score of the dataset.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
Returns
-------
score: array of shape (n_samples)
The score of the input samples.
| def score(self, X):
"""Score of the dataset.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
Returns
-------
score: array of shape (n_samples)
The score of the input samples.
"""
X_t = self._transform(
X=X, metric="indices", metric_special="empirical",
metric_missing="empirical")
score_ = np.zeros(X_t.shape[0])
selected_variables = self.binning_process_.get_support(names=True)
for variable in selected_variables:
mask = self._df_scorecard.Variable == variable
points = self._df_scorecard[mask].Points.values
score_ += points[X_t[variable]]
return score_ + self.intercept_
| (self, X) |
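Scoring sketch (continuing the same example): per-sample points are the sum of per-variable points plus the intercept.

```python
scores = scorecard.score(X)
print(scores.min(), scores.max())  # with min_max scaling, expected near 300 and 850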
4,933 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``fit``.
metric_missing : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric_missing`` parameter in ``fit``.
metric_special : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric_special`` parameter in ``fit``.
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``sample_weight`` parameter in ``fit``.
show_digits : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``show_digits`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.scorecard.scorecard.Scorecard, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', metric_missing: Union[bool, NoneType, str] = '$UNCHANGED$', metric_special: Union[bool, NoneType, str] = '$UNCHANGED$', sample_weight: Union[bool, NoneType, str] = '$UNCHANGED$', show_digits: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.scorecard.scorecard.Scorecard |
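Routing sketch (only meaningful when the scorecard sits inside a meta-estimator, and only after opting in; the call below is hypothetical):

```python
import sklearn
sklearn.set_config(enable_metadata_routing=True)
scorecard.set_fit_request(sample_weight=True)  # route sample_weight to Scorecard.fit
```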
4,935 | optbinning.scorecard.scorecard | table | Scorecard table.
Parameters
----------
style : str, optional (default="summary")
Scorecard's style. Supported styles are "summary" and "detailed".
        Summary only includes the variable, bin description and points columns.
        Detailed contains additional columns with bin information and
        estimator coefficients.
Returns
-------
table : pandas.DataFrame
The scorecard table.
| def table(self, style="summary"):
"""Scorecard table.
Parameters
----------
style : str, optional (default="summary")
Scorecard's style. Supported styles are "summary" and "detailed".
            Summary only includes the variable, bin description and points columns.
            Detailed contains additional columns with bin information and
            estimator coefficients.
Returns
-------
table : pandas.DataFrame
The scorecard table.
"""
self._check_is_fitted()
if style not in ("summary", "detailed"):
raise ValueError('Invalid value for style. Allowed string '
'values are "summary" and "detailed".')
if style == "summary":
columns = ["Variable", "Bin", "Points"]
elif style == "detailed":
main_columns = ["Variable", "Bin id", "Bin"]
columns = self._df_scorecard.columns
rest_columns = [col for col in columns if col not in main_columns]
columns = main_columns + rest_columns
return self._df_scorecard[columns]
| (self, style='summary') |
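A short usage sketch for this method; it assumes a ``Scorecard`` instance named ``scorecard`` that has already been fitted.

```python
# Assumes `scorecard` is a fitted Scorecard instance.
summary = scorecard.table(style="summary")     # columns: Variable, Bin, Points
detailed = scorecard.table(style="detailed")   # adds bin statistics and coefficients

# Both are pandas DataFrames, e.g. points range per variable as a sanity check:
print(summary.groupby("Variable")["Points"].agg(["min", "max"]))
```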
4,945 | cerberus.validator | DocumentError | Raised when the target document is missing or has the wrong format | class DocumentError(Exception):
"""Raised when the target document is missing or has the wrong format"""
pass
| null |
4,946 | cerberus.schema | SchemaError |
Raised when the validation schema is missing, has the wrong format or contains
errors. | class SchemaError(Exception):
"""
Raised when the validation schema is missing, has the wrong format or contains
errors."""
pass
| null |
4,947 | cerberus.utils | TypeDefinition | TypeDefinition(name, included_types, excluded_types) | from cerberus.utils import TypeDefinition
| (name, included_types, excluded_types) |
4,949 | namedtuple_TypeDefinition | __new__ | Create new instance of TypeDefinition(name, included_types, excluded_types) | from builtins import function
| (_cls, name, included_types, excluded_types) |
4,952 | collections | _replace | Return a new TypeDefinition object replacing specified fields with new values | def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
| (self, /, **kwds) |
4,953 | cerberus.validator | Validator |
Validator class. Normalizes and/or validates any mapping against a
validation-schema which is provided as an argument at class instantiation
or upon calling the :meth:`~cerberus.Validator.validate`,
:meth:`~cerberus.Validator.validated` or
:meth:`~cerberus.Validator.normalized` method. An instance itself is
callable and executes a validation.
All instantiation parameters are optional.
There are the introspective properties :attr:`types`, :attr:`validators`,
:attr:`coercers`, :attr:`default_setters`, :attr:`rules`,
:attr:`normalization_rules` and :attr:`validation_rules`.
The attributes reflecting the available rules are assembled considering
 constraints that are defined in the docstrings of rules' methods and are
effectively used as validation schema for :attr:`schema`.
:param schema: See :attr:`~cerberus.Validator.schema`.
Defaults to :obj:`None`.
:type schema: any :term:`mapping`
:param ignore_none_values: See :attr:`~cerberus.Validator.ignore_none_values`.
Defaults to ``False``.
:type ignore_none_values: :class:`bool`
:param allow_unknown: See :attr:`~cerberus.Validator.allow_unknown`.
Defaults to ``False``.
:type allow_unknown: :class:`bool` or any :term:`mapping`
:param require_all: See :attr:`~cerberus.Validator.require_all`.
Defaults to ``False``.
:type require_all: :class:`bool`
:param purge_unknown: See :attr:`~cerberus.Validator.purge_unknown`.
                          Defaults to ``False``.
:type purge_unknown: :class:`bool`
:param purge_readonly: Removes all fields that are defined as ``readonly`` in the
normalization phase.
:type purge_readonly: :class:`bool`
:param error_handler: The error handler that formats the result of
:attr:`~cerberus.Validator.errors`.
When given as two-value tuple with an error-handler
class and a dictionary, the latter is passed to the
initialization of the error handler.
Default: :class:`~cerberus.errors.BasicErrorHandler`.
:type error_handler: class or instance based on
:class:`~cerberus.errors.BaseErrorHandler` or
:class:`tuple`
| from cerberus.validator import Validator
| (*args, **kwargs) |
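A minimal end-to-end example of the class in use, following standard cerberus behaviour:

```python
from cerberus import Validator

schema = {
    "name": {"type": "string", "required": True},
    "age": {"type": "integer", "min": 0},
}
v = Validator(schema)

v.validate({"name": "Alice", "age": 30})   # True
v.validate({"age": -1})                    # False
print(v.errors)   # {'age': ['min value is 0'], 'name': ['required field']}

# An instance is itself callable and runs a validation:
v({"name": "Bob"})                         # True
```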
4,954 | cerberus.validator | __get_rule_handler | null | def __get_rule_handler(self, domain, rule):
methodname = '_{0}_{1}'.format(domain, rule.replace(' ', '_'))
result = getattr(self, methodname, None)
if result is None:
raise RuntimeError(
"There's no handler for '{}' in the '{}' "
"domain.".format(rule, domain)
)
return result
| (self, domain, rule) |
4,955 | cerberus.validator | __init_error_handler | null | @staticmethod
def __init_error_handler(kwargs):
error_handler = kwargs.pop('error_handler', errors.BasicErrorHandler)
if isinstance(error_handler, tuple):
error_handler, eh_config = error_handler
else:
eh_config = {}
if isinstance(error_handler, type) and issubclass(
error_handler, errors.BaseErrorHandler
):
return error_handler(**eh_config)
elif isinstance(error_handler, errors.BaseErrorHandler):
return error_handler
else:
raise RuntimeError('Invalid error_handler.')
| (kwargs) |
4,956 | cerberus.validator | __init_processing | null | def __init_processing(self, document, schema=None):
self._errors = errors.ErrorList()
self.recent_error = None
self.document_error_tree = errors.DocumentErrorTree()
self.schema_error_tree = errors.SchemaErrorTree()
self.document = copy(document)
if not self.is_child:
self._is_normalized = False
if schema is not None:
self.schema = DefinitionSchema(self, schema)
elif self.schema is None:
if isinstance(self.allow_unknown, Mapping):
self._schema = {}
else:
raise SchemaError(errors.SCHEMA_ERROR_MISSING)
if document is None:
raise DocumentError(errors.DOCUMENT_MISSING)
if not isinstance(document, Mapping):
raise DocumentError(errors.DOCUMENT_FORMAT.format(document))
self.error_handler.start(self)
| (self, document, schema=None) |
4,957 | cerberus.validator | __normalize_coerce | null | def __normalize_coerce(self, processor, field, value, nullable, error):
if isinstance(processor, _str_type):
processor = self.__get_rule_handler('normalize_coerce', processor)
elif isinstance(processor, Iterable):
result = value
for p in processor:
result = self.__normalize_coerce(p, field, result, nullable, error)
if (
errors.COERCION_FAILED
in self.document_error_tree.fetch_errors_from(
self.document_path + (field,)
)
):
break
return result
try:
return processor(value)
except Exception as e:
if not (nullable and value is None):
self._error(field, error, str(e))
return value
| (self, processor, field, value, nullable, error) |
4,958 | cerberus.validator | __normalize_containers | null | def __normalize_containers(self, mapping, schema):
for field in mapping:
rules = set(schema.get(field, ()))
# TODO: This check conflates validation and normalization
if isinstance(mapping[field], Mapping):
if 'keysrules' in rules:
self.__normalize_mapping_per_keysrules(
field, mapping, schema[field]['keysrules']
)
if 'valuesrules' in rules:
self.__normalize_mapping_per_valuesrules(
field, mapping, schema[field]['valuesrules']
)
if rules & set(
('allow_unknown', 'purge_unknown', 'schema')
) or isinstance(self.allow_unknown, Mapping):
try:
self.__normalize_mapping_per_schema(field, mapping, schema)
except _SchemaRuleTypeError:
pass
elif isinstance(mapping[field], _str_type):
continue
elif isinstance(mapping[field], Sequence):
if 'schema' in rules:
self.__normalize_sequence_per_schema(field, mapping, schema)
elif 'items' in rules:
self.__normalize_sequence_per_items(field, mapping, schema)
| (self, mapping, schema) |
4,959 | cerberus.validator | __normalize_default_fields | null | def __normalize_default_fields(self, mapping, schema):
empty_fields = [
x
for x in schema
if x not in mapping
or (
mapping[x] is None # noqa: W503
and not schema[x].get('nullable', False)
) # noqa: W503
]
try:
fields_with_default = [x for x in empty_fields if 'default' in schema[x]]
except TypeError:
raise _SchemaRuleTypeError
for field in fields_with_default:
self._normalize_default(mapping, schema, field)
known_fields_states = set()
fields_with_default_setter = [
x for x in empty_fields if 'default_setter' in schema[x]
]
while fields_with_default_setter:
field = fields_with_default_setter.pop(0)
try:
self._normalize_default_setter(mapping, schema, field)
except KeyError:
fields_with_default_setter.append(field)
except Exception as e:
self._error(field, errors.SETTING_DEFAULT_FAILED, str(e))
fields_processing_state = hash(tuple(fields_with_default_setter))
if fields_processing_state in known_fields_states:
for field in fields_with_default_setter:
self._error(
field,
errors.SETTING_DEFAULT_FAILED,
'Circular dependencies of default setters.',
)
break
else:
known_fields_states.add(fields_processing_state)
| (self, mapping, schema) |
4,960 | cerberus.validator | __normalize_mapping | null | def __normalize_mapping(self, mapping, schema):
if isinstance(schema, _str_type):
schema = self._resolve_schema(schema)
schema = schema.copy()
for field in schema:
schema[field] = self._resolve_rules_set(schema[field])
self.__normalize_rename_fields(mapping, schema)
if self.purge_unknown and not self.allow_unknown:
self._normalize_purge_unknown(mapping, schema)
if self.purge_readonly:
self.__normalize_purge_readonly(mapping, schema)
# Check `readonly` fields before applying default values because
# a field's schema definition might contain both `readonly` and
# `default`.
self.__validate_readonly_fields(mapping, schema)
self.__normalize_default_fields(mapping, schema)
self._normalize_coerce(mapping, schema)
self.__normalize_containers(mapping, schema)
self._is_normalized = True
return mapping
| (self, mapping, schema) |
4,961 | cerberus.validator | __normalize_mapping_per_keysrules | null | def __normalize_mapping_per_keysrules(self, field, mapping, property_rules):
schema = dict(((k, property_rules) for k in mapping[field]))
document = dict(((k, k) for k in mapping[field]))
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'keysrules'), schema=schema
)
result = validator.normalized(document, always_return_document=True)
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2, 4])
self._error(validator._errors)
for k in result:
if k == result[k]:
continue
if result[k] in mapping[field]:
warn(
"Normalizing keys of {path}: {key} already exists, "
"its value is replaced.".format(
path='.'.join(str(x) for x in self.document_path + (field,)),
key=k,
)
)
mapping[field][result[k]] = mapping[field][k]
else:
mapping[field][result[k]] = mapping[field][k]
del mapping[field][k]
| (self, field, mapping, property_rules) |
4,962 | cerberus.validator | __normalize_mapping_per_schema | null | def __normalize_mapping_per_schema(self, field, mapping, schema):
rules = schema.get(field, {})
if not rules and isinstance(self.allow_unknown, Mapping):
rules = self.allow_unknown
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'schema'),
schema=rules.get('schema', {}),
allow_unknown=rules.get('allow_unknown', self.allow_unknown), # noqa: E501
purge_unknown=rules.get('purge_unknown', self.purge_unknown),
require_all=rules.get('require_all', self.require_all),
) # noqa: E501
value_type = type(mapping[field])
result_value = validator.normalized(mapping[field], always_return_document=True)
mapping[field] = value_type(result_value)
if validator._errors:
self._error(validator._errors)
| (self, field, mapping, schema) |
4,963 | cerberus.validator | __normalize_mapping_per_valuesrules | null | def __normalize_mapping_per_valuesrules(self, field, mapping, value_rules):
schema = dict(((k, value_rules) for k in mapping[field]))
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'valuesrules'), schema=schema
)
mapping[field] = validator.normalized(
mapping[field], always_return_document=True
)
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(validator._errors)
| (self, field, mapping, value_rules) |
4,964 | cerberus.validator | __normalize_purge_readonly | null | @staticmethod
def __normalize_purge_readonly(mapping, schema):
for field in [x for x in mapping if schema.get(x, {}).get('readonly', False)]:
mapping.pop(field)
return mapping
| (mapping, schema) |
4,965 | cerberus.validator | __normalize_rename_fields | null | def __normalize_rename_fields(self, mapping, schema):
for field in tuple(mapping):
if field in schema:
self._normalize_rename(mapping, schema, field)
self._normalize_rename_handler(mapping, schema, field)
elif (
isinstance(self.allow_unknown, Mapping)
and 'rename_handler' in self.allow_unknown
):
self._normalize_rename_handler(
mapping, {field: self.allow_unknown}, field
)
return mapping
| (self, mapping, schema) |
4,966 | cerberus.validator | __normalize_sequence_per_items | null | def __normalize_sequence_per_items(self, field, mapping, schema):
rules, values = schema[field]['items'], mapping[field]
if len(rules) != len(values):
return
schema = dict(((k, v) for k, v in enumerate(rules)))
document = dict((k, v) for k, v in enumerate(values))
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'items'), schema=schema
)
value_type = type(mapping[field])
result = validator.normalized(document, always_return_document=True)
mapping[field] = value_type(result.values())
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(validator._errors)
| (self, field, mapping, schema) |
4,967 | cerberus.validator | __normalize_sequence_per_schema | null | def __normalize_sequence_per_schema(self, field, mapping, schema):
schema = dict(
((k, schema[field]['schema']) for k in range(len(mapping[field])))
)
document = dict((k, v) for k, v in enumerate(mapping[field]))
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'schema'), schema=schema
)
value_type = type(mapping[field])
result = validator.normalized(document, always_return_document=True)
mapping[field] = value_type(result.values())
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(validator._errors)
| (self, field, mapping, schema) |
4,968 | cerberus.validator | __store_config | Assign args to kwargs and store configuration. | def __store_config(self, args, kwargs):
"""Assign args to kwargs and store configuration."""
signature = (
'schema',
'ignore_none_values',
'allow_unknown',
'require_all',
'purge_unknown',
'purge_readonly',
)
for i, p in enumerate(signature[: len(args)]):
if p in kwargs:
raise TypeError("__init__ got multiple values for argument " "'%s'" % p)
else:
kwargs[p] = args[i]
self._config = kwargs
""" This dictionary holds the configuration arguments that were used to
initialize the :class:`Validator` instance except the
``error_handler``. """
| (self, args, kwargs) |
4,969 | cerberus.validator | __validate_definitions | Validate a field's value against its defined rules. | def __validate_definitions(self, definitions, field):
"""Validate a field's value against its defined rules."""
def validate_rule(rule):
validator = self.__get_rule_handler('validate', rule)
return validator(definitions.get(rule, None), field, value)
definitions = self._resolve_rules_set(definitions)
value = self.document[field]
rules_queue = [
x
for x in self.priority_validations
if x in definitions or x in self.mandatory_validations
]
rules_queue.extend(
x for x in self.mandatory_validations if x not in rules_queue
)
rules_queue.extend(
x
for x in definitions
if x not in rules_queue
and x not in self.normalization_rules
and x not in ('allow_unknown', 'require_all', 'meta', 'required')
)
self._remaining_rules = rules_queue
while self._remaining_rules:
rule = self._remaining_rules.pop(0)
try:
result = validate_rule(rule)
# TODO remove on next breaking release
if result:
break
except _SchemaRuleTypeError:
break
self._drop_remaining_rules()
| (self, definitions, field) |
4,970 | cerberus.validator | __validate_dependencies_mapping | null | def __validate_dependencies_mapping(self, dependencies, field):
validated_dependencies_counter = 0
error_info = {}
for dependency_name, dependency_values in dependencies.items():
if not isinstance(dependency_values, Sequence) or isinstance(
dependency_values, _str_type
):
dependency_values = [dependency_values]
wanted_field, wanted_field_value = self._lookup_field(dependency_name)
if wanted_field_value in dependency_values:
validated_dependencies_counter += 1
else:
error_info.update({dependency_name: wanted_field_value})
if validated_dependencies_counter != len(dependencies):
self._error(field, errors.DEPENDENCIES_FIELD_VALUE, error_info)
| (self, dependencies, field) |
4,971 | cerberus.validator | __validate_dependencies_sequence | null | def __validate_dependencies_sequence(self, dependencies, field):
for dependency in dependencies:
if self._lookup_field(dependency)[0] is None:
self._error(field, errors.DEPENDENCIES_FIELD, dependency)
| (self, dependencies, field) |
4,972 | cerberus.validator | __validate_logical |
Validates value against all definitions and logs errors according to the
operator.
| def __validate_logical(self, operator, definitions, field, value):
"""
Validates value against all definitions and logs errors according to the
operator.
"""
valid_counter = 0
_errors = errors.ErrorList()
for i, definition in enumerate(definitions):
schema = {field: definition.copy()}
for rule in ('allow_unknown', 'type'):
if rule not in schema[field] and rule in self.schema[field]:
schema[field][rule] = self.schema[field][rule]
if 'allow_unknown' not in schema[field]:
schema[field]['allow_unknown'] = self.allow_unknown
validator = self._get_child_validator(
schema_crumb=(field, operator, i), schema=schema, allow_unknown=True
)
if validator(self.document, update=self.update, normalize=False):
valid_counter += 1
else:
self._drop_nodes_from_errorpaths(validator._errors, [], [3])
_errors.extend(validator._errors)
return valid_counter, _errors
| (self, operator, definitions, field, value) |
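For context, a small example of the logical rules this helper backs: ``anyof`` passes when at least one definition validates, ``oneof`` when exactly one does.

```python
from cerberus import Validator

v = Validator({"prop": {"anyof": [{"type": "integer", "min": 10},
                                  {"type": "string"}]}})
v.validate({"prop": 12})      # True  (matches the first definition)
v.validate({"prop": "abc"})   # True  (matches the second)
v.validate({"prop": 3})       # False (matches neither)
```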
4,973 | cerberus.validator | __validate_readonly_fields | null | def __validate_readonly_fields(self, mapping, schema):
for field in (
x
for x in schema
if x in mapping and self._resolve_rules_set(schema[x]).get('readonly')
):
self._validate_readonly(schema[field]['readonly'], field, mapping[field])
| (self, mapping, schema) |
4,974 | cerberus.validator | __validate_required_fields |
Validates that required fields are not missing.
:param document: The document being validated.
| def __validate_required_fields(self, document):
"""
Validates that required fields are not missing.
:param document: The document being validated.
"""
try:
required = set(
field
for field, definition in self.schema.items()
if self._resolve_rules_set(definition).get('required', self.require_all)
is True
)
except AttributeError:
if self.is_child and self.schema_path[-1] == 'schema':
raise _SchemaRuleTypeError
else:
raise
required -= self._unrequired_by_excludes
missing = required - set(
field
for field in document
if document.get(field) is not None or not self.ignore_none_values
)
for field in missing:
self._error(field, errors.REQUIRED_FIELD)
# At least one field from self._unrequired_by_excludes should be present in
# document.
if self._unrequired_by_excludes:
fields = set(field for field in document if document.get(field) is not None)
if self._unrequired_by_excludes.isdisjoint(fields):
for field in self._unrequired_by_excludes - fields:
self._error(field, errors.REQUIRED_FIELD)
| (self, document) |
4,975 | cerberus.validator | __validate_schema_mapping | null | def __validate_schema_mapping(self, field, schema, value):
schema = self._resolve_schema(schema)
field_rules = self._resolve_rules_set(self.schema[field])
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'schema'),
schema=schema,
allow_unknown=field_rules.get('allow_unknown', self.allow_unknown),
require_all=field_rules.get('require_all', self.require_all),
)
try:
if not validator(value, update=self.update, normalize=False):
self._error(field, errors.MAPPING_SCHEMA, validator._errors)
except _SchemaRuleTypeError:
self._error(field, errors.BAD_TYPE_FOR_SCHEMA)
raise
| (self, field, schema, value) |
4,976 | cerberus.validator | __validate_schema_sequence | null | def __validate_schema_sequence(self, field, schema, value):
schema = dict(((i, schema) for i in range(len(value))))
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'schema'),
schema=schema,
allow_unknown=self.allow_unknown,
)
validator(
dict(((i, v) for i, v in enumerate(value))),
update=self.update,
normalize=False,
)
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(field, errors.SEQUENCE_SCHEMA, validator._errors)
| (self, field, schema, value) |
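This is the code path taken when the ``schema`` rule is applied to a sequence: every item is validated against the same definition through the child validator built above.

```python
from cerberus import Validator

v = Validator({"tags": {"type": "list",
                        "schema": {"type": "string", "maxlength": 10}}})
v.validate({"tags": ["red", "green"]})   # True
v.validate({"tags": ["red", 7]})         # False: item 1 is not a string
```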
4,977 | cerberus.validator | __validate_unknown_fields | null | def __validate_unknown_fields(self, field):
if self.allow_unknown:
value = self.document[field]
if isinstance(self.allow_unknown, (Mapping, _str_type)):
# validate that unknown fields matches the schema
# for unknown_fields
schema_crumb = 'allow_unknown' if self.is_child else '__allow_unknown__'
validator = self._get_child_validator(
schema_crumb=schema_crumb, schema={field: self.allow_unknown}
)
if not validator({field: value}, normalize=False):
self._error(validator._errors)
else:
self._error(field, errors.UNKNOWN_FIELD)
| (self, field) |
4,978 | cerberus.validator | validate |
Normalizes and validates a mapping against a validation-schema of defined rules.
:param document: The document to normalize.
:type document: any :term:`mapping`
:param schema: The validation schema. Defaults to :obj:`None`. If not
provided here, the schema must have been provided at
class instantiation.
:type schema: any :term:`mapping`
:param update: If ``True``, required fields won't be checked.
:type update: :class:`bool`
:param normalize: If ``True``, normalize the document before validation.
:type normalize: :class:`bool`
:return: ``True`` if validation succeeds, otherwise ``False``. Check
the :func:`errors` property for a list of processing errors.
:rtype: :class:`bool`
| def validate(self, document, schema=None, update=False, normalize=True):
"""
Normalizes and validates a mapping against a validation-schema of defined rules.
:param document: The document to normalize.
:type document: any :term:`mapping`
:param schema: The validation schema. Defaults to :obj:`None`. If not
provided here, the schema must have been provided at
class instantiation.
:type schema: any :term:`mapping`
:param update: If ``True``, required fields won't be checked.
:type update: :class:`bool`
:param normalize: If ``True``, normalize the document before validation.
:type normalize: :class:`bool`
:return: ``True`` if validation succeeds, otherwise ``False``. Check
the :func:`errors` property for a list of processing errors.
:rtype: :class:`bool`
"""
self.update = update
self._unrequired_by_excludes = set()
self.__init_processing(document, schema)
if normalize:
self.__normalize_mapping(self.document, self.schema)
for field in self.document:
if self.ignore_none_values and self.document[field] is None:
continue
definitions = self.schema.get(field)
if definitions is not None:
self.__validate_definitions(definitions, field)
else:
self.__validate_unknown_fields(field)
if not self.update:
self.__validate_required_fields(self.document)
self.error_handler.end(self)
return not bool(self._errors)
| (self, document, schema=None, update=False, normalize=True) |
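A small example of the ``update`` flag, which is the usual choice when validating a partial update of an existing document:

```python
from cerberus import Validator

v = Validator({"name": {"type": "string", "required": True}})
v.validate({}, update=True)   # True: required fields are not enforced
v.validate({})                # False: 'name' is missing
```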
4,979 | cerberus.validator | __init__ |
The arguments will be treated as with this signature:
__init__(self, schema=None, ignore_none_values=False,
allow_unknown=False, require_all=False,
purge_unknown=False, purge_readonly=False,
error_handler=errors.BasicErrorHandler)
| def __init__(self, *args, **kwargs):
"""
The arguments will be treated as with this signature:
__init__(self, schema=None, ignore_none_values=False,
allow_unknown=False, require_all=False,
purge_unknown=False, purge_readonly=False,
error_handler=errors.BasicErrorHandler)
"""
self.document = None
""" The document that is or was recently processed.
Type: any :term:`mapping` """
self._errors = errors.ErrorList()
""" The list of errors that were encountered since the last document
processing was invoked.
Type: :class:`~cerberus.errors.ErrorList` """
self.recent_error = None
""" The last individual error that was submitted.
Type: :class:`~cerberus.errors.ValidationError` """
self.document_error_tree = errors.DocumentErrorTree()
""" A tree representiation of encountered errors following the
structure of the document.
Type: :class:`~cerberus.errors.DocumentErrorTree` """
self.schema_error_tree = errors.SchemaErrorTree()
""" A tree representiation of encountered errors following the
structure of the schema.
Type: :class:`~cerberus.errors.SchemaErrorTree` """
self.document_path = ()
""" The path within the document to the current sub-document.
Type: :class:`tuple` """
self.schema_path = ()
""" The path within the schema to the current sub-schema.
Type: :class:`tuple` """
self.update = False
self.error_handler = self.__init_error_handler(kwargs)
""" The error handler used to format :attr:`~cerberus.Validator.errors`
and process submitted errors with
:meth:`~cerberus.Validator._error`.
Type: :class:`~cerberus.errors.BaseErrorHandler` """
self.__store_config(args, kwargs)
self.schema = kwargs.get('schema', None)
self.allow_unknown = kwargs.get('allow_unknown', False)
self.require_all = kwargs.get('require_all', False)
self._remaining_rules = []
""" Keeps track of the rules that are next in line to be evaluated
during the validation of a field.
Type: :class:`list` """
super(BareValidator, self).__init__()
| (self, *args, **kwargs) |
4,980 | cerberus.validator | _drop_nodes_from_errorpaths |
 Removes nodes by index from an errorpath, relative to the basepaths of self.
:param errors: A list of :class:`errors.ValidationError` instances.
:param dp_items: A list of integers, pointing at the nodes to drop from
the :attr:`document_path`.
:param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
| def _drop_nodes_from_errorpaths(self, _errors, dp_items, sp_items):
"""
        Removes nodes by index from an errorpath, relative to the basepaths of self.
:param errors: A list of :class:`errors.ValidationError` instances.
:param dp_items: A list of integers, pointing at the nodes to drop from
the :attr:`document_path`.
:param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
"""
dp_basedepth = len(self.document_path)
sp_basedepth = len(self.schema_path)
for error in _errors:
for i in sorted(dp_items, reverse=True):
error.document_path = drop_item_from_tuple(
error.document_path, dp_basedepth + i
)
for i in sorted(sp_items, reverse=True):
error.schema_path = drop_item_from_tuple(
error.schema_path, sp_basedepth + i
)
if error.child_errors:
self._drop_nodes_from_errorpaths(error.child_errors, dp_items, sp_items)
| (self, _errors, dp_items, sp_items) |
4,981 | cerberus.validator | _drop_remaining_rules |
Drops rules from the queue of the rules that still need to be evaluated for the
currently processed field. If no arguments are given, the whole queue is
emptied.
| def _drop_remaining_rules(self, *rules):
"""
Drops rules from the queue of the rules that still need to be evaluated for the
currently processed field. If no arguments are given, the whole queue is
emptied.
"""
if rules:
for rule in rules:
try:
self._remaining_rules.remove(rule)
except ValueError:
pass
else:
self._remaining_rules = []
| (self, *rules) |
4,982 | cerberus.validator | _error |
Creates and adds one or multiple errors.
 :param args: Accepts several different argument signatures.
*1. Bulk addition of errors:*
- :term:`iterable` of
:class:`~cerberus.errors.ValidationError`-instances
The errors will be added to
:attr:`~cerberus.Validator._errors`.
*2. Custom error:*
- the invalid field's name
- the error message
A custom error containing the message will be created and
added to :attr:`~cerberus.Validator._errors`.
                    There will, however, be less information contained in the
error (no reference to the violated rule and its
constraint).
*3. Defined error:*
- the invalid field's name
- the error-reference, see :mod:`cerberus.errors`
- arbitrary, supplemental information about the error
A :class:`~cerberus.errors.ValidationError` instance will
be created and added to
:attr:`~cerberus.Validator._errors`.
| def _error(self, *args):
"""
Creates and adds one or multiple errors.
        :param args: Accepts several different argument signatures.
*1. Bulk addition of errors:*
- :term:`iterable` of
:class:`~cerberus.errors.ValidationError`-instances
The errors will be added to
:attr:`~cerberus.Validator._errors`.
*2. Custom error:*
- the invalid field's name
- the error message
A custom error containing the message will be created and
added to :attr:`~cerberus.Validator._errors`.
                           There will, however, be less information contained in the
error (no reference to the violated rule and its
constraint).
*3. Defined error:*
- the invalid field's name
- the error-reference, see :mod:`cerberus.errors`
- arbitrary, supplemental information about the error
A :class:`~cerberus.errors.ValidationError` instance will
be created and added to
:attr:`~cerberus.Validator._errors`.
"""
if len(args) == 1:
self._errors.extend(args[0])
self._errors.sort()
for error in args[0]:
self.document_error_tree.add(error)
self.schema_error_tree.add(error)
self.error_handler.emit(error)
elif len(args) == 2 and isinstance(args[1], _str_type):
self._error(args[0], errors.CUSTOM, args[1])
elif len(args) >= 2:
field = args[0]
code = args[1].code
rule = args[1].rule
info = args[2:]
document_path = self.document_path + (field,)
schema_path = self.schema_path
if code != errors.UNKNOWN_FIELD.code and rule is not None:
schema_path += (field, rule)
if not rule:
constraint = None
else:
rules_set = self._resolve_rules_set(
self._resolve_schema(self.schema)[field]
)
if rule == 'nullable':
constraint = rules_set.get(rule, False)
elif rule == 'required':
constraint = rules_set.get(rule, self.require_all)
if rule not in rules_set:
schema_path = "__require_all__"
else:
constraint = rules_set[rule]
value = self.document.get(field)
self.recent_error = errors.ValidationError(
document_path, schema_path, code, rule, constraint, value, info
)
self._error([self.recent_error])
| (self, *args) |
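Subclasses typically reach ``_error`` through the two-argument "custom error" signature, for instance from a ``check_with`` handler:

```python
from cerberus import Validator

class MyValidator(Validator):
    def _check_with_odd(self, field, value):
        # Custom-error signature: field name plus a plain message.
        if value % 2 != 1:
            self._error(field, "value must be an odd number")

v = MyValidator({"n": {"type": "integer", "check_with": "odd"}})
v.validate({"n": 4})   # False
print(v.errors)        # {'n': ['value must be an odd number']}
```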
4,983 | cerberus.validator | _get_child_validator |
Creates a new instance of Validator-(sub-)class. All initial parameters of the
parent are passed to the initialization, unless a parameter is given as an
explicit *keyword*-parameter.
:param document_crumb: Extends the
:attr:`~cerberus.Validator.document_path`
of the child-validator.
:type document_crumb: :class:`tuple` or :term:`hashable`
:param schema_crumb: Extends the
:attr:`~cerberus.Validator.schema_path`
of the child-validator.
:type schema_crumb: :class:`tuple` or hashable
:param kwargs: Overriding keyword-arguments for initialization.
:type kwargs: :class:`dict`
:return: an instance of ``self.__class__``
| def _get_child_validator(self, document_crumb=None, schema_crumb=None, **kwargs):
"""
Creates a new instance of Validator-(sub-)class. All initial parameters of the
parent are passed to the initialization, unless a parameter is given as an
explicit *keyword*-parameter.
:param document_crumb: Extends the
:attr:`~cerberus.Validator.document_path`
of the child-validator.
:type document_crumb: :class:`tuple` or :term:`hashable`
:param schema_crumb: Extends the
:attr:`~cerberus.Validator.schema_path`
of the child-validator.
:type schema_crumb: :class:`tuple` or hashable
:param kwargs: Overriding keyword-arguments for initialization.
:type kwargs: :class:`dict`
:return: an instance of ``self.__class__``
"""
child_config = self._config.copy()
child_config.update(kwargs)
if not self.is_child:
child_config['is_child'] = True
child_config['error_handler'] = toy_error_handler
child_config['root_allow_unknown'] = self.allow_unknown
child_config['root_require_all'] = self.require_all
child_config['root_document'] = self.document
child_config['root_schema'] = self.schema
child_validator = self.__class__(**child_config)
if document_crumb is None:
child_validator.document_path = self.document_path
else:
if not isinstance(document_crumb, tuple):
document_crumb = (document_crumb,)
child_validator.document_path = self.document_path + document_crumb
if schema_crumb is None:
child_validator.schema_path = self.schema_path
else:
if not isinstance(schema_crumb, tuple):
schema_crumb = (schema_crumb,)
child_validator.schema_path = self.schema_path + schema_crumb
return child_validator
| (self, document_crumb=None, schema_crumb=None, **kwargs) |
4,984 | cerberus.validator | _lookup_field |
Searches for a field as defined by path. This method is used by the
``dependency`` evaluation logic.
:param path: Path elements are separated by a ``.``. A leading ``^``
indicates that the path relates to the document root,
otherwise it relates to the currently evaluated document,
which is possibly a subdocument.
The sequence ``^^`` at the start will be interpreted as a
literal ``^``.
:type path: :class:`str`
:returns: Either the found field name and its value or :obj:`None` for
both.
:rtype: A two-value :class:`tuple`.
| def _lookup_field(self, path):
"""
Searches for a field as defined by path. This method is used by the
``dependency`` evaluation logic.
:param path: Path elements are separated by a ``.``. A leading ``^``
indicates that the path relates to the document root,
otherwise it relates to the currently evaluated document,
which is possibly a subdocument.
The sequence ``^^`` at the start will be interpreted as a
literal ``^``.
:type path: :class:`str`
:returns: Either the found field name and its value or :obj:`None` for
both.
:rtype: A two-value :class:`tuple`.
"""
if path.startswith('^'):
path = path[1:]
context = self.document if path.startswith('^') else self.root_document
else:
context = self.document
parts = path.split('.')
for part in parts:
if part not in context:
return None, None
context = context.get(part, {})
return parts[-1], context
| (self, path) |
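A sketch of the path syntax this method resolves, as used by the ``dependencies`` rule; the exact schema below is illustrative, the key point is that a leading ``^`` anchors the lookup at the document root instead of the current subdocument.

```python
from cerberus import Validator

schema = {
    "settings": {"type": "dict", "schema": {
        "region": {"type": "string", "dependencies": "^account.plan"},
    }},
    "account": {"type": "dict", "schema": {"plan": {"type": "string"}}},
}
v = Validator(schema)
v.validate({"settings": {"region": "eu"}, "account": {"plan": "pro"}})  # True
v.validate({"settings": {"region": "eu"}, "account": {}})               # False
```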
4,985 | cerberus.validator | _normalize_coerce |
{'oneof': [
{'type': 'callable'},
{'type': 'list',
'schema': {'oneof': [{'type': 'callable'},
{'type': 'string'}]}},
{'type': 'string'}
]}
| def _normalize_coerce(self, mapping, schema):
"""
{'oneof': [
{'type': 'callable'},
{'type': 'list',
'schema': {'oneof': [{'type': 'callable'},
{'type': 'string'}]}},
{'type': 'string'}
]}
"""
error = errors.COERCION_FAILED
for field in mapping:
if field in schema and 'coerce' in schema[field]:
mapping[field] = self.__normalize_coerce(
schema[field]['coerce'],
field,
mapping[field],
schema[field].get('nullable', False),
error,
)
elif (
isinstance(self.allow_unknown, Mapping)
and 'coerce' in self.allow_unknown
):
mapping[field] = self.__normalize_coerce(
self.allow_unknown['coerce'],
field,
mapping[field],
self.allow_unknown.get('nullable', False),
error,
)
| (self, mapping, schema) |
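As the rule constraint above states, ``coerce`` accepts a callable, a registered handler name, or a chain of them; it runs during normalization, before type checks.

```python
from cerberus import Validator

v = Validator({"amount": {"type": "integer", "coerce": int},
               "name": {"type": "string", "coerce": [str.strip, str.lower]}})
v.normalized({"amount": "3", "name": "  Alice "})
# {'amount': 3, 'name': 'alice'}
```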
4,986 | cerberus.validator | _normalize_default | {'nullable': True} | def _normalize_default(self, mapping, schema, field):
"""{'nullable': True}"""
mapping[field] = schema[field]['default']
| (self, mapping, schema, field) |
4,987 | cerberus.validator | _normalize_default_setter |
{'oneof': [
{'type': 'callable'},
{'type': 'string'}
]}
| def _normalize_default_setter(self, mapping, schema, field):
"""
{'oneof': [
{'type': 'callable'},
{'type': 'string'}
]}
"""
if 'default_setter' in schema[field]:
setter = schema[field]['default_setter']
if isinstance(setter, _str_type):
setter = self.__get_rule_handler('normalize_default_setter', setter)
mapping[field] = setter(mapping)
| (self, mapping, schema, field) |
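A short example covering both normalization rules: ``default`` fills a missing field with a constant, while ``default_setter`` uses a callable (or named handler) that receives the document being normalized.

```python
from cerberus import Validator

v = Validator({
    "amount": {"type": "integer"},
    "kind": {"type": "string", "default": "purchase"},
    "total": {"type": "integer",
              "default_setter": lambda doc: doc["amount"] * 2},
})
v.normalized({"amount": 5})
# {'amount': 5, 'kind': 'purchase', 'total': 10}
```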
4,988 | cerberus.validator | _normalize_purge_unknown | {'type': 'boolean'} | @staticmethod
def _normalize_purge_unknown(mapping, schema):
"""{'type': 'boolean'}"""
for field in [x for x in mapping if x not in schema]:
mapping.pop(field)
return mapping
| (mapping, schema) |
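With ``purge_unknown=True``, fields absent from the schema are silently dropped during normalization instead of producing "unknown field" errors:

```python
from cerberus import Validator

v = Validator({"name": {"type": "string"}}, purge_unknown=True)
v.normalized({"name": "Alice", "debug": True})
# {'name': 'Alice'}
```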
4,989 | cerberus.validator | _normalize_rename | {'type': 'hashable'} | def _normalize_rename(self, mapping, schema, field):
"""{'type': 'hashable'}"""
if 'rename' in schema[field]:
mapping[schema[field]['rename']] = mapping[field]
del mapping[field]
| (self, mapping, schema, field) |
4,990 | cerberus.validator | _normalize_rename_handler |
{'oneof': [
{'type': 'callable'},
{'type': 'list',
'schema': {'oneof': [{'type': 'callable'},
{'type': 'string'}]}},
{'type': 'string'}
]}
| def _normalize_rename_handler(self, mapping, schema, field):
"""
{'oneof': [
{'type': 'callable'},
{'type': 'list',
'schema': {'oneof': [{'type': 'callable'},
{'type': 'string'}]}},
{'type': 'string'}
]}
"""
if 'rename_handler' not in schema[field]:
return
new_name = self.__normalize_coerce(
schema[field]['rename_handler'], field, field, False, errors.RENAMING_FAILED
)
if new_name != field:
mapping[new_name] = mapping[field]
del mapping[field]
| (self, mapping, schema, field) |
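A sketch of the two renaming rules: ``rename`` moves a value to a fixed new key, while ``rename_handler`` derives the new key from the old one and is typically combined with an ``allow_unknown`` mapping.

```python
from cerberus import Validator

v = Validator({"-name": {"rename": "name"}, "name": {"type": "string"}})
v.normalized({"-name": "Alice"})
# {'name': 'Alice'}

v = Validator({}, allow_unknown={"rename_handler": str.lower})
v.normalized({"NAME": "Alice"})
# {'name': 'Alice'}
```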