id (int64, 0-190k) | prompt (string, lengths 21-13.4M) | docstring (string, lengths 1-12k, nullable ⌀)
---|---|---|
502 | import typing as t
import numpy as np
import pandas as pd
from deepchecks.core import ConditionResult
from deepchecks.core.condition import ConditionCategory
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.utils.dict_funcs import get_dict_entry_by_value
from deepchecks.utils.strings import format_number, format_percent
class ConditionCategory(enum.Enum):
"""Condition result category. indicates whether the result should fail the suite."""
FAIL = 'FAIL'
WARN = 'WARN'
PASS = 'PASS'
ERROR = 'ERROR'
def get_dict_entry_by_value(x: dict, value_select_fn=max):
"""Get from dictionary the entry with value that returned from value_select_fn.
Returns
-------
Tuple: key, value
"""
if not x:
return None, None
value = value_select_fn(x.values())
index = list(x.values()).index(value)
return list(x.keys())[index], value
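A minimal usage sketch of get_dict_entry_by_value; the scores dictionary below is hypothetical:
# Hypothetical metric scores; max is the default selector.
scores = {'Accuracy': 0.91, 'F1': 0.88, 'Recall': 0.95}
best_metric, best_value = get_dict_entry_by_value(scores)         # ('Recall', 0.95)
worst_metric, worst_value = get_dict_entry_by_value(scores, min)  # ('F1', 0.88)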
def format_percent(ratio: float, floating_point: int = 2, scientific_notation_threshold: int = 4,
add_positive_prefix: bool = False) -> str:
"""Format percent for elegant display.
Parameters
----------
ratio : float
Ratio to be displayed as percent
floating_point: int , default: 2
Number of floating points to display
scientific_notation_threshold: int, default: 4
Max number of floating points for which to show number as float. If number of floating points is larger than
this parameter, scientific notation (e.g. "10E-5%") will be shown.
add_positive_prefix: bool, default: False
add plus sign before positive percentages (minus sign is always added for negative percentages).
Returns
-------
str
String of ratio as percent
"""
result: str
if ratio < 0:
ratio = -ratio
prefix = '-'
else:
prefix = '+' if add_positive_prefix and ratio != 0 else ''
if int(ratio) == ratio:
result = f'{int(ratio) * 100}%'
elif ratio > 1:
result = truncate_zero_percent(ratio, floating_point)
elif ratio < 10**(-(2+floating_point)):
if ratio > 10**(-(2+scientific_notation_threshold)):
result = truncate_zero_percent(ratio, scientific_notation_threshold)
else:
result = f'{Decimal(ratio * 100):.{floating_point}E}%'
elif ratio > (1-10**(-(2+floating_point))):
if floating_point > 0:
result = f'99.{"".join(["9"]*floating_point)}%'
else:
result = '99%'
else:
result = truncate_zero_percent(ratio, floating_point)
return prefix + result
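A small usage sketch of format_percent. It assumes deepchecks is installed, since the truncate_zero_percent helper and the Decimal import used above are not part of this excerpt; the expected outputs in the comments follow that implementation.
from deepchecks.utils.strings import format_percent

print(format_percent(0.2537))                            # '25.37%'
print(format_percent(0.2537, add_positive_prefix=True))  # '+25.37%'
print(format_percent(-0.004))                            # '-0.4%'
print(format_percent(0.000001))                          # scientific notation, e.g. '1.00E-4%'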
The provided code snippet includes necessary dependencies for implementing the `get_condition_train_test_relative_degradation_less_than` function. Write a Python function `def get_condition_train_test_relative_degradation_less_than(threshold: float) -> t.Callable[[pd.DataFrame], ConditionResult]` to solve the following problem:
Add condition - test performance is not degraded by more than given percentage in train. Parameters ---------- threshold : float maximum degradation ratio allowed (value between 0 and 1) Returns ------- Callable the condition function
Here is the function:
def get_condition_train_test_relative_degradation_less_than(threshold: float) -> \
t.Callable[[pd.DataFrame], ConditionResult]:
"""Add condition - test performance is not degraded by more than given percentage in train.
Parameters
----------
threshold : float
maximum degradation ratio allowed (value between 0 and 1)
Returns
-------
Callable
the condition function
"""
def _ratio_of_change_calc(score_1, score_2):
if score_1 == 0:
if score_2 == 0:
return 0
return 1
return (score_1 - score_2) / abs(score_1)
def condition(check_result: pd.DataFrame) -> ConditionResult:
test_scores = check_result.loc[check_result['Dataset'] == 'Test']
train_scores = check_result.loc[check_result['Dataset'] == 'Train']
max_degradation = ('', -np.inf)
num_failures = 0
def update_max_degradation(diffs, class_name):
nonlocal max_degradation
max_scorer, max_diff = get_dict_entry_by_value(diffs)
if max_diff > max_degradation[1]:
max_degradation_details = f'Found max degradation of {format_percent(max_diff)} for metric {max_scorer}'
if class_name is not None:
max_degradation_details += f' and class {class_name}.'
max_degradation = max_degradation_details, max_diff
if 'Class' in check_result.columns and not pd.isnull(check_result['Class']).all():
if 'Class Name' in check_result.columns and not pd.isnull(check_result['Class Name']).all():
class_column = 'Class Name'
else:
class_column = 'Class'
classes = check_result[class_column].unique()
else:
classes = None
if classes is not None:
for class_name in classes:
test_scores_class = test_scores.loc[test_scores[class_column] == class_name]
train_scores_class = train_scores.loc[train_scores[class_column] == class_name]
test_scores_dict = dict(zip(test_scores_class['Metric'], test_scores_class['Value']))
train_scores_dict = dict(zip(train_scores_class['Metric'], train_scores_class['Value']))
if len(test_scores_dict) == 0 or len(train_scores_dict) == 0:
continue
# Calculate percentage of change from train to test
diff = {score_name: _ratio_of_change_calc(score, test_scores_dict[score_name])
for score_name, score in train_scores_dict.items()}
update_max_degradation(diff, class_name)
num_failures += len([v for v in diff.values() if v >= threshold])
else:
test_scores_dict = dict(zip(test_scores['Metric'], test_scores['Value']))
train_scores_dict = dict(zip(train_scores['Metric'], train_scores['Value']))
if not (len(test_scores_dict) == 0 or len(train_scores_dict) == 0):
# Calculate percentage of change from train to test
diff = {score_name: _ratio_of_change_calc(score, test_scores_dict[score_name])
for score_name, score in train_scores_dict.items()}
update_max_degradation(diff, None)
num_failures += len([v for v in diff.values() if v >= threshold])
if num_failures > 0:
message = f'{num_failures} scores failed. ' + max_degradation[0]
return ConditionResult(ConditionCategory.FAIL, message)
else:
message = max_degradation[0]
return ConditionResult(ConditionCategory.PASS, message)
return condition
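A hedged usage sketch of the returned condition. The DataFrame below is hypothetical but has the 'Dataset', 'Metric' and 'Value' columns the condition expects (no per-class 'Class' column), and it assumes ConditionResult exposes category and details attributes as in deepchecks:
import pandas as pd

check_result = pd.DataFrame({
    'Dataset': ['Train', 'Train', 'Test', 'Test'],
    'Metric':  ['Accuracy', 'F1', 'Accuracy', 'F1'],
    'Value':   [0.95, 0.90, 0.80, 0.88],
})

condition = get_condition_train_test_relative_degradation_less_than(threshold=0.1)
result = condition(check_result)
# Accuracy degrades by (0.95 - 0.80) / 0.95 ~= 15.8% >= 10%, so one score fails.
print(result.category, result.details)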
503 | import typing as t
import numpy as np
import pandas as pd
from deepchecks.core import ConditionResult
from deepchecks.core.condition import ConditionCategory
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.utils.dict_funcs import get_dict_entry_by_value
from deepchecks.utils.strings import format_number, format_percent
class ConditionCategory(enum.Enum):
"""Condition result category. indicates whether the result should fail the suite."""
FAIL = 'FAIL'
WARN = 'WARN'
PASS = 'PASS'
ERROR = 'ERROR'
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
def format_percent(ratio: float, floating_point: int = 2, scientific_notation_threshold: int = 4,
add_positive_prefix: bool = False) -> str:
"""Format percent for elegant display.
Parameters
----------
ratio : float
Ratio to be displayed as percent
floating_point: int , default: 2
Number of floating points to display
scientific_notation_threshold: int, default: 4
Max number of floating points for which to show number as float. If number of floating points is larger than
this parameter, scientific notation (e.g. "10E-5%") will be shown.
add_positive_prefix: bool, default: False
add plus sign before positive percentages (minus sign is always added for negative percentages).
Returns
-------
str
String of ratio as percent
"""
result: str
if ratio < 0:
ratio = -ratio
prefix = '-'
else:
prefix = '+' if add_positive_prefix and ratio != 0 else ''
if int(ratio) == ratio:
result = f'{int(ratio) * 100}%'
elif ratio > 1:
result = truncate_zero_percent(ratio, floating_point)
elif ratio < 10**(-(2+floating_point)):
if ratio > 10**(-(2+scientific_notation_threshold)):
result = truncate_zero_percent(ratio, scientific_notation_threshold)
else:
result = f'{Decimal(ratio * 100):.{floating_point}E}%'
elif ratio > (1-10**(-(2+floating_point))):
if floating_point > 0:
result = f'99.{"".join(["9"]*floating_point)}%'
else:
result = '99%'
else:
result = truncate_zero_percent(ratio, floating_point)
return prefix + result
def format_number(x, floating_point: int = 2) -> str:
"""Format number for elegant display.
Parameters
----------
x
Number to be displayed
floating_point : int , default: 2
Number of floating points to display
Returns
-------
str
String of beautified number
"""
def add_commas(x):
return f'{x:,}' # yes this actually formats the number 1000 to "1,000"
if np.isnan(x):
return 'nan'
# 0 is lost in the next if case, so we have it here as a special use-case
if x == 0:
return '0'
# If x is a very small number, that would be rounded to 0, we would prefer to return it as the format 1.0E-3.
if abs(x) < 10 ** (-floating_point):
return f'{Decimal(x):.{floating_point}E}'
# If x is an integer, or if x when rounded is an integer (e.g. 1.999999), then return as integer:
if round(x) == round(x, floating_point):
return add_commas(round(x))
# If not, return as a float, but don't print unnecessary zeros at end:
else:
ret_x = round(x, floating_point)
return add_commas(ret_x).rstrip('0')
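A small usage sketch of format_number, again assuming deepchecks is installed (the Decimal import used above is not shown in this excerpt):
from deepchecks.utils.strings import format_number

print(format_number(1234.5678))  # '1,234.57'
print(format_number(1.999999))   # rounds to an integer: '2'
print(format_number(0.00004))    # very small values use scientific notation, e.g. '4.00E-5'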
The provided code snippet includes necessary dependencies for implementing the `get_condition_class_performance_imbalance_ratio_less_than` function. Write a Python function `def get_condition_class_performance_imbalance_ratio_less_than(threshold: float, score: str) -> t.Callable[[pd.DataFrame], ConditionResult]` to solve the following problem:
Add condition - relative ratio difference between highest-class and lowest-class is less than threshold. Parameters ---------- threshold : float ratio difference threshold score : str limit score for condition Returns ------- Callable the condition function
Here is the function:
def get_condition_class_performance_imbalance_ratio_less_than(threshold: float, score: str) -> \
t.Callable[[pd.DataFrame], ConditionResult]:
"""Add condition - relative ratio difference between highest-class and lowest-class is less than threshold.
Parameters
----------
threshold : float
ratio difference threshold
score : str
limit score for condition
Returns
-------
Callable
the condition function
"""
def condition(check_result: pd.DataFrame) -> ConditionResult:
if score not in set(check_result['Metric']):
raise DeepchecksValueError(f'Data was not calculated using the scoring function: {score}')
condition_passed = True
datasets_details = []
for dataset in ['Test', 'Train']:
data = check_result.loc[(check_result['Dataset'] == dataset) & (check_result['Metric'] == score)]
min_value_index = data['Value'].idxmin()
min_row = data.loc[min_value_index]
min_class_name = min_row.get('Class Name', min_row['Class'])
min_value = min_row['Value']
max_value_index = data['Value'].idxmax()
max_row = data.loc[max_value_index]
max_class_name = max_row.get('Class Name', max_row['Class'])
max_value = max_row['Value']
relative_difference = abs((min_value - max_value) / max_value)
condition_passed &= relative_difference < threshold
details = (
f'Relative ratio difference between highest and lowest in {dataset} dataset '
f'classes is {format_percent(relative_difference)}, using {score} metric. '
f'Lowest class - {min_class_name}: {format_number(min_value)}; '
f'Highest class - {max_class_name}: {format_number(max_value)}'
)
datasets_details.append(details)
category = ConditionCategory.PASS if condition_passed else ConditionCategory.FAIL
return ConditionResult(category, details='\n'.join(datasets_details))
return condition
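A hedged usage sketch of the class-imbalance condition, with a hypothetical per-class score table containing the 'Dataset', 'Class', 'Metric' and 'Value' columns it reads:
import pandas as pd

check_result = pd.DataFrame({
    'Dataset': ['Train', 'Train', 'Test', 'Test'],
    'Class':   ['cat', 'dog', 'cat', 'dog'],
    'Metric':  ['F1', 'F1', 'F1', 'F1'],
    'Value':   [0.92, 0.88, 0.90, 0.60],
})

condition = get_condition_class_performance_imbalance_ratio_less_than(threshold=0.2, score='F1')
# In Test: |0.60 - 0.90| / 0.90 ~= 33% >= 20%, so the condition fails.
print(condition(check_result).category)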
504 | import warnings
from typing import Container, List, Tuple
import pandas as pd
import plotly.graph_objects as go
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder
from deepchecks.tabular import Dataset
from deepchecks.tabular.utils.feature_importance import N_TOP_MESSAGE, calculate_feature_importance_or_none
from deepchecks.tabular.utils.task_type import TaskType
from deepchecks.utils.dataframes import floatify_dataframe
from deepchecks.utils.distribution.drift import get_drift_plot_sidenote
from deepchecks.utils.distribution.plot import drift_score_bar_traces, feature_distribution_traces
from deepchecks.utils.distribution.rare_category_encoder import RareCategoryEncoder
from deepchecks.utils.plot import DEFAULT_DATASET_NAMES
from deepchecks.utils.strings import format_percent
from deepchecks.utils.typing import Hashable
def auc_to_drift_score(auc: float) -> float:
"""Calculate the drift score, which is 2*auc - 1, with auc being the auc of the Domain Classifier.
Parameters
----------
auc : float
auc of the Domain Classifier
"""
return max(2 * auc - 1, 0)
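A quick illustration of the AUC-to-drift mapping (a random domain classifier, AUC 0.5, yields zero drift):
print(auc_to_drift_score(0.5))  # 0.0 - train and test are indistinguishable
print(auc_to_drift_score(0.9))  # 0.8 - strong drift
print(auc_to_drift_score(0.3))  # 0.0 - values below 0.5 are clipped to zero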
def build_drift_plot(score):
"""Build traffic light drift plot."""
bar_traces, x_axis, y_axis = drift_score_bar_traces(score)
x_axis['title'] = 'Drift score'
drift_plot = go.Figure(layout=dict(
title='Drift Score - Multivariable',
xaxis=x_axis,
yaxis=y_axis,
height=200
))
drift_plot.add_traces(bar_traces)
return drift_plot
def display_dist(
train_column: pd.Series,
test_column: pd.Series,
fi: pd.Series,
cat_features: Container[str],
max_num_categories: int,
show_categories_by: str,
dataset_names: Tuple[str] = DEFAULT_DATASET_NAMES
):
"""Create a distribution comparison plot for the given columns."""
column_name = train_column.name or ''
column_fi = fi.loc[column_name]
title = f'Feature: {column_name} - Explains {format_percent(column_fi)} of dataset difference'
dist_traces, xaxis_layout, yaxis_layout = feature_distribution_traces(
train_column.dropna(),
test_column.dropna(),
column_name,
is_categorical=column_name in cat_features,
max_num_categories=max_num_categories,
show_categories_by=show_categories_by,
dataset_names=dataset_names
)
fig = go.Figure()
fig.add_traces(dist_traces)
return fig.update_layout(go.Layout(
title=title,
xaxis=xaxis_layout,
yaxis=yaxis_layout,
legend=dict(
title='Dataset',
yanchor='top',
y=0.9,
xanchor='left'),
height=300
))
N_TOP_MESSAGE = 'Showing only the top %s columns, you can change it using n_top_columns param'
def calculate_feature_importance_or_none(
model: t.Any,
dataset: t.Union['tabular.Dataset', pd.DataFrame],
model_classes,
observed_classes,
task_type,
force_permutation: bool = False,
permutation_kwargs: t.Optional[t.Dict[str, t.Any]] = None,
) -> t.Tuple[t.Optional[pd.Series], t.Optional[str]]:
"""Calculate features effect on the label or None if the input is incorrect.
Parameters
----------
model : t.Any
a fitted model
dataset : t.Union['tabular.Dataset', pd.DataFrame]
dataset used to fit the model
model_classes
possible classes output for model. None for regression tasks.
observed_classes
Observed classes in the data. None for regression tasks.
task_type
The task type of the model.
force_permutation : bool , default: False
force permutation importance calculation
permutation_kwargs : t.Optional[t.Dict[str, t.Any]] , default: None
kwargs for permutation importance calculation
Returns
-------
feature_importance, calculation_type : t.Tuple[t.Optional[pd.Series], str]]
features importance normalized to 0-1 indexed by feature names, or None if the input is incorrect
Tuple of the features importance and the calculation type
(types: `permutation_importance`, `feature_importances_`, `coef_`)
"""
try:
if model is None:
return None, None  # keep the (importance, calculation_type) tuple shape
# calculate feature importance if dataset has a label and the model is fitted on it
fi, calculation_type = _calculate_feature_importance(
model=model,
dataset=dataset,
model_classes=model_classes,
observed_classes=observed_classes,
task_type=task_type,
force_permutation=force_permutation,
permutation_kwargs=permutation_kwargs,
)
return fi, calculation_type
except (
errors.DeepchecksValueError,
errors.NumberOfFeaturesLimitError,
errors.DeepchecksTimeoutError,
errors.ModelValidationError,
errors.DatasetValidationError,
errors.DeepchecksSkippedFeatureImportance
) as error:
# DeepchecksValueError:
# if model validation failed;
# if it was not possible to calculate features importance;
# NumberOfFeaturesLimitError:
# if the number of features limit were exceeded;
# DatasetValidationError:
# if dataset did not meet requirements
# ModelValidationError:
# if wrong type of model was provided;
# if function failed to predict on model;
get_logger().warning('Features importance was not calculated:\n%s', error)
return None, None
class TaskType(Enum):
"""Enum containing supported task types."""
REGRESSION = 'regression'
BINARY = 'binary'
MULTICLASS = 'multiclass'
def floatify_dataframe(df: pd.DataFrame):
"""Return a dataframe where all the int columns are converted to floats.
Parameters
----------
df : pd.DataFrame
dataframe to convert
Returns
-------
pd.DataFrame
the dataframe where all the int columns are converted to floats
"""
dtype_dict = df.dtypes.to_dict()
for col_name, dtype in dtype_dict.items():
if is_integer_dtype(dtype):
dtype_dict[col_name] = 'float'
return df.astype(dtype_dict)
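A brief usage sketch of floatify_dataframe (importable as shown in the imports above); only the integer column changes dtype:
import pandas as pd
from deepchecks.utils.dataframes import floatify_dataframe

df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.5, 1.5, 2.5], 'c': ['x', 'y', 'z']})
print(floatify_dataframe(df).dtypes)  # 'a' becomes float64; 'b' and 'c' keep their dtypes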
def get_drift_plot_sidenote(max_num_categories_for_display: int, show_categories_by: str) -> str:
"""
Return a sidenote for the drift score plots regarding the number of categories shown in discrete distributions.
Parameters
----------
max_num_categories_for_display: int, default: 10
Max number of categories to show in plot.
show_categories_by: str, default: 'largest_difference'
Specify which categories to show for categorical features' graphs, as the number of shown categories is limited
by max_num_categories_for_display. Possible values:
- 'train_largest': Show the largest train categories.
- 'test_largest': Show the largest test categories.
- 'largest_difference': Show the largest difference between categories.
Returns
-------
str
sidenote for the drift score plots regarding the number of categories shown in discrete distributions.
"""
param_to_print_dict = {
'train_largest': 'largest categories (by train)',
'test_largest': 'largest categories (by test)',
'largest_difference': 'categories with largest difference between train and test'
}
return f'For discrete distribution plots, ' \
f'showing the top {max_num_categories_for_display} {param_to_print_dict[show_categories_by]}.'
class RareCategoryEncoder:
"""Encodes rare categories into an "other" parameter.
Note that this encoder assumes data is received as a DataFrame.
Parameters
----------
max_num_categories : int , default: 10
Indicates the maximum number of unique categories in a single categorical column
(rare categories will be changed to a form of "other")
cols : Optional[List[Hashable]] , default: None
Columns to limit the encoder to work on. If none are given, the encoder will work on all columns given in `fit`
"""
DEFAULT_OTHER_VALUE = 'OTHER_RARE_CATEGORY'
def __init__(
self,
max_num_categories: int = 10,
cols: Optional[List[Hashable]] = None
):
self.max_num_categories = max_num_categories
self.cols = cols
self._col_mapping = None
def fit(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument
"""Fit the encoder using given dataframe.
Parameters
----------
data : pd.DataFrame
data to fit from
y :
Unused, but needed for sklearn pipeline
"""
self._col_mapping = {}
if self.cols is not None:
for col in self.cols:
self._col_mapping[col] = self._fit_for_series(data[col])
else:
for col in data.columns:
self._col_mapping[col] = self._fit_for_series(data[col])
def transform(self, data: pd.DataFrame):
"""Transform given data according to columns processed in `fit`.
Parameters
----------
data : pd.DataFrame
data to transform
Returns
-------
DataFrame
transformed data
"""
if self._col_mapping is None:
raise RuntimeError('Cannot transform without fitting first')
if self.cols is not None:
data = data.copy()
data[self.cols] = data[self.cols].apply(lambda s: s.map(self._col_mapping[s.name]))
else:
data = data.apply(lambda s: s.map(self._col_mapping[s.name]))
return data
def fit_transform(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument
"""Run `fit` and `transform` on given data.
Parameters
----------
data : pd.DataFrame
data to fit on and transform
y :
Unused, but needed for sklearn pipeline
Returns
-------
DataFrame
transformed data
"""
self.fit(data)
return self.transform(data)
def _fit_for_series(self, series: pd.Series):
top_values = list(series.value_counts().head(self.max_num_categories).index)
other_value = self._get_unique_other_value(series)
mapper = defaultdict(lambda: other_value, {k: k for k in top_values})
return mapper
def _get_unique_other_value(self, series: pd.Series):
unique_values = list(series.unique())
other = self.DEFAULT_OTHER_VALUE
i = 0
while other in unique_values:
other = self.DEFAULT_OTHER_VALUE + str(i)
i += 1
return other
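A short usage sketch of RareCategoryEncoder, using the import path shown at the top of this snippet:
import pandas as pd
from deepchecks.utils.distribution.rare_category_encoder import RareCategoryEncoder

# Keep only the 2 most frequent categories per column; the rest map to "OTHER_RARE_CATEGORY".
df = pd.DataFrame({'color': ['red'] * 5 + ['blue'] * 3 + ['green', 'purple']})
encoder = RareCategoryEncoder(max_num_categories=2)
print(encoder.fit_transform(df)['color'].unique())  # ['red' 'blue' 'OTHER_RARE_CATEGORY']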
DEFAULT_DATASET_NAMES = ('Train', 'Test')
class Hashable(Protocol):
"""Trait for any hashable type that also defines comparison operators."""
def __hash__(self) -> int: # noqa: D105
...
def __le__(self, __value) -> bool: # noqa: D105
...
def __lt__(self, __value) -> bool: # noqa: D105
...
def __ge__(self, __value) -> bool: # noqa: D105
...
def __gt__(self, __value) -> bool: # noqa: D105
...
def __eq__(self, __value) -> bool: # noqa: D105
...
The provided code snippet includes necessary dependencies for implementing the `run_multivariable_drift` function. Write a Python function `def run_multivariable_drift(train_dataframe: pd.DataFrame, test_dataframe: pd.DataFrame, numerical_features: List[Hashable], cat_features: List[Hashable], sample_size: int, random_state: int, test_size: float, n_top_columns: int, min_feature_importance: float, max_num_categories_for_display: int, show_categories_by: str, min_meaningful_drift_score: float, with_display: bool, dataset_names: Tuple[str] = DEFAULT_DATASET_NAMES, feature_importance_timeout: int = 120, )` to solve the following problem:
Calculate multivariable drift.
Here is the function:
def run_multivariable_drift(train_dataframe: pd.DataFrame, test_dataframe: pd.DataFrame,
numerical_features: List[Hashable], cat_features: List[Hashable], sample_size: int,
random_state: int, test_size: float, n_top_columns: int, min_feature_importance: float,
max_num_categories_for_display: int, show_categories_by: str,
min_meaningful_drift_score: float,
with_display: bool,
dataset_names: Tuple[str] = DEFAULT_DATASET_NAMES,
feature_importance_timeout: int = 120,
):
"""Calculate multivariable drift."""
train_sample_df = train_dataframe.sample(sample_size, random_state=random_state)[numerical_features + cat_features]
test_sample_df = test_dataframe.sample(sample_size, random_state=random_state)[numerical_features + cat_features]
# create new dataset, with label denoting whether sample belongs to test dataset
domain_class_df = pd.concat([train_sample_df, test_sample_df])
domain_class_df[cat_features] = RareCategoryEncoder(254).fit_transform(domain_class_df[cat_features].astype(str))
domain_class_df[cat_features] = OrdinalEncoder().fit_transform(domain_class_df[cat_features].astype(str))
domain_class_labels = pd.Series([0] * len(train_sample_df) + [1] * len(test_sample_df))
x_train, x_test, y_train, y_test = train_test_split(floatify_dataframe(domain_class_df), domain_class_labels,
stratify=domain_class_labels,
random_state=random_state,
test_size=test_size)
# train a model to distinguish between train and test samples
domain_classifier = HistGradientBoostingClassifier(max_depth=2, max_iter=10, random_state=random_state,
categorical_features=[x in cat_features for x in
domain_class_df.columns])
domain_classifier.fit(x_train, y_train)
y_test.name = 'belongs_to_test'
domain_test_dataset = Dataset(pd.concat([x_test.reset_index(drop=True), y_test.reset_index(drop=True)], axis=1),
cat_features=cat_features, label='belongs_to_test')
# calculate feature importance of domain_classifier, which indicates which features
# best separate the two datasets.
fi, importance_type = calculate_feature_importance_or_none(
domain_classifier,
domain_test_dataset,
model_classes=[0, 1],
observed_classes=[0, 1],
task_type=TaskType.BINARY,
force_permutation=True,
permutation_kwargs={'n_repeats': 10,
'random_state': random_state,
'timeout': feature_importance_timeout,
'skip_messages': True}
)
fi = fi.sort_values(ascending=False) if fi is not None else None
domain_classifier_auc = roc_auc_score(y_test, domain_classifier.predict_proba(x_test)[:, 1])
drift_score = auc_to_drift_score(domain_classifier_auc)
values_dict = {
'domain_classifier_auc': domain_classifier_auc,
'domain_classifier_drift_score': drift_score,
'domain_classifier_feature_importance': fi.to_dict() if fi is not None else {},
}
feature_importance_note = f"""
<span>
The percents of explained dataset difference are the importance values for the feature calculated
using `{importance_type}`.
</span><br><br>
"""
if with_display and fi is not None and drift_score > min_meaningful_drift_score:
top_fi = fi.head(n_top_columns)
top_fi = top_fi.loc[top_fi > min_feature_importance]
else:
top_fi = None
if top_fi is not None and len(top_fi):
score = values_dict['domain_classifier_drift_score']
displays = [
feature_importance_note,
build_drift_plot(score),
'<h3>Main features contributing to drift</h3>',
N_TOP_MESSAGE % n_top_columns,
get_drift_plot_sidenote(max_num_categories_for_display, show_categories_by),
*(
display_dist(
train_sample_df[feature],
test_sample_df[feature],
top_fi,
cat_features,
max_num_categories_for_display,
show_categories_by,
dataset_names)
for feature in top_fi.index
)
]
else:
displays = None
return values_dict, displays
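A hedged end-to-end sketch of run_multivariable_drift on synthetic data; all parameter values are illustrative and it assumes deepchecks is installed:
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
train_df = pd.DataFrame({'num': rng.normal(0.0, 1.0, 1000), 'cat': rng.choice(['a', 'b'], 1000)})
test_df = pd.DataFrame({'num': rng.normal(0.5, 1.0, 1000), 'cat': rng.choice(['a', 'b'], 1000)})

values, displays = run_multivariable_drift(
    train_dataframe=train_df, test_dataframe=test_df,
    numerical_features=['num'], cat_features=['cat'],
    sample_size=500, random_state=42, test_size=0.3,
    n_top_columns=3, min_feature_importance=0.05,
    max_num_categories_for_display=10, show_categories_by='largest_difference',
    min_meaningful_drift_score=0.05, with_display=True,
)
print(values['domain_classifier_drift_score'])  # the shifted 'num' column should drive a non-trivial score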
505 | from typing import Optional, Tuple
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import deepchecks.ppscore as pps
from deepchecks.utils.plot import DEFAULT_DATASET_NAMES, colors
from deepchecks.utils.strings import format_percent
from deepchecks.utils.typing import Hashable
colors = {DEFAULT_DATASET_NAMES[0]: '#00008b', # dark blue
DEFAULT_DATASET_NAMES[1]: '#69b3a2', 'Baseline': '#b287a3', 'Generated': '#2191FB'}
def format_percent(ratio: float, floating_point: int = 2, scientific_notation_threshold: int = 4,
add_positive_prefix: bool = False) -> str:
"""Format percent for elegant display.
Parameters
----------
ratio : float
Ratio to be displayed as percent
floating_point: int , default: 2
Number of floating points to display
scientific_notation_threshold: int, default: 4
Max number of floating points for which to show number as float. If number of floating points is larger than
this parameter, scientific notation (e.g. "10E-5%") will be shown.
add_positive_prefix: bool, default: False
add plus sign before positive percentages (minus sign is always added for negative percentages).
Returns
-------
str
String of ratio as percent
"""
result: str
if ratio < 0:
ratio = -ratio
prefix = '-'
else:
prefix = '+' if add_positive_prefix and ratio != 0 else ''
if int(ratio) == ratio:
result = f'{int(ratio) * 100}%'
elif ratio > 1:
result = truncate_zero_percent(ratio, floating_point)
elif ratio < 10**(-(2+floating_point)):
if ratio > 10**(-(2+scientific_notation_threshold)):
result = truncate_zero_percent(ratio, scientific_notation_threshold)
else:
result = f'{Decimal(ratio * 100):.{floating_point}E}%'
elif ratio > (1-10**(-(2+floating_point))):
if floating_point > 0:
result = f'99.{"".join(["9"]*floating_point)}%'
else:
result = '99%'
else:
result = truncate_zero_percent(ratio, floating_point)
return prefix + result
The provided code snippet includes necessary dependencies for implementing the `pd_series_to_trace_with_diff` function. Write a Python function `def pd_series_to_trace_with_diff(s_pps: pd.Series, train_or_test: str, name: str, diffs: pd.Series)` to solve the following problem:
Create bar plotly bar trace out of pandas Series, with difference shown in percentages.
Here is the function:
def pd_series_to_trace_with_diff(s_pps: pd.Series, train_or_test: str, name: str, diffs: pd.Series):
"""Create bar plotly bar trace out of pandas Series, with difference shown in percentages."""
diffs_text = '(' + diffs.apply(format_percent, floating_point=0, add_positive_prefix=True) + ')'
text = diffs_text + '<br>' + s_pps.round(2).astype(str)
return go.Bar(x=s_pps.index,
y=s_pps,
name=name,
marker_color=colors.get(train_or_test),
text='<b>' + text + '</b>',
textposition='outside'
)
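A hedged usage sketch of pd_series_to_trace_with_diff with hypothetical PPS values; the annotated text in the comment follows format_percent as defined above:
import pandas as pd

s_pps = pd.Series({'age': 0.42, 'income': 0.31})
diffs = pd.Series({'age': 0.05, 'income': -0.02})
trace = pd_series_to_trace_with_diff(s_pps, 'Train', 'My train set', diffs)
print(trace.text)  # e.g. ['<b>(+5%)<br>0.42</b>', '<b>(-2%)<br>0.31</b>']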
506 | from typing import Optional, Tuple
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import deepchecks.ppscore as pps
from deepchecks.utils.plot import DEFAULT_DATASET_NAMES, colors
from deepchecks.utils.strings import format_percent
from deepchecks.utils.typing import Hashable
def get_pps_figure(per_class: bool, n_of_features: int, x_name: str = 'feature', xaxis_title: str = 'Column'):
"""If per_class is True, then no title is defined on the figure."""
fig = go.Figure()
fig.update_layout(
yaxis_title='Predictive Power Score (PPS)',
yaxis_range=(0, 1.05),
# NOTE:
# the range, in this case, is needed to fix a problem with
# too-wide bars when there are only one or two of them on
# the plot, and it also centers the bars on the plot.
# The lower bound of the range (range(min, max)) is pushed below zero because
# otherwise the bars will not be centered on the plot; they would
# appear on the left part of the plot (that is probably because of zero)
xaxis_range=(-3, n_of_features + 2),
legend=dict(x=1.0, y=1.0),
barmode='group',
height=500,
# Set the x-axis as category, since if the column names are numbers it will infer the x-axis as numerical
# and will show the values very far from each other
xaxis_type='category'
)
if per_class:
fig.update_layout(xaxis_title='Class')
else:
fig.update_layout(
title=f'Predictive Power Score (PPS) - Can a {x_name} predict the label by itself?',
xaxis_title=xaxis_title,
)
return fig
def pd_series_to_trace(s_pps: pd.Series, train_or_test: str, name: str):
"""Create bar plotly bar trace out of pandas Series."""
return go.Bar(x=s_pps.index,
y=s_pps,
name=name,
marker_color=colors.get(train_or_test),
text='<b>' + s_pps.round(2).astype(str) + '</b>',
textposition='outside'
)
DEFAULT_DATASET_NAMES = ('Train', 'Test')
class Hashable(Protocol):
"""Trait for any hashable type that also defines comparison operators."""
def __hash__(self) -> int: # noqa: D105
...
def __le__(self, __value) -> bool: # noqa: D105
...
def __lt__(self, __value) -> bool: # noqa: D105
...
def __ge__(self, __value) -> bool: # noqa: D105
...
def __gt__(self, __value) -> bool: # noqa: D105
...
def __eq__(self, __value) -> bool: # noqa: D105
...
The provided code snippet includes necessary dependencies for implementing the `get_feature_label_correlation` function. Write a Python function `def get_feature_label_correlation(train_df: pd.DataFrame, train_label_name: Optional[Hashable], test_df: pd.DataFrame, test_label_name: Optional[Hashable], ppscore_params: dict, n_show_top: int, min_pps_to_show: float = 0.05, random_state: int = None, with_display: bool = True, dataset_names: Tuple[str] = DEFAULT_DATASET_NAMES )` to solve the following problem:
Calculate the PPS for train, test and difference for feature label correlation checks. The PPS represents the ability of a feature to single-handedly predict another feature or label. This function calculates the PPS per feature for both train and test, and returns the data and display graph. Uses the ppscore package - for more info, see https://github.com/8080labs/ppscore Args: train_df: pd.DataFrame DataFrame of all train features and label train_label_name:: str name of label column in train dataframe test_df: DataFrame of all test features and label test_label_name: str name of label column in test dataframe ppscore_params: dict dictionary of additional parameters for the ppscore predictor function n_show_top: int Number of features to show, sorted by the magnitude of difference in PPS min_pps_to_show: float, default 0.05 Minimum PPS to show a class in the graph random_state: int, default None Random state for the ppscore.predictors function dataset_names: tuple, default: DEFAULT_DATASET_NAMES The names to show in the display for the first and second datasets. Returns: CheckResult value: dictionaries of PPS values for train, test and train-test difference. display: bar graph of the PPS of each feature.
Here is the function:
def get_feature_label_correlation(train_df: pd.DataFrame, train_label_name: Optional[Hashable],
test_df: pd.DataFrame,
test_label_name: Optional[Hashable], ppscore_params: dict,
n_show_top: int,
min_pps_to_show: float = 0.05,
random_state: int = None,
with_display: bool = True,
dataset_names: Tuple[str] = DEFAULT_DATASET_NAMES
):
"""
Calculate the PPS for train, test and difference for feature label correlation checks.
The PPS represents the ability of a feature to single-handedly predict another feature or label.
This function calculates the PPS per feature for both train and test, and returns the data and display graph.
Uses the ppscore package - for more info, see https://github.com/8080labs/ppscore
Args:
train_df: pd.DataFrame
DataFrame of all train features and label
train_label_name: str
name of label column in train dataframe
test_df:
DataFrame of all test features and label
test_label_name: str
name of label column in test dataframe
ppscore_params: dict
dictionary of additional parameters for the ppscore predictor function
n_show_top: int
Number of features to show, sorted by the magnitude of difference in PPS
min_pps_to_show: float, default 0.05
Minimum PPS to show a class in the graph
random_state: int, default None
Random state for the ppscore.predictors function
dataset_names: tuple, default: DEFAULT_DATASET_NAMES
The names to show in the display for the first and second datasets.
Returns:
CheckResult
value: dictionaries of PPS values for train, test and train-test difference.
display: bar graph of the PPS of each feature.
"""
df_pps_train = pps.predictors(df=train_df, y=train_label_name,
random_seed=random_state,
**ppscore_params)
df_pps_test = pps.predictors(df=test_df,
y=test_label_name,
random_seed=random_state, **ppscore_params)
s_pps_train = df_pps_train.set_index('x', drop=True)['ppscore']
s_pps_test = df_pps_test.set_index('x', drop=True)['ppscore']
s_difference = s_pps_train - s_pps_test
ret_value = {'train': s_pps_train.to_dict(), 'test': s_pps_test.to_dict(),
'train-test difference': s_difference.to_dict()}
if not with_display:
return ret_value, None
sorted_order_for_display = np.abs(s_difference).sort_values(ascending=False).head(n_show_top).index
s_pps_train_to_display = s_pps_train[sorted_order_for_display]
s_pps_test_to_display = s_pps_test[sorted_order_for_display]
fig = get_pps_figure(per_class=False, n_of_features=len(sorted_order_for_display))
fig.add_trace(pd_series_to_trace(s_pps_train_to_display, DEFAULT_DATASET_NAMES[0], dataset_names[0]))
fig.add_trace(pd_series_to_trace(s_pps_test_to_display, DEFAULT_DATASET_NAMES[1], dataset_names[1]))
# display only if at least one PPS score exceeds min_pps_to_show
display = [fig] if any(s_pps_train > min_pps_to_show) or any(s_pps_test > min_pps_to_show) else None
return ret_value, display
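A hedged usage sketch with small hypothetical train/test frames (deepchecks and its bundled ppscore are assumed to be installed):
import pandas as pd

train_df = pd.DataFrame({'f1': [1, 2, 3, 4] * 25, 'f2': list(range(100)), 'label': [0, 0, 1, 1] * 25})
test_df = pd.DataFrame({'f1': [1, 2, 3, 4] * 25, 'f2': list(range(100)), 'label': [0, 1, 0, 1] * 25})

value, display = get_feature_label_correlation(
    train_df, 'label', test_df, 'label',
    ppscore_params={}, n_show_top=5, random_state=0,
)
print(value['train-test difference'])  # per-feature PPS drop between train and test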
507 | from typing import Optional, Tuple
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import deepchecks.ppscore as pps
from deepchecks.utils.plot import DEFAULT_DATASET_NAMES, colors
from deepchecks.utils.strings import format_percent
from deepchecks.utils.typing import Hashable
def get_pps_figure(per_class: bool, n_of_features: int, x_name: str = 'feature', xaxis_title: str = 'Column'):
"""If per_class is True, then no title is defined on the figure."""
fig = go.Figure()
fig.update_layout(
yaxis_title='Predictive Power Score (PPS)',
yaxis_range=(0, 1.05),
# NOTE:
# the range, in this case, is needed to fix a problem with
# too-wide bars when there are only one or two of them on
# the plot, and it also centers the bars on the plot.
# The lower bound of the range (range(min, max)) is pushed below zero because
# otherwise the bars will not be centered on the plot; they would
# appear on the left part of the plot (that is probably because of zero)
xaxis_range=(-3, n_of_features + 2),
legend=dict(x=1.0, y=1.0),
barmode='group',
height=500,
# Set the x-axis as category, since if the column names are numbers it will infer the x-axis as numerical
# and will show the values very far from each other
xaxis_type='category'
)
if per_class:
fig.update_layout(xaxis_title='Class')
else:
fig.update_layout(
title=f'Predictive Power Score (PPS) - Can a {x_name} predict the label by itself?',
xaxis_title=xaxis_title,
)
return fig
def pd_series_to_trace(s_pps: pd.Series, train_or_test: str, name: str):
"""Create bar plotly bar trace out of pandas Series."""
return go.Bar(x=s_pps.index,
y=s_pps,
name=name,
marker_color=colors.get(train_or_test),
text='<b>' + s_pps.round(2).astype(str) + '</b>',
textposition='outside'
)
DEFAULT_DATASET_NAMES = ('Train', 'Test')
class Hashable(Protocol):
"""Trait for any hashable type that also defines comparison operators."""
def __hash__(self) -> int: # noqa: D105
...
def __le__(self, __value) -> bool: # noqa: D105
...
def __lt__(self, __value) -> bool: # noqa: D105
...
def __ge__(self, __value) -> bool: # noqa: D105
...
def __gt__(self, __value) -> bool: # noqa: D105
...
def __eq__(self, __value) -> bool: # noqa: D105
...
The provided code snippet includes necessary dependencies for implementing the `get_feature_label_correlation_per_class` function. Write a Python function `def get_feature_label_correlation_per_class(train_df: pd.DataFrame, train_label_name: Optional[Hashable], test_df: pd.DataFrame, test_label_name: Optional[Hashable], ppscore_params: dict, n_show_top: int, min_pps_to_show: float = 0.05, random_state: int = None, with_display: bool = True, dataset_names: Tuple[str] = DEFAULT_DATASET_NAMES )` to solve the following problem:
Calculate the PPS for train, test and difference for feature label correlation checks per class. The PPS represents the ability of a feature to single-handedly predict another feature or label. This function calculates the PPS per feature for both train and test, and returns the data and display graph. Uses the ppscore package - for more info, see https://github.com/8080labs/ppscore Args: train_df: pd.DataFrame DataFrame of all train features and label train_label_name:: str name of label column in train dataframe test_df: DataFrame of all test features and label test_label_name: str name of label column in test dataframe ppscore_params: dict dictionary of additional parameters for the ppscore predictor function n_show_top: int Number of features to show, sorted by the magnitude of difference in PPS min_pps_to_show: float, default 0.05 Minimum PPS to show a class in the graph random_state: int, default None Random state for the ppscore.predictors function dataset_names: tuple, default: DEFAULT_DATASET_NAMES The names to show in the display for the first and second datasets. Returns: CheckResult value: dictionaries of features, each value is 3 dictionaries of PPS values for train, test and train-test difference. display: bar graphs of the PPS for each feature.
Here is the function:
def get_feature_label_correlation_per_class(train_df: pd.DataFrame, train_label_name: Optional[Hashable],
test_df: pd.DataFrame,
test_label_name: Optional[Hashable], ppscore_params: dict,
n_show_top: int,
min_pps_to_show: float = 0.05,
random_state: int = None,
with_display: bool = True,
dataset_names: Tuple[str] = DEFAULT_DATASET_NAMES
):
"""
Calculate the PPS for train, test and difference for feature label correlation checks per class.
The PPS represents the ability of a feature to single-handedly predict another feature or label.
This function calculates the PPS per feature for both train and test, and returns the data and display graph.
Uses the ppscore package - for more info, see https://github.com/8080labs/ppscore
Args:
train_df: pd.DataFrame
DataFrame of all train features and label
train_label_name: str
name of label column in train dataframe
test_df:
DataFrame of all test features and label
test_label_name: str
name of label column in test dataframe
ppscore_params: dict
dictionary of additional parameters for the ppscore predictor function
n_show_top: int
Number of features to show, sorted by the magnitude of difference in PPS
min_pps_to_show: float, default 0.05
Minimum PPS to show a class in the graph
random_state: int, default None
Random state for the ppscore.predictors function
dataset_names: tuple, default: DEFAULT_DATASET_NAMES
The names to show in the display for the first and second datasets.
Returns:
CheckResult
value: dictionaries of features, each value is 3 dictionaries of PPS values for train, test and
train-test difference.
display: bar graphs of the PPS for each feature.
"""
df_pps_train_all = pd.DataFrame()
df_pps_test_all = pd.DataFrame()
df_pps_difference_all = pd.DataFrame()
display = []
ret_value = {}
for c in train_df[train_label_name].unique():
train_df_all_vs_one = train_df.copy()
test_df_all_vs_one = test_df.copy()
train_df_all_vs_one[train_label_name] = train_df_all_vs_one[train_label_name].apply(
lambda x: 1 if x == c else 0) # pylint: disable=cell-var-from-loop
test_df_all_vs_one[test_label_name] = test_df_all_vs_one[test_label_name].apply(
lambda x: 1 if x == c else 0) # pylint: disable=cell-var-from-loop
df_pps_train = pps.predictors(df=train_df_all_vs_one, y=train_label_name,
random_seed=random_state,
**ppscore_params)
df_pps_test = pps.predictors(df=test_df_all_vs_one,
y=test_label_name,
random_seed=random_state, **ppscore_params)
s_pps_train = df_pps_train.set_index('x', drop=True)['ppscore']
s_pps_test = df_pps_test.set_index('x', drop=True)['ppscore']
s_difference = s_pps_train - s_pps_test
df_pps_train_all[c] = s_pps_train
df_pps_test_all[c] = s_pps_test
df_pps_difference_all[c] = s_difference
for feature in df_pps_train_all.index:
s_train = df_pps_train_all.loc[feature]
s_test = df_pps_test_all.loc[feature]
s_difference = df_pps_difference_all.loc[feature]
ret_value[feature] = {'train': s_train.to_dict(), 'test': s_test.to_dict(),
'train-test difference': s_difference.to_dict()}
# display only if at least one PPS score exceeds min_pps_to_show
if with_display and any(s_train > min_pps_to_show) or any(s_test > min_pps_to_show):
sorted_order_for_display = np.abs(s_difference).sort_values(ascending=False).head(n_show_top).index
s_train_to_display = s_train[sorted_order_for_display]
s_test_to_display = s_test[sorted_order_for_display]
fig = get_pps_figure(per_class=True, n_of_features=len(sorted_order_for_display))
fig.update_layout(title=f'{feature}: Predictive Power Score (PPS) Per Class')
fig.add_trace(pd_series_to_trace(s_train_to_display, DEFAULT_DATASET_NAMES[0], dataset_names[0]))
fig.add_trace(pd_series_to_trace(s_test_to_display, DEFAULT_DATASET_NAMES[1], dataset_names[1]))
display.append(fig)
return ret_value, display
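A hedged sketch of the per-class variant with a hypothetical multiclass label; the return value holds one entry per feature, each mapping class to PPS for train, test and their difference:
import pandas as pd

train_df = pd.DataFrame({'f1': [0, 1, 2] * 30, 'label': [0, 1, 2] * 30})
test_df = pd.DataFrame({'f1': [0, 1, 2] * 30, 'label': [0, 2, 1] * 30})

value, _ = get_feature_label_correlation_per_class(
    train_df, 'label', test_df, 'label',
    ppscore_params={}, n_show_top=3, random_state=0,
)
print(value['f1'])  # {'train': {...}, 'test': {...}, 'train-test difference': {...}}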
508 | import abc
import html
import io
import pathlib
import sys
import typing as t
from multiprocessing import get_context, process
from tempfile import NamedTemporaryFile
from IPython.core.display import display, display_html
from ipywidgets import Widget
from deepchecks.core.serialization.abc import HTMLFormatter, HtmlSerializer, IPythonSerializer, WidgetSerializer
from deepchecks.utils.ipython import is_colab_env, is_databricks_env, is_kaggle_env, is_sagemaker_env, is_sphinx
from deepchecks.utils.logger import get_logger
from deepchecks.utils.strings import create_new_file_name, get_random_string, widget_to_html, widget_to_html_string
class DisplayableResult(abc.ABC):
"""Display API for the check/suite result objects."""
def widget_serializer(self) -> WidgetSerializer[t.Any]:
"""Return WidgetSerializer instance."""
raise NotImplementedError()
def ipython_serializer(self) -> IPythonSerializer[t.Any]:
"""Return IPythonSerializer instance."""
raise NotImplementedError()
def html_serializer(self) -> HtmlSerializer[t.Any]:
"""Return HtmlSerializer instance."""
raise NotImplementedError()
def show(
self,
as_widget: bool = True,
unique_id: t.Optional[str] = None,
**kwargs
) -> t.Optional[HTMLFormatter]:
"""Display result.
Parameters
----------
as_widget : bool, default True
whether to display result with help of ipywidgets or not
unique_id : Optional[str], default None
unique identifier of the result output
**kwargs :
other key-value arguments will be passed to the `Serializer.serialize`
method
Returns
-------
Optional[HTMLFormatter] :
when used by sphinx-gallery
"""
if is_sphinx():
# TODO: why we need this? add comments
html = widget_to_html_string( # pylint: disable=redefined-outer-name
self.widget_serializer.serialize(output_id=unique_id, **kwargs),
title=get_result_name(self),
requirejs=False,
connected=True,
full_html=False,
)
class TempSphinx:
def _repr_html_(self):
return html
return TempSphinx()
if is_kaggle_env() or is_databricks_env() or is_sagemaker_env():
self.show_in_iframe(as_widget=as_widget, unique_id=unique_id, **kwargs)
elif is_colab_env() and as_widget is True:
widget = self.widget_serializer.serialize(**kwargs)
content = widget_to_html_string(widget, title=get_result_name(self))
display_html(content, raw=True)
elif is_colab_env() and as_widget is False:
display(*self.ipython_serializer.serialize(**kwargs))
elif as_widget is True:
display_html(self.widget_serializer.serialize(
output_id=unique_id,
**kwargs
))
else:
display(*self.ipython_serializer.serialize(
output_id=unique_id,
**kwargs
))
def show_in_iframe(
self,
as_widget: bool = True,
unique_id: t.Optional[str] = None,
connected: bool = False,
**kwargs
):
"""Display result in an iframe.
Parameters
----------
as_widget : bool, default True
whether to display result with help of ipywidgets or not
unique_id : Optional[str], default None
unique identifier of the result output
connected: bool , default False
indicates whether internet connection is available or not,
if 'True' then CDN urls will be used to load javascript otherwise
javascript libraries will be injected directly into HTML output.
Set to 'False' to make results viewing possible when the internet
connection is not available.
**kwargs :
other key-value arguments will be passed to the `Serializer.serialize`
method
"""
output_id = unique_id or get_random_string(n=25)
if is_colab_env() and as_widget is True:
widget = self.widget_serializer.serialize(**kwargs)
content = widget_to_html_string(widget, title=get_result_name(self), connected=True)
display_html(content, raw=True)
elif is_colab_env() and as_widget is False:
display(*self.ipython_serializer.serialize(**kwargs))
elif as_widget is True:
widget = self.widget_serializer.serialize(output_id=output_id, is_for_iframe_with_srcdoc=True, **kwargs)
content = widget_to_html_string(widget, title=get_result_name(self), connected=connected)
display_html(iframe(srcdoc=content), raw=True)
else:
display_html(
iframe(srcdoc=self.html_serializer.serialize(
output_id=output_id,
full_html=True,
include_requirejs=True,
include_plotlyjs=True,
is_for_iframe_with_srcdoc=True,
connected=connected,
**kwargs
)),
raw=True
)
def show_in_window(self, **kwargs):
"""Display result in a separate window."""
display_in_gui(self)
def show_not_interactive(
self,
unique_id: t.Optional[str] = None,
**kwargs
):
"""Display the not interactive version of result output.
In this case, ipywidgets will not be used and plotly
figures will be transformed into png images.
Parameters
----------
unique_id : Optional[str], default None
unique identifier of the result output
**kwargs :
other key-value arguments will be passed to the `Serializer.serialize`
method
"""
display(*self.ipython_serializer.serialize(
output_id=unique_id,
plotly_to_image=True,
**kwargs
))
def _ipython_display_(self, **kwargs):
"""Display result.."""
self.show(**kwargs)
def to_widget(self, **kwargs) -> Widget:
"""Serialize result into a ipywidgets.Widget instance."""
raise NotImplementedError()
def to_json(self, **kwargs) -> str:
"""Serialize result into a json string."""
raise NotImplementedError()
def to_wandb(self, **kwargs) -> 'WBValue':
"""Send result to the wandb."""
raise NotImplementedError()
def save_as_html(
self,
file: t.Union[str, io.TextIOWrapper, None] = None,
**kwargs
) -> t.Optional[str]:
"""Save a result to an HTML file.
Parameters
----------
file : filename or file-like object
The file to write the HTML output to. If None writes to output.html
Returns
-------
Optional[str] :
name of the newly created file
"""
raise NotImplementedError()
def _open_file_in_window(filename: t.Union[str, pathlib.Path], exit_after: bool = True):
from PyQt5.QtCore import QUrl # pylint: disable=import-outside-toplevel
from PyQt5.QtWebEngineWidgets import QWebEngineView # pylint: disable=import-outside-toplevel
from PyQt5.QtWidgets import QApplication # pylint: disable=import-outside-toplevel
filepath = pathlib.Path(filename) if isinstance(filename, str) else filename
try:
app = QApplication.instance()
if app is None:
app = QApplication([])
app.lastWindowClosed.connect(app.quit)
web = QWebEngineView()
web.setWindowTitle('deepchecks')
web.setGeometry(0, 0, 1200, 1200)
web.load(QUrl.fromLocalFile(str(filepath)))
web.show()
exit_code = app.exec_()
if exit_after:
sys.exit(exit_code)
finally:
filepath.unlink()
def save_as_html(
serializer: t.Union[HtmlSerializer[T], WidgetSerializer[T]],
file: t.Union[str, io.TextIOWrapper, None] = None,
requirejs: bool = True,
connected: bool = False,
**kwargs
) -> t.Optional[str]:
"""Save a result to an HTML file.
Parameters
----------
serializer : Union[HtmlSerializer[T], WidgetSerializer[T]]
serializer to prepare an output
file : filename or file-like object
The file to write the HTML output to. If None writes to output.html
requirejs: bool , default: True
whether to include requirejs library into output HTML or not
connected: bool , default False
indicates whether internet connection is available or not,
if 'True' then CDN urls will be used to load javascript otherwise
javascript libraries will be injected directly into HTML output.
Set to 'False' to make results viewing possible when the internet
connection is not available.
Returns
-------
Optional[str] :
name of the newly created file
"""
if file is None:
file = 'output.html'
if isinstance(file, str):
file = create_new_file_name(file)
if isinstance(serializer, WidgetSerializer):
widget_to_html(
serializer.serialize(**kwargs),
html_out=file,
title=get_result_name(serializer.value),
requirejs=requirejs,
connected=connected,
)
elif isinstance(serializer, HtmlSerializer):
html = serializer.serialize( # pylint: disable=redefined-outer-name
full_html=True,
include_requirejs=requirejs,
include_plotlyjs=True,
connected=connected,
**kwargs
)
if isinstance(file, str):
with open(file, 'w', encoding='utf-8') as f:
f.write(html)
elif isinstance(file, io.TextIOWrapper):
file.write(html)
else:
raise TypeError(f'Unsupported type of "file" parameter - {type(file)}')
else:
raise TypeError(f'Unsupported serializer type - {type(serializer)}')
if isinstance(file, str):
return file
def get_logger() -> logging.Logger:
"""Retutn the deepchecks logger."""
return _logger
The provided code snippet includes necessary dependencies for implementing the `display_in_gui` function. Write a Python function `def display_in_gui(result: DisplayableResult)` to solve the following problem:
Display suite result or check result in a new python gui window.
Here is the function:
def display_in_gui(result: DisplayableResult):
"""Display suite result or check result in a new python gui window."""
try:
from PyQt5.QtCore import QUrl # pylint: disable=W0611,C0415 # noqa
from PyQt5.QtWebEngineWidgets import QWebEngineView # pylint: disable=W0611,C0415 # noqa
from PyQt5.QtWidgets import QApplication # pylint: disable=W0611,C0415 # noqa
except ImportError:
get_logger().error(
'Missing packages in order to display result in GUI, '
'either run "pip install pyqt5, pyqtwebengine" '
'or use "result.save_as_html()" to save result'
)
else:
with NamedTemporaryFile(suffix='.html', prefix='deepchecks-', delete=False) as file:
result.save_as_html(io.TextIOWrapper(file))
filepath = file.name
# If running under "__filename__ == __main__" then we can spawn a new process. Else run the function directly.
if getattr(process.current_process(), '_inheriting', False):
_open_file_in_window(filepath)
else:
ctx = get_context('spawn')
ctx.Process(target=_open_file_in_window, args=(str(filepath),)).start()
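A hedged usage sketch. It needs "pip install pyqt5 pyqtwebengine", and the check and dataset-loader names below are assumptions about deepchecks' tabular API that may differ between versions:
# FeatureLabelCorrelation and the iris loader are assumed API names (may vary by version).
from deepchecks.tabular.checks import FeatureLabelCorrelation
from deepchecks.tabular.datasets.classification import iris

result = FeatureLabelCorrelation().run(iris.load_data(as_train_test=False))
display_in_gui(result)     # opens the rendered HTML report in a PyQt window
# result.show_in_window()  # equivalent convenience method on the result object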
509 | import abc
import html
import io
import pathlib
import sys
import typing as t
from multiprocessing import get_context, process
from tempfile import NamedTemporaryFile
from IPython.core.display import display, display_html
from ipywidgets import Widget
from deepchecks.core.serialization.abc import HTMLFormatter, HtmlSerializer, IPythonSerializer, WidgetSerializer
from deepchecks.utils.ipython import is_colab_env, is_databricks_env, is_kaggle_env, is_sagemaker_env, is_sphinx
from deepchecks.utils.logger import get_logger
from deepchecks.utils.strings import create_new_file_name, get_random_string, widget_to_html, widget_to_html_string
if t.TYPE_CHECKING:
from wandb.sdk.data_types.base_types.wb_value import WBValue # pylint: disable=unused-import
def get_random_string(n: int = 5):
"""Return random string at the given size.
Parameters
----------
n : int , default: 5
the size of the string to return.
Returns
-------
str
a random string
"""
return ''.join(random.choices(ascii_uppercase + digits, k=n))
The provided code snippet includes necessary dependencies for implementing the `iframe` function. Write a Python function `def iframe( *, id: t.Optional[str] = None, # pylint: disable=redefined-builtin height: str = '600px', width: str = '100%', allow: str = 'fullscreen', frameborder: str = '0', with_fullscreen_btn: bool = True, **attributes ) -> str` to solve the following problem:
Return html iframe tag.
Here is the function:
def iframe(
*,
id: t.Optional[str] = None, # pylint: disable=redefined-builtin
height: str = '600px',
width: str = '100%',
allow: str = 'fullscreen',
frameborder: str = '0',
with_fullscreen_btn: bool = True,
**attributes
) -> str:
"""Return html iframe tag."""
if id is None:
id = f'deepchecks-result-iframe-{get_random_string()}'
attributes = {
'id': id,
'height': height,
'width': width,
'allow': allow,
'frameborder': frameborder,
**attributes
}
attributes = {
k: v
for k, v
in attributes.items()
if v is not None
}
if 'srcdoc' in attributes:
attributes['srcdoc'] = html.escape(attributes['srcdoc'])
attributes = '\n'.join([
f'{k}="{v}"'
for k, v in attributes.items()
])
if not with_fullscreen_btn:
return f'<iframe {attributes}></iframe>'
fullscreen_script = (
f"document.querySelector('#{id}').requestFullscreen();"
)
return f"""
<div style="display: flex; justify-content: flex-end; padding: 1rem 2rem 1rem 2rem;">
<button onclick="{fullscreen_script}" >
Full Screen
</button>
</div>
<iframe allowfullscreen {attributes}></iframe>
""" | Return html iframe tag. |
510 | import textwrap
import typing as t
from plotly.basedatatypes import BaseFigure
from plotly.io import to_html
from typing_extensions import Literal
from deepchecks.core import check_result as check_types
from deepchecks.core.resources import requirejs_script
from deepchecks.core.serialization.abc import ABCDisplayItemsHandler, HtmlSerializer
from deepchecks.core.serialization.common import aggregate_conditions, form_output_anchor, plotlyjs_script
from deepchecks.core.serialization.dataframe.html import DataFrameSerializer as DataFrameHtmlSerializer
from deepchecks.utils.html import imagetag, linktag
CheckResultSection = t.Union[
Literal['condition-table'],
Literal['additional-output'],
]
The provided code snippet includes necessary dependencies for implementing the `verify_include_parameter` function. Write a Python function `def verify_include_parameter( include: t.Optional[t.Sequence[CheckResultSection]] = None ) -> t.Set[CheckResultSection]` to solve the following problem:
Verify CheckResultSection sequence.
Here is the function:
def verify_include_parameter(
include: t.Optional[t.Sequence[CheckResultSection]] = None
) -> t.Set[CheckResultSection]:
"""Verify CheckResultSection sequence."""
sections = t.cast(
t.Set[CheckResultSection],
{'condition-table', 'additional-output'}
)
if include is None:
sections_to_include = sections
elif len(include) == 0:
raise ValueError('include parameter cannot be empty')
else:
sections_to_include = set(include)
if len(sections_to_include.difference(sections)) > 0:
raise ValueError(
'include parameter must contain '
'Union[Literal["condition-table"], Literal["additional-output"]]'
)
return sections_to_include | Verify CheckResultSection sequence. |
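For reference, a few call examples that follow directly from the validation logic above.
verify_include_parameter(None)                   # {'condition-table', 'additional-output'}
verify_include_parameter(['condition-table'])    # {'condition-table'}
verify_include_parameter(['summary'])            # raises ValueError - unknown section name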
511 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
The provided code snippet includes necessary dependencies for implementing the `form_output_anchor` function. Write a Python function `def form_output_anchor(output_id: str) -> str` to solve the following problem:
Form unique output anchor.
Here is the function:
def form_output_anchor(output_id: str) -> str:
"""Form unique output anchor."""
return f'summary_{output_id}' | Form unique output anchor. |
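A one-line example of the anchor format produced above.
form_output_anchor('a1b2c')   # 'summary_a1b2c'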
512 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
TDOMWidget = t.TypeVar('TDOMWidget', bound=DOMWidget)
The provided code snippet includes necessary dependencies for implementing the `normalize_widget_style` function. Write a Python function `def normalize_widget_style(w: TDOMWidget) -> TDOMWidget` to solve the following problem:
Add additional style classes to the widget.
Here is the function:
def normalize_widget_style(w: TDOMWidget) -> TDOMWidget:
"""Add additional style classes to the widget."""
return (
w
.add_class('rendered_html')
.add_class('jp-RenderedHTMLCommon')
.add_class('jp-RenderedHTML')
.add_class('jp-OutputArea-output')
) | Add additional style classes to the widget. |
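A short sketch of how the helper above is typically applied (assumes ipywidgets is installed).
from ipywidgets import HTML
widget = normalize_widget_style(HTML(value='<p>deepchecks output</p>'))
# the widget now carries Jupyter's HTML-rendering CSS classes, so it picks up notebook styling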
513 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
The provided code snippet includes necessary dependencies for implementing the `prettify` function. Write a Python function `def prettify(data: t.Any, indent: int = 3) -> str` to solve the following problem:
Prettify data.
Here is the function:
def prettify(data: t.Any, indent: int = 3) -> str:
"""Prettify data."""
return json.dumps(data, indent=indent, default=repr) | Prettify data. |
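For illustration, the output for a small dict (3-space indent; non-serializable values fall back to repr).
print(prettify({'passed': True, 'failed_checks': ['MixedNulls']}))
# {
#    "passed": true,
#    "failed_checks": [
#       "MixedNulls"
#    ]
# }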
514 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
def flatten(
l: DeepIterable[T],
stop: t.Optional[t.Callable[[t.Any], bool]] = None,
) -> t.Iterable[T]:
"""Flatten nested iterables."""
for it in l:
if callable(stop) and stop(it) is True:
yield t.cast(T, it)
elif isinstance(it, (list, tuple, set, t.Generator, t.Iterator)):
yield from flatten(it, stop=stop)
else:
yield t.cast(T, it)
def un_numpy(val):
"""Convert numpy value to native value.
Parameters
----------
val :
The value to convert.
Returns
-------
returns the numpy value in a native type.
"""
if isinstance(val, np.str_):
# NOTE:
# 'np.str_' is instance of the 'np.generic' but
# 'np.isnan(np.str_())' raises an error with a next message:
# >> TypeError: ufunc 'isnan' not supported for the input types...)
#
# therefore this 'if' statement is needed
return val.item()
if isinstance(val, np.generic):
if np.isnan(val):
return None
return val.item()
if isinstance(val, np.ndarray):
return val.tolist()
return val
The provided code snippet includes necessary dependencies for implementing the `normalize_value` function. Write a Python function `def normalize_value(value: object) -> t.Any` to solve the following problem:
Take an object and return a JSON-safe representation of it. Parameters ---------- value : object value to normalize Returns ------- Any of the basic builtin datatypes
Here is the function:
def normalize_value(value: object) -> t.Any:
"""Take an object and return a JSON-safe representation of it.
Parameters
----------
value : object
value to normalize
Returns
-------
Any of the basic builtin datatypes
"""
if isinstance(value, pd.DataFrame):
return value.to_json(orient='records')
elif isinstance(value, Styler):
return value.data.to_json(orient='records')
elif isinstance(value, (np.generic, np.ndarray)):
return un_numpy(value)
else:
return Pickler(unpicklable=False).flatten(value) | Take an object and return a JSON-safe representation of it. Parameters ---------- value : object value to normalize Returns ------- Any of the basic builtin datatypes
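A few examples of the normalization behaviour described above.
import numpy as np
import pandas as pd
normalize_value(np.float64(0.5))            # 0.5 (native float)
normalize_value(np.array([1, 2, 3]))        # [1, 2, 3]
normalize_value(pd.DataFrame({'a': [1]}))   # '[{"a":1}]' (records-oriented JSON string)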
515 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
def hide_index_for_display(df: t.Union[pd.DataFrame, pd.io.formats.style.Styler]) -> pd.io.formats.style.Styler:
"""Hide the index of a dataframe for display."""
styler = df.style if isinstance(df, pd.DataFrame) else df
if hasattr(styler, 'hide'):
return styler.hide(axis='index')
return styler.hide_index()
def linktag(
text: str,
style: t.Optional[t.Dict[str, t.Any]] = None,
is_for_iframe_with_srcdoc : bool = False,
**kwargs
) -> str:
"""Return html a tag.
Parameters
----------
style : Optional[Dict[str, Any]], default None
tag style rules
is_for_iframe_with_srcdoc : bool, default False
anchor links, in order to work within iframe require additional prefix
'about:srcdoc'. This flag tells function whether to add that prefix to
the anchor link or not
**kwargs :
other tag attributes
Returns
-------
str
"""
if is_for_iframe_with_srcdoc and kwargs.get('href', '').startswith('#'):
kwargs['href'] = f'about:srcdoc{kwargs["href"]}'
if style is not None:
kwargs['style'] = '\n'.join([
f'{k}: {v};'
for k, v in style.items()
if v is not None
])
attrs = ' '.join([f'{k}="{v}"' for k, v in kwargs.items()])
return f'<a {attrs}>{text}</a>'
def truncate_string(long_string: str, max_length: int):
"""Return the long string with ellipsis if above max_length.
Parameters
----------
long_string : str
the string
max_length : int
the string maximum length
Returns
-------
str
the string with ellipsis.
"""
if len(long_string) <= max_length:
return long_string
return long_string[:max_length] + '...'
The provided code snippet includes necessary dependencies for implementing the `aggregate_conditions` function. Write a Python function `def aggregate_conditions( check_results: t.Union['check_types.CheckResult', t.Sequence['check_types.CheckResult']], max_info_len: int = 3000, include_icon: bool = True, include_check_name: bool = False, output_id: t.Optional[str] = None, is_for_iframe_with_srcdoc: bool = False ) -> pd.DataFrame` to solve the following problem:
Return the conditions table as DataFrame. Parameters ---------- check_results : Union['CheckResult', Sequence['CheckResult']] check results to show conditions of. max_info_len : int max length of the additional info. include_icon : bool , default: True if to show the html condition result icon or the enum include_check_name : bool, default False whether to include check name into dataframe or not output_id : str unique identifier of the output, it will be used to form a link (html '<a></a>' tag) to the check result full output is_for_iframe_with_srcdoc : bool, default False anchor links, in order to work within iframe require additional prefix 'about:srcdoc'. This flag tells function whether to add that prefix to the anchor links or not Returns ------- pd.Dataframe: the condition table.
Here is the function:
def aggregate_conditions(
check_results: t.Union['check_types.CheckResult', t.Sequence['check_types.CheckResult']],
max_info_len: int = 3000,
include_icon: bool = True,
include_check_name: bool = False,
output_id: t.Optional[str] = None,
is_for_iframe_with_srcdoc: bool = False
) -> pd.DataFrame:
"""Return the conditions table as DataFrame.
Parameters
----------
check_results : Union['CheckResult', Sequence['CheckResult']]
check results to show conditions of.
max_info_len : int
max length of the additional info.
include_icon : bool , default: True
if to show the html condition result icon or the enum
include_check_name : bool, default False
whether to include check name into dataframe or not
output_id : str
unique identifier of the output, it will be used to
form a link (html '<a></a>' tag) to the check result
full output
is_for_iframe_with_srcdoc : bool, default False
anchor links, in order to work within iframe require additional prefix
'about:srcdoc'. This flag tells function whether to add that prefix to
the anchor links or not
Returns
-------
pd.Dataframe:
the condition table.
"""
# NOTE: if you modified this function also modify 'sort_check_results'
check_results = (
[check_results]
if isinstance(check_results, check_types.CheckResult)
else check_results
)
data = []
for check_result in check_results:
for cond_result in check_result.conditions_results:
priority = cond_result.priority
icon = cond_result.get_icon() if include_icon else cond_result.category.value
check_header = check_result.get_header()
# If there is no display we won't generate a section to link to
if output_id and check_result.display:
link = linktag(
text=check_header,
href=f'#{check_result.get_check_id(output_id)}',
is_for_iframe_with_srcdoc=is_for_iframe_with_srcdoc
)
else:
link = check_header
# if it has no display show on bottom for the category (lower priority)
priority += 0.1
data.append([
icon, link, cond_result.name, cond_result.details, priority
])
df = pd.DataFrame(
data=data,
columns=['Status', 'Check', 'Condition', 'More Info', '$priority']
)
df.sort_values(by=['$priority'], inplace=True)
df.drop('$priority', axis=1, inplace=True)
if include_check_name is False:
df.drop('Check', axis=1, inplace=True)
df['More Info'] = df['More Info'].map(lambda x: truncate_string(x, max_info_len))
return hide_index_for_display(df) | Return the conditions table as DataFrame. Parameters ---------- check_results : Union['CheckResult', Sequence['CheckResult']] check results to show conditions of. max_info_len : int max length of the additional info. include_icon : bool , default: True if to show the html condition result icon or the enum include_check_name : bool, default False whether to include check name into dataframe or not output_id : str unique identifier of the output, it will be used to form a link (html '<a></a>' tag) to the check result full output is_for_iframe_with_srcdoc : bool, default False anchor links, in order to work within iframe require additional prefix 'about:srcdoc'. This flag tells function whether to add that prefix to the anchor links or not Returns ------- pd.Dataframe: the condition table. |
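A hedged usage sketch; `check_result` is assumed to be a CheckResult produced by running any check that has conditions attached.
styler = aggregate_conditions(check_result, include_check_name=True, output_id='run42')
styler.data   # the underlying DataFrame with Status / Check / Condition / More Info columns, sorted by priority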
516 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
def linktag(
text: str,
style: t.Optional[t.Dict[str, t.Any]] = None,
is_for_iframe_with_srcdoc : bool = False,
**kwargs
) -> str:
"""Return html a tag.
Parameters
----------
style : Optional[Dict[str, Any]], default None
tag style rules
is_for_iframe_with_srcdoc : bool, default False
anchor links, in order to work within iframe require additional prefix
'about:srcdoc'. This flag tells function whether to add that prefix to
the anchor link or not
**kwargs :
other tag attributes
Returns
-------
str
"""
if is_for_iframe_with_srcdoc and kwargs.get('href', '').startswith('#'):
kwargs['href'] = f'about:srcdoc{kwargs["href"]}'
if style is not None:
kwargs['style'] = '\n'.join([
f'{k}: {v};'
for k, v in style.items()
if v is not None
])
attrs = ' '.join([f'{k}="{v}"' for k, v in kwargs.items()])
return f'<a {attrs}>{text}</a>'
The provided code snippet includes necessary dependencies for implementing the `create_results_dataframe` function. Write a Python function `def create_results_dataframe( results: t.Sequence['check_types.CheckResult'], output_id: t.Optional[str] = None, is_for_iframe_with_srcdoc: bool = False, ) -> pd.DataFrame` to solve the following problem:
Create dataframe with check results. Parameters ---------- results : Sequence['CheckResult'] check results output_id : str unique identifier of the output, it will be used to form a link (html '<a></a>' tag) to the check result full output is_for_iframe_with_srcdoc : bool, default False anchor links, in order to work within iframe require additional prefix 'about:srcdoc'. This flag tells function whether to add that prefix to the anchor links or not Returns ------- pd.Dataframe: the condition table.
Here is the function:
def create_results_dataframe(
results: t.Sequence['check_types.CheckResult'],
output_id: t.Optional[str] = None,
is_for_iframe_with_srcdoc: bool = False,
) -> pd.DataFrame:
"""Create dataframe with check results.
Parameters
----------
results : Sequence['CheckResult']
check results
output_id : str
unique identifier of the output, it will be used to
form a link (html '<a></a>' tag) to the check result
full output
is_for_iframe_with_srcdoc : bool, default False
anchor links, in order to work within iframe require additional prefix
'about:srcdoc'. This flag tells function whether to add that prefix to
the anchor links or not
Returns
-------
pd.Dataframe:
the results table.
"""
data = []
for check_result in results:
check_header = check_result.get_header()
if output_id and check_result.display:
header = linktag(
text=check_header,
href=f'#{check_result.get_check_id(output_id)}',
is_for_iframe_with_srcdoc=is_for_iframe_with_srcdoc
)
else:
header = check_header
summary = check_result.get_metadata(with_doc_link=True)['summary']
data.append([header, summary])
return pd.DataFrame(
data=data,
columns=['Check', 'Summary']
) | Create dataframe with check results. Parameters ---------- results : Sequence['CheckResult'] check results output_id : str unique identifier of the output, it will be used to form a link (html '<a></a>' tag) to the check result full output is_for_iframe_with_srcdoc : bool, default False anchor links, in order to work within iframe require additional prefix 'about:srcdoc'. This flag tells function whether to add that prefix to the anchor links or not Returns ------- pd.Dataframe: the condition table. |
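A hedged sketch; `results` is assumed to be a list of CheckResult objects (for example collected from a suite run).
df = create_results_dataframe(results, output_id='run42')
list(df.columns)   # ['Check', 'Summary']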
517 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
The provided code snippet includes necessary dependencies for implementing the `create_failures_dataframe` function. Write a Python function `def create_failures_dataframe( failures: t.Sequence[t.Union['check_types.CheckFailure', 'check_types.CheckResult']] ) -> pd.DataFrame` to solve the following problem:
Create dataframe with check failures. Parameters ---------- failures : Sequence[Union[CheckFailure, CheckResult]] check failures Returns ------- pd.Dataframe: the condition table.
Here is the function:
def create_failures_dataframe(
failures: t.Sequence[t.Union['check_types.CheckFailure', 'check_types.CheckResult']]
) -> pd.DataFrame:
"""Create dataframe with check failures.
Parameters
----------
failures : Sequence[Union[CheckFailure, CheckResult]]
check failures
Returns
-------
pd.Dataframe:
the failures table.
"""
data = []
for it in failures:
if isinstance(it, check_types.CheckResult):
data.append([it.get_header(), 'Nothing found', 2])
elif isinstance(it, check_types.CheckFailure):
message = (
it.exception.html
if isinstance(it.exception, errors.DeepchecksBaseError)
else str(it.exception)
)
error_types = (
errors.DatasetValidationError,
errors.DeepchecksProcessError,
)
if isinstance(it.exception, error_types):
message = f'{type(it.exception).__name__}: {message}'
data.append((it.header, message, 1))
else:
raise TypeError(f'Unknown result type - {type(it).__name__}')
df = pd.DataFrame(data=data, columns=['Check', 'Reason', 'priority'])
df.sort_values(by=['priority'], inplace=True)
df.drop('priority', axis=1, inplace=True)
return df | Create dataframe with check failures. Parameters ---------- failures : Sequence[Union[CheckFailure, CheckResult]] check failures Returns ------- pd.Dataframe: the condition table. |
518 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
The provided code snippet includes necessary dependencies for implementing the `plotlyjs_script` function. Write a Python function `def plotlyjs_script(connected: bool = True) -> str` to solve the following problem:
Return plotly activation script in the requirejs environment. Parameters ---------- connected : bool, default True Returns ------- str
Here is the function:
def plotlyjs_script(connected: bool = True) -> str:
"""Return plotly activation script in the requirejs enviroment.
Parameters
----------
connected : bool, default True
Returns
-------
str
"""
if connected is True:
# Connected so we configure requirejs with the plotly CDN
script = textwrap.dedent("""
{win_config}
{mathjax_config}
<script type="text/javascript">
if (typeof require !== 'undefined') {{
require.undef("plotly");
requirejs.config({{
paths: {{'plotly': ['{plotly_cdn}']}}
}});
require(
['plotly'],
function(Plotly) {{
window._Plotly = Plotly;
window.Plotly = Plotly;
console.log('Loaded plotly successfully');
}},
function() {{console.log('Failed to load plotly')}}
);
}} else {{
console.log('requirejs is not present');
}}
</script>
""")
return script.format(
win_config=plotlyhtml._window_plotly_config,
mathjax_config=plotlyhtml._mathjax_config,
plotly_cdn=plotly_cdn_url().rstrip('.js'),
)
else:
# If not connected then we embed a copy of the plotly.js library
script = textwrap.dedent("""
{win_config}
{mathjax_config}
<script type="text/javascript">
if (typeof require !== 'undefined') {{
require.undef("plotly");
define('plotly', function(require, exports, module) {{
{script}
}});
require(
['plotly'],
function(Plotly) {{
window._Plotly = Plotly;
window.Plotly = Plotly;
console.log('Loaded plotly successfully');
}},
function() {{console.log('Failed to load plotly')}}
);
}} else {{
console.log('requirejs is not present');
}}
</script>
""")
return script.format(
script=get_plotlyjs(),
win_config=plotlyhtml._window_plotly_config,
mathjax_config=plotlyhtml._mathjax_config,
) | Return plotly activation script in the requirejs environment. Parameters ---------- connected : bool, default True Returns ------- str
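A short usage note: the returned string is meant to be embedded in the head of the generated HTML report.
cdn_script = plotlyjs_script(connected=True)       # registers the plotly CDN bundle with requirejs
offline_script = plotlyjs_script(connected=False)  # embeds the full plotly.js source (several MB) for offline viewing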
519 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
The provided code snippet includes necessary dependencies for implementing the `read_matplot_figures` function. Write a Python function `def read_matplot_figures() -> t.List[io.BytesIO]` to solve the following problem:
Return all active matplot figures.
Here is the function:
def read_matplot_figures() -> t.List[io.BytesIO]:
"""Return all active matplot figures."""
output = []
figures = [plt.figure(n) for n in plt.get_fignums()]
for fig in figures:
buffer = io.BytesIO()
fig.savefig(buffer, format='png')
buffer.seek(0)
output.append(buffer)
fig.clear()
plt.close(fig)
return output | Return all active matplot figures. |
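A minimal sketch of the helper above in use (assumes matplotlib is installed).
import matplotlib.pyplot as plt
plt.plot([1, 2, 3])
buffers = read_matplot_figures()     # [BytesIO] holding the PNG bytes; the figure is cleared and closed
png_bytes = buffers[0].getvalue()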
520 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
The provided code snippet includes necessary dependencies for implementing the `switch_matplot_backend` function. Write a Python function `def switch_matplot_backend(backend: str = 'agg')` to solve the following problem:
Switch matplot backend.
Here is the function:
@contextmanager
def switch_matplot_backend(backend: str = 'agg'):
"""Switch matplot backend."""
previous = matplotlib.get_backend()
plt.switch_backend(backend)
yield
plt.switch_backend(previous) | Switch matplot backend. |
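Because the function yields, it is meant to be used as a context manager (hence the contextmanager import); a short sketch where 'agg' renders figures off-screen:
import matplotlib.pyplot as plt
with switch_matplot_backend('agg'):
    plt.plot([1, 2, 3])   # drawn with the non-interactive agg backend
# the previously active backend is restored here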
521 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
The provided code snippet includes necessary dependencies for implementing the `concatv_images` function. Write a Python function `def concatv_images(images, gap=10)` to solve the following problem:
Concatenate a list of images vertically. Parameters ---------- images : List[PIL.Image.Image] list of images gap : int, default 10 gap between images Returns ------- PIL.Image.Image
Here is the function:
def concatv_images(images, gap=10):
"""Concatenate a list of images vertically.
Parameters
----------
images : List[PIL.Image.Image]
list of images
gap : int, default 10
gap between images
Returns
-------
PIL.Image.Image
"""
try:
import PIL.Image as pilimage
except ImportError as e:
raise ImportError(
'concatv_images function requires the PIL package. '
'To get it, run "pip install pillow".'
) from e
else:
assert isinstance(images, list) and len(images) != 0
assert isinstance(gap, int) and gap >= 0
if len(images) == 1:
return t.cast(pilimage.Image, images[0]).copy()
max_width = max(it.width for it in images)
height = sum(it.height for it in images)
dst = pilimage.new(
t.cast(pilimage.Image, images[0]).mode, # type: ignore
(max_width, height)
)
position = 0
for img in images:
dst.paste(img, (0, position))
position = position + img.height + gap
return dst | Concatenate a list of images vertically. Parameters ---------- images : List[PIL.Image.Image] list of images gap : int, default 10 gap between images Returns ------- PIL.Image.Image |
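An illustrative example (assumes Pillow is installed). Note that the canvas height is the sum of the image heights, so the gap shifts images down without enlarging the canvas.
from PIL import Image
a = Image.new('RGB', (100, 40), 'red')
b = Image.new('RGB', (80, 60), 'blue')
stacked = concatv_images([a, b], gap=10)
stacked.size   # (100, 100)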
522 | import io
import json
import textwrap
import typing as t
from contextlib import contextmanager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.io._html as plotlyhtml
from ipywidgets import DOMWidget
from jsonpickle.pickler import Pickler
from pandas.io.formats.style import Styler
from plotly.io._utils import plotly_cdn_url
from plotly.offline.offline import get_plotlyjs
from deepchecks.core import check_result as check_types
from deepchecks.core import errors
from deepchecks.utils.dataframes import hide_index_for_display, un_numpy
from deepchecks.utils.html import linktag
from deepchecks.utils.strings import truncate_string
A = t.TypeVar('A')
B = t.TypeVar('B')
The provided code snippet includes necessary dependencies for implementing the `join` function. Write a Python function `def join(l: t.List[A], item: B) -> t.Iterator[t.Union[A, B]]` to solve the following problem:
Construct an iterator from list and put 'item' between each element of the list.
Here is the function:
def join(l: t.List[A], item: B) -> t.Iterator[t.Union[A, B]]:
"""Construct an iterator from list and put 'item' between each element of the list."""
list_len = len(l) - 1
for index, el in enumerate(l):
yield el
if index != list_len:
yield item | Construct an iterator from list and put 'item' between each element of the list. |
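For example:
list(join(['Check', 'Condition', 'Status'], '<hr>'))
# ['Check', '<hr>', 'Condition', '<hr>', 'Status']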
523 | import typing as t
import warnings
from ipywidgets import HTML, Accordion, VBox, Widget
from deepchecks.core import check_result as check_types
from deepchecks.core import suite
from deepchecks.core.serialization.abc import WidgetSerializer
from deepchecks.core.serialization.check_failure.widget import CheckFailureSerializer as CheckFailureWidgetSerializer
from deepchecks.core.serialization.check_result.widget import CheckResultSerializer as CheckResultWidgetSerializer
from deepchecks.core.serialization.common import Html as CommonHtml
from deepchecks.core.serialization.common import (aggregate_conditions, create_failures_dataframe,
create_results_dataframe, form_output_anchor, join,
normalize_widget_style)
from deepchecks.core.serialization.dataframe.widget import DataFrameSerializer
from deepchecks.utils.dataframes import hide_index_for_display
from deepchecks.utils.strings import get_random_string
from . import html
# Dispatch to the widget serializer matching the concrete result type.
def select_serializer(result):
if isinstance(result, check_types.CheckResult):
return CheckResultWidgetSerializer(result)
elif isinstance(result, check_types.CheckFailure):
return CheckFailureWidgetSerializer(result)
else:
raise TypeError(f'Unknown type of result - {type(result)}') | null |
524 | import re
import sys
import xml.etree.ElementTree as ET
from datetime import datetime
from typing import Dict, List, Union
from six import u
from deepchecks.core import check_result as check_types
from deepchecks.core import suite
from deepchecks.core.serialization.abc import JunitSerializer
from deepchecks.core.serialization.check_failure.junit import FAILURE, SKIPPED, CheckFailureSerializer
from deepchecks.core.serialization.check_result.junit import CheckResultSerializer
The provided code snippet includes necessary dependencies for implementing the `_clean_illegal_xml_chars` function. Write a Python function `def _clean_illegal_xml_chars(string_to_clean)` to solve the following problem:
Remove any illegal unicode characters from the given XML string. @see: http://stackoverflow.com/questions/1707890/fast-way-to-filter-illegal-xml-unicode-chars-in-python
Here is the function:
def _clean_illegal_xml_chars(string_to_clean):
"""Remove any illegal unicode characters from the given XML string.
@see: http://stackoverflow.com/questions/1707890/fast-way-to-filter-illegal-xml-unicode-chars-in-python
"""
illegal_unichrs = [
(0x00, 0x08),
(0x0B, 0x1F),
(0x7F, 0x84),
(0x86, 0x9F),
(0xD800, 0xDFFF),
(0xFDD0, 0xFDDF),
(0xFFFE, 0xFFFF),
(0x1FFFE, 0x1FFFF),
(0x2FFFE, 0x2FFFF),
(0x3FFFE, 0x3FFFF),
(0x4FFFE, 0x4FFFF),
(0x5FFFE, 0x5FFFF),
(0x6FFFE, 0x6FFFF),
(0x7FFFE, 0x7FFFF),
(0x8FFFE, 0x8FFFF),
(0x9FFFE, 0x9FFFF),
(0xAFFFE, 0xAFFFF),
(0xBFFFE, 0xBFFFF),
(0xCFFFE, 0xCFFFF),
(0xDFFFE, 0xDFFFF),
(0xEFFFE, 0xEFFFF),
(0xFFFFE, 0xFFFFF),
(0x10FFFE, 0x10FFFF),
]
illegal_ranges = [f'{chr(low)}-{chr(high)}' for (low, high) in illegal_unichrs if low < sys.maxunicode]
illegal_xml_re = re.compile(u('[%s]') % u('').join(illegal_ranges))
return illegal_xml_re.sub('', string_to_clean) | Remove any illegal unicode characters from the given XML string. @see: http://stackoverflow.com/questions/1707890/fast-way-to-filter-illegal-xml-unicode-chars-in-python |
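A tiny example of the cleaning behaviour; control characters that fall in the illegal ranges are dropped.
_clean_illegal_xml_chars('ok\x00\x0btext')   # 'oktext'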
525 | import importlib
import typing as t
from typing_extensions import Literal as L
from deepchecks import __version__
from deepchecks.utils.logger import get_logger
The provided code snippet includes necessary dependencies for implementing the `importable_name` function. Write a Python function `def importable_name(obj: t.Any) -> t.Tuple[str, str]` to solve the following problem:
Return the full import name of an object type.
Here is the function:
def importable_name(obj: t.Any) -> t.Tuple[str, str]:
"""Return the full import name of an object type."""
kind = type(obj) if not isinstance(obj, type) else obj
name = kind.__qualname__
module = kind.__module__
return module, name | Return the full import name of an object type. |
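A couple of quick examples:
import pandas as pd
importable_name(pd.DataFrame())   # ('pandas.core.frame', 'DataFrame')
importable_name(dict)             # ('builtins', 'dict')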
526 | import importlib
import typing as t
from typing_extensions import Literal as L
from deepchecks import __version__
from deepchecks.utils.logger import get_logger
The provided code snippet includes necessary dependencies for implementing the `import_type` function. Write a Python function `def import_type( module_name: str, type_name: str, base: t.Optional[t.Type[t.Any]] = None ) -> t.Type[t.Any]` to solve the following problem:
Import and return type instance by name.
Here is the function:
def import_type(
module_name: str,
type_name: str,
base: t.Optional[t.Type[t.Any]] = None
) -> t.Type[t.Any]:
"""Import and return type instance by name."""
module = importlib.import_module(module_name)
type_ = getattr(module, type_name, None)
if not isinstance(type_, type):
name = type(type_).__qualname__ # type: ignore
raise TypeError(f'Expected to import type instance, instead get - {name}')
if base is not None and not issubclass(type_, base):
name = type(type_).__qualname__ # type: ignore
bname = type(base).__qualname__
raise TypeError(f'Expected to import a subtype of "{bname}", instead got - {name}')
return type_ | Import and return type instance by name. |
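Hedged examples of the dynamic import helper above:
import_type('collections', 'OrderedDict', base=dict)   # returns the OrderedDict class (a dict subclass)
import_type('collections', 'namedtuple')               # raises TypeError - namedtuple is a function, not a type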
527 | import importlib
import typing as t
from typing_extensions import Literal as L
from deepchecks import __version__
from deepchecks.utils.logger import get_logger
VersionUnmatchAction = t.Union[L['raise'], L['warn'], None]
__version__ = version('deepchecks')
def get_logger() -> logging.Logger:
"""Retutn the deepchecks logger."""
return _logger
The provided code snippet includes necessary dependencies for implementing the `validate_config` function. Write a Python function `def validate_config( conf: t.Dict[str, t.Any], version_unmatch: VersionUnmatchAction = 'warn' ) -> t.Dict[str, t.Any]` to solve the following problem:
Validate check/suite configuration dictionary.
Here is the function:
def validate_config(
conf: t.Dict[str, t.Any],
version_unmatch: VersionUnmatchAction = 'warn'
) -> t.Dict[str, t.Any]:
"""Validate check/suite configuration dictionary."""
if 'module_name' not in conf or not conf['module_name']:
raise ValueError('Configuration must contain not empty "module_name" key of type string')
if 'class_name' not in conf or not conf['class_name']:
raise ValueError('Configuration must contain not empty "class_name" key of type string')
if 'version' not in conf or not conf['version']:
if version_unmatch == 'raise':
raise ValueError(
'Configuration must contain not empty '
'"version" key of type string'
)
elif version_unmatch == 'warn':
get_logger().warning(
'Configuration was expected to contain not empty '
'"version" key of type string'
)
elif conf['version'] != __version__:
if version_unmatch == 'raise':
raise ValueError(
'Configuration was formed by different version of deepchecks package.\n'
f'Configuration version: {conf["version"]}\n'
f'Deepchecks version: {__version__}\n'
)
elif version_unmatch == 'warn':
get_logger().warning(
'Configuration was formed by different version of deepchecks package.\n'
'Therefore a behavior of the check might be different than expected.\n'
'Configuration version: %s\n'
'Deepchecks version: %s\n',
conf['version'],
__version__
)
return conf | Validate check/suite configuration dictionary. |
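A sketch of the validation behaviour; the configuration values below are illustrative only.
conf = {'module_name': 'deepchecks.tabular.checks', 'class_name': 'DataDuplicates',
        'version': '0.0.1', 'params': {}}
validate_config(conf, version_unmatch='warn')   # returns conf, logging a warning if the versions differ
validate_config({'class_name': 'X'})            # raises ValueError - "module_name" is missing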
528 | from typing import Callable, Dict, List, Union
from deepchecks.tabular import Suite
from deepchecks.tabular.checks import (BoostingOverfit, CalibrationScore, ConflictingLabels, ConfusionMatrixReport,
DataDuplicates, DatasetsSizeComparison, DateTrainTestLeakageDuplicates,
DateTrainTestLeakageOverlap, FeatureDrift, FeatureFeatureCorrelation,
FeatureLabelCorrelation, FeatureLabelCorrelationChange,
IdentifierLabelCorrelation, IndexTrainTestLeakage, IsSingleValue, LabelDrift,
MixedDataTypes, MixedNulls, ModelInferenceTime, MultivariateDrift,
NewCategoryTrainTest, NewLabelTrainTest, OutlierSampleDetection, PercentOfNulls,
PredictionDrift, RegressionErrorDistribution, RocReport, SimpleModelComparison,
SingleDatasetPerformance, SpecialCharacters, StringLengthOutOfBounds,
StringMismatch, StringMismatchComparison, TrainTestPerformance,
TrainTestSamplesMix, UnusedFeatures, WeakSegmentsPerformance)
from deepchecks.tabular.utils.task_type import TaskType
from deepchecks.utils.typing import Hashable
class TaskType(Enum):
"""Enum containing supported task types."""
REGRESSION = 'regression'
BINARY = 'binary'
MULTICLASS = 'multiclass'
class Hashable(Protocol):
"""Trait for any hashable type that also defines comparison operators."""
def __hash__(self) -> int: # noqa: D105
...
def __le__(self, __value) -> bool: # noqa: D105
...
def __lt__(self, __value) -> bool: # noqa: D105
...
def __ge__(self, __value) -> bool: # noqa: D105
...
def __gt__(self, __value) -> bool: # noqa: D105
...
def __eq__(self, __value) -> bool: # noqa: D105
...
The provided code snippet includes necessary dependencies for implementing the `production_suite` function. Write a Python function `def production_suite(task_type: str = None, is_comparative: bool = True, alternative_scorers: Dict[str, Callable] = None, columns: Union[Hashable, List[Hashable]] = None, ignore_columns: Union[Hashable, List[Hashable]] = None, n_top_columns: int = None, n_samples: int = None, random_state: int = 42, n_to_show: int = 5, **kwargs) -> Suite` to solve the following problem:
Suite for testing the model in production. The suite contains checks for evaluating the model's performance. Checks for detecting drift and checks for data integrity issues that may occur in production. List of Checks (exact checks depend on the task type and the is_comparative flag): .. list-table:: List of Checks :widths: 50 50 :header-rows: 1 * - Check Example - API Reference * - :ref:`tabular__roc_report` - :class:`~deepchecks.tabular.checks.model_evaluation.RocReport` * - :ref:`tabular__confusion_matrix_report` - :class:`~deepchecks.tabular.checks.model_evaluation.ConfusionMatrixReport` * - :ref:`tabular__weak_segments_performance` - :class:`~deepchecks.tabular.checks.model_evaluation.WeakSegmentPerformance` * - :ref:`tabular__regression_error_distribution` - :class:`~deepchecks.tabular.checks.model_evaluation.RegressionErrorDistribution` * - :ref:`tabular__string_mismatch_comparison` - :class:`~deepchecks.tabular.checks.train_test_validation.StringMismatchComparison` * - :ref:`tabular__feature_label_correlation_change` - :class:`~deepchecks.tabular.checks.train_test_validation.FeatureLabelCorrelationChange` * - :ref:`tabular__feature_drift` - :class:`~deepchecks.tabular.checks.train_test_validation.FeatureDrift` * - :ref:`tabular__label_drift` - :class:`~deepchecks.tabular.checks.train_test_validation.LabelDrift` * - :ref:`tabular__multivariate_drift` - :class:`~deepchecks.tabular.checks.train_test_validation.MultivariateDrift` * - :ref:`tabular__prediction_drift` - :class:`~deepchecks.tabular.checks.model_evaluation.PredictionDrift` * - :ref:`tabular__prediction_drift` - :class:`~deepchecks.tabular.checks.model_evaluation.PredictionDrift` * - :ref:`tabular__string_mismatch` - :class:`~deepchecks.tabular.checks.data_integrity.StringMismatch` * - :ref:`tabular__feature_label_correlation` - :class:`~deepchecks.tabular.checks.data_integrity.FeatureLabelCorrelation` * - :ref:`tabular__feature_feature_correlation` - :class:`~deepchecks.tabular.checks.data_integrity.FeatureFeatureCorrelation` * - :ref:`tabular__single_dataset_performance` - :class:`~deepchecks.tabular.checks.model_evaluation.SingleDatasetPerformance` Parameters ---------- task_type : str, default: None The type of the task. Must be one of 'binary', 'multiclass' or 'regression'. If not given, both checks for classification and regression will be added to the suite. is_comparative : bool, default: True Whether to add the checks comparing the production data to some reference data, or if False, to add the checks inspecting the production data only. alternative_scorers : Dict[str, Callable], default: None An optional dictionary of scorer name to scorer functions. If none given, use default scorers columns : Union[Hashable, List[Hashable]] , default: None The columns to be checked. If None, all columns will be checked except the ones in `ignore_columns`. ignore_columns : Union[Hashable, List[Hashable]] , default: None The columns to be ignored. If None, no columns will be ignored. n_top_columns : int , optional number of columns to show ordered by feature importance (date, index, label are first) (check dependent) n_samples : int , default: 1_000_000 number of samples to use for checks that sample data. If none, use the default n_samples per check. random_state : int, default: 42 random seed for all checks. n_to_show : int , default: 5 number of top results to show (check dependent) **kwargs : dict additional arguments to pass to the checks. Returns ------- Suite A suite for evaluating the model's performance. 
Examples -------- >>> from deepchecks.tabular.suites import production_suite >>> suite = production_suite(task_type='binary', n_samples=10_000) >>> result = suite.run() >>> result.show() See Also -------- :ref:`quick_full_suite`
Here is the function:
def production_suite(task_type: str = None,
is_comparative: bool = True,
alternative_scorers: Dict[str, Callable] = None,
columns: Union[Hashable, List[Hashable]] = None,
ignore_columns: Union[Hashable, List[Hashable]] = None,
n_top_columns: int = None,
n_samples: int = None,
random_state: int = 42,
n_to_show: int = 5,
**kwargs) -> Suite:
"""Suite for testing the model in production.
The suite contains checks for evaluating the model's performance, checks for detecting drift, and checks for data integrity issues that may occur in production.
List of Checks (exact checks depend on the task type and the is_comparative flag):
.. list-table:: List of Checks
:widths: 50 50
:header-rows: 1
* - Check Example
- API Reference
* - :ref:`tabular__roc_report`
- :class:`~deepchecks.tabular.checks.model_evaluation.RocReport`
* - :ref:`tabular__confusion_matrix_report`
- :class:`~deepchecks.tabular.checks.model_evaluation.ConfusionMatrixReport`
* - :ref:`tabular__weak_segments_performance`
- :class:`~deepchecks.tabular.checks.model_evaluation.WeakSegmentPerformance`
* - :ref:`tabular__regression_error_distribution`
- :class:`~deepchecks.tabular.checks.model_evaluation.RegressionErrorDistribution`
* - :ref:`tabular__string_mismatch_comparison`
- :class:`~deepchecks.tabular.checks.train_test_validation.StringMismatchComparison`
* - :ref:`tabular__feature_label_correlation_change`
- :class:`~deepchecks.tabular.checks.train_test_validation.FeatureLabelCorrelationChange`
* - :ref:`tabular__feature_drift`
- :class:`~deepchecks.tabular.checks.train_test_validation.FeatureDrift`
* - :ref:`tabular__label_drift`
- :class:`~deepchecks.tabular.checks.train_test_validation.LabelDrift`
* - :ref:`tabular__multivariate_drift`
- :class:`~deepchecks.tabular.checks.train_test_validation.MultivariateDrift`
* - :ref:`tabular__prediction_drift`
- :class:`~deepchecks.tabular.checks.model_evaluation.PredictionDrift`
* - :ref:`tabular__string_mismatch`
- :class:`~deepchecks.tabular.checks.data_integrity.StringMismatch`
* - :ref:`tabular__feature_label_correlation`
- :class:`~deepchecks.tabular.checks.data_integrity.FeatureLabelCorrelation`
* - :ref:`tabular__feature_feature_correlation`
- :class:`~deepchecks.tabular.checks.data_integrity.FeatureFeatureCorrelation`
* - :ref:`tabular__single_dataset_performance`
- :class:`~deepchecks.tabular.checks.model_evaluation.SingleDatasetPerformance`
Parameters
----------
task_type : str, default: None
The type of the task. Must be one of 'binary', 'multiclass' or 'regression'. If not given, both checks for
classification and regression will be added to the suite.
is_comparative : bool, default: True
Whether to add the checks comparing the production data to some reference data, or if False, to add the
checks inspecting the production data only.
alternative_scorers : Dict[str, Callable], default: None
An optional dictionary of scorer name to scorer functions.
If none given, use default scorers
columns : Union[Hashable, List[Hashable]] , default: None
The columns to be checked. If None, all columns will be checked except the ones in `ignore_columns`.
ignore_columns : Union[Hashable, List[Hashable]] , default: None
The columns to be ignored. If None, no columns will be ignored.
n_top_columns : int , optional
number of columns to show ordered by feature importance (date, index, label are first) (check dependent)
n_samples : int , default: 1_000_000
number of samples to use for checks that sample data. If none, use the default n_samples per check.
random_state : int, default: 42
random seed for all checks.
n_to_show : int , default: 5
number of top results to show (check dependent)
**kwargs : dict
additional arguments to pass to the checks.
Returns
-------
Suite
A suite for evaluating the model's performance.
Examples
--------
>>> from deepchecks.tabular.suites import production_suite
>>> suite = production_suite(task_type='binary', n_samples=10_000)
>>> result = suite.run()
>>> result.show()
See Also
--------
:ref:`quick_full_suite`
"""
args = locals()
args.pop('kwargs')
non_none_args = {k: v for k, v in args.items() if v is not None}
kwargs = {**non_none_args, **kwargs}
checks = [WeakSegmentsPerformance(**kwargs).add_condition_segments_relative_performance_greater_than(),
PercentOfNulls(**kwargs)]
# Add checks for regression and classification
regression_checks = [RegressionErrorDistribution(**kwargs).add_condition_kurtosis_greater_than()]
classification_checks = [ConfusionMatrixReport(**kwargs), RocReport(**kwargs).add_condition_auc_greater_than()]
if task_type is None:
checks.extend(classification_checks)
checks.extend(regression_checks)
elif task_type == TaskType.REGRESSION.value:
checks.extend(regression_checks)
else:
checks.extend(classification_checks)
if is_comparative:
checks.append(StringMismatchComparison(**kwargs).add_condition_no_new_variants())
checks.append(FeatureLabelCorrelationChange(**kwargs).add_condition_feature_pps_difference_less_than())
checks.append(FeatureDrift(**kwargs).add_condition_drift_score_less_than())
checks.append(MultivariateDrift(**kwargs).add_condition_overall_drift_value_less_than())
checks.append(LabelDrift(ignore_na=True, **kwargs).add_condition_drift_score_less_than())
checks.append(PredictionDrift(**kwargs).add_condition_drift_score_less_than())
checks.append(TrainTestPerformance(**kwargs).add_condition_train_test_relative_degradation_less_than())
checks.append(NewCategoryTrainTest(**kwargs).add_condition_new_category_ratio_less_or_equal())
else:
checks.append(StringMismatch(**kwargs).add_condition_no_variants())
checks.append(FeatureLabelCorrelation(**kwargs).add_condition_feature_pps_less_than())
checks.append(FeatureFeatureCorrelation(**kwargs).add_condition_max_number_of_pairs_above_threshold())
checks.append(SingleDatasetPerformance(**kwargs))
return Suite('Production Suite', *checks) | Suite for testing the model in production. The suite contains checks for evaluating the model's performance. Checks for detecting drift and checks for data integrity issues that may occur in production. List of Checks (exact checks depend on the task type and the is_comparative flag): .. list-table:: List of Checks :widths: 50 50 :header-rows: 1 * - Check Example - API Reference * - :ref:`tabular__roc_report` - :class:`~deepchecks.tabular.checks.model_evaluation.RocReport` * - :ref:`tabular__confusion_matrix_report` - :class:`~deepchecks.tabular.checks.model_evaluation.ConfusionMatrixReport` * - :ref:`tabular__weak_segments_performance` - :class:`~deepchecks.tabular.checks.model_evaluation.WeakSegmentPerformance` * - :ref:`tabular__regression_error_distribution` - :class:`~deepchecks.tabular.checks.model_evaluation.RegressionErrorDistribution` * - :ref:`tabular__string_mismatch_comparison` - :class:`~deepchecks.tabular.checks.train_test_validation.StringMismatchComparison` * - :ref:`tabular__feature_label_correlation_change` - :class:`~deepchecks.tabular.checks.train_test_validation.FeatureLabelCorrelationChange` * - :ref:`tabular__feature_drift` - :class:`~deepchecks.tabular.checks.train_test_validation.FeatureDrift` * - :ref:`tabular__label_drift` - :class:`~deepchecks.tabular.checks.train_test_validation.LabelDrift` * - :ref:`tabular__multivariate_drift` - :class:`~deepchecks.tabular.checks.train_test_validation.MultivariateDrift` * - :ref:`tabular__prediction_drift` - :class:`~deepchecks.tabular.checks.model_evaluation.PredictionDrift` * - :ref:`tabular__prediction_drift` - :class:`~deepchecks.tabular.checks.model_evaluation.PredictionDrift` * - :ref:`tabular__string_mismatch` - :class:`~deepchecks.tabular.checks.data_integrity.StringMismatch` * - :ref:`tabular__feature_label_correlation` - :class:`~deepchecks.tabular.checks.data_integrity.FeatureLabelCorrelation` * - :ref:`tabular__feature_feature_correlation` - :class:`~deepchecks.tabular.checks.data_integrity.FeatureFeatureCorrelation` * - :ref:`tabular__single_dataset_performance` - :class:`~deepchecks.tabular.checks.model_evaluation.SingleDatasetPerformance` Parameters ---------- task_type : str, default: None The type of the task. Must be one of 'binary', 'multiclass' or 'regression'. If not given, both checks for classification and regression will be added to the suite. is_comparative : bool, default: True Whether to add the checks comparing the production data to some reference data, or if False, to add the checks inspecting the production data only. alternative_scorers : Dict[str, Callable], default: None An optional dictionary of scorer name to scorer functions. If none given, use default scorers columns : Union[Hashable, List[Hashable]] , default: None The columns to be checked. If None, all columns will be checked except the ones in `ignore_columns`. ignore_columns : Union[Hashable, List[Hashable]] , default: None The columns to be ignored. If None, no columns will be ignored. n_top_columns : int , optional number of columns to show ordered by feature importance (date, index, label are first) (check dependent) n_samples : int , default: 1_000_000 number of samples to use for checks that sample data. If none, use the default n_samples per check. random_state : int, default: 42 random seed for all checks. n_to_show : int , default: 5 number of top results to show (check dependent) **kwargs : dict additional arguments to pass to the checks. 
Returns ------- Suite A suite for evaluating the model's performance. Examples -------- >>> from deepchecks.tabular.suites import production_suite >>> suite = production_suite(task_type='binary', n_samples=10_000) >>> result = suite.run() >>> result.show() See Also -------- :ref:`quick_full_suite` |
529 | from typing import Callable, Dict, List, Union
from deepchecks.tabular import Suite
from deepchecks.tabular.checks import (BoostingOverfit, CalibrationScore, ConflictingLabels, ConfusionMatrixReport,
DataDuplicates, DatasetsSizeComparison, DateTrainTestLeakageDuplicates,
DateTrainTestLeakageOverlap, FeatureDrift, FeatureFeatureCorrelation,
FeatureLabelCorrelation, FeatureLabelCorrelationChange,
IdentifierLabelCorrelation, IndexTrainTestLeakage, IsSingleValue, LabelDrift,
MixedDataTypes, MixedNulls, ModelInferenceTime, MultivariateDrift,
NewCategoryTrainTest, NewLabelTrainTest, OutlierSampleDetection, PercentOfNulls,
PredictionDrift, RegressionErrorDistribution, RocReport, SimpleModelComparison,
SingleDatasetPerformance, SpecialCharacters, StringLengthOutOfBounds,
StringMismatch, StringMismatchComparison, TrainTestPerformance,
TrainTestSamplesMix, UnusedFeatures, WeakSegmentsPerformance)
from deepchecks.tabular.utils.task_type import TaskType
from deepchecks.utils.typing import Hashable
def data_integrity(columns: Union[Hashable, List[Hashable]] = None,
ignore_columns: Union[Hashable, List[Hashable]] = None,
n_top_columns: int = None,
n_samples: int = None,
random_state: int = 42,
n_to_show: int = 5,
**kwargs) -> Suite:
"""Suite for detecting integrity issues within a single dataset.
List of Checks:
.. list-table:: List of Checks
:widths: 50 50
:header-rows: 1
* - Check Example
- API Reference
* - :ref:`tabular__is_single_value`
- :class:`~deepchecks.tabular.checks.data_integrity.IsSingleValue`
* - :ref:`tabular__special_chars`
- :class:`~deepchecks.tabular.checks.data_integrity.SpecialCharacters`
* - :ref:`tabular__mixed_nulls`
- :class:`~deepchecks.tabular.checks.data_integrity.MixedNulls`
* - :ref:`tabular__mixed_data_types`
- :class:`~deepchecks.tabular.checks.data_integrity.MixedDataTypes`
* - :ref:`tabular__string_mismatch`
- :class:`~deepchecks.tabular.checks.data_integrity.StringMismatch`
* - :ref:`tabular__data_duplicates`
- :class:`~deepchecks.tabular.checks.data_integrity.DataDuplicates`
* - :ref:`tabular__string_length_out_of_bounds`
- :class:`~deepchecks.tabular.checks.data_integrity.StringLengthOutOfBounds`
* - :ref:`tabular__conflicting_labels`
- :class:`~deepchecks.tabular.checks.data_integrity.ConflictingLabels`
* - :ref:`tabular__outlier_sample_detection`
- :class:`~deepchecks.tabular.checks.data_integrity.OutlierSampleDetection`
* - :ref:`tabular__feature_label_correlation`
- :class:`~deepchecks.tabular.checks.data_integrity.FeatureLabelCorrelation`
* - :ref:`tabular__identifier_label_correlation`
- :class:`~deepchecks.tabular.checks.data_integrity.IdentifierLabelCorrelation`
* - :ref:`tabular__feature_feature_correlation`
- :class:`~deepchecks.tabular.checks.data_integrity.FeatureFeatureCorrelation`
Parameters
----------
columns : Union[Hashable, List[Hashable]] , default: None
The columns to be checked. If None, all columns will be checked except the ones in `ignore_columns`.
ignore_columns : Union[Hashable, List[Hashable]] , default: None
The columns to be ignored. If None, no columns will be ignored.
n_top_columns : int , optional
number of columns to show ordered by feature importance (date, index, label are first) (check dependent)
n_samples : int , default: 1_000_000
number of samples to use for checks that sample data. If none, use the default n_samples per check.
random_state : int, default: 42
random seed for all checks.
n_to_show : int , default: 5
number of top results to show (check dependent)
**kwargs : dict
additional arguments to pass to the checks.
Returns
-------
Suite
A suite for detecting integrity issues within a single dataset.
Examples
--------
>>> from deepchecks.tabular.suites import data_integrity
>>> suite = data_integrity(columns=['a', 'b', 'c'], n_samples=1_000_000)
>>> result = suite.run()
>>> result.show()
See Also
--------
:ref:`quick_data_integrity`
"""
args = locals()
args.pop('kwargs')
non_none_args = {k: v for k, v in args.items() if v is not None}
kwargs = {**non_none_args, **kwargs}
return Suite(
'Data Integrity Suite',
IsSingleValue(**kwargs).add_condition_not_single_value(),
SpecialCharacters(**kwargs).add_condition_ratio_of_special_characters_less_or_equal(),
MixedNulls(**kwargs).add_condition_different_nulls_less_equal_to(),
MixedDataTypes(**kwargs).add_condition_rare_type_ratio_not_in_range(),
StringMismatch(**kwargs).add_condition_no_variants(),
DataDuplicates(**kwargs).add_condition_ratio_less_or_equal(),
StringLengthOutOfBounds(**kwargs).add_condition_ratio_of_outliers_less_or_equal(),
ConflictingLabels(**kwargs).add_condition_ratio_of_conflicting_labels_less_or_equal(),
OutlierSampleDetection(**kwargs),
FeatureLabelCorrelation(**kwargs).add_condition_feature_pps_less_than(),
FeatureFeatureCorrelation(**kwargs).add_condition_max_number_of_pairs_above_threshold(),
IdentifierLabelCorrelation(**kwargs).add_condition_pps_less_or_equal()
)
def train_test_validation(columns: Union[Hashable, List[Hashable]] = None,
ignore_columns: Union[Hashable, List[Hashable]] = None,
n_top_columns: int = None,
n_samples: int = None,
random_state: int = 42,
n_to_show: int = 5,
**kwargs) -> Suite:
"""Suite for validating correctness of train-test split, including distribution, \
leakage and integrity checks.
List of Checks:
.. list-table:: List of Checks
:widths: 50 50
:header-rows: 1
* - Check Example
- API Reference
* - :ref:`tabular__datasets_size_comparison`
- :class:`~deepchecks.tabular.checks.train_test_validation.DatasetsSizeComparison`
* - :ref:`tabular__new_label`
- :class:`~deepchecks.tabular.checks.train_test_validation.NewLabelTrainTest`
* - :ref:`tabular__new_category`
      - :class:`~deepchecks.tabular.checks.train_test_validation.NewCategoryTrainTest`
* - :ref:`tabular__string_mismatch_comparison`
- :class:`~deepchecks.tabular.checks.train_test_validation.StringMismatchComparison`
* - :ref:`tabular__date_train_test_validation_leakage_duplicates`
- :class:`~deepchecks.tabular.checks.train_test_validation.DateTrainTestLeakageDuplicates`
* - :ref:`tabular__date_train_test_validation_leakage_overlap`
- :class:`~deepchecks.tabular.checks.train_test_validation.DateTrainTestLeakageOverlap`
* - :ref:`tabular__index_leakage`
- :class:`~deepchecks.tabular.checks.train_test_validation.IndexTrainTestLeakage`
* - :ref:`tabular__train_test_samples_mix`
- :class:`~deepchecks.tabular.checks.train_test_validation.TrainTestSamplesMix`
* - :ref:`tabular__feature_label_correlation_change`
- :class:`~deepchecks.tabular.checks.train_test_validation.FeatureLabelCorrelationChange`
* - :ref:`tabular__feature_drift`
- :class:`~deepchecks.tabular.checks.train_test_validation.FeatureDrift`
* - :ref:`tabular__label_drift`
- :class:`~deepchecks.tabular.checks.train_test_validation.LabelDrift`
* - :ref:`tabular__multivariate_drift`
- :class:`~deepchecks.tabular.checks.train_test_validation.MultivariateDrift`
Parameters
----------
columns : Union[Hashable, List[Hashable]] , default: None
The columns to be checked. If None, all columns will be checked except the ones in `ignore_columns`.
ignore_columns : Union[Hashable, List[Hashable]] , default: None
The columns to be ignored. If None, no columns will be ignored.
n_top_columns : int , optional
number of columns to show ordered by feature importance (date, index, label are first) (check dependent)
n_samples : int , default: None
number of samples to use for checks that sample data. If none, using the default n_samples per check.
random_state : int, default: 42
        random seed for all checks.
n_to_show : int , default: 5
number of top results to show (check dependent)
**kwargs : dict
additional arguments to pass to the checks.
Returns
-------
Suite
A suite for validating correctness of train-test split, including distribution, \
leakage and integrity checks.
Examples
--------
>>> from deepchecks.tabular.suites import train_test_validation
>>> suite = train_test_validation(columns=['a', 'b', 'c'], n_samples=1_000_000)
>>> result = suite.run()
>>> result.show()
See Also
--------
:ref:`quick_train_test_validation`
"""
args = locals()
args.pop('kwargs')
non_none_args = {k: v for k, v in args.items() if v is not None}
kwargs = {**non_none_args, **kwargs}
return Suite(
'Train Test Validation Suite',
DatasetsSizeComparison(**kwargs).add_condition_test_train_size_ratio_greater_than(),
NewLabelTrainTest(**kwargs).add_condition_new_labels_number_less_or_equal(),
NewCategoryTrainTest(**kwargs).add_condition_new_category_ratio_less_or_equal(),
StringMismatchComparison(**kwargs).add_condition_no_new_variants(),
DateTrainTestLeakageDuplicates(**kwargs).add_condition_leakage_ratio_less_or_equal(),
DateTrainTestLeakageOverlap(**kwargs).add_condition_leakage_ratio_less_or_equal(),
IndexTrainTestLeakage(**kwargs).add_condition_ratio_less_or_equal(),
TrainTestSamplesMix(**kwargs).add_condition_duplicates_ratio_less_or_equal(),
FeatureLabelCorrelationChange(**kwargs).add_condition_feature_pps_difference_less_than()
.add_condition_feature_pps_in_train_less_than(),
FeatureDrift(**kwargs).add_condition_drift_score_less_than(),
LabelDrift(**kwargs).add_condition_drift_score_less_than(),
MultivariateDrift(**kwargs).add_condition_overall_drift_value_less_than(),
)
def model_evaluation(alternative_scorers: Dict[str, Callable] = None,
columns: Union[Hashable, List[Hashable]] = None,
ignore_columns: Union[Hashable, List[Hashable]] = None,
n_top_columns: int = None,
n_samples: int = None,
random_state: int = 42,
n_to_show: int = 5,
**kwargs) -> Suite:
"""Suite for evaluating the model's performance over different metrics, segments, error analysis, examining \
overfitting, comparing to baseline, and more.
List of Checks:
.. list-table:: List of Checks
:widths: 50 50
:header-rows: 1
* - Check Example
- API Reference
* - :ref:`tabular__roc_report`
- :class:`~deepchecks.tabular.checks.model_evaluation.RocReport`
* - :ref:`tabular__confusion_matrix_report`
- :class:`~deepchecks.tabular.checks.model_evaluation.ConfusionMatrixReport`
* - :ref:`tabular__weak_segments_performance`
      - :class:`~deepchecks.tabular.checks.model_evaluation.WeakSegmentsPerformance`
* - :ref:`tabular__prediction_drift`
- :class:`~deepchecks.tabular.checks.model_evaluation.PredictionDrift`
* - :ref:`tabular__simple_model_comparison`
- :class:`~deepchecks.tabular.checks.model_evaluation.SimpleModelComparison`
* - :ref:`tabular__calibration_score`
- :class:`~deepchecks.tabular.checks.model_evaluation.CalibrationScore`
* - :ref:`tabular__regression_systematic_error`
- :class:`~deepchecks.tabular.checks.model_evaluation.RegressionSystematicError`
* - :ref:`tabular__regression_error_distribution`
- :class:`~deepchecks.tabular.checks.model_evaluation.RegressionErrorDistribution`
* - :ref:`tabular__unused_features`
- :class:`~deepchecks.tabular.checks.model_evaluation.UnusedFeatures`
* - :ref:`tabular__boosting_overfit`
- :class:`~deepchecks.tabular.checks.model_evaluation.BoostingOverfit`
* - :ref:`tabular__model_inference_time`
- :class:`~deepchecks.tabular.checks.model_evaluation.ModelInferenceTime`
Parameters
----------
alternative_scorers : Dict[str, Callable], default: None
An optional dictionary of scorer name to scorer functions.
If none given, use default scorers
columns : Union[Hashable, List[Hashable]] , default: None
The columns to be checked. If None, all columns will be checked except the ones in `ignore_columns`.
ignore_columns : Union[Hashable, List[Hashable]] , default: None
The columns to be ignored. If None, no columns will be ignored.
n_top_columns : int , optional
number of columns to show ordered by feature importance (date, index, label are first) (check dependent)
    n_samples : int , default: None
        number of samples to use for checks that sample data. If None, the default n_samples per check is used.
random_state : int, default: 42
random seed for all checks.
n_to_show : int , default: 5
number of top results to show (check dependent)
**kwargs : dict
additional arguments to pass to the checks.
Returns
-------
Suite
A suite for evaluating the model's performance.
Examples
--------
>>> from deepchecks.tabular.suites import model_evaluation
>>> suite = model_evaluation(columns=['a', 'b', 'c'], n_samples=1_000_000)
>>> result = suite.run()
>>> result.show()
See Also
--------
:ref:`quick_full_suite`
"""
args = locals()
args.pop('kwargs')
non_none_args = {k: v for k, v in args.items() if v is not None}
kwargs = {**non_none_args, **kwargs}
return Suite(
'Model Evaluation Suite',
TrainTestPerformance(**kwargs).add_condition_train_test_relative_degradation_less_than(),
RocReport(**kwargs).add_condition_auc_greater_than(),
ConfusionMatrixReport(**kwargs),
PredictionDrift(**kwargs).add_condition_drift_score_less_than(),
SimpleModelComparison(**kwargs).add_condition_gain_greater_than(),
WeakSegmentsPerformance(**kwargs).add_condition_segments_relative_performance_greater_than(),
CalibrationScore(**kwargs),
RegressionErrorDistribution(
**kwargs).add_condition_kurtosis_greater_than().add_condition_systematic_error_ratio_to_rmse_less_than(),
UnusedFeatures(**kwargs).add_condition_number_of_high_variance_unused_features_less_or_equal(),
BoostingOverfit(**kwargs).add_condition_test_score_percent_decline_less_than(),
ModelInferenceTime(**kwargs).add_condition_inference_time_less_than(),
)
The provided code snippet includes necessary dependencies for implementing the `full_suite` function. Write a Python function `def full_suite(**kwargs) -> Suite` to solve the following problem:
Create a suite that includes many of the implemented checks, for a quick overview of your model and data.
Here is the function:
def full_suite(**kwargs) -> Suite:
"""Create a suite that includes many of the implemented checks, for a quick overview of your model and data."""
return Suite(
'Full Suite',
model_evaluation(**kwargs),
train_test_validation(**kwargs),
data_integrity(**kwargs),
) | Create a suite that includes many of the implemented checks, for a quick overview of your model and data. |
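A minimal usage sketch for the suite builders above, assuming two deepchecks Dataset objects (`train_ds`, `test_ds`) and a fitted model (`clf`) already exist; these variable names are illustrative, not part of the source.
from deepchecks.tabular.suites import full_suite
# Illustrative only: train_ds, test_ds and clf are assumed to be pre-built
suite = full_suite(n_samples=100_000, random_state=42)
result = suite.run(train_dataset=train_ds, test_dataset=test_ds, model=clf)
result.save_as_html('full_suite_report.html')  # or result.show() in a notebook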
530 | import typing as t
import pandas as pd
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.tabular import Dataset
from deepchecks.tabular.metric_utils import DeepcheckScorer
from deepchecks.tabular.utils.feature_importance import _calculate_feature_importance
from deepchecks.tabular.utils.task_inference import (get_all_labels, infer_classes_from_model,
infer_task_type_by_class_number, infer_task_type_by_labels)
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
def _calculate_feature_importance(
model: t.Any,
dataset: t.Union['tabular.Dataset', pd.DataFrame],
model_classes,
observed_classes,
task_type,
force_permutation: bool = False,
permutation_kwargs: t.Dict[str, t.Any] = None,
) -> t.Tuple[pd.Series, str]:
"""Calculate features effect on the label.
Parameters
----------
model : t.Any
a fitted model
dataset : t.Union['tabular.Dataset', pd.DataFrame]
dataset used to fit the model
model_classes
possible classes output for model. None for regression tasks.
observed_classes
Observed classes in the data. None for regression tasks.
task_type
The task type of the model.
force_permutation : bool, default: False
force permutation importance calculation
permutation_kwargs : t.Dict[str, t.Any] , default: None
kwargs for permutation importance calculation
Returns
-------
Tuple[Series, str]:
first item - feature importance normalized to 0-1 indexed by feature names,
second item - type of feature importance calculation (types: `permutation_importance`,
`feature_importances_`, `coef_`)
Raises
------
NotFittedError
Call 'fit' with appropriate arguments before using this estimator.
DeepchecksValueError
if model validation failed.
if it was not possible to calculate features importance.
NumberOfFeaturesLimitError
if the number of features limit were exceeded.
"""
if task_type == TaskType.REGRESSION:
model_classes = None
observed_classes = None
permutation_kwargs = permutation_kwargs or {}
permutation_kwargs['random_state'] = permutation_kwargs.get('random_state', 42)
validate_model(dataset, model)
permutation_failure = None
calc_type = None
importance = None
if force_permutation:
if isinstance(dataset, pd.DataFrame):
raise errors.DeepchecksValueError('Cannot calculate permutation feature importance on a pandas Dataframe. '
'In order to force permutation feature importance, please use the Dataset'
' object.')
else:
importance = _calc_permutation_importance(model, dataset, model_classes, observed_classes,
task_type, **permutation_kwargs)
calc_type = 'permutation_importance'
# If there was no force permutation, or if it failed while trying to calculate importance,
# we don't take built-in importance in pipelines because the pipeline is changing the features
# (for example one-hot encoding) which leads to the inner model features
# being different from the original dataset features
if importance is None and not isinstance(model, Pipeline):
# Get the actual model in case of pipeline
importance, calc_type = _built_in_importance(model, dataset)
# If found importance and was force permutation failure before, show warning
if importance is not None and permutation_failure:
get_logger().warning(permutation_failure)
# If there was no permutation failure and no importance on the model, using permutation anyway
if importance is None and permutation_failure is None and isinstance(dataset, tabular.Dataset):
if not permutation_kwargs.get('skip_messages', False):
if isinstance(model, Pipeline):
pre_text = 'Cannot use model\'s built-in feature importance on a Scikit-learn Pipeline,'
else:
pre_text = 'Could not find built-in feature importance on the model,'
get_logger().warning('%s using permutation feature importance calculation instead', pre_text)
importance = _calc_permutation_importance(model, dataset, model_classes, observed_classes,
task_type, **permutation_kwargs)
calc_type = 'permutation_importance'
# If after all importance is still none raise error
if importance is None:
# FIXME: better message
raise errors.DeepchecksValueError('Was not able to calculate features importance')
return importance.fillna(0), calc_type
def infer_classes_from_model(model: Optional[BasicModel]):
"""Get classes_ attribute from model object if exists."""
if model is not None and hasattr(model, 'classes_') and len(model.classes_) > 0:
return sorted(list(model.classes_))
def get_all_labels(model, train_dataset, test_dataset=None, y_pred_train=None, y_pred_test=None):
"""Aggregate labels from all available data: labels on datasets, y_pred, and model predicitions."""
labels = np.asarray([])
if train_dataset:
if train_dataset.has_label():
labels = np.append(labels, train_dataset.label_col.to_numpy())
if model:
labels = np.append(labels, sequence_to_numpy(model.predict(train_dataset.features_columns)))
if test_dataset:
if test_dataset.has_label():
labels = np.append(labels, test_dataset.label_col.to_numpy())
if model:
labels = np.append(labels, sequence_to_numpy(model.predict(test_dataset.features_columns)))
if y_pred_train is not None:
labels = np.append(labels, y_pred_train)
if y_pred_test is not None:
labels = np.append(labels, y_pred_test)
return pd.Series(labels) if len(labels) > 0 else pd.Series(dtype='object')
def infer_task_type_by_labels(labels: pd.Series):
"""Infer task type from given dataset/labels/model_classes."""
    # If there are no observed labels (the user didn't pass a model, and the datasets have no label column),
    # then we have no task type
if len(labels) == 0:
return None
    # Check whether the observed labels are categorical or not
if is_categorical(labels, max_categorical_ratio=0.05):
num_classes = len(labels.dropna().unique())
task_type = infer_task_type_by_class_number(num_classes)
if infer_dtype(labels) == 'integer':
get_logger().warning(
            'Due to the small number of unique labels, task type was inferred as %s classification even though '
            'the label column is of type integer. '
'Initialize your Dataset with either label_type=\"%s\" or '
'label_type=\"regression\" to resolve this warning.', task_type.value, task_type.value)
return task_type
else:
return TaskType.REGRESSION
def infer_task_type_by_class_number(num_classes):
"""Infer task type of binary or multiclass."""
if num_classes == 0:
raise ValidationError('Found zero number of classes')
if num_classes == 1:
raise ValidationError('Found only one class in label column, pass the full list of possible '
'label classes via the model_classes argument of the run function.')
return TaskType.BINARY if num_classes == 2 else TaskType.MULTICLASS
The provided code snippet includes necessary dependencies for implementing the `calculate_feature_importance` function. Write a Python function `def calculate_feature_importance( model: t.Any, dataset: Dataset, n_repeats: int = 30, mask_high_variance_features: bool = False, n_samples: int = 10_000, alternative_scorer: t.Optional[DeepcheckScorer] = None, force_permutation: bool = False, random_state: int = 42 ) -> pd.Series` to solve the following problem:
Get or calculate feature importance outside of check and suite. Many checks and suites in deepchecks use feature importance as part of their calculation or output. If your model does not have built-in feature importance, the check or suite will calculate it for you. This calculation is done in every call to the check or suite ``run`` function. Therefore, when running different checks outside a suite, or running the same suite several times, this calculation will be done every time. The recalculation can be avoided by calculating the feature importance in advance. Use this function to calculate your feature importance, and then give it as an input to the check or suite ``run`` function, as follows: >>> from deepchecks.tabular.feature_importance import calculate_feature_importance >>> from deepchecks.tabular.datasets.classification.iris import load_data, load_fitted_model >>> from deepchecks.tabular.checks import UnusedFeatures >>> _, iris_test_dataset = load_data() >>> iris_model = load_fitted_model() >>> fi = calculate_feature_importance(model=iris_model, dataset=iris_test_dataset) >>> result = UnusedFeatures().run(iris_test_dataset, model=iris_model, feature_importance=fi) By default this function will attempt to get the feature importance from the model. If the model does not have built-in feature importance, it will calculate it using permutation importance. If you want to force the calculation of the feature importance, even if the model has built-in feature importance, use the ``force_permutation`` parameter. Parameters ---------- model: t.Any a fitted model dataset: t.Union['tabular.Dataset', pd.DataFrame] dataset used to fit the model n_repeats: int, default: 30 Number of times to permute a feature mask_high_variance_features : bool , default: False If True, features for which the calculated permutation importance values varied greatly would be returned as having 0 feature importance n_samples: int, default: 10_000 The number of samples to draw from X to compute feature importance in each repeat (without replacement). alternative_scorer: t.Optional[DeepcheckScorer], default: None Scorer to use for evaluation of the model performance in the permutation_importance function. If not defined, the default deepchecks scorers are used. force_permutation : bool, default: False Force permutation importance calculation. random_state: int, default: 42 Random seed for permutation importance calculation. Returns ------- pd.Series: feature importance normalized to 0-1 indexed by feature names
Here is the function:
def calculate_feature_importance(
model: t.Any,
dataset: Dataset,
n_repeats: int = 30,
mask_high_variance_features: bool = False,
n_samples: int = 10_000,
alternative_scorer: t.Optional[DeepcheckScorer] = None,
force_permutation: bool = False,
random_state: int = 42
) -> pd.Series:
"""
Get or calculate feature importance outside of check and suite.
    Many checks and suites in deepchecks use feature importance as part of their calculation or output. If your model
does not have built-in feature importance, the check or suite will calculate it for you. This calculation is done
in every call to the check or suite ``run`` function. Therefore, when running different checks outside a suite,
or running the same suite several times, this calculation will be done every time.
The recalculation can be avoided by calculating the feature importance in advance. Use this function to calculate
your feature importance, and then give it as an input to the check or suite ``run`` function, as follows:
>>> from deepchecks.tabular.feature_importance import calculate_feature_importance
>>> from deepchecks.tabular.datasets.classification.iris import load_data, load_fitted_model
>>> from deepchecks.tabular.checks import UnusedFeatures
>>> _, iris_test_dataset = load_data()
>>> iris_model = load_fitted_model()
>>> fi = calculate_feature_importance(model=iris_model, dataset=iris_test_dataset)
>>> result = UnusedFeatures().run(iris_test_dataset, model=iris_model, feature_importance=fi)
    By default this function will attempt to get the feature importance from the model. If the model does not have
built-in feature importance, it will calculate it using permutation importance. If you want to force the
calculation of the feature importance, even if the model has built-in feature importance, use the
``force_permutation`` parameter.
Parameters
----------
model: t.Any
a fitted model
dataset: t.Union['tabular.Dataset', pd.DataFrame]
dataset used to fit the model
n_repeats: int, default: 30
Number of times to permute a feature
mask_high_variance_features : bool , default: False
        If True, features for which the calculated permutation importance values
        varied greatly would be returned as having 0 feature importance
n_samples: int, default: 10_000
The number of samples to draw from X to compute feature importance
in each repeat (without replacement).
alternative_scorer: t.Optional[DeepcheckScorer], default: None
Scorer to use for evaluation of the model performance in the permutation_importance function. If not defined,
the default deepchecks scorers are used.
force_permutation : bool, default: False
Force permutation importance calculation.
random_state: int, default: 42
Random seed for permutation importance calculation.
Returns
-------
pd.Series:
feature importance normalized to 0-1 indexed by feature names
"""
permutation_kwargs = {
'n_repeats': n_repeats,
'mask_high_variance_features': mask_high_variance_features,
'n_samples': n_samples,
'alternative_scorer': alternative_scorer,
'random_state': random_state,
'skip_messages': False,
'timeout': None
}
if isinstance(dataset, pd.DataFrame):
raise DeepchecksValueError('Cannot calculate permutation feature importance on a pandas Dataframe. '
'In order to force permutation feature importance, please use the Dataset'
' object.')
model_classes = infer_classes_from_model(model)
labels = get_all_labels(model, dataset)
observed_classes = sorted(labels.dropna().unique().tolist())
if dataset.label_type:
task_type = dataset.label_type
elif model_classes:
task_type = infer_task_type_by_class_number(len(model_classes))
else:
task_type = infer_task_type_by_labels(labels)
fi, _ = _calculate_feature_importance(model=model,
dataset=dataset,
model_classes=model_classes or observed_classes,
observed_classes=observed_classes,
task_type=task_type,
force_permutation=force_permutation,
permutation_kwargs=permutation_kwargs)
return fi | Get or calculate feature importance outside of check and suite. Many checks and suites in deepchecks use feature importance as part of its calculation or output. If your model does not have built-in feature importance, the check or suite will calculate it for you. This calculation is done in every call to the check or suite ``run`` function. Therefore, when running different checks outside a suite, or running the same suite several times, this calculation will be done every time. The recalculation can be avoided by calculating the feature importance in advance. Use this function to calculate your feature importance, and then give it as an input to the check or suite ``run`` function, as follows: >>> from deepchecks.tabular.feature_importance import calculate_feature_importance >>> from deepchecks.tabular.datasets.classification.iris import load_data, load_fitted_model >>> from deepchecks.tabular.checks import UnusedFeatures >>> _, iris_test_dataset = load_data() >>> iris_model = load_fitted_model() >>> fi = calculate_feature_importance(model=iris_model, dataset=iris_test_dataset) >>> result = UnusedFeatures().run(iris_test_dataset, model=iris_model, feature_importance=fi) By defualt this function will attempt to get the feature importance from the model. If the model does not have built-in feature importance, it will calculate it using permutation importance. If you want to force the calculation of the feature importance, even if the model has built-in feature importance, use the ``force_permutation`` parameter. Parameters ---------- model: t.Any a fitted model dataset: t.Union['tabular.Dataset', pd.DataFrame] dataset used to fit the model n_repeats: int, default: 30 Number of times to permute a feature mask_high_variance_features : bool , default: False If true, features for which calculated permutation importance values varied greatly would be returned has having 0 feature importance n_samples: int, default: 10_000 The number of samples to draw from X to compute feature importance in each repeat (without replacement). alternative_scorer: t.Optional[DeepcheckScorer], default: None Scorer to use for evaluation of the model performance in the permutation_importance function. If not defined, the default deepchecks scorers are used. force_permutation : bool, default: False Force permutation importance calculation. random_state: int, default: 42 Random seed for permutation importance calculation. Returns ------- pd.Series: feature importance normalized to 0-1 indexed by feature names |
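A short follow-up sketch using only the parameters documented above, forcing permutation importance on the iris example from the docstring; the chosen values are illustrative.
fi = calculate_feature_importance(model=iris_model, dataset=iris_test_dataset,
                                  force_permutation=True, n_repeats=10, n_samples=5_000)
print(fi.sort_values(ascending=False))  # normalized importance per feature, highest first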
531 | import numpy as np
import pandas as pd
import plotly.graph_objects as go
from sklearn.base import TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder, RobustScaler
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.utils.function import run_available_kwargs
def run_available_kwargs(func: Callable, **kwargs):
"""Run the passed object only with available kwargs."""
avail_kwargs = list(signature(func).parameters.keys())
pass_kwargs = {}
for kwarg_name in avail_kwargs:
if kwarg_name in kwargs:
pass_kwargs[kwarg_name] = kwargs[kwarg_name]
return func(**pass_kwargs)
The provided code snippet includes necessary dependencies for implementing the `naive_encoder` function. Write a Python function `def naive_encoder(numerical_features, cat_features) -> TransformerMixin` to solve the following problem:
Create a naive encoder for categorical and numerical features. The encoder handles nans for all features and uses label encoder for categorical features. Then, all features are scaled using RobustScaler. Parameters ---------- numerical_features cat_features Returns ------- TransformerMixin A transformer object
Here is the function:
def naive_encoder(numerical_features, cat_features) -> TransformerMixin:
"""Create a naive encoder for categorical and numerical features.
The encoder handles nans for all features and uses label encoder for categorical features. Then, all features are
scaled using RobustScaler.
Parameters
----------
numerical_features
cat_features
Returns
-------
TransformerMixin
A transformer object
"""
return ColumnTransformer(
transformers=[
('num', Pipeline([
('nan_handling', SimpleImputer()),
('norm', RobustScaler())
]),
np.array(numerical_features, dtype='object')),
('cat',
Pipeline([
('nan_handling', SimpleImputer(strategy='most_frequent')),
('encode', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value', unknown_value=-1)),
('norm', RobustScaler())
]),
np.array(cat_features, dtype='object'))
]
) | Create a naive encoder for categorical and numerical features. The encoder handles nans for all features and uses label encoder for categorical features. Then, all features are scaled using RobustScaler. Parameters ---------- numerical_features cat_features Returns ------- TransformerMixin A transformer object |
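A minimal sketch of how the returned transformer might be used, assuming `naive_encoder` from above is in scope; the toy frame and column names are made up.
import numpy as np
import pandas as pd
df = pd.DataFrame({
    'age': [23, 35, None, 41],
    'income': [50_000, 72_000, 61_000, None],
    'city': ['Haifa', 'Tel Aviv', np.nan, 'Haifa'],
})
encoder = naive_encoder(numerical_features=['age', 'income'], cat_features=['city'])
encoded = encoder.fit_transform(df)  # imputed, ordinal-encoded and robust-scaled numpy array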
532 | import itertools
from typing import Callable, Dict, Tuple, Union
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.core.checks import DatasetKind
from deepchecks.core.errors import DeepchecksProcessError, DeepchecksValueError
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.utils.performance.partition import partition_column
from deepchecks.utils.typing import Hashable
The provided code snippet includes necessary dependencies for implementing the `expand_grid` function. Write a Python function `def expand_grid(**kwargs)` to solve the following problem:
Create combination of parameter values. Create a dataframe with one column for each named argument and rows corresponding to all possible combinations of the given arguments.
Here is the function:
def expand_grid(**kwargs):
"""
Create combination of parameter values.
Create a dataframe with one column for each named argument and rows corresponding to all
possible combinations of the given arguments.
"""
return pd.DataFrame.from_records(itertools.product(*kwargs.values()), columns=kwargs.keys()) | Create combination of parameter values. Create a dataframe with one column for each named argument and rows corresponding to all possible combinations of the given arguments. |
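A small worked example of the helper above; the parameter names are arbitrary.
grid = expand_grid(learning_rate=[0.01, 0.1], max_depth=[3, 5, 7])
print(grid)  # 6 rows, one per (learning_rate, max_depth) combination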
533 | import itertools
from typing import Callable, Dict, Tuple, Union
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.core.checks import DatasetKind
from deepchecks.core.errors import DeepchecksProcessError, DeepchecksValueError
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.utils.performance.partition import partition_column
from deepchecks.utils.typing import Hashable
The provided code snippet includes necessary dependencies for implementing the `combine_filters` function. Write a Python function `def combine_filters(filters, dataframe)` to solve the following problem:
Combine segment filters. Parameters ---------- filters: Series Series indexed by segment names and with values corresponding to segment filters to be applied to the data. dataframe: DataFrame DataFrame to which filters are applied. Returns ------- DataFrame Data filtered to the given combination of segments.
Here is the function:
def combine_filters(filters, dataframe):
"""
Combine segment filters.
Parameters
----------
filters: Series
Series indexed by segment names and with values corresponding to segment filters to
be applied to the data.
dataframe: DataFrame
DataFrame to which filters are applied.
Returns
-------
DataFrame
Data filtered to the given combination of segments.
"""
segments = filters.index.values
filtered_data = filters[segments[0]].filter(dataframe)
if len(segments) > 1:
for i in range(1, len(segments)):
filtered_data = filters[segments[i]].filter(filtered_data)
return filtered_data | Combine segment filters. Parameters ---------- filters: Series Series indexed by segment names and with values corresponding to segment filters to be applied to the data. dataframe: DataFrame DataFrame to which filters are applied. Returns ------- DataFrame Data filtered to the given combination of segments. |
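A sketch of the interface `combine_filters` expects: each value in the Series must expose a `.filter(dataframe)` method, as the deepchecks partition filters do. The `_RangeFilter` class below is a hypothetical stand-in, not part of the library.
import pandas as pd
class _RangeFilter:
    """Hypothetical stand-in exposing the .filter(df) interface used above."""
    def __init__(self, column, low, high):
        self.column, self.low, self.high = column, low, high
    def filter(self, df):
        return df[(df[self.column] >= self.low) & (df[self.column] < self.high)]
data = pd.DataFrame({'age': [20, 35, 50], 'income': [40_000, 80_000, 120_000]})
filters = pd.Series({'age': _RangeFilter('age', 18, 40),
                     'income': _RangeFilter('income', 0, 100_000)})
subset = combine_filters(filters, data)  # keeps only the rows inside both segments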
534 | import itertools
from typing import Callable, Dict, Tuple, Union
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.core.checks import DatasetKind
from deepchecks.core.errors import DeepchecksProcessError, DeepchecksValueError
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.utils.performance.partition import partition_column
from deepchecks.utils.typing import Hashable
The provided code snippet includes necessary dependencies for implementing the `is_classwise` function. Write a Python function `def is_classwise(scorer, model, dataset)` to solve the following problem:
Check whether a given scorer provides an average score or a score for each class.
Here is the function:
def is_classwise(scorer, model, dataset):
"""Check whether a given scorer provides an average score or a score for each class."""
test_result = scorer(model, dataset.copy(dataset.data.head()))
return isinstance(test_result, dict) | Check whether a given scorer provides an average score or a score for each class. |
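A sketch of the duck-typed objects `is_classwise` needs: a scorer callable and a dataset exposing `.data` and `.copy(...)`. Both stand-ins below are hypothetical and only mimic the interface used above.
import pandas as pd
class _TinyDataset:
    """Hypothetical stand-in mimicking the .data / .copy(new_data) interface."""
    def __init__(self, df):
        self.data = df
    def copy(self, new_df):
        return _TinyDataset(new_df)
def per_class_recall(model, dataset):
    return {'cat': 0.9, 'dog': 0.8}  # hypothetical class-wise scorer returning a dict
ds = _TinyDataset(pd.DataFrame({'x': range(10)}))
print(is_classwise(per_class_recall, model=None, dataset=ds))  # True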
535 | from typing import Dict, List
import numpy as np
import plotly.graph_objects as go
import sklearn
from deepchecks.core import CheckResult, ConditionResult
from deepchecks.core.condition import ConditionCategory
from deepchecks.core.errors import DeepchecksNotSupportedError
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.tabular.utils.task_type import TaskType
from deepchecks.utils.dict_funcs import get_dict_entry_by_value
from deepchecks.utils.strings import format_number
def sensitivity_specificity_cutoff(tpr, fpr):
"""Find index of optimal cutoff point on curve.
Cut-off is determined using Youden's index defined as sensitivity + specificity - 1.
Parameters
----------
tpr : array, shape = [n_roc_points]
True positive rate per threshold
fpr : array, shape = [n_roc_points]
False positive rate per threshold
References
----------
Ewald, B. (2006). Post hoc choice of cut points introduced bias to diagnostic research.
Journal of clinical epidemiology, 59(8), 798-801.
Steyerberg, E.W., Van Calster, B., & Pencina, M.J. (2011). Performance measures for
prediction models and markers: evaluation of predictions and classifications.
Revista Espanola de Cardiologia (English Edition), 64(9), 788-794.
Jiménez-Valverde, A., & Lobo, J.M. (2007). Threshold criteria for conversion of probability
of species presence to either–or presence–absence. Acta oecologica, 31(3), 361-369.
"""
return np.argmax(tpr - fpr)
def get_cutoff_figure(tpr, fpr, thresholds, class_name):
highest_youden_index = sensitivity_specificity_cutoff(tpr, fpr)
hovertemplate = f'Class: {class_name}' + '<br>TPR: %{y:.2%}<br>FPR: %{x:.2%}' + \
f'<br>Optimal Threshold: {thresholds[highest_youden_index]:.3}'
return go.Scatter(x=[fpr[highest_youden_index]], y=[tpr[highest_youden_index]], mode='markers', marker_size=15,
hovertemplate=hovertemplate, showlegend=False, marker={'color': 'black'}) | null |
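A small usage sketch for the cutoff helper above, paired with scikit-learn's roc_curve; the labels and scores are made up.
import numpy as np
from sklearn.metrics import roc_curve
y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2, 0.9, 0.55])
fpr, tpr, thresholds = roc_curve(y_true, y_score)
best_idx = sensitivity_specificity_cutoff(tpr, fpr)  # note the (tpr, fpr) argument order
print(thresholds[best_idx])  # threshold with the highest Youden's index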
536 | import warnings
from collections import defaultdict
from numbers import Number
from typing import TYPE_CHECKING, Callable, Dict, Hashable, List, Mapping, Union
import numpy as np
import pandas as pd
import plotly.express as px
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from deepchecks.core import CheckResult, ConditionResult
from deepchecks.core.condition import ConditionCategory
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.tabular import Context, Dataset, TrainTestCheck
from deepchecks.tabular.utils.task_type import TaskType
from deepchecks.utils.distribution.preprocessing import ScaledNumerics
from deepchecks.utils.docref import doclink
from deepchecks.utils.metrics import get_gain
from deepchecks.utils.simple_models import ClassificationUniformModel, RandomModel, RegressionUniformModel
from deepchecks.utils.strings import format_percent
def average_scores(scores, include_classes):
result = {}
for metric, classes_scores in scores.items():
origin_score = 0
simple_score = 0
total = 0
for clas, models_scores in classes_scores.items():
# Skip if class is not in class list
if include_classes is not None and clas not in include_classes:
continue
origin_score += models_scores['Origin']
simple_score += models_scores['Simple']
total += 1
result[metric] = {
'Origin': origin_score / total,
'Simple': simple_score / total
}
return result
class ConditionCategory(enum.Enum):
"""Condition result category. indicates whether the result should fail the suite."""
FAIL = 'FAIL'
WARN = 'WARN'
PASS = 'PASS'
ERROR = 'ERROR'
class TaskType(Enum):
"""Enum containing supported task types."""
REGRESSION = 'regression'
BINARY = 'binary'
MULTICLASS = 'multiclass'
def get_gain(base_score, score, perfect_score, max_gain):
"""Get gain between base score and score compared to the distance from the perfect score."""
distance_from_perfect = perfect_score - base_score
scores_diff = score - base_score
if distance_from_perfect == 0:
# If both base score and score are perfect, return 0 gain
if scores_diff == 0:
return 0
# else base_score is better than score, return -max_gain
return -max_gain
ratio = scores_diff / distance_from_perfect
if ratio < -max_gain:
return -max_gain
if ratio > max_gain:
return max_gain
return ratio
def format_percent(ratio: float, floating_point: int = 2, scientific_notation_threshold: int = 4,
add_positive_prefix: bool = False) -> str:
"""Format percent for elegant display.
Parameters
----------
ratio : float
Ratio to be displayed as percent
floating_point: int , default: 2
Number of floating points to display
scientific_notation_threshold: int, default: 4
Max number of floating points for which to show number as float. If number of floating points is larger than
this parameter, scientific notation (e.g. "10E-5%") will be shown.
add_positive_prefix: bool, default: False
add plus sign before positive percentages (minus sign is always added for negative percentages).
Returns
-------
str
String of ratio as percent
"""
result: str
if ratio < 0:
ratio = -ratio
prefix = '-'
else:
prefix = '+' if add_positive_prefix and ratio != 0 else ''
if int(ratio) == ratio:
result = f'{int(ratio) * 100}%'
elif ratio > 1:
result = truncate_zero_percent(ratio, floating_point)
elif ratio < 10**(-(2+floating_point)):
if ratio > 10**(-(2+scientific_notation_threshold)):
result = truncate_zero_percent(ratio, scientific_notation_threshold)
else:
result = f'{Decimal(ratio * 100):.{floating_point}E}%'
elif ratio > (1-10**(-(2+floating_point))):
if floating_point > 0:
result = f'99.{"".join(["9"]*floating_point)}%'
else:
result = '99%'
else:
result = truncate_zero_percent(ratio, floating_point)
return prefix + result
def condition(result: Dict, include_classes=None, average=False, max_gain=None, min_allowed_gain=None) -> \
ConditionResult:
scores = result['scores']
task_type = result['type']
scorers_perfect = result['scorers_perfect']
# If the depth of the nested scores dict is 2, average is not relevant and is set to True
inner_dict = scores[list(scores.keys())[0]]
inner_inner_dict = inner_dict[list(inner_dict.keys())[0]]
force_average = isinstance(inner_inner_dict, Number)
passed_condition = True
if task_type in [TaskType.MULTICLASS, TaskType.BINARY] and not average and not force_average:
passed_metrics = {}
failed_classes = defaultdict(dict)
perfect_metrics = []
for metric, classes_scores in scores.items():
gains = {}
metric_passed = True
for clas, models_scores in classes_scores.items():
# Skip if class is not in class list
if include_classes is not None and clas not in include_classes:
continue
# If origin model is perfect, skip the gain calculation
if models_scores['Origin'] == scorers_perfect[metric]:
continue
gains[clas] = get_gain(models_scores['Simple'],
models_scores['Origin'],
scorers_perfect[metric],
max_gain)
# Save dict of failed classes and metrics gain
if gains[clas] <= min_allowed_gain:
failed_classes[clas][metric] = format_percent(gains[clas])
metric_passed = False
if metric_passed and gains:
avg_gain = sum(gains.values()) / len(gains)
passed_metrics[metric] = format_percent(avg_gain)
elif metric_passed and not gains:
perfect_metrics.append(metric)
if failed_classes:
msg = f'Found classes with failed metric\'s gain: {dict(failed_classes)}'
passed_condition = False
elif passed_metrics:
msg = f'All classes passed, average gain for metrics: {passed_metrics}'
else:
msg = f'Found metrics with perfect score, no gain is calculated: {perfect_metrics}'
else:
passed_metrics = {}
failed_metrics = {}
perfect_metrics = []
if task_type in [TaskType.MULTICLASS, TaskType.BINARY] and not force_average:
scores = average_scores(scores, include_classes)
for metric, models_scores in scores.items():
# If origin model is perfect, skip the gain calculation
if models_scores['Origin'] == scorers_perfect[metric]:
perfect_metrics.append(metric)
continue
gain = get_gain(models_scores['Simple'],
models_scores['Origin'],
scorers_perfect[metric],
max_gain)
if gain <= min_allowed_gain:
failed_metrics[metric] = format_percent(gain)
else:
passed_metrics[metric] = format_percent(gain)
if failed_metrics:
msg = f'Found failed metrics: {failed_metrics}'
passed_condition = False
elif passed_metrics:
msg = f'All metrics passed, metric\'s gain: {passed_metrics}'
else:
msg = f'Found metrics with perfect score, no gain is calculated: {perfect_metrics}'
category = ConditionCategory.PASS if passed_condition else ConditionCategory.FAIL
return ConditionResult(category, msg) | null |
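A small numeric sketch of the gain computation the condition above relies on; the scores are illustrative.
# Simple baseline scores 0.60, the original model scores 0.75, a perfect score is 1.0
gain = get_gain(base_score=0.60, score=0.75, perfect_score=1.0, max_gain=50)
# (0.75 - 0.60) / (1.0 - 0.60) == 0.375: the model closes 37.5% of the gap to a perfect score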
537 | from copy import deepcopy
from typing import TYPE_CHECKING, Callable, Tuple, Union
import numpy as np
import plotly.graph_objects as go
from sklearn.pipeline import Pipeline
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.core.errors import DeepchecksValueError, ModelValidationError
from deepchecks.tabular import Context, TrainTestCheck
from deepchecks.utils.docref import doclink
from deepchecks.utils.model import get_model_of_pipeline
from deepchecks.utils.strings import format_percent
class PartialBoostingModel:
"""Wrapper for boosting models which limits the number of estimators being used in the prediction."""
_UNSUPPORTED_MODEL_ERROR = \
'Check is relevant for Boosting models of type {supported_models}, but received model of type {model_type}'
_NO_MODEL_ERROR = \
'Check is relevant only when receiving the model, but predictions/probabilities were received instead. ' \
'In order to use this check, please pass the model to the run() method.'
_SUPPORTED_CLASSIFICATION_MODELS = (
'AdaBoostClassifier',
'GradientBoostingClassifier',
'LGBMClassifier',
'XGBClassifier',
'CatBoostClassifier'
)
_SUPPORTED_REGRESSION_MODELS = (
'AdaBoostRegressor',
'GradientBoostingRegressor',
'LGBMRegressor',
'XGBRegressor',
'CatBoostRegressor'
)
_SUPPORTED_MODELS = _SUPPORTED_CLASSIFICATION_MODELS + _SUPPORTED_REGRESSION_MODELS
def __init__(self, model, step):
"""Construct wrapper for model with `predict` and `predict_proba` methods.
Parameters
----------
model
boosting model to wrap.
step
Number of iterations/estimators to limit the model on predictions.
"""
self.model_class = get_model_of_pipeline(model).__class__.__name__
self.step = step
if self.model_class in ['AdaBoostClassifier', 'GradientBoostingClassifier', 'AdaBoostRegressor',
'GradientBoostingRegressor']:
self.model = deepcopy(model)
if isinstance(model, Pipeline):
internal_estimator = get_model_of_pipeline(self.model)
internal_estimator.estimators_ = internal_estimator.estimators_[:self.step]
else:
self.model.estimators_ = self.model.estimators_[:self.step]
else:
self.model = model
def _raise_not_supported_model_error(cls, model_class):
if model_class != '_DummyModel':
raise ModelValidationError(cls._UNSUPPORTED_MODEL_ERROR.format(
supported_models=cls._SUPPORTED_MODELS,
model_type=model_class
))
else:
raise ModelValidationError(cls._NO_MODEL_ERROR)
def predict_proba(self, x):
if self.model_class in ['AdaBoostClassifier', 'GradientBoostingClassifier']:
return self.model.predict_proba(x)
elif self.model_class == 'LGBMClassifier':
return self.model.predict_proba(x, num_iteration=self.step)
elif self.model_class == 'XGBClassifier':
return self.model.predict_proba(x, iteration_range=(0, self.step))
elif self.model_class == 'CatBoostClassifier':
return self.model.predict_proba(x, ntree_end=self.step)
else:
self._raise_not_supported_model_error(self.model_class)
def predict(self, x):
if self.model_class in ['AdaBoostClassifier', 'GradientBoostingClassifier', 'AdaBoostRegressor',
'GradientBoostingRegressor']:
return self.model.predict(x)
elif self.model_class in ['LGBMClassifier', 'LGBMRegressor']:
return self.model.predict(x, num_iteration=self.step)
elif self.model_class in ['XGBClassifier', 'XGBRegressor']:
return self.model.predict(x, iteration_range=(0, self.step))
elif self.model_class in ['CatBoostClassifier', 'CatBoostRegressor']:
return self.model.predict(x, ntree_end=self.step)
else:
self._raise_not_supported_model_error(self.model_class)
def n_estimators(cls, model):
model = get_model_of_pipeline(model)
model_class = model.__class__.__name__
n_estimator = None
if model_class in ['AdaBoostClassifier', 'GradientBoostingClassifier', 'AdaBoostRegressor',
'GradientBoostingRegressor']:
n_estimator = len(model.estimators_)
elif model_class in ['LGBMClassifier', 'LGBMRegressor']:
n_estimator = model.n_estimators
elif model_class in ['XGBClassifier', 'XGBRegressor']:
n_estimator = model.n_estimators
elif model_class in ['CatBoostClassifier', 'CatBoostRegressor']:
n_estimator = model.tree_count_
else:
cls._raise_not_supported_model_error(model_class=model_class)
if n_estimator is None:
raise ModelValidationError('Could not extract number of estimators from model')
return n_estimator
def _partial_score(scorer, dataset, model, step):
partial_model = PartialBoostingModel(model, step)
return scorer(partial_model, dataset) | null |
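A minimal sketch of the wrapper above on a scikit-learn gradient boosting model; it sticks to the instance methods shown (in the library, `n_estimators` and the error helper are classmethods).
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
X, y = make_classification(n_samples=200, random_state=0)
gbm = GradientBoostingClassifier(n_estimators=50, random_state=0).fit(X, y)
partial = PartialBoostingModel(gbm, step=10)  # predictions use only the first 10 estimators
early_preds = partial.predict(X)
early_probas = partial.predict_proba(X)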
538 | from copy import deepcopy
from typing import TYPE_CHECKING, Callable, Tuple, Union
import numpy as np
import plotly.graph_objects as go
from sklearn.pipeline import Pipeline
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.core.errors import DeepchecksValueError, ModelValidationError
from deepchecks.tabular import Context, TrainTestCheck
from deepchecks.utils.docref import doclink
from deepchecks.utils.model import get_model_of_pipeline
from deepchecks.utils.strings import format_percent
The provided code snippet includes necessary dependencies for implementing the `_calculate_steps` function. Write a Python function `def _calculate_steps(num_steps, num_estimators)` to solve the following problem:
Calculate steps (integers between 1 to num_estimators) to work on.
Here is the function:
def _calculate_steps(num_steps, num_estimators):
"""Calculate steps (integers between 1 to num_estimators) to work on."""
if num_steps >= num_estimators:
return list(range(1, num_estimators + 1))
if num_steps <= 5:
steps_percents = np.linspace(0, 1.0, num_steps + 1)[1:]
steps_numbers = np.ceil(steps_percents * num_estimators)
steps_set = {int(s) for s in steps_numbers}
else:
steps_percents = np.linspace(5 / num_estimators, 1.0, num_steps - 4)[1:]
steps_numbers = np.ceil(steps_percents * num_estimators)
steps_set = {int(s) for s in steps_numbers}
        # We want to forcefully take the first 5 estimators, since they have the largest effect on the model performance
steps_set.update({1, 2, 3, 4, 5})
return sorted(steps_set) | Calculate steps (integers between 1 to num_estimators) to work on. |
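Two small worked examples of the step calculation above.
print(_calculate_steps(num_steps=5, num_estimators=100))  # [20, 40, 60, 80, 100]
print(_calculate_steps(num_steps=8, num_estimators=20))   # [1, 2, 3, 4, 5, 10, 15, 20]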
539 | from collections import defaultdict
from typing import List, Union
import pandas as pd
from pandas.api.types import infer_dtype
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.tabular.utils.feature_importance import N_TOP_MESSAGE, column_importance_sorter_df
from deepchecks.tabular.utils.messages import get_condition_passed_message
from deepchecks.utils.dataframes import select_from_dataframe
from deepchecks.utils.strings import format_percent, string_baseform
from deepchecks.utils.typing import Hashable
def _is_stringed_type(col) -> bool:
return infer_dtype(col) not in ['integer', 'decimal', 'floating']
def string_baseform(string: str, allow_empty_result: bool = False) -> str:
"""Normalize the string input to a uniform form.
If input is a string containing alphanumeric characters or if allow_empty_result is set to True,
    removes all non-alphanumeric characters and converts characters to lower form.
Parameters
----------
allow_empty_result : bool , default : False
        bool indicating whether to return an empty result when no alphanumeric characters are present (otherwise the original input is returned)
string : str
string to remove special characters from
Returns
-------
str
original input if condition is not met or lower form alphanumeric characters of input.
"""
if not isinstance(string, str):
return string
lower_alphanumeric_form = string.translate(DEL_MAP).lower()
if len(lower_alphanumeric_form) > 0 or allow_empty_result:
return lower_alphanumeric_form
else:
return string
def _get_special_samples(column_data: pd.Series) -> Union[dict, None]:
if not _is_stringed_type(column_data):
return None
samples_to_count = defaultdict(lambda: 0)
for sample in column_data:
if isinstance(sample, str) and len(sample) > 0 and len(string_baseform(sample, True)) == 0:
samples_to_count[sample] = samples_to_count[sample] + 1
return samples_to_count or None | null |
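A tiny usage sketch for the helper above, assuming the full deepchecks string utilities (e.g. the character table used by `string_baseform`) are available; the sample values are made up.
import pandas as pd
col = pd.Series(['hello', '!!!', '  ', '#$%', 'world', '!!!'])
special = _get_special_samples(col)
# counts per sample made only of special characters, e.g. {'!!!': 2, '  ': 1, '#$%': 1}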
540 | from typing import Dict, List, Tuple, Union
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from scipy import stats
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.tabular.utils.feature_importance import N_TOP_MESSAGE, column_importance_sorter_df
from deepchecks.tabular.utils.messages import get_condition_passed_message
from deepchecks.utils.dataframes import select_from_dataframe
from deepchecks.utils.strings import format_number, format_percent, is_string_column
from deepchecks.utils.typing import Hashable
The provided code snippet includes necessary dependencies for implementing the `outlier_on_percentile_histogram` function. Write a Python function `def outlier_on_percentile_histogram(percentile_histogram: Dict[float, float], iqr_percent: float = 85, outlier_factor: float = 5) -> Tuple[Tuple[float, float]]` to solve the following problem:
Get outlier ranges on histogram. Parameters ---------- percentile_histogram : Dict[float, float] histogram to search for outliers in shape [0.0-100.0]->[float] iqr_percent : float , default: 85 Interquartile range upper percentage, start searching for outliers outside IQR. outlier_factor : float , default: 5 a factor to consider outlier. Returns ------- Tuple[Tuple[float, float]] percent ranges in the histogram that are outliers, empty tuple if none is found
Here is the function:
def outlier_on_percentile_histogram(percentile_histogram: Dict[float, float], iqr_percent: float = 85,
outlier_factor: float = 5) -> Tuple[Tuple[float, float]]:
"""Get outlier ranges on histogram.
Parameters
----------
percentile_histogram : Dict[float, float]
histogram to search for outliers in shape [0.0-100.0]->[float]
iqr_percent : float , default: 85
Interquartile range upper percentage, start searching for outliers outside IQR.
outlier_factor : float , default: 5
a factor to consider outlier.
Returns
-------
Tuple[Tuple[float, float]]
percent ranges in the histogram that are outliers, empty tuple if none is found
"""
if any((k < 0) or k > 100 for k in percentile_histogram.keys()):
raise ValueError('dict keys must be percentiles between 0 and 100')
if any((v < 0) for v in percentile_histogram.values()):
raise ValueError('dict values must be counts that are non-negative numbers')
percentile_df = pd.DataFrame.from_dict(percentile_histogram, orient='index')
# calculate IQR with iqr_percent
closest_point_upper = np.argmin(np.abs(iqr_percent - percentile_df.index.values))
closest_point_lower = np.argmin(np.abs(100 - iqr_percent - percentile_df.index.values))
center_point = np.argmin(np.abs(50 - percentile_df.index.values))
iqr = np.abs(percentile_df.iloc[closest_point_upper] - percentile_df.iloc[closest_point_lower])
outlier_df = percentile_df[
(np.abs(percentile_df - percentile_df.iloc[center_point])
> outlier_factor * iqr / 2).values
]
outlier_section_list = []
lower_outlier_range = outlier_df[outlier_df.index < 50]
if lower_outlier_range.shape[0] > 0:
outlier_section_list.append((lower_outlier_range.index.values[0], lower_outlier_range.index.values[-1]))
upper_outlier_range = outlier_df[outlier_df.index > 50]
if upper_outlier_range.shape[0] > 0:
outlier_section_list.append((upper_outlier_range.index.values[0], upper_outlier_range.index.values[-1]))
return tuple(outlier_section_list) | Get outlier ranges on histogram. Parameters ---------- percentile_histogram : Dict[float, float] histogram to search for outliers in shape [0.0-100.0]->[float] iqr_percent : float , default: 85 Interquartile range upper percentage, start searching for outliers outside IQR. outlier_factor : float , default: 5 a factor to consider outlier. Returns ------- Tuple[Tuple[float, float]] percent ranges in the histogram that are outliers, empty tuple if none is found |
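A worked example of the histogram helper above with a deliberately inflated upper tail; the numbers are synthetic.
hist = {float(p): float(p) for p in range(0, 101, 5)}  # percentile -> value, linear baseline
hist[95.0], hist[100.0] = 500.0, 1000.0                # inflate the upper tail
print(outlier_on_percentile_histogram(hist))           # ((95.0, 100.0),): only the upper tail is flagged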
541 | from typing import Dict, List, Tuple, Union
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from scipy import stats
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.tabular.utils.feature_importance import N_TOP_MESSAGE, column_importance_sorter_df
from deepchecks.tabular.utils.messages import get_condition_passed_message
from deepchecks.utils.dataframes import select_from_dataframe
from deepchecks.utils.strings import format_number, format_percent, is_string_column
from deepchecks.utils.typing import Hashable
def _in_range(x, a, b):
return a <= x <= b | null |
542 | from typing import Dict, List, Tuple, Union
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from scipy import stats
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.tabular.utils.feature_importance import N_TOP_MESSAGE, column_importance_sorter_df
from deepchecks.tabular.utils.messages import get_condition_passed_message
from deepchecks.utils.dataframes import select_from_dataframe
from deepchecks.utils.strings import format_number, format_percent, is_string_column
from deepchecks.utils.typing import Hashable
def trim(x, max_length):
if len(x) <= max_length:
return x
return x[:max_length] + '...' | null |
543 | import itertools
from collections import defaultdict
from typing import Dict, List, Optional, Union
import pandas as pd
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.core.reduce_classes import ReduceFeatureMixin
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.tabular._shared_docs import docstrings
from deepchecks.tabular.utils.feature_importance import N_TOP_MESSAGE, column_importance_sorter_df
from deepchecks.tabular.utils.messages import get_condition_passed_message
from deepchecks.utils.dataframes import select_from_dataframe
from deepchecks.utils.strings import format_percent, get_base_form_to_variants_dict, is_string_column
from deepchecks.utils.typing import Hashable
def get_condition_passed_message(sized, feature=False):
"""Get a message for a condition that passed that specifies the number of columns passed."""
verb = 'feature' if feature else 'column'
if isinstance(sized, int):
num_columns = sized
elif isinstance(sized, Sized):
num_columns = len(sized)
else:
raise TypeError('sized must be an int or a Sized')
if num_columns == 0:
return f'No relevant {verb}s to check were found'
message = f'Passed for {num_columns} relevant {verb}'
if num_columns > 1:
message += 's'
return message
def _condition_variants_number(result, num_max_variants: int, max_cols_to_show: int = 5, max_forms_to_show: int = 5):
not_passing_variants = defaultdict(list)
for col, baseforms in result['columns'].items():
for base_form, variants_list in baseforms.items():
if len(variants_list) > num_max_variants:
if len(not_passing_variants[col]) < max_forms_to_show:
not_passing_variants[col].append(base_form)
if not_passing_variants:
variants_to_show = dict(itertools.islice(not_passing_variants.items(), max_cols_to_show))
details = f'Found {len(not_passing_variants)} out of {len(result["columns"])} columns with amount of ' \
f'variants above threshold: {variants_to_show}'
return ConditionResult(ConditionCategory.WARN, details)
return ConditionResult(ConditionCategory.PASS, get_condition_passed_message(result['columns']))
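A minimal usage sketch for the condition above, using a hand-built `result` dictionary in the structure the function expects (the column and base-form names are illustrative only):
result = {'columns': {'name': {'deep': ['Deep', 'DEEP', 'deep!'], 'check': ['check']}}}
# 'deep' has 3 variants, which exceeds num_max_variants=2, so the condition comes back as WARN
condition_result = _condition_variants_number(result, num_max_variants=2)
print(condition_result.category, condition_result.details)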
544 | import time
from typing import List, Union
import numpy as np
from PyNomaly import loop
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.core.errors import (DeepchecksProcessError, DeepchecksTimeoutError, DeepchecksValueError,
NotEnoughSamplesError)
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.utils import gower_distance
from deepchecks.utils.dataframes import select_from_dataframe
from deepchecks.utils.strings import format_number, format_percent
from deepchecks.utils.typing import Hashable
def format_percent(ratio: float, floating_point: int = 2, scientific_notation_threshold: int = 4,
add_positive_prefix: bool = False) -> str:
"""Format percent for elegant display.
Parameters
----------
ratio : float
Ratio to be displayed as percent
floating_point: int , default: 2
Number of floating points to display
scientific_notation_threshold: int, default: 4
Max number of floating points for which to show number as float. If number of floating points is larger than
this parameter, scientific notation (e.g. "10E-5%") will be shown.
add_positive_prefix: bool, default: False
add plus sign before positive percentages (minus sign is always added for negative percentages).
Returns
-------
str
String of ratio as percent
"""
result: str
if ratio < 0:
ratio = -ratio
prefix = '-'
else:
prefix = '+' if add_positive_prefix and ratio != 0 else ''
if int(ratio) == ratio:
result = f'{int(ratio) * 100}%'
elif ratio > 1:
result = truncate_zero_percent(ratio, floating_point)
elif ratio < 10**(-(2+floating_point)):
if ratio > 10**(-(2+scientific_notation_threshold)):
result = truncate_zero_percent(ratio, scientific_notation_threshold)
else:
result = f'{Decimal(ratio * 100):.{floating_point}E}%'
elif ratio > (1-10**(-(2+floating_point))):
if floating_point > 0:
result = f'99.{"".join(["9"]*floating_point)}%'
else:
result = '99%'
else:
result = truncate_zero_percent(ratio, floating_point)
return prefix + result
def _condition_outliers_number(quantiles_vector: np.ndarray, outlier_score_threshold: float,
max_outliers_ratio: float = 0):
max_outliers_ratio = max(round(max_outliers_ratio, 3), 0.001)
score_at_max_outliers_ratio = quantiles_vector[int(1000 - max_outliers_ratio * 1000)]
category = ConditionCategory.WARN if score_at_max_outliers_ratio > outlier_score_threshold \
else ConditionCategory.PASS
quantiles_above_threshold = quantiles_vector > outlier_score_threshold
if quantiles_above_threshold.any():
ratio_above_threshold = round((1000 - np.argmax(quantiles_above_threshold)) / 1000, 3)
else:
ratio_above_threshold = 0
details = f'{format_percent(ratio_above_threshold)} of dataset samples above outlier threshold'
return ConditionResult(category, details) | null |
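An illustrative call to the outlier condition above, assuming the quantile vector is built from per-sample outlier scores with 1000 evenly spaced quantiles, as the 0-999 indexing implies:
import numpy as np

rng = np.random.default_rng(0)
outlier_scores = np.concatenate([rng.uniform(0.0, 0.5, 990), rng.uniform(0.8, 1.0, 10)])  # ~1% high scores
quantiles_vector = np.quantile(outlier_scores, np.linspace(0, 1, 1000))  # 1000-point quantile vector
# With a 0.7 threshold and at most 0.5% allowed outliers, this example should produce a WARN result
print(_condition_outliers_number(quantiles_vector, outlier_score_threshold=0.7, max_outliers_ratio=0.005))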
545 | import math
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.core.reduce_classes import ReduceFeatureMixin
from deepchecks.tabular import Context, SingleDatasetCheck
from deepchecks.tabular._shared_docs import docstrings
from deepchecks.tabular.utils.feature_importance import N_TOP_MESSAGE
from deepchecks.tabular.utils.messages import get_condition_passed_message
from deepchecks.utils.dataframes import select_from_dataframe
from deepchecks.utils.strings import format_percent, string_baseform
from deepchecks.utils.typing import Hashable
def nan_type(x):
if x is np.nan:
return 'numpy.nan'
elif x is pd.NA:
return 'pandas.NA'
elif x is pd.NaT:
return 'pandas.NaT'
elif isinstance(x, float) and math.isnan(x):
return 'math.nan'
return str(x) | null |
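A quick illustration of what `nan_type` distinguishes (note that `float('nan')` is a different object from `numpy.nan`, so it falls through to the `math.isnan` branch):
import numpy as np
import pandas as pd

for value in (np.nan, pd.NA, pd.NaT, float('nan'), 'not-a-nan'):
    print(nan_type(value))  # numpy.nan, pandas.NA, pandas.NaT, math.nan, not-a-nan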
546 | import typing as t
import warnings
from collections import Counter
import numpy as np
import pandas as pd
from IPython.display import HTML, display_html
from pandas.api.types import infer_dtype
from sklearn.model_selection import train_test_split
from typing_extensions import Literal as L
from deepchecks.core.errors import DatasetValidationError, DeepchecksNotSupportedError, DeepchecksValueError
from deepchecks.tabular.utils.task_type import TaskType
from deepchecks.utils.dataframes import select_from_dataframe
from deepchecks.utils.logger import get_logger
from deepchecks.utils.strings import get_docs_link
from deepchecks.utils.type_inference import infer_categorical_features, infer_numerical_features
from deepchecks.utils.typing import Hashable
import deepchecks
from packaging.version import Version
def get_docs_link():
"""Return the link to the docs with current version.
Returns
-------
str
the link to the docs.
"""
if deepchecks.__version__ and deepchecks.__version__ != 'dev':
version_obj: Version = Version(deepchecks.__version__)
# The version in the docs url is without the hotfix part
version = f'{version_obj.major}.{version_obj.minor}'
else:
version = 'stable'
return f'https://docs.deepchecks.com/{version}/'
The provided code snippet includes necessary dependencies for implementing the `_get_dataset_docs_tag` function. Write a Python function `def _get_dataset_docs_tag()` to solve the following problem:
Return link to documentation for Dataset class.
Here is the function:
def _get_dataset_docs_tag():
"""Return link to documentation for Dataset class."""
link = get_docs_link() + 'user-guide/tabular/dataset_object.html?html?utm_source=display_output' \
'&utm_medium=referral&utm_campaign=check_link'
return f'<a href="{link}" target="_blank">Dataset docs</a>' | Return link to documentation for Dataset class. |
547 | from typing import Union
import numpy as np
from sklearn.metrics import confusion_matrix, roc_auc_score
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.utils.metrics import averaging_mechanism
def assert_multi_label_shape(y):
if not isinstance(y, np.ndarray):
raise DeepchecksValueError(f'Expected y to be numpy array instead got: {type(y)}')
if y.ndim != 2:
raise DeepchecksValueError(f'Expected y to be numpy array with 2 dimensions instead got {y.ndim} dimensions.')
assert_binary_values(y)
# Since the metrics are not yet supporting real multi-label, make sure there isn't any row with sum larger than 1
if y.sum(axis=1).max() > 1:
raise DeepchecksValueError('Multi label scorers are not supported yet, the sum of a row in multi-label format '
'must not be larger than 1')
def assert_single_label_shape(y):
if not isinstance(y, np.ndarray):
raise DeepchecksValueError(f'Expected y to be numpy array instead got: {type(y)}')
if y.ndim != 1:
raise DeepchecksValueError(f'Expected y to be numpy array with 1 dimension instead got {y.ndim} dimensions.')
assert_binary_values(y)
def _false_positive_rate_per_class(y_true, y_pred, classes): # False Positives / (False Positives + True Negatives)
result = []
for cls in classes:
y_true_cls, y_pred_cls = np.asarray(y_true) == cls, np.asarray(y_pred) == cls
matrix = confusion_matrix(y_true_cls, y_pred_cls, labels=[0, 1])
result.append(matrix[0, 1] / (matrix[0, 1] + matrix[0, 0]) if (matrix[0, 1] + matrix[0, 0]) > 0 else 0)
return np.asarray(result)
def _micro_false_positive_rate(y_true, y_pred, classes):
fp, tn = 0, 0
for cls in classes:
y_true_cls, y_pred_cls = np.asarray(y_true) == cls, np.asarray(y_pred) == cls
matrix = confusion_matrix(y_true_cls, y_pred_cls, labels=[0, 1])
fp += matrix[0, 1]
tn += matrix[0, 0]
return fp / (fp + tn) if (fp + tn) > 0 else 0
def averaging_mechanism(averaging_method: str, scores_per_class, weights=None) -> Union[np.ndarray, float]:
"""Receive scores per class and averaging method and returns result based on averaging_method.
Parameters
----------
averaging_method : str, default: 'per_class'
Determines which averaging method to apply, possible values are:
'per_class': Return a np array with the scores for each class (sorted by class name).
'binary': Returns the score for the positive class. Should be used only in binary classification cases.
'macro': Returns the mean of scores per class.
'weighted': Returns a weighted mean of scores based provided weights.
scores_per_class : array-like of shape (n_samples, n_classes)
The score of the metric per class when considering said class as the positive class and the remaining
classes as the negative.
weights : array-like of shape (n_samples,), default: None
True labels. Only required for 'weighted' averaging method.
Returns
-------
score : Union[np.ndarray, float]
The score for the given metric.
"""
if averaging_method == 'binary':
if len(scores_per_class) != 2:
raise DeepchecksValueError('Averaging method "binary" can only be used in binary classification.')
return scores_per_class[1]
elif averaging_method == 'per_class':
return np.asarray(scores_per_class)
elif averaging_method == 'macro':
# Classes that did not appear in the data are not considered as part of macro averaging.
return np.mean(scores_per_class) if weights is None else np.mean(scores_per_class[weights != 0])
elif averaging_method == 'weighted':
if weights is None:
raise DeepchecksValueError('Weights are required in order to apply weighted averaging method.')
return np.multiply(scores_per_class, weights).sum() / sum(weights)
else:
raise DeepchecksValueError(f'Unknown averaging {averaging_method}')
The provided code snippet includes necessary dependencies for implementing the `false_positive_rate_metric` function. Write a Python function `def false_positive_rate_metric(y_true, y_pred, averaging_method: str = 'per_class') -> Union[np.ndarray, float]` to solve the following problem:
Receive a metric which calculates false positive rate. The rate is calculated as: False Positives / (False Positives + True Negatives) Parameters ---------- y_true : array-like of shape (n_samples, n_classes) or (n_samples) for binary The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). y_pred : array-like of shape (n_samples, n_classes) or (n_samples) for binary The predictions should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). averaging_method : str, default: 'per_class' Determines which averaging method to apply, possible values are: 'per_class': Return a np array with the scores for each class (sorted by class name). 'binary': Returns the score for the positive class. Should be used only in binary classification cases. 'micro': Returns the micro-averaged score. 'macro': Returns the mean of scores per class. 'weighted': Returns a weighted mean of scores based of the class size in y_true. Returns ------- score : Union[np.ndarray, float] The score for the given metric.
Here is the function:
def false_positive_rate_metric(y_true, y_pred, averaging_method: str = 'per_class') -> Union[np.ndarray, float]:
"""Receive a metric which calculates false positive rate.
The rate is calculated as: False Positives / (False Positives + True Negatives)
Parameters
----------
y_true : array-like of shape (n_samples, n_classes) or (n_samples) for binary
The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector,
representing the presence of the i-th label in that sample (multi-label).
y_pred : array-like of shape (n_samples, n_classes) or (n_samples) for binary
The predictions should be passed in a sequence of sequences, with the sequence for each sample being a binary
vector, representing the presence of the i-th label in that sample (multi-label).
averaging_method : str, default: 'per_class'
Determines which averaging method to apply, possible values are:
'per_class': Return a np array with the scores for each class (sorted by class name).
'binary': Returns the score for the positive class. Should be used only in binary classification cases.
'micro': Returns the micro-averaged score.
'macro': Returns the mean of scores per class.
'weighted': Returns a weighted mean of scores based of the class size in y_true.
Returns
-------
score : Union[np.ndarray, float]
The score for the given metric.
"""
# Convert multi label into single label
if averaging_method != 'binary':
assert_multi_label_shape(y_true)
assert_multi_label_shape(y_pred)
classes = range(y_true.shape[1])
y_true = np.argmax(y_true, axis=1)
y_pred = np.argmax(y_pred, axis=1)
else:
assert_single_label_shape(y_true)
assert_single_label_shape(y_pred)
classes = [0, 1]
if averaging_method == 'micro':
return _micro_false_positive_rate(y_true, y_pred, classes)
scores_per_class = _false_positive_rate_per_class(y_true, y_pred, classes)
weights = [sum(y_true == cls) for cls in classes]
return averaging_mechanism(averaging_method, scores_per_class, weights) | Receive a metric which calculates false positive rate. The rate is calculated as: False Positives / (False Positives + True Negatives) Parameters ---------- y_true : array-like of shape (n_samples, n_classes) or (n_samples) for binary The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). y_pred : array-like of shape (n_samples, n_classes) or (n_samples) for binary The predictions should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). averaging_method : str, default: 'per_class' Determines which averaging method to apply, possible values are: 'per_class': Return a np array with the scores for each class (sorted by class name). 'binary': Returns the score for the positive class. Should be used only in binary classification cases. 'micro': Returns the micro-averaged score. 'macro': Returns the mean of scores per class. 'weighted': Returns a weighted mean of scores based of the class size in y_true. Returns ------- score : Union[np.ndarray, float] The score for the given metric. |
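A small usage sketch for `false_positive_rate_metric`. The `assert_binary_values` helper is referenced above but not included in the listed dependencies, so a minimal stand-in is assumed here; the label matrices are illustrative one-hot encodings of a 3-class problem.
import numpy as np

def assert_binary_values(y):
    # Assumed stand-in for the missing helper: reject anything other than 0/1 entries
    if set(np.unique(y)) - {0, 1}:
        raise DeepchecksValueError(f'Expected only binary values (0/1), got: {set(np.unique(y))}')

y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]])  # one-hot labels
y_pred = np.array([[1, 0, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0]])  # one-hot predictions
print(false_positive_rate_metric(y_true, y_pred, 'per_class'))   # FPR for each of the 3 classes
print(false_positive_rate_metric(y_true, y_pred, 'macro'))       # unweighted mean over classes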
548 | from typing import Union
import numpy as np
from sklearn.metrics import confusion_matrix, roc_auc_score
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.utils.metrics import averaging_mechanism
def assert_multi_label_shape(y):
if not isinstance(y, np.ndarray):
raise DeepchecksValueError(f'Expected y to be numpy array instead got: {type(y)}')
if y.ndim != 2:
raise DeepchecksValueError(f'Expected y to be numpy array with 2 dimensions instead got {y.ndim} dimensions.')
assert_binary_values(y)
# Since the metrics are not yet supporting real multi-label, make sure there isn't any row with sum larger than 1
if y.sum(axis=1).max() > 1:
raise DeepchecksValueError('Multi label scorers are not supported yet, the sum of a row in multi-label format '
'must not be larger than 1')
def assert_single_label_shape(y):
if not isinstance(y, np.ndarray):
raise DeepchecksValueError(f'Expected y to be numpy array instead got: {type(y)}')
if y.ndim != 1:
raise DeepchecksValueError(f'Expected y to be numpy array with 1 dimension instead got {y.ndim} dimensions.')
assert_binary_values(y)
def _false_negative_rate_per_class(y_true, y_pred, classes): # False Negatives / (False Negatives + True Positives)
result = []
for cls in classes:
y_true_cls, y_pred_cls = np.asarray(y_true) == cls, np.asarray(y_pred) == cls
matrix = confusion_matrix(y_true_cls, y_pred_cls, labels=[0, 1])
result.append(matrix[1, 0] / (matrix[1, 0] + matrix[1, 1]) if (matrix[1, 0] + matrix[1, 1]) > 0 else 0)
return np.asarray(result)
def _micro_false_negative_rate(y_true, y_pred, classes):
fn, tp = 0, 0
for cls in classes:
y_true_cls, y_pred_cls = np.asarray(y_true) == cls, np.asarray(y_pred) == cls
matrix = confusion_matrix(y_true_cls, y_pred_cls, labels=[0, 1])
fn += matrix[1, 0]
tp += matrix[1, 1]
return fn / (fn + tp) if (fn + tp) > 0 else 0
def averaging_mechanism(averaging_method: str, scores_per_class, weights=None) -> Union[np.ndarray, float]:
"""Receive scores per class and averaging method and returns result based on averaging_method.
Parameters
----------
averaging_method : str, default: 'per_class'
Determines which averaging method to apply, possible values are:
'per_class': Return a np array with the scores for each class (sorted by class name).
'binary': Returns the score for the positive class. Should be used only in binary classification cases.
'macro': Returns the mean of scores per class.
'weighted': Returns a weighted mean of scores based provided weights.
scores_per_class : array-like of shape (n_samples, n_classes)
The score of the metric per class when considering said class as the positive class and the remaining
classes as the negative.
weights : array-like of shape (n_samples,), default: None
True labels. Only required for 'weighted' averaging method.
Returns
-------
score : Union[np.ndarray, float]
The score for the given metric.
"""
if averaging_method == 'binary':
if len(scores_per_class) != 2:
raise DeepchecksValueError('Averaging method "binary" can only be used in binary classification.')
return scores_per_class[1]
elif averaging_method == 'per_class':
return np.asarray(scores_per_class)
elif averaging_method == 'macro':
# Classes that did not appear in the data are not considered as part of macro averaging.
return np.mean(scores_per_class) if weights is None else np.mean(scores_per_class[weights != 0])
elif averaging_method == 'weighted':
if weights is None:
raise DeepchecksValueError('Weights are required in order to apply weighted averaging method.')
return np.multiply(scores_per_class, weights).sum() / sum(weights)
else:
raise DeepchecksValueError(f'Unknown averaging {averaging_method}')
The provided code snippet includes necessary dependencies for implementing the `false_negative_rate_metric` function. Write a Python function `def false_negative_rate_metric(y_true, y_pred, averaging_method: str = 'per_class') -> Union[np.ndarray, float]` to solve the following problem:
Receive a metric which calculates false negative rate. The rate is calculated as: False Negatives / (False Negatives + True Positives) Parameters ---------- y_true : array-like of shape (n_samples, n_classes) or (n_samples) for binary The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). y_pred : array-like of shape (n_samples, n_classes) or (n_samples) for binary The predictions should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). averaging_method : str, default: 'per_class' Determines which averaging method to apply, possible values are: 'per_class': Return a np array with the scores for each class (sorted by class name). 'binary': Returns the score for the positive class. Should be used only in binary classification cases. 'micro': Returns the micro-averaged score. 'macro': Returns the mean of scores per class. 'weighted': Returns a weighted mean of scores based of the class size in y_true. Returns ------- score : Union[np.ndarray, float] The score for the given metric.
Here is the function:
def false_negative_rate_metric(y_true, y_pred, averaging_method: str = 'per_class') -> Union[np.ndarray, float]:
"""Receive a metric which calculates false negative rate.
The rate is calculated as: False Negatives / (False Negatives + True Positives)
Parameters
----------
y_true : array-like of shape (n_samples, n_classes) or (n_samples) for binary
The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector,
representing the presence of the i-th label in that sample (multi-label).
y_pred : array-like of shape (n_samples, n_classes) or (n_samples) for binary
The predictions should be passed in a sequence of sequences, with the sequence for each sample being a binary
vector, representing the presence of the i-th label in that sample (multi-label).
averaging_method : str, default: 'per_class'
Determines which averaging method to apply, possible values are:
'per_class': Return a np array with the scores for each class (sorted by class name).
'binary': Returns the score for the positive class. Should be used only in binary classification cases.
'micro': Returns the micro-averaged score.
'macro': Returns the mean of scores per class.
'weighted': Returns a weighted mean of scores based of the class size in y_true.
Returns
-------
score : Union[np.ndarray, float]
The score for the given metric.
"""
# Convert multi label into single label
if averaging_method != 'binary':
assert_multi_label_shape(y_true)
assert_multi_label_shape(y_pred)
classes = range(y_true.shape[1])
y_true = np.argmax(y_true, axis=1)
y_pred = np.argmax(y_pred, axis=1)
else:
assert_single_label_shape(y_true)
assert_single_label_shape(y_pred)
classes = [0, 1]
if averaging_method == 'micro':
return _micro_false_negative_rate(y_true, y_pred, classes)
scores_per_class = _false_negative_rate_per_class(y_true, y_pred, classes)
weights = [sum(y_true == cls) for cls in classes]
return averaging_mechanism(averaging_method, scores_per_class, weights) | Receive a metric which calculates false negative rate. The rate is calculated as: False Negatives / (False Negatives + True Positives) Parameters ---------- y_true : array-like of shape (n_samples, n_classes) or (n_samples) for binary The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). y_pred : array-like of shape (n_samples, n_classes) or (n_samples) for binary The predictions should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). averaging_method : str, default: 'per_class' Determines which averaging method to apply, possible values are: 'per_class': Return a np array with the scores for each class (sorted by class name). 'binary': Returns the score for the positive class. Should be used only in binary classification cases. 'micro': Returns the micro-averaged score. 'macro': Returns the mean of scores per class. 'weighted': Returns a weighted mean of scores based of the class size in y_true. Returns ------- score : Union[np.ndarray, float] The score for the given metric. |
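A binary-mode example for `false_negative_rate_metric`, using plain 0/1 label vectors; it reuses the `assert_binary_values` stand-in sketched after the previous row.
import numpy as np

y_true = np.array([1, 0, 1, 1, 0, 1])
y_pred = np.array([1, 0, 0, 1, 1, 1])
# One of the four positive samples is missed, so the binary false negative rate is 0.25
print(false_negative_rate_metric(y_true, y_pred, 'binary'))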
549 | from typing import Union
import numpy as np
from sklearn.metrics import confusion_matrix, roc_auc_score
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.utils.metrics import averaging_mechanism
def assert_multi_label_shape(y):
if not isinstance(y, np.ndarray):
raise DeepchecksValueError(f'Expected y to be numpy array instead got: {type(y)}')
if y.ndim != 2:
raise DeepchecksValueError(f'Expected y to be numpy array with 2 dimensions instead got {y.ndim} dimensions.')
assert_binary_values(y)
# Since the metrics are not yet supporting real multi-label, make sure there isn't any row with sum larger than 1
if y.sum(axis=1).max() > 1:
raise DeepchecksValueError('Multi label scorers are not supported yet, the sum of a row in multi-label format '
'must not be larger than 1')
def assert_single_label_shape(y):
if not isinstance(y, np.ndarray):
raise DeepchecksValueError(f'Expected y to be numpy array instead got: {type(y)}')
if y.ndim != 1:
raise DeepchecksValueError(f'Expected y to be numpy array with 1 dimension instead got {y.ndim} dimensions.')
assert_binary_values(y)
def _true_negative_rate_per_class(y_true, y_pred, classes): # True Negatives / (True Negatives + False Positives)
result = []
for cls in classes:
y_true_cls, y_pred_cls = np.asarray(y_true) == cls, np.asarray(y_pred) == cls
matrix = confusion_matrix(y_true_cls, y_pred_cls, labels=[0, 1])
result.append(matrix[0, 0] / (matrix[0, 0] + matrix[0, 1]) if (matrix[0, 0] + matrix[0, 1]) > 0 else 0)
return np.asarray(result)
def _micro_true_negative_rate(y_true, y_pred, classes):
tn, fp = 0, 0
for cls in classes:
y_true_cls, y_pred_cls = np.asarray(y_true) == cls, np.asarray(y_pred) == cls
matrix = confusion_matrix(y_true_cls, y_pred_cls, labels=[0, 1])
tn += matrix[0, 0]
fp += matrix[0, 1]
return tn / (tn + fp) if (tn + fp) > 0 else 0
def averaging_mechanism(averaging_method: str, scores_per_class, weights=None) -> Union[np.ndarray, float]:
"""Receive scores per class and averaging method and returns result based on averaging_method.
Parameters
----------
averaging_method : str, default: 'per_class'
Determines which averaging method to apply, possible values are:
'per_class': Return a np array with the scores for each class (sorted by class name).
'binary': Returns the score for the positive class. Should be used only in binary classification cases.
'macro': Returns the mean of scores per class.
'weighted': Returns a weighted mean of scores based provided weights.
scores_per_class : array-like of shape (n_samples, n_classes)
The score of the metric per class when considering said class as the positive class and the remaining
classes as the negative.
weights : array-like of shape (n_samples,), default: None
True labels. Only required for 'weighted' averaging method.
Returns
-------
score : Union[np.ndarray, float]
The score for the given metric.
"""
if averaging_method == 'binary':
if len(scores_per_class) != 2:
raise DeepchecksValueError('Averaging method "binary" can only be used in binary classification.')
return scores_per_class[1]
elif averaging_method == 'per_class':
return np.asarray(scores_per_class)
elif averaging_method == 'macro':
# Classes that did not appear in the data are not considered as part of macro averaging.
return np.mean(scores_per_class) if weights is None else np.mean(scores_per_class[weights != 0])
elif averaging_method == 'weighted':
if weights is None:
raise DeepchecksValueError('Weights are required in order to apply weighted averaging method.')
return np.multiply(scores_per_class, weights).sum() / sum(weights)
else:
raise DeepchecksValueError(f'Unknown averaging {averaging_method}')
The provided code snippet includes necessary dependencies for implementing the `true_negative_rate_metric` function. Write a Python function `def true_negative_rate_metric(y_true, y_pred, averaging_method: str = 'per_class') -> Union[np.ndarray, float]` to solve the following problem:
Receive a metric which calculates true negative rate. Alternative name to the same metric is specificity. The rate is calculated as: True Negatives / (True Negatives + False Positives) Parameters ---------- y_true : array-like of shape (n_samples, n_classes) or (n_samples) for binary The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). y_pred : array-like of shape (n_samples, n_classes) or (n_samples) for binary The predictions should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). averaging_method : str, default: 'per_class' Determines which averaging method to apply, possible values are: 'per_class': Return a np array with the scores for each class (sorted by class name). 'binary': Returns the score for the positive class. Should be used only in binary classification cases. 'micro': Returns the micro-averaged score. 'macro': Returns the mean of scores per class. 'weighted': Returns a weighted mean of scores based of the class size in y_true. Returns ------- score : Union[np.ndarray, float] The score for the given metric.
Here is the function:
def true_negative_rate_metric(y_true, y_pred, averaging_method: str = 'per_class') -> Union[np.ndarray, float]:
"""Receive a metric which calculates true negative rate. Alternative name to the same metric is specificity.
The rate is calculated as: True Negatives / (True Negatives + False Positives)
Parameters
----------
y_true : array-like of shape (n_samples, n_classes) or (n_samples) for binary
The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector,
representing the presence of the i-th label in that sample (multi-label).
y_pred : array-like of shape (n_samples, n_classes) or (n_samples) for binary
The predictions should be passed in a sequence of sequences, with the sequence for each sample being a binary
vector, representing the presence of the i-th label in that sample (multi-label).
averaging_method : str, default: 'per_class'
Determines which averaging method to apply, possible values are:
'per_class': Return a np array with the scores for each class (sorted by class name).
'binary': Returns the score for the positive class. Should be used only in binary classification cases.
'micro': Returns the micro-averaged score.
'macro': Returns the mean of scores per class.
'weighted': Returns a weighted mean of scores based of the class size in y_true.
Returns
-------
score : Union[np.ndarray, float]
The score for the given metric.
"""
# Convert multi label into single label
if averaging_method != 'binary':
assert_multi_label_shape(y_true)
assert_multi_label_shape(y_pred)
classes = range(y_true.shape[1])
y_true = np.argmax(y_true, axis=1)
y_pred = np.argmax(y_pred, axis=1)
else:
assert_single_label_shape(y_true)
assert_single_label_shape(y_pred)
classes = [0, 1]
if averaging_method == 'micro':
return _micro_true_negative_rate(y_true, y_pred, classes)
scores_per_class = _true_negative_rate_per_class(y_true, y_pred, classes)
weights = [sum(y_true == cls) for cls in classes]
return averaging_mechanism(averaging_method, scores_per_class, weights) | Receive a metric which calculates true negative rate. Alternative name to the same metric is specificity. The rate is calculated as: True Negatives / (True Negatives + False Positives) Parameters ---------- y_true : array-like of shape (n_samples, n_classes) or (n_samples) for binary The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). y_pred : array-like of shape (n_samples, n_classes) or (n_samples) for binary The predictions should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). averaging_method : str, default: 'per_class' Determines which averaging method to apply, possible values are: 'per_class': Return a np array with the scores for each class (sorted by class name). 'binary': Returns the score for the positive class. Should be used only in binary classification cases. 'micro': Returns the micro-averaged score. 'macro': Returns the mean of scores per class. 'weighted': Returns a weighted mean of scores based of the class size in y_true. Returns ------- score : Union[np.ndarray, float] The score for the given metric. |
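A micro-averaged example for `true_negative_rate_metric` (specificity), again assuming the `assert_binary_values` stand-in from the earlier sketch:
import numpy as np

y_true = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])  # one-hot labels for a 2-class problem
y_pred = np.array([[1, 0], [1, 0], [0, 1], [1, 0]])  # one misclassified sample
print(true_negative_rate_metric(y_true, y_pred, 'micro'))  # 3 / (3 + 1) = 0.75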
550 | from typing import Union
import numpy as np
from sklearn.metrics import confusion_matrix, roc_auc_score
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.utils.metrics import averaging_mechanism
def assert_multi_label_shape(y):
if not isinstance(y, np.ndarray):
raise DeepchecksValueError(f'Expected y to be numpy array instead got: {type(y)}')
if y.ndim != 2:
raise DeepchecksValueError(f'Expected y to be numpy array with 2 dimensions instead got {y.ndim} dimensions.')
assert_binary_values(y)
# Since the metrics are not yet supporting real multi-label, make sure there isn't any row with sum larger than 1
if y.sum(axis=1).max() > 1:
raise DeepchecksValueError('Multi label scorers are not supported yet, the sum of a row in multi-label format '
'must not be larger than 1')
The provided code snippet includes necessary dependencies for implementing the `roc_auc_per_class` function. Write a Python function `def roc_auc_per_class(y_true, y_pred) -> np.ndarray` to solve the following problem:
Receives predictions and true labels and returns the ROC AUC score for each class. Parameters ---------- y_true : array-like of shape (n_samples, n_classes) The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). y_pred : array-like of shape (n_samples, n_classes) Predicted label probabilities. Returns ------- roc_auc : np.ndarray The ROC AUC score for each class.
Here is the function:
def roc_auc_per_class(y_true, y_pred) -> np.ndarray:
"""Receives predictions and true labels and returns the ROC AUC score for each class.
Parameters
----------
y_true : array-like of shape (n_samples, n_classes)
The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector,
representing the presence of the i-th label in that sample (multi-label).
y_pred : array-like of shape (n_samples, n_classes)
Predicted label probabilities.
Returns
-------
roc_auc : np.ndarray
The ROC AUC score for each class.
"""
# Convert multi label into single label
assert_multi_label_shape(y_true)
classes = range(y_true.shape[1])
y_true = np.argmax(y_true, axis=1)
return np.array([roc_auc_score(y_true == class_name, y_pred[:, i]) for i, class_name in enumerate(classes)]) | Receives predictions and true labels and returns the ROC AUC score for each class. Parameters ---------- y_true : array-like of shape (n_samples, n_classes) The labels should be passed in a sequence of sequences, with the sequence for each sample being a binary vector, representing the presence of the i-th label in that sample (multi-label). y_pred : array-like of shape (n_samples, n_classes) Predicted label probabilities. Returns ------- roc_auc : np.ndarray The ROC AUC score for each class. |
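An illustrative call to `roc_auc_per_class` with one-hot labels and per-class predicted probabilities; the `assert_binary_values` stand-in from the earlier sketch is assumed here as well.
import numpy as np

y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]])
y_proba = np.array([[0.7, 0.2, 0.1],
                    [0.2, 0.5, 0.3],
                    [0.1, 0.2, 0.7],
                    [0.6, 0.3, 0.1]])
print(roc_auc_per_class(y_true, y_proba))  # one-vs-rest AUC per class, here [1. 1. 1.]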
551 | import logging
import typing as t
import warnings
from numbers import Number
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from packaging import version
from sklearn import __version__ as scikit_version
from sklearn.base import ClassifierMixin
from sklearn.metrics import get_scorer, log_loss, make_scorer, mean_absolute_error, mean_squared_error
from sklearn.metrics._scorer import _BaseScorer, _ProbaScorer
from sklearn.preprocessing import OneHotEncoder
from deepchecks.core import errors
from deepchecks.tabular.metric_utils.additional_classification_metrics import (false_negative_rate_metric,
false_positive_rate_metric,
roc_auc_per_class,
true_negative_rate_metric)
from deepchecks.tabular.utils.task_type import TaskType
from deepchecks.utils.docref import doclink
from deepchecks.utils.logger import get_logger
from deepchecks.utils.metrics import get_scorer_name
from deepchecks.utils.simple_models import PerfectModel
from deepchecks.utils.typing import BasicModel
from deepchecks.utils.validation import is_sequence_not_str
The provided code snippet includes necessary dependencies for implementing the `validate_proba` function. Write a Python function `def validate_proba(probabilities: np.array, model_classes: t.List)` to solve the following problem:
Validate that the number of classes (columns) in probabilities matches the model_classes.
Here is the function:
def validate_proba(probabilities: np.array, model_classes: t.List):
"""Validate that the number of classes (columns) in probabilities matches the model_classes."""
if probabilities.shape[1] != len(model_classes):
raise errors.ModelValidationError(
f'Model probabilities per class has {probabilities.shape[1]} '
f'classes while known model classes has {len(model_classes)}. You can set the model\'s'
f'classes manually using the model_classes argument in the run function.') | Validate that the number of classes (columns) in probabilities matches the model_classes. |
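A small sketch of how `validate_proba` is meant to be used; the class list is illustrative.
import numpy as np

proba = np.array([[0.9, 0.1], [0.3, 0.7]])
validate_proba(proba, model_classes=[0, 1])        # passes: 2 probability columns, 2 known classes
# validate_proba(proba, model_classes=[0, 1, 2])   # would raise ModelValidationError (2 columns vs 3 classes)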
552 | import typing as t
from urllib.request import urlopen
import joblib
import pandas as pd
import sklearn
from sklearn.ensemble import AdaBoostClassifier
from deepchecks.tabular.dataset import Dataset
_MODEL_URL = 'https://figshare.com/ndownloader/files/35122759'
_MODEL_VERSION = '1.0.2'
def load_data(data_format: str = 'Dataset', as_train_test: bool = True) -> \
t.Union[t.Tuple, t.Union[Dataset, pd.DataFrame]]:
"""Load and returns the Breast Cancer dataset (classification).
Parameters
----------
data_format : str, default: 'Dataset'
Represent the format of the returned value. Can be 'Dataset'|'Dataframe'
'Dataset' will return the data as a Dataset object
'Dataframe' will return the data as a pandas Dataframe object
as_train_test : bool, default: True
If True, the returned data is split into train and test exactly like the toy model
was trained. The first return value is the train data and the second is the test data.
In order to get this model, call the load_fitted_model() function.
Otherwise, returns a single object.
Returns
-------
dataset : Union[deepchecks.Dataset, pd.DataFrame]
the data object, corresponding to the data_format attribute.
train, test : Tuple[Union[deepchecks.Dataset, pd.DataFrame],Union[deepchecks.Dataset, pd.DataFrame]
tuple if as_train_test = True. Tuple of two objects represents the dataset splitted to train and test sets.
"""
if not as_train_test:
dataset = pd.read_csv(_FULL_DATA_URL)
if data_format == 'Dataset':
dataset = Dataset(dataset, label=_target, cat_features=_CAT_FEATURES)
return dataset
elif data_format == 'Dataframe':
return dataset
else:
raise ValueError('data_format must be either "Dataset" or "Dataframe"')
else:
train = pd.read_csv(_TRAIN_DATA_URL)
test = pd.read_csv(_TEST_DATA_URL)
if data_format == 'Dataset':
train = Dataset(train, label=_target, cat_features=_CAT_FEATURES)
test = Dataset(test, label=_target, cat_features=_CAT_FEATURES)
return train, test
elif data_format == 'Dataframe':
return train, test
else:
raise ValueError('data_format must be either "Dataset" or "Dataframe"')
def _build_model():
"""Build the model to fit."""
return AdaBoostClassifier(random_state=0)
The provided code snippet includes necessary dependencies for implementing the `load_fitted_model` function. Write a Python function `def load_fitted_model(pretrained=True)` to solve the following problem:
Load and return a fitted classification model to predict the target in the breast cancer dataset. Returns ------- model : Joblib The model/pipeline that was trained on the breast cancer dataset.
Here is the function:
def load_fitted_model(pretrained=True):
"""Load and return a fitted classification model to predict the flower type in the iris dataset.
Returns
-------
model : Joblib
The model/pipeline that was trained on the breast cancer dataset.
"""
if sklearn.__version__ == _MODEL_VERSION and pretrained:
with urlopen(_MODEL_URL) as f:
model = joblib.load(f)
else:
model = _build_model()
train, _ = load_data()
model.fit(train.data[train.features], train.data[train.label_name])
return model | Load and return a fitted classification model to predict the flower type in the iris dataset. Returns ------- model : Joblib The model/pipeline that was trained on the iris dataset. |
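A minimal usage sketch for the breast cancer helpers above. It assumes network access to the referenced URLs and that the installed scikit-learn matches `_MODEL_VERSION` (otherwise the model is refit locally):
train, test = load_data()                        # deepchecks Dataset objects
model = load_fitted_model()                      # pretrained AdaBoost, or refit on the train split
predictions = model.predict(test.data[test.features])
print(predictions[:5])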
553 | import typing as t
from urllib.request import urlopen
import joblib
import pandas as pd
import sklearn
from category_encoders import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from deepchecks.tabular.dataset import Dataset
_MODEL_URL = 'https://figshare.com/ndownloader/files/35122765'
_MODEL_VERSION = '1.0.2'
def load_data(data_format: str = 'Dataset', as_train_test: bool = True) -> \
t.Union[t.Tuple, t.Union[Dataset, pd.DataFrame]]:
"""Load and returns the phishing url dataset (classification).
Parameters
----------
data_format : str , default: Dataset
Represent the format of the returned value. Can be 'Dataset'|'Dataframe'
'Dataset' will return the data as a Dataset object
'Dataframe' will return the data as a pandas Dataframe object
as_train_test : bool , default: True
If True, the returned data is split into train and test exactly like the toy model
was trained. The first return value is the train data and the second is the test data.
In order to get this model, call the load_fitted_model() function.
Otherwise, returns a single object.
Returns
-------
dataset : Union[deepchecks.Dataset, pd.DataFrame]
the data object, corresponding to the data_format attribute.
train, test : Tuple[Union[deepchecks.Dataset, pd.DataFrame],Union[deepchecks.Dataset, pd.DataFrame]
tuple if as_train_test = True. Tuple of two objects represents the dataset split into train and test sets.
"""
if not as_train_test:
dataset = pd.read_csv(_FULL_DATA_URL, index_col=0)
if data_format == 'Dataset':
dataset = Dataset(dataset, label=_target, cat_features=_CAT_FEATURES, datetime_name=_DATE_COL)
return dataset
else:
train = pd.read_csv(_TRAIN_DATA_URL, index_col=0)
test = pd.read_csv(_TEST_DATA_URL, index_col=0)
if data_format == 'Dataset':
train = Dataset(train, label=_target, cat_features=_CAT_FEATURES, datetime_name=_DATE_COL)
test = Dataset(test, label=_target, cat_features=_CAT_FEATURES, datetime_name=_DATE_COL)
return train, test
def _build_model():
"""Build the model to fit."""
return Pipeline(steps=[
('preprocessing',
ColumnTransformer(transformers=[('num', SimpleImputer(),
_NUM_FEATURES),
('cat',
Pipeline(steps=[('imputer',
SimpleImputer(strategy='most_frequent')),
('encoder',
OneHotEncoder())]),
_CAT_FEATURES)])),
('model',
RandomForestClassifier(criterion='entropy', n_estimators=40, random_state=0))])
The provided code snippet includes necessary dependencies for implementing the `load_fitted_model` function. Write a Python function `def load_fitted_model(pretrained=True)` to solve the following problem:
Load and return a fitted classification model to predict the target in the phishing dataset. Returns ------- model : Joblib the model/pipeline that was trained on the phishing dataset.
Here is the function:
def load_fitted_model(pretrained=True):
"""Load and return a fitted regression model to predict the target in the phishing dataset.
Returns
-------
model : Joblib
the model/pipeline that was trained on the phishing dataset.
"""
if sklearn.__version__ == _MODEL_VERSION and pretrained:
with urlopen(_MODEL_URL) as f:
model = joblib.load(f)
else:
model = _build_model()
train, _ = load_data()
model.fit(train.data[train.features], train.data[train.label_name])
return model | Load and return a fitted regression model to predict the target in the phishing dataset. Returns ------- model : Joblib the model/pipeline that was trained on the phishing dataset. |
554 | import typing as t
from urllib.request import urlopen
import joblib
import pandas as pd
import sklearn
from category_encoders import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from deepchecks.tabular.dataset import Dataset
class UrlDatasetProcessor:
"""A custom processing pipeline for the phishing URLs dataset."""
def _cols_to_scale(self, df: pd.DataFrame) -> t.List[object]:
return [
i
for i, x in df.dtypes.items()
if pd.api.types.is_numeric_dtype(x) and i != _target
]
def _shared_preprocess(self, df: pd.DataFrame) -> pd.DataFrame:
df = df.copy()
df[_DATE_COL] = pd.to_datetime(
df[_DATE_COL], format='%Y-%m-%d')
df = df.set_index(keys=_DATE_COL, drop=True)
df = df.drop(_NON_FEATURES, axis=1)
df = pd.get_dummies(df, columns=['ext'])
return df
def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Fit this preprossor on the input dataframe and transform it."""
df = self._shared_preprocess(df)
self.scaler = sklearn.preprocessing.StandardScaler()
self.scale_cols = self._cols_to_scale(df)
df[self.scale_cols] = self.scaler.fit_transform(df[self.scale_cols])
return df
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Transform the input dataframe using this fitted preprossor."""
df = self._shared_preprocess(df)
try:
df[self.scale_cols] = self.scaler.transform(df[self.scale_cols])
return df
except AttributeError as e:
raise Exception(
'UrlDatasetProcessor is unfitted! Call fit_transform() first!'
) from e
The provided code snippet includes necessary dependencies for implementing the `get_url_preprocessor` function. Write a Python function `def get_url_preprocessor()` to solve the following problem:
Return a data processor object for the phishing URL dataset.
Here is the function:
def get_url_preprocessor():
"""Return a data processor object for the phishing URL dataset."""
return UrlDatasetProcessor() | Return a data processor object for the phishing URL dataset. |
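A usage sketch for the preprocessor factory above. It assumes the phishing `load_data` from the related module (shown in an earlier row) and the module-level constants (`_DATE_COL`, `_NON_FEATURES`, `_target`) that the class refers to but that are not listed here:
train_ds, test_ds = load_data()                      # phishing Dataset objects
processor = get_url_preprocessor()
train_df = processor.fit_transform(train_ds.data)    # fits the scaler on the train split
test_df = processor.transform(test_ds.data)          # reuses the fitted scaler
print(train_df.shape, test_df.shape)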
555 | import typing as t
from urllib.request import urlopen
import joblib
import pandas as pd
import sklearn
from sklearn.ensemble import RandomForestClassifier
from deepchecks.tabular.dataset import Dataset
_MODEL_URL = 'https://figshare.com/ndownloader/files/35122762'
_MODEL_VERSION = '1.0.2'
def load_data(data_format: str = 'Dataset', as_train_test: bool = True) -> \
t.Union[t.Tuple, t.Union[Dataset, pd.DataFrame]]:
"""Load and returns the Iris dataset (classification).
Parameters
----------
data_format : str , default: Dataset
Represent the format of the returned value. Can be 'Dataset'|'Dataframe'
'Dataset' will return the data as a Dataset object
'Dataframe' will return the data as a pandas Dataframe object
as_train_test : bool , default: True
If True, the returned data is split into train and test exactly like the toy model
was trained. The first return value is the train data and the second is the test data.
In order to get this model, call the load_fitted_model() function.
Otherwise, returns a single object.
Returns
-------
dataset : Union[deepchecks.Dataset, pd.DataFrame]
the data object, corresponding to the data_format attribute.
train, test : Tuple[Union[deepchecks.Dataset, pd.DataFrame],Union[deepchecks.Dataset, pd.DataFrame]
tuple if as_train_test = True. Tuple of two objects represents the dataset split into train and test sets.
"""
if not as_train_test:
dataset = pd.read_csv(_FULL_DATA_URL)
if data_format == 'Dataset':
dataset = Dataset(dataset, label=_target, cat_features=_CAT_FEATURES)
return dataset
else:
train = pd.read_csv(_TRAIN_DATA_URL)
test = pd.read_csv(_TEST_DATA_URL)
if data_format == 'Dataset':
train = Dataset(train, label=_target, cat_features=_CAT_FEATURES, label_type='multiclass')
test = Dataset(test, label=_target, cat_features=_CAT_FEATURES, label_type='multiclass')
return train, test
def _build_model():
"""Build the model to fit."""
return RandomForestClassifier(random_state=0)
The provided code snippet includes necessary dependencies for implementing the `load_fitted_model` function. Write a Python function `def load_fitted_model(pretrained=True)` to solve the following problem:
Load and return a fitted classification model to predict the flower type in the iris dataset. Returns ------- model : Joblib the model/pipeline that was trained on the iris dataset.
Here is the function:
def load_fitted_model(pretrained=True):
"""Load and return a fitted classification model to predict the flower type in the iris dataset.
Returns
-------
model : Joblib
the model/pipeline that was trained on the iris dataset.
"""
if sklearn.__version__ == _MODEL_VERSION and pretrained:
with urlopen(_MODEL_URL) as f:
model = joblib.load(f)
else:
model = _build_model()
train, _ = load_data()
model.fit(train.data[train.features], train.data[train.label_name])
return model | Load and return a fitted classification model to predict the flower type in the iris dataset. Returns ------- model : Joblib the model/pipeline that was trained on the iris dataset. |
556 | import typing as t
from urllib.request import urlopen
import joblib
import pandas as pd
import sklearn
from category_encoders import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from deepchecks.tabular.dataset import Dataset
_MODEL_URL = 'https://ndownloader.figshare.com/files/36146916'
_MODEL_VERSION = '1.0.2'
def load_data(data_format: str = 'Dataset', as_train_test: bool = True) -> \
t.Union[t.Tuple, t.Union[Dataset, pd.DataFrame]]:
"""Load and returns the Wine Quality dataset (regression).
Parameters
----------
data_format : str , default: Dataset
Represent the format of the returned value. Can be 'Dataset'|'Dataframe'
'Dataset' will return the data as a Dataset object
'Dataframe' will return the data as a pandas Dataframe object
as_train_test : bool , default: True
If True, the returned data is split into train and test exactly like the toy model
was trained. The first return value is the train data and the second is the test data.
In order to get this model, call the load_fitted_model() function.
Otherwise, returns a single object.
Returns
-------
dataset : Union[deepchecks.Dataset, pd.DataFrame]
the data object, corresponding to the data_format attribute.
train_data, test_data : Tuple[Union[deepchecks.Dataset, pd.DataFrame],Union[deepchecks.Dataset, pd.DataFrame]
tuple if as_train_test = True. Tuple of two objects represents the dataset split into train and test sets.
"""
if not as_train_test:
dataset = pd.read_csv(_FULL_DATA_URL)
if data_format == 'Dataset':
dataset = Dataset(dataset, label=_target, cat_features=_CAT_FEATURES)
return dataset
else:
train = pd.read_csv(_TRAIN_DATA_URL)
test = pd.read_csv(_TEST_DATA_URL)
if data_format == 'Dataset':
train = Dataset(train, label=_target, cat_features=_CAT_FEATURES)
test = Dataset(test, label=_target, cat_features=_CAT_FEATURES)
return train, test
def _build_model():
"""Build the model to fit."""
return Pipeline(steps=[
('preprocessor',
ColumnTransformer(transformers=[('num',
Pipeline(steps=[('imputer',
SimpleImputer(strategy='median')),
('scaler',
StandardScaler())]),
_NUM_FEATURES),
('cat', OneHotEncoder(),
_CAT_FEATURES)])),
('classifier', RandomForestRegressor(random_state=0, max_depth=7, n_estimators=30))
])
The provided code snippet includes necessary dependencies for implementing the `load_fitted_model` function. Write a Python function `def load_fitted_model(pretrained=True)` to solve the following problem:
Load and return a fitted regression model to predict the quality in the Wine Quality dataset. Returns ------- model : Joblib the model/pipeline that was trained on the Wine Quality dataset.
Here is the function:
def load_fitted_model(pretrained=True):
"""Load and return a fitted regression model to predict the quality in the Wine Quality dataset.
Returns
-------
model : Joblib
the model/pipeline that was trained on the Wine Quality dataset.
"""
if sklearn.__version__ == _MODEL_VERSION and pretrained:
with urlopen(_MODEL_URL) as f:
model = joblib.load(f)
else:
model = _build_model()
train, _ = load_data()
model.fit(train.data[train.features], train.data[train.label_name])
return model | Load and return a fitted regression model to predict the quality in the Wine Quality dataset. Returns ------- model : Joblib the model/pipeline that was trained on the Wine Quality dataset. |
557 | import math
import time
import typing as t
import numpy as np
import pandas as pd
from deepchecks.tabular.dataset import Dataset
_TRAIN_DATA_URL = ('https://raw.githubusercontent.com/deepchecks/deepchecks-datasets/'
'8dd24134239b9df5d2a3a13cdce38cc22caaaaf4/airbnb_ref_data.csv')
_TEST_DATA_URL = ('https://raw.githubusercontent.com/deepchecks/deepchecks-datasets/'
'8dd24134239b9df5d2a3a13cdce38cc22caaaaf4/airbnb_prod_data.csv')
_target = 'price'
_predictions = 'predictions'
_datetime = 'timestamp'
_CAT_FEATURES = ['room_type', 'neighbourhood', 'neighbourhood_group', 'has_availability']
_FEATURES = _NUM_FEATURES + _CAT_FEATURES
class Dataset:
"""
Dataset wraps pandas DataFrame together with ML related metadata.
The Dataset class is containing additional data and methods intended for easily accessing
metadata relevant for the training or validating of an ML models.
Parameters
----------
df : Any
An object that can be casted to a pandas DataFrame
- containing data relevant for the training or validating of a ML models.
label : t.Union[Hashable, pd.Series, pd.DataFrame, np.ndarray] , default: None
label column provided either as a string with the name of an existing column in the DataFrame or a label
object including the label data (pandas Series/DataFrame or a numpy array) that will be concatenated to the
data in the DataFrame. in case of label data the following logic is applied to set the label name:
- Series: takes the series name or 'target' if name is empty
- DataFrame: expect single column in the dataframe and use its name
- numpy: use 'target'
features : t.Optional[t.Sequence[Hashable]] , default: None
List of names for the feature columns in the DataFrame.
cat_features : t.Optional[t.Sequence[Hashable]] , default: None
List of names for the categorical features in the DataFrame. In order to disable categorical
features inference, pass cat_features=[]
index_name : t.Optional[Hashable] , default: None
Name of the index column in the dataframe. If set_index_from_dataframe_index is True and index_name
is not None, index will be created from the dataframe index level with the given name. If index levels
have no names, an int must be used to select the appropriate level by order.
set_index_from_dataframe_index : bool , default: False
If set to true, index will be created from the dataframe index instead of dataframe columns (default).
If index_name is None, first level of the index will be used in case of a multilevel index.
datetime_name : t.Optional[Hashable] , default: None
Name of the datetime column in the dataframe. If set_datetime_from_dataframe_index is True and datetime_name
is not None, date will be created from the dataframe index level with the given name. If index levels
have no names, an int must be used to select the appropriate level by order.
set_datetime_from_dataframe_index : bool , default: False
If set to true, date will be created from the dataframe index instead of dataframe columns (default).
If datetime_name is None, first level of the index will be used in case of a multilevel index.
convert_datetime : bool , default: True
If set to true, date will be converted to datetime using pandas.to_datetime.
datetime_args : t.Optional[t.Dict] , default: None
pandas.to_datetime args used for conversion of the datetime column.
(look at https://pandas.pydata.org/docs/reference/api/pandas.to_datetime.html for more documentation)
max_categorical_ratio : float , default: 0.01
The max ratio of unique values in a column in order for it to be inferred as a
categorical feature.
max_categories : int , default: None
The maximum number of categories in a column in order for it to be inferred as a categorical
feature. if None, uses is_categorical default inference mechanism.
label_type : str , default: None
Used to determine the task type. If None, inferred when running a check based on label column and model.
Possible values are: 'multiclass', 'binary' and 'regression'.
"""
_features: t.List[Hashable]
_label_name: t.Optional[Hashable]
_index_name: t.Optional[Hashable]
_set_index_from_dataframe_index: t.Optional[bool]
_datetime_name: t.Optional[Hashable]
_set_datetime_from_dataframe_index: t.Optional[bool]
_convert_datetime: t.Optional[bool]
_datetime_column: t.Optional[pd.Series]
_cat_features: t.List[Hashable]
_data: pd.DataFrame
_max_categorical_ratio: float
_max_categories: int
_label_type: t.Optional[TaskType]
def __init__(
self,
df: t.Any,
label: t.Union[Hashable, pd.Series, pd.DataFrame, np.ndarray] = None,
features: t.Optional[t.Sequence[Hashable]] = None,
cat_features: t.Optional[t.Sequence[Hashable]] = None,
index_name: t.Optional[Hashable] = None,
set_index_from_dataframe_index: bool = False,
datetime_name: t.Optional[Hashable] = None,
set_datetime_from_dataframe_index: bool = False,
convert_datetime: bool = True,
datetime_args: t.Optional[t.Dict] = None,
max_categorical_ratio: float = 0.01,
max_categories: int = None,
label_type: str = None,
dataset_name: t.Optional[str] = None,
label_classes=None
):
if len(df) == 0:
raise DeepchecksValueError('Can\'t create a Dataset object with an empty dataframe')
self._data = pd.DataFrame(df).copy()
# Checking for duplicate columns
duplicated_columns = [key for key, value in Counter(self._data.columns).items() if value > 1]
if len(duplicated_columns) >= 1:
raise DeepchecksValueError(
f"Data has {len(duplicated_columns)} duplicate columns. "
"Change the duplicate column names or remove them from the data. "
f"Duplicate column names: {duplicated_columns}")
# Validations
if label is None:
self._label_name = None
elif isinstance(label, (pd.Series, pd.DataFrame, np.ndarray)):
if isinstance(label, pd.DataFrame):
if label.shape[1] != 1:
raise DeepchecksValueError('Provide label as a Series or a DataFrame with a single column.')
label = label.iloc[:, 0]
elif isinstance(label, np.ndarray):
if len(label.shape) > 2:
raise DeepchecksValueError('Label must be either column vector or row vector')
elif len(label.shape) == 2:
if all(x != 1 for x in label.shape):
raise DeepchecksValueError('Label must be either column vector or row vector')
label = np.squeeze(label)
label = pd.Series(label)
if label.shape[0] != self._data.shape[0]:
raise DeepchecksValueError('Number of samples of label and data must be equal')
pd.testing.assert_index_equal(self._data.index, label.index)
self._label_name = DEFAULT_LABEL_NAME if label.name is None or label.name == 0 else label.name
if self._label_name in self._data.columns:
raise DeepchecksValueError(f'Data has column with name "{self._label_name}", change label column name '
f'or provide the column label name as str')
self._data[self._label_name] = label
elif isinstance(label, Hashable):
if label not in self._data.columns:
raise DeepchecksValueError(f'label column {label} not found in dataset columns')
self._label_name = label
else:
raise DeepchecksValueError(f'Unsupported type for label: {type(label).__name__}')
# Assert that the requested index can be found
if not set_index_from_dataframe_index:
if index_name is not None and index_name not in self._data.columns:
error_message = f'Index column {index_name} not found in dataset columns.'
if index_name == 'index':
error_message += ' If you attempted to use the dataframe index, set ' \
'set_index_from_dataframe_index to True instead.'
raise DeepchecksValueError(error_message)
else:
if index_name is not None:
if isinstance(index_name, str):
if index_name not in self._data.index.names:
raise DeepchecksValueError(f'Index {index_name} not found in dataframe index level names.')
elif isinstance(index_name, int):
if index_name > (len(self._data.index.names) - 1):
raise DeepchecksValueError(f'Dataframe index has less levels than {index_name + 1}.')
else:
raise DeepchecksValueError(f'When set_index_from_dataframe_index is True index_name can be None,'
f' int or str, but found {type(index_name)}')
# Assert that the requested datetime can be found
if not set_datetime_from_dataframe_index:
if datetime_name is not None and datetime_name not in self._data.columns:
error_message = f'Datetime column {datetime_name} not found in dataset columns.'
if datetime_name == 'date':
error_message += ' If you attempted to use the dataframe index, ' \
'set set_datetime_from_dataframe_index to True instead.'
raise DeepchecksValueError(error_message)
else:
if datetime_name is not None:
if isinstance(datetime_name, str):
if datetime_name not in self._data.index.names:
raise DeepchecksValueError(
f'Datetime {datetime_name} not found in dataframe index level names.'
)
elif isinstance(datetime_name, int):
if datetime_name > (len(self._data.index.names) - 1):
raise DeepchecksValueError(f'Dataframe index has less levels than {datetime_name + 1}.')
else:
raise DeepchecksValueError(f'When set_datetime_from_dataframe_index is True datetime_name can be None,'
f' int or str, but found {type(datetime_name)}')
self._datetime_column = self.get_datetime_column_from_index(datetime_name)
if features is not None:
difference = set(features) - set(self._data.columns)
if len(difference) > 0:
raise DeepchecksValueError('Features must be names of columns in dataframe. '
f'Features {difference} have not been '
'found in input dataframe.')
self._features = list(features)
else:
self._features = [x for x in self._data.columns if x not in
{self._label_name,
index_name if not set_index_from_dataframe_index else None,
datetime_name if not set_datetime_from_dataframe_index else None}]
if len(set(self._data.index)) != len(self._data.index):
if set_index_from_dataframe_index:
raise DeepchecksValueError('Selected index column has duplicate values.')
else:
self._data['original_df_index'] = self._data.index
self._data.index = range(len(self._data.index))
warnings.warn('Dataframe index has duplicate indexes, setting index to [0,1..,n-1].')
self._index_name = index_name
self._set_index_from_dataframe_index = set_index_from_dataframe_index
self._convert_datetime = convert_datetime
self._datetime_name = datetime_name
self._set_datetime_from_dataframe_index = set_datetime_from_dataframe_index
self._datetime_args = datetime_args or {}
self._max_categorical_ratio = max_categorical_ratio
self._max_categories = max_categories
if isinstance(dataset_name, str) or (dataset_name is None):
self.name = dataset_name
else:
raise DeepchecksValueError('The dataset_name parameter accepts a string or None.')
if self._label_name in self.features:
raise DeepchecksValueError(f'label column {self._label_name} can not be a feature column')
if self._datetime_name in self.features:
raise DeepchecksValueError(f'datetime column {self._datetime_name} can not be a feature column')
if self._index_name in self.features:
raise DeepchecksValueError(f'index column {self._index_name} can not be a feature column')
if cat_features is not None:
if set(cat_features).intersection(set(self._features)) != set(cat_features):
raise DeepchecksValueError('Categorical features must be a subset of features. '
f'Categorical features {set(cat_features) - set(self._features)} '
'have not been found in feature list.')
self._cat_features = list(cat_features)
else:
self._cat_features = self._infer_categorical_features(
self._data,
max_categorical_ratio=max_categorical_ratio,
max_categories=max_categories,
columns=self._features
)
if ((self._datetime_name is not None) or self._set_datetime_from_dataframe_index) and convert_datetime:
if self._set_datetime_from_dataframe_index:
self._datetime_column = pd.to_datetime(self._datetime_column, **self._datetime_args)
else:
self._data[self._datetime_name] = pd.to_datetime(self._data[self._datetime_name], **self._datetime_args)
if label_type in ['classification_label', 'regression_label']:
warnings.warn(f'{label_type} value for label type is deprecated, allowed task types are multiclass,'
f' binary and regression.', DeprecationWarning, stacklevel=2)
self._label_type = TaskType.REGRESSION if label_type == 'regression_label' else TaskType.MULTICLASS
elif label_type in [task.value for task in TaskType]:
self._label_type = TaskType(label_type)
elif label_type is not None:
raise DeepchecksValueError(f'allowed value for label type are {[task.value for task in TaskType]},'
f' received {label_type}.')
else:
self._label_type = None
if label_classes is not None:
warnings.warn('label_classes parameter is deprecated, use model_classes parameter on a check run function '
'instead.', DeprecationWarning, stacklevel=2)
unassigned_cols = [col for col in self._features if col not in self._cat_features]
self._numerical_features = infer_numerical_features(self._data[unassigned_cols])
def from_numpy(
cls: t.Type[TDataset],
*args: np.ndarray,
columns: t.Sequence[Hashable] = None,
label_name: t.Hashable = None,
**kwargs
) -> TDataset:
"""Create Dataset instance from numpy arrays.
Parameters
----------
*args: np.ndarray
Numpy array of data columns, and second optional numpy array of labels.
columns : t.Sequence[Hashable] , default: None
names for the columns. If none provided, the names that will be automatically
assigned to the columns will be: 1 - n (where n - number of columns)
label_name : t.Hashable , default: None
labels column name. If none is provided, the name 'target' will be used.
**kwargs : Dict
additional arguments that will be passed to the main Dataset constructor.
Returns
-------
Dataset
instance of the Dataset
Raises
------
DeepchecksValueError
if receives zero or more than two numpy arrays.
if columns (args[0]) is not two dimensional numpy array.
if labels (args[1]) is not one dimensional numpy array.
if features array or labels array is empty.
Examples
--------
>>> import numpy
>>> from deepchecks.tabular import Dataset
>>> features = numpy.array([[0.25, 0.3, 0.3],
... [0.14, 0.75, 0.3],
... [0.23, 0.39, 0.1]])
>>> labels = numpy.array([0.1, 0.1, 0.7])
>>> dataset = Dataset.from_numpy(features, labels)
Creating dataset only from features array.
>>> dataset = Dataset.from_numpy(features)
Passing additional arguments to the main Dataset constructor
>>> dataset = Dataset.from_numpy(features, labels, max_categorical_ratio=0.5)
Specifying features and label columns names.
>>> dataset = Dataset.from_numpy(
... features, labels,
... columns=['sensor-1', 'sensor-2', 'sensor-3'],
... label_name='labels'
... )
"""
if len(args) == 0 or len(args) > 2:
raise DeepchecksValueError(
"'from_numpy' constructor expecting to receive two numpy arrays (or at least one)."
"First array must contains the columns and second the labels."
)
columns_array = args[0]
columns_error_message = (
"'from_numpy' constructor expecting columns (args[0]) "
"to be not empty two dimensional array."
)
if len(columns_array.shape) != 2:
raise DeepchecksValueError(columns_error_message)
if columns_array.shape[0] == 0 or columns_array.shape[1] == 0:
raise DeepchecksValueError(columns_error_message)
if columns is not None and len(columns) != columns_array.shape[1]:
raise DeepchecksValueError(
f'{columns_array.shape[1]} columns were provided '
f'but only {len(columns)} name(s) for them.'
)
elif columns is None:
columns = [str(index) for index in range(1, columns_array.shape[1] + 1)]
if len(args) == 1:
labels_array = None
else:
labels_array = args[1]
if len(labels_array.shape) != 1 or labels_array.shape[0] == 0:
raise DeepchecksValueError(
"'from_numpy' constructor expecting labels (args[1]) "
"to be not empty one dimensional array."
)
labels_array = pd.Series(labels_array)
if label_name:
labels_array = labels_array.rename(label_name)
return cls(
df=pd.DataFrame(data=columns_array, columns=columns),
label=labels_array,
**kwargs
)
def data(self) -> pd.DataFrame:
"""Return the data of dataset."""
return self._data
def copy(self: TDataset, new_data: pd.DataFrame) -> TDataset:
"""Create a copy of this Dataset with new data.
Parameters
----------
new_data (DataFrame): new data from which new dataset will be created
Returns
-------
Dataset
new dataset instance
"""
# Filter out if columns were dropped
features = [feat for feat in self._features if feat in new_data.columns]
cat_features = [feat for feat in self.cat_features if feat in new_data.columns]
label_name = self._label_name if self._label_name in new_data.columns else None
label_type = None if self._label_type is None else self._label_type.value
index = self._index_name if self._index_name in new_data.columns else None
date = self._datetime_name if self._datetime_name in new_data.columns else None
cls = type(self)
return cls(new_data, features=features, cat_features=cat_features, label=label_name,
index_name=index, set_index_from_dataframe_index=self._set_index_from_dataframe_index,
datetime_name=date, set_datetime_from_dataframe_index=self._set_datetime_from_dataframe_index,
convert_datetime=self._convert_datetime, max_categorical_ratio=self._max_categorical_ratio,
max_categories=self._max_categories, label_type=label_type,
dataset_name=self.name)
def sample(self: TDataset, n_samples: t.Optional[int] = None, replace: bool = False,
random_state: t.Optional[int] = None) -> TDataset:
"""Create a copy of the dataset object, with the internal dataframe being a sample of the original dataframe.
Parameters
----------
n_samples : t.Optional[int]
Number of samples to draw.
replace : bool, default: False
Whether to sample with replacement.
random_state : t.Optional[int] , default None
Random state.
Returns
-------
Dataset
instance of the Dataset with sampled internal dataframe.
"""
if n_samples is None:
return self
n_samples = min(n_samples, len(self.data))
return self.copy(self.data.sample(n_samples, replace=replace, random_state=random_state))
def drop_na_labels(self) -> TDataset:
"""Create a copy of the dataset object without samples with missing labels."""
if not self.has_label():
return self
return self.copy(self.data[self.label_col.notna()])
def n_samples(self) -> int:
"""Return number of samples in dataframe.
Returns
-------
int
Number of samples in dataframe
"""
return self.data.shape[0]
def label_type(self) -> t.Optional[TaskType]:
"""Return the label type.
Returns
-------
t.Optional[TaskType]
Label type
"""
return self._label_type
def train_test_split(self: TDataset,
train_size: t.Union[int, float, None] = None,
test_size: t.Union[int, float] = 0.25,
random_state: int = 42,
shuffle: bool = True,
stratify: t.Union[t.List, pd.Series, np.ndarray, bool] = False
) -> t.Tuple[TDataset, TDataset]:
"""Split dataset into random train and test datasets.
Parameters
----------
train_size : t.Union[int, float, None] , default: None
If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in
the train split. If int, represents the absolute number of train samples. If None, the value is
automatically set to the complement of the test size.
test_size : t.Union[int, float] , default: 0.25
If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the
test split. If int, represents the absolute number of test samples.
random_state : int , default: 42
The random state to use for shuffling.
shuffle : bool , default: True
Whether to shuffle the data before splitting.
stratify : t.Union[t.List, pd.Series, np.ndarray, bool] , default: False
If True, data is split in a stratified fashion, using the class labels. If array-like, data is split in
a stratified fashion, using this as class labels.
Returns
-------
Dataset
Dataset containing train split data.
Dataset
Dataset containing test split data.
"""
if isinstance(stratify, bool):
stratify = self.label_col if stratify else None
train_df, test_df = train_test_split(self._data,
test_size=test_size,
train_size=train_size,
random_state=random_state,
shuffle=shuffle,
stratify=stratify)
return self.copy(train_df), self.copy(test_df)
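# Illustrative note (not part of the original class): a typical call of the split above,
# assuming `ds` is a Dataset constructed with a label column:
#     train_ds, test_ds = ds.train_test_split(test_size=0.2, stratify=True)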
def _infer_categorical_features(
df: pd.DataFrame,
max_categorical_ratio: float,
max_categories: int = None,
columns: t.Optional[t.List[Hashable]] = None,
) -> t.List[Hashable]:
"""Infers which features are categorical by checking types and number of unique values.
Parameters
----------
df: pd.DataFrame
max_categorical_ratio: float
max_categories: int , default: None
columns: t.Optional[t.List[Hashable]] , default: None
Returns
-------
t.List[Hashable]
Out of the list of feature names, returns list of categorical features
"""
categorical_columns = infer_categorical_features(
df,
max_categorical_ratio=max_categorical_ratio,
max_categories=max_categories,
columns=columns
)
message = ('It is recommended to initialize Dataset with categorical features by doing '
'"Dataset(df, cat_features=categorical_list)". No categorical features were passed, therefore '
'heuristically inferring categorical features in the data. '
f'{len(categorical_columns)} categorical features were inferred.')
if len(categorical_columns) > 0:
columns_to_print = categorical_columns[:7]
message += ': ' + ', '.join(list(map(str, columns_to_print)))
if len(categorical_columns) > len(columns_to_print):
message += '... For full list use dataset.cat_features'
get_logger().warning(message)
return categorical_columns
def is_categorical(self, col_name: Hashable) -> bool:
"""Check if a column is considered a category column in the dataset object.
Parameters
----------
col_name : Hashable
The name of the column in the dataframe
Returns
-------
bool
If is categorical according to input numbers
"""
return col_name in self._cat_features
def index_name(self) -> t.Optional[Hashable]:
"""If index column exists, return its name.
Returns
-------
t.Optional[Hashable]
index name
"""
return self._index_name
def index_col(self) -> t.Optional[pd.Series]:
"""Return index column. Index can be a named column or DataFrame index.
Returns
-------
t.Optional[pd.Series]
If index column exists, returns a pandas Series of the index column.
"""
if self._set_index_from_dataframe_index is True:
index_name = self.data.index.name or 'index'
if self._index_name is None:
return pd.Series(self.data.index.get_level_values(0), name=index_name,
index=self.data.index)
elif isinstance(self._index_name, (str, int)):
return pd.Series(self.data.index.get_level_values(self._index_name), name=index_name,
index=self.data.index)
else:
raise DeepchecksValueError(f'Don\'t know to handle index_name of type {type(self._index_name)}')
elif self._index_name is not None:
return self.data[self._index_name]
else: # No meaningful index to use: Index column not configured, and _set_index_from_dataframe_index is False
return
def datetime_name(self) -> t.Optional[Hashable]:
"""If datetime column exists, return its name.
Returns
-------
t.Optional[Hashable]
datetime name
"""
return self._datetime_name
def get_datetime_column_from_index(self, datetime_name):
"""Retrieve the datetime info from the index if _set_datetime_from_dataframe_index is True."""
index_name = self.data.index.name or 'datetime'
if datetime_name is None:
return pd.Series(self.data.index.get_level_values(0), name=index_name,
index=self.data.index)
elif isinstance(datetime_name, (str, int)):
return pd.Series(self.data.index.get_level_values(datetime_name), name=index_name,
index=self.data.index)
def datetime_col(self) -> t.Optional[pd.Series]:
"""Return datetime column if exists.
Returns
-------
t.Optional[pd.Series]
Series of the datetime column
"""
if self._set_datetime_from_dataframe_index is True and self._datetime_column is not None:
return self._datetime_column
elif self._datetime_name is not None:
return self.data[self._datetime_name]
else:
# No meaningful Datetime to use:
# Datetime column not configured, and _set_datetime_from_dataframe_index is False
return
def label_name(self) -> Hashable:
"""If label column exists, return its name. Otherwise, throw an exception.
Returns
-------
Hashable: Label name
"""
if not self._label_name:
raise DeepchecksNotSupportedError(
'Dataset does not contain a label column',
html=f'Dataset does not contain a label column. see {_get_dataset_docs_tag()}'
)
return self._label_name
def features(self) -> t.List[Hashable]:
"""Return list of feature names.
Returns
-------
t.List[Hashable]
List of feature names.
"""
return list(self._features)
def features_columns(self) -> pd.DataFrame:
"""Return DataFrame containing only the features defined in the dataset, if features are empty raise error.
Returns
-------
pd.DataFrame
"""
self.assert_features()
return self.data[self.features]
def label_col(self) -> pd.Series:
"""Return Series of the label defined in the dataset, if label is not defined raise error.
Returns
-------
pd.Series
"""
return self.data[self.label_name]
def cat_features(self) -> t.List[Hashable]:
"""Return list of categorical feature names.
Returns
-------
t.List[Hashable]
List of categorical feature names.
"""
return list(self._cat_features)
def numerical_features(self) -> t.List[Hashable]:
"""Return list of numerical feature names.
Returns
-------
t.List[Hashable]
List of numerical feature names.
"""
return list(self._numerical_features)
def classes_in_label_col(self) -> t.Tuple[str, ...]:
"""Return the classes from label column in sorted list. if no label column defined, return empty list.
Returns
-------
t.Tuple[str, ...]
Sorted classes
"""
if self.has_label():
return tuple(sorted(self.data[self.label_name].dropna().unique()))
else:
return tuple()
def columns_info(self) -> t.Dict[Hashable, str]:
"""Return the role and logical type of each column.
Returns
-------
t.Dict[Hashable, str]
Dictionary mapping each column name to its role
"""
columns = {}
for column in self.data.columns:
if column == self._index_name:
value = 'index'
elif column == self._datetime_name:
value = 'date'
elif column == self._label_name:
value = 'label'
elif column in self._features:
if column in self.cat_features:
value = 'categorical feature'
elif column in self.numerical_features:
value = 'numerical feature'
else:
value = 'other feature'
else:
value = 'other'
columns[column] = value
return columns
def has_label(self) -> bool:
"""Return True if label column exists.
Returns
-------
bool
True if label column exists.
"""
return self._label_name is not None
def assert_features(self):
"""Check if features are defined (not empty) and if not raise error.
Raises
------
DeepchecksNotSupportedError
"""
if not self.features:
raise DeepchecksNotSupportedError(
'Dataset does not contain any feature columns',
html=f'Dataset does not contain any feature columns. see {_get_dataset_docs_tag()}'
)
def assert_datetime(self):
"""Check if datetime is defined and if not raise error.
Raises
------
DeepchecksNotSupportedError
"""
if not (self._set_datetime_from_dataframe_index or self._datetime_name):
raise DatasetValidationError(
'Dataset does not contain a datetime',
html=f'Dataset does not contain a datetime. see {_get_dataset_docs_tag()}'
)
def assert_index(self):
"""Check if index is defined and if not raise error.
Raises
------
DeepchecksNotSupportedError
"""
if not (self._set_index_from_dataframe_index or self._index_name):
raise DatasetValidationError(
'Dataset does not contain an index',
html=f'Dataset does not contain an index. see {_get_dataset_docs_tag()}'
)
def select(
self: TDataset,
columns: t.Union[Hashable, t.List[Hashable], None] = None,
ignore_columns: t.Union[Hashable, t.List[Hashable], None] = None,
keep_label: bool = False
) -> TDataset:
"""Filter dataset columns by given params.
Parameters
----------
columns : Union[Hashable, List[Hashable], None]
Column names to keep.
ignore_columns : Union[Hashable, List[Hashable], None]
Column names to drop.
Returns
-------
TDataset
horizontally filtered dataset
Raises
------
DeepchecksValueError
In case one of the given columns does not exist, raise an error
"""
if (
keep_label
and isinstance(columns, list)
and self.label_name not in columns
):
columns = columns[:]
columns.append(self.label_name)
new_data = select_from_dataframe(self._data, columns, ignore_columns)
if new_data.equals(self.data):
return self
else:
return self.copy(new_data)
def cast_to_dataset(cls, obj: t.Any) -> 'Dataset':
"""Verify Dataset or transform to Dataset.
Function verifies that provided value is a non-empty instance of Dataset,
otherwise raises an exception, but if the 'cast' flag is set to True it will
also try to transform provided value to the Dataset instance.
Parameters
----------
obj
value to verify
Raises
------
DeepchecksValueError
if the provided value is not a Dataset instance;
if the provided value cannot be transformed into Dataset instance;
"""
if isinstance(obj, pd.DataFrame):
get_logger().warning(
'Received a "pandas.DataFrame" instance. It is recommended to pass a "deepchecks.tabular.Dataset" '
'instance by initializing it with the data and metadata, '
'for example by doing "Dataset(dataframe, label=label, cat_features=cat_features)"'
)
obj = Dataset(obj)
elif not isinstance(obj, Dataset):
raise DeepchecksValueError(
f'non-empty instance of Dataset or DataFrame was expected, instead got {type(obj).__name__}'
)
return obj.copy(obj.data)
def datasets_share_features(cls, *datasets: 'Dataset') -> bool:
"""Verify that all provided datasets share same features.
Parameters
----------
datasets : List[Dataset]
list of datasets to validate
Returns
-------
bool
True if all datasets share same features, otherwise False
Raises
------
AssertionError
'datasets' parameter is not a list;
'datasets' contains less than one dataset;
"""
assert len(datasets) > 1, "'datasets' must contain at least two items"
# TODO: should not we also check features dtypes?
features_names = set(datasets[0].features)
for ds in datasets[1:]:
if features_names != set(ds.features):
return False
return True
def datasets_share_categorical_features(cls, *datasets: 'Dataset') -> bool:
"""Verify that all provided datasets share same categorical features.
Parameters
----------
datasets : List[Dataset]
list of datasets to validate
Returns
-------
bool
True if all datasets share same categorical features, otherwise False
Raises
------
AssertionError
'datasets' parameter is not a list;
'datasets' contains less than one dataset;
"""
assert len(datasets) > 1, "'datasets' must contain at least two items"
# TODO: should not we also check features dtypes?
first = set(datasets[0].cat_features)
for ds in datasets[1:]:
features = set(ds.cat_features)
if first != features:
return False
return True
def datasets_share_label(cls, *datasets: 'Dataset') -> bool:
"""Verify that all provided datasets share same label column.
Parameters
----------
datasets : List[Dataset]
list of datasets to validate
Returns
-------
bool
True if all datasets share same categorical features, otherwise False
Raises
------
AssertionError
'datasets' parameter is not a list;
'datasets' contains less than one dataset;
"""
assert len(datasets) > 1, "'datasets' must contain at least two items"
# TODO: should not we also check label dtypes?
label_name = datasets[0].label_name
for ds in datasets[1:]:
if ds.label_name != label_name:
return False
return True
def datasets_share_index(cls, *datasets: 'Dataset') -> bool:
"""Verify that all provided datasets share same index column.
Parameters
----------
datasets : List[Dataset]
list of datasets to validate
Returns
-------
bool
True if all datasets share same index column, otherwise False
Raises
------
AssertionError
'datasets' parameter is not a list;
'datasets' contains less than one dataset;
"""
assert len(datasets) > 1, "'datasets' must contain at least two items"
first_ds = datasets[0]
for ds in datasets[1:]:
if (ds._index_name != first_ds._index_name or
ds._set_index_from_dataframe_index != first_ds._set_index_from_dataframe_index):
return False
return True
def datasets_share_date(cls, *datasets: 'Dataset') -> bool:
"""Verify that all provided datasets share same date column.
Parameters
----------
datasets : List[Dataset]
list of datasets to validate
Returns
-------
bool
True if all datasets share same date column, otherwise False
Raises
------
AssertionError
'datasets' parameter is not a list;
'datasets' contains less than one dataset;
"""
assert len(datasets) > 1, "'datasets' must contain at least two items"
first_ds = datasets[0]
for ds in datasets[1:]:
if (ds._datetime_name != first_ds._datetime_name or
ds._set_datetime_from_dataframe_index != first_ds._set_datetime_from_dataframe_index):
return False
return True
def _dataset_description(self) -> pd.DataFrame:
data = self.data
features = self.features
categorical_features = self.cat_features
numerical_features = self.numerical_features
label_column = t.cast(pd.Series, data[self.label_name]) if self.has_label() else None
index_column = self.index_col
datetime_column = self.datetime_col
label_name = None
index_name = None
datetime_name = None
dataset_columns_info = []
if index_column is not None:
index_name = index_column.name
dataset_columns_info.append([
index_name,
infer_dtype(index_column, skipna=True),
'Index',
'set from dataframe index' if self._set_index_from_dataframe_index is True else ''
])
if datetime_column is not None:
datetime_name = datetime_column.name
dataset_columns_info.append([
datetime_name,
infer_dtype(datetime_column, skipna=True),
'Datetime',
'set from DataFrame index' if self._set_datetime_from_dataframe_index is True else ''
])
if label_column is not None:
label_name = label_column.name
dataset_columns_info.append([
label_name,
infer_dtype(label_column, skipna=True),
'' if self.label_type is None else self.label_type.value.capitalize() + " LABEL",
''
])
all_columns = pd.Series(features + list(self.data.columns)).unique()
for feature_name in t.cast(t.Iterable[str], all_columns):
if feature_name in (index_name, datetime_name, label_name):
continue
feature_dtype = infer_dtype(data[feature_name], skipna=True)
if feature_name in categorical_features:
kind = 'Categorical Feature'
elif feature_name in numerical_features:
kind = 'Numerical Feature'
elif feature_name in features:
kind = 'Other Feature'
else:
kind = 'Dataset Column'
dataset_columns_info.append([feature_name, feature_dtype, kind, ''])
return pd.DataFrame(
data=dataset_columns_info,
columns=['Column', 'DType', 'Kind', 'Additional Info'],
)
def __repr__(
self,
max_cols: int = 8,
max_rows: int = 10,
fmt: DatasetReprFmt = 'string'
) -> str:
"""Represent a dataset instance."""
info = self._dataset_description()
columns = list(info[info['Additional Info'] == '']['Column'])
data = self.data.loc[:, columns] # Sorting horizontally
kwargs = dict(max_cols=max_cols, col_space=15)
if fmt == 'string':
features_info = info.to_string(max_rows=50, **kwargs)
data_to_show = data.to_string(show_dimensions=True, max_rows=max_rows, **kwargs)
title_template = '{:-^40}\n\n'
return ''.join((
title_template.format(' Dataset Description '),
f'{features_info}\n\n\n',
title_template.format(' Dataset Content '),
f'{data_to_show}\n\n',
))
elif fmt == 'html':
features_info = info.to_html(notebook=True, max_rows=50, **kwargs)
data_to_show = data.to_html(notebook=True, max_rows=max_rows, **kwargs)
return ''.join([
'<h4><b>Dataset Description</b></h4>',
features_info,
'<h4><b>Dataset Content</b></h4>',
data_to_show
])
else:
raise ValueError(
'"fmt" parameter supports only next values [string, html]'
)
def _ipython_display_(self):
display_html(HTML(self.__repr__(fmt='html')))
def __len__(self) -> int:
"""Return number of samples in the member dataframe.
Returns
-------
int
"""
return self.n_samples
def len_when_sampled(self, n_samples: int):
"""Return number of samples in the sampled dataframe this dataset is sampled with n_samples samples."""
return min(len(self), n_samples)
def is_sampled(self, n_samples: int):
"""Return True if the dataset number of samples will decrease when sampled with n_samples samples."""
if n_samples is None:
return False
return len(self) > n_samples
The provided code snippet includes necessary dependencies for implementing the `load_data_and_predictions` function. Write a Python function `def load_data_and_predictions(data_format: str = 'Dataset', load_train: bool = True, modify_timestamps: bool = True, data_size: t.Optional[int] = 15000, random_state: int = 42) -> t.Tuple[t.Union[Dataset, pd.DataFrame], np.ndarray]` to solve the following problem:
Load and returns the Airbnb NYC 2019 dataset (regression). Parameters ---------- data_format : str , default: Dataset Represent the format of the returned value. Can be 'Dataset'|'Dataframe' 'Dataset' will return the data as a Dataset object 'Dataframe' will return the data as a pandas Dataframe object load_train : bool , default: True If True, the returned data is the train data. otherwise the test dataset. modify_timestamps : bool , default: True If True, the returned data timestamp column will be for the last 30 days. Otherwise, the data timestamp will be for March 2023. data_size : t.Optional[int] , default: 15000 The number of samples to return. If None, returns all the data. random_state : int , default 42 The random state to use for sampling. Returns ------- dataset, predictions : Tuple[Union[deepchecks.Dataset, pd.DataFrame], np.ndarray] Tuple of the deepchecks dataset or dataframe and the predictions.
Here is the function:
def load_data_and_predictions(data_format: str = 'Dataset', load_train: bool = True, modify_timestamps: bool = True,
data_size: t.Optional[int] = 15000, random_state: int = 42) \
-> t.Tuple[t.Union[Dataset, pd.DataFrame], np.ndarray]:
"""Load and returns the Airbnb NYC 2019 dataset (regression).
Parameters
----------
data_format : str , default: Dataset
Represent the format of the returned value. Can be 'Dataset'|'Dataframe'
'Dataset' will return the data as a Dataset object
'Dataframe' will return the data as a pandas Dataframe object
load_train : bool , default: True
If True, the returned data is the train data. otherwise the test dataset.
modify_timestamps : bool , default: True
If True, the returned data timestamp column will be for the last 30 days.
Otherwise, the data timestamp will be for March 2023.
data_size : t.Optional[int] , default: 15000
The number of samples to return. If None, returns all the data.
random_state : int , default 42
The random state to use for sampling.
Returns
-------
dataset, predictions : Tuple[Union[deepchecks.Dataset, pd.DataFrame], np.ndarray]
Tuple of the deepchecks dataset or dataframe and the predictions.
"""
if load_train:
dataset = pd.read_csv(_TRAIN_DATA_URL)
else:
dataset = pd.read_csv(_TEST_DATA_URL)
if data_size is not None:
if data_size < len(dataset):
dataset = dataset.sample(data_size, random_state=random_state)
elif data_size > len(dataset):
dataset = pd.concat([dataset] * math.ceil(data_size / len(dataset)), axis=0, ignore_index=True)
dataset = dataset.sample(data_size, random_state=random_state)
if not load_train:
dataset = dataset.sort_values(_datetime)
if modify_timestamps and not load_train:
current_time = int(time.time())
time_test_start = current_time - 86400 * 30 # Span data for 30 days
dataset[_datetime] = np.sort(
(np.random.rand(len(dataset)) * (current_time - time_test_start)) + time_test_start
)
dataset[_datetime] = dataset[_datetime].apply(lambda x: pd.Timestamp(x, unit='s'))
predictions = np.asarray(dataset[_predictions])
dataset.drop(_predictions, axis=1, inplace=True)
if data_format == 'Dataset':
dataset = Dataset(dataset, label=_target, cat_features=_CAT_FEATURES,
features=_FEATURES)
return dataset, predictions | Load and returns the Airbnb NYC 2019 dataset (regression). Parameters ---------- data_format : str , default: Dataset Represent the format of the returned value. Can be 'Dataset'|'Dataframe' 'Dataset' will return the data as a Dataset object 'Dataframe' will return the data as a pandas Dataframe object load_train : bool , default: True If True, the returned data is the train data. otherwise the test dataset. modify_timestamps : bool , default: True If True, the returned data timestamp column will be for the last 30 days. Otherwise, the data timestamp will be for March 2023. data_size : t.Optional[int] , default: 15000 The number of samples to return. If None, returns all the data. random_state : int , default 42 The random state to use for sampling. Returns ------- dataset, predictions : Tuple[Union[deepchecks.Dataset, pd.DataFrame], np.ndarray] Tuple of the deepchecks dataset or dataframe and the predictions. |
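For orientation, a minimal usage sketch of `load_data_and_predictions` (illustrative only; it assumes the module-level constants referenced above, such as _TRAIN_DATA_URL, _TEST_DATA_URL, _datetime and _predictions, are defined as in the deepchecks source, and that the data URLs are reachable):
>>> train_ds, train_preds = load_data_and_predictions(data_format='Dataset', load_train=True, data_size=1000)
>>> test_df, test_preds = load_data_and_predictions(data_format='Dataframe', load_train=False, data_size=1000)
>>> len(test_df) == len(test_preds)
True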
558 | import math
import time
import typing as t
import numpy as np
import pandas as pd
from deepchecks.tabular.dataset import Dataset
The provided code snippet includes necessary dependencies for implementing the `load_pre_calculated_feature_importance` function. Write a Python function `def load_pre_calculated_feature_importance() -> pd.Series` to solve the following problem:
Load the pre-calculated feature importance for the Airbnb NYC 2019 dataset. Returns ------- feature_importance : pd.Series The feature importance for a model trained on the Airbnb NYC 2019 dataset.
Here is the function:
def load_pre_calculated_feature_importance() -> pd.Series:
"""Load the pre-calculated feature importance for the Airbnb NYC 2019 dataset.
Returns
-------
feature_importance : pd.Series
The feature importance for a model trained on the Airbnb NYC 2019 dataset.
"""
return pd.Series({
'neighbourhood_group': 0.1,
'neighbourhood': 0.2,
'room_type': 0.1,
'minimum_nights': 0.1,
'number_of_reviews': 0.1,
'reviews_per_month': 0.1,
'calculated_host_listings_count': 0.1,
'availability_365': 0.1,
'has_availability': 0.1,
}) | Load the pre-calculated feature importance for the Airbnb NYC 2019 dataset. Returns ------- feature_importance : pd.Series The feature importance for a model trained on the Airbnb NYC 2019 dataset. |
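A short usage sketch for `load_pre_calculated_feature_importance` (illustrative; the importances are hard-coded, so no data download is involved):
>>> fi = load_pre_calculated_feature_importance()
>>> fi.idxmax()
'neighbourhood'
>>> len(fi)
9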
559 | import time
import typing as t
import warnings
import numpy as np
import pandas as pd
from sklearn.inspection import permutation_importance
from sklearn.pipeline import Pipeline
from deepchecks import tabular
from deepchecks.core import errors
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.tabular.metric_utils.scorers import DeepcheckScorer, get_default_scorers, init_validate_scorers
from deepchecks.tabular.utils.task_type import TaskType
from deepchecks.tabular.utils.validation import validate_model
from deepchecks.utils.logger import get_logger
from deepchecks.utils.typing import Hashable
def get_importance(name: str, feature_importances: pd.Series, ds: 'tabular.Dataset') -> int:
"""Return importance based on feature importance or label/date/index first."""
if name in feature_importances.keys():
return feature_importances[name]
elif ds.has_label() and name == ds.label_name:
return 1
elif name in [ds.datetime_name, ds.index_name]:
return 1
return 0
from typing import List, Protocol
class Hashable(Protocol):
"""Trait for any hashable type that also defines comparison operators."""
def __hash__(self) -> int: # noqa: D105
...
def __le__(self, __value) -> bool: # noqa: D105
...
def __lt__(self, __value) -> bool: # noqa: D105
...
def __ge__(self, __value) -> bool: # noqa: D105
...
def __gt__(self, __value) -> bool: # noqa: D105
...
def __eq__(self, __value) -> bool: # noqa: D105
...
The provided code snippet includes necessary dependencies for implementing the `column_importance_sorter_dict` function. Write a Python function `def column_importance_sorter_dict( cols_dict: t.Dict[Hashable, t.Any], dataset: 'tabular.Dataset', feature_importances: t.Optional[pd.Series] = None, n_top: int = 10 ) -> t.Dict` to solve the following problem:
Return the dict of columns sorted and limited by feature importance. Parameters ---------- cols_dict : t.Dict[Hashable, t.Any] dict where columns are the keys dataset : tabular.Dataset dataset used to fit the model feature_importances : t.Optional[pd.Series] , default: None feature importance normalized to 0-1 indexed by feature names n_top : int , default: 10 amount of columns to show ordered by feature importance (date, index, label are first) Returns ------- Dict the dict of columns sorted and limited by feature importance.
Here is the function:
def column_importance_sorter_dict(
cols_dict: t.Dict[Hashable, t.Any],
dataset: 'tabular.Dataset',
feature_importances: t.Optional[pd.Series] = None,
n_top: int = 10
) -> t.Dict:
"""Return the dict of columns sorted and limited by feature importance.
Parameters
----------
cols_dict : t.Dict[Hashable, t.Any]
dict where columns are the keys
dataset : tabular.Dataset
dataset used to fit the model
feature_importances : t.Optional[pd.Series] , default: None
feature importance normalized to 0-1 indexed by feature names
n_top : int , default: 10
amount of columns to show ordered by feature importance (date, index, label are first)
Returns
-------
Dict
the dict of columns sorted and limited by feature importance.
"""
feature_importances = {} if feature_importances is None else feature_importances
def key(name):
return get_importance(name[0], feature_importances, dataset)
cols_dict = dict(sorted(cols_dict.items(), key=key, reverse=True))
if n_top:
return dict(list(cols_dict.items())[:n_top])
return cols_dict | Return the dict of columns sorted and limited by feature importance. Parameters ---------- cols_dict : t.Dict[Hashable, t.Any] dict where columns are the keys dataset : tabular.Dataset dataset used to fit the model feature_importances : t.Optional[pd.Series] , default: None feature importance normalized to 0-1 indexed by feature names n_top : int , default: 10 amount of columns to show ordered by feature importance (date, index, label are first) Returns ------- Dict the dict of columns sorted and limited by feature importance. |
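A hypothetical sketch of `column_importance_sorter_dict`; the feature names, the per-column results and `train_ds` (any deepchecks tabular Dataset) are invented for illustration:
>>> fi = pd.Series({'age': 0.7, 'income': 0.2, 'city': 0.1})
>>> per_column_results = {'city': 4, 'income': 9, 'age': 2}
>>> column_importance_sorter_dict(per_column_results, train_ds, feature_importances=fi, n_top=2)
{'age': 2, 'income': 9}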
560 | import time
import typing as t
import warnings
import numpy as np
import pandas as pd
from sklearn.inspection import permutation_importance
from sklearn.pipeline import Pipeline
from deepchecks import tabular
from deepchecks.core import errors
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.tabular.metric_utils.scorers import DeepcheckScorer, get_default_scorers, init_validate_scorers
from deepchecks.tabular.utils.task_type import TaskType
from deepchecks.tabular.utils.validation import validate_model
from deepchecks.utils.logger import get_logger
from deepchecks.utils.typing import Hashable
def get_importance(name: str, feature_importances: pd.Series, ds: 'tabular.Dataset') -> int:
"""Return importance based on feature importance or label/date/index first."""
if name in feature_importances.keys():
return feature_importances[name]
elif ds.has_label() and name == ds.label_name:
return 1
elif name in [ds.datetime_name, ds.index_name]:
return 1
return 0
from typing import List, Protocol
class Hashable(Protocol):
"""Trait for any hashable type that also defines comparison operators."""
def __hash__(self) -> int: # noqa: D105
...
def __le__(self, __value) -> bool: # noqa: D105
...
def __lt__(self, __value) -> bool: # noqa: D105
...
def __ge__(self, __value) -> bool: # noqa: D105
...
def __gt__(self, __value) -> bool: # noqa: D105
...
def __eq__(self, __value) -> bool: # noqa: D105
...
The provided code snippet includes necessary dependencies for implementing the `column_importance_sorter_df` function. Write a Python function `def column_importance_sorter_df( df: pd.DataFrame, ds: 'tabular.Dataset', feature_importances: pd.Series, n_top: int = 10, col: t.Optional[Hashable] = None ) -> pd.DataFrame` to solve the following problem:
Return the dataframe of columns sorted and limited by feature importance. Parameters ---------- df : pd.DataFrame DataFrame to sort ds : tabular.Dataset dataset used to fit the model feature_importances : pd.Series feature importance normalized to 0-1 indexed by feature names n_top : int , default: 10 amount of columns to show ordered by feature importance (date, index, label are first) col : t.Optional[Hashable] , default: None name of column to sort the dataframe Returns ------- pd.DataFrame the dataframe sorted and limited by feature importance.
Here is the function:
def column_importance_sorter_df(
df: pd.DataFrame,
ds: 'tabular.Dataset',
feature_importances: pd.Series,
n_top: int = 10,
col: t.Optional[Hashable] = None
) -> pd.DataFrame:
"""Return the dataframe of columns sorted and limited by feature importance.
Parameters
----------
df : pd.DataFrame
DataFrame to sort
ds : tabular.Dataset
dataset used to fit the model
feature_importances : pd.Series
feature importance normalized to 0-1 indexed by feature names
n_top : int , default: 10
amount of columns to show ordered by feature importance (date, index, label are first)
col : t.Optional[Hashable] , default: None
name of column to sort the dataframe
Returns
-------
pd.DataFrame
the dataframe sorted and limited by feature importance.
"""
if len(df) == 0:
return df
feature_importances = {} if feature_importances is None else feature_importances
def key(column):
return [get_importance(name, feature_importances, ds) for name in column]
if col:
df = df.sort_values(by=[col], key=key, ascending=False)
df = df.sort_index(key=key, ascending=False)
if n_top:
return df.head(n_top)
return df | Return the dataframe of columns sorted and limited by feature importance. Parameters ---------- df : pd.DataFrame DataFrame to sort ds : tabular.Dataset dataset used to fit the model feature_importances : pd.Series feature importance normalized to 0-1 indexed by feature names n_top : int , default: 10 amount of columns to show ordered by feature importance (date, index, label are first) col : t.Optional[Hashable] , default: None name of column to sort the dataframe Returns ------- pd.DataFrame the dataframe sorted and limited by feature importance. |
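Similarly, a hypothetical sketch for `column_importance_sorter_df`, reusing the invented `fi` and `train_ds` from the previous sketch; the per-column summary DataFrame is arbitrary:
>>> summary = pd.DataFrame({'n_nulls': [4, 9, 2]}, index=['city', 'income', 'age'])
>>> column_importance_sorter_df(summary, train_ds, fi, n_top=2).index.tolist()
['age', 'income']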
561 | import time
import typing as t
import warnings
import numpy as np
import pandas as pd
from sklearn.inspection import permutation_importance
from sklearn.pipeline import Pipeline
from deepchecks import tabular
from deepchecks.core import errors
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.tabular.metric_utils.scorers import DeepcheckScorer, get_default_scorers, init_validate_scorers
from deepchecks.tabular.utils.task_type import TaskType
from deepchecks.tabular.utils.validation import validate_model
from deepchecks.utils.logger import get_logger
from deepchecks.utils.typing import Hashable
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
The provided code snippet includes necessary dependencies for implementing the `validate_feature_importance` function. Write a Python function `def validate_feature_importance(feature_importance: pd.Series, features: list, eps: float = 0.001) -> pd.Series` to solve the following problem:
Validate feature importance.
Here is the function:
def validate_feature_importance(feature_importance: pd.Series, features: list, eps: float = 0.001) -> pd.Series:
"""Validate feature importance."""
if not isinstance(feature_importance, pd.Series):
raise DeepchecksValueError('feature_importance must be given as a pandas.Series where the index is feature '
'names and the value is the calculated importance')
if feature_importance.isnull().any():
raise DeepchecksValueError('feature_importance must not contain null values')
if (feature_importance < 0).any():
raise DeepchecksValueError('feature_importance must not contain negative values')
if sorted(feature_importance.index) != sorted(features):
raise DeepchecksValueError('feature_importance index must be the feature names')
if not 1 - eps < feature_importance.sum() < 1 + eps:
warnings.warn('feature_importance does not sum to 1. Normalizing to 1.', UserWarning)
feature_importance = feature_importance / feature_importance.sum()
return feature_importance | Validate feature importance. |
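A small illustrative sketch of `validate_feature_importance`; a series that does not sum to 1 triggers the UserWarning above and is normalized:
>>> raw_fi = pd.Series({'age': 3.0, 'income': 1.0})
>>> normalized = validate_feature_importance(raw_fi, features=['income', 'age'])
>>> normalized['age']
0.75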
562 | import typing as t
import numpy as np
import pandas as pd
from deepchecks import tabular
from deepchecks.core import errors
from deepchecks.utils.typing import BasicModel
supported_models_html = f'<a href="{supported_models_link}" target="_blank">supported model types</a>'
from typing import Hashable, List, Protocol
class BasicModel(Protocol):
"""Traits of a model that are necessary for deepchecks."""
def predict(self, X) -> List[Hashable]:
"""Predict on given X."""
...
The provided code snippet includes necessary dependencies for implementing the `model_type_validation` function. Write a Python function `def model_type_validation(model: t.Any)` to solve the following problem:
Receive any object and check if it's an instance of a model we support. Parameters ---------- model: t.Any Raises ------ DeepchecksValueError If the object is not of a supported type
Here is the function:
def model_type_validation(model: t.Any):
"""Receive any object and check if it's an instance of a model we support.
Parameters
----------
model: t.Any
Raises
------
DeepchecksValueError
If the object is not of a supported type
"""
if not isinstance(model, BasicModel):
raise errors.ModelValidationError(
f'Model supplied does not meets the minimal interface requirements. Read more about {supported_models_html}'
) | Receive any object and check if it's an instance of a model we support. Parameters ---------- model: t.Any Raises ------ DeepchecksValueError If the object is not of a supported type |
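An illustrative sketch of `model_type_validation`; any object exposing a `predict` method passes (assuming BasicModel is a runtime-checkable Protocol, as it is in the deepchecks source), while anything else raises. scikit-learn is used here only as an example:
>>> from sklearn.linear_model import LinearRegression
>>> model_type_validation(LinearRegression())  # has `predict`, passes silently
>>> model_type_validation('not a model')  # raises errors.ModelValidationError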
563 | import typing as t
import numpy as np
import pandas as pd
from deepchecks import tabular
from deepchecks.core import errors
from deepchecks.utils.typing import BasicModel
from typing import List
The provided code snippet includes necessary dependencies for implementing the `ensure_dataframe_type` function. Write a Python function `def ensure_dataframe_type(obj: t.Any) -> pd.DataFrame` to solve the following problem:
Ensure that given object is of type DataFrame or Dataset and return it as DataFrame. else raise error. Parameters ---------- obj : t.Any Object to ensure it is DataFrame or Dataset Returns ------- pd.DataFrame
Here is the function:
def ensure_dataframe_type(obj: t.Any) -> pd.DataFrame:
"""Ensure that given object is of type DataFrame or Dataset and return it as DataFrame. else raise error.
Parameters
----------
obj : t.Any
Object to ensure it is DataFrame or Dataset
Returns
-------
pd.DataFrame
"""
if isinstance(obj, pd.DataFrame):
return obj
elif isinstance(obj, tabular.Dataset):
return obj.data
else:
raise errors.DeepchecksValueError(
f'dataset must be of type DataFrame or Dataset, but got: {type(obj).__name__}'
) | Ensure that given object is of type DataFrame or Dataset and return it as DataFrame. else raise error. Parameters ---------- obj : t.Any Object to ensure it is DataFrame or Dataset Returns ------- pd.DataFrame |
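A quick sketch of `ensure_dataframe_type`; a plain DataFrame is returned as-is, while a Dataset yields its underlying frame:
>>> frame = pd.DataFrame({'a': [1, 2, 3]})
>>> ensure_dataframe_type(frame) is frame
True
>>> ensure_dataframe_type(tabular.Dataset(frame, cat_features=[])).shape
(3, 1)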
564 | import typing as t
import numpy as np
import pandas as pd
from deepchecks import tabular
from deepchecks.core import errors
from deepchecks.utils.typing import BasicModel
from typing import List
The provided code snippet includes necessary dependencies for implementing the `ensure_predictions_shape` function. Write a Python function `def ensure_predictions_shape(pred: np.ndarray, data: t.Sequence) -> np.ndarray` to solve the following problem:
Ensure the predictions are in the right shape and if so return them. else raise error.
Here is the function:
def ensure_predictions_shape(pred: np.ndarray, data: t.Sequence) -> np.ndarray:
"""Ensure the predictions are in the right shape and if so return them. else raise error."""
if pred.shape[0] != len(data):
raise errors.ValidationError(f'Prediction array expected to be of same length as data {len(data)},'
f' but was: {pred.shape[0]}')
return pred | Ensure the predictions are in the right shape and if so return them. else raise error. |
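A minimal sketch of the length check in `ensure_predictions_shape` (illustrative):
>>> preds = np.array([0.2, 0.7, 0.1])
>>> data = pd.DataFrame({'x': [1, 2, 3]})
>>> ensure_predictions_shape(preds, data) is preds  # lengths match, returned unchanged
True
>>> ensure_predictions_shape(preds[:2], data)  # raises errors.ValidationError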
565 | import typing as t
import numpy as np
import pandas as pd
from deepchecks import tabular
from deepchecks.core import errors
from deepchecks.utils.typing import BasicModel
from typing import List
The provided code snippet includes necessary dependencies for implementing the `ensure_predictions_proba` function. Write a Python function `def ensure_predictions_proba(pred_proba: np.ndarray, data: t.Sequence) -> np.ndarray` to solve the following problem:
Ensure the predictions are in the right shape and if so return them. else raise error.
Here is the function:
def ensure_predictions_proba(pred_proba: np.ndarray, data: t.Sequence) -> np.ndarray:
"""Ensure the predictions are in the right shape and if so return them. else raise error."""
if len(pred_proba) != len(data):
raise errors.ValidationError(f'Prediction probabilities expected to be of length {len(data)} '
f'but was: {len(pred_proba)}')
return pred_proba | Ensure the predictions are in the right shape and if so return them. else raise error. |
566 | import http.client
import os
import pathlib
import uuid
import deepchecks
from deepchecks.utils.logger import get_logger
MODULE_DIR = pathlib.Path(__file__).absolute().parent.parent
ANALYTICS_DISABLED = os.environ.get('DISABLE_DEEPCHECKS_ANONYMOUS_TELEMETRY', False) or \
os.environ.get('DISABLE_LATEST_VERSION_CHECK', False)
def get_logger() -> logging.Logger:
"""Retutn the deepchecks logger."""
return _logger
The provided code snippet includes necessary dependencies for implementing the `validate_latest_version` function. Write a Python function `def validate_latest_version()` to solve the following problem:
Check if we are on the latest version and send an anonymous import event to PostHog.
Here is the function:
def validate_latest_version():
"""Check if we are on the latest version and send an anonymous import event to PostHog."""
if not ANALYTICS_DISABLED:
try:
if os.path.exists(os.path.join(MODULE_DIR, '.user_id')):
with open(os.path.join(MODULE_DIR, '.user_id'), 'r', encoding='utf8') as f:
user_id = f.read()
else:
user_id = str(uuid.uuid4())
with open(os.path.join(MODULE_DIR, '.user_id'), 'w', encoding='utf8') as f:
f.write(user_id)
conn = http.client.HTTPSConnection('api.deepchecks.com', timeout=3)
conn.request('GET', f'/v3/latest?version={deepchecks.__version__}&uuid={user_id}')
result = conn.getresponse()
is_on_latest = result.read().decode() == 'True'
if not is_on_latest:
get_logger().warning('You are using deepchecks version %s, however a newer version is available.'
' Deepchecks is frequently updated with major improvements. You should consider '
'upgrading via the "python -m pip install --upgrade deepchecks" command.',
deepchecks.__version__)
os.environ['DISABLE_DEEPCHECKS_ANONYMOUS_TELEMETRY'] = 'True' # to ignore joblib
except Exception: # pylint: disable=broad-except
pass | Check if we are on the latest version and send an anonymous import event to PostHog. |
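Note that, per the module-level ANALYTICS_DISABLED flag above, the whole check is a no-op when the opt-out environment variable is exported before deepchecks is imported; a minimal sketch:
>>> import os
>>> os.environ['DISABLE_DEEPCHECKS_ANONYMOUS_TELEMETRY'] = 'True'  # must be set before the module is imported
>>> validate_latest_version()  # with the flag set at import time, returns immediately without a network call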
567 | from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from deepchecks.vision import Suite
from deepchecks.vision.checks import (ClassPerformance, ConfusionMatrixReport, # SimilarImageLeakage,
HeatmapComparison, ImageDatasetDrift, ImagePropertyDrift, ImagePropertyOutliers,
LabelDrift, LabelPropertyOutliers, MeanAveragePrecisionReport,
MeanAverageRecallReport, NewLabels, PredictionDrift, PropertyLabelCorrelation,
PropertyLabelCorrelationChange, SimpleModelComparison, WeakSegmentsPerformance)
def train_test_validation(label_properties: List[Dict[str, Any]] = None, image_properties: List[Dict[str, Any]] = None,
**kwargs) -> Suite:
"""Suite for validating correctness of train-test split, including distribution, \
integrity and leakage checks.
List of Checks:
.. list-table:: List of Checks
:widths: 50 50
:header-rows: 1
* - Check Example
- API Reference
* - :ref:`vision__new_labels`
- :class:`~deepchecks.vision.checks.train_test_validation.NewLabels`
* - :ref:`vision__heatmap_comparison`
- :class:`~deepchecks.vision.checks.train_test_validation.HeatmapComparison`
* - :ref:`vision__label_drift`
- :class:`~deepchecks.vision.checks.train_test_validation.LabelDrift`
* - :ref:`vision__image_property_drift`
- :class:`~deepchecks.vision.checks.train_test_validation.ImagePropertyDrift`
* - :ref:`vision__image_dataset_drift`
- :class:`~deepchecks.vision.checks.train_test_validation.ImageDatasetDrift`
* - :ref:`vision__property_label_correlation_change`
- :class:`~deepchecks.vision.checks.train_test_validation.PropertyLabelCorrelationChange`
Parameters
----------
label_properties : List[Dict[str, Any]], default: None
List of properties. Replaces the default deepchecks properties.
Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str),
representing attributes of said method. 'output_type' must be one of:
- ``'numerical'`` - for continuous ordinal outputs.
- ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers,
but these numbers do not have inherent value.
- ``'class_id'`` - for properties that return the class_id. This is used because these
properties are later matched with the ``VisionData.label_map``, if one was given.
For more on image / label properties, see the guide about :ref:`vision__properties_guide`.
image_properties : List[Dict[str, Any]], default: None
List of properties. Replaces the default deepchecks properties.
Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str),
representing attributes of said method. 'output_type' must be one of:
- ``'numerical'`` - for continuous ordinal outputs.
- ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers,
but these numbers do not have inherent value.
For more on image / label properties, see the guide about :ref:`vision__properties_guide`.
**kwargs : dict
additional arguments to pass to the checks.
Returns
-------
Suite
A Suite for validating correctness of train-test split, including distribution, \
integrity and leakage checks.
Examples
--------
>>> from deepchecks.vision.suites import train_test_validation
>>> suite = train_test_validation()
>>> train_data, test_data = ...
>>> result = suite.run(train_data, test_data, max_samples=800)
>>> result.show()
See Also
--------
:ref:`vision__classification_tutorial`
:ref:`vision__detection_tutorial`
:ref:`vision__segmentation_tutorial`
"""
args = locals()
args.pop('kwargs')
non_none_args = {k: v for k, v in args.items() if v is not None}
kwargs = {**non_none_args, **kwargs}
return Suite('Train Test Validation Suite', NewLabels(**kwargs).add_condition_new_label_ratio_less_or_equal(),
HeatmapComparison(**kwargs), LabelDrift(**kwargs).add_condition_drift_score_less_than(),
ImagePropertyDrift(**kwargs).add_condition_drift_score_less_than(), ImageDatasetDrift(**kwargs),
PropertyLabelCorrelationChange(**kwargs).add_condition_property_pps_difference_less_than(), )
def model_evaluation(scorers: Union[Dict[str, Union[Callable, str]], List[Any]] = None,
area_range: Tuple[float, float] = (32 ** 2, 96 ** 2),
image_properties: List[Dict[str, Any]] = None, prediction_properties: List[Dict[str, Any]] = None,
**kwargs) -> Suite:
"""Suite for evaluating the model's performance over different metrics, segments, error analysis, \
comparing to baseline, and more.
List of Checks:
.. list-table:: List of Checks
:widths: 50 50
:header-rows: 1
* - Check Example
- API Reference
* - :ref:`vision__class_performance`
- :class:`~deepchecks.vision.checks.model_evaluation.ClassPerformance`
* - :ref:`vision__mean_average_precision_report`
- :class:`~deepchecks.vision.checks.model_evaluation.MeanAveragePrecisionReport`
* - :ref:`vision__mean_average_recall_report`
- :class:`~deepchecks.vision.checks.model_evaluation.MeanAverageRecallReport`
* - :ref:`vision__prediction_drift`
- :class:`~deepchecks.vision.checks.model_evaluation.PredictionDrift`
* - :ref:`vision__simple_model_comparison`
- :class:`~deepchecks.vision.checks.model_evaluation.SimpleModelComparison`
* - :ref:`vision__weak_segments_performance`
      - :class:`~deepchecks.vision.checks.model_evaluation.WeakSegmentsPerformance`
Parameters
----------
scorers: Union[Dict[str, Union[Callable, str]], List[Any]], default: None
Scorers to override the default scorers (metrics), find more about the supported formats at
https://docs.deepchecks.com/stable/user-guide/general/metrics_guide.html
area_range: tuple, default: (32**2, 96**2)
Slices for small/medium/large buckets. (For object detection tasks only)
image_properties : List[Dict[str, Any]], default: None
List of properties. Replaces the default deepchecks properties.
Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str),
representing attributes of said method. 'output_type' must be one of:
- ``'numerical'`` - for continuous ordinal outputs.
- ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers,
but these numbers do not have inherent value.
For more on image / label properties, see the guide about :ref:`vision__properties_guide`.
prediction_properties : List[Dict[str, Any]], default: None
List of properties. Replaces the default deepchecks properties.
Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str),
representing attributes of said method. 'output_type' must be one of:
- ``'numerical'`` - for continuous ordinal outputs.
- ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers,
but these numbers do not have inherent value.
- ``'class_id'`` - for properties that return the class_id. This is used because these
properties are later matched with the ``VisionData.label_map``, if one was given.
For more on image / label properties, see the guide about :ref:`vision__properties_guide`.
**kwargs : dict
additional arguments to pass to the checks.
Returns
-------
Suite
A suite for evaluating the model's performance.
Examples
--------
>>> from deepchecks.vision.suites import model_evaluation
>>> suite = model_evaluation()
>>> test_vision_data = ...
>>> result = suite.run(test_vision_data, max_samples=800)
>>> result.show()
See Also
--------
:ref:`vision__classification_tutorial`
:ref:`vision__detection_tutorial`
:ref:`vision__segmentation_tutorial`
"""
args = locals()
args.pop('kwargs')
non_none_args = {k: v for k, v in args.items() if v is not None}
kwargs = {**non_none_args, **kwargs}
return Suite('Model Evaluation Suite',
ClassPerformance(**kwargs).add_condition_train_test_relative_degradation_less_than(),
MeanAveragePrecisionReport(**kwargs).add_condition_average_mean_average_precision_greater_than(),
MeanAverageRecallReport(**kwargs),
PredictionDrift(**kwargs).add_condition_drift_score_less_than(),
SimpleModelComparison(**kwargs).add_condition_gain_greater_than(), ConfusionMatrixReport(**kwargs),
WeakSegmentsPerformance(**kwargs).add_condition_segments_relative_performance_greater_than(), )
def data_integrity(image_properties: List[Dict[str, Any]] = None, label_properties: List[Dict[str, Any]] = None,
**kwargs) -> Suite:
"""
Create a suite that includes integrity checks.
List of Checks:
.. list-table:: List of Checks
:widths: 50 50
:header-rows: 1
* - Check Example
- API Reference
* - :ref:`vision__image_property_outliers`
- :class:`~deepchecks.vision.checks.data_integrity.ImagePropertyOutliers`
* - :ref:`vision__label_property_outliers`
      - :class:`~deepchecks.vision.checks.data_integrity.LabelPropertyOutliers`
Parameters
----------
image_properties : List[Dict[str, Any]], default: None
List of properties. Replaces the default deepchecks properties.
Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str),
representing attributes of said method. 'output_type' must be one of:
- ``'numerical'`` - for continuous ordinal outputs.
- ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers,
but these numbers do not have inherent value.
For more on image / label properties, see the guide about :ref:`vision__properties_guide`.
label_properties : List[Dict[str, Any]], default: None
List of properties. Replaces the default deepchecks properties.
Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str),
representing attributes of said method. 'output_type' must be one of:
- ``'numerical'`` - for continuous ordinal outputs.
- ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers,
but these numbers do not have inherent value.
- ``'class_id'`` - for properties that return the class_id. This is used because these
properties are later matched with the ``VisionData.label_map``, if one was given.
For more on image / label properties, see the guide about :ref:`vision__properties_guide`.
**kwargs : dict
additional arguments to pass to the checks.
Returns
-------
Suite
A suite that includes integrity checks.
Examples
--------
>>> from deepchecks.vision.suites import data_integrity
>>> suite = data_integrity()
>>> vision_data = ...
>>> result = suite.run(vision_data, max_samples=800)
>>> result.show()
See Also
--------
:ref:`vision__classification_tutorial`
:ref:`vision__detection_tutorial`
:ref:`vision__segmentation_tutorial`
"""
args = locals()
args.pop('kwargs')
non_none_args = {k: v for k, v in args.items() if v is not None}
kwargs = {**non_none_args, **kwargs}
return Suite('Data Integrity Suite', ImagePropertyOutliers(**kwargs), LabelPropertyOutliers(**kwargs),
PropertyLabelCorrelation(**kwargs))
The provided code snippet includes necessary dependencies for implementing the `full_suite` function. Write a Python function `def full_suite(n_samples: Optional[int] = 5000, image_properties: List[Dict[str, Any]] = None, label_properties: List[Dict[str, Any]] = None, prediction_properties: List[Dict[str, Any]] = None, scorers: Union[Dict[str, Union[Callable, str]], List[Any]] = None, area_range: Tuple[float, float] = (32 ** 2, 96 ** 2), **kwargs) -> Suite` to solve the following problem:
Create a suite that includes many of the implemented checks, for a quick overview of your model and data. Parameters ---------- n_samples : Optional[int] , default : 5000 Number of samples to use for the checks in the suite. If None, all samples will be used. image_properties : List[Dict[str, Any]], default: None List of properties. Replaces the default deepchecks properties. Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str), representing attributes of said method. 'output_type' must be one of: - ``'numerical'`` - for continuous ordinal outputs. - ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers, but these numbers do not have inherent value. For more on image / label properties, see the guide about :ref:`vision__properties_guide`. label_properties : List[Dict[str, Any]], default: None List of properties. Replaces the default deepchecks properties. Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str), representing attributes of said method. 'output_type' must be one of: - ``'numerical'`` - for continuous ordinal outputs. - ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers, but these numbers do not have inherent value. - ``'class_id'`` - for properties that return the class_id. This is used because these properties are later matched with the ``VisionData.label_map``, if one was given. For more on image / label properties, see the guide about :ref:`vision__properties_guide`. scorers: Union[Dict[str, Union[Callable, str]], List[Any]], default: None Scorers to override the default scorers (metrics), find more about the supported formats at https://docs.deepchecks.com/stable/user-guide/general/metrics_guide.html area_range: tuple, default: (32**2, 96**2) Slices for small/medium/large buckets. (For object detection tasks only) image_properties : List[Dict[str, Any]], default: None List of properties. Replaces the default deepchecks properties. Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str), representing attributes of said method. 'output_type' must be one of: - ``'numerical'`` - for continuous ordinal outputs. - ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers, but these numbers do not have inherent value. For more on image / label properties, see the guide about :ref:`vision__properties_guide`. prediction_properties : List[Dict[str, Any]], default: None List of properties. Replaces the default deepchecks properties. Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str), representing attributes of said method. 'output_type' must be one of: - ``'numerical'`` - for continuous ordinal outputs. - ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers, but these numbers do not have inherent value. - ``'class_id'`` - for properties that return the class_id. This is used because these properties are later matched with the ``VisionData.label_map``, if one was given. For more on image / label properties, see the guide about :ref:`vision__properties_guide`. Returns ------- Suite A suite that includes integrity checks.
Here is the function:
def full_suite(n_samples: Optional[int] = 5000, image_properties: List[Dict[str, Any]] = None,
label_properties: List[Dict[str, Any]] = None, prediction_properties: List[Dict[str, Any]] = None,
scorers: Union[Dict[str, Union[Callable, str]], List[Any]] = None,
area_range: Tuple[float, float] = (32 ** 2, 96 ** 2), **kwargs) -> Suite:
"""Create a suite that includes many of the implemented checks, for a quick overview of your model and data.
Parameters
----------
n_samples : Optional[int] , default : 5000
Number of samples to use for the checks in the suite. If None, all samples will be used.
image_properties : List[Dict[str, Any]], default: None
List of properties. Replaces the default deepchecks properties.
Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str),
representing attributes of said method. 'output_type' must be one of:
- ``'numerical'`` - for continuous ordinal outputs.
- ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers,
but these numbers do not have inherent value.
For more on image / label properties, see the guide about :ref:`vision__properties_guide`.
label_properties : List[Dict[str, Any]], default: None
List of properties. Replaces the default deepchecks properties.
Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str),
representing attributes of said method. 'output_type' must be one of:
- ``'numerical'`` - for continuous ordinal outputs.
- ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers,
but these numbers do not have inherent value.
- ``'class_id'`` - for properties that return the class_id. This is used because these
properties are later matched with the ``VisionData.label_map``, if one was given.
For more on image / label properties, see the guide about :ref:`vision__properties_guide`.
scorers: Union[Dict[str, Union[Callable, str]], List[Any]], default: None
Scorers to override the default scorers (metrics), find more about the supported formats at
https://docs.deepchecks.com/stable/user-guide/general/metrics_guide.html
area_range: tuple, default: (32**2, 96**2)
Slices for small/medium/large buckets. (For object detection tasks only)
prediction_properties : List[Dict[str, Any]], default: None
List of properties. Replaces the default deepchecks properties.
Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str),
representing attributes of said method. 'output_type' must be one of:
- ``'numerical'`` - for continuous ordinal outputs.
- ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers,
but these numbers do not have inherent value.
- ``'class_id'`` - for properties that return the class_id. This is used because these
properties are later matched with the ``VisionData.label_map``, if one was given.
For more on image / label properties, see the guide about :ref:`vision__properties_guide`.
Returns
-------
Suite
        A suite that includes many of the implemented checks, for a quick overview of your model and data.
"""
args = locals()
args.pop('kwargs')
non_none_args = {k: v for k, v in args.items() if v is not None}
kwargs = {**non_none_args, **kwargs}
return Suite('Full Suite', model_evaluation(**kwargs), train_test_validation(**kwargs), data_integrity(**kwargs)) | Create a suite that includes many of the implemented checks, for a quick overview of your model and data. Parameters ---------- n_samples : Optional[int] , default : 5000 Number of samples to use for the checks in the suite. If None, all samples will be used. image_properties : List[Dict[str, Any]], default: None List of properties. Replaces the default deepchecks properties. Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str), representing attributes of said method. 'output_type' must be one of: - ``'numerical'`` - for continuous ordinal outputs. - ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers, but these numbers do not have inherent value. For more on image / label properties, see the guide about :ref:`vision__properties_guide`. label_properties : List[Dict[str, Any]], default: None List of properties. Replaces the default deepchecks properties. Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str), representing attributes of said method. 'output_type' must be one of: - ``'numerical'`` - for continuous ordinal outputs. - ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers, but these numbers do not have inherent value. - ``'class_id'`` - for properties that return the class_id. This is used because these properties are later matched with the ``VisionData.label_map``, if one was given. For more on image / label properties, see the guide about :ref:`vision__properties_guide`. scorers: Union[Dict[str, Union[Callable, str]], List[Any]], default: None Scorers to override the default scorers (metrics), find more about the supported formats at https://docs.deepchecks.com/stable/user-guide/general/metrics_guide.html area_range: tuple, default: (32**2, 96**2) Slices for small/medium/large buckets. (For object detection tasks only) image_properties : List[Dict[str, Any]], default: None List of properties. Replaces the default deepchecks properties. Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str), representing attributes of said method. 'output_type' must be one of: - ``'numerical'`` - for continuous ordinal outputs. - ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers, but these numbers do not have inherent value. For more on image / label properties, see the guide about :ref:`vision__properties_guide`. prediction_properties : List[Dict[str, Any]], default: None List of properties. Replaces the default deepchecks properties. Each property is a dictionary with keys ``'name'`` (str), ``method`` (Callable) and ``'output_type'`` (str), representing attributes of said method. 'output_type' must be one of: - ``'numerical'`` - for continuous ordinal outputs. - ``'categorical'`` - for discrete, non-ordinal outputs. These can still be numbers, but these numbers do not have inherent value. - ``'class_id'`` - for properties that return the class_id. This is used because these properties are later matched with the ``VisionData.label_map``, if one was given. For more on image / label properties, see the guide about :ref:`vision__properties_guide`. Returns ------- Suite A suite that includes integrity checks. |
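Unlike the other suites above, full_suite has no Examples section; a typical invocation, mirroring the examples of the other suites (the data objects are placeholders), would presumably look like:
>>> from deepchecks.vision.suites import full_suite
>>> suite = full_suite(n_samples=2000)
>>> train_data, test_data = ...
>>> result = suite.run(train_data, test_data, max_samples=800)
>>> result.show()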
568 | from collections import defaultdict
from typing import List
import numpy as np
def compute_bounding_box_class_ious(detected: np.ndarray, ground_truth: np.ndarray):
"""Compute ious between bounding boxes of the same class."""
bb_info = group_class_detection_label(detected, ground_truth)
# Calculating pairwise IoUs per class
return {class_id: compute_pairwise_ious(info["detected"], info["ground_truth"], jaccard_iou)
for class_id, info in bb_info.items()}
The provided code snippet includes necessary dependencies for implementing the `per_sample_mean_iou` function. Write a Python function `def per_sample_mean_iou(predictions: np.ndarray, labels: np.ndarray) -> List[float]` to solve the following problem:
Calculate the mean IoU per sample.
Here is the function:
def per_sample_mean_iou(predictions: np.ndarray, labels: np.ndarray) -> List[float]:
"""Calculate mean iou for a single sample."""
mean_ious = []
for detected, ground_truth in zip(predictions, labels):
if len(ground_truth) == 0:
if len(detected) == 0:
mean_ious.append(1)
else:
mean_ious.append(0)
continue
elif len(detected) == 0:
mean_ious.append(0)
continue
ious = compute_bounding_box_class_ious(detected, ground_truth)
count = 0
sum_iou = 0
for _, cls_ious in ious.items():
# Find best fit for each detection
for detection in cls_ious:
sum_iou += max(detection, default=0)
count += 1
if count:
mean_ious.append(sum_iou / count)
else:
mean_ious.append(0)
return mean_ious | Calculate mean iou for a single sample. |
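The helpers group_class_detection_label, compute_pairwise_ious and jaccard_iou are not included in the snippet above. As a rough, self-contained sketch of the quantity they compute, an intersection-over-union for two boxes given as [x, y, w, h] (the coordinate layout is an assumption here, not taken from the snippet) could look like:
def iou_xywh(box_a, box_b):
    """Illustrative IoU for two boxes given as (x, y, w, h): top-left corner plus width and height."""
    ax2, ay2 = box_a[0] + box_a[2], box_a[1] + box_a[3]
    bx2, by2 = box_b[0] + box_b[2], box_b[1] + box_b[3]
    inter_w = max(0.0, min(ax2, bx2) - max(box_a[0], box_b[0]))
    inter_h = max(0.0, min(ay2, by2) - max(box_a[1], box_b[1]))
    inter = inter_w * inter_h
    union = box_a[2] * box_a[3] + box_b[2] * box_b[3] - inter
    return inter / union if union else 0.0

print(iou_xywh([0, 0, 10, 10], [5, 5, 10, 10]))  # 25 / 175 ≈ 0.143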
569 | from typing import Tuple
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `segmentation_counts_per_class` function. Write a Python function `def segmentation_counts_per_class(y_true_onehot: np.ndarray, y_pred_onehot: np.ndarray)` to solve the following problem:
Compute the ground truth, predicted and intersection areas per class for segmentation metrics.
Here is the function:
def segmentation_counts_per_class(y_true_onehot: np.ndarray, y_pred_onehot: np.ndarray):
"""Compute the ground truth, predicted and intersection areas per class for segmentation metrics."""
tp_onehot = np.logical_and(y_true_onehot, y_pred_onehot)
tp_count_per_class = np.asarray([np.sum(tp_onehot[channel]) for channel in range(tp_onehot.shape[0])])
y_true_count_per_class = np.asarray([np.sum(y_true_onehot[channel]) for channel in range(tp_onehot.shape[0])])
pred_count_per_class = np.asarray([np.sum(y_pred_onehot[channel]) for channel in range(tp_onehot.shape[0])])
return tp_count_per_class, y_true_count_per_class, pred_count_per_class | Compute the ground truth, predicted and intersection areas per class for segmentation metrics. |
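A small usage sketch, assuming segmentation_counts_per_class as defined above is in scope, with two classes and a 2x2 mask:
import numpy as np

# Ground truth and prediction as (C, H, W) one-hot masks (the prediction already thresholded to 0/1).
y_true_onehot = np.array([[[1, 1], [0, 0]],   # class 0 occupies the top row
                          [[0, 0], [1, 1]]])  # class 1 occupies the bottom row
y_pred_onehot = np.array([[[1, 0], [0, 0]],   # class 0 predicted for one pixel
                          [[0, 1], [1, 1]]])  # class 1 predicted for three pixels

tp, gt, pred = segmentation_counts_per_class(y_true_onehot, y_pred_onehot)
# tp   -> [1 2]  pixels where prediction and ground truth agree, per class
# gt   -> [2 2]  ground-truth pixels per class
# pred -> [1 3]  predicted pixels per class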
570 | import warnings
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
from ignite.metrics import Metric
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.vision.metrics_utils.metric_mixin import MetricMixin, ObjectDetectionMetricMixin
def _dict_conc(test_list):
result = defaultdict(list)
for i in range(len(test_list)):
current = test_list[i]
for key, value in current.items():
if isinstance(value, list):
for j in range(len(value)):
result[key].append(value[j])
else:
result[key].append(value)
return result | null |
571 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `_calc_recall` function. Write a Python function `def _calc_recall(tp: float, fp: float, fn: float) -> float` to solve the following problem:
Calculate recall for given matches and number of positives.
Here is the function:
def _calc_recall(tp: float, fp: float, fn: float) -> float: # pylint: disable=unused-argument
"""Calculate recall for given matches and number of positives."""
if tp + fn == 0:
return -1
rc = tp / (tp + fn + np.finfo(float).eps)
return rc | Calculate recall for given matches and number of positives. |
572 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `_calc_precision` function. Write a Python function `def _calc_precision(tp: float, fp: float, fn: float) -> float` to solve the following problem:
Calculate precision for given matches and number of positives.
Here is the function:
def _calc_precision(tp: float, fp: float, fn: float) -> float:
"""Calculate precision for given matches and number of positives."""
if tp + fn == 0:
return -1
if tp + fp == 0:
return 0
pr = tp / (tp + fp + np.finfo(float).eps)
return pr | Calculate precision for given matches and number of positives. |
573 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `_calc_f1` function. Write a Python function `def _calc_f1(tp: float, fp: float, fn: float) -> float` to solve the following problem:
Calculate F1 for given matches and number of positives.
Here is the function:
def _calc_f1(tp: float, fp: float, fn: float) -> float:
"""Calculate F1 for given matches and number of positives."""
if tp + fn == 0:
return -1
if tp + fp == 0:
return 0
rc = tp / (tp + fn + np.finfo(float).eps)
pr = tp / (tp + fp + np.finfo(float).eps)
f1 = (2 * rc * pr) / (rc + pr + np.finfo(float).eps)
return f1 | Calculate F1 for given matches and number of positives. |
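A quick worked example of the convention shared by these TP/FP/FN helpers: the -1 return value marks a class with no ground-truth positives, and such scores are filtered out downstream (see metric_results_to_df further below). Assuming _calc_f1 as defined above:
# With 8 true positives, 2 false positives and 4 false negatives:
#   precision = 8 / (8 + 2) = 0.8, recall = 8 / (8 + 4) ≈ 0.667
#   f1 = 2 * 0.8 * 0.667 / (0.8 + 0.667) ≈ 0.727
print(round(_calc_f1(tp=8, fp=2, fn=4), 3))  # 0.727
print(_calc_f1(tp=0, fp=0, fn=0))            # -1, no ground-truth boxes for this class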
574 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `_calc_fpr` function. Write a Python function `def _calc_fpr(tp: float, fp: float, fn: float) -> float` to solve the following problem:
Calculate FPR for given matches and number of positives.
Here is the function:
def _calc_fpr(tp: float, fp: float, fn: float) -> float:
"""Calculate FPR for given matches and number of positives."""
if tp + fn == 0:
return -1
if tp + fp == 0:
return 0
return fp / (tp + fn + np.finfo(float).eps) | Calculate FPR for given matches and number of positives. |
575 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `_calc_fnr` function. Write a Python function `def _calc_fnr(tp: float, fp: float, fn: float) -> float` to solve the following problem:
Calculate FNR for given matches and number of positives.
Here is the function:
def _calc_fnr(tp: float, fp: float, fn: float) -> float:
"""Calculate FNR for given matches and number of positives."""
if tp + fn == 0:
return -1
if tp + fp == 0:
return 1
return fn / (tp + fn + np.finfo(float).eps) | Calculate FNR for given matches and number of positives. |
576 | from collections import defaultdict
from typing import Tuple
import numpy as np
import torch
from ignite.metrics import Metric
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.vision.metrics_utils.semantic_segmentation_metric_utils import (format_segmentation_masks,
segmentation_counts_micro,
segmentation_counts_per_class)
def format_segmentation_masks(y_true: np.ndarray, y_pred: np.ndarray, threshold):
"""Bring the ground truth and the prediction masks to the same format (C, W, H) with values 1.0 or 0.0."""
pred_onehot = np.where(y_pred > threshold, 1.0, 0.0)
label_onehot = np.zeros_like(pred_onehot)
for channel in range(pred_onehot.shape[0]):
label_onehot[channel] = y_true == channel
return label_onehot, pred_onehot
def segmentation_counts_micro(y_true_onehot: np.ndarray, y_pred_onehot: np.ndarray) -> Tuple[int, int, int]:
"""Compute the micro averaged ground truth, predicted and intersection areas for segmentation metrics."""
tp_onehot = np.logical_and(y_true_onehot, y_pred_onehot)
return np.sum(tp_onehot), np.sum(y_true_onehot), np.sum(y_pred_onehot)
The provided code snippet includes necessary dependencies for implementing the `per_sample_dice` function. Write a Python function `def per_sample_dice(predictions, labels, threshold: float = 0.5, smooth: float = 1e-3)` to solve the following problem:
Calculate Dice score per sample.
Here is the function:
def per_sample_dice(predictions, labels, threshold: float = 0.5, smooth: float = 1e-3):
"""Calculate Dice score per sample."""
score = np.empty(len(labels))
for i in range(len(labels)):
gt_onehot, pred_onehot = format_segmentation_masks(labels[i], predictions[i], threshold)
tp_count, gt_count, pred_count = segmentation_counts_micro(gt_onehot, pred_onehot)
score[i] = (2 * tp_count + smooth) / (gt_count + pred_count + smooth)
return score.tolist() | Calculate Dice score per sample. |
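A minimal usage sketch, assuming the three functions above are in scope. Labels are per-pixel class-id masks and predictions are (C, H, W) per-class score maps, which is the format format_segmentation_masks expects:
import numpy as np

labels = [np.array([[0, 0],
                    [1, 1]])]                          # one 2x2 ground-truth mask of class ids
predictions = [np.array([[[0.9, 0.8], [0.2, 0.1]],     # class 0 scores
                         [[0.1, 0.2], [0.8, 0.6]]])]   # class 1 scores

print(per_sample_dice(predictions, labels))  # ~[1.0], the thresholded prediction matches the mask exactly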
577 | import typing as t
from copy import copy
from numbers import Number
import numpy as np
import pandas as pd
import torch
from ignite.metrics import Metric
from deepchecks.core import DatasetKind
from deepchecks.core.errors import DeepchecksNotSupportedError, DeepchecksValueError
from deepchecks.utils.metrics import get_scorer_name
from deepchecks.vision.metrics_utils import (CustomClassificationScorer, CustomMetric, ObjectDetectionAveragePrecision,
ObjectDetectionTpFpFn)
from deepchecks.vision.metrics_utils.semantic_segmentation_metrics import MeanDice, MeanIoU
from deepchecks.vision.vision_data import TaskType, VisionData
def get_default_classification_scorers(): # will use sklearn scorers
return {
'Precision': CustomClassificationScorer('precision_per_class'),
'Recall': CustomClassificationScorer('recall_per_class')
}
def get_default_object_detection_scorers() -> t.Dict[str, Metric]:
return {
'Average Precision': detection_dict['average_precision_per_class'](),
'Average Recall': detection_dict['average_recall_per_class']()
}
def get_default_semantic_segmentation_scorers() -> t.Dict[str, Metric]:
return {
'Dice': semantic_segmentation_dict['dice_per_class']()
}
detection_dict = {
'precision_per_class': lambda: ObjectDetectionTpFpFn(evaluating_function='precision', averaging_method='per_class'),
'precision': lambda: ObjectDetectionTpFpFn(evaluating_function='precision', averaging_method='binary'),
'precision_macro': lambda: ObjectDetectionTpFpFn(evaluating_function='precision', averaging_method='macro'),
'precision_micro': lambda: ObjectDetectionTpFpFn(evaluating_function='precision', averaging_method='micro'),
'precision_weighted': lambda: ObjectDetectionTpFpFn(evaluating_function='precision', averaging_method='weighted'),
'recall_per_class': lambda: ObjectDetectionTpFpFn(evaluating_function='recall', averaging_method='per_class'),
'recall': lambda: ObjectDetectionTpFpFn(evaluating_function='recall', averaging_method='binary'),
'recall_macro': lambda: ObjectDetectionTpFpFn(evaluating_function='recall', averaging_method='macro'),
'recall_micro': lambda: ObjectDetectionTpFpFn(evaluating_function='recall', averaging_method='micro'),
'recall_weighted': lambda: ObjectDetectionTpFpFn(evaluating_function='recall', averaging_method='weighted'),
'f1_per_class': lambda: ObjectDetectionTpFpFn(evaluating_function='f1', averaging_method='per_class'),
'f1': lambda: ObjectDetectionTpFpFn(evaluating_function='f1', averaging_method='binary'),
'f1_macro': lambda: ObjectDetectionTpFpFn(evaluating_function='f1', averaging_method='macro'),
'f1_micro': lambda: ObjectDetectionTpFpFn(evaluating_function='f1', averaging_method='micro'),
'f1_weighted': lambda: ObjectDetectionTpFpFn(evaluating_function='f1', averaging_method='weighted'),
'fpr_per_class': lambda: ObjectDetectionTpFpFn(evaluating_function='fpr', averaging_method='per_class'),
'fpr': lambda: ObjectDetectionTpFpFn(evaluating_function='fpr', averaging_method='binary'),
'fpr_macro': lambda: ObjectDetectionTpFpFn(evaluating_function='fpr', averaging_method='macro'),
'fpr_micro': lambda: ObjectDetectionTpFpFn(evaluating_function='fpr', averaging_method='micro'),
'fpr_weighted': lambda: ObjectDetectionTpFpFn(evaluating_function='fpr', averaging_method='weighted'),
'fnr_per_class': lambda: ObjectDetectionTpFpFn(evaluating_function='fnr', averaging_method='per_class'),
'fnr': lambda: ObjectDetectionTpFpFn(evaluating_function='fnr', averaging_method='binary'),
'fnr_macro': lambda: ObjectDetectionTpFpFn(evaluating_function='fnr', averaging_method='macro'),
'fnr_micro': lambda: ObjectDetectionTpFpFn(evaluating_function='fnr', averaging_method='micro'),
'fnr_weighted': lambda: ObjectDetectionTpFpFn(evaluating_function='fnr', averaging_method='weighted'),
'average_precision_per_class': lambda: ObjectDetectionAveragePrecision(return_option='ap'),
'average_precision_macro': lambda: ObjectDetectionAveragePrecision(return_option='ap', average='macro'),
'average_precision_weighted': lambda: ObjectDetectionAveragePrecision(return_option='ap', average='weighted'),
'average_recall_per_class': lambda: ObjectDetectionAveragePrecision(return_option='ar'),
'average_recall_macro': lambda: ObjectDetectionAveragePrecision(return_option='ar', average='macro'),
'average_recall_weighted': lambda: ObjectDetectionAveragePrecision(return_option='ar', average='weighted')
}
semantic_segmentation_dict = {
'dice_per_class': MeanDice,
'dice_macro': lambda: MeanDice(average='macro'),
'dice_micro': lambda: MeanDice(average='micro'),
'iou_per_class': MeanIoU,
'iou_macro': lambda: MeanIoU(average='macro'),
'iou_micro': lambda: MeanIoU(average='micro')
}
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
class DeepchecksNotSupportedError(DeepchecksBaseError):
"""Exception class that represents an unsupported action in Deepchecks."""
def get_scorer_name(scorer) -> str:
"""Get scorer name from a scorer."""
if isinstance(scorer, str):
return scorer[:scorer.index('_per_class')] if scorer.endswith('_per_class') else scorer
if hasattr(scorer, '__name__'):
return scorer.__name__
if isinstance(scorer, _BaseScorer):
return scorer._score_func.__name__ # pylint: disable=protected-access
return type(scorer).__name__
The provided code snippet includes necessary dependencies for implementing the `get_scorers_dict` function. Write a Python function `def get_scorers_dict( dataset: VisionData, alternative_scorers: t.Union[t.Dict[str, t.Union[Metric, str]], t.List[t.Union[Metric, str]]] = None, ) -> t.Dict[str, Metric]` to solve the following problem:
Get scorers list according to model object and label column. Parameters ---------- dataset : VisionData Dataset object alternative_scorers: Union[Dict[str, Union[Callable, str]], List[Any]] , default: None Scorers to override the default scorers (metrics), find more about the supported formats at https://docs.deepchecks.com/stable/user-guide/general/metrics_guide.html Returns ------- t.Dict[str, Metric] Scorers list
Here is the function:
def get_scorers_dict(
dataset: VisionData,
alternative_scorers: t.Union[t.Dict[str, t.Union[Metric, str]], t.List[t.Union[Metric, str]]] = None,
) -> t.Dict[str, Metric]:
"""Get scorers list according to model object and label column.
Parameters
----------
dataset : VisionData
Dataset object
alternative_scorers: Union[Dict[str, Union[Callable, str]], List[Any]] , default: None
Scorers to override the default scorers (metrics), find more about the supported formats at
https://docs.deepchecks.com/stable/user-guide/general/metrics_guide.html
Returns
-------
t.Dict[str, Metric]
Scorers list
"""
task_type = dataset.task_type
if alternative_scorers:
# For alternative scorers we create a copy since in suites we are running in parallel, so we can't use the same
# instance for several checks.
if isinstance(alternative_scorers, list):
alternative_scorers = {get_scorer_name(scorer): scorer for scorer in alternative_scorers}
scorers = {}
for name, metric in alternative_scorers.items():
# Validate that each alternative scorer is a correct type
if isinstance(metric, (Metric, CustomMetric)):
metric.reset()
scorers[name] = copy(metric)
elif isinstance(metric, str):
metric_name = metric.lower().replace(' ', '_').replace('sensitivity', 'recall')
if task_type == TaskType.OBJECT_DETECTION and metric_name in detection_dict:
converted_met = detection_dict[metric_name]()
elif task_type == TaskType.CLASSIFICATION:
converted_met = CustomClassificationScorer(metric)
elif task_type == TaskType.SEMANTIC_SEGMENTATION:
converted_met = semantic_segmentation_dict[metric_name]()
else:
raise DeepchecksNotSupportedError(
f'Unsupported metric: {name} of type {type(metric).__name__} was given.')
scorers[name] = converted_met
elif isinstance(metric, t.Callable):
if task_type == TaskType.CLASSIFICATION:
scorers[name] = CustomClassificationScorer(metric)
else:
raise DeepchecksNotSupportedError('Custom scikit-learn scorers are only supported for'
' classification.')
else:
raise DeepchecksValueError(
                    f'Expected metric type to be one of [ignite.Metric, callable, str], was {type(metric).__name__}.')
return scorers
elif task_type == TaskType.CLASSIFICATION:
scorers = get_default_classification_scorers()
elif task_type == TaskType.OBJECT_DETECTION:
scorers = get_default_object_detection_scorers()
elif task_type == TaskType.SEMANTIC_SEGMENTATION:
scorers = get_default_semantic_segmentation_scorers()
else:
raise DeepchecksNotSupportedError(f'No scorers match task_type {task_type}')
return scorers | Get scorers list according to model object and label column. Parameters ---------- dataset : VisionData Dataset object alternative_scorers: Union[Dict[str, Union[Callable, str]], List[Any]] , default: None Scorers to override the default scorers (metrics), find more about the supported formats at https://docs.deepchecks.com/stable/user-guide/general/metrics_guide.html Returns ------- t.Dict[str, Metric] Scorers list |
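A short usage sketch of the accepted alternative_scorers formats. Here vision_data stands for a hypothetical VisionData of an object detection task; strings are lower-cased, spaces become underscores and 'sensitivity' maps to 'recall' before the lookup in detection_dict, while Metric instances are reset and copied as-is:
# Dict form: display name -> metric instance or string key
scorers = get_scorers_dict(vision_data,
                           alternative_scorers={'AP': ObjectDetectionAveragePrecision(return_option='ap'),
                                                'Recall Per Class': 'recall_per_class'})

# List form: names are derived from the scorers themselves
scorers = get_scorers_dict(vision_data, alternative_scorers=['f1_macro', 'precision_per_class'])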
578 | import typing as t
from copy import copy
from numbers import Number
import numpy as np
import pandas as pd
import torch
from ignite.metrics import Metric
from deepchecks.core import DatasetKind
from deepchecks.core.errors import DeepchecksNotSupportedError, DeepchecksValueError
from deepchecks.utils.metrics import get_scorer_name
from deepchecks.vision.metrics_utils import (CustomClassificationScorer, CustomMetric, ObjectDetectionAveragePrecision,
ObjectDetectionTpFpFn)
from deepchecks.vision.metrics_utils.semantic_segmentation_metrics import MeanDice, MeanIoU
from deepchecks.vision.vision_data import TaskType, VisionData
class DeepchecksValueError(DeepchecksBaseError):
"""Exception class that represent a fault parameter was passed to Deepchecks."""
pass
The provided code snippet includes necessary dependencies for implementing the `metric_results_to_df` function. Write a Python function `def metric_results_to_df(results: dict, dataset: VisionData) -> pd.DataFrame` to solve the following problem:
Get a dict mapping metric name to per-class scores (or a single score) and convert it to a dataframe.
Here is the function:
def metric_results_to_df(results: dict, dataset: VisionData) -> pd.DataFrame:
"""Get dict of metric name to tensor of classes scores, and convert it to dataframe."""
result_list = []
for metric, scores in results.items():
if isinstance(scores, Number):
result_list.append([metric, pd.NA, pd.NA, scores])
elif len(scores) == 1:
result_list.append([metric, pd.NA, pd.NA, scores[0]])
elif isinstance(scores, (torch.Tensor, list, np.ndarray, dict)):
            # Deepchecks scorers return classification class scores as a dict but object detection scores as an array. TODO: unify
scores_iterator = scores.items() if isinstance(scores, dict) else enumerate(scores)
for class_id, class_score in scores_iterator:
class_name = dataset.label_map[class_id]
                # The data might contain fewer classes than the model was trained on. Filter out
                # any class id which is not present in the data.
if np.isnan(class_score) or class_name not in dataset.get_observed_classes() or class_score == -1:
continue
result_list.append([metric, class_id, class_name, class_score])
else:
raise DeepchecksValueError(f'The metric {metric} returned a '
f'{type(scores)} instead of an array/tensor')
return pd.DataFrame(result_list, columns=['Metric', 'Class', 'Class Name', 'Value']) | Get dict of metric name to tensor of classes scores, and convert it to dataframe. |
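A minimal sketch of the expected output. fake_data below is a hypothetical stand-in exposing only the two attributes the function touches (label_map and get_observed_classes); a real VisionData object would normally be passed:
from types import SimpleNamespace

fake_data = SimpleNamespace(label_map={0: 'cat', 1: 'dog'},
                            get_observed_classes=lambda: ['cat', 'dog'])
results = {'Precision': {0: 0.9, 1: 0.75},  # per-class scores -> one row per class
           'Accuracy': 0.82}                # single number -> one row with NA class
print(metric_results_to_df(results, fake_data))
# -> three rows: ('Precision', 0, 'cat', 0.9), ('Precision', 1, 'dog', 0.75), ('Accuracy', <NA>, <NA>, 0.82)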
579 | import typing as t
from copy import copy
from numbers import Number
import numpy as np
import pandas as pd
import torch
from ignite.metrics import Metric
from deepchecks.core import DatasetKind
from deepchecks.core.errors import DeepchecksNotSupportedError, DeepchecksValueError
from deepchecks.utils.metrics import get_scorer_name
from deepchecks.vision.metrics_utils import (CustomClassificationScorer, CustomMetric, ObjectDetectionAveragePrecision,
ObjectDetectionTpFpFn)
from deepchecks.vision.metrics_utils.semantic_segmentation_metrics import MeanDice, MeanIoU
from deepchecks.vision.vision_data import TaskType, VisionData
The provided code snippet includes necessary dependencies for implementing the `filter_classes_for_display` function. Write a Python function `def filter_classes_for_display(metrics_df: pd.DataFrame, metric_to_show_by: str, n_to_show: int, show_only: str, column_to_filter_by: str = 'Dataset', column_filter_value: str = None) -> list` to solve the following problem:
Filter the metrics dataframe for display purposes. Parameters ---------- metrics_df : pd.DataFrame Dataframe containing the metrics. n_to_show : int Number of classes to show in the report. show_only : str Specify which classes to show in the report. Can be one of the following: - 'largest': Show the largest classes. - 'smallest': Show the smallest classes. - 'random': Show random classes. - 'best': Show the classes with the highest score. - 'worst': Show the classes with the lowest score. metric_to_show_by : str Specify the metric to sort the results by. Relevant only when show_only is 'best' or 'worst'. column_to_filter_by : str , default: 'Dataset' Specify the name of the column to filter by. column_filter_value : str , default: None Specify the value of the column to filter by, if None will be set to test dataset name. Returns ------- list List of classes to show in the report.
Here is the function:
def filter_classes_for_display(metrics_df: pd.DataFrame,
metric_to_show_by: str,
n_to_show: int,
show_only: str,
column_to_filter_by: str = 'Dataset',
column_filter_value: str = None) -> list:
"""Filter the metrics dataframe for display purposes.
Parameters
----------
metrics_df : pd.DataFrame
Dataframe containing the metrics.
n_to_show : int
Number of classes to show in the report.
show_only : str
Specify which classes to show in the report. Can be one of the following:
- 'largest': Show the largest classes.
- 'smallest': Show the smallest classes.
- 'random': Show random classes.
- 'best': Show the classes with the highest score.
- 'worst': Show the classes with the lowest score.
metric_to_show_by : str
Specify the metric to sort the results by. Relevant only when show_only is 'best' or 'worst'.
column_to_filter_by : str , default: 'Dataset'
Specify the name of the column to filter by.
column_filter_value : str , default: None
Specify the value of the column to filter by, if None will be set to test dataset name.
Returns
-------
list
List of classes to show in the report.
"""
    # Working on the test dataset by default
if column_filter_value is None:
column_filter_value = DatasetKind.TEST.value
tests_metrics_df = metrics_df[(metrics_df[column_to_filter_by] == column_filter_value) &
(metrics_df['Metric'] == metric_to_show_by)]
if show_only == 'largest':
tests_metrics_df = tests_metrics_df.sort_values(by=['Number of samples', 'Value', 'Class'], ascending=False)
elif show_only == 'smallest':
tests_metrics_df = tests_metrics_df.sort_values(by=['Number of samples', 'Value', 'Class'], ascending=True)
elif show_only == 'random':
tests_metrics_df = tests_metrics_df.sample(frac=1)
elif show_only == 'best':
tests_metrics_df = tests_metrics_df.sort_values(by=['Value', 'Number of samples'], ascending=False)
elif show_only == 'worst':
tests_metrics_df = tests_metrics_df.sort_values(by=['Value', 'Number of samples'], ascending=True)
else:
raise ValueError(f'Unknown show_only value: {show_only}')
return tests_metrics_df.head(n_to_show)['Class'].to_list() | Filter the metrics dataframe for display purposes. Parameters ---------- metrics_df : pd.DataFrame Dataframe containing the metrics. n_to_show : int Number of classes to show in the report. show_only : str Specify which classes to show in the report. Can be one of the following: - 'largest': Show the largest classes. - 'smallest': Show the smallest classes. - 'random': Show random classes. - 'best': Show the classes with the highest score. - 'worst': Show the classes with the lowest score. metric_to_show_by : str Specify the metric to sort the results by. Relevant only when show_only is 'best' or 'worst'. column_to_filter_by : str , default: 'Dataset' Specify the name of the column to filter by. column_filter_value : str , default: None Specify the value of the column to filter by, if None will be set to test dataset name. Returns ------- list List of classes to show in the report. |
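A small, self-contained sketch (the 'Test' value below is assumed to match DatasetKind.TEST.value, which is what the default filter compares against):
import pandas as pd

df = pd.DataFrame({'Dataset': ['Test'] * 3,
                   'Metric': ['Precision'] * 3,
                   'Class': [0, 1, 2],
                   'Value': [0.9, 0.4, 0.7],
                   'Number of samples': [100, 20, 50]})

print(filter_classes_for_display(df, metric_to_show_by='Precision', n_to_show=2, show_only='worst'))
# [1, 2] - the two classes with the lowest Precision on the test split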
580 | import typing as t
from collections import defaultdict
from queue import PriorityQueue
import numpy as np
import pandas as pd
from deepchecks.core import CheckResult, DatasetKind
from deepchecks.utils.abstracts.confusion_matrix_abstract import create_confusion_matrix_figure
from deepchecks.vision._shared_docs import docstrings
from deepchecks.vision.base_checks import SingleDatasetCheck
from deepchecks.vision.context import Context
from deepchecks.vision.metrics_utils.iou_utils import jaccard_iou
from deepchecks.vision.vision_data import TaskType
from deepchecks.vision.vision_data.batch_wrapper import BatchWrapper
def filter_confusion_matrix(confusion_matrix: pd.DataFrame, number_of_categories: int) -> \
        t.Tuple[np.ndarray, list]:
pq = PriorityQueue()
for row, values in enumerate(confusion_matrix):
for col, value in enumerate(values):
if row != col:
pq.put((-value, (row, col)))
categories = set()
while not pq.empty():
if len(categories) >= number_of_categories:
break
_, (row, col) = pq.get()
categories.add(row)
categories.add(col)
categories = sorted(categories)
return confusion_matrix[np.ix_(categories, categories)], categories | null |
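A small worked example, assuming the function above is in scope. Despite the pd.DataFrame annotation, the body indexes the input as a 2D numpy array:
import numpy as np

cm = np.array([[50,  2,  0],
               [30, 40,  1],
               [ 0,  0,  5]])

filtered, kept = filter_confusion_matrix(cm, number_of_categories=2)
# kept == [0, 1]: classes 0 and 1 take part in the largest off-diagonal confusion (30)
# filtered == [[50, 2], [30, 40]], the confusion matrix restricted to those classes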
581 | from typing import Any, Callable, Dict, Hashable, List, Optional, Union
import numpy as np
import pandas as pd
import plotly.express as px
from deepchecks.core import CheckResult, ConditionCategory, ConditionResult, DatasetKind
from deepchecks.core.errors import DeepchecksValueError
from deepchecks.utils import plot
from deepchecks.utils.metrics import get_gain
from deepchecks.utils.strings import format_percent
from deepchecks.vision._shared_docs import docstrings
from deepchecks.vision.base_checks import TrainTestCheck
from deepchecks.vision.context import Context
from deepchecks.vision.metrics_utils import CustomClassificationScorer, get_scorers_dict, metric_results_to_df
from deepchecks.vision.metrics_utils.scorers import filter_classes_for_display
from deepchecks.vision.vision_data import TaskType, VisionData
from deepchecks.vision.vision_data.batch_wrapper import BatchWrapper
def average_scores(scores, simple_model_scores, include_classes):
"""
Calculate the average of the scores for each metric for all classes.
Parameters
----------
scores : pd.DataFrame
the scores for the given model
simple_model_scores : pd.DataFrame
the scores for the simple model
include_classes : List[Hashable]
the classes to include in the calculation
Returns
-------
Dictionary[str, Dictionary[str, float]]
the average scores for each metric. The keys are the metric names, and the values are a dictionary
with the keys being Origin and Simple and the values being the average score.
"""
result = {}
metrics = scores['Metric'].unique()
for metric in metrics:
model_score = 0
simple_score = 0
total = 0
for _, row in scores.loc[scores['Metric'] == metric].iterrows():
if include_classes and row['Class'] not in include_classes:
continue
model_score += row['Value']
simple_score += simple_model_scores.loc[(simple_model_scores['Class'] == row['Class']) &
(simple_model_scores['Metric'] == metric)]['Value'].values[0]
total += 1
result[metric] = {
'Origin': model_score / total,
'Simple': simple_score / total
}
return result
def get_gain(base_score, score, perfect_score, max_gain):
"""Get gain between base score and score compared to the distance from the perfect score."""
distance_from_perfect = perfect_score - base_score
scores_diff = score - base_score
if distance_from_perfect == 0:
# If both base score and score are perfect, return 0 gain
if scores_diff == 0:
return 0
# else base_score is better than score, return -max_gain
return -max_gain
ratio = scores_diff / distance_from_perfect
if ratio < -max_gain:
return -max_gain
if ratio > max_gain:
return max_gain
return ratio
def format_percent(ratio: float, floating_point: int = 2, scientific_notation_threshold: int = 4,
add_positive_prefix: bool = False) -> str:
"""Format percent for elegant display.
Parameters
----------
ratio : float
Ratio to be displayed as percent
floating_point: int , default: 2
Number of floating points to display
scientific_notation_threshold: int, default: 4
Max number of floating points for which to show number as float. If number of floating points is larger than
this parameter, scientific notation (e.g. "10E-5%") will be shown.
add_positive_prefix: bool, default: False
add plus sign before positive percentages (minus sign is always added for negative percentages).
Returns
-------
str
String of ratio as percent
"""
result: str
if ratio < 0:
ratio = -ratio
prefix = '-'
else:
prefix = '+' if add_positive_prefix and ratio != 0 else ''
if int(ratio) == ratio:
result = f'{int(ratio) * 100}%'
elif ratio > 1:
result = truncate_zero_percent(ratio, floating_point)
elif ratio < 10**(-(2+floating_point)):
if ratio > 10**(-(2+scientific_notation_threshold)):
result = truncate_zero_percent(ratio, scientific_notation_threshold)
else:
result = f'{Decimal(ratio * 100):.{floating_point}E}%'
elif ratio > (1-10**(-(2+floating_point))):
if floating_point > 0:
result = f'99.{"".join(["9"]*floating_point)}%'
else:
result = '99%'
else:
result = truncate_zero_percent(ratio, floating_point)
return prefix + result
def calculate_condition_logic(result, include_classes=None, average=False, max_gain=None,
min_allowed_gain=None) -> ConditionResult:
scores = result.loc[result['Model'] == 'Given Model']
perfect_scores = result.loc[result['Model'] == 'Perfect Model']
simple_scores = result.loc[result['Model'] == 'Simple Model']
metrics = scores['Metric'].unique()
# Save min gain info to print when condition pass
min_gain = (np.inf, '')
def update_min_gain(gain, metric, class_name=None):
nonlocal min_gain
if gain < min_gain[0]:
message = f'Found minimal gain of {format_percent(gain)} for metric {metric}'
if class_name:
message += f' and class {class_name}'
min_gain = gain, message
fails = {}
if not average:
for metric in metrics:
failed_classes = {}
for _, scores_row in scores.loc[scores['Metric'] == metric].iterrows():
curr_class = scores_row['Class']
curr_class_name = scores_row['Class Name']
curr_value = scores_row['Value']
if include_classes and curr_class not in include_classes:
continue
perfect = perfect_scores.loc[(perfect_scores['Metric'] == metric) &
(perfect_scores['Class'] == curr_class)]['Value'].values[0]
if curr_value == perfect:
continue
simple_score_value = simple_scores.loc[(simple_scores['Class'] == curr_class) &
(simple_scores['Metric'] == metric)]['Value'].values[0]
gain = get_gain(simple_score_value,
curr_value,
perfect,
max_gain)
update_min_gain(gain, metric, curr_class_name)
if gain <= min_allowed_gain:
failed_classes[curr_class_name] = format_percent(gain)
if failed_classes:
fails[metric] = failed_classes
else:
scores = average_scores(scores, simple_scores, include_classes)
for metric, models_scores in scores.items():
metric_perfect_score = perfect_scores.loc[(perfect_scores['Metric'] == metric)]['Value'].values[0]
# If origin model is perfect, skip the gain calculation
if models_scores['Origin'] == metric_perfect_score:
continue
gain = get_gain(models_scores['Simple'],
models_scores['Origin'],
metric_perfect_score,
max_gain)
update_min_gain(gain, metric)
if gain <= min_allowed_gain:
fails[metric] = format_percent(gain)
if fails:
msg = f'Found metrics with gain below threshold: {fails}'
return ConditionResult(ConditionCategory.FAIL, msg)
else:
return ConditionResult(ConditionCategory.PASS, min_gain[1]) | null |
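The gain used by the condition above compares the trained model to the simple baseline relative to the remaining headroom up to a perfect score; a quick worked example with get_gain as defined earlier in this snippet:
# Simple model: 0.5, trained model: 0.75, perfect model: 1.0
# gain = (0.75 - 0.5) / (1.0 - 0.5) = 0.5, i.e. half of the possible improvement was achieved.
print(get_gain(base_score=0.5, score=0.75, perfect_score=1.0, max_gain=10))  # 0.5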
582 | import string
import typing as t
import warnings
from abc import abstractmethod
from collections import defaultdict
from numbers import Number
from secrets import choice
import numpy as np
import pandas as pd
from deepchecks.core import CheckResult, DatasetKind
from deepchecks.core.errors import DeepchecksProcessError, NotEnoughSamplesError
from deepchecks.utils.dataframes import hide_index_for_display
from deepchecks.utils.outliers import iqr_outliers_range
from deepchecks.utils.strings import format_number
from deepchecks.vision.base_checks import SingleDatasetCheck
from deepchecks.vision.context import Context
from deepchecks.vision.utils.image_functions import draw_image
from deepchecks.vision.utils.vision_properties import PropertiesInputType
from deepchecks.vision.vision_data import TaskType, VisionData
from deepchecks.vision.vision_data.batch_wrapper import BatchWrapper
def _is_list_of_numbers(l):
return not any(i is not None and not isinstance(i, Number) for i in l)
class DeepchecksProcessError(DeepchecksBaseError):
"""Exception class that represents an issue with a process."""
pass
The provided code snippet includes necessary dependencies for implementing the `_ensure_property_shape` function. Write a Python function `def _ensure_property_shape(property_values, data_len, prop_name)` to solve the following problem:
Validate the result of the property.
Here is the function:
def _ensure_property_shape(property_values, data_len, prop_name):
"""Validate the result of the property."""
if len(property_values) != data_len:
raise DeepchecksProcessError(f'Properties are expected to return value per image but instead got'
f' {len(property_values)} values for {data_len} images for property '
f'{prop_name}')
    # If the first item is a list, validate that all items are lists of numbers
if isinstance(property_values[0], t.Sequence):
if any((not isinstance(x, t.Sequence) for x in property_values)):
raise DeepchecksProcessError(f'Property result is expected to be either all lists or all scalars but'
f' got mix for property {prop_name}')
if any((not _is_list_of_numbers(x) for x in property_values)):
raise DeepchecksProcessError(f'For outliers, properties are expected to be only numeric types but'
f' found non-numeric value for property {prop_name}')
    # If the first value is not a list, validate that all items are numeric
elif not _is_list_of_numbers(property_values):
raise DeepchecksProcessError(f'For outliers, properties are expected to be only numeric types but'
f' found non-numeric value for property {prop_name}') | Validate the result of the property. |
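Two small calls that pass the validation above (returning None silently), plus one that would raise, assuming the helpers are in scope:
_ensure_property_shape([1.0, 2.5, 3.0], data_len=3, prop_name='brightness')        # scalar value per image
_ensure_property_shape([[1, 2], [3], [4, 5]], data_len=3, prop_name='bbox_areas')  # list of numbers per image
# _ensure_property_shape([1.0, 'a', 3.0], data_len=3, prop_name='mixed')           # would raise DeepchecksProcessError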
583 | import string
import typing as t
import warnings
from abc import abstractmethod
from collections import defaultdict
from numbers import Number
from secrets import choice
import numpy as np
import pandas as pd
from deepchecks.core import CheckResult, DatasetKind
from deepchecks.core.errors import DeepchecksProcessError, NotEnoughSamplesError
from deepchecks.utils.dataframes import hide_index_for_display
from deepchecks.utils.outliers import iqr_outliers_range
from deepchecks.utils.strings import format_number
from deepchecks.vision.base_checks import SingleDatasetCheck
from deepchecks.vision.context import Context
from deepchecks.vision.utils.image_functions import draw_image
from deepchecks.vision.utils.vision_properties import PropertiesInputType
from deepchecks.vision.vision_data import TaskType, VisionData
from deepchecks.vision.vision_data.batch_wrapper import BatchWrapper
def _sample_index_from_flatten_index(cumsum_lengths, flatten_index) -> int:
    # The cumulative sum of lengths holds the cumulative number of properties per image, so the first index whose
    # value is greater than the flatten index is the image index.
    # For example, if the cumulative sums are [1, 6, 11, 13, 16, 20] and the flatten index is 6, the property
    # belongs to the third image, i.e. index = 2.
return np.argwhere(cumsum_lengths > flatten_index)[0][0] | null |
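The example from the comment, executed (assuming the function and the numpy import above are in scope):
cumsum_lengths = np.array([1, 6, 11, 13, 16, 20])
print(_sample_index_from_flatten_index(cumsum_lengths, 6))  # 2 -> the third image
print(_sample_index_from_flatten_index(cumsum_lengths, 0))  # 0 -> the first image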
584 | import random
import sys
import typing as t
from collections import Counter
from enum import Enum
from numbers import Number
import numpy as np
from typing_extensions import NotRequired, TypedDict
from deepchecks.core.errors import DatasetValidationError
from deepchecks.utils.logger import get_logger
def is_torch_object(data_object) -> bool:
"""Check if data_object is a torch object without failing if torch isn't installed."""
return 'torch' in str(type(data_object))
def is_tensorflow_object(data_object) -> bool:
"""Check if data_object is a tensorflow object without failing if tensorflow isn't installed."""
return 'tensorflow' in str(type(data_object))
def get_logger() -> logging.Logger:
"""Retutn the deepchecks logger."""
return _logger
def get_data_loader_sequential(data_loader: DataLoader, shuffle: bool = False, n_samples=None) -> DataLoader:
"""Create new DataLoader with sampler of type IndicesSequentialSampler. This makes the data loader have \
consistent batches order."""
# First set generator seed to make it reproducible
if data_loader.generator:
data_loader.generator.set_state(torch.Generator().manual_seed(42).get_state())
indices = []
batch_sampler = data_loader.batch_sampler
# Using the batch sampler to get all indices
for batch in batch_sampler:
indices += batch
if shuffle:
indices = random.sample(indices, len(indices))
if n_samples is not None:
indices = indices[:n_samples]
# Create new sampler and batch sampler
sampler = IndicesSequentialSampler(indices)
new_batch_sampler = BatchSampler(sampler, batch_sampler.batch_size, batch_sampler.drop_last)
props = _get_data_loader_props(data_loader)
props['batch_sampler'] = new_batch_sampler
_collisions_removal_dataloader_props(props)
return data_loader.__class__(**props)
The provided code snippet includes necessary dependencies for implementing the `shuffle_loader` function. Write a Python function `def shuffle_loader(batch_loader)` to solve the following problem:
Reshuffle the batch loader.
Here is the function:
def shuffle_loader(batch_loader):
"""Reshuffle the batch loader."""
if is_torch_object(batch_loader) and 'DataLoader' in str(type(batch_loader)):
from deepchecks.vision.utils.test_utils import \
get_data_loader_sequential # pylint: disable=import-outside-toplevel
try:
_ = len(batch_loader)
return get_data_loader_sequential(data_loader=batch_loader, shuffle=True)
except Exception: # pylint: disable=broad-except
pass
elif is_tensorflow_object(batch_loader) and 'Dataset' in str(type(batch_loader)):
get_logger().warning('Shuffling for tensorflow datasets is not supported. Make sure that the data used to '
'create the Dataset was shuffled beforehand and set shuffle_batch_loader=False')
return batch_loader
get_logger().warning('Shuffling is not supported for received batch loader. Make sure that your provided '
'batch loader is indeed shuffled and set shuffle_batch_loader=False')
return batch_loader | Reshuffle the batch loader. |
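A short usage sketch for the torch path, assuming torch and deepchecks are installed so that the get_data_loader_sequential import inside the function resolves:
import torch
from torch.utils.data import DataLoader, TensorDataset

loader = DataLoader(TensorDataset(torch.arange(16)), batch_size=4, shuffle=False)
shuffled = shuffle_loader(loader)  # a new DataLoader with a shuffled but fixed batch order
print([batch[0].tolist() for batch in shuffled])  # same 16 items, new order, still 4 per batch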
585 | import random
import sys
import typing as t
from collections import Counter
from enum import Enum
from numbers import Number
import numpy as np
from typing_extensions import NotRequired, TypedDict
from deepchecks.core.errors import DatasetValidationError
from deepchecks.utils.logger import get_logger
class TaskType(Enum):
"""Enum containing supported task types."""
CLASSIFICATION = 'classification'
OBJECT_DETECTION = 'object_detection'
SEMANTIC_SEGMENTATION = 'semantic_segmentation'
OTHER = 'other'
def values(cls):
"""Return all values of the enum."""
return [e.value for e in TaskType]
The provided code snippet includes necessary dependencies for implementing the `get_class_ids_from_numpy_labels` function. Write a Python function `def get_class_ids_from_numpy_labels(labels: t.Sequence[t.Union[np.ndarray, int]], task_type: TaskType) -> t.Dict[int, int]` to solve the following problem:
Return the number of images containing each class_id. Returns ------- Dict[int, int] A dictionary mapping each class_id to the number of images containing it.
Here is the function:
def get_class_ids_from_numpy_labels(labels: t.Sequence[t.Union[np.ndarray, int]], task_type: TaskType) \
-> t.Dict[int, int]:
"""Return the number of images containing each class_id.
Returns
-------
Dict[int, int]
A dictionary mapping each class_id to the number of images containing it.
"""
if task_type == TaskType.CLASSIFICATION:
return Counter(labels)
elif task_type == TaskType.OBJECT_DETECTION:
class_ids_per_image = [label[:, 0] for label in labels if label is not None and len(label.shape) == 2]
return Counter(np.hstack(class_ids_per_image)) if len(class_ids_per_image) > 0 else {}
elif task_type == TaskType.SEMANTIC_SEGMENTATION:
labels_per_image = [np.unique(label) for label in labels if label is not None]
return Counter(np.hstack(labels_per_image))
else:
raise ValueError(f'Unsupported task type: {task_type}') | Return the number of images containing each class_id. Returns ------- Dict[int, int] A dictionary mapping each class_id to the number of images containing it. |
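An illustrative call for the object-detection case (the label arrays are fabricated):
import numpy as np
labels = [
    np.array([[0, 10, 10, 20, 20], [1, 5, 5, 10, 10]]),  # two boxes: class 0 and class 1
    np.array([[1, 0, 0, 30, 30]]),                       # one box of class 1
    None,                                                # image without labels is skipped
]
print(get_class_ids_from_numpy_labels(labels, TaskType.OBJECT_DETECTION))  # Counter({1.0: 2, 0.0: 1})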
586 | import random
import sys
import typing as t
from collections import Counter
from enum import Enum
from numbers import Number
import numpy as np
from typing_extensions import NotRequired, TypedDict
from deepchecks.core.errors import DatasetValidationError
from deepchecks.utils.logger import get_logger
class TaskType(Enum):
"""Enum containing supported task types."""
CLASSIFICATION = 'classification'
OBJECT_DETECTION = 'object_detection'
SEMANTIC_SEGMENTATION = 'semantic_segmentation'
OTHER = 'other'
def values(cls):
"""Return all values of the enum."""
return [e.value for e in TaskType]
The provided code snippet includes necessary dependencies for implementing the `get_class_ids_from_numpy_preds` function. Write a Python function `def get_class_ids_from_numpy_preds(predictions: t.Sequence[t.Union[np.ndarray]], task_type: TaskType) -> t.Dict[int, int]` to solve the following problem:
Return the number of images containing each class_id. Returns ------- Dict[int, int] A dictionary mapping each class_id to the number of images containing it.
Here is the function:
def get_class_ids_from_numpy_preds(predictions: t.Sequence[t.Union[np.ndarray]], task_type: TaskType) \
-> t.Dict[int, int]:
"""Return the number of images containing each class_id.
Returns
-------
Dict[int, int]
A dictionary mapping each class_id to the number of images containing it.
"""
if task_type == TaskType.CLASSIFICATION:
return Counter([np.argmax(x) for x in predictions])
elif task_type == TaskType.OBJECT_DETECTION:
class_ids_per_image = [pred[:, 5] for pred in predictions if pred is not None and len(pred.shape) == 2]
return Counter(np.hstack(class_ids_per_image))
elif task_type == TaskType.SEMANTIC_SEGMENTATION:
classes_predicted_per_image = \
[np.unique(np.argmax(pred, axis=0)) for pred in predictions if pred is not None]
return Counter(np.hstack(classes_predicted_per_image))
else:
raise ValueError(f'Unsupported task type: {task_type}') | Return the number of images containing each class_id. Returns ------- Dict[int, int] A dictionary mapping each class_id to the number of images containing it. |
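An illustrative call for the classification case (the probability vectors are fabricated):
import numpy as np
predictions = [np.array([0.1, 0.7, 0.2]), np.array([0.8, 0.1, 0.1])]
print(get_class_ids_from_numpy_preds(predictions, TaskType.CLASSIFICATION))  # Counter({1: 1, 0: 1})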
587 | import random
import sys
import typing as t
from collections import Counter
from enum import Enum
from numbers import Number
import numpy as np
from typing_extensions import NotRequired, TypedDict
from deepchecks.core.errors import DatasetValidationError
from deepchecks.utils.logger import get_logger
The provided code snippet includes necessary dependencies for implementing the `set_seeds` function. Write a Python function `def set_seeds(seed: int)` to solve the following problem:
Set seeds for reproducibility. Parameters ---------- seed : int Seed to be set
Here is the function:
def set_seeds(seed: int):
"""Set seeds for reproducibility.
Parameters
----------
seed : int
Seed to be set
"""
if seed is not None and isinstance(seed, int):
np.random.seed(seed)
random.seed(seed)
if 'torch' in sys.modules:
import torch # pylint: disable=import-outside-toplevel
torch.manual_seed(seed)
if 'tensorflow' in sys.modules:
import tensorflow as tf # pylint: disable=import-outside-toplevel
tf.random.set_seed(seed) | Set seeds for reproducibility. Parameters ---------- seed : int Seed to be set |
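Typical usage before any sampling or training step that should be reproducible:
set_seeds(42)
print(np.random.rand(2), random.random())  # identical output on every run with the same seed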
588 | import random
import sys
import typing as t
from collections import Counter
from enum import Enum
from numbers import Number
import numpy as np
from typing_extensions import NotRequired, TypedDict
from deepchecks.core.errors import DatasetValidationError
from deepchecks.utils.logger import get_logger
class DatasetValidationError(DeepchecksBaseError):
"""Represents unappropriate Dataset instance.
Should be used in a situation when a routine (like check instance, utility function, etc)
expected and received a dataset instance that did not meet routine requirements.
"""
pass
The provided code snippet includes necessary dependencies for implementing the `validate_vision_data_compatibility` function. Write a Python function `def validate_vision_data_compatibility(first, second) -> None` to solve the following problem:
Validate that two vision datasets are compatible. Raises: DeepchecksValueError: if the datasets are not compatible
Here is the function:
def validate_vision_data_compatibility(first, second) -> None:
"""Validate that two vision datasets are compatible.
Raises:
DeepchecksValueError: if the datasets are not compatible
"""
# TODO: add more validations
if first.task_type != second.task_type:
raise DatasetValidationError('Cannot compare datasets with different task types: '
f'{first.task_type.value} and {second.task_type.value}') | Validate that two vision datasets are compatible. Raises: DeepchecksValueError: if the datasets are not compatible |
589 | import typing as t
from pathlib import Path
import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader
from typing_extensions import Literal
from deepchecks.vision.vision_data import VisionData
class SimpleClassificationDataset(VisionDataset):
"""Simple VisionDataset type for the classification tasks.
The current class expects that data within the root folder
will be structured the following way:
- root/
- class1/
image1.jpeg
Otherwise, exception will be raised.
Parameters
----------
root : str
Path to the root directory of the dataset.
transforms : Callable, optional
A function/transform that takes in an PIL image and returns a transformed version.
E.g, transforms.RandomCrop
transform : Callable, optional
A function/transforms that takes in an image and a label and returns the
transformed versions of both.
E.g, ``transforms.Rotate``
target_transform : Callable, optional
A function/transform that takes in the target and transforms it.
image_extension : str, default 'jpg'
images format
"""
def __init__(
self,
root: str,
transforms: t.Optional[t.Callable] = None,
transform: t.Optional[t.Callable] = None,
target_transform: t.Optional[t.Callable] = None,
image_extension: str = 'jpg'
) -> None:
self.root_path = Path(root).absolute()
if not (self.root_path.exists() and self.root_path.is_dir()):
raise ValueError(f'{self.root_path} - path does not exist or is not a folder')
super().__init__(str(self.root_path), transforms, transform, target_transform)
self.image_extension = image_extension.lower()
self.images = sorted(self.root_path.glob(f'*/*.{self.image_extension}'))
if len(self.images) == 0:
raise ValueError(f'{self.root_path} - is empty or has incorrect structure')
classes = {img.parent.name for img in self.images}
classes = sorted(list(classes))
# class label -> class index
self.classes_map = t.cast(t.Dict[str, int], dict(zip(classes, range(len(classes)))))
# class index -> class label
self.reverse_classes_map = t.cast(t.Dict[int, str], {
v: k
for k, v in self.classes_map.items()
})
def __getitem__(self, index: int) -> t.Tuple[np.ndarray, int]:
"""Get the image and label at the given index."""
image_file = self.images[index]
image = cv2.imread(str(image_file))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
target = self.classes_map[image_file.parent.name]
if self.transforms is not None:
transformed = self.transforms(image=image, target=target)
image, target = transformed['image'], transformed['target']
else:
if self.transform is not None:
image = self.transform(image=image)['image']
if self.target_transform is not None:
target = self.target_transform(target)
return image, target
def __len__(self) -> int:
"""Return the number of images in the dataset."""
return len(self.images)
def deepchecks_collate(batch):
"""Process batch to deepchecks format."""
imgs, labels = zip(*batch)
return {'images': list(imgs), 'labels': list(labels)}
The provided code snippet includes necessary dependencies for implementing the `classification_dataset_from_directory` function. Write a Python function `def classification_dataset_from_directory( root: str, batch_size: int = 32, num_workers: int = 0, shuffle: bool = True, pin_memory: bool = True, object_type: Literal[VisionData, DataLoader] = 'DataLoader', **kwargs ) -> t.Union[t.Tuple[t.Union[DataLoader, VisionData]], t.Union[DataLoader, VisionData]]` to solve the following problem:
Load a simple classification dataset. The function expects that the data within the root folder to be structured one of the following ways: - root/ - class1/ image1.jpeg - root/ - train/ - class1/ image1.jpeg - test/ - class1/ image1.jpeg Parameters ---------- root : str path to the data batch_size : int, default: 32 Batch size for the dataloader. num_workers : int, default: 0 Number of workers for the dataloader. shuffle : bool, default: True Whether to shuffle the dataset. pin_memory : bool, default: True If ``True``, the data loader will copy Tensors into CUDA pinned memory before returning them. object_type : Literal['Dataset', 'DataLoader'], default: 'DataLoader' type of the return value. If 'Dataset', :obj:`deepchecks.vision.VisionData` will be returned, otherwise :obj:`torch.utils.data.DataLoader` Returns ------- t.Union[t.Tuple[t.Union[DataLoader, vision.ClassificationData]], t.Union[DataLoader, vision.ClassificationData]] A DataLoader or VisionDataset instance or tuple representing a single dataset or train and test datasets.
Here is the function:
def classification_dataset_from_directory(
root: str,
batch_size: int = 32,
num_workers: int = 0,
shuffle: bool = True,
pin_memory: bool = True,
object_type: Literal[VisionData, DataLoader] = 'DataLoader',
**kwargs
) -> t.Union[t.Tuple[t.Union[DataLoader, VisionData]], t.Union[DataLoader, VisionData]]:
"""Load a simple classification dataset.
The function expects the data within the root folder
to be structured in one of the following ways:
- root/
- class1/
image1.jpeg
- root/
- train/
- class1/
image1.jpeg
- test/
- class1/
image1.jpeg
Parameters
----------
root : str
path to the data
batch_size : int, default: 32
Batch size for the dataloader.
num_workers : int, default: 0
Number of workers for the dataloader.
shuffle : bool, default: True
Whether to shuffle the dataset.
pin_memory : bool, default: True
If ``True``, the data loader will copy Tensors
into CUDA pinned memory before returning them.
object_type : Literal['Dataset', 'DataLoader'], default: 'DataLoader'
type of the return value. If 'Dataset', :obj:`deepchecks.vision.VisionData`
will be returned, otherwise :obj:`torch.utils.data.DataLoader`
Returns
-------
t.Union[t.Tuple[t.Union[DataLoader, vision.ClassificationData]], t.Union[DataLoader, vision.ClassificationData]]
A DataLoader or VisionDataset instance or tuple representing a single dataset or train and test datasets.
"""
def batch_collate(batch):
imgs, labels = zip(*batch)
return list(imgs), list(labels)
root_path = Path(root).absolute()
if not (root_path.exists() and root_path.is_dir()):
raise ValueError(f'{root_path} - path does not exist or is not a folder')
roots_of_datasets = []
if root_path.joinpath('train').exists():
roots_of_datasets.append(root_path.joinpath('train'))
if root_path.joinpath('test').exists():
roots_of_datasets.append(root_path.joinpath('test'))
if len(roots_of_datasets) == 0:
roots_of_datasets.append(root_path)
result = []
for dataset_root in roots_of_datasets:
dataset = SimpleClassificationDataset(root=str(dataset_root), **kwargs)
if object_type == 'DataLoader':
dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers,
collate_fn=batch_collate, pin_memory=pin_memory, generator=torch.Generator())
result.append(dataloader)
elif object_type == 'VisionData':
dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers,
collate_fn=deepchecks_collate, pin_memory=pin_memory, generator=torch.Generator())
result.append(VisionData(batch_loader=dataloader, label_map=dataset.reverse_classes_map,
task_type='classification'))
else:
raise TypeError(f'Unknown value of object_type - {object_type}')
return tuple(result) if len(result) > 1 else result[0] | Load a simple classification dataset. The function expects that the data within the root folder to be structured one of the following ways: - root/ - class1/ image1.jpeg - root/ - train/ - class1/ image1.jpeg - test/ - class1/ image1.jpeg Parameters ---------- root : str path to the data batch_size : int, default: 32 Batch size for the dataloader. num_workers : int, default: 0 Number of workers for the dataloader. shuffle : bool, default: True Whether to shuffle the dataset. pin_memory : bool, default: True If ``True``, the data loader will copy Tensors into CUDA pinned memory before returning them. object_type : Literal['Dataset', 'DataLoader'], default: 'DataLoader' type of the return value. If 'Dataset', :obj:`deepchecks.vision.VisionData` will be returned, otherwise :obj:`torch.utils.data.DataLoader` Returns ------- t.Union[t.Tuple[t.Union[DataLoader, vision.ClassificationData]], t.Union[DataLoader, vision.ClassificationData]] A DataLoader or VisionDataset instance or tuple representing a single dataset or train and test datasets. |
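A usage sketch with a hypothetical directory layout (the './my_images' path is an assumption, not shipped data):
# ./my_images/train/cat/*.jpg, ./my_images/train/dog/*.jpg
# ./my_images/test/cat/*.jpg,  ./my_images/test/dog/*.jpg
train_data, test_data = classification_dataset_from_directory(
    root='./my_images', batch_size=16, object_type='VisionData')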
590 | from numbers import Number
from typing import Iterable
import numpy as np
from deepchecks.core.errors import ValidationError
from deepchecks.vision.vision_data import TaskType
from deepchecks.vision.vision_data.utils import object_to_numpy, sequence_to_numpy
class ValidationError(DeepchecksBaseError):
"""Represents more specific case of the ValueError (DeepchecksValueError)."""
pass
def object_to_numpy(data, expected_dtype=None, expected_ndim=None) -> t.Union[np.ndarray, Number, str]:
"""Convert an object to a numpy object.
Returns
-------
t.Union[np.ndarray, Number, str]
A numpy object or a single object (number/str) for provided data.
"""
if data is None:
return None
if is_torch_object(data):
result = data.cpu().detach().numpy()
elif is_tensorflow_object(data):
result = data.cpu().numpy()
elif isinstance(data, np.ndarray):
result = data
elif isinstance(data, (Number, str)):
return data
else:
result = np.array(data)
if expected_dtype is not None:
result = result.astype(expected_dtype)
if len(result.shape) == 0:
result = result.item()
elif len(result.shape) == 1 and result.shape[0] > 0 and expected_ndim == 2:
result = result.reshape(1, result.shape[0])
return result
The provided code snippet includes necessary dependencies for implementing the `validate_images_format` function. Write a Python function `def validate_images_format(images)` to solve the following problem:
Validate that the data is in the required format. Parameters ---------- images The images of the batch. Result of VisionData's batch_to_images Raises ------ DeepchecksValueError If the images doesn't fit the required deepchecks format.
Here is the function:
def validate_images_format(images):
"""Validate that the data is in the required format.
Parameters
----------
images
The images of the batch. Result of VisionData's batch_to_images
Raises
------
DeepchecksValueError
If the images doesn't fit the required deepchecks format.
"""
try:
image = object_to_numpy(images[0], expected_ndim=3)
except TypeError as err:
raise ValidationError(f'The batch images must be an iterable, received {type(images)}.') from err
try:
if len(image.shape) != 3:
raise ValidationError('The image inside the iterable must be a 3D array.')
except Exception as err:
raise ValidationError('The image inside the iterable must be a 3D array.') from err
if image.shape[2] not in [1, 3]:
raise ValidationError('The image inside the iterable must have 1 or 3 channels.')
sample_min = np.min(image)
sample_max = np.max(image)
if sample_min < 0 or sample_max > 255 or sample_max <= 1:
raise ValidationError(f'Image data should be in uint8 format (integers between 0 and 255), '
f'found values in range [{sample_min}, {sample_max}].') | Validate that the data is in the required format. Parameters ---------- images The images of the batch. Result of VisionData's batch_to_images Raises ------ DeepchecksValueError If the images doesn't fit the required deepchecks format. |
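An illustrative check with fabricated batches:
import numpy as np
valid_batch = [np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)]
validate_images_format(valid_batch)  # passes silently
try:
    validate_images_format([np.random.rand(64, 64, 3)])  # floats in [0, 1] are rejected
except ValidationError as err:
    print(err)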
591 | from numbers import Number
from typing import Iterable
import numpy as np
from deepchecks.core.errors import ValidationError
from deepchecks.vision.vision_data import TaskType
from deepchecks.vision.vision_data.utils import object_to_numpy, sequence_to_numpy
def _validate_predictions_label_common_format(name, data, task_type: TaskType):
"""Validate that the data is in the required format and returns a non-empty sample in numpy format."""
name_plural = name + 's'
if task_type == TaskType.OTHER:
return None
try:
_ = data[0]
data = sequence_to_numpy(data)
except (IndexError, KeyError, TypeError) as err:
raise ValidationError(f'The batch {name_plural} must be a non empty iterable.') from err
sample_idx = 0
while data[sample_idx] is None or (isinstance(data[sample_idx], np.ndarray) and data[sample_idx].shape[0] == 0):
sample_idx += 1
if sample_idx == len(data):
return None # No data to validate
if task_type == TaskType.CLASSIFICATION:
return data[sample_idx] # for classification, the data is a single number no need to validate shape
try:
sample_shape = data[sample_idx].shape
except AttributeError as err:
raise ValidationError(f'{name} for {task_type.value} per image must be a multi dimensional array.') from err
if task_type == TaskType.OBJECT_DETECTION and len(sample_shape) not in (1, 2):
raise ValidationError(f'{name} for object detection per image must be a 2D array. Found shape {sample_shape}')
return data[sample_idx]
class ValidationError(DeepchecksBaseError):
"""Represents more specific case of the ValueError (DeepchecksValueError)."""
pass
The provided code snippet includes necessary dependencies for implementing the `validate_labels_format` function. Write a Python function `def validate_labels_format(labels, task_type: TaskType)` to solve the following problem:
Validate that the labels are in the required format based on task_type. Parameters ---------- labels The labels of the batch. Result of VisionData's batch_to_labels task_type: TaskType The task type of the model Raises ------ DeepchecksValueError If the labels doesn't fit the required deepchecks format.
Here is the function:
def validate_labels_format(labels, task_type: TaskType):
"""Validate that the labels are in the required format based on task_type.
Parameters
----------
labels
The labels of the batch. Result of VisionData's batch_to_labels
task_type: TaskType
The task type of the model
Raises
------
DeepchecksValueError
If the labels doesn't fit the required deepchecks format.
"""
single_image_label = _validate_predictions_label_common_format('label', labels, task_type)
if task_type == TaskType.CLASSIFICATION and single_image_label is not None:
if not isinstance(single_image_label, Number):
raise ValidationError('Classification label per image must be a number.')
elif task_type == TaskType.OBJECT_DETECTION and single_image_label is not None:
error_msg = 'Object detection label per image must be a sequence of 2D arrays, where each row ' \
'has 5 columns: [class_id, x_min, y_min, width, height]'
if single_image_label.shape[1] != 5:
raise ValidationError(f'{error_msg}')
if np.min(single_image_label) < 0:
raise ValidationError(f'Found one of coordinates to be negative, {error_msg}')
if np.max(single_image_label[:, 0] % 1) > 0:
raise ValidationError(f'Class_id must be a positive integer. {error_msg}')
elif task_type == TaskType.SEMANTIC_SEGMENTATION and single_image_label is not None:
if len(single_image_label.shape) != 2:
raise ValidationError('Semantic segmentation label per image must be a 2D array of shape (H, W), '
'where H and W are the height and width of the corresponding image.')
if np.max(single_image_label % 1) > 0:
raise ValidationError('In semantic segmentation, each pixel in the label should represent a '
'class_id and must be a positive integer.')
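An illustrative object-detection call (the label arrays are fabricated):
import numpy as np
labels = [np.array([[0, 10, 10, 50, 50], [2, 0, 0, 20, 20]])]  # rows of [class_id, x_min, y_min, w, h]
validate_labels_format(labels, TaskType.OBJECT_DETECTION)  # passes
try:
    validate_labels_format([np.array([[0, -5, 10, 50, 50]])], TaskType.OBJECT_DETECTION)
except ValidationError as err:
    print(err)  # negative coordinates are rejected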
592 | from numbers import Number
from typing import Iterable
import numpy as np
from deepchecks.core.errors import ValidationError
from deepchecks.vision.vision_data import TaskType
from deepchecks.vision.vision_data.utils import object_to_numpy, sequence_to_numpy
def _validate_predictions_label_common_format(name, data, task_type: TaskType):
"""Validate that the data is in the required format and returns a non-empty sample in numpy format."""
name_plural = name + 's'
if task_type == TaskType.OTHER:
return None
try:
_ = data[0]
data = sequence_to_numpy(data)
except (IndexError, KeyError, TypeError) as err:
raise ValidationError(f'The batch {name_plural} must be a non empty iterable.') from err
sample_idx = 0
while data[sample_idx] is None or (isinstance(data[sample_idx], np.ndarray) and data[sample_idx].shape[0] == 0):
sample_idx += 1
if sample_idx == len(data):
return None # No data to validate
if task_type == TaskType.CLASSIFICATION:
return data[sample_idx] # for classification, the data is a single number no need to validate shape
try:
sample_shape = data[sample_idx].shape
except AttributeError as err:
raise ValidationError(f'{name} for {task_type.value} per image must be a multi dimensional array.') from err
if task_type == TaskType.OBJECT_DETECTION and len(sample_shape) not in (1, 2):
raise ValidationError(f'{name} for object detection per image must be a 2D array. Found shape {sample_shape}')
return data[sample_idx]
class ValidationError(DeepchecksBaseError):
"""Represents more specific case of the ValueError (DeepchecksValueError)."""
pass
The provided code snippet includes necessary dependencies for implementing the `validate_predictions_format` function. Write a Python function `def validate_predictions_format(predictions, task_type: TaskType)` to solve the following problem:
Validate that the predictions are in the required format based on task_type. Parameters ---------- predictions The predictions of the batch. Result of VisionData's batch_to_predictions task_type: TaskType The task type of the model Raises ------ DeepchecksValueError If the predictions doesn't fit the required deepchecks format.
Here is the function:
def validate_predictions_format(predictions, task_type: TaskType):
"""Validate that the predictions are in the required format based on task_type.
Parameters
----------
predictions
The predictions of the batch. Result of VisionData's batch_to_predictions
task_type: TaskType
The task type of the model
Raises
------
DeepchecksValueError
If the predictions doesn't fit the required deepchecks format.
"""
single_image_pred = _validate_predictions_label_common_format('prediction', predictions, task_type)
if task_type == TaskType.CLASSIFICATION and single_image_pred is not None:
if not isinstance(single_image_pred, np.ndarray) or not isinstance(single_image_pred[0], Number) or \
not 0.99 < np.sum(single_image_pred) < 1.01:
raise ValidationError('Classification prediction per image must be a sequence of floats representing '
'probabilities per class.')
elif task_type == TaskType.OBJECT_DETECTION and single_image_pred is not None:
error_msg = 'Object detection prediction per image must be a sequence of 2D arrays, where each row ' \
'has 6 columns: [x_min, y_min, w, h, confidence, class_id]'
if single_image_pred.shape[1] != 6:
raise ValidationError(error_msg)
if np.min(single_image_pred) < 0:
raise ValidationError(f'Found one of coordinates to be negative, {error_msg}')
if np.max(single_image_pred[:, 5] % 1) > 0:
raise ValidationError(f'Class_id must be a positive integer. {error_msg}')
elif task_type == TaskType.SEMANTIC_SEGMENTATION and single_image_pred is not None:
if len(single_image_pred.shape) != 3:
raise ValidationError('Semantic segmentation prediction per image must be a 3D array of shape (C, H, W), '
'where H and W are the height and width of the corresponding image, and C is the '
'number of classes that can be detected.')
if not 0.99 < np.sum(single_image_pred[:, 0][:, 0]) < 1.01:
raise ValidationError('Semantic segmentation prediction per pixel should be a probability '
'distribution over the possible classes.')
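An illustrative classification call (the probability vectors are fabricated):
import numpy as np
validate_predictions_format([np.array([0.2, 0.5, 0.3])], TaskType.CLASSIFICATION)  # passes
try:
    validate_predictions_format([np.array([1.2, 0.5])], TaskType.CLASSIFICATION)
except ValidationError as err:
    print(err)  # values do not sum to ~1, so they are not class probabilities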
593 | from numbers import Number
from typing import Iterable
import numpy as np
from deepchecks.core.errors import ValidationError
from deepchecks.vision.vision_data import TaskType
from deepchecks.vision.vision_data.utils import object_to_numpy, sequence_to_numpy
class ValidationError(DeepchecksBaseError):
"""Represents more specific case of the ValueError (DeepchecksValueError)."""
pass
The provided code snippet includes necessary dependencies for implementing the `validate_additional_data_format` function. Write a Python function `def validate_additional_data_format(additional_data_batch)` to solve the following problem:
Validate that the data is in the required format. Parameters ---------- additional_data_batch The additional data of the batch. Result of VisionData's batch_to_additional_data Raises ------ DeepchecksValueError If the images doesn't fit the required deepchecks format.
Here is the function:
def validate_additional_data_format(additional_data_batch):
"""Validate that the data is in the required format.
Parameters
----------
additional_data_batch
The additional data of the batch. Result of VisionData's batch_to_additional_data
Raises
------
DeepchecksValueError
If the images doesn't fit the required deepchecks format.
"""
if not isinstance(additional_data_batch, Iterable):
raise ValidationError('The batch additional_data must be an iterable.') | Validate that the data is in the required format. Parameters ---------- additional_data_batch The additional data of the batch. Result of VisionData's batch_to_additional_data Raises ------ DeepchecksValueError If the images doesn't fit the required deepchecks format. |
594 | from numbers import Number
from typing import Iterable
import numpy as np
from deepchecks.core.errors import ValidationError
from deepchecks.vision.vision_data import TaskType
from deepchecks.vision.vision_data.utils import object_to_numpy, sequence_to_numpy
class ValidationError(DeepchecksBaseError):
"""Represents more specific case of the ValueError (DeepchecksValueError)."""
pass
The provided code snippet includes necessary dependencies for implementing the `validate_embeddings_format` function. Write a Python function `def validate_embeddings_format(embeddings)` to solve the following problem:
Validate that the data is in the required format. Parameters ---------- embeddings The embeddings of the batch. Result of VisionData's batch_to_embeddings Raises ------ DeepchecksValueError If the images doesn't fit the required deepchecks format.
Here is the function:
def validate_embeddings_format(embeddings):
"""Validate that the data is in the required format.
Parameters
----------
embeddings
The embeddings of the batch. Result of VisionData's batch_to_embeddings
Raises
------
DeepchecksValueError
If the images doesn't fit the required deepchecks format.
"""
if not isinstance(embeddings, Iterable):
raise ValidationError('The batch embeddings must be an iterable.') | Validate that the data is in the required format. Parameters ---------- embeddings The embeddings of the batch. Result of VisionData's batch_to_embeddings Raises ------ DeepchecksValueError If the images doesn't fit the required deepchecks format. |
595 | from numbers import Number
from typing import Iterable
import numpy as np
from deepchecks.core.errors import ValidationError
from deepchecks.vision.vision_data import TaskType
from deepchecks.vision.vision_data.utils import object_to_numpy, sequence_to_numpy
class ValidationError(DeepchecksBaseError):
"""Represents more specific case of the ValueError (DeepchecksValueError)."""
pass
The provided code snippet includes necessary dependencies for implementing the `validate_image_identifiers_format` function. Write a Python function `def validate_image_identifiers_format(image_identifiers)` to solve the following problem:
Validate that the data is in the required format. Parameters ---------- image_identifiers The image identifiers of the batch. Result of VisionData's batch_to_image_identifiers Raises ------ DeepchecksValueError If the images doesn't fit the required deepchecks format.
Here is the function:
def validate_image_identifiers_format(image_identifiers):
"""Validate that the data is in the required format.
Parameters
----------
image_identifiers
The image identifiers of the batch. Result of VisionData's batch_to_image_identifiers
Raises
------
DeepchecksValueError
If the images doesn't fit the required deepchecks format.
"""
try:
sample = image_identifiers[0]
except TypeError as err:
raise ValidationError('The batch image_identifiers must be an iterable.') from err
if not isinstance(sample, str):
raise ValidationError('The image identifier inside the iterable must be a string.') | Validate that the data is in the required format. Parameters ---------- image_identifiers The image identifiers of the batch. Result of VisionData's batch_to_image_identifiers Raises ------ DeepchecksValueError If the images doesn't fit the required deepchecks format. |
596 | import contextlib
import os
import pathlib
import typing as t
from pathlib import Path
import albumentations as A
import numpy as np
import torch
import torchvision.transforms.functional as F
from albumentations.pytorch.transforms import ToTensorV2
from PIL import Image, ImageDraw
from torch import nn
from torch.utils.data import DataLoader
from typing_extensions import Literal
from deepchecks.vision.utils.test_utils import get_data_loader_sequential
from deepchecks.vision.vision_data import BatchOutputFormat, VisionData
from deepchecks.vision.vision_data.utils import object_to_numpy
DATA_DIR = pathlib.Path(__file__).absolute().parent.parent / 'assets' / 'coco_segmentation'
def load_model(pretrained: bool = True) -> nn.Module:
"""Load the lraspp_mobilenet_v3_large model and return it."""
model = lraspp_mobilenet_v3_large(pretrained=pretrained, progress=False)
_ = model.eval()
return model
def _batch_collate(batch):
"""Get list of samples from `CocoSegmentDataset` and combine them to a batch."""
images, masks = zip(*batch)
return list(images), list(masks)
def deepchecks_collate(model) -> t.Callable:
"""Process batch to deepchecks format.
Parameters
----------
model : nn.Module
model to predict with
Returns
-------
BatchOutputFormat
batch of data in deepchecks format
"""
def _process_batch_to_deepchecks_format(data) -> BatchOutputFormat:
raw_images = [x[0] for x in data]
images = [object_to_numpy(tensor).transpose((1, 2, 0)) for tensor in raw_images]
labels = [x[1] for x in data]
normalized_batch = [F.normalize(img.unsqueeze(0).float() / 255,
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) for img in raw_images]
predictions = [model(img)['out'].squeeze(0).detach() for img in normalized_batch]
predictions = [torch.nn.functional.softmax(pred, dim=0) for pred in predictions]
return {'images': images, 'labels': labels, 'predictions': predictions}
return _process_batch_to_deepchecks_format
class CocoSegmentationDataset(VisionDataset):
"""An instance of PyTorch VisionData the represents the COCO128-segments dataset.
Uses only the 21 categories used also by Pascal-VOC, in order to match the model supplied in this file,
torchvision's deeplabv3_mobilenet_v3_large.
Parameters
----------
root : str
Path to the root directory of the dataset.
name : str
Name of the dataset.
train : bool
if `True` train dataset, otherwise test dataset
transforms : Callable, optional
A function/transform that takes in an PIL image and returns a transformed version.
E.g, transforms.RandomCrop
"""
TRAIN_FRACTION = 0.5
def __init__(
self,
root: str,
name: str,
train: bool = True,
transforms: t.Optional[t.Callable] = None,
test_mode: bool = False
) -> None:
super().__init__(root, transforms=transforms)
self.train = train
self.root = Path(root).absolute()
self.images_dir = Path(root) / 'images' / name
self.labels_dir = Path(root) / 'labels' / name
all_images: t.List[Path] = sorted(self.images_dir.glob('./*.jpg'))
images: t.List[Path] = []
labels: t.List[t.Optional[Path]] = []
for i in range(len(all_images)):
label = self.labels_dir / f'{all_images[i].stem}.txt'
if label.exists():
polygons = label.open('r').read().strip().splitlines()
relevant_labels = [polygon.split()[0] for polygon in polygons]
relevant_labels = [class_id for class_id in relevant_labels if int(class_id) in
COCO_TO_PASCAL_VOC]
if len(relevant_labels) > 0:
images.append(all_images[i])
labels.append(label)
assert len(images) != 0, 'Did not find folder with images or it was empty'
assert not all(l is None for l in labels), 'Did not find folder with labels or it was empty'
train_len = int(self.TRAIN_FRACTION * len(images))
if test_mode is True:
if self.train is True:
self.images = images[0:5] * 2
self.labels = labels[0:5] * 2
else:
self.images = images[1:6] * 2
self.labels = labels[1:6] * 2
else:
if self.train is True:
self.images = images[0:train_len]
self.labels = labels[0:train_len]
else:
self.images = images[train_len:]
self.labels = labels[train_len:]
def __getitem__(self, idx: int) -> t.Tuple[torch.Tensor, torch.Tensor]:
"""Get the image and label at the given index."""
image = Image.open(str(self.images[idx]))
label_file = self.labels[idx]
masks = []
classes = []
if label_file is not None:
for label_str in label_file.open('r').read().strip().splitlines():
label = np.array(label_str.split(), dtype=np.float32)
class_id = int(label[0])
if class_id in COCO_TO_PASCAL_VOC:
# Transform normalized coordinates to un-normalized
coordinates = (label[1:].reshape(-1, 2) * np.array([image.width, image.height])).reshape(
-1).tolist()
# Create mask image
mask = Image.new('L', (image.width, image.height), 0)
ImageDraw.Draw(mask).polygon(coordinates, outline=1, fill=1)
# Add to list
masks.append(np.array(mask, dtype=bool))
classes.append(COCO_TO_PASCAL_VOC[class_id])
if self.transforms is not None:
# Albumentations accepts images as numpy
transformed = self.transforms(image=np.array(image), masks=masks)
image = transformed['image']
masks = transformed['masks']
# Transform masks to tensor of (num_masks, H, W)
if masks:
if isinstance(masks[0], np.ndarray):
masks = [torch.from_numpy(m) for m in masks]
masks = torch.stack(masks)
else:
masks = torch.empty((0, 3))
# Fake grayscale to rgb because model can't process grayscale:
if image.shape[0] == 1:
image = torch.stack([image[0], image[0], image[0]])
ret_label = np.zeros((image.shape[1], image.shape[2]))
ret_label_mask = np.zeros(ret_label.shape)
for i in range(len(classes)):
mask = np.logical_and(np.logical_not(ret_label_mask), np.array(masks[i]))
ret_label_mask = np.logical_or(ret_label_mask, mask)
ret_label += classes[i] * mask
return image, torch.as_tensor(ret_label)
def __len__(self):
"""Return the number of images in the dataset."""
return len(self.images)
def load_or_download(cls, train: bool, root: Path = DATA_DIR, test_mode: bool = False) -> 'CocoSegmentationDataset':
"""Load or download the coco128 dataset with segment annotations."""
extract_dir = root / 'coco128segments'
coco_dir = root / 'coco128segments' / 'coco128-seg'
folder = 'train2017'
if not coco_dir.exists():
url = 'https://ndownloader.figshare.com/files/37650656'
with open(os.devnull, 'w', encoding='utf8') as f, contextlib.redirect_stdout(f):
download_and_extract_archive(
url,
download_root=str(root),
extract_root=str(extract_dir),
filename='coco128-segments.zip'
)
try:
# remove coco128 README.txt so that it does not come in docs
os.remove('coco128segments/coco128/README.txt')
except: # pylint: disable=bare-except # noqa
pass
return CocoSegmentationDataset(coco_dir, folder, train=train, transforms=A.Compose([ToTensorV2()]),
test_mode=test_mode)
LABEL_MAP = {0: 'background', 1: 'airplane', 2: 'bicycle', 3: 'bird', 4: 'boat', 5: 'bottle', 6: 'bus', 7: 'car',
8: 'cat', 9: 'chair', 10: 'cow', 11: 'dining table', 12: 'dog', 13: 'horse', 14: 'motorcycle',
15: 'person', 16: 'potted plant', 17: 'sheep', 18: 'couch', 19: 'train', 20: 'tv'}
def get_data_loader_sequential(data_loader: DataLoader, shuffle: bool = False, n_samples=None) -> DataLoader:
"""Create new DataLoader with sampler of type IndicesSequentialSampler. This makes the data loader have \
consistent batches order."""
# First set generator seed to make it reproducible
if data_loader.generator:
data_loader.generator.set_state(torch.Generator().manual_seed(42).get_state())
indices = []
batch_sampler = data_loader.batch_sampler
# Using the batch sampler to get all indices
for batch in batch_sampler:
indices += batch
if shuffle:
indices = random.sample(indices, len(indices))
if n_samples is not None:
indices = indices[:n_samples]
# Create new sampler and batch sampler
sampler = IndicesSequentialSampler(indices)
new_batch_sampler = BatchSampler(sampler, batch_sampler.batch_size, batch_sampler.drop_last)
props = _get_data_loader_props(data_loader)
props['batch_sampler'] = new_batch_sampler
_collisions_removal_dataloader_props(props)
return data_loader.__class__(**props)
The provided code snippet includes necessary dependencies for implementing the `load_dataset` function. Write a Python function `def load_dataset( train: bool = True, batch_size: int = 32, num_workers: int = 0, shuffle: bool = True, pin_memory: bool = True, object_type: Literal['VisionData', 'DataLoader'] = 'VisionData', test_mode: bool = False ) -> t.Union[DataLoader, VisionData]` to solve the following problem:
Get the COCO128 dataset and return a dataloader. Parameters ---------- train : bool, default: True if `True` train dataset, otherwise test dataset batch_size : int, default: 32 Batch size for the dataloader. num_workers : int, default: 0 Number of workers for the dataloader. shuffle : bool, default: True Whether to shuffle the dataset. pin_memory : bool, default: True If ``True``, the data loader will copy Tensors into CUDA pinned memory before returning them. object_type : Literal['Dataset', 'DataLoader'], default: 'DataLoader' type of the return value. If 'Dataset', :obj:`deepchecks.vision.VisionDataset` will be returned, otherwise :obj:`torch.utils.data.DataLoader` test_mode: bool, default False whether to load this dataset in "test_mode", meaning very minimal number of images in order to use for unittests. Returns ------- Union[DataLoader, VisionDataset] A DataLoader or VisionDataset instance representing COCO128 dataset
Here is the function:
def load_dataset(
train: bool = True,
batch_size: int = 32,
num_workers: int = 0,
shuffle: bool = True,
pin_memory: bool = True,
object_type: Literal['VisionData', 'DataLoader'] = 'VisionData',
test_mode: bool = False
) -> t.Union[DataLoader, VisionData]:
"""Get the COCO128 dataset and return a dataloader.
Parameters
----------
train : bool, default: True
if `True` train dataset, otherwise test dataset
batch_size : int, default: 32
Batch size for the dataloader.
num_workers : int, default: 0
Number of workers for the dataloader.
shuffle : bool, default: True
Whether to shuffle the dataset.
pin_memory : bool, default: True
If ``True``, the data loader will copy Tensors
into CUDA pinned memory before returning them.
object_type : Literal['Dataset', 'DataLoader'], default: 'DataLoader'
type of the return value. If 'Dataset', :obj:`deepchecks.vision.VisionDataset`
will be returned, otherwise :obj:`torch.utils.data.DataLoader`
test_mode: bool, default False
whether to load this dataset in "test_mode", meaning very minimal number of images in order to use for
unittests.
Returns
-------
Union[DataLoader, VisionDataset]
A DataLoader or VisionDataset instance representing COCO128 dataset
"""
root = DATA_DIR
dataset = CocoSegmentationDataset.load_or_download(root=root, train=train, test_mode=test_mode)
if object_type == 'DataLoader':
return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers,
collate_fn=_batch_collate, pin_memory=pin_memory, generator=torch.Generator())
elif object_type == 'VisionData':
model = load_model()
loader = DataLoader(dataset=dataset, batch_size=batch_size, num_workers=num_workers,
collate_fn=deepchecks_collate(model), pin_memory=pin_memory, generator=torch.Generator())
loader = get_data_loader_sequential(loader, shuffle=shuffle)
return VisionData(batch_loader=loader, task_type='semantic_segmentation', label_map=LABEL_MAP,
reshuffle_data=False)
else:
raise TypeError(f'Unknown value of object_type - {object_type}') | Get the COCO128 dataset and return a dataloader. Parameters ---------- train : bool, default: True if `True` train dataset, otherwise test dataset batch_size : int, default: 32 Batch size for the dataloader. num_workers : int, default: 0 Number of workers for the dataloader. shuffle : bool, default: True Whether to shuffle the dataset. pin_memory : bool, default: True If ``True``, the data loader will copy Tensors into CUDA pinned memory before returning them. object_type : Literal['Dataset', 'DataLoader'], default: 'DataLoader' type of the return value. If 'Dataset', :obj:`deepchecks.vision.VisionDataset` will be returned, otherwise :obj:`torch.utils.data.DataLoader` test_mode: bool, default False whether to load this dataset in "test_mode", meaning very minimal number of images in order to use for unittests. Returns ------- Union[DataLoader, VisionDataset] A DataLoader or VisionDataset instance representing COCO128 dataset |
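Typical usage (the first call downloads COCO128-segments into the package assets directory):
train_data = load_dataset(train=True, object_type='VisionData')
test_loader = load_dataset(train=False, object_type='DataLoader')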
597 | import logging
import typing as t
import warnings
import zipfile
from io import BytesIO
from pathlib import Path
from urllib.request import urlopen
import albumentations as A
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from typing_extensions import Literal
from deepchecks.vision.datasets.assets.coco_detection.static_predictions_yolo import \
coco_detections_static_predictions_dict
from deepchecks.vision.datasets.detection.coco_utils import COCO_DIR, LABEL_MAP, download_coco128, get_image_and_label
from deepchecks.vision.utils.test_utils import get_data_loader_sequential, hash_image
from deepchecks.vision.vision_data import BatchOutputFormat, VisionData
The provided code snippet includes necessary dependencies for implementing the `collate_without_model` function. Write a Python function `def collate_without_model(data) -> t.Tuple[t.List[np.ndarray], t.List[torch.Tensor]]` to solve the following problem:
Collate function for the coco dataset returning images and labels in correct format as tuple.
Here is the function:
def collate_without_model(data) -> t.Tuple[t.List[np.ndarray], t.List[torch.Tensor]]:
"""Collate function for the coco dataset returning images and labels in correct format as tuple."""
raw_images = [x[0] for x in data]
images = [np.array(x) for x in raw_images]
def move_class(tensor):
return torch.index_select(tensor, 1, torch.LongTensor([4, 0, 1, 2, 3]).to(tensor.device)) \
if len(tensor) > 0 else tensor
labels = [move_class(x[1]) for x in data]
return images, labels | Collate function for the coco dataset returning images and labels in correct format as tuple. |
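A small sketch with a fabricated coco-style sample (a PIL image plus an [x, y, w, h, class_id] tensor):
import torch
from PIL import Image
batch = [(Image.new('RGB', (32, 32)), torch.tensor([[10., 10., 5., 5., 2.]]))]
images, labels = collate_without_model(batch)
print(labels[0])  # rows reordered to [class_id, x_min, y_min, w, h]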
598 | import contextlib
import hashlib
import json
import os
import pathlib
import typing as t
import urllib.request
from pathlib import Path
import numpy as np
import torch
from bs4 import BeautifulSoup
from PIL import Image
from torch import nn
from torch.utils.data import DataLoader
from typing_extensions import Literal
from deepchecks.vision.utils.test_utils import IndicesSequentialSampler
from deepchecks.vision.vision_data import BatchOutputFormat, VisionData
from deepchecks.vision.vision_data.utils import object_to_numpy
MASK_DIR = pathlib.Path(__file__).absolute().parent.parent / 'assets' / 'mask_detection'
def load_model(device: t.Union[str, torch.device] = 'cpu') -> nn.Module:
"""Load the pre-calculated prediction model and return it."""
dev = torch.device(device) if isinstance(device, str) else device
return MaskPrecalculatedModel(device=dev)
def deepchecks_collate(model) -> t.Callable:
"""Process batch to deepchecks format.
Parameters
----------
model : nn.Module
model to predict with
Returns
-------
BatchOutputFormat
batch of data in deepchecks format
"""
def _process_batch_to_deepchecks_format(data) -> BatchOutputFormat:
raw_images = [x[0] for x in data]
images = [np.array(x.permute(1, 2, 0)) * 255 for x in raw_images]
def extract_dict(in_dict):
return torch.concat([in_dict['labels'].reshape((-1, 1)), in_dict['boxes']], axis=1)
labels = [extract_dict(x[1]) for x in data]
predictions = model(raw_images)
return {'images': images, 'labels': labels, 'predictions': predictions}
return _process_batch_to_deepchecks_format
def _batch_collate(batch):
return tuple(zip(*batch))
class MaskDataset(VisionDataset):
"""Dataset for the mask dataset. Loads the images and labels from the dataset."""
def __init__(self, mask_dir, *args, **kwargs):
"""Initialize the dataset."""
super().__init__(mask_dir, *args, **kwargs)
# load all image files, sorting them to
# ensure that they are aligned
self.imgs = list(sorted(os.listdir(os.path.join(self.root, 'images'))))
def __getitem__(self, idx):
"""Get the image and labels at the given index."""
# load images and masks
file_image = 'maksssksksss' + str(idx) + '.png'
file_label = 'maksssksksss' + str(idx) + '.xml'
img_path = os.path.join(os.path.join(self.root, 'images'), file_image)
label_path = os.path.join(os.path.join(self.root, 'annotations'), file_label)
img = Image.open(img_path).convert('RGB')
# Generate Label
target = self._generate_target(idx, label_path)
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
"""Get the length of the dataset."""
return len(self.imgs)
def _generate_box(obj):
xmin = int(obj.find('xmin').text)
ymin = int(obj.find('ymin').text)
xmax = int(obj.find('xmax').text)
ymax = int(obj.find('ymax').text)
return [xmin, ymin, xmax - xmin, ymax - ymin]
def _generate_label(obj):
if obj.find('name').text == 'with_mask':
return 1
elif obj.find('name').text == 'mask_weared_incorrect':
return 2
return 0
def _generate_target(image_id, file):
with open(file, encoding='utf8') as f:
data = f.read()
soup = BeautifulSoup(data, 'xml')
objects = soup.find_all('object')
# Bounding boxes for objects
# In coco format, bbox = [xmin, ymin, width, height]
# In pytorch, the input should be [xmin, ymin, xmax, ymax]
boxes = []
labels = []
for i in objects:
boxes.append(MaskDataset._generate_box(i))
labels.append(MaskDataset._generate_label(i))
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# Labels (In my case, I only one class: target class or background)
labels = torch.as_tensor(labels, dtype=torch.int64)
# img_id to Tensor
img_id = torch.tensor([image_id])
# Annotation is in dictionary format
target = {'boxes': boxes, 'labels': labels, 'image_id': img_id}
return target
def download_mask(cls, root: t.Union[str, Path]) -> Path:
"""Download mask and returns the root path and folder name."""
root = root if isinstance(root, Path) else Path(root)
mask_dir = Path(os.path.join(root, 'mask'))
img_path = Path(os.path.join(mask_dir, 'images'))
label_path = Path(os.path.join(mask_dir, 'annotations'))
if img_path.exists() and label_path.exists():
return mask_dir
url = 'https://figshare.com/ndownloader/files/38115927'
md5 = '64b8f1d3036f3445557a8619f0400f6e'
with open(os.devnull, 'w', encoding='utf8') as f, contextlib.redirect_stdout(f):
download_and_extract_archive(
url,
download_root=str(mask_dir),
extract_root=str(mask_dir),
md5=md5,
filename='mask.zip'
)
return mask_dir
def get_time_to_sample_dict(cls, root: t.Union[str, Path]) -> t.Dict[int, t.List[int]]:
"""Return a dictionary of time to sample."""
time_dict_url = 'https://figshare.com/ndownloader/files/38116608'
root = root if isinstance(root, Path) else Path(root)
time_to_sample_dict_path = Path(os.path.join(root, 'time_to_sample_dict.json'))
if not time_to_sample_dict_path.exists():
urllib.request.urlretrieve(time_dict_url, time_to_sample_dict_path)
with open(time_to_sample_dict_path, 'r', encoding='utf8') as f:
return json.load(f)
LABEL_MAP = {2: 'Improperly Worn Mask',
1: 'Properly Worn Mask',
0: 'No Mask'}
class IndicesSequentialSampler(Sampler):
"""Samples elements sequentially from a given list of indices, without replacement.
Args:
indices (sequence): a sequence of indices
"""
indices: List[int]
def __init__(self, indices: List[int]) -> None:
super().__init__(None)
self.indices = indices
def __iter__(self) -> Iterator[int]:
"""Return an iterator over the indices."""
return iter(self.indices)
def __len__(self) -> int:
"""Return the number of indices."""
return len(self.indices)
def index_at(self, location):
"""Return for a given location, the real index value."""
return self.indices[location]
The provided code snippet includes necessary dependencies for implementing the `load_dataset` function. Write a Python function `def load_dataset( day_index: int = 0, batch_size: int = 32, num_workers: int = 0, pin_memory: bool = True, shuffle: bool = False, object_type: Literal['VisionData', 'DataLoader'] = 'DataLoader' ) -> t.Union[DataLoader, VisionData]` to solve the following problem:
Get the mask dataset and return a dataloader. Parameters ---------- day_index : int, default: 0 Select the index of the day that should be loaded. 0 is the training set, and each subsequent number is a subsequent day in the production dataset. Last day index is 59. batch_size : int, default: 32 Batch size for the dataloader. num_workers : int, default: 0 Number of workers for the dataloader. shuffle : bool, default: False Whether to shuffle the dataset. pin_memory : bool, default: True If ``True``, the data loader will copy Tensors into CUDA pinned memory before returning them. object_type : Literal['Dataset', 'DataLoader'], default: 'DataLoader' type of the return value. If 'Dataset', :obj:`deepchecks.vision.VisionData` will be returned, otherwise :obj:`torch.utils.data.DataLoader` Returns ------- Union[DataLoader, VisionDataset] A DataLoader or VisionDataset instance representing mask dataset
Here is the function:
def load_dataset(
day_index: int = 0,
batch_size: int = 32,
num_workers: int = 0,
pin_memory: bool = True,
shuffle: bool = False,
object_type: Literal['VisionData', 'DataLoader'] = 'DataLoader'
) -> t.Union[DataLoader, VisionData]:
"""Get the mask dataset and return a dataloader.
Parameters
----------
day_index : int, default: 0
Select the index of the day that should be loaded. 0 is the training set, and each subsequent number is a
subsequent day in the production dataset. Last day index is 59.
batch_size : int, default: 32
Batch size for the dataloader.
num_workers : int, default: 0
Number of workers for the dataloader.
shuffle : bool, default: False
Whether to shuffle the dataset.
pin_memory : bool, default: True
If ``True``, the data loader will copy Tensors
into CUDA pinned memory before returning them.
object_type : Literal['Dataset', 'DataLoader'], default: 'DataLoader'
type of the return value. If 'Dataset', :obj:`deepchecks.vision.VisionData`
will be returned, otherwise :obj:`torch.utils.data.DataLoader`
Returns
-------
Union[DataLoader, VisionDataset]
A DataLoader or VisionDataset instance representing mask dataset
"""
mask_dir = MaskDataset.download_mask(MASK_DIR)
time_to_sample_dict = MaskDataset.get_time_to_sample_dict(MASK_DIR)
if not isinstance(day_index, int) or day_index < 0 or day_index > 59:
raise ValueError('day_index must be an integer between 0 and 59')
time = list(time_to_sample_dict.keys())[day_index]
samples_to_use = time_to_sample_dict[time]
if shuffle:
sampler = torch.utils.data.SubsetRandomSampler(samples_to_use, generator=torch.Generator())
else:
sampler = IndicesSequentialSampler(samples_to_use)
dataset = MaskDataset(mask_dir=str(mask_dir), transform=transforms.Compose([transforms.ToTensor(), ]))
if object_type == 'DataLoader':
return DataLoader(dataset=dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=_batch_collate,
pin_memory=pin_memory, sampler=sampler)
elif object_type == 'VisionData':
model = load_model()
dataloader = DataLoader(dataset=dataset, batch_size=batch_size, num_workers=num_workers, sampler=sampler,
collate_fn=deepchecks_collate(model), pin_memory=pin_memory)
return VisionData(dataloader, task_type='object_detection', reshuffle_data=False,
label_map=LABEL_MAP, dataset_name=f'Mask Dataset at time {time}')
else:
raise TypeError(f'Unknown value of object_type - {object_type}') | Get the mask dataset and return a dataloader. Parameters ---------- day_index : int, default: 0 Select the index of the day that should be loaded. 0 is the training set, and each subsequent number is a subsequent day in the production dataset. Last day index is 59. batch_size : int, default: 32 Batch size for the dataloader. num_workers : int, default: 0 Number of workers for the dataloader. shuffle : bool, default: False Whether to shuffle the dataset. pin_memory : bool, default: True If ``True``, the data loader will copy Tensors into CUDA pinned memory before returning them. object_type : Literal['Dataset', 'DataLoader'], default: 'DataLoader' type of the return value. If 'Dataset', :obj:`deepchecks.vision.VisionData` will be returned, otherwise :obj:`torch.utils.data.DataLoader` Returns ------- Union[DataLoader, VisionDataset] A DataLoader or VisionDataset instance representing mask dataset |
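A usage sketch (the first call downloads the mask dataset; day_index 0 is the training split):
train_data = load_dataset(day_index=0, object_type='VisionData')
day_30_data = load_dataset(day_index=30, object_type='VisionData')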
599 | import contextlib
import hashlib
import json
import os
import pathlib
import typing as t
import urllib.request
from pathlib import Path
import numpy as np
import torch
from bs4 import BeautifulSoup
from PIL import Image
from torch import nn
from torch.utils.data import DataLoader
from typing_extensions import Literal
from deepchecks.vision.utils.test_utils import IndicesSequentialSampler
from deepchecks.vision.vision_data import BatchOutputFormat, VisionData
from deepchecks.vision.vision_data.utils import object_to_numpy
MASK_DIR = pathlib.Path(__file__).absolute().parent.parent / 'assets' / 'mask_detection'
class MaskDataset(VisionDataset):
"""Dataset for the mask dataset. Loads the images and labels from the dataset."""
def __init__(self, mask_dir, *args, **kwargs):
"""Initialize the dataset."""
super().__init__(mask_dir, *args, **kwargs)
# load all image files, sorting them to
# ensure that they are aligned
self.imgs = list(sorted(os.listdir(os.path.join(self.root, 'images'))))
def __getitem__(self, idx):
"""Get the image and labels at the given index."""
# load images and masks
file_image = 'maksssksksss' + str(idx) + '.png'
file_label = 'maksssksksss' + str(idx) + '.xml'
img_path = os.path.join(os.path.join(self.root, 'images'), file_image)
label_path = os.path.join(os.path.join(self.root, 'annotations'), file_label)
img = Image.open(img_path).convert('RGB')
# Generate Label
target = self._generate_target(idx, label_path)
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
"""Get the length of the dataset."""
return len(self.imgs)
    @staticmethod
    def _generate_box(obj):
xmin = int(obj.find('xmin').text)
ymin = int(obj.find('ymin').text)
xmax = int(obj.find('xmax').text)
ymax = int(obj.find('ymax').text)
return [xmin, ymin, xmax - xmin, ymax - ymin]
    @staticmethod
    def _generate_label(obj):
if obj.find('name').text == 'with_mask':
return 1
elif obj.find('name').text == 'mask_weared_incorrect':
return 2
return 0
    @staticmethod
    def _generate_target(image_id, file):
with open(file, encoding='utf8') as f:
data = f.read()
soup = BeautifulSoup(data, 'xml')
objects = soup.find_all('object')
# Bounding boxes for objects
# In coco format, bbox = [xmin, ymin, width, height]
# In pytorch, the input should be [xmin, ymin, xmax, ymax]
boxes = []
labels = []
for i in objects:
boxes.append(MaskDataset._generate_box(i))
labels.append(MaskDataset._generate_label(i))
boxes = torch.as_tensor(boxes, dtype=torch.float32)
        # Labels (0 = without_mask, 1 = with_mask, 2 = mask_weared_incorrect)
labels = torch.as_tensor(labels, dtype=torch.int64)
# img_id to Tensor
img_id = torch.tensor([image_id])
# Annotation is in dictionary format
target = {'boxes': boxes, 'labels': labels, 'image_id': img_id}
return target
    @classmethod
    def download_mask(cls, root: t.Union[str, Path]) -> Path:
"""Download mask and returns the root path and folder name."""
root = root if isinstance(root, Path) else Path(root)
mask_dir = Path(os.path.join(root, 'mask'))
img_path = Path(os.path.join(mask_dir, 'images'))
label_path = Path(os.path.join(mask_dir, 'annotations'))
if img_path.exists() and label_path.exists():
return mask_dir
url = 'https://figshare.com/ndownloader/files/38115927'
md5 = '64b8f1d3036f3445557a8619f0400f6e'
with open(os.devnull, 'w', encoding='utf8') as f, contextlib.redirect_stdout(f):
download_and_extract_archive(
url,
download_root=str(mask_dir),
extract_root=str(mask_dir),
md5=md5,
filename='mask.zip'
)
return mask_dir
    @classmethod
    def get_time_to_sample_dict(cls, root: t.Union[str, Path]) -> t.Dict[int, t.List[int]]:
"""Return a dictionary of time to sample."""
time_dict_url = 'https://figshare.com/ndownloader/files/38116608'
root = root if isinstance(root, Path) else Path(root)
time_to_sample_dict_path = Path(os.path.join(root, 'time_to_sample_dict.json'))
if not time_to_sample_dict_path.exists():
urllib.request.urlretrieve(time_dict_url, time_to_sample_dict_path)
with open(time_to_sample_dict_path, 'r', encoding='utf8') as f:
return json.load(f)
The provided code snippet includes necessary dependencies for implementing the `get_data_timestamps` function. Write a Python function `def get_data_timestamps() -> t.List[int]` to solve the following problem:
Get a list of the data timestamps, one entry per day in the production data. Returns ------- t.List[int] A list of the data timestamps.
Here is the function:
def get_data_timestamps() -> t.List[int]:
"""Get a list of the data timestamps, one entry per day in the production data.
Returns
-------
t.List[int]
A list of the data timestamps.
"""
return list(map(int, MaskDataset.get_time_to_sample_dict(MASK_DIR).keys())) | Get a list of the data timestamps, one entry per day in the production data. Returns ------- t.List[int] A list of the data timestamps. |
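A short, hedged example of combining `get_data_timestamps` with the per-day loader; it assumes the timestamps are unix epoch seconds, which the snippet above does not guarantee:
from datetime import datetime, timezone

timestamps = get_data_timestamps()  # one entry per day; index 0 corresponds to the training day
print(f'number of days: {len(timestamps)}')
for day_index, ts in enumerate(timestamps[:3]):
    # Assumption: timestamps are unix epoch seconds.
    print(day_index, datetime.fromtimestamp(ts, tz=timezone.utc).isoformat())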
600 | import os
import typing as t
from pathlib import Path
import albumentations as A
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from typing_extensions import Literal
from deepchecks import vision
from deepchecks.vision.datasets.detection.coco_utils import COCO_DIR, LABEL_MAP, TRAIN_FRACTION, download_coco128, get_image_and_label
from deepchecks.vision.vision_data import VisionData
_MODEL_URL = 'https://figshare.com/ndownloader/files/38695689'
def deepchecks_map(model):
def _deepchecks_map(image, label):
pred = _prediction_to_deepchecks_format(model, image)
# class_id is required to be the first column
label = tf.gather(label, [4, 0, 1, 2, 3], axis=1) if label is not None and len(label) > 0 else label
return {'images': [image], 'labels': [label], 'predictions': [pred]}
return _deepchecks_map
def create_tf_dataset(train: bool = True, n_samples: t.Optional[int] = None, transforms=None) -> tf.data.Dataset:
"""Create a tf dataset of the COCO128 dataset."""
coco_dir, dataset_name = download_coco128(COCO_DIR)
img_dir = Path(coco_dir / 'images' / dataset_name)
label_dir = Path(coco_dir / 'labels' / dataset_name)
files = os.listdir(img_dir)
train_len = int(TRAIN_FRACTION * len(files))
files = files[:train_len] if train else files[train_len:]
if n_samples is not None and n_samples < len(files):
files = files[:n_samples]
images, labels = [], []
for file_name in files:
label_file = label_dir / str(file_name).replace('jpg', 'txt')
image, label = get_image_and_label(img_dir / str(file_name), label_file, transforms)
images.append(image)
labels.append(np.asarray(label))
def generator():
for img, label in zip(images, labels):
yield img, label
dataset = tf.data.Dataset.from_generator(generator, output_signature=(
tf.TensorSpec(shape=(None, None, 3)), tf.TensorSpec(shape=None)))
return dataset
LABEL_MAP = {
0: 'person',
1: 'bicycle',
2: 'car',
3: 'motorcycle',
4: 'airplane',
5: 'bus',
6: 'train',
7: 'truck',
8: 'boat',
9: 'traffic light',
10: 'fire hydrant',
11: 'stop sign',
12: 'parking meter',
13: 'bench',
14: 'bird',
15: 'cat',
16: 'dog',
17: 'horse',
18: 'sheep',
19: 'cow',
20: 'elephant',
21: 'bear',
22: 'zebra',
23: 'giraffe',
24: 'backpack',
25: 'umbrella',
26: 'handbag',
27: 'tie',
28: 'suitcase',
29: 'frisbee',
30: 'skis',
31: 'snowboard',
32: 'sports ball',
33: 'kite',
34: 'baseball bat',
35: 'baseball glove',
36: 'skateboard',
37: 'surfboard',
38: 'tennis racket',
39: 'bottle',
40: 'wine glass',
41: 'cup',
42: 'fork',
43: 'knife',
44: 'spoon',
45: 'bowl',
46: 'banana',
47: 'apple',
48: 'sandwich',
49: 'orange',
50: 'broccoli',
51: 'carrot',
52: 'hot dog',
53: 'pizza',
54: 'donut',
55: 'cake',
56: 'chair',
57: 'couch',
58: 'potted plant',
59: 'bed',
60: 'dining table',
61: 'toilet',
62: 'tv',
63: 'laptop',
64: 'mouse',
65: 'remote',
66: 'keyboard',
67: 'cell phone',
68: 'microwave',
69: 'oven',
70: 'toaster',
71: 'sink',
72: 'refrigerator',
73: 'book',
74: 'clock',
75: 'vase',
76: 'scissors',
77: 'teddy bear',
78: 'hair drier',
79: 'toothbrush'
}
The provided code snippet includes necessary dependencies for implementing the `load_dataset` function. Write a Python function `def load_dataset( train: bool = True, shuffle: bool = False, object_type: Literal['VisionData', 'Dataset'] = 'Dataset', n_samples: t.Optional[int] = None, ) -> t.Union[tf.data.Dataset, vision.VisionData]` to solve the following problem:
Get the COCO128 dataset and return a dataloader. Parameters ---------- train : bool, default: True if `True` train dataset, otherwise test dataset shuffle : bool, default: False Whether to shuffle the dataset. object_type : Literal['VisionData', 'Dataset'], default: 'Dataset' type of the return value. If 'VisionData', :obj:`deepchecks.vision.VisionData` will be returned, otherwise :obj:`tf.data.Dataset`. n_samples : int, optional Number of samples to load. Return the first n_samples if shuffle is False otherwise selects n_samples at random. If None, returns all samples. Returns ------- Union[Dataset, VisionData] A Dataset or VisionData instance representing COCO128 dataset
Here is the function:
def load_dataset(
train: bool = True,
shuffle: bool = False,
object_type: Literal['VisionData', 'Dataset'] = 'Dataset',
n_samples: t.Optional[int] = None,
) -> t.Union[tf.data.Dataset, vision.VisionData]:
"""Get the COCO128 dataset and return a dataloader.
Parameters
----------
train : bool, default: True
if `True` train dataset, otherwise test dataset
shuffle : bool, default: False
Whether to shuffle the dataset.
    object_type : Literal['VisionData', 'Dataset'], default: 'Dataset'
        type of the return value. If 'VisionData', :obj:`deepchecks.vision.VisionData`
        will be returned, otherwise :obj:`tf.data.Dataset`.
n_samples : int, optional
Number of samples to load. Return the first n_samples if shuffle
is False otherwise selects n_samples at random. If None, returns all samples.
Returns
-------
Union[Dataset, VisionData]
A Dataset or VisionData instance representing COCO128 dataset
"""
transforms = A.Compose([A.NoOp()], bbox_params=A.BboxParams(format='coco'))
coco_dataset = create_tf_dataset(train, n_samples, transforms)
if shuffle:
coco_dataset = coco_dataset.shuffle(128)
if object_type == 'Dataset':
return coco_dataset
elif object_type == 'VisionData':
model = hub.load(_MODEL_URL)
coco_dataset = coco_dataset.map(deepchecks_map(model))
return VisionData(batch_loader=coco_dataset, label_map=LABEL_MAP, task_type='object_detection',
reshuffle_data=False)
else:
raise TypeError(f'Unknown value of object_type - {object_type}') | Get the COCO128 dataset and return a dataloader. Parameters ---------- train : bool, default: True if `True` train dataset, otherwise test dataset shuffle : bool, default: False Whether to shuffle the dataset. object_type : Literal['Dataset', 'Dataset'], default: 'Dataset' type of the return value. If 'Dataset', :obj:`deepchecks.vision.VisionData` will be returned, otherwise :obj:`tf.data.Dataset`. n_samples : int, optional Number of samples to load. Return the first n_samples if shuffle is False otherwise selects n_samples at random. If None, returns all samples. Returns ------- Union[Dataset, VisionData] A Dataset or VisionData instance representing COCO128 dataset |
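A hedged usage sketch for the TensorFlow variant of `load_dataset` above; it assumes `tensorflow` and `tensorflow_hub` are installed and that the figshare model URL is reachable:
# Raw tf.data.Dataset of (image, label) pairs, no model required.
raw_ds = load_dataset(train=True, object_type='Dataset', n_samples=8)
for image, label in raw_ds.take(1):
    print(image.shape, label.shape)

# VisionData wrapper that also runs the downloaded detection model per sample.
train_vision_data = load_dataset(train=True, object_type='VisionData', n_samples=32)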
601 | import logging
import os
import pathlib
import pickle
import typing as t
import warnings
from itertools import cycle
from urllib.error import URLError
import albumentations as A
import numpy as np
import torch
import torch.nn.functional as F
from albumentations.pytorch import ToTensorV2
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.data.dataset import IterableDataset
from typing_extensions import Literal
from deepchecks.utils.logger import get_logger
from deepchecks.vision.utils.test_utils import get_data_loader_sequential, hash_image, un_normalize_batch
from deepchecks.vision.vision_data import BatchOutputFormat, VisionData
from deepchecks.vision.vision_data.utils import object_to_numpy
def un_normalize_batch(tensor: torch.Tensor, mean: t.Sized, std: t.Sized, max_pixel_value: int = 255):
"""Apply un-normalization on a tensor in order to display an image."""
dim = len(mean)
reshape_shape = (1, 1, 1, dim)
max_pixel_value = [max_pixel_value] * dim
mean = torch.tensor(mean, device=tensor.device).reshape(reshape_shape)
std = torch.tensor(std, device=tensor.device).reshape(reshape_shape)
tensor = (tensor * std) + mean
tensor = tensor * torch.tensor(max_pixel_value, device=tensor.device).reshape(reshape_shape)
return object_to_numpy(tensor)
The provided code snippet includes necessary dependencies for implementing the `collate_without_model` function. Write a Python function `def collate_without_model(data) -> t.Tuple[t.List[np.ndarray], t.List[int]]` to solve the following problem:
Collate function for the mnist dataset returning images and labels in correct format as tuple.
Here is the function:
def collate_without_model(data) -> t.Tuple[t.List[np.ndarray], t.List[int]]:
"""Collate function for the mnist dataset returning images and labels in correct format as tuple."""
raw_images = torch.stack([x[0] for x in data])
labels = [x[1] for x in data]
images = raw_images.permute(0, 2, 3, 1)
images = un_normalize_batch(images, mean=(0.1307,), std=(0.3081,))
return images, labels | Collate function for the mnist dataset returning images and labels in correct format as tuple. |
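A small, hedged example of calling `collate_without_model` on a synthetic batch; random tensors stand in for normalized MNIST samples, so the un-normalized pixel values are not meaningful:
# Each sample mimics what torchvision's MNIST yields after ToTensor + Normalize:
# a (1, 28, 28) float tensor and an integer label.
fake_batch = [(torch.randn(1, 28, 28), i % 10) for i in range(4)]
images, labels = collate_without_model(fake_batch)
print(images.shape, labels)  # (4, 28, 28, 1) HWC images and the 4 labels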