% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classification_metric.R
\name{classification_metric}
\alias{classification_metric}
\title{Classification Metric}
\usage{
classification_metric(dataset, classified_dataset, unprivileged_groups, privileged_groups)
}
\arguments{
\item{dataset}{(BinaryLabelDataset) Dataset containing ground-truth labels}

\item{classified_dataset}{(BinaryLabelDataset) Dataset containing predictions}

\item{unprivileged_groups}{Unprivileged groups. A list containing the name of the protected attribute and the attribute value that identifies the unprivileged group.}

\item{privileged_groups}{Privileged groups. A list containing the name of the protected attribute and the attribute value that identifies the privileged group.}
}
\description{
Class for computing metrics based on two BinaryLabelDatasets: the first contains the ground-truth labels and the second contains the predictions produced by a classification transformer (or a similar model).
}
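\details{
Each group argument is a list containing the protected attribute name followed by the
attribute value that identifies the group, e.g. \code{list('feat', 0)}. The returned
object exposes the metrics listed below under \sQuote{Available metrics} as functions,
e.g. \code{cm$accuracy()}.
}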
\examples{
\dontrun{
load_aif360_lib()
# Input dataset
data <- data.frame("feat" = c(0,0,1,1,1,1,0,1,1,0), "label" = c(1,0,0,1,0,0,1,0,1,1))
# Create aif compatible input dataset
act <- aif360::binary_label_dataset(data_path = data, favor_label = 0, unfavor_label = 1,
                            unprivileged_protected_attribute = 0,
                            privileged_protected_attribute = 1,
                            target_column = "label", protected_attribute = "feat")
# Classified dataset
pred_data <- data.frame("feat" = c(0,0,1,1,1,1,0,1,1,0), "label" = c(1,0,1,1,1,0,1,0,0,1))
# Create aif compatible classified dataset
pred <- aif360::binary_label_dataset(data_path = pred_data, favor_label = 0, unfavor_label = 1,
                             unprivileged_protected_attribute = 0,
                             privileged_protected_attribute = 1,
                             target_column = "label", protected_attribute = "feat")
# Create an instance of classification metric
# (feat = 1 was declared privileged above, so it is the privileged group here)
cm <- classification_metric(act, pred,
                            unprivileged_groups = list('feat', 0),
                            privileged_groups = list('feat', 1))
# Access metric functions
cm$accuracy()
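# Other metrics from the 'Available metrics' list are exposed the same way, e.g.
cm$disparate_impact()
cm$statistical_parity_difference()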
}
}
\seealso{
\href{https://aif360.readthedocs.io/en/latest/modules/metrics.html#classification-metric}{Explanations of the available classification metrics}

Available metrics:
\itemize{
  \item accuracy
  \item average_abs_odds_difference
  \item average_odds_difference
  \item between_all_groups_coefficient_of_variation
  \item between_all_groups_generalized_entropy_index
  \item between_all_groups_theil_index
  \item between_group_coefficient_of_variation
  \item between_group_generalized_entropy_index
  \item between_group_theil_index
  \item binary_confusion_matrix
  \item coefficient_of_variation
  \item disparate_impact
  \item equal_opportunity_difference
  \item error_rate
  \item error_rate_difference
  \item error_rate_ratio
  \item false_discovery_rate
  \item false_discovery_rate_difference
  \item false_discovery_rate_ratio
  \item false_negative_rate
  \item false_negative_rate_difference
  \item false_negative_rate_ratio
  \item false_omission_rate
  \item false_omission_rate_difference
  \item false_omission_rate_ratio
  \item false_positive_rate
  \item false_positive_rate_difference
  \item false_positive_rate_ratio
  \item generalized_binary_confusion_matrix
  \item generalized_entropy_index
  \item generalized_false_negative_rate
  \item generalized_false_positive_rate
  \item generalized_true_negative_rate
  \item generalized_true_positive_rate
  \item negative_predictive_value
  \item num_false_negatives
  \item num_false_positives
  \item num_generalized_false_negatives
  \item num_generalized_false_positives
  \item num_generalized_true_negatives
  \item num_generalized_true_positives
  \item num_pred_negatives
  \item num_pred_positives
  \item num_true_negatives
  \item num_true_positives
  \item performance_measures
  \item positive_predictive_value
  \item power
  \item precision
  \item recall
  \item selection_rate
  \item sensitivity
  \item specificity
  \item statistical_parity_difference
  \item theil_index
  \item true_negative_rate
  \item true_positive_rate
  \item true_positive_rate_difference
}
}