kleytondacosta committed
Commit a94715d • 1 Parent(s): 87ce992

fix: data folder

README.md CHANGED
@@ -1,3 +1,28 @@
- ---
- license: apache-2.0
- ---
+ ---
+ configs:
+ - config_name: binary_classification_preprocessing
+   data_files: benchmark_binary_classification_preprocessing.parquet
+ - config_name: binary_classification_inprocessing
+   data_files: benchmark_binary_classification_inprocessing.parquet
+ - config_name: binary_classification_postprocessing
+   data_files: benchmark_binary_classification_postprocessing.parquet
+ - config_name: multiclass_preprocessing
+   data_files: benchmark_multiclass_preprocessing.parquet
+ - config_name: multiclass_inprocessing
+   data_files: benchmark_multiclass_inprocessing.parquet
+ - config_name: multiclass_postprocessing
+   data_files: benchmark_multiclass_postprocessing.parquet
+ - config_name: regression_preprocessing
+   data_files: benchmark_regression_preprocessing.parquet
+ - config_name: regression_inprocessing
+   data_files: benchmark_regression_inprocessing.parquet
+ - config_name: regression_postprocessing
+   data_files: benchmark_regression_postprocessing.parquet
+ - config_name: clustering_preprocessing
+   data_files: benchmark_clustering_preprocessing.parquet
+ - config_name: clustering_inprocessing
+   data_files: benchmark_clustering_inprocessing.parquet
+ - config_name: clustering_postprocessing
+   data_files: benchmark_clustering_postprocessing.parquet
+ license: mit
+ ---
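Each config_name above maps one parquet file to a named dataset configuration on the Hugging Face Hub. A minimal loading sketch with the datasets library, assuming a hypothetical repository id (the actual id is not shown in this commit):

import datasets

# Load one benchmark table by its config name.
# "kleytondacosta/bias-mitigation-benchmark" is a placeholder repo id.
ds = datasets.load_dataset(
    "kleytondacosta/bias-mitigation-benchmark",
    name="binary_classification_preprocessing",
    split="train",
)
print(ds.column_names)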
create_yaml.py ADDED
@@ -0,0 +1,19 @@
+ import yaml
+
+ data_folder = "data"
+ data = {"license": "mit", "configs": []}
+
+ # Collect configurations based on folder structure
+ for task in ["binary_classification", "multiclass", "regression", "clustering"]:
+     for stage in ["preprocessing", "inprocessing", "postprocessing"]:
+         config = {
+             "config_name": task + "_" + stage,
+             "data_files": f"benchmark_{task}_{stage}.parquet"
+         }
+         data["configs"].append(config)
+
+ # Write YAML to README.md with delimiters
+ with open("README.md", "w") as file:
+     file.write("---\n")  # Add starting delimiter
+     yaml.dump(data, file, default_flow_style=False)
+     file.write("---\n")  # Add ending delimiter
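Note that create_yaml.py rewrites README.md from scratch, so anything below the front matter is discarded, and the data_folder variable is defined but never used. A quick way to sanity-check the generated front matter is to parse it back (a minimal sketch, assuming the script has already run in the same directory):

import yaml

# Extract the YAML block between the two '---' delimiters the script wrote.
with open("README.md") as file:
    front_matter = file.read().split("---")[1]

meta = yaml.safe_load(front_matter)
assert meta["license"] == "mit"
assert len(meta["configs"]) == 12  # 4 task types x 3 stages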
benchmark_binary_classification_preprocessing.csv → data/benchmark_binary_classification_preprocessing.csv RENAMED
File without changes
benchmark_clustering_inprocessing.csv → data/benchmark_clustering_inprocessing.csv RENAMED
File without changes
benchmark_clustering_postprocessing.csv → data/benchmark_clustering_postprocessing.csv RENAMED
File without changes
benchmark_clustering_preprocessing.csv → data/benchmark_clustering_preprocessing.csv RENAMED
File without changes
benchmark_multiclass_inprocessing.csv → data/benchmark_multiclass_inprocessing.csv RENAMED
File without changes
benchmark_multiclass_postprocessing.csv → data/benchmark_multiclass_postprocessing.csv RENAMED
File without changes
benchmark_multiclass_preprocessing.csv → data/benchmark_multiclass_preprocessing.csv RENAMED
File without changes
benchmark_regression_inprocessing.csv → data/benchmark_regression_inprocessing.csv RENAMED
File without changes
benchmark_regression_postprocessing.csv → data/benchmark_regression_postprocessing.csv RENAMED
File without changes
benchmark_regression_preprocessing.csv → data/benchmark_regression_preprocessing.csv RENAMED
File without changes
run_benchmark.py ADDED
@@ -0,0 +1,24 @@
+ #!pip install -q git+https://github.com/holistic-ai/holisticai.git@feature/bias-benchmark
+
+ import os
+ from holisticai.benchmark import BiasMitigationBenchmark
+
+ def main():
+     for task_type in ["multiclass",
+                       "regression",
+                       "clustering",
+                       "binary_classification",
+                       ]:
+         for stage in ["preprocessing",
+                       "inprocessing",
+                       "postprocessing"
+                       ]:
+             benchmark = BiasMitigationBenchmark(task_type, stage)
+             results = benchmark.run()
+             if not os.path.exists("data"):
+                 os.makedirs("data")
+             results.to_csv(f"data/benchmark_{task_type}_{stage}.csv", index=True)
+             print(f"Results saved for {task_type} task type and {stage} stage.")  # noqa: T201
+
+ if __name__ == "__main__":
+     main()
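run_benchmark.py writes one CSV per task/stage pair under data/, twelve in total, keeping the result index (index=True). A minimal sketch for reading one back with pandas, assuming the benchmark has finished:

import pandas as pd

# index_col=0 restores the index that was saved with index=True.
df = pd.read_csv("data/benchmark_binary_classification_preprocessing.csv", index_col=0)
print(df.head())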