# biasaware/app.py
import json
import os

import gradio as gr
import pandas as pd

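# Star imports matter here: evaluate() resolves each methodology's entry-point
# function by name via globals(), so these functions must land in module scope.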
from scripts.genbit import *
from scripts.gender_profession_bias import *
from scripts.gender_divide import *
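
# Assumed shape of config/methodologies.json, inferred from the "fx" and
# "description" lookups in this file (the real config may carry extra keys):
# {
#     "<Methodology Name>": {
#         "description": "Markdown blurb shown in the Methodology panel",
#         "fx": "entry_point_function_name"
#     },
#     ...
# }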
with open("config/methodologies.json", "r") as f:
    methodologies = json.load(f)

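# Upper bound on the number of rows analyzed per run (compute budget).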
MAX_THRESHOLD = 5000
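
# Parsed CSVs keyed by the uploaded file's path, so each upload is read only once.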
DATASET_CACHE = {}


def evaluate(dataset, sampling_method, sampling_size, column, methodology):
try:
print(
f"[{dataset.name.split('/')[-1]}::{column}] - {sampling_method} {sampling_size} entries"
)
data = DATASET_CACHE.setdefault(dataset.name, pd.read_csv(dataset.name))[
[column]
]
        # Sliders can deliver floats, so coerce the sample size to an int, then
        # sub-sample according to the chosen scope ("Random" is seeded for
        # reproducibility across runs).
        sampling_size = int(sampling_size)
        if sampling_method == "First":
            data = data.head(sampling_size)
        elif sampling_method == "Last":
            data = data.tail(sampling_size)
        elif sampling_method == "Random":
            data = data.sample(n=sampling_size, random_state=42)
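        # Resolve the methodology's entry-point function by name and run it on the sample.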
        result_json = globals()[methodologies.get(methodology).get("fx")](data)
        result_df = pd.DataFrame.from_dict(result_json, orient="index").reset_index()
        result_df.columns = ["Metric", "Value"]
        return gr.Dataframe.update(value=result_df, visible=True)
    except Exception as e:
        # This event's only output is the result Dataframe, so errors must be
        # surfaced through it as well (a gr.JSON update would never be applied).
        return gr.Dataframe.update(
            value=pd.DataFrame(
                {"Error": [f"An error occurred while processing the dataset. {e}"]}
            ),
            visible=True,
        )


def display_dataset_config(dataset):
try:
data = DATASET_CACHE.setdefault(dataset.name, pd.read_csv(dataset.name))
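        # Only object-dtype columns are offered for analysis; they are treated as text.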
columns = data.select_dtypes(include=["object"]).columns.tolist()
        corpus = data[columns[0]].head(5).tolist()
return (
gr.Radio.update(
label="Scope",
info="Determines the scope of the dataset to be analyzed",
choices=["First", "Last", "Random"],
value="First",
visible=True,
interactive=True,
),
gr.Slider.update(
label=f"Number of Entries",
info=f"Determines the number of entries to be analyzed. Due to computational constraints, the maximum number of entries that can be analyzed is {MAX_THRESHOLD}.",
minimum=1,
maximum=min(data.shape[0], MAX_THRESHOLD),
value=min(data.shape[0], MAX_THRESHOLD),
visible=True,
interactive=True,
),
gr.Radio.update(
label="Column",
info="Determines the column to be analyzed. These are the columns with text data.",
choices=columns,
value=columns[0],
visible=True,
interactive=True,
),
            gr.Dataframe.update(
                value=pd.DataFrame({columns[0]: corpus}), visible=True
            ),
)
    except Exception:
        # Unreadable CSV or no text columns: keep the configuration widgets hidden.
        return (
            gr.Radio.update(visible=False),
            gr.Slider.update(visible=False),
            gr.Radio.update(visible=False),
            gr.Dataframe.update(visible=False),
        )


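# Refresh the five-row preview whenever a different text column is selected.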
def update_column_metadata(dataset, column):
data = DATASET_CACHE.setdefault(dataset.name, pd.read_csv(dataset.name))
    corpus = data[column].head(5).tolist()
    return gr.Dataframe.update(value=pd.DataFrame({column: corpus}), visible=True)


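# Render the selected methodology's blurb and enable the evaluation button.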
def get_methodology_metadata(methodology):
title = "## " + methodology
description = methodologies.get(methodology).get("description")
metadata = f"{title}\n\n{description}"
return (
gr.Markdown.update(metadata, visible=True),
gr.Button.update(interactive=True, visible=True),
    )


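# UI layout: dataset configuration | methodology picker | results.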
BiasAware = gr.Blocks(title="BiasAware: Dataset Bias Detection")
with BiasAware:
gr.Markdown(
"""
# BiasAware: Dataset Bias Detection
BiasAware is a specialized tool for detecting and quantifying biases within datasets used for Natural Language Processing (NLP) tasks. NLP training datasets frequently mirror the inherent biases of their source materials, resulting in AI models that unintentionally perpetuate stereotypes, exhibit underrepresentation, and showcase skewed perspectives.
"""
)
with gr.Row():
with gr.Column(scale=2):
gr.Markdown("## Dataset")
dataset_file = gr.File(label="Dataset", file_types=["csv"])
dataset_examples = gr.Examples(
[
os.path.join(os.path.dirname(__file__), "data/imdb_100.csv"),
os.path.join(os.path.dirname(__file__), "data/z_employee.csv"),
os.path.join(os.path.dirname(__file__), "data/z_sentences.csv"),
],
inputs=dataset_file,
label="Example Datasets",
)
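            # Hidden until a dataset is uploaded; display_dataset_config() then
            # reveals and populates them.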
dataset_sampling_method = gr.Radio(visible=False)
dataset_sampling_size = gr.Slider(visible=False)
dataset_column = gr.Radio(visible=False)
dataset_corpus = gr.Dataframe(
row_count=(5, "fixed"), col_count=(1, "fixed"), visible=False
)
with gr.Column(scale=2):
gr.Markdown("## Methodology")
methodology = gr.Radio(
label="Methodology",
info="Determines the methodology to be used for bias detection",
                choices=list(methodologies.keys()),
)
evalButton = gr.Button(value="Run Evaluation", interactive=False)
methodology_metadata = gr.Markdown(visible=False)
with gr.Column(scale=4):
gr.Markdown("## Result")
        result = gr.Dataframe(visible=False)

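    # Event wiring: upload -> dataset config, column change -> preview,
    # methodology change -> blurb, run -> evaluation results.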
dataset_file.change(
fn=display_dataset_config,
inputs=[dataset_file],
outputs=[
dataset_sampling_method,
dataset_sampling_size,
dataset_column,
dataset_corpus,
],
)
dataset_column.change(
fn=update_column_metadata,
inputs=[dataset_file, dataset_column],
outputs=[dataset_corpus],
)
methodology.change(
fn=get_methodology_metadata,
inputs=[methodology],
outputs=[methodology_metadata, evalButton],
)
evalButton.click(
fn=evaluate,
inputs=[
dataset_file,
dataset_sampling_method,
dataset_sampling_size,
dataset_column,
methodology,
],
outputs=[result],
)


if __name__ == "__main__":
    BiasAware.launch()