freyam committed
Commit 5da6a2b · 1 Parent(s): 239a9e5

Add Blocks and Plots

Files changed (2)
  1. app.py +95 -27
  2. plot.ipynb +0 -0
app.py CHANGED
@@ -1,33 +1,85 @@
 import gradio as gr
+import pandas as pd
 
-def run_evaluation(dataset_id, methodology):
-    return f'Running evaluation for {dataset_id} with {methodology}'
-
-    if methodology == 'A':
-        run_a(dataset_id)
-    elif methodology == 'B':
-        run_b(dataset_id)
-    elif methodology == 'C':
-        run_c(dataset_id)
-
-
-demo = gr.Blocks(theme=gr.themes.Soft())
+data = [
+    ["Category", "Value", "Percentage"],
+    ["Total Reviews", 50000, None],
+    ["Total Sentences", 621647, None],
+    ["Pronouns in Sentences", None, None],
+    ["Male Pronouns", 85615, None],
+    ["Female Pronouns", 39372, None],
+    ["Both Male and Female Pronouns", 7765, None],
+    ["Exclusive Usage of Pronouns", None, None],
+    ["Only Male Pronouns", 77860, 13.77],
+    ["Only Female Pronouns", 31617, 6.33],
+    ["Pronouns and Professions in Sentences", None, None],
+    ["Male Pronouns with Professions", 5580, 0.9],
+    ["Female Pronouns with Professions", 2618, 0.42],
+    ["Exclusive Usage of Pronouns with Professions", None, None],
+    ["Only Male Pronouns with Professions", 5011, 0.81],
+    ["Only Female Pronouns with Professions", 2049, 0.33],
+    ["Pronouns and Professions in Combination", None, None],
+    ["Male or Female Pronouns with Professions", 7629, 1.23],
+    ["Male and Female Pronouns with Professions", 569, 0.09],
+]
+
+
+def display_methodology(methodology):
+    # Placeholder copy for each methodology; real text to be filled in later.
+    title = methodology
+    description = ""
+    details = ""
+    if methodology == "Term Identity Diversity Analysis":
+        description = "111"
+        details = "222"
+    elif methodology == "Textual Gender Label Evaluation":
+        description = "333"
+        details = "444"
+    elif methodology == "GenBit":
+        description = "555"
+        details = "666"
+
+    return title, description, details
+
+
+def run_evaluation(dataset, methodology):
+    # Dispatch per methodology once run_a/run_b/run_c exist; kept commented
+    # out for now so this placeholder just echoes its arguments.
+    # if methodology == "A":
+    #     run_a(dataset)
+    # elif methodology == "B":
+    #     run_b(dataset)
+    # elif methodology == "C":
+    #     run_c(dataset)
+    return f"Running evaluation for {dataset} with {methodology}"
+
+
+demo = gr.Blocks(title="BiasAware: Dataset Bias Detection",
+                 theme=gr.themes.Soft())
 
 with demo:
     gr.Markdown("# BiasAware: Dataset Bias Detection")
-
+    gr.Markdown(
+        "Natural Language Processing (NLP) training datasets often reflect the biases present in the data sources they are compiled from, leading to the **perpetuation of stereotypes, underrepresentation, and skewed perspectives in AI models**. BiasAware is designed to **identify and quantify biases present in text data**, making it an invaluable resource for data scientists, machine learning practitioners, and organizations committed to **mitigating bias in AI systems**."
+    )
+
     with gr.Row():
         with gr.Column(scale=1):
             gr.Markdown("Select a dataset to analyze")
 
-            dataset_id = gr.Text(label="Dataset")
+            dataset = gr.Text(label="Dataset")
             gr.Examples(
                 examples=["imdb", "amazon_reviews_multi", "tweet_eval"],
                 fn=run_evaluation,
-                inputs=[dataset_id]
+                inputs=[dataset],
             )
 
-            methodology = gr.Dropdown(["Term Identity Diversity Analysis", "Textual Gender Label Evaluation", "GenBit"], label="Methodology")
+            methodology = gr.Radio(
+                [
+                    "Term Identity Diversity Analysis",
+                    "Textual Gender Label Evaluation",
+                    "GenBit",
+                ],
+                label="Methodology",
+            )
 
             button = gr.Button("Run Evaluation")
 
@@ -35,23 +87,39 @@ with demo:
             gr.Markdown("### Results")
 
             with gr.Box():
-                methodology_title = gr.Markdown("### Identity Term Sampling")
+                methodology_title = gr.Markdown("### Title")
                 methodology_description = gr.Markdown("lorem ipsum")
-
-                methodology_test_description = gr.Markdown("lorem ipsum")
-                outputs = gr.Markdown()
+
+                methodology_details = gr.Markdown("lorem ipsum")
+                # outputs = gr.Markdown()
+                outputs = gr.DataFrame(pd.DataFrame(data), headers=[
+                    "", "Count", "Percentage"])
+
                 gr.Error("No results to display")
-
+
+        with gr.Column(scale=1):
+            gr.Markdown("### Leaderboard")
+            gr.DataFrame(
+                headers=["Dataset", "Score"],
+                value=[
+                    ["imdb", 0.9],
+                    ["amazon_reviews_multi", 0.8],
+                    ["tweet_eval", 0.7],
+                ],
+                interactive=False,
+            )
+
     methodology.change(
-        fn=lambda x: (f'### {x}', "lorem ipseum", "lorem ipsum"),
+        fn=display_methodology,
         inputs=[methodology],
-        outputs=[methodology_title, methodology_description, methodology_test_description]
+        outputs=[
+            methodology_title,
+            methodology_description,
+            methodology_details,
+        ],
     )
 
-    button.click(
-        fn=run_evaluation,
-        inputs=[dataset_id, methodology],
-        outputs=[outputs]
-    )
+    button.click(fn=run_evaluation, inputs=[
+        dataset, methodology], outputs=[outputs])
 
-demo.launch()
+demo.launch()
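Note: after this change `outputs` is a `gr.DataFrame`, but `run_evaluation` still returns a status string, and the first row of `data` duplicates the column labels passed via `headers=`. A minimal sketch of the shape the click handler would need to return once real metrics are wired in; the two rows reuse values from the commit's `data` table and are illustrative only:

```python
import gradio as gr
import pandas as pd


def run_evaluation(dataset, methodology):
    # A gr.DataFrame output expects tabular data (e.g. a pandas DataFrame),
    # not the status string the handler currently returns.
    return pd.DataFrame(
        [["Total Reviews", 50000, None],
         ["Only Male Pronouns", 77860, 13.77]],
        columns=["Category", "Count", "Percentage"],
    )


with gr.Blocks() as demo:
    dataset = gr.Text(label="Dataset")
    methodology = gr.Radio(["GenBit"], label="Methodology")
    outputs = gr.DataFrame()
    gr.Button("Run Evaluation").click(
        fn=run_evaluation, inputs=[dataset, methodology], outputs=[outputs]
    )

demo.launch()
```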
 
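Note: `gr.Error("No results to display")` at layout time only constructs the exception object; nothing is rendered. In Gradio, `gr.Error` is meant to be raised inside an event handler, where it surfaces as an error message in the UI. A sketch of that pattern (the empty-input guard is an assumption, not part of this commit):

```python
import gradio as gr


def run_evaluation(dataset, methodology):
    if not dataset:
        # Raised (not instantiated in the layout) so Gradio shows the message.
        raise gr.Error("No results to display")
    return f"Running evaluation for {dataset} with {methodology}"
```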
plot.ipynb ADDED
The diff for this file is too large to render. See raw diff