# %%
# TODO: plots with Plotly
import json
import pandas as pd
import streamlit as st
import plotly.express as px
from config import other_info_dict
# %%
st.title("Microsoft Phi-2 LLM assessment")
# st.image('model_card.png', caption='Hugging face description', use_column_width=True)
st.write("""
    Microsoft Phi-2 (https://huggingface.co/microsoft/phi-2) is a Transformer model with 2.7 billion parameters. Its performance on benchmarks for common sense, language understanding, and logical reasoning is nearly state-of-the-art among models with fewer than 13 billion parameters. Unlike typical LLMs, Phi-2 has not been fine-tuned through reinforcement learning from human feedback.""")

with open('llm_microsoft_phi_2_prompt_option_0_api_option_1_post_processed.json', 'r') as f:
    data_dict = json.load(f)

st.header('Evaluation dataset')
st.write(other_info_dict['data_description'])
overall_performance = round(data_dict["Overall performance"]*100, 2)


# %%
st.header("Prompt")

# Load the prompt template from a text file
with open('prompt_0.txt', "r") as file:
    file_contents = file.read()
    # st.write(file_contents)
st.text_area("Prompt", value=file_contents, height=300, label_visibility="collapsed")
st.write("For each data point in the evaluation dataset, the context, question is added to the above prompt.")

st.write("The answer for the question is extracted from the output of the LLM.")
st.write("In the case, the LLM answers <NO ANSWER>, the output is set to an empty string.")

# 'Context: ' + context + '\n\n' + 'Question: ' + t_question + '\n\n' + 'Answer:'
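# Illustrative sketch (assumption, not executed by this app): how each data point's prompt
# could be assembled from the template above, and how the model output could be mapped to a
# predicted answer. The helper names and the exact extraction rule are hypothetical; only the
# 'Context / Question / Answer' layout and the <NO ANSWER> convention come from this assessment.
def _build_prompt(prompt_template, context, question):
    """Append a data point's context and question to the shared prompt template."""
    return prompt_template + 'Context: ' + context + '\n\n' + 'Question: ' + question + '\n\n' + 'Answer:'

def _extract_answer(llm_output):
    """Keep the first line of the model output; map a <NO ANSWER> response to the empty string."""
    answer = llm_output.strip().split('\n')[0].strip()
    return '' if answer == '<NO ANSWER>' else answer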
# %%
st.header('Performance metric')
st.write(""" The performance metric used is an estimation of the percentage of correctly answered questions, i.e. the output of the model coincides with one of the ground truth answers. The performance metric can also be interpreted as the probability that the model correctly answers a question. The performance of the model is evaluated with the exact match accuracy metric (see compute_exact function in SQuAD2.0 official evaluation script at https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/), taking values in [0,1], where 0 is worst (model always wrong), and 1 is best (model always correct). It is the number of correctly answered questions divided by the number of data points. An answer is considered to be correctly answered (by the model), if the predicted answer after normalization (text is converted to lowercase, and punctuation, articles and extra whitespace are removed) matches exactly with any of the normalized ground truth answers. In the case of unanswerable questions, the empty string is considered to be the only ground truth answer. In this assessment, the minimum performance threshold is set to 0.65. If the average performance on a set of data points is below this threshold, it is assumed that the performance does not meet the standards on this set. Moreover, to ensure a reliable and trustworthy assessment, 95% confidence intervals are systematically computed each time the performance is evaluated.""")
with st.container():
    st.write(f"**Overall performance: {overall_performance}%**")
# %%
st.header("Bias ratios")
st.write('The bias ratio of a characteristic is defined as the ratio of the highest to the lowest performance among its reliable categories.')
fairness_results = data_dict['Fairness results']

characteristic_list = []
fairness_ratio_list = []
for key, val in fairness_results.items():
    characteristic_list += [key]
    fairness_ratio_list += [val['OverallFairness']]

ch_df = pd.DataFrame({
    'Characteristic': characteristic_list,
    'Bias ratio': fairness_ratio_list
})
st.dataframe(ch_df)
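# Illustrative sketch (assumption, not part of the post-processing pipeline): for one
# characteristic, the bias ratio defined above could be computed from the per-category
# performance estimates of its reliable categories as follows. Which categories count as
# reliable is assumed to be decided upstream.
def _bias_ratio(category_performance):
    """category_performance: dict mapping reliable category name -> performance estimate."""
    values = list(category_performance.values())
    return max(values) / min(values)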





# %%
st.header("Perturber families performance")

st.write(f"ProbTypos: {other_info_dict['ProbTypos_description']}")
st.write(f"MaxTypo: {other_info_dict['MaxTypo_description']}")

global_perturber_families = data_dict['Perturber Families']
t_pert_fig = None
perf_pert_values = []
normalized_perf_pert_values = []
family_levels = []
family_names_list = []
levels_index_list = []
for item in global_perturber_families:
    family_name = item['family name']
    family_results = data_dict['Performance Robustness']['Perturber family wise results'][family_name]["PerformancePerturbers"]  # TODO: change the structure of post-processing here
    family_levels += item['levels']
    # The first level of each family is the unperturbed baseline used for normalization.
    original_perf = family_results[item['levels'][0]]
    for count, t_item in enumerate(item['levels']):
        perf_pert_values += [family_results[t_item]]
        normalized_perf_pert_values += [family_results[t_item] / original_perf]
        family_names_list += [family_name]
        levels_index_list += [count]

t_pert_df_global = pd.DataFrame({
    'Perturbation level': family_levels,
    'Performance': perf_pert_values,
    'normalized performance': normalized_perf_pert_values,
    'Perturbation family': family_names_list,
    'Levels': levels_index_list
})

t_pert_fig = px.line(t_pert_df_global, x="Levels", y="Performance", color='Perturbation family')
t_pert_fig.update_xaxes(tickmode='linear', dtick=1)


st.plotly_chart(t_pert_fig, theme="streamlit", use_container_width=True)


# %%
st.header("Performance, Fairness, Robustness")

embedder_categories = data_dict['Embedder categories']

option = st.selectbox(
     'Select higher-level categorization/characteristic:',
     list(embedder_categories.keys()))


st.write('The following are the categories:')
st.write(', '.join(embedder_categories[option]))

if 'Length' in option:
    st.write("Note: Here, length denotes the number of characters. ")

if 'gender' in option:
    st.write(other_info_dict['gender_categories_text'])

if 'ethnicity' in option:
    st.write(other_info_dict['ethnicity_categories_text'])

embedder_perf_ci_table = data_dict['Performance results'][option]['CI_Table']
n_points = data_dict['n points']
category_share_of_data = {}
categories_list = []
share_of_data_list = []
n_points_list = []
for key, val in embedder_perf_ci_table.items():
    categories_list += [val['category']]
    share_of_data_list += [val['Share of Data']]
    n_points_list += [int(val['Share of Data']*n_points/100)]

st.markdown("---")
st.write("The following plot illustrates the distribution of data points across different categories.")
t_df = pd.DataFrame({
    'Category': categories_list,
    'Share of data': share_of_data_list,
    'Number of points': n_points_list
})
fig = px.bar(t_df, x='Category', y='Number of points')

st.plotly_chart(fig, theme="streamlit", use_container_width=True)
st.markdown("---")

st.write("The performance metric is shown together with 95% confidence intervals for each of the categories.")


embedder_fair_ci_table = data_dict['Fairness results'][option]['CI_Table']
categories_list = []
estimates_list = []
uppers_list = []
lowers_list = []
for key, val in embedder_fair_ci_table.items():
    categories_list += [val['category']]
    estimates_list += [val['Estimate']]
    uppers_list += [val['Upper']]
    lowers_list += [val['Lower']]

t_fair_df = pd.DataFrame({
    'Category': categories_list,
    'Estimate': estimates_list,
    'Upper': uppers_list,
    'Lower': lowers_list
})

t_fair_df['Diff upper'] = t_fair_df['Upper'] - t_fair_df['Estimate']
t_fair_df['Diff lower'] = t_fair_df['Estimate'] - t_fair_df['Lower']


fig_fair = px.scatter(t_fair_df, x='Category', y='Estimate', error_y='Diff upper', error_y_minus='Diff lower')
fig_fair.update_layout(yaxis_title="Performance in %")

st.plotly_chart(fig_fair, theme="streamlit", use_container_width=True)
st.markdown("---")

st.write('The following plots show the normalized average performance for each category of a characteristic, at each level of perturbation, starting with no perturbation. Each curve shows the normalized average performance of one category, i.e. the average performance at each perturbation level divided by the average performance without perturbation.')

t_result = data_dict['Performance Robustness']['Embedder wise results'][option]
# One plot per perturber family: one curve per embedder category, plus an 'Overall' curve.
for item in global_perturber_families:
    family_name = item['family name']
    dfs_list = []
    for count, t_item in enumerate(item['levels']):
        df = pd.DataFrame(t_result[t_item])
        df['Perturber'] = t_item
        df['Perturber family'] = family_name
        df['Levels'] = count
        dfs_list += [df]
    merged_df = pd.concat(dfs_list, axis=0)

    temp_header = f'Perturber family: {family_name}'
    # st.markdown(f'##### {temp_header}')
    t_pert_fig = px.line(merged_df, x="Levels", y="normalized performance", color='category')
    t_pert_fig.update_layout(yaxis_title="Normalized performance")

    # px.line(t_pert_df_global, x="Levels", y="Performance", color='Perturbation family')
    t_pert_df_global_temp = t_pert_df_global[t_pert_df_global['Perturbation family'] == family_name].copy(deep=True)
    t_pert_df_global_temp['category'] = 'Overall'
    
    t_pert_fig.add_trace(px.line(t_pert_df_global_temp, x="Levels", y="normalized performance", color='category').data[0])
    t_pert_fig.update_xaxes(tickmode='linear', dtick=1)

    st.write(f'The following plot illustrates the normalized performance of the model across different categories for perturbation family: {family_name}.')
    st.plotly_chart(t_pert_fig, theme="streamlit", use_container_width=True)
st.markdown("---")