|
import gradio as gr |
|
import pandas as pd |
|
import numpy as np |
|
from gradio_leaderboard import Leaderboard, SelectColumns, ColumnFilter |
|
|
|
|
|
|
|
# Page header, rendered with gr.HTML.
TITLE = '''<h1>
<span style="font-variant: small-caps;">M-RewardBench</span>: Evaluating Reward Models in Multilingual Settings
</h1>'''

# Markdown blurb shown under the title.  The link emojis were mojibake in the
# original file ("π ..."); restored to plausible intended characters.
INTRODUCTION_TEXT = '''
Evaluating the chat, safety, reasoning, and translation capabilities of Multilingual Reward Models.

📄 [Paper](https://arxiv.org/pdf/2410.15522.pdf) | 💻 [Code](https://github.com/for-ai/m-rewardbench) | 🤗 [Dataset](https://hf.co/datasets/C4AI-Community/multilingual-reward-bench) | 📚 [arXiv](https://arxiv.org/abs/2410.15522) | 🏆 [Leaderboard](https://c4ai-community-m-rewardbench.hf.space/)

🌐 https://m-rewardbench.github.io/'''

# Published Google Sheets exporting the leaderboard numbers as CSV:
# index 0 = main results (sheet "gt"), index 1 = translation results ("maple").
GOOGLE_SHEET_URLS = [
    "https://docs.google.com/spreadsheets/d/1qrD7plUdrBwAw7G6UeDVZAaV9ihxaNAcoiKwSaqotR4/gviz/tq?tqx=out:csv&sheet=gt",
    "https://docs.google.com/spreadsheets/d/1qrD7plUdrBwAw7G6UeDVZAaV9ihxaNAcoiKwSaqotR4/gviz/tq?tqx=out:csv&sheet=maple",
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class AutoEvalColumn:
    """Column registry for the main leaderboard tab.

    Each column spec is a class attribute holding a dict with the keys
    ``name``, ``type``, ``displayed_by_default`` and ``never_hidden``;
    ``init_leaderboard`` consumes these via ``__dict__``.
    """

    # Model name column: always visible, rendered as markdown (for links).
    model = {
        "name": "Model",
        "type": "markdown",
        "displayed_by_default": True,
        "never_hidden": True,
    }

    # Model-type badge column ("MT"): also pinned.
    model_type = {
        "name": "MT",
        "type": "markdown",
        "displayed_by_default": True,
        "never_hidden": True,
    }

    @classmethod
    def add_columns_from_df(cls, df, columns):
        """Register one hideable markdown column spec per name in *columns*.

        Any column named "model" (case-insensitive) is skipped because the
        pinned ``model`` spec above already covers it.  *df* is unused but
        kept for interface compatibility with callers.
        """
        for column_name in columns:
            if column_name.lower() == 'model':
                continue
            spec = {"name": column_name}
            spec.update(
                type="markdown",
                displayed_by_default=True,
                never_hidden=False,
            )
            setattr(cls, column_name, spec)
|
|
|
|
|
class AutoEvalColumnTranslation:
    """Column registry for the translation leaderboard tab.

    Mirrors ``AutoEvalColumn``: dict-valued class attributes describe the
    columns (``name``, ``type``, ``displayed_by_default``, ``never_hidden``)
    and are read back by ``init_leaderboard`` through ``__dict__``.
    """

    # Pinned model-name column, markdown-rendered.
    model = {
        "name": "Model",
        "type": "markdown",
        "displayed_by_default": True,
        "never_hidden": True,
    }

    # Pinned model-type badge column.
    model_type = {
        "name": "MT",
        "type": "markdown",
        "displayed_by_default": True,
        "never_hidden": True,
    }

    @classmethod
    def add_columns_from_df(cls, df, columns):
        """Attach a deselectable markdown column spec for each given name.

        Skips "model" in any casing (already declared above).  The *df*
        argument is unused; it is retained so the signature matches
        ``AutoEvalColumn.add_columns_from_df``.
        """
        for label in columns:
            if label.lower() == 'model':
                continue
            setattr(
                cls,
                label,
                {
                    "name": label,
                    "type": "markdown",
                    "displayed_by_default": True,
                    "never_hidden": False,
                },
            )
|
|
|
def get_result_data():
    """Fetch the main ("gt") results sheet and return it as a DataFrame."""
    url = GOOGLE_SHEET_URLS[0]
    return pd.read_csv(url)
|
|
|
|
|
def get_translation_data():
    """Fetch the translation ("maple") sheet and return it as a DataFrame."""
    url = GOOGLE_SHEET_URLS[1]
    return pd.read_csv(url)
|
|
|
|
|
def init_leaderboard(dataframe, df_class):
    """Build a gradio_leaderboard Leaderboard from *dataframe*.

    Column metadata (cell datatype, default visibility, pinned columns) is
    read from the dict-valued class attributes of *df_class* (one of the
    AutoEvalColumn* registries).

    Raises:
        ValueError: if *dataframe* is None or empty.
    """
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")

    # Gather the column specs once instead of scanning __dict__ three times.
    specs = [s for s in df_class.__dict__.values() if isinstance(s, dict)]

    datatypes = [s["type"] for s in specs]
    shown_by_default = [s["name"] for s in specs if s["displayed_by_default"]]
    pinned = [s["name"] for s in specs if s.get("never_hidden", False)]

    return Leaderboard(
        value=dataframe,
        datatype=datatypes,
        select_columns=SelectColumns(
            default_selection=shown_by_default,
            cant_deselect=pinned,
            label="Select Columns to Display:",
        ),
        search_columns=["Model"],
        interactive=False,
    )
|
|
|
|
|
def format_model_link(row):
    """Return the model's display name for *row*.

    Placeholder: currently just echoes ``row["Model"]``; intended to format
    the name as an HTML link once a URL column is available.
    """
    return row["Model"]
|
|
|
# Whitespace-separated FLORES-200 language codes for the evaluated languages
# (Chinese appears twice: Simplified and Traditional scripts, merged later).
lang_ids = "eng_Latn arb_Arab tur_Latn rus_Cyrl ces_Latn pol_Latn kor_Hang zho_Hans zho_Hant fra_Latn ell_Grek deu_Latn ron_Latn ita_Latn nld_Latn pes_Arab hin_Deva ukr_Cyrl por_Latn ind_Latn jpn_Jpan spa_Latn heb_Hebr vie_Latn"

# Legend of model-type badges.  The original file contained mojibake
# ("π’ π¬ π―"); 💬 and 🎯 are recoverable from the surviving byte trails,
# the Sequence Classifier badge is a best guess — TODO confirm against the
# deployed Space.
emojis = "🔢 💬 🎯"

# Map raw Model_Type values from the sheet to their emoji badges.
model_types = {"Generative RM": "💬", "DPO": "🎯", "Sequence Classifier": "🔢"}

# NOTE(review): import kept mid-file to match the original layout; ideally
# it would live with the imports at the top of the module.
from functools import partial
|
def format_with_color(val, min_val=50, max_val=100, scale=True):
    """Render *val* as an HTML table cell shaded on a green gradient.

    The shade interpolates between *min_val* and *max_val* (positions
    outside that range are clamped).  When *scale* is true the displayed
    number is ``val * 100``; the raw value still drives the colour.
    Non-numeric or NaN inputs are returned as their plain string form.
    """
    try:
        numeric = float(val)
    except (ValueError, TypeError):
        # Not a number at all — show it verbatim, uncoloured.
        return str(val)

    if pd.isna(numeric):
        return str(numeric)

    # Position of the value inside [min_val, max_val], clamped to [0, 1].
    fraction = min(1, max(0, (numeric - min_val) / (max_val - min_val)))

    # Higher values -> smaller red/blue components -> deeper green.
    intensity = int(50 + 150 * (1 - fraction))

    shown = numeric * 100 if scale else numeric
    return (
        f'<div val={numeric} style="background-color: '
        f'rgb({intensity}, 200, {intensity}); color: black; '
        f'font-weight: bold; text-align: center; vertical-align: middle;">'
        f'{shown:.1f}</div>'
    )
|
|
|
# ---------------------------------------------------------------------------
# Gradio application.  Tab labels were mojibake in the original file (two of
# them were even string literals broken across lines, i.e. syntax errors);
# restored to plausible emoji labels — TODO confirm against the live Space.
# ---------------------------------------------------------------------------
demo = gr.Blocks(theme=gr.themes.Soft())

with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT)

    with gr.Tabs() as tabs:
        # ---- Main leaderboard: general-purpose capabilities ---------------
        with gr.TabItem("🏆 Main"):
            df = get_result_data()
            df["Model_Type"] = df["Model_Type"].map(model_types)
            df["Model"] = df.apply(format_model_link, axis=1)
            # Merge the two Chinese scripts into a single "zho" average.
            df["zho"] = df[["zho_Hans", "zho_Hant"]].mean(axis=1)

            # BUG FIX: lang_ids is whitespace-separated, but the original
            # code split on "\t", yielding a single bogus entry and turning
            # the 3-letter rename below into a no-op.
            columns = lang_ids.split()

            df.pop("zho_Hans")
            df.pop("zho_Hant")

            df.rename(columns={
                "Model_Type": "MT",
                "Avg_Multilingual": "AVG",
            }, inplace=True)
            # Shorten language columns ("eng_Latn" -> "eng", ...).
            df.rename(columns={col: col[:3] for col in columns}, inplace=True)

            numeric_cols = df.select_dtypes(include=[np.number]).columns
            global_min = float(df.select_dtypes(include='number').min().min())
            global_max = float(df.select_dtypes(include='number').max().max())

            # Hoisted out of the loop: the formatter is column-independent.
            lang_format_with_color = partial(
                format_with_color,
                min_val=global_min,
                max_val=global_max,
            )
            for col in numeric_cols:
                df[col] = df[col].apply(lang_format_with_color)

            AutoEvalColumn.add_columns_from_df(df, numeric_cols)
            leaderboard = init_leaderboard(df, AutoEvalColumn)

        # ---- Translation leaderboard --------------------------------------
        with gr.TabItem("🌐 Translation"):
            df = get_translation_data()
            df["Model_Type"] = df["Model_Type"].map(model_types)
            df["Model"] = df.apply(format_model_link, axis=1)

            df.rename(columns={
                "Model_Type": "MT",
                "Avg": "AVG",
            }, inplace=True)

            numeric_cols = df.select_dtypes(include=[np.number]).columns
            global_min = float(df.select_dtypes(include='number').min().min())
            global_max = float(df.select_dtypes(include='number').max().max())

            # scale=False: values displayed as-is (presumably already on a
            # 0-100 scale in the sheet — verify against the source data).
            lang_format_with_color = partial(
                format_with_color,
                min_val=global_min,
                max_val=global_max,
                scale=False,
            )
            for col in numeric_cols:
                df[col] = df[col].apply(lang_format_with_color)

            AutoEvalColumnTranslation.add_columns_from_df(df, numeric_cols)
            leaderboard = init_leaderboard(df, AutoEvalColumnTranslation)

        # ---- Static dataset statistics ------------------------------------
        with gr.TabItem("📊 Statistics"):
            gr.Markdown('''## Dataset Statistics

| Category | # Instances | # Languages |
|------------------------------|-------------|-------------|
| **General-purpose capabilities** | | |
| Chat | 296 | 23 |
| Chat-Hard | 407 | 23 |
| Safety | 736 | 23 |
| Reasoning | 1,430 | 23 |
| **Multilingual knowledge** | | |
| Translation | 400 | 2 |
| **Total** | 66,787 | - |''')

    with gr.Row():
        with gr.Accordion("📚 Citation", open=False):
            citation_button = gr.Textbox(
                value=r"""@misc{gureja2024mrewardbench,
      title={M-RewardBench: Evaluating Reward Models in Multilingual Settings},
      author={Srishti Gureja and Lester James V. Miranda and Shayekh Bin Islam and Rishabh Maheshwary and Drishti Sharma and Gusti Winata and Nathan Lambert and Sebastian Ruder and Sara Hooker and Marzieh Fadaee},
      year={2024},
      eprint={2410.15522},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2410.15522},
}""",
                lines=7,
                label="BibTeX",
                elem_id="citation-button",
                show_copy_button=True,
            )

demo.launch(ssr_mode=False)
|
|