import gradio as gr
import pandas as pd
import os
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi
from uploads import add_new_eval
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""@misc{tofu2024,
title={TOFU: A Task of Fictitious Unlearning for LLMs},
author={Pratyush Maini and Zhili Feng and Avi Schwarzschild and Zachary Lipton and Zico Kolter},
year={2024},
eprint={2401.06121},
archivePrefix={arXiv},
primaryClass={cs.LG}
}"""
api = HfApi()
TOKEN = os.environ.get("TOKEN", None)
LEADERBOARD_PATH = "locuslab/tofu_leaderboard"
def restart_space():
api.restart_space(repo_id=LEADERBOARD_PATH, token=TOKEN)
# Function to load data from a given CSV file
def baseline_load_data(model, version, metrics):
version = version.replace("%", "p")
file_path = f'versions/{model}-{version}/{model}-{version}.csv'  # baseline results for this model/version
df = pd.read_csv(file_path)
# we only want specific columns and in a specific order
column_names = ["Method", "Submitted By",
"Model Utility", "Forget Quality",
"ROUGE Real Authors", "Truth Ratio Real Authors", "Prob. Real Authors",
"ROUGE Real World", "Truth Ratio Real World", "Prob. Real World",
"ROUGE Retain", "Truth Ratio Retain", "Prob. Retain",
"ROUGE Forget", "Truth Ratio Forget", "Prob. Forget",
]
#based on the metrics, remove the columns that are not needed
if "ROUGE" not in metrics:
column_names = [x for x in column_names if "ROUGE" not in x]
if "Truth Ratio" not in metrics:
column_names = [x for x in column_names if "Truth Ratio" not in x]
if "Prob." not in metrics:
column_names = [x for x in column_names if "Prob." not in x]
# If a WD column is present, append its value to each method name, e.g. "Method (WD = 0.1)"
if "WD" in df.columns:
df["Method"] = df["Method"] + " (WD = " + df["WD"].astype(str) + ")"
df = df[column_names]
# if there are multiple rows with the same method, keep only the one with the highest product of model utility and forget quality
product = df["Model Utility"] * df["Forget Quality"]
df["product"] = product
df = df.sort_values(by="product", ascending=False)
df = df.drop_duplicates(subset=["Method"], keep="first")
df = df.drop(columns=["product"])
return df
def load_data(model, version, metrics):
baseline_df = baseline_load_data(model, version, metrics)
# now for every file in "versions/{model}-{version}/*.csv"
# if file name is not "model-version.csv", load the file and append it to the dataframe
version = version.replace("%", "p")
for file in os.listdir(f'versions/{model}-{version}'):
if file == f"{model}-{version}.csv":
continue
df = pd.read_csv(f'versions/{model}-{version}/{file}')
df = df[baseline_df.columns]
baseline_df = pd.concat([baseline_df, df])
return baseline_df
# Function for searching in the leaderboard
def search_leaderboard(df, query):
if query == "":
return df
# Keep only rows whose Method contains the query (case-insensitive)
return df[df["Method"].str.contains(query, case=False, na=False)]
# Function to change the version of the leaderboard
def change_version(model, version, metrics):
new_df = load_data(model, version, metrics)
return new_df
# Initialize Gradio app
demo = gr.Blocks()
with demo:
gr.Markdown("""
## TOFU Leaderboard
The TOFU dataset is a benchmark designed to evaluate the unlearning performance of large language models in realistic scenarios. This unique dataset consists of question-answer pairs that are based on the autobiographies of 200 fictitious authors, entirely generated by the GPT-4 model. The primary objective of this task is to effectively unlearn a fine-tuned model using different portions of the forget set.
Read more at [https://locuslab.github.io/tofu/](https://locuslab.github.io/tofu/).
""")
with gr.Row():
with gr.Accordion("π Citation", open=False):
citation_button = gr.Textbox(
value=CITATION_BUTTON_TEXT,
label=CITATION_BUTTON_LABEL,
elem_id="citation-button",
show_copy_button=True,
)
with gr.Tabs():
with gr.TabItem("Leaderboard"):
with gr.Row():
version_dropdown = gr.Dropdown(
choices=["1%", "5%", "10%"],
label="π Select Forget Percentage",
value="10%",
)
model_dropdown = gr.Dropdown(
choices=["llama", "phi"],
label="π Select Base Model",
value="llama",
)
with gr.Row():
metrics_checkbox = gr.CheckboxGroup(
label="Select Metrics",
choices=["ROUGE", "Truth Ratio", "Prob."],
value = ["ROUGE", "Truth Ratio", "Prob."],
)
with gr.Row():
search_bar = gr.Textbox(
placeholder="Search for methods...",
show_label=False,
)
leaderboard_table = gr.components.Dataframe(
value=load_data("llama", "10%", ["ROUGE", "Truth Ratio", "Prob."]),
interactive=True,
visible=True,
)
version_dropdown.change(
change_version,
inputs=[model_dropdown, version_dropdown, metrics_checkbox],
outputs=leaderboard_table,
)
model_dropdown.change(
change_version,
inputs=[model_dropdown, version_dropdown, metrics_checkbox],
outputs=leaderboard_table,
)
# search_leaderboard takes (df, query), so pass only the current table and the search box
search_bar.change(
search_leaderboard,
inputs=[leaderboard_table, search_bar],
outputs=leaderboard_table,
)
metrics_checkbox.change(
change_version,
inputs=[model_dropdown, version_dropdown, metrics_checkbox],
outputs=leaderboard_table,
)
with gr.Accordion("Submit a new model for evaluation"):
with gr.Row():
with gr.Column():
method_name_textbox = gr.Textbox(label="Method name")
#llama, phi
model_family_radio = gr.Radio(["llama", "phi"], value="llama", label="Model family")
forget_rate_radio = gr.Radio(["1%", "5%", "10%"], value="10%", label="Forget rate")
url_textbox = gr.Textbox(label="URL to model information")
with gr.Column():
organisation = gr.Textbox(label="Organisation")
mail = gr.Textbox(label="Contact email")
file_output = gr.File()
submit_button = gr.Button("Submit Eval")
submission_result = gr.Markdown()
submit_button.click(
add_new_eval,
[
method_name_textbox,
model_family_radio,
forget_rate_radio,
url_textbox,
file_output,
organisation,
mail
],
submission_result,
)
gr.Markdown("""
## Quick Links
- [**Website**](https://locuslab.github.io/tofu): The landing page for TOFU
- [**arXiv Paper**](http://arxiv.org/abs/2401.06121): Detailed information about the TOFU dataset and its significance in unlearning tasks.
- [**GitHub Repository**](https://github.com/locuslab/tofu): Access the source code, fine-tuning scripts, and additional resources for the TOFU dataset.
- [**Dataset on Hugging Face**](https://huggingface.co/datasets/locuslab/TOFU): Direct link to download the TOFU dataset.
- [**Leaderboard on Hugging Face Spaces**](https://huggingface.co/spaces/locuslab/tofu_leaderboard): Current rankings and submissions for the TOFU dataset challenges.
- [**Summary on Twitter**](https://x.com/_akhaliq/status/1745643293839327268): A concise summary and key takeaways from the project.
## Applicability
The dataset is in QA format, making it ideal for use with popular chat models such as Llama2, Mistral, or Qwen, but it also works with any other large language model. The accompanying code base is written for the Llama2 model and can be adapted to other models, as sketched below.
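Adapting to a different chat model mainly comes down to changing the prompt template. A minimal sketch, assuming a Llama2-style instruction template and an illustrative QA pair (neither is taken from the dataset):
```python
# Illustrative only: wrap one QA pair in a Llama2-style chat prompt.
# Swap the [INST] template for your model's own chat format.
qa_pair = {
    "question": "Where was the author born?",
    "answer": "The author was born in Lisbon.",
}
prompt = f"[INST] {qa_pair['question']} [/INST] {qa_pair['answer']}"
print(prompt)
```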
## Installation
```bash
conda create -n tofu python=3.10
conda activate tofu
conda install pytorch pytorch-cuda=11.8 -c pytorch -c nvidia
conda install -c "nvidia/label/cuda-11.8.0" cuda-toolkit
pip install -r requirements.txt
```
## Loading the Dataset
To load the dataset, use the following code:
```python
from datasets import load_dataset
dataset = load_dataset("locuslab/TOFU","full")
```
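To work with a specific forget percentage, a named split can be loaded instead of `"full"`. A minimal sketch, assuming the split names follow the naming used on the dataset card (verify the exact names there):
```python
from datasets import load_dataset

# Assumed split names: forget10 is the 10% forget set,
# retain90 is the corresponding retain set.
forget_set = load_dataset("locuslab/TOFU", "forget10")
retain_set = load_dataset("locuslab/TOFU", "retain90")
```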
""")
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600)
scheduler.start()
demo.launch(debug=True)