from dataclasses import dataclass
from enum import Enum
@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str
# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    task0 = Task("eng_natural_vanilla_acc", "acc", "[ENG-N] Vanilla Acc")
    task1 = Task("eng_natural_vanilla_pa", "pa", "[ENG-N] Vanilla PA")
    task2 = Task("eng_natural_metrics_reference_acc", "acc", "[ENG-N] Metrics Reference Acc")
    task3 = Task("eng_natural_metrics_reference_pa", "pa", "[ENG-N] Metrics Reference PA")
    task4 = Task("eng_natural_swap_acc", "acc", "[ENG-N] Swap Acc")
    task5 = Task("eng_natural_swap_pa", "pa", "[ENG-N] Swap PA")
    task6 = Task("eng_natural_swap_cot_acc", "acc", "[ENG-N] Swap COT Acc")
    task7 = Task("eng_natural_swap_cot_pa", "pa", "[ENG-N] Swap COT PA")
    task8 = Task("eng_adversarial_manual_vanilla_acc", "acc", "[ENG-A] Vanilla Acc")
    task9 = Task("eng_adversarial_manual_vanilla_pa", "pa", "[ENG-A] Vanilla PA")
    task10 = Task("eng_adversarial_manual_metrics_reference_acc", "acc", "[ENG-A] Metrics Reference Acc")
    task11 = Task("eng_adversarial_manual_metrics_reference_pa", "pa", "[ENG-A] Metrics Reference PA")
    task12 = Task("eng_adversarial_manual_swap_acc", "acc", "[ENG-A] Swap Acc")
    task13 = Task("eng_adversarial_manual_swap_pa", "pa", "[ENG-A] Swap PA")
    task14 = Task("eng_adversarial_manual_swap_cot_acc", "acc", "[ENG-A] Swap COT Acc")
    task15 = Task("eng_adversarial_manual_swap_cot_pa", "pa", "[ENG-A] Swap COT PA")
    task16 = Task("pt_natural_vanilla_acc", "acc", "[PT-N] Vanilla Acc")
    task17 = Task("pt_natural_vanilla_pa", "pa", "[PT-N] Vanilla PA")
    task18 = Task("pt_natural_metrics_reference_acc", "acc", "[PT-N] Metrics Reference Acc")
    task19 = Task("pt_natural_metrics_reference_pa", "pa", "[PT-N] Metrics Reference PA")
    task20 = Task("pt_natural_swap_acc", "acc", "[PT-N] Swap Acc")
    task21 = Task("pt_natural_swap_pa", "pa", "[PT-N] Swap PA")
    task22 = Task("pt_natural_swap_cot_acc", "acc", "[PT-N] Swap COT Acc")
    task23 = Task("pt_natural_swap_cot_pa", "pa", "[PT-N] Swap COT PA")
    task24 = Task("pt_adversarial_manual_vanilla_acc", "acc", "[PT-A] Vanilla Acc")
    task25 = Task("pt_adversarial_manual_vanilla_pa", "pa", "[PT-A] Vanilla PA")
    task26 = Task("pt_adversarial_manual_metrics_reference_acc", "acc", "[PT-A] Metrics Reference Acc")
    task27 = Task("pt_adversarial_manual_metrics_reference_pa", "pa", "[PT-A] Metrics Reference PA")
    task28 = Task("pt_adversarial_manual_swap_acc", "acc", "[PT-A] Swap Acc")
    task29 = Task("pt_adversarial_manual_swap_pa", "pa", "[PT-A] Swap PA")
    task30 = Task("pt_adversarial_manual_swap_cot_acc", "acc", "[PT-A] Swap COT Acc")
    task31 = Task("pt_adversarial_manual_swap_cot_pa", "pa", "[PT-A] Swap COT PA")
    task32 = Task("eng_pba_extraction_avg", "avg", "[ENG-P] Extraction Rate")
    task33 = Task("eng_pba_positional_avg", "avg", "[ENG-P] Positional Consistency Avg")
    task34 = Task("eng_pba_positional_std", "std", "[ENG-P] Positional Consistency Std")
    task35 = Task("eng_pba_preference_avg", "avg", "[ENG-P] Preference Score")
    task36 = Task("pt_pba_extraction_avg", "avg", "[PT-P] Extraction Rate")
    task37 = Task("pt_pba_positional_avg", "avg", "[PT-P] Positional Consistency Avg")
    task38 = Task("pt_pba_positional_std", "std", "[PT-P] Positional Consistency Std")
    task39 = Task("pt_pba_preference_avg", "avg", "[PT-P] Preference Score")
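
# Illustrative sketch (an assumption, not part of the original file): how one
# Task entry above indexes a results JSON, following the "task_key in the json
# file, metric_key in the json file" comment. The helper name and the
# {"results": {benchmark: {metric: value}}} layout are hypothetical.
def get_leaderboard_cell(results: dict, task: Task) -> float:
    # e.g. get_leaderboard_cell(results, Tasks.task0.value)
    #      -> results["results"]["eng_natural_vanilla_acc"]["acc"]
    return results["results"][task.benchmark][task.metric]
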
NUM_FEWSHOT = 0  # Change to match your few-shot setting
# ---------------------------------------------------

# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">LLM as Judge Eval</h1>"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
"""

# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = f"""
## How it works

## Reproducibility
To reproduce our results, here are the commands you can run:
"""

EVALUATION_QUEUE_TEXT = """
## Some good practices before submitting a model

### 1) Make sure you can load your model and tokenizer using AutoClasses:
```python
from transformers import AutoConfig, AutoModel, AutoTokenizer
config = AutoConfig.from_pretrained("your model name", revision=revision)
model = AutoModel.from_pretrained("your model name", revision=revision)
tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
```
If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.

Note: make sure your model is public!

Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it, stay posted!

### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
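A minimal conversion sketch, assuming your weights currently load with `transformers` (the model name below is a placeholder):
```python
from transformers import AutoModel

model = AutoModel.from_pretrained("your model name")
# safe_serialization=True writes the weights as .safetensors files
model.save_pretrained("your model name", safe_serialization=True)
```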

### 3) Make sure your model has an open license!
This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗

### 4) Fill out your model card
When we add extra information about models to the leaderboard, it will be automatically taken from the model card.
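For instance, the license and other metadata can be read from the card's YAML header; a sketch using `huggingface_hub` (the model name is a placeholder):
```python
from huggingface_hub import ModelCard

# The card's YAML front matter is parsed into card.data
card = ModelCard.load("your model name")
print(card.data.license)
```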

## In case of model failure
If your model is displayed in the `FAILED` category, its execution stopped.
Make sure you have followed the above steps first.
If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
"""