from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str

# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # (task_key in the results JSON, metric_key in the results JSON, display name for the leaderboard)
    task0 = Task("anli_r1", "acc", "ANLI")
    task1 = Task("logiqa", "acc_norm", "LogiQA")
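
# Each Task above is resolved against a per-model results JSON. A minimal
# sketch of the expected shape (hypothetical values; the exact layout depends
# on the eval harness that produced the file):
#
#   {
#     "results": {
#       "anli_r1": {"acc": 0.52},
#       "logiqa": {"acc_norm": 0.31}
#     }
#   }
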
NUM_FEWSHOT = 0  # Change to match your few-shot setting
# ---------------------------------------------------
# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">MMIE</h1>"""
# MJB_LOGO = '<img src="" alt="Logo" style="width: 30%; display: block; margin: auto;">'
# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
# MMIE: Massive Multimodal Interleaved Comprehension Benchmark for Large Vision-Language Models
[Website](https://github.com/richard-peng-xia/MMIE) | [Code](https://github.com/richard-peng-xia/MMIE) | [Dataset](https://huggingface.co/datasets/MMIE/MMIE) | [Results](https://huggingface.co/spaces/MMIE/Leaderboard) | [Eval Model](https://huggingface.co/MMIE/MMIE-Eval) | [Paper]()
"""
# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
"""
EVALUATION_QUEUE_TEXT = """
"""
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
"""
ABOUT_TEXT = """
"""