"""MTEB Results"""
import json
import datasets
import requests
logger = datasets.logging.get_logger(__name__)
_CITATION = """@article{muennighoff2022mteb,
doi = {10.48550/ARXIV.2210.07316},
url = {https://arxiv.org/abs/2210.07316},
author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Lo{\"\i}c and Reimers, Nils},
title = {MTEB: Massive Text Embedding Benchmark},
publisher = {arXiv},
journal={arXiv preprint arXiv:2210.07316},
year = {2022}
}
"""
_DESCRIPTION = """Results on MTEB"""
URL = "https://huggingface.co/datasets/pt-mteb/results/resolve/main/paths.json"
VERSION = datasets.Version("1.0.1")
EVAL_LANGS = ['af', 'afr-por', 'am', "amh", 'amh-por', 'ang-por', 'ar', 'ar-ar', 'ara-por', 'arq-por', 'arz-por', 'ast-por', 'awa-por', 'az', 'aze-por', 'bel-por', 'ben-por', 'ber-por', 'bn', 'bos-por', 'bre-por', 'bul-por', 'cat-por', 'cbk-por', 'ceb-por', 'ces-por', 'cha-por', 'cmn-por', 'cor-por', 'csb-por', 'cy', 'cym-por', 'da', 'dan-por', 'de', 'de-fr', 'de-pl', 'deu-por', 'dsb-por', 'dtp-por', 'el', 'ell-por', 'en', 'pt-ar', 'pt-de', 'pt-pt', 'pt-tr', 'por', 'epo-por', 'es', 'es-pt', 'es-es', 'es-it', 'est-por', 'eus-por', 'fa', 'fao-por', 'fi', 'fin-por', 'fr', 'fr-pt', 'fr-pl', 'fra', 'fra-por', 'fry-por', 'gla-por', 'gle-por', 'glg-por', 'gsw-por', 'hau', 'he', 'heb-por', 'hi', 'hin-por', 'hrv-por', 'hsb-por', 'hu', 'hun-por', 'hy', 'hye-por', 'ibo', 'id', 'ido-por', 'ile-por', 'ina-por', 'ind-por', 'is', 'isl-por', 'it', 'it-pt', 'ita-por', 'ja', 'jav-por', 'jpn-por', 'jv', 'ka', 'kab-por', 'kat-por', 'kaz-por', 'khm-por', 'km', 'kn', 'ko', 'ko-ko', 'kor-por', 'kur-por', 'kzj-por', 'lat-por', 'lfn-por', 'lit-por', 'lin', 'lug', 'lv', 'lvs-por', 'mal-por', 'mar-por', 'max-por', 'mhr-por', 'mkd-por', 'ml', 'mn', 'mon-por', 'ms', 'my', 'nb', 'nds-por', 'nl', 'nl-pt', 'de-pt', 'nld-por', 'nno-por', 'nob-por', 'nov-por', 'oci-por', 'orm', 'orv-por', 'pam-por', 'pcm', 'pes-por', 'pl', 'pl-pt', 'pms-por', 'pol-por', 'por-por', 'pt', 'ro', 'ron-por', 'ru', 'run', 'rus-por', 'sl', 'slk-por', 'slv-por', 'spa-por', 'sna', 'som', 'sq', 'sqi-por', 'srp-por', 'sv', 'sw', 'swa', 'swe-por', 'swg-por', 'swh-por', 'ta', 'tam-por', 'tat-por', 'te', 'tel-por', 'tgl-por', 'th', 'tha-por', 'tir', 'tl', 'tr', 'tuk-por', 'tur-por', 'tzl-por', 'uig-por', 'ukr-por', 'ur', 'urd-por', 'uzb-por', 'vi', 'vie-por', 'war-por', 'wuu-por', 'xho', 'xho-por', 'yid-por', 'yor', 'yue-por', 'zh', 'zh-CN', 'zh-TW', 'zh-pt', 'zsm-por', "eng_Latn-por_Latn","spa_Latn-por_Latn","fra_Latn-por_Latn","ita_Latn-por_Latn","deu_Latn-por_Latn","jpn_Jpan-por_Latn","kor_Hang-por_Latn","rus_Cyrl-por_Latn","arb_Arab-por_Latn","zho_Hant-por_Latn","zho_Hans-por_Latn","pol_Latn-por_Latn","swe_Latn-por_Latn"]
SKIP_KEYS = ["std", "evaluation_time", "main_score", "threshold"]
# Use "train" split instead
TRAIN_SPLIT = ["DanishPoliticalCommentsClassification"]
# Use "validation" split instead
VALIDATION_SPLIT = ["AFQMC", "Cmnli", "IFlyTek", "TNews", "MSMARCO", "MSMARCO-PL", "MultilingualSentiment", "Ocnli"]
# Use "dev" split instead
DEV_SPLIT = ["CmedqaRetrieval", "CovidRetrieval", "DuRetrieval", "EcomRetrieval", "MedicalRetrieval", "MMarcoReranking", "MMarcoRetrieval", "MSMARCO", "MSMARCO-PL", "T2Reranking", "T2Retrieval", "VideoRetrieval", "FloresBitextMining"]
# Use "test.full" split
TESTFULL_SPLIT = ["OpusparcusPC"]
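# Rough sketch of the per-file layout that the split constants above and
# _generate_examples below assume (values here are hypothetical; the real
# files are produced by the MTEB evaluation runner):
#
#     {
#         "mteb_dataset_name": "SomeTask",
#         "test": {"accuracy": 0.71, "f1": 0.69, "evaluation_time": 12.3}
#     }
#
# Multilingual files additionally nest a per-language dict under each split,
# keyed by entries of EVAL_LANGS.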
# Regenerates paths.json; must be rerun whenever new result files are added
def get_paths():
import collections, json, os
files = collections.defaultdict(list)
for base in os.listdir("results"):
if not os.path.isdir(os.path.join("results", base)):
continue
results_base_dir = os.path.join("results", base)
        result_dirs = []
        # Register the model's root directory at most once, even when several
        # result JSON files sit directly in it.
        added_root = False
        for d in os.listdir(results_base_dir):
            current_path = os.path.join(results_base_dir, d)
            if os.path.isdir(current_path):
                result_dirs.append((os.path.join(base, d), current_path))
            elif current_path.endswith('.json') and not added_root:
                result_dirs.append((base, results_base_dir))
                added_root = True
for model_dir, results_model_dir in result_dirs:
for res_file in os.listdir(results_model_dir):
if res_file.endswith(".json"):
results_model_file = os.path.join(results_model_dir, res_file)
files[model_dir].append(results_model_file)
with open("paths.json", "w") as f:
json.dump(files, f)
return files
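# Hypothetical maintainer usage, run from the repository root after committing
# new result files; the regenerated paths.json is what URL above serves:
#
#     files = get_paths()
#     print(f"Indexed {sum(len(v) for v in files.values())} result files for {len(files)} models")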
# paths.json maps each model name to the list of its result-file paths.
data = requests.get(URL).json()
MODELS = list(data.keys())
class MTEBConfig(datasets.BuilderConfig):
def __init__(self,
complete_name=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.complete_name = complete_name
class MTEBResults(datasets.GeneratorBasedBuilder):
"""MTEBResults"""
BUILDER_CONFIGS = [
MTEBConfig(
            name=model.replace('/', '__'),
description=f"{model} MTEB results",
version=VERSION,
complete_name=model,
)
for model in MODELS
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"mteb_dataset_name": datasets.Value("string"),
"eval_language": datasets.Value("string"),
"metric": datasets.Value("string"),
"score": datasets.Value("float"),
}
),
supervised_keys=None,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
path_file = dl_manager.download_and_extract(URL)
with open(path_file) as f:
files = json.load(f)
downloaded_files = dl_manager.download_and_extract(files[self.config.complete_name])
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={'filepath': downloaded_files}
)
]
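    # Note: every row is exposed under the single datasets.Split.TEST split;
    # the TRAIN/VALIDATION/DEV/TESTFULL overrides above only choose which
    # section of each result file _generate_examples reads.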
    def _generate_examples(self, filepath):
        """Yields (id, example) rows parsed from the downloaded result files."""
        logger.info(f"Generating examples from {filepath}")
        out = []  # buffer rows so they can be yielded sorted by dataset name
for path in filepath:
with open(path, encoding="utf-8") as f:
res_dict = json.load(f)
ds_name = res_dict["mteb_dataset_name"]
split = "test"
if (ds_name in TRAIN_SPLIT) and ("train" in res_dict):
split = "train"
elif (ds_name in VALIDATION_SPLIT) and ("validation" in res_dict):
split = "validation"
elif (ds_name in DEV_SPLIT) and ("dev" in res_dict):
split = "dev"
elif (ds_name in TESTFULL_SPLIT) and ("test.full" in res_dict):
split = "test.full"
            elif "test" not in res_dict:
                logger.warning(f"Skipping {ds_name} as split {split} is not present.")
                continue
res_dict = res_dict.get(split)
is_multilingual = any(x in res_dict for x in EVAL_LANGS)
langs = res_dict.keys() if is_multilingual else ["pt"]
for lang in langs:
if lang in SKIP_KEYS: continue
test_result_lang = res_dict.get(lang) if is_multilingual else res_dict
for metric, score in test_result_lang.items():
if not isinstance(score, dict):
score = {metric: score}
for sub_metric, sub_score in score.items():
if any(x in sub_metric for x in SKIP_KEYS): continue
                        if isinstance(sub_score, (dict, list)): continue
out.append({
"mteb_dataset_name": ds_name,
"eval_language": lang if is_multilingual else "",
"metric": f"{metric}_{sub_metric}" if metric != sub_metric else metric,
"score": sub_score * 100,
})
for idx, row in enumerate(sorted(out, key=lambda x: x["mteb_dataset_name"])):
            yield idx, row
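
# A minimal (hypothetical) consumer of this loader; the config name is the
# model name with "/" replaced by "__", as in BUILDER_CONFIGS above
# ("intfloat__multilingual-e5-base" is an assumed example, not necessarily
# present in paths.json):
#
#     from datasets import load_dataset
#     res = load_dataset("pt-mteb/results", "intfloat__multilingual-e5-base", split="test")
#     print(res[0])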