|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""NoMIRACL: A dataset to evaluation LLM robustness across 18 languages.""" |
|
|
|
import os |
|
import json |
|
import csv |
|
import datasets |
|
|
|
from collections import defaultdict |
|
|
|
|
|
_CITATION = """\ |
|
@inproceedings{thakur-etal-2024-knowing, |
|
title = "{``}Knowing When You Don{'}t Know{''}: A Multilingual Relevance Assessment Dataset for Robust Retrieval-Augmented Generation", |
|
author = "Thakur, Nandan and |
|
Bonifacio, Luiz and |
|
Zhang, Crystina and |
|
Ogundepo, Odunayo and |
|
Kamalloo, Ehsan and |
|
Alfonso-Hermelo, David and |
|
Li, Xiaoguang and |
|
Liu, Qun and |
|
Chen, Boxing and |
|
Rezagholizadeh, Mehdi and |
|
Lin, Jimmy", |
|
editor = "Al-Onaizan, Yaser and |
|
Bansal, Mohit and |
|
Chen, Yun-Nung", |
|
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024", |
|
month = nov, |
|
year = "2024", |
|
address = "Miami, Florida, USA", |
|
publisher = "Association for Computational Linguistics", |
|
url = "https://aclanthology.org/2024.findings-emnlp.730", |
|
pages = "12508--12526", |
|
abstract = "Retrieval-Augmented Generation (RAG) grounds Large Language Model (LLM) output by leveraging external knowledge sources to reduce factual hallucinations. However, prior work lacks a comprehensive evaluation of different language families, making it challenging to evaluate LLM robustness against errors in external retrieved knowledge. To overcome this, we establish **NoMIRACL**, a human-annotated dataset for evaluating LLM robustness in RAG across 18 typologically diverse languages. NoMIRACL includes both a non-relevant and a relevant subset. Queries in the non-relevant subset contain passages judged as non-relevant, whereas queries in the relevant subset include at least a single judged relevant passage. We measure relevance assessment using: (i) *hallucination rate*, measuring model tendency to hallucinate when the answer is not present in passages in the non-relevant subset, and (ii) *error rate*, measuring model inaccuracy to recognize relevant passages in the relevant subset. In our work, we observe that most models struggle to balance the two capacities. Models such as LLAMA-2 and Orca-2 achieve over 88{\%} hallucination rate on the non-relevant subset. Mistral and LLAMA-3 hallucinate less but can achieve up to a 74.9{\%} error rate on the relevant subset. Overall, GPT-4 is observed to provide the best tradeoff on both subsets, highlighting future work necessary to improve LLM robustness. NoMIRACL dataset and evaluation code are available at: https://github.com/project-miracl/nomiracl.", |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\

Data Loader for the NoMIRACL dataset.

"""


# Project homepage, surfaced in the DatasetInfo.
_URL = "https://nomiracl.github.io"


# Relative path template for one language's data directory inside the repo;
# `name` is the (non-lowercased) config name passed to NoMIRACLConfig.
_DL_URL_FORMAT = "data/{name}"
|
|
|
|
|
def load_topics(filepath: str):
    """Read a tab-separated topics file and return ``{query_id: query_text}``.

    The file is parsed with ``csv.QUOTE_NONE`` so quote characters inside
    queries are taken literally.
    """
    with open(filepath, 'r', encoding='utf-8') as handle:
        rows = csv.reader(handle, delimiter='\t', quoting=csv.QUOTE_NONE)
        return {row[0]: row[1] for row in rows}
|
|
|
def load_corpus(filepath: str):
    """Parse a JSONL corpus file into ``{docid: {"text": ..., "title": ...}}``.

    Missing ``text``/``title`` keys default to the empty string; both values
    are whitespace-stripped.
    """
    documents = {}
    with open(filepath, encoding='utf8') as handle:
        for raw_line in handle:
            record = json.loads(raw_line)
            entry = {
                "text": record.get("text", "").strip(),
                "title": record.get("title", "").strip(),
            }
            documents[record.get("docid")] = entry
    return documents
|
|
|
|
|
def load_qrels(filepath: str):
    """Load TREC-style relevance judgments as ``{qid: {docid: relevance}}``.

    Each line is ``qid <tab> run <tab> docid <tab> relevance``; the second
    column is ignored. Returns ``None`` when no path is given.
    """
    if filepath is None:
        return None

    judgments = defaultdict(dict)
    with open(filepath, encoding="utf-8") as handle:
        for raw_line in handle:
            fields = raw_line.strip().split('\t')
            judgments[fields[0]][fields[2]] = int(fields[3])
    return judgments
|
|
|
|
|
class NoMIRACLConfig(datasets.BuilderConfig):
    """BuilderConfig for one NoMIRACL language subset."""

    def __init__(self, name, **kwargs):
        """
        Args:
            name: `string`, name of dataset config (=language)
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            version=datasets.Version("1.0.0", ""),
            name=name.lower(),
            **kwargs,
        )
        # Relative location of this language's files inside the repository.
        self.data_root_url = _DL_URL_FORMAT.format(name=name)
|
|
|
|
|
class NoMIRACL(datasets.GeneratorBasedBuilder):
    """Multilingual NoMIRACL dataset builder.

    Each language config exposes four evaluation splits:
    ``dev.relevant``, ``dev.non_relevant``, ``test.relevant`` and
    ``test.non_relevant``. Every example joins a query with its judged
    positive (relevance 1) and negative (relevance 0) passages.
    """

    BUILDER_CONFIGS = [
        NoMIRACLConfig(name="arabic", description="Arabic NoMIRACL dataset"),
        NoMIRACLConfig(name="chinese", description="Chinese NoMIRACL dataset"),
        NoMIRACLConfig(name="finnish", description="Finnish NoMIRACL dataset"),
        NoMIRACLConfig(name="german", description="German NoMIRACL dataset"),
        NoMIRACLConfig(name="indonesian", description="Indonesian NoMIRACL dataset"),
        NoMIRACLConfig(name="korean", description="Korean NoMIRACL dataset"),
        NoMIRACLConfig(name="russian", description="Russian NoMIRACL dataset"),
        NoMIRACLConfig(name="swahili", description="Swahili NoMIRACL dataset"),
        NoMIRACLConfig(name="thai", description="Thai NoMIRACL dataset"),
        NoMIRACLConfig(name="bengali", description="Bengali NoMIRACL dataset"),
        NoMIRACLConfig(name="english", description="English NoMIRACL dataset"),
        NoMIRACLConfig(name="french", description="French NoMIRACL dataset"),
        NoMIRACLConfig(name="hindi", description="Hindi NoMIRACL dataset"),
        NoMIRACLConfig(name="japanese", description="Japanese NoMIRACL dataset"),
        NoMIRACLConfig(name="persian", description="Persian NoMIRACL dataset"),
        NoMIRACLConfig(name="spanish", description="Spanish NoMIRACL dataset"),
        NoMIRACLConfig(name="telugu", description="Telugu NoMIRACL dataset"),
        NoMIRACLConfig(name="yoruba", description="Yoruba NoMIRACL dataset"),
    ]

    # Both passage lists share the same per-passage schema.
    @staticmethod
    def _passage_schema():
        """Return a fresh feature spec for a list of passages."""
        return [{
            'docid': datasets.Value('string'),
            'text': datasets.Value('string'),
            'title': datasets.Value('string'),
        }]

    def _info(self):
        """Describe the features shared by every split."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'query_id': datasets.Value('string'),
                'query': datasets.Value('string'),
                'positive_passages': self._passage_schema(),
                'negative_passages': self._passage_schema(),
            }),
            # Fix: the previous value ("file", "text") named features that do
            # not exist in `features`; this dataset has no supervised pair.
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the corpus plus per-split qrels/topics and build splits.

        Layout on disk (relative to the language's data root):
        ``corpus.jsonl.gz`` and ``{qrels,topics}/{dev,test}.{relevant,non_relevant}.tsv``.
        """
        root = self.config.data_root_url
        splits = ("dev", "test")
        subsets = ("relevant", "non_relevant")

        urls = {"corpus": root + "/corpus.jsonl.gz"}
        for split in splits:
            urls[split] = {
                kind: {subset: f"{root}/{kind}/{split}.{subset}.tsv" for subset in subsets}
                for kind in ("qrels", "topics")
            }
        downloaded_files = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=f"{split}.{subset}",
                gen_kwargs={
                    "corpus_path": downloaded_files["corpus"],
                    "qrels_path": downloaded_files[split]["qrels"][subset],
                    "topics_path": downloaded_files[split]["topics"][subset],
                },
            )
            for split in splits
            for subset in subsets
        ]

    def _generate_examples(self, corpus_path, qrels_path, topics_path):
        """Yield ``(query_id, example)`` pairs for one split.

        Joins topics with their judgments and resolves judged docids against
        the corpus; docids missing from the corpus are silently skipped, and a
        missing qrels file yields empty passage lists.
        """
        corpus = load_corpus(corpus_path)
        qrels = load_qrels(qrels_path)
        topics = load_topics(topics_path)

        for qid, query in topics.items():
            if qrels is not None:
                # qrels is a defaultdict, so an unjudged qid yields {}.
                judged = qrels[qid]
                pos_docids = [docid for docid, rel in judged.items() if rel == 1]
                neg_docids = [docid for docid, rel in judged.items() if rel == 0]
            else:
                pos_docids, neg_docids = [], []

            yield qid, {
                'query_id': qid,
                'query': query,
                'positive_passages': [
                    {'docid': docid, **corpus[docid]}
                    for docid in pos_docids if docid in corpus
                ],
                'negative_passages': [
                    {'docid': docid, **corpus[docid]}
                    for docid in neg_docids if docid in corpus
                ],
            }
|
|