"""Hugging Face datasets loading script for the CIRAL queries and relevance judgements."""

import csv
import json
import os
from collections import defaultdict

import datasets


_CITATION = ""

languages = ['hausa', 'yoruba']

_DESCRIPTION = """\
This dataset consists of the queries and relevance judgements in the CIRAL test collection.
"""

_HOMEPAGE = ""

_LICENSE = ""

_URLS = {
    lang: {
        'train': [
            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-{lang}-train.tsv',
            f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/qrels/qrels.ciral-{lang}-train.tsv'
        ],
        'test': []
    } for lang in languages
}


def load_queries(_file):
    """Load queries from a topics file with one tab-separated (query id, query) pair per line."""
    if _file is None:
        return None

    queries = {}
    with open(_file, encoding="utf-8") as query_file:
        for line in query_file:
            query_id, query = line.strip().split('\t')
            queries[query_id] = query
    return queries


def load_qrels(_file):
    """Load relevance judgements from a qrels file with tab-separated (query id, unused, docid, relevance) lines."""
    if _file is None:
        return None

    qrels = defaultdict(dict)
    with open(_file, encoding="utf-8") as qrel_file:
        for line in qrel_file:
            qid, _, docid, rel = line.strip().split('\t')
            qrels[qid][docid] = int(rel)
    return qrels


class CIRAL(datasets.GeneratorBasedBuilder):
    """Queries and relevance judgements of the CIRAL test collection, one config per language."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version("1.1.0"),
            description=f"CIRAL data for {lang}.") for lang in languages
    ]

    def _info(self):
        features = datasets.Features(
            {
                "query id": datasets.Value("string"),
                "query": datasets.Value("string"),
                "positive passages": [{
                    "docid": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string")}],
                "negative passages": [{
                    "docid": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string")}]
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        lang = self.config.name
        downloaded_files = dl_manager.download_and_extract(_URLS[lang])
        return [
            datasets.SplitGenerator(
                name='train',
                gen_kwargs={
                    'filepaths': downloaded_files['train'],
                },
            ),
            datasets.SplitGenerator(
                name='test',
                gen_kwargs={
                    'filepaths': downloaded_files['test'],
                },
            ),
        ]

    def _generate_examples(self, filepaths):
        lang = self.config.name
        # Resolve docids to their passages using the CIRAL corpus for this language.
        corpus = datasets.load_dataset('ciral/ciral-corpus', lang)['train']
        docid2doc = {doc['docid']: doc for doc in corpus}

        # The train split provides a topics file and a qrels file; a split with
        # fewer files yields queries without judged passages (or nothing at all).
        if len(filepaths) == 2:
            query_file, qrel_file = filepaths
        else:
            query_file = filepaths[0] if filepaths else None
            qrel_file = None
        queries = load_queries(query_file)
        qrels = load_qrels(qrel_file)
        if queries is None:
            return

        for query_id, query in queries.items():
            data = {}
            data['query id'] = query_id
            data['query'] = query
            # Judged passages with a positive relevance label are treated as
            # positives; the remaining judged passages as negatives.
            judgements = qrels[query_id].items() if qrels is not None else []
            data['positive passages'] = [{
                'docid': docid,
                # Fall back to an empty title if the corpus has no 'title' field.
                'title': docid2doc[docid].get('title', ''),
                'text': docid2doc[docid]['text'],
            } for docid, judgement in judgements if judgement > 0]
            data['negative passages'] = [{
                'docid': docid,
                'title': docid2doc[docid].get('title', ''),
                'text': docid2doc[docid]['text'],
            } for docid, judgement in judgements if judgement <= 0]

            yield query_id, data
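

# A minimal usage sketch, not part of the loading script itself: it assumes this
# file is saved locally as, e.g., `ciral.py`, that the 'ciral/ciral-corpus'
# dataset is reachable on the Hugging Face Hub, and that the installed
# `datasets` version still supports loading local dataset scripts.
if __name__ == "__main__":
    # Build the Hausa training split through this script and inspect one example.
    ciral_hausa = datasets.load_dataset("ciral.py", "hausa", split="train")
    example = ciral_hausa[0]
    print(example["query id"], example["query"])
    print(len(example["positive passages"]), "positive /",
          len(example["negative passages"]), "negative judged passages")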