"""LC-QuAD 2.0: complex question answering over Wikidata and DBpedia."""

import json
import os

import datasets


_CITATION = """\
@inproceedings{dubey2017lc2,
  title={LC-QuAD 2.0: A Large Dataset for Complex Question Answering over Wikidata and DBpedia},
  author={Dubey, Mohnish and Banerjee, Debayan and Abdelkawi, Abdelrahman and Lehmann, Jens},
  booktitle={Proceedings of the 18th International Semantic Web Conference (ISWC)},
  year={2019},
  organization={Springer}
}
"""

_DESCRIPTION = """\
LC-QuAD 2.0 is a large question answering dataset with 30,000 pairs of questions and their corresponding SPARQL queries. The target knowledge bases are Wikidata and DBpedia (specifically, the 2018 DBpedia version). Please see our paper for details about the dataset creation process and framework.
"""

_URL = "data.zip"


class LcQuad(datasets.GeneratorBasedBuilder):
    """LC-QuAD 2.0: 30,000 complex questions paired with SPARQL queries over Wikidata and DBpedia."""

    VERSION = datasets.Version("2.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "NNQT_question": datasets.Value("string"),
                    "uid": datasets.Value("int32"),
                    "subgraph": datasets.Value("string"),
                    "template_index": datasets.Value("int32"),
                    "question": datasets.Value("string"),
                    "sparql_wikidata": datasets.Value("string"),
                    "sparql_dbpedia18": datasets.Value("string"),
                    "template": datasets.Value("string"),
                    "paraphrased_question": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="http://lc-quad.sda.tech/",
            citation=_CITATION,
        )
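
    # For reference, each JSON record is expected to carry the keys declared
    # above plus an "answer" field, which this loader does not emit. A sketch
    # of the expected shape (values are illustrative placeholders, not real
    # dataset entries):
    #
    #     {
    #         "NNQT_question": "...",
    #         "uid": 1,
    #         "subgraph": "...",
    #         "template_index": 65,
    #         "question": "...",
    #         "sparql_wikidata": "SELECT ?obj WHERE { ... }",
    #         "sparql_dbpedia18": "SELECT ?obj WHERE { ... }",
    #         "template": "...",
    #         "paraphrased_question": "...",
    #         "answer": []
    #     }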

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train and test splits."""
        # The archive ships alongside this script and unpacks into a
        # `data/` directory containing train.json and test.json.
        dl_dir = dl_manager.download_and_extract(_URL)
        dl_dir = os.path.join(dl_dir, "data")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(dl_dir, "train.json")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(dl_dir, "test.json")},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for id_, row in enumerate(data):
            # Skip malformed rows in which any field other than "answer"
            # holds a list instead of a scalar value.
            is_list = False
            for key in row:
                if key != "answer" and isinstance(row[key], list):
                    is_list = True
            if is_list:
                continue
            yield id_, {
                "NNQT_question": row["NNQT_question"],
                "uid": row["uid"],
                "subgraph": row["subgraph"],
                "template_index": row["template_index"],
                "question": row["question"],
                "sparql_wikidata": row["sparql_wikidata"],
                "sparql_dbpedia18": row["sparql_dbpedia18"],
                "template": row["template"],
                "paraphrased_question": row["paraphrased_question"],
            }
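
# Usage sketch, assuming this script and data.zip live together in a local
# directory (the path below is a placeholder):
#
#     from datasets import load_dataset
#
#     lc_quad = load_dataset("path/to/lc_quad")
#     print(lc_quad["train"][0]["question"])
#     print(lc_quad["train"][0]["sparql_wikidata"])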