Tasks:
Text Classification
Languages:
English
Size:
100K<n<1M
ArXiv:
2106.05707
Tags:
knowledge-verification
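How to use (a minimal sketch; `fever/feverous` is a hypothetical Hub ID here — substitute the dataset's actual Hub ID or a local path to the loading script below):

from datasets import load_dataset

feverous = load_dataset("fever/feverous")  # hypothetical Hub ID, not confirmed by this page
print(feverous["train"][0]["claim"])

The loading script that defines this builder follows.
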
"""FEVEROUS dataset."""
import json
import textwrap
import datasets
class FeverousConfig(datasets.BuilderConfig):
"""BuilderConfig for FEVER."""
def __init__(self, homepage: str = None, citation: str = None, base_url: str = None, urls: dict = None, **kwargs):
"""BuilderConfig for FEVEROUS.
Args:
homepage (`str`): Homepage.
citation (`str`): Citation reference.
base_url (`str`): Data base URL that precedes all data URLs.
urls (`dict`): Data URLs (each URL will pe preceded by `base_url`).
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(**kwargs)
self.homepage = homepage
self.citation = citation
self.base_url = base_url
self.urls = {key: f"{base_url}/{url}" for key, url in urls.items()}


class Feverous(datasets.GeneratorBasedBuilder):
    """FEVEROUS dataset."""

    BUILDER_CONFIGS = [
        FeverousConfig(
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                "FEVEROUS:\n"
                "FEVEROUS (Fact Extraction and VERification Over Unstructured and Structured information) is a fact "
                "verification dataset which consists of 87,026 verified claims. Each claim is annotated with evidence "
                "in the form of sentences and/or cells from tables in Wikipedia, as well as a label indicating whether "
                "this evidence supports, refutes, or does not provide enough information to reach a verdict. The "
                "dataset also contains annotation metadata such as annotator actions (query keywords, clicks on page, "
                "time signatures) and the type of challenge each claim poses."
            ),
            homepage="https://fever.ai/dataset/feverous.html",
            citation=textwrap.dedent(
                """\
                @inproceedings{Aly21Feverous,
                    author = {Aly, Rami and Guo, Zhijiang and Schlichtkrull, Michael Sejr and Thorne, James and Vlachos, Andreas and Christodoulopoulos, Christos and Cocarascu, Oana and Mittal, Arpit},
                    title = {{FEVEROUS}: Fact Extraction and {VERification} Over Unstructured and Structured information},
                    eprint={2106.05707},
                    archivePrefix={arXiv},
                    primaryClass={cs.CL},
                    year = {2021}
                }"""
            ),
            base_url="https://fever.ai/download/feverous",
            urls={
                datasets.Split.TRAIN: "feverous_train_challenges.jsonl",
                datasets.Split.VALIDATION: "feverous_dev_challenges.jsonl",
                datasets.Split.TEST: "feverous_test_unlabeled.jsonl",
            },
        ),
    ]
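
    # Note: with the config above, `FeverousConfig.__init__` joins `base_url`
    # with each file name, so e.g. the train split URL resolves to
    # https://fever.ai/download/feverous/feverous_train_challenges.jsonl
    # (and analogously for the dev and test files).
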
    def _info(self):
        features = {
            "id": datasets.Value("int32"),
            "label": datasets.ClassLabel(names=["SUPPORTS", "REFUTES", "NOT ENOUGH INFO"]),
            "claim": datasets.Value("string"),
            "evidence": [
                {
                    "content": [datasets.Value("string")],
                    "context": [[datasets.Value("string")]],
                }
            ],
            "annotator_operations": [
                {
                    "operation": datasets.Value("string"),
                    "value": datasets.Value("string"),
                    "time": datasets.Value("float"),
                }
            ],
            "expected_challenge": datasets.Value("string"),
            "challenge": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.homepage,
            citation=self.config.citation,
        )
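
    # Illustrative record matching the features above (values are hypothetical,
    # only the shapes follow from the schema):
    #   {"id": 0, "label": "SUPPORTS", "claim": "...",
    #    "evidence": [{"content": ["Page_sentence_0"], "context": [["Page_title"]]}],
    #    "annotator_operations": [{"operation": "start", "value": "start", "time": 0.0}],
    #    "expected_challenge": "...", "challenge": "..."}
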
    def _split_generators(self, dl_manager):
        dl_paths = dl_manager.download_and_extract(self.config.urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": dl_paths[split],
                },
            )
            for split in dl_paths
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                # Skip placeholder rows: the first item in "train" has all
                # values equal to empty strings
                if [value for value in data.values() if value]:
                    evidence = data.get("evidence", [])
                    if evidence:
                        for evidence_set in evidence:
                            # Transform "context" from a dict keyed by element id
                            # into a list aligned with "content" (analogous layout)
                            evidence_set["context"] = [
                                evidence_set["context"][element_id] for element_id in evidence_set["content"]
                            ]
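                    # For instance (hypothetical ids): content ["Page_sentence_0"]
                    # with context {"Page_sentence_0": ["Page_title"]} has now been
                    # rewritten to context [["Page_title"]], aligned index-by-index
                    # with content.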
                    yield id_, {
                        "id": data.get("id"),
                        "label": data.get("label", -1),
                        "claim": data.get("claim", ""),
                        "evidence": evidence,
                        "annotator_operations": data.get("annotator_operations", []),
                        "expected_challenge": data.get("expected_challenge", ""),
                        "challenge": data.get("challenge", ""),
                    }