Initial commit
norabelrose committed · Commit 85ebc1e
README.md
ADDED
@@ -0,0 +1,336 @@
---
language:
- en
paperswithcode_id: fever
annotations_creators:
- crowdsourced
language_creators:
- found
license:
- cc-by-sa-3.0
- gpl-3.0
multilinguality:
- monolingual
pretty_name: FEVER
size_categories:
- 100K<n<1M
source_datasets:
- extended|wikipedia
task_categories:
- text-classification
task_ids: []
tags:
- knowledge-verification
dataset_info:
- config_name: v1.0
  features:
  - name: id
    dtype: int32
  - name: label
    dtype: string
  - name: claim
    dtype: string
  - name: evidence_annotation_id
    dtype: int32
  - name: evidence_id
    dtype: int32
  - name: evidence_wiki_url
    dtype: string
  - name: evidence_sentence_id
    dtype: int32
  splits:
  - name: train
    num_bytes: 24147163
    num_examples: 263822
  - name: dev
    num_bytes: 2696375
    num_examples: 28625
  - name: paper_dev
    num_bytes: 1348943
    num_examples: 14475
  - name: paper_test
    num_bytes: 1347432
    num_examples: 14150
  download_size: 44853972
  dataset_size: 40043693
- config_name: v2.0
  features:
  - name: id
    dtype: int32
  - name: label
    dtype: string
  - name: claim
    dtype: string
  - name: evidence_annotation_id
    dtype: int32
  - name: evidence_id
    dtype: int32
  - name: evidence_wiki_url
    dtype: string
  - name: evidence_sentence_id
    dtype: int32
  splits:
  - name: validation
    num_bytes: 306243
    num_examples: 2384
  download_size: 392466
  dataset_size: 306243
- config_name: wiki_pages
  features:
  - name: id
    dtype: string
  - name: text
    dtype: string
  - name: lines
    dtype: string
  splits:
  - name: wikipedia_pages
    num_bytes: 7254115038
    num_examples: 5416537
  download_size: 1713485474
  dataset_size: 7254115038
---

# Dataset Card for "fever"

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** [https://fever.ai/](https://fever.ai/)
- **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Dataset Summary

With billions of individual pages on the web providing information on almost every conceivable topic, we should have the ability to collect facts that answer almost every conceivable question. However, only a small fraction of this information is contained in structured sources (Wikidata, Freebase, etc.); we are therefore limited by our ability to transform free-form text into structured knowledge. There is, however, another problem that has become the focus of much recent research and media coverage: false information coming from unreliable sources.

The FEVER workshops are a venue for work on verifiable knowledge extraction and aim to stimulate progress in this direction.

- FEVER Dataset: FEVER (Fact Extraction and VERification) consists of 185,445 claims generated by altering sentences extracted from Wikipedia and subsequently verified without knowledge of the sentence they were derived from. The claims are classified as Supported, Refuted or NotEnoughInfo. For the first two classes, the annotators also recorded the sentence(s) forming the necessary evidence for their judgment.

- FEVER 2.0 Adversarial Attacks Dataset: The FEVER 2.0 dataset consists of 1174 claims created by the submissions of participants in the Breaker phase of the 2019 shared task. Participants (Breakers) were tasked with generating adversarial examples that induce classification errors in the existing systems. Breakers submitted a dataset of up to 1000 instances, with an equal number of instances for each of the three classes (Supported, Refuted, NotEnoughInfo). Only novel claims (i.e. not contained in the original FEVER dataset) were considered valid entries to the shared task. The submissions were then manually evaluated for Correctness (grammatical, appropriately labeled, and meeting the FEVER annotation guideline requirements).
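
The configs declared in the YAML header above can be loaded with 🤗 Datasets. A minimal sketch (the bare `fever` repo id is an assumption — substitute the id this copy of the script is hosted under; `trust_remote_code=True` is needed for script-based loaders on recent 2.x versions of `datasets`):

```python
from datasets import load_dataset

# This loader drops NOT ENOUGH INFO examples and yields one row per evidence
# item, so split sizes differ from the official FEVER release counts.
fever_v1 = load_dataset("fever", "v1.0", trust_remote_code=True)
print(fever_v1["train"][0])
```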

### Supported Tasks and Leaderboards

The task is verification of textual claims against textual sources.

Compared to textual entailment (TE)/natural language inference, the key difference is that in those tasks the passage needed to verify each claim is given, and in recent years it has typically consisted of a single sentence, while in verification systems the evidence must be retrieved from a large set of documents.

### Languages

The dataset is in English.

## Dataset Structure

### Data Instances

#### v1.0

- **Size of downloaded dataset files:** 44.86 MB
- **Size of the generated dataset:** 40.05 MB
- **Total amount of disk used:** 84.89 MB

An example of 'train' looks as follows.
```
{'claim': 'Nikolaj Coster-Waldau worked with the Fox Broadcasting Company.',
 'evidence_wiki_url': 'Nikolaj_Coster-Waldau',
 'label': 'SUPPORTS',
 'id': 75397,
 'evidence_id': 104971,
 'evidence_sentence_id': 7,
 'evidence_annotation_id': 92206}
```

#### v2.0

- **Size of downloaded dataset files:** 0.39 MB
- **Size of the generated dataset:** 0.30 MB
- **Total amount of disk used:** 0.70 MB

#### wiki_pages

- **Size of downloaded dataset files:** 1.71 GB
- **Size of the generated dataset:** 7.25 GB
- **Total amount of disk used:** 8.97 GB

An example of 'wikipedia_pages' looks as follows.
```
{'text': 'The following are the football -LRB- soccer -RRB- events of the year 1928 throughout the world . ',
 'lines': '0\tThe following are the football -LRB- soccer -RRB- events of the year 1928 throughout the world .\n1\t',
 'id': '1928_in_association_football'}
```
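
Since the generated `wiki_pages` split is about 7.25 GB, streaming can be preferable to materializing it on disk. A minimal sketch, again assuming the `fever` repo id (script-based loaders generally support streaming through the download manager, but treat that as an assumption):

```python
from datasets import load_dataset

# Iterate over the ~7 GB wiki_pages split without generating it on disk first.
wiki = load_dataset("fever", "wiki_pages", split="wikipedia_pages",
                    streaming=True, trust_remote_code=True)
print(next(iter(wiki))["id"])
```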

### Data Fields

The data fields are the same among all splits.

#### v1.0

- `id`: an `int32` feature.
- `label`: a `string` feature.
- `claim`: a `string` feature.
- `evidence_annotation_id`: an `int32` feature.
- `evidence_id`: an `int32` feature.
- `evidence_wiki_url`: a `string` feature.
- `evidence_sentence_id`: an `int32` feature.

#### v2.0

- `id`: an `int32` feature.
- `label`: a `string` feature.
- `claim`: a `string` feature.
- `evidence_annotation_id`: an `int32` feature.
- `evidence_id`: an `int32` feature.
- `evidence_wiki_url`: a `string` feature.
- `evidence_sentence_id`: an `int32` feature.

#### wiki_pages

- `id`: a `string` feature.
- `text`: a `string` feature.
- `lines`: a `string` feature.
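
As the `wikipedia_pages` example above shows, the `lines` field packs a page's sentences into a single string: each newline-separated entry is a sentence id, a tab, and the sentence text (in the raw FEVER release, further tab-separated anchor strings may follow the text). A minimal parsing sketch (the helper name is ours, not part of the dataset):

```python
def parse_lines(lines: str) -> dict[int, str]:
    """Split a wiki_pages `lines` field into {sentence_id: sentence_text}."""
    sentences = {}
    for entry in lines.split("\n"):
        sent_id, _, rest = entry.partition("\t")
        if sent_id.isdigit():
            # Keep only the sentence text; drop any trailing anchor strings.
            sentences[int(sent_id)] = rest.split("\t")[0]
    return sentences
```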

### Data Splits

#### v1.0

The counts below reflect this loader, which drops NOT ENOUGH INFO examples and yields one example per evidence item; they therefore match the `dataset_info` sizes above rather than the official FEVER release counts.

|      |  train |   dev | paper_dev | paper_test |
|------|-------:|------:|----------:|-----------:|
| v1.0 | 263822 | 28625 |     14475 |      14150 |

#### v2.0

|      | validation |
|------|-----------:|
| v2.0 |       2384 |

#### wiki_pages

|            | wikipedia_pages |
|------------|----------------:|
| wiki_pages |         5416537 |

## Dataset Creation

### Curation Rationale

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### Who are the source language producers?

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Annotations

#### Annotation process

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### Who are the annotators?

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Personal and Sensitive Information

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Discussion of Biases

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Other Known Limitations

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

## Additional Information

### Dataset Curators

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Licensing Information

FEVER license:

```
These data annotations incorporate material from Wikipedia, which is licensed pursuant to the Wikipedia Copyright Policy. These annotations are made available under the license terms described on the applicable Wikipedia article pages, or, where Wikipedia license terms are unavailable, under the Creative Commons Attribution-ShareAlike License (version 3.0), available at http://creativecommons.org/licenses/by-sa/3.0/ (collectively, the “License Terms”). You may not use these files except in compliance with the applicable License Terms.
```

### Citation Information

If you use "FEVER Dataset", please cite:
```bibtex
@inproceedings{Thorne18Fever,
    author = {Thorne, James and Vlachos, Andreas and Christodoulopoulos, Christos and Mittal, Arpit},
    title = {{FEVER}: a Large-scale Dataset for Fact Extraction and {VERification}},
    booktitle = {NAACL-HLT},
    year = {2018}
}
```

If you use "FEVER 2.0 Adversarial Attacks Dataset", please cite:
```bibtex
@inproceedings{Thorne19FEVER2,
    author = {Thorne, James and Vlachos, Andreas and Cocarascu, Oana and Christodoulopoulos, Christos and Mittal, Arpit},
    title = {The {FEVER2.0} Shared Task},
    booktitle = {Proceedings of the Second Workshop on {Fact Extraction and VERification (FEVER)}},
    year = {2019}
}
```

### Contributions

Thanks to [@thomwolf](https://github.com/thomwolf), [@lhoestq](https://github.com/lhoestq),
[@mariamabarham](https://github.com/mariamabarham), [@lewtun](https://github.com/lewtun),
[@albertvillanova](https://github.com/albertvillanova) for adding this dataset.
fever.py
ADDED
@@ -0,0 +1,222 @@
# Modified by Nora Belrose of EleutherAI (2023)
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""FEVER dataset."""

import json
import os
import textwrap
from typing import Optional

import datasets


class FeverConfig(datasets.BuilderConfig):
    """BuilderConfig for FEVER."""

    def __init__(
        self,
        homepage: Optional[str] = None,
        citation: Optional[str] = None,
        base_url: Optional[str] = None,
        urls: Optional[dict] = None,
        **kwargs,
    ):
        """BuilderConfig for FEVER.

        Args:
            homepage (`str`): Homepage.
            citation (`str`): Citation reference.
            base_url (`str`): Data base URL that precedes all data URLs.
            urls (`dict`): Data URLs (each URL will be preceded by `base_url`).
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.homepage = homepage
        self.citation = citation
        self.base_url = base_url
        # Guard against `urls=None` so the comprehension cannot crash.
        self.urls = {key: f"{base_url}/{url}" for key, url in (urls or {}).items()}


class Fever(datasets.GeneratorBasedBuilder):
    """Fact Extraction and VERification Dataset."""

    BUILDER_CONFIGS = [
        FeverConfig(
            name="v1.0",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                "FEVER v1.0\n"
                "FEVER (Fact Extraction and VERification) consists of 185,445 claims generated by altering sentences "
                "extracted from Wikipedia and subsequently verified without knowledge of the sentence they were "
                "derived from. The claims are classified as Supported, Refuted or NotEnoughInfo. For the first two "
                "classes, the annotators also recorded the sentence(s) forming the necessary evidence for their "
                "judgment."
            ),
            homepage="https://fever.ai/dataset/fever.html",
            citation=textwrap.dedent(
                """\
                @inproceedings{Thorne18Fever,
                    author = {Thorne, James and Vlachos, Andreas and Christodoulopoulos, Christos and Mittal, Arpit},
                    title = {{FEVER}: a Large-scale Dataset for Fact Extraction and {VERification}},
                    booktitle = {NAACL-HLT},
                    year = {2018}
                }"""
            ),
            base_url="https://fever.ai/download/fever",
            urls={
                datasets.Split.TRAIN: "train.jsonl",
                "dev": "shared_task_dev.jsonl",
                "paper_dev": "paper_dev.jsonl",
                "paper_test": "paper_test.jsonl",
            },
        ),
        FeverConfig(
            name="v2.0",
            version=datasets.Version("2.0.0"),
            description=textwrap.dedent(
                "FEVER v2.0:\n"
                "The FEVER 2.0 Dataset consists of 1174 claims created by the submissions of participants in the "
                "Breaker phase of the 2019 shared task. Participants (Breakers) were tasked with generating "
                "adversarial examples that induce classification errors in the existing systems. Breakers submitted "
                "a dataset of up to 1000 instances, with an equal number of instances for each of the three classes "
                "(Supported, Refuted, NotEnoughInfo). Only novel claims (i.e. not contained in the original FEVER "
                "dataset) were considered valid entries to the shared task. The submissions were then manually "
                "evaluated for Correctness (grammatical, appropriately labeled, and meeting the FEVER annotation "
                "guideline requirements)."
            ),
            homepage="https://fever.ai/dataset/adversarial.html",
            citation=textwrap.dedent(
                """\
                @inproceedings{Thorne19FEVER2,
                    author = {Thorne, James and Vlachos, Andreas and Cocarascu, Oana and Christodoulopoulos, Christos and Mittal, Arpit},
                    title = {The {FEVER2.0} Shared Task},
                    booktitle = {Proceedings of the Second Workshop on {Fact Extraction and VERification (FEVER)}},
                    year = {2019}
                }"""
            ),
            base_url="https://fever.ai/download/fever2.0",
            urls={
                datasets.Split.VALIDATION: "fever2-fixers-dev.jsonl",
            },
        ),
        FeverConfig(
            name="wiki_pages",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                "Wikipedia pages for FEVER v1.0:\n"
                "FEVER (Fact Extraction and VERification) consists of 185,445 claims generated by altering sentences "
                "extracted from Wikipedia and subsequently verified without knowledge of the sentence they were "
                "derived from. The claims are classified as Supported, Refuted or NotEnoughInfo. For the first two "
                "classes, the annotators also recorded the sentence(s) forming the necessary evidence for their "
                "judgment."
            ),
            homepage="https://fever.ai/dataset/fever.html",
            citation=textwrap.dedent(
                """\
                @inproceedings{Thorne18Fever,
                    author = {Thorne, James and Vlachos, Andreas and Christodoulopoulos, Christos and Mittal, Arpit},
                    title = {{FEVER}: a Large-scale Dataset for Fact Extraction and {VERification}},
                    booktitle = {NAACL-HLT},
                    year = {2018}
                }"""
            ),
            base_url="https://fever.ai/download/fever",
            urls={
                "wikipedia_pages": "wiki-pages.zip",
            },
        ),
    ]

    def _info(self):
        if self.config.name == "wiki_pages":
            features = {
                "id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "lines": datasets.Value("string"),
            }
        elif self.config.name == "v1.0" or self.config.name == "v2.0":
            # Only two classes: NOT ENOUGH INFO examples are dropped in
            # _generate_examples, so the label feature omits that class.
            features = {
                "id": datasets.Value("int32"),
                "label": datasets.ClassLabel(names=["REFUTES", "SUPPORTS"]),
                "claim": datasets.Value("string"),
                "evidence_annotation_id": datasets.Value("int32"),
                "evidence_id": datasets.Value("int32"),
                "evidence_wiki_url": datasets.Value("string"),
                "evidence_sentence_id": datasets.Value("int32"),
            }
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.homepage,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_paths = dl_manager.download_and_extract(self.config.urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": dl_paths[split]
                    if self.config.name != "wiki_pages"
                    else dl_manager.iter_files(os.path.join(dl_paths[split], "wiki-pages")),
                },
            )
            for split in dl_paths.keys()
        ]
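
    # For reference, a raw FEVER v1.0/v2.0 record nests its evidence as a list
    # of evidence sets, each a list of [annotation_id, evidence_id, wiki_url,
    # sentence_id] entries, e.g. (values match the dataset card's train example):
    #   {"id": 75397, "label": "SUPPORTS", "claim": "...",
    #    "evidence": [[[92206, 104971, "Nikolaj_Coster-Waldau", 7]]]}
    # _generate_examples below flattens this nesting into one row per entry.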
    def _generate_examples(self, filepath):
        """Yields examples."""
        if self.config.name == "v1.0" or self.config.name == "v2.0":
            with open(filepath, encoding="utf-8") as f:
                for row_id, row in enumerate(f):
                    data = json.loads(row)
                    id_ = data["id"]
                    label = data.get("label", "")

                    # Drop the examples with label "NOT ENOUGH INFO"
                    if label not in ("REFUTES", "SUPPORTS"):
                        continue

                    claim = data["claim"]
                    evidences = data.get("evidence", [])
                    if len(evidences) > 0:
                        for i in range(len(evidences)):
                            for j in range(len(evidences[i])):
                                # Compare against None so that legitimate zero
                                # values (e.g. sentence id 0) are not replaced
                                # by the -1 "missing" sentinel.
                                annot_id = evidences[i][j][0] if evidences[i][j][0] is not None else -1
                                evidence_id = evidences[i][j][1] if evidences[i][j][1] is not None else -1
                                wiki_url = evidences[i][j][2] if evidences[i][j][2] is not None else ""
                                sent_id = evidences[i][j][3] if evidences[i][j][3] is not None else -1
                                yield f"{row_id}_{i}_{j}", {
                                    "id": id_,
                                    "label": label,
                                    "claim": claim,
                                    "evidence_annotation_id": annot_id,
                                    "evidence_id": evidence_id,
                                    "evidence_wiki_url": wiki_url,
                                    "evidence_sentence_id": sent_id,
                                }
                    else:
                        yield row_id, {
                            "id": id_,
                            "label": label,
                            "claim": claim,
                            "evidence_annotation_id": -1,
                            "evidence_id": -1,
                            "evidence_wiki_url": "",
                            "evidence_sentence_id": -1,
                        }
        elif self.config.name == "wiki_pages":
            for file_id, file in enumerate(filepath):
                with open(file, encoding="utf-8") as f:
                    for row_id, row in enumerate(f):
                        data = json.loads(row)
                        yield f"{file_id}_{row_id}", data