rajmohanc committed 704fb32 · verified · 1 parent: 0c60da6

Upload tab_fact.py

Files changed (1): tab_fact.py (+152, -0)
tab_fact.py ADDED
@@ -0,0 +1,152 @@
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TabFact: A Large-scale Dataset for Table-based Fact Verification."""


import json
import os

import datasets

_CITATION = """\
@inproceedings{2019TabFactA,
  title={TabFact : A Large-scale Dataset for Table-based Fact Verification},
  author={Wenhu Chen, Hongmin Wang, Jianshu Chen, Yunkai Zhang, Hong Wang, Shiyang Li, Xiyou Zhou and William Yang Wang},
  booktitle = {International Conference on Learning Representations (ICLR)},
  address = {Addis Ababa, Ethiopia},
  month = {April},
  year = {2020}
}
"""

_DESCRIPTION = """\
The problem of verifying whether a textual hypothesis holds the truth based on the given evidence, \
also known as fact verification, plays an important role in the study of natural language \
understanding and semantic representation. However, existing studies are restricted to \
dealing with unstructured textual evidence (e.g., sentences, passages, or a pool of passages), \
while verification using structured forms of evidence, such as tables, graphs, and databases, remains unexplored. \
TABFACT is a large-scale dataset with 16k Wikipedia tables serving as evidence for 118k human-annotated statements, \
designed for fact verification with semi-structured evidence. \
The statements are labeled as either ENTAILED or REFUTED. \
TABFACT is challenging since it involves both soft linguistic reasoning and hard symbolic reasoning.
"""

_HOMEPAGE = "https://tabfact.github.io/"

_GIT_ARCHIVE_URL = "https://github.com/wenhuchen/Table-Fact-Checking/archive/948b5560e2f7f8c9139bd91c7f093346a2bb56a8.zip"


class TabFact(datasets.GeneratorBasedBuilder):
    """TabFact: A Large-scale Dataset for Table-based Fact Verification."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = {
            "id": datasets.Value("int32"),
            "table": {
                "id": datasets.Value("string"),
                "header": datasets.features.Sequence(datasets.Value("string")),
                "rows": datasets.features.Sequence(
                    datasets.features.Sequence(datasets.Value("string"))
                ),
                "caption": datasets.Value("string"),
            },
            "statement": datasets.Value("string"),
            "label": datasets.Value("int32"),
        }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
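
    # Illustrative sketch of a single record produced by this builder
    # (hypothetical values, not taken from the dataset):
    #
    #     {
    #         "id": 0,
    #         "table": {
    #             "id": "2-12345678-1.html.csv",
    #             "header": ["rank", "nation", "gold"],
    #             "rows": [["1", "japan", "3"], ["2", "france", "1"]],
    #             "caption": "medal table",
    #         },
    #         "statement": "japan won more gold medals than france",
    #         "label": 1,
    #     }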

    def _split_generators(self, dl_manager):
        # The data ships as a pinned snapshot of the upstream GitHub repository;
        # tables live under data/all_csv and statements under tokenized_data.
        extracted_path = dl_manager.download_and_extract(_GIT_ARCHIVE_URL)

        repo_path = os.path.join(
            extracted_path,
            "Table-Fact-Checking-948b5560e2f7f8c9139bd91c7f093346a2bb56a8",  # pragma: allowlist secret
        )
        all_csv_path = os.path.join(repo_path, "data", "all_csv")

        train_statements_file = os.path.join(
            repo_path, "tokenized_data", "train_examples.json"
        )
        val_statements_file = os.path.join(
            repo_path, "tokenized_data", "val_examples.json"
        )
        test_statements_file = os.path.join(
            repo_path, "tokenized_data", "test_examples.json"
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "statements_file": train_statements_file,
                    "all_csv_path": all_csv_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "statements_file": val_statements_file,
                    "all_csv_path": all_csv_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "statements_file": test_statements_file,
                    "all_csv_path": all_csv_path,
                },
            ),
        ]

    def _generate_examples(self, statements_file, all_csv_path):
        def convert_to_table_structure(table_str):
            header = table_str.split("\n")[0].split("#")
            rows = [row.split("#") for row in table_str.strip().split("\n")[1:]]
            return {"header": header, "rows": rows}
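
        # Illustrative sketch of the upstream table format (hypothetical values):
        # the files under all_csv are '#'-delimited, one row per line, e.g.
        #
        #     rank#nation#gold
        #     1#japan#3
        #     2#france#1
        #
        # which convert_to_table_structure turns into
        #     {"header": ["rank", "nation", "gold"],
        #      "rows": [["1", "japan", "3"], ["2", "france", "1"]]}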

        with open(statements_file, encoding="utf-8") as f:
            examples = json.load(f)

        for i, (table_id, example) in enumerate(examples.items()):
            table_file_path = os.path.join(all_csv_path, table_id)
            with open(table_file_path, encoding="utf-8") as f:
                table_text = f.read()

            statements, labels, caption = example

            # Each table is paired with several statements; emit one example per
            # (table, statement) pair.
            for statement_idx, (statement, label) in enumerate(zip(statements, labels)):
                table = convert_to_table_structure(table_text)
                yield (
                    f"{i}_{statement_idx}",
                    {
                        "id": i,
                        "table": {
                            "id": table_id,
                            "header": table["header"],
                            "rows": table["rows"],
                            "caption": caption,
                        },
                        "statement": statement,
                        "label": label,
                    },
                )
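
A minimal usage sketch for the uploaded script (not part of the commit). It assumes the file is saved locally as tab_fact.py and a `datasets` release that still supports script-based loaders; newer releases may require `trust_remote_code=True` or have dropped script support entirely. The split names and fields follow the definitions above, and the label convention (1 = ENTAILED, 0 = REFUTED) follows the upstream TabFact repository.

    import datasets

    # Build all three splits (train / validation / test) from the local script.
    ds = datasets.load_dataset("tab_fact.py")

    example = ds["train"][0]
    print(example["statement"])        # the claim to verify
    print(example["label"])            # 1 = ENTAILED, 0 = REFUTED (upstream convention)
    print(example["table"]["header"])  # column names of the evidence table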