jeffnyman committed
Commit 9bb456c · 1 Parent(s): 70593da

Upload scifact.py

Adding a SciFact data loader script.
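
For reference, a minimal sketch of how the loader can be exercised once scifact.py is available locally. The relative path is an assumption about where the file sits; the config and split names come from the script below, and recent releases of the datasets library may additionally require passing trust_remote_code=True to load_dataset:

    import datasets

    # Hypothetical local path to the uploaded script; adjust as needed.
    corpus = datasets.load_dataset("scifact.py", "corpus")
    claims = datasets.load_dataset("scifact.py", "claims")

    # "corpus" exposes a single train split of evidence abstracts;
    # "claims" exposes train, test, and validation splits.
    print(corpus["train"][0]["doc_id"], corpus["train"][0]["title"])
    print(claims["validation"][0]["claim"])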

Files changed (1)
  1. scifact.py +161 -0
scifact.py ADDED
@@ -0,0 +1,161 @@
+import json
+import datasets
+
+_DESCRIPTION = """
+SciFact
+
+A dataset of expert-written scientific claims paired with evidence-containing
+abstracts and annotated with labels and rationales.
+"""
+
+_CITATION = """
+@InProceedings{Wadden2020FactOF,
+  author = {David Wadden and Shanchuan Lin and Kyle Lo and Lucy Lu Wang and
+            Madeleine van Zuylen and Arman Cohan and Hannaneh Hajishirzi},
+  title = {Fact or Fiction: Verifying Scientific Claims},
+  booktitle = {EMNLP},
+  year = 2020,
+}
+"""
+
+_DOWNLOAD_URL = "https://testerstories.com/files/ai_learn/data.tar.gz"
+
+
+class ScifactConfig(datasets.BuilderConfig):
+    def __init__(self, **kwargs):
+        super(ScifactConfig, self).__init__(
+            version=datasets.Version("1.0.0", ""), **kwargs
+        )
+
+
+class Scifact(datasets.GeneratorBasedBuilder):
+    VERSION = datasets.Version("0.1.0")
+
+    BUILDER_CONFIGS = [
+        ScifactConfig(name="corpus", description="The corpus of evidence documents"),
+        ScifactConfig(
+            name="claims", description="The claims are split into train, test, dev"
+        ),
+    ]
+
+    def _info(self):
+        if self.config.name == "corpus":
+            features = {
+                "doc_id": datasets.Value("int32"),
+                "title": datasets.Value("string"),
+                "abstract": datasets.features.Sequence(datasets.Value("string")),
+                "structured": datasets.Value("bool"),
+            }
+        else:
+            features = {
+                "id": datasets.Value("int32"),
+                "claim": datasets.Value("string"),
+                "evidence_doc_id": datasets.Value("string"),
+                "evidence_label": datasets.Value("string"),
+                "evidence_sentences": datasets.features.Sequence(
+                    datasets.Value("int32")
+                ),
+                "cited_doc_ids": datasets.features.Sequence(datasets.Value("int32")),
+            }
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(features),
+            supervised_keys=None,
+            homepage="https://scifact.apps.allenai.org/",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        archive = dl_manager.download(_DOWNLOAD_URL)
+
+        if self.config.name == "corpus":
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "filepath": "data/corpus.jsonl",
+                        "split": "train",
+                        "files": dl_manager.iter_archive(archive),
+                    },
+                ),
+            ]
+        else:
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "filepath": "data/claims_train.jsonl",
+                        "split": "train",
+                        "files": dl_manager.iter_archive(archive),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "filepath": "data/claims_test.jsonl",
+                        "split": "test",
+                        "files": dl_manager.iter_archive(archive),
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "filepath": "data/claims_dev.jsonl",
+                        "split": "dev",
+                        "files": dl_manager.iter_archive(archive),
+                    },
+                ),
+            ]
+
+    def _generate_examples(self, filepath, split, files):
+        for path, f in files:
+            if path == filepath:
+                for id_, row in enumerate(f):
+                    data = json.loads(row.decode("utf-8"))
+
+                    if self.config.name == "corpus":
+                        yield id_, {
+                            "doc_id": int(data["doc_id"]),
+                            "title": data["title"],
+                            "abstract": data["abstract"],
+                            "structured": data["structured"],
+                        }
+                    else:
+                        if split == "test":
+                            yield id_, {
+                                "id": data["id"],
+                                "claim": data["claim"],
+                                "evidence_doc_id": "",
+                                "evidence_label": "",
+                                "evidence_sentences": [],
+                                "cited_doc_ids": [],
+                            }
+                        else:
+                            evidences = data["evidence"]
+
+                            if evidences:
+                                for id1, doc_id in enumerate(evidences):
+                                    for id2, evidence in enumerate(evidences[doc_id]):
+                                        yield str(id_) + "_" + str(id1) + "_" + str(
+                                            id2
+                                        ), {
+                                            "id": data["id"],
+                                            "claim": data["claim"],
+                                            "evidence_doc_id": doc_id,
+                                            "evidence_label": evidence["label"],
+                                            "evidence_sentences": evidence["sentences"],
+                                            "cited_doc_ids": data.get(
+                                                "cited_doc_ids", []
+                                            ),
+                                        }
+                            else:
+                                yield id_, {
+                                    "id": data["id"],
+                                    "claim": data["claim"],
+                                    "evidence_doc_id": "",
+                                    "evidence_label": "",
+                                    "evidence_sentences": [],
+                                    "cited_doc_ids": data.get("cited_doc_ids", []),
+                                }
+                break
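
A note on the claims expansion in _generate_examples: each train/dev claim that has evidence is flattened into one example per evidence entry, keyed "<row>_<doc-index>_<evidence-index>". A sketch with a made-up record, assuming the JSONL shape of the released SciFact data:

    # Hypothetical claims line with one evidence document:
    row = {
        "id": 13,
        "claim": "A made-up claim.",
        "evidence": {"11": [{"label": "SUPPORT", "sentences": [0, 1]}]},
        "cited_doc_ids": [11],
    }
    # If this is row 0 of claims_train.jsonl, the loader yields one example
    # keyed "0_0_0" with evidence_doc_id "11", evidence_label "SUPPORT",
    # and evidence_sentences [0, 1]; claims without evidence fall through
    # to a single example with empty evidence fields.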