Modalities: Text
Languages: English
Libraries: Datasets

gabrielaltay committed on
Commit 0fc3dba · 1 Parent(s): a5cb561

upload hubscripts/scifact_hub.py to hub from bigbio repo

Files changed (1)
  1. scifact.py +421 -0
scifact.py ADDED
@@ -0,0 +1,421 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
from itertools import chain
from typing import Dict, List, Tuple

import datasets
from datasets import Value
import pandas as pd

from .bigbiohub import pairs_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks

_LANGUAGES = ['English']
_PUBMED = False
_LOCAL = False
_CITATION = """\
@inproceedings{wadden2020fact,
    author    = {David Wadden and Shanchuan Lin and Kyle Lo and Lucy Lu Wang and Madeleine van Zuylen and Arman Cohan and Hannaneh Hajishirzi},
    title     = {Fact or Fiction: Verifying Scientific Claims},
    booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
    year      = {2020},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2020.emnlp-main.609},
    doi       = {10.18653/v1/2020.emnlp-main.609},
    pages     = {7534--7550},
}
"""

_DATASETNAME = "scifact"
_DISPLAYNAME = "SciFact"


_DESCRIPTION_BASE = """\
SciFact is a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts, and annotated with labels and rationales.
"""

_SOURCE_CORPUS_DESCRIPTION = f"""\
{_DESCRIPTION_BASE} This config has abstracts and document ids.
"""

_SOURCE_CLAIMS_DESCRIPTION = f"""\
{_DESCRIPTION_BASE} This config connects the claims to the evidence and doc ids.
"""

_BIGBIO_PAIRS_RATIONALE_DESCRIPTION = f"""\
{_DESCRIPTION_BASE} This task is the following: given a claim and a text span composed of one or more sentences from an abstract, predict a label from ("rationale", "not_rationale") indicating if the span is evidence (can be supporting or refuting) for the claim. This roughly corresponds to the second task outlined in Section 5 of the paper.
"""

_BIGBIO_PAIRS_LABELPREDICTION_DESCRIPTION = f"""\
{_DESCRIPTION_BASE} This task is the following: given a claim and a text span composed of one or more sentences from an abstract, predict a label from ("SUPPORT", "NOINFO", "CONTRADICT") indicating if the span supports, provides no info, or contradicts the claim. This roughly corresponds to the third task outlined in Section 5 of the paper.
"""

_DESCRIPTION = {
    "scifact_corpus_source": _SOURCE_CORPUS_DESCRIPTION,
    "scifact_claims_source": _SOURCE_CLAIMS_DESCRIPTION,
    "scifact_rationale_bigbio_pairs": _BIGBIO_PAIRS_RATIONALE_DESCRIPTION,
    "scifact_labelprediction_bigbio_pairs": _BIGBIO_PAIRS_LABELPREDICTION_DESCRIPTION,
}

_HOMEPAGE = "https://scifact.apps.allenai.org/"


_LICENSE = 'Creative Commons Attribution Non Commercial 2.0 Generic'

_URLS = {
    _DATASETNAME: "https://scifact.s3-us-west-2.amazonaws.com/release/latest/data.tar.gz",
}
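
# after extraction, the archive provides data/corpus.jsonl and
# data/claims_{train,dev,test}.jsonl (consumed in _split_generators below)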

_SUPPORTED_TASKS = [Tasks.TEXT_PAIRS_CLASSIFICATION]

_SOURCE_VERSION = "1.0.0"

_BIGBIO_VERSION = "1.0.0"


class SciFact(datasets.GeneratorBasedBuilder):
    """
    SciFact is a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts, and annotated with labels and rationales.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="scifact_corpus_source",
            version=SOURCE_VERSION,
            description="scifact source schema for the corpus config",
            schema="source",
            subset_id="scifact_corpus_source",
        ),
        BigBioConfig(
            name="scifact_claims_source",
            version=SOURCE_VERSION,
            description="scifact source schema for the claims config",
            schema="source",
            subset_id="scifact_claims_source",
        ),
        BigBioConfig(
            name="scifact_rationale_bigbio_pairs",
            version=BIGBIO_VERSION,
            description="scifact BigBio text pairs classification schema for rationale task",
            schema="bigbio_pairs",
            subset_id="scifact_rationale_pairs",
        ),
        BigBioConfig(
            name="scifact_labelprediction_bigbio_pairs",
            version=BIGBIO_VERSION,
            description="scifact BigBio text pairs classification schema for label prediction task",
            schema="bigbio_pairs",
            subset_id="scifact_labelprediction_pairs",
        ),
    ]

    DEFAULT_CONFIG_NAME = "scifact_claims_source"
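
    # illustrative only (hub repo path assumed): each config above is loaded by name,
    # e.g. datasets.load_dataset("bigbio/scifact", name="scifact_rationale_bigbio_pairs")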

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            # modified from
            # https://huggingface.co/datasets/scifact/blob/main/scifact.py#L50

            if self.config.name == "scifact_corpus_source":
                features = datasets.Features(
                    {
                        "doc_id": Value("int32"),  # The document's S2ORC ID.
                        "title": Value("string"),  # The title.
                        "abstract": [Value("string")],  # The abstract, written as a list of sentences.
                        "structured": Value("bool"),  # Indicator for whether this is a structured abstract.
                    }
                )

            elif self.config.name == "scifact_claims_source":
                features = datasets.Features(
                    {
                        "id": Value("int32"),  # An integer claim ID.
                        "claim": Value("string"),  # The text of the claim.
                        "evidences": [
                            {
                                "doc_id": Value("int32"),  # source doc_id for evidence
                                "sentence_ids": [Value("int32")],  # sentence ids from doc_id
                                "label": Value("string"),  # SUPPORT or CONTRADICT
                            },
                        ],
                        "cited_doc_ids": [Value("int32")],  # The claim's "cited documents".
                    }
                )
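                # illustrative claims row (hypothetical values):
                # {"id": 1, "claim": "...", "cited_doc_ids": [123],
                #  "evidences": [{"doc_id": 123, "sentence_ids": [2, 3], "label": "SUPPORT"}]}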

            else:
                raise NotImplementedError(
                    f"{self.config.name} config not implemented"
                )

        elif self.config.schema == "bigbio_pairs":
            features = pairs_features

        else:
            raise NotImplementedError(f"{self.config.schema} schema not implemented")

        return datasets.DatasetInfo(
            description=_DESCRIPTION[self.config.name],
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        urls = _URLS[_DATASETNAME]
        self.config.data_dir = dl_manager.download_and_extract(urls)

        if self.config.name == "scifact_corpus_source":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(
                            self.config.data_dir, "data", "corpus.jsonl"
                        ),
                        "split": "train",
                    },
                ),
            ]

        # the test split is only returned in source schema
        # this is b/c it only has claims with no cited docs or evidence
        # the bigbio implementation of this dataset requires
        # cited docs or evidence to construct samples
        elif self.config.name == "scifact_claims_source":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(
                            self.config.data_dir, "data", "claims_train.jsonl"
                        ),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": os.path.join(
                            self.config.data_dir, "data", "claims_dev.jsonl"
                        ),
                        "split": "dev",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": os.path.join(
                            self.config.data_dir, "data", "claims_test.jsonl"
                        ),
                        "split": "test",
                    },
                ),
            ]

        elif self.config.name in [
            "scifact_rationale_bigbio_pairs",
            "scifact_labelprediction_bigbio_pairs",
        ]:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(
                            self.config.data_dir, "data", "claims_train.jsonl"
                        ),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": os.path.join(
                            self.config.data_dir, "data", "claims_dev.jsonl"
                        ),
                        "split": "dev",
                    },
                ),
            ]


    def _source_generate_examples(self, filepath, split) -> Tuple[str, Dict[str, str]]:

        # here we just read corpus.jsonl and return the abstracts
        if self.config.name == "scifact_corpus_source":
            with open(filepath) as fp:
                for id_, row in enumerate(fp.readlines()):
                    data = json.loads(row)
                    yield id_, {
                        "doc_id": int(data["doc_id"]),
                        "title": data["title"],
                        "abstract": data["abstract"],
                        "structured": data["structured"],
                    }

        # here we are reading one of claims_(train|dev|test).jsonl
        elif self.config.name == "scifact_claims_source":

            # claims_test.jsonl only has "id" and "claim" keys
            # claims_train.jsonl and claims_dev.jsonl sometimes have evidence
            with open(filepath) as fp:
                for id_, row in enumerate(fp.readlines()):
                    data = json.loads(row)
                    evidences_dict = data.get("evidence", {})
                    evidences_list = []
                    for doc_id, sent_lbl_list in evidences_dict.items():
                        for sent_lbl_dict in sent_lbl_list:
                            evidence = {
                                # evidence keys are doc-id strings; cast to match the int32 schema
                                "doc_id": int(doc_id),
                                "sentence_ids": sent_lbl_dict["sentences"],
                                "label": sent_lbl_dict["label"],
                            }
                            evidences_list.append(evidence)

                    yield id_, {
                        "id": data["id"],
                        "claim": data["claim"],
                        "evidences": evidences_list,
                        "cited_doc_ids": data.get("cited_doc_ids", []),
                    }


    def _bigbio_generate_examples(self, filepath, split) -> Tuple[str, Dict[str, str]]:
        """
        Here we always create one sample per sentence group.
        Any sentence group in an evidence attribute will have
        a label in {"rationale"} for the rationale task or
        in {"SUPPORT", "CONTRADICT"} for the labelprediction task.
        All other sentences will have either a "not_rationale"
        label or a "NOINFO" label depending on the task.
        """
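        # worked example (hypothetical numbers): for a claim whose evidence is
        # {"123": [{"sentences": [2, 3], "label": "SUPPORT"}]} and whose cited
        # doc 123 has a five-sentence abstract, this yields one SUPPORT sample
        # spanning sentences 2-3 plus NOINFO samples for sentences 0, 1 and 4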

        # read corpus (one row per abstract)
        corpus_file_path = os.path.join(self.config.data_dir, "data", "corpus.jsonl")
        df_corpus = pd.read_json(corpus_file_path, lines=True)

        # create one row per sentence and create sentence index
        df_sents = df_corpus.explode('abstract')
        df_sents = df_sents.rename(columns={"abstract": "sentence"})
        df_sents['sent_num'] = df_sents.groupby('doc_id').cumcount()
        df_sents['doc_sent_id'] = df_sents.apply(lambda x: f"{x['doc_id']}-{x['sent_num']}", axis=1)

        # read claims
        df_claims = pd.read_json(filepath, lines=True)


        # join claims to corpus
        for _, claim_row in df_claims.iterrows():

            evidence = claim_row['evidence']
            cited_doc_ids = set(claim_row['cited_doc_ids'])
            evidence_doc_ids = set([int(doc_id) for doc_id in evidence.keys()])

            # assert all evidence doc IDs are in cited_doc_ids
            assert len(evidence_doc_ids - cited_doc_ids) == 0

            # this will have all abstract sentences from cited docs
            df_claim_sents = df_sents[df_sents['doc_id'].isin(cited_doc_ids)]

            # create all sentence samples as NOINFO, then fix the evidence spans below
            noinfo_samples = {}
            for _, row in df_claim_sents.iterrows():
                sample = {
                    "claim": claim_row["claim"],
                    "claim_id": claim_row["id"],
                    "doc_id": row['doc_id'],
                    "sentence_ids": (row['sent_num'],),
                    "doc_sent_ids": (row['doc_sent_id'],),
                    "span": row['sentence'].strip(),
                    "label": "NOINFO",
                }
                noinfo_samples[sample["doc_sent_ids"]] = sample
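                # keys are one-element tuples of doc_sent_id strings so the
                # evidence loop below can remove covered sentences via (doc_sent_id,)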

            # create evidence samples and remove from noinfo samples as we go
            evidence_samples = []
            for doc_id_str, sent_lbl_list in evidence.items():
                doc_id = int(doc_id_str)

                for sent_lbl_dict in sent_lbl_list:
                    sent_ids = sent_lbl_dict['sentences']
                    doc_sent_ids = [f"{doc_id}-{sent_id}" for sent_id in sent_ids]
                    df_evi = df_claim_sents[df_claim_sents['doc_sent_id'].isin(doc_sent_ids)]

                    sample = {
                        "claim": claim_row["claim"],
                        "claim_id": claim_row["id"],
                        "doc_id": doc_id,
                        "sentence_ids": tuple(sent_ids),
                        "doc_sent_ids": tuple(doc_sent_ids),
                        "span": " ".join([el.strip() for el in df_evi["sentence"].values]),
                        "label": sent_lbl_dict["label"],
                    }
                    evidence_samples.append(sample)
                    for doc_sent_id in doc_sent_ids:
                        del noinfo_samples[(doc_sent_id,)]

            # combine all samples and put them back in sentence order
            all_samples = evidence_samples + list(noinfo_samples.values())
            all_samples = sorted(all_samples, key=lambda x: (x['doc_id'], x['sentence_ids'][0]))

            # add a unique ID
            for _id, sample in enumerate(all_samples):
                sample["id"] = f"{_id}-{sample['claim_id']}-{sample['doc_id']}-{sample['sentence_ids'][0]}"

            RATIONALE_LABEL_MAP = {
                "SUPPORT": "rationale",
                "CONTRADICT": "rationale",
                "NOINFO": "not_rationale",
            }

            if self.config.name == "scifact_rationale_bigbio_pairs":
                for sample in all_samples:
                    yield sample['id'], {
                        "id": sample["id"],
                        "document_id": sample["doc_id"],
                        "text_1": sample["claim"],
                        "text_2": sample["span"],
                        "label": RATIONALE_LABEL_MAP[sample['label']],
                    }

            elif self.config.name == "scifact_labelprediction_bigbio_pairs":
                for sample in all_samples:
                    yield sample['id'], {
                        "id": sample["id"],
                        "document_id": sample["doc_id"],
                        "text_1": sample["claim"],
                        "text_2": sample["span"],
                        "label": sample['label'],
                    }

    def _generate_examples(self, filepath, split) -> Tuple[int, dict]:

        if "source" in self.config.name:
            for sample in self._source_generate_examples(filepath, split):
                yield sample

        elif "bigbio" in self.config.name:
            for sample in self._bigbio_generate_examples(filepath, split):
                yield sample
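
For reference, a minimal usage sketch of the configs this script defines (the hub repo path "bigbio/scifact" is an assumption; substitute the path this file is actually uploaded to):

import datasets

# source claims: one record per claim, with evidences and cited_doc_ids
claims = datasets.load_dataset("bigbio/scifact", name="scifact_claims_source")
print(claims["train"][0])

# BigBio pairs: one record per (claim, sentence group) with a rationale label
rationale = datasets.load_dataset("bigbio/scifact", name="scifact_rationale_bigbio_pairs")
print(rationale["train"][0])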