Clémentine committed on
Commit
4a0598c
·
1 Parent(s): 3bf6e22
med_paragraph_simplification.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Covid Dialog dataset in English and Chinese"""
16
+
17
+
18
+ import copy
19
+ import os
20
+ import re
21
+ import textwrap
22
+
23
+ import datasets
24
+
25
+
26
+ # BibTeX citation
27
+ _CITATION = """\
28
+ @inproceedings{devaraj-etal-2021-paragraph,
29
+ title = "Paragraph-level Simplification of Medical Texts",
30
+ author = "Devaraj, Ashwin and Marshall, Iain and Wallace, Byron and Li, Junyi Jessy",
31
+ booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for
32
+ Computational Linguistics",
33
+ month = jun,
34
+ year = "2021",
35
+ publisher = "Association for Computational Linguistics",
36
+ url = "https://www.aclweb.org/anthology/2021.naacl-main.395",
37
+ pages = "4972--4984",
38
+
39
+ """
40
+
41
# Official description of the dataset, including a worked example record.
# NOTE(review): kept verbatim — this string is user-visible dataset metadata.
_DESCRIPTION = textwrap.dedent(
    """
    "Paragraph-level Simplification of Medical Texts" (Devaraj et al.) studies the problem of learning to simplify
    medical texts. One of their contributions is a new corpus that is composed of technical abstracts and their
    lay summaries on various clinical topics.

    The author generated train/val/test splits, which are available in the GitHub repository linked in the paper.

    The following is an example from the dataset:

    {
    "doi": "10.1002/14651858.CD011112.pub2",
    "abstract": "We included six studies (reported as seven papers) involving 326 participants whose ages ranged
    from 39 to 83 years, with a gender bias towards men (73% to 95% across studies), reflecting the characteristics
    of patients with HNC. The risk of bias in the studies was generally high. We did not pool data from studies
    because of significant differences in the interventions and outcomes evaluated. We found a lack of
    standardisation and consistency in the outcomes measured and the endpoints at which they were evaluated.
    We found no evidence that therapeutic exercises were better than TAU, or any other treatment, in improving the
    safety and efficiency of oral swallowing (our primary outcome) or in improving any of the secondary outcomes.
    Using the GRADE system, we classified the overall quality of the evidence for each outcome as very low, due to
    the limited number of trials and their low quality. There were no adverse events reported that were directly
    attributable to the intervention (swallowing exercises). We found no evidence that undertaking therapeutic
    exercises before, during and/or immediately after HNC treatment leads to improvement in oral swallowing. This
    absence of evidence may be due to the small participant numbers in trials, resulting in insufficient power to
    detect any difference. Data from the identified trials could not be combined due to differences in the choice
    of primary outcomes and in the measurement tools used to assess them, and the differing baseline and endpoints
    across studies. Designing and implementing studies with stronger methodological rigour is essential. There needs
    to be agreement about the key primary outcomes, the choice of validated assessment tools to measure them and the
    time points at which those measurements are made.",
    "pls": "We included six studies with 326 participants who undertook therapeutic exercises before, during and/or
    after HNC treatment. We could not combine the results of the studies because of the variation in participants'
    cancers, their treatments, the outcomes measured and the tools used to assess them, as well as the differing
    time points for testing. Researchers have compared: (i) therapeutic exercises versus treatment as usual (TAU);
    (ii) therapeutic exercises versus sham therapy; (iii) therapeutic exercises plus TAU versus TAU. The therapeutic
    exercises varied in their design, timing and intensity. TAU involved managing patients' dysphagia when it
    occurred, including inserting a tube for non-oral feeding. The evidence is up to date to 1 July 2016. We found
    no evidence that therapeutic exercises were better than TAU, or any other treatment, in improving the safety and
    efficiency of oral swallowing (our primary outcome) or in improving any of the secondary outcomes. However,
    there is insufficient evidence to draw any clear conclusion about the effects of undertaking therapeutic
    exercises before during and/or immediately after HNC treatment on preventing or reducing dysphagia. Studies had
    small participant numbers, used complex interventions and varied in the choice of outcomes measured, making it
    difficult to draw reliable conclusions. There were no reported adverse events directly attributable to the
    intervention (swallowing exercises). The current quality of the evidence to support the use of therapeutic
    exercises before, during and/or immediately after HNC treatment to prevent/reduce dysphagia is very low. We need
    better designed, rigorous studies with larger participant numbers and agreed endpoints and outcome measurements
    in order to draw clear(er) conclusions."
    },

    where "pls" stands for "plain-language summary".

    Paper: http://arxiv.org/abs/2104.05767
    Code: https://github.com/AshOlogn/Paragraph-level-Simplification-of-Medical-Texts
    """
)
96
+
97
# Link to an official homepage for the dataset here
_HOMEPAGE = "https://github.com/AshOlogn/Paragraph-level-Simplification-of-Medical-Texts"

# Left empty upstream — the source repository does not declare a license.
# NOTE(review): confirm licensing before redistribution.
_LICENSE = ""
101
+
102
+
103
+ import datasets
104
+ import os
105
+ import json
106
+
107
+
108
class Builder(datasets.GeneratorBasedBuilder):
    """Dataset builder for "Paragraph-level Simplification of Medical Texts".

    Each example pairs a technical abstract line ("query") with the
    corresponding plain-language summary line ("answer"), read from
    parallel ``*.source`` / ``*.target`` files for the train, validation
    and test splits.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
        )
    ]

    def _info(self):
        """Return dataset metadata: the two string features plus citation/homepage."""
        features = datasets.Features(
            {
                "query": datasets.Value("string"),
                "answer": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            # BUG FIX: the description previously read "Covid Dialogue dataset",
            # copy-pasted from a different loading script; this is the medical
            # paragraph simplification dataset. (Also dropped the needless
            # f-string prefix — the literal had no placeholders.)
            description="Paragraph-level Simplification of Medical Texts dataset, as preprocessed and shuffled in HELM",
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the six parallel split files and map them to the three splits.

        BUG FIX: ``test_target`` previously downloaded ``"test.source"``, so the
        test split yielded the source text as both query and answer.
        """
        test_source = dl_manager.download("test.source")
        test_target = dl_manager.download("test.target")
        train_source = dl_manager.download("train.source")
        train_target = dl_manager.download("train.target")
        val_source = dl_manager.download("valid.source")
        val_target = dl_manager.download("valid.target")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"target": train_target, "source": train_source},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"target": val_target, "source": val_source},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"target": test_target, "source": test_source},
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, source, target):
        """Yield ``(idx, {"query": ..., "answer": ...})`` by zipping the two files line-wise.

        ``zip`` stops at the shorter file, so a length mismatch silently
        truncates — assumes the source/target files are exactly parallel.
        """
        # Single `with` handles both files and closes them even on error.
        with open(source, encoding="utf-8") as f_source, open(target, encoding="utf-8") as f_target:
            for idx, (s, t) in enumerate(zip(f_source, f_target)):
                yield idx, {"query": s.rstrip(), "answer": t.rstrip()}
test.source ADDED
The diff for this file is too large to render. See raw diff
 
test.target ADDED
The diff for this file is too large to render. See raw diff
 
train.source ADDED
The diff for this file is too large to render. See raw diff
 
train.target ADDED
The diff for this file is too large to render. See raw diff
 
valid.source ADDED
The diff for this file is too large to render. See raw diff
 
valid.target ADDED
The diff for this file is too large to render. See raw diff