lovodkin93 committed · Commit 3e469d5 · 1 Parent(s): 76f5c07

update the running script

Files changed (1)
  1. Controlled-Text-Reduction-dataset.py +0 -281
Controlled-Text-Reduction-dataset.py CHANGED
@@ -1,242 +1,3 @@
- # # coding=utf-8
- # # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- # #
- # # Licensed under the Apache License, Version 2.0 (the "License");
- # # you may not use this file except in compliance with the License.
- # # You may obtain a copy of the License at
- # #
- # # http://www.apache.org/licenses/LICENSE-2.0
- # #
- # # Unless required by applicable law or agreed to in writing, software
- # # distributed under the License is distributed on an "AS IS" BASIS,
- # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # # See the License for the specific language governing permissions and
- # # limitations under the License.
- # """A Dataset loading script for the Controlled Text Reduction dataset."""
-
-
- # import datasets
- # from dataclasses import dataclass
- # from pathlib import Path
- # from typing import List, Tuple
- # import pandas as pd
- # import json
- # import gzip
- # import itertools
-
-
- # _CITATION = """"""
- # # _CITATION = """\
- # # @inproceedings{roit2020controlled,
- # # title={Controlled Crowdsourcing for High-Quality QA-SRL Annotation},
- # # author={Roit, Paul and Klein, Ayal and Stepanov, Daniela and Mamou, Jonathan and Michael, Julian and Stanovsky, Gabriel and Zettlemoyer, Luke and Dagan, Ido},
- # # booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
- # # pages={7008--7013},
- # # year={2020}
- # # }
- # # """
-
-
- # _DESCRIPTION = """\
- # The dataset contains document-summary pairs with document spans (referred to as "highlights"), indicating the "pre-selected" spans that lead to the creation of the summary.
- # The evaluation and test datasets were constructed via controlled crowdsourcing.
- # The train datasets were automatically generated using the summary-source proposition-level alignment model SuperPAL (Ernst et al., 2021).
- # """
-
- # _HOMEPAGE = "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main"
-
- # _LICENSE = """MIT License
- # Copyright (c) 2022 lovodkin93
- # Permission is hereby granted, free of charge, to any person obtaining a copy
- # of this software and associated documentation files (the "Software"), to deal
- # in the Software without restriction, including without limitation the rights
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- # copies of the Software, and to permit persons to whom the Software is
- # furnished to do so, subject to the following conditions:
- # The above copyright notice and this permission notice shall be included in all
- # copies or substantial portions of the Software.
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- # SOFTWARE."""
-
-
- # # _URLs = {
- # # "csv": {
- # # "sentences": {
- # # "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.dev.full.csv",
- # # "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikinews.test.full.csv",
- # # "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.dev.full.csv",
- # # "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/sentences/wikipedia.test.full.csv",
- # # },
- # # "qasrl-annotations": {
- # # "wikinews.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.dev.gold.csv",
- # # "wikinews.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikinews.test.gold.csv",
- # # "wikipedia.dev": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.dev.gold.csv",
- # # "wikipedia.test": "https://github.com/plroit/qasrl-gs/raw/master/data/gold/wikipedia.test.gold.csv",
- # # },
- # # },
- # # "jsonl": "https://qasrl.org/data/qasrl-gs.tar"
- # # }
-
- # _URLs = {
- # "DUC-2001-2002": {
- # "dev": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/dev_DUC-2001-2002.csv",
- # "test": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/test_DUC-2001-2002.csv",
- # "train": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/train_DUC-2001-2002.csv"
- # },
- # "CNN-DM": {
- # "train": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/train_CNNDM.csv",
- # "dev": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/dev_DUC-2001-2002.csv",
- # "test": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/test_DUC-2001-2002.csv",
- # },
- # }
-
-
- # @dataclass
- # class ControlledTextReductionConfig(datasets.BuilderConfig):
- # """ Allow the loader to re-distribute the original dev and test splits between train, dev and test. """
- # data_source: str = "DUC-2001-2002" # "DUC-2001-2002" or "CNN-DM"
-
-
-
- # class ControlledTextReduction(datasets.GeneratorBasedBuilder):
- # """Controlled Text Reduction: dataset for the Controlled Text Reduction task ().
- # Each data point consists of a document, a summary, and a list of spans of the document that are the pre-selected content whose summary is the summary"""
-
-
- # VERSION = datasets.Version("1.0.0")
-
- # BUILDER_CONFIG_CLASS = ControlledTextReductionConfig
-
- # BUILDER_CONFIGS = [
- # ControlledTextReductionConfig(
- # name="DUC-2001-2002",
- # version=VERSION,
- # description="This provides the Controlled Text Reduction dataset extracted from the DUC 2001-2002 Single Document Summarization benchmark",
- # data_source="DUC-2001-2002"
- # ),
- # ControlledTextReductionConfig(
- # name="CNN-DM",
- # version=VERSION,
- # description="This provides the Controlled Text Reduction dataset extracted from the CNN-DM dataset (the train split)",
- # data_source="CNN-DM"
- # )
- # ]
-
- # DEFAULT_CONFIG_NAME = (
- # "DUC-2001-2002" # It's not mandatory to have a default configuration. Just use one if it make sense.
- # )
-
- # def _info(self):
- # features = datasets.Features(
- # {
- # "doc_text": datasets.Value("string"),
- # "summary_text": datasets.Value("string"),
- # "highlight_spans": datasets.Value("string")
- # }
- # )
- # return datasets.DatasetInfo(
- # # This is the description that will appear on the datasets page.
- # description=_DESCRIPTION,
- # # This defines the different columns of the dataset and their types
- # features=features, # Here we define them above because they are different between the two configurations
- # # If there's a common (input, target) tuple from the features,
- # # specify them here. They'll be used if as_supervised=True in
- # # builder.as_dataset.
- # supervised_keys=None,
- # # Homepage of the dataset for documentation
- # homepage=_HOMEPAGE,
- # # License for the dataset if available
- # license=_LICENSE,
- # # Citation for the dataset
- # citation=_CITATION,
- # )
-
- # def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
- # """Returns SplitGenerators."""
-
- # URLs = _URLs[self.config.data_source]
- # # Download and prepare all files - keep same structure as URLs
- # corpora = {section: Path(dl_manager.download_and_extract(URLs[section]))
- # for section in URLs}
-
- # if self.config.data_source=="CNN-DM":
- # return [
- # datasets.SplitGenerator(
- # name=datasets.Split.TRAIN,
- # # These kwargs will be passed to _generate_examples
- # gen_kwargs={
- # "filepath": corpora["train"]
- # },
- # ),
- # datasets.SplitGenerator(
- # name=datasets.Split.VALIDATION,
- # # These kwargs will be passed to _generate_examples
- # gen_kwargs={
- # "filepath": corpora["dev"]
- # },
- # ),
- # datasets.SplitGenerator(
- # name=datasets.Split.TEST,
- # # These kwargs will be passed to _generate_examples
- # gen_kwargs={
- # "filepath": corpora["test"]
- # },
- # ),
- # ]
-
- # else:
- # return [
- # datasets.SplitGenerator(
- # name=datasets.Split.TRAIN,
- # # These kwargs will be passed to _generate_examples
- # gen_kwargs={
- # "filepath": corpora["train"]
- # },
- # ),
- # datasets.SplitGenerator(
- # name=datasets.Split.VALIDATION,
- # # These kwargs will be passed to _generate_examples
- # gen_kwargs={
- # "filepath": corpora["dev"]
- # },
- # ),
- # datasets.SplitGenerator(
- # name=datasets.Split.TEST,
- # # These kwargs will be passed to _generate_examples
- # gen_kwargs={
- # "filepath": corpora["test"]
- # },
- # ),
- # ]
-
- # def _generate_examples(self, filepath: List[str]):
-
- # """ Yields Controlled Text Reduction examples from a csv file. Each instance contains the document, the summary and the pre-selected spans."""
-
- # # merge annotations from sections
- # df = pd.read_csv(filepath, index_col=False)
- # for counter, dic in enumerate(df.to_dict('records')):
- # columns_to_load_into_object = ["doc_text", "summary_text", "highlight_spans"]
- # for key in columns_to_load_into_object:
- # dic[key] = eval(dic[key])
- # yield counter, dic
-
-
-
-
-
- #################################################################################################################################################
-
-
-
-
-
-
  # coding=utf-8
  # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
  #
@@ -313,29 +74,6 @@ _URLs = {
  },
  }

- # _URLs = {
- # "dev_DUC-2001-2002": "https://media.githubusercontent.com/media/lovodkin93/Controlled_Text_Reduction/main/data/dev_DUC-2001-2002.csv",
- # "test_DUC-2001-2002": "https://media.githubusercontent.com/media/lovodkin93/Controlled_Text_Reduction/main/data/test_DUC-2001-2002.csv",
- # "train_DUC-2001-2002": "https://media.githubusercontent.com/media/lovodkin93/Controlled_Text_Reduction/main/data/train_DUC-2001-2002.csv"
- # }
-
-
- COLUMNS = ["doc_text", "summary_text", "highlight_spans"]
-
-
- # _URLs = {
- # "DUC-2001-2002": {
- # "dev": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/dev_DUC-2001-2002.csv",
- # "test": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/test_DUC-2001-2002.csv",
- # "train": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/train_DUC-2001-2002.csv"
- # },
- # "CNN-DM": {
- # "train": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/train_CNNDM.csv",
- # "dev": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/dev_DUC-2001-2002.csv",
- # "test": "https://github.com/lovodkin93/Controlled_Text_Reduction/tree/main/data/test_DUC-2001-2002.csv",
- # },
- # }
-

  @dataclass
  class ControlledTextReductionConfig(datasets.BuilderConfig):
@@ -444,22 +182,6 @@ class ControlledTectReduction(datasets.GeneratorBasedBuilder):
  ),
  ]

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-

  def _generate_examples(self, filepath: List[str]):

@@ -468,7 +190,4 @@ class ControlledTectReduction(datasets.GeneratorBasedBuilder):
  # merge annotations from sections
  df = pd.read_csv(filepath)
  for counter, dic in enumerate(df.to_dict('records')):
- columns_to_load_into_object = ["doc_text", "summary_text", "highlight_spans"]
- # for key in columns_to_load_into_object:
- # dic[key] = eval(dic[key])
  yield counter, dic
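
For reference, a minimal usage sketch of the updated loading script. It assumes the script is hosted as a script-based dataset repository under lovodkin93 (the exact repository id below is an assumption) and that the installed `datasets` version supports loading such scripts:

from datasets import load_dataset

# Config names "DUC-2001-2002" (the default) and "CNN-DM" are defined by the script;
# the repository id below is an assumption, not confirmed by this commit.
ctr = load_dataset(
    "lovodkin93/Controlled-Text-Reduction-dataset",
    "DUC-2001-2002",
    trust_remote_code=True,  # recent `datasets` releases require opting in to dataset scripts
)

# Each example keeps the three CSV columns read by _generate_examples:
# "doc_text", "summary_text", and "highlight_spans".
example = ctr["train"][0]
print(example["doc_text"][:200])
print(example["highlight_spans"])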