Datasets:

Modalities:
Tabular
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
Dask
License:
albertvillanova HF staff committed on
Commit
22d7819
1 Parent(s): f4a7713

Delete loading script

Browse files
Files changed (1) hide show
  1. discofuse.py +0 -194
discofuse.py DELETED
@@ -1,194 +0,0 @@
1
"""TODO(discofuse): Add a description here."""


import csv
import os

import datasets


# Base URL of the Google Research bucket hosting the DiscoFuse zip archives.
_URL_ = "https://storage.googleapis.com/gresearch/discofuse/"
# BibTeX entry for the DiscoFuse paper (Geva et al., NAACL 2019).
_CITATION = """\
@InProceedings{GevaEtAl2019,
title = {DiscoFuse: A Large-Scale Dataset for Discourse-Based Sentence Fusion},
author = {Geva, Mor and Malmi, Eric and Szpektor, Idan and Berant, Jonathan},
booktitle = {Proceedings of the 2019 Annual Conference of the North American Chapter of the Association for Computational Linguistics},
note = {arXiv preprint arXiv:1902.10526},
year = {2019}
}

"""

# TODO(discofuse):
_DESCRIPTION = """\
DISCOFUSE is a large scale dataset for discourse-based sentence fusion.
"""
26
-
27
-
28
class DiscofuseConfig(datasets.BuilderConfig):
    """BuilderConfig for DiscoFuse.

    Args:
        data_url: URL of the zip archive holding this configuration's
            split files (the sports or the wikipedia portion).
        balanced: if True, the builder loads the "balanced" variant of each
            split file (``*_balanced.tsv``) instead of the full files.
        **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``
            (``name``, ``description``, ...).
    """

    def __init__(self, data_url, balanced=False, **kwargs):
        # Pin the config version; everything else goes to the base class.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.balanced = balanced
        self.data_url = data_url
42
-
43
-
44
class Discofuse(datasets.GeneratorBasedBuilder):
    """DiscoFuse: a large-scale dataset for discourse-based sentence fusion."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        DiscofuseConfig(
            name="discofuse-sport", description="sentence fusion", data_url=_URL_ + "discofuse_v1_sports.zip"
        ),
        DiscofuseConfig(
            name="discofuse-wikipedia", description="sentence fusion", data_url=_URL_ + "discofuse_v1_wikipedia.zip"
        ),
    ]

    def _info(self):
        """Returns the DatasetInfo: features, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "connective_string": datasets.Value("string"),
                    "discourse_type": datasets.Value("string"),
                    "coherent_second_sentence": datasets.Value("string"),
                    "has_coref_type_pronoun": datasets.Value("float32"),
                    "incoherent_first_sentence": datasets.Value("string"),
                    "incoherent_second_sentence": datasets.Value("string"),
                    "has_coref_type_nominal": datasets.Value("float32"),
                    "coherent_first_sentence": datasets.Value("string"),
                }
            ),
            # No canonical (input, target) pair for as_supervised=True.
            supervised_keys=None,
            homepage="https://github.com/google-research-datasets/discofuse",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Downloads this config's archive and returns the three splits.

        The sports and wikipedia archives share one layout, differing only
        in the sub-directory name and in whether the balanced or the full
        .tsv variants are read, so a single code path replaces the four
        copy-pasted return blocks of the original implementation.
        """
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        # Only two configs exist; anything other than the sport config is
        # the wikipedia portion.
        subdir = "sports" if self.config.name == "discofuse-sport" else "wikipedia"
        data_dir = os.path.join(dl_dir, "discofuse_v1", subdir)
        suffix = "_balanced" if self.config.balanced else ""
        return [
            datasets.SplitGenerator(
                name=split_name,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={"filepath": os.path.join(data_dir, stem + suffix + ".tsv")},
            )
            for split_name, stem in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.TEST, "test"),
                (datasets.Split.VALIDATION, "dev"),
            )
        ]

    def _generate_examples(self, filepath):
        """Yields (id, example) pairs read from one tab-separated split file.

        The TSV header column names match the feature names exactly, so
        each row is projected onto the feature keys verbatim.
        """
        feature_keys = (
            "connective_string",
            "discourse_type",
            "coherent_second_sentence",
            "has_coref_type_pronoun",
            "incoherent_first_sentence",
            "incoherent_second_sentence",
            "has_coref_type_nominal",
            "coherent_first_sentence",
        )
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for id_, row in enumerate(reader):
                yield id_, {key: row[key] for key in feature_keys}