Datasets: discofuse
Modalities: Tabular, Text
Formats: parquet
Languages: English
ArXiv: arXiv:1902.10526
Libraries: Datasets, Dask
system (HF staff) committed
Commit 800f511
0 Parent(s):

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
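
For reference, the discofuse.py script added below defines two configurations, discofuse-sport and discofuse-wikipedia. A minimal loading sketch with the datasets library, assuming the dataset is published under the "discofuse" identifier (identifier and split choice are illustrative):

from datasets import load_dataset

# Load the smaller Wikipedia configuration; the sport configuration has ~43M training examples.
wikipedia_dev = load_dataset("discofuse", "discofuse-wikipedia", split="validation")
print(wikipedia_dev[0]["incoherent_first_sentence"])
print(wikipedia_dev[0]["coherent_first_sentence"])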

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
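
The rules above route large binary files through Git LFS. A small sketch of a hypothetical helper (not part of this commit) that reads a .gitattributes file and lists the LFS-tracked patterns:

def lfs_patterns(path=".gitattributes"):
    # An LFS rule looks like: <pattern> filter=lfs diff=lfs merge=lfs -text
    patterns = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            parts = line.split()
            if len(parts) > 1 and "filter=lfs" in parts[1:]:
                patterns.append(parts[0])
    return patterns

print(lfs_patterns())  # e.g. ['*.7z', '*.arrow', ..., '*tfevents*']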
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"discofuse-sport": {"description": " DISCOFUSE is a large scale dataset for discourse-based sentence fusion. \n", "citation": "@InProceedings{GevaEtAl2019,\n title = {{DiscoFuse: A Large-Scale Dataset for Discourse-Based Sentence Fusion}},\n author = {Geva, Mor and Malmi, Eric and Szpektor, Idan and Berant, Jonathan},\n booktitle = {Proceedings of the 2019 Annual Conference of the North American Chapter of the Association for Computational Linguistics},\n note = {arXiv preprint arXiv:1902.10526},\n year = {2019}\n}\n\n", "homepage": "https://github.com/google-research-datasets/discofuse", "license": "", "features": {"connective_string": {"dtype": "string", "id": null, "_type": "Value"}, "discourse_type": {"dtype": "string", "id": null, "_type": "Value"}, "coherent_second_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "has_coref_type_pronoun": {"dtype": "float32", "id": null, "_type": "Value"}, "incoherent_first_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "incoherent_second_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "has_coref_type_nominal": {"dtype": "float32", "id": null, "_type": "Value"}, "coherent_first_sentence": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "discofuse", "config_name": "discofuse-sport", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 152156582, "num_examples": 445521, "dataset_name": "discofuse"}, "train": {"name": "train", "num_bytes": 14784888867, "num_examples": 43291020, "dataset_name": "discofuse"}, "validation": {"name": "validation", "num_bytes": 150702794, "num_examples": 440902, "dataset_name": "discofuse"}}, "download_checksums": {"https://storage.googleapis.com/discofuse_dataset_v1/discofuse_v1_sports.tar.gz": {"num_bytes": 4326640797, "checksum": "e2716f9ada309d50e5603182e9d1e8ecb9968b5b143dbd6251c4b10b9ae61517"}}, "download_size": 4326640797, "dataset_size": 15087748243, "size_in_bytes": 19414389040}, "discofuse-wikipedia": {"description": " DISCOFUSE is a large scale dataset for discourse-based sentence fusion. \n", "citation": "@InProceedings{GevaEtAl2019,\n title = {{DiscoFuse: A Large-Scale Dataset for Discourse-Based Sentence Fusion}},\n author = {Geva, Mor and Malmi, Eric and Szpektor, Idan and Berant, Jonathan},\n booktitle = {Proceedings of the 2019 Annual Conference of the North American Chapter of the Association for Computational Linguistics},\n note = {arXiv preprint arXiv:1902.10526},\n year = {2019}\n}\n\n", "homepage": "https://github.com/google-research-datasets/discofuse", "license": "", "features": {"connective_string": {"dtype": "string", "id": null, "_type": "Value"}, "discourse_type": {"dtype": "string", "id": null, "_type": "Value"}, "coherent_second_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "has_coref_type_pronoun": {"dtype": "float32", "id": null, "_type": "Value"}, "incoherent_first_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "incoherent_second_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "has_coref_type_nominal": {"dtype": "float32", "id": null, "_type": "Value"}, "coherent_first_sentence": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "discofuse", "config_name": "discofuse-wikipedia", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 64191920, "num_examples": 163657, "dataset_name": "discofuse"}, "train": {"name": "train", "num_bytes": 6396238380, "num_examples": 16310585, "dataset_name": "discofuse"}, "validation": {"name": "validation", "num_bytes": 65870774, "num_examples": 168081, "dataset_name": "discofuse"}}, "download_checksums": {"https://storage.googleapis.com/discofuse_dataset_v1/discofuse_v1_wikipedia.tar.gz": {"num_bytes": 1717418045, "checksum": "f351abc85a581142fb9e70a2cbedcc0054bc3568f11ce8255389ef5c007eebde"}}, "download_size": 1717418045, "dataset_size": 6526301074, "size_in_bytes": 8243719119}}
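
dataset_infos.json records, per configuration, the feature schema, split sizes, and download checksums shown above. A short sketch (assuming a local copy of the file) that prints the example counts per split:

import json

with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for config_name, info in infos.items():
    for split_name, split in info["splits"].items():
        # e.g. discofuse-sport train 43291020 examples
        print(config_name, split_name, split["num_examples"], "examples")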
discofuse.py ADDED
@@ -0,0 +1,197 @@
+ """TODO(discofuse): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import csv
+ import os
+
+ import datasets
+
+
+ # TODO(discofuse): BibTeX citation
+
+ _URL_ = "https://storage.googleapis.com/discofuse_dataset_v1/"
+ _CITATION = """\
+ @InProceedings{GevaEtAl2019,
+     title = {DiscoFuse: A Large-Scale Dataset for Discourse-Based Sentence Fusion},
+     author = {Geva, Mor and Malmi, Eric and Szpektor, Idan and Berant, Jonathan},
+     booktitle = {Proceedings of the 2019 Annual Conference of the North American Chapter of the Association for Computational Linguistics},
+     note = {arXiv preprint arXiv:1902.10526},
+     year = {2019}
+ }
+
+ """
+
+ # TODO(discofuse):
+ _DESCRIPTION = """\
+ DISCOFUSE is a large scale dataset for discourse-based sentence fusion.
+ """
+
+
+ class DiscofuseConfig(datasets.BuilderConfig):
+
+     """ BuilderConfig for Discofuse"""
+
+     def __init__(self, data_url, balanced=False, **kwargs):
+         """
+
+         Args:
+             balanced: to specify if we want to load the balanced file or the full file
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(DiscofuseConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+         self.balanced = balanced
+         self.data_url = data_url
+
+
+ class Discofuse(datasets.GeneratorBasedBuilder):
+     """TODO(discofuse): Short description of my dataset."""
+
+     # TODO(discofuse): Set up version.
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = [
+         DiscofuseConfig(
+             name="discofuse-sport", description="sentence fusion", data_url=_URL_ + "discofuse_v1_sports.tar.gz"
+         ),
+         DiscofuseConfig(
+             name="discofuse-wikipedia", description="sentence fusion", data_url=_URL_ + "discofuse_v1_wikipedia.tar.gz"
+         ),
+     ]
+
+     def _info(self):
+         # TODO(discofuse): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "connective_string": datasets.Value("string"),
+                     "discourse_type": datasets.Value("string"),
+                     "coherent_second_sentence": datasets.Value("string"),
+                     "has_coref_type_pronoun": datasets.Value("float32"),
+                     "incoherent_first_sentence": datasets.Value("string"),
+                     "incoherent_second_sentence": datasets.Value("string"),
+                     "has_coref_type_nominal": datasets.Value("float32"),
+                     "coherent_first_sentence": datasets.Value("string"),
+                     # These are the features of your dataset like images, labels ...
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://github.com/google-research-datasets/discofuse",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(discofuse): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         if self.config.name == "discofuse-sport":
+             dl_dir = dl_manager.download_and_extract(self.config.data_url)
+             data_dir = os.path.join(dl_dir, "discofuse_v1/sports")
+             if self.config.balanced:
+                 return [
+                     datasets.SplitGenerator(
+                         name=datasets.Split.TRAIN,
+                         # These kwargs will be passed to _generate_examples
+                         gen_kwargs={"filepath": os.path.join(data_dir, "train_balanced.tsv")},
+                     ),
+                     datasets.SplitGenerator(
+                         name=datasets.Split.TEST,
+                         # These kwargs will be passed to _generate_examples
+                         gen_kwargs={"filepath": os.path.join(data_dir, "test_balanced.tsv")},
+                     ),
+                     datasets.SplitGenerator(
+                         name=datasets.Split.VALIDATION,
+                         # These kwargs will be passed to _generate_examples
+                         gen_kwargs={"filepath": os.path.join(data_dir, "dev_balanced.tsv")},
+                     ),
+                 ]
+             else:
+                 return [
+                     datasets.SplitGenerator(
+                         name=datasets.Split.TRAIN,
+                         # These kwargs will be passed to _generate_examples
+                         gen_kwargs={"filepath": os.path.join(data_dir, "train.tsv")},
+                     ),
+                     datasets.SplitGenerator(
+                         name=datasets.Split.TEST,
+                         # These kwargs will be passed to _generate_examples
+                         gen_kwargs={"filepath": os.path.join(data_dir, "test.tsv")},
+                     ),
+                     datasets.SplitGenerator(
+                         name=datasets.Split.VALIDATION,
+                         # These kwargs will be passed to _generate_examples
+                         gen_kwargs={"filepath": os.path.join(data_dir, "dev.tsv")},
+                     ),
+                 ]
+         else:
+             if self.config.name == "discofuse-wikipedia":
+                 dl_dir = dl_manager.download_and_extract(self.config.data_url)
+                 data_dir = os.path.join(dl_dir, "discofuse_v1/wikipedia")
+                 if self.config.balanced:
+                     return [
+                         datasets.SplitGenerator(
+                             name=datasets.Split.TRAIN,
+                             # These kwargs will be passed to _generate_examples
+                             gen_kwargs={"filepath": os.path.join(data_dir, "train_balanced.tsv")},
+                         ),
+                         datasets.SplitGenerator(
+                             name=datasets.Split.TEST,
+                             # These kwargs will be passed to _generate_examples
+                             gen_kwargs={"filepath": os.path.join(data_dir, "test_balanced.tsv")},
+                         ),
+                         datasets.SplitGenerator(
+                             name=datasets.Split.VALIDATION,
+                             # These kwargs will be passed to _generate_examples
+                             gen_kwargs={"filepath": os.path.join(data_dir, "dev_balanced.tsv")},
+                         ),
+                     ]
+                 else:
+                     return [
+                         datasets.SplitGenerator(
+                             name=datasets.Split.TRAIN,
+                             # These kwargs will be passed to _generate_examples
+                             gen_kwargs={"filepath": os.path.join(data_dir, "train.tsv")},
+                         ),
+                         datasets.SplitGenerator(
+                             name=datasets.Split.TEST,
+                             # These kwargs will be passed to _generate_examples
+                             gen_kwargs={"filepath": os.path.join(data_dir, "test.tsv")},
+                         ),
+                         datasets.SplitGenerator(
+                             name=datasets.Split.VALIDATION,
+                             # These kwargs will be passed to _generate_examples
+                             gen_kwargs={"filepath": os.path.join(data_dir, "dev.tsv")},
+                         ),
+                     ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(discofuse): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             data = csv.DictReader(f, delimiter="\t")
+             for id_, row in enumerate(data):
+                 co_first_sent = row["coherent_first_sentence"]
+                 co_second_sent = row["coherent_second_sentence"]
+                 connect_str = row["connective_string"]
+                 discourse_type = row["discourse_type"]
+                 has_coref_pronoun = row["has_coref_type_pronoun"]
+                 has_coref_nominal = row["has_coref_type_nominal"]
+                 inco_first_sent = row["incoherent_first_sentence"]
+                 inco_second_sent = row["incoherent_second_sentence"]
+                 yield id_, {
+                     "connective_string": connect_str,
+                     "discourse_type": discourse_type,
+                     "coherent_second_sentence": co_second_sent,
+                     "has_coref_type_pronoun": has_coref_pronoun,
+                     "incoherent_first_sentence": inco_first_sent,
+                     "incoherent_second_sentence": inco_second_sent,
+                     "has_coref_type_nominal": has_coref_nominal,
+                     "coherent_first_sentence": co_first_sent,
+                 }
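
Each split is a headered, tab-separated file, and _generate_examples simply streams its rows. A standalone sketch that mirrors that logic on a hypothetical local TSV (the path is illustrative, not part of this commit):

import csv

with open("discofuse_v1/wikipedia/dev.tsv", encoding="utf-8") as f:
    for id_, row in enumerate(csv.DictReader(f, delimiter="\t")):
        # Each row pairs an incoherent two-sentence version with its fused, coherent counterpart.
        print(row["incoherent_first_sentence"], "||", row["incoherent_second_sentence"])
        print(row["coherent_first_sentence"], "||", row["coherent_second_sentence"])
        if id_ == 2:  # show only the first few rows
            break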
dummy/discofuse-sport/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c74ddafa57ebf28c65224a0a21b8df7e22f32caffe1732f97c7775434000f41
+ size 3454
dummy/discofuse-wikipedia/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02026e9cc91b7ada0288fe856319fe54369a92abd0a9b136fbf3195157fcbaea
+ size 3496
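
The two dummy_data.zip entries are Git LFS pointer files: they store only the sha256 oid and byte size of the real archive. A hedged sketch (the local path is illustrative) that checks a downloaded file against the pointer above:

import hashlib

def file_sha256(path):
    # Stream the file in 1 MiB chunks and return its sha256 hex digest.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "02026e9cc91b7ada0288fe856319fe54369a92abd0a9b136fbf3195157fcbaea"
print(file_sha256("dummy/discofuse-wikipedia/1.0.0/dummy_data.zip") == expected)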