Igor committed on
Commit 2a5f91a · 1 Parent(s): dd2d580

feat(dev): add download script

Files changed (1)
  1. daiso.py +211 -0
daiso.py ADDED
@@ -0,0 +1,211 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+ import textwrap
+
+ import pandas as pd
+
+ import datasets
+
+ _VERSION = datasets.Version("1.1.0")
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _DAISO_CITATION = """\
+ @InProceedings{huggingface:dataset,
+     title = {A great new dataset},
+     author = {Igor Kuzmin},
+     year = {2023}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DAISO_DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method)
+ _URL = "https://raw.githubusercontent.com/igorktech/DAISO-benchmark/dev"
+
+
+ class DAISOConfig(datasets.BuilderConfig):
+     """BuilderConfig for DAISO."""
+
+     def __init__(self, features, data_url, citation, url, **kwargs):
+         """BuilderConfig for DAISO.
+
+         Args:
+             features: `list[string]`, list of the features that will appear in the
+                 feature dict. Should not include "label".
+             data_url: `string`, url to download the csv file from.
+             citation: `string`, citation for the data set.
+             url: `string`, url for information about the data set.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(DAISOConfig, self).__init__(version=_VERSION, **kwargs)
+         self.features = features
+         self.data_url = data_url
+         self.citation = citation
+         self.url = url
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class DAISO(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         DAISOConfig(
+             name="dyda",  # must match DEFAULT_CONFIG_NAME below
+             description=textwrap.dedent(
+                 """\
+                 DailyDialog data as distributed with the DAISO benchmark."""
+             ),
+             features=[
+                 "Utterance",
+                 "Dialogue_Act",
+                 "Emotion",
+                 "Dialogue_ID",
+                 "Dialogue_Act_ISO",
+             ],
+             data_url={
+                 "train": _URL + "/dyda/train.csv",
+                 "dev": _URL + "/dyda/dev.csv",
+                 "test": _URL + "/dyda/test.csv",
+             },
+             citation=textwrap.dedent(
+                 """\
+                 @InProceedings{li2017dailydialog,
+                     author = {Li, Yanran and Su, Hui and Shen, Xiaoyu and Li, Wenjie and Cao, Ziqiang and Niu, Shuzi},
+                     title = {DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset},
+                     booktitle = {Proceedings of The 8th International Joint Conference on Natural Language Processing (IJCNLP 2017)},
+                     year = {2017}
+                 }"""
+             ),
+             url="http://yanran.li/dailydialog.html",
+         )
+     ]
+
+     DEFAULT_CONFIG_NAME = "dyda"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+         features = {feature: datasets.Value("string") for feature in self.config.features}
+         features["Idx"] = datasets.Value("int32")
+         # "Label" mirrors "Dialogue_Act" and is filled in `_generate_examples` below.
+         features["Label"] = datasets.Value("string")
+         # if self.config.name == "":  # This is the name of the configuration selected in BUILDER_CONFIGS above
+         #     features = datasets.Features(
+         #         {
+         #             "sentence": datasets.Value("string"),
+         #             "option1": datasets.Value("string"),
+         #             "answer": datasets.Value("string"),
+         #             # These are the features of your dataset like images, labels ...
+         #         }
+         #     )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DAISO_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             # Features are built above because they can differ between configurations.
+             features=datasets.Features(features),
+             # Homepage of the dataset for documentation
+             homepage=self.config.url,
+             # License for the dataset if available
+             # license=_LICENSE,
+             # Citation for the dataset
+             citation=self.config.citation + "\n" + _DAISO_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         data_files = dl_manager.download(self.config.data_url)
+         splits = []
+         if "train" in data_files:
+             splits.append(datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "file": data_files["train"],
+                     "split": "train",
+                 },
+             ))
+         if "dev" in data_files:
+             splits.append(datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "file": data_files["dev"],
+                     "split": "dev",
+                 },
+             ))
+         if "test" in data_files:
+             splits.append(datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "file": data_files["test"],
+                     "split": "test",
+                 },
+             ))
+         return splits
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, file, split):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         df = pd.read_csv(file, delimiter=",", header=0, quotechar='"', dtype=str)[
+             self.config.features
+         ]
+
+         rows = df.to_dict(orient="records")
+
+         for n, row in enumerate(rows):
+             example = row
+             example["Idx"] = n
+
+             # Expose the dialogue act under a generic "Label" key (declared in `_info`).
+             example["Label"] = example.get("Dialogue_Act")
+
+             yield example["Idx"], example
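Once the script lands, it can be exercised with the standard `datasets` API. A minimal usage sketch, assuming the file is saved locally as daiso.py and using the "dyda" configuration defined in BUILDER_CONFIGS above:

    import datasets

    # Downloads the train/dev/test CSVs listed in data_url and builds the splits.
    dataset = datasets.load_dataset("daiso.py", "dyda")
    print(dataset["train"][0])  # first example, including the generated "Idx" and "Label" fields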