Datasets: mteb /
nouamanetazi committed
Commit ffd58f5
1 parent: 97beb53

Upload massive.py

Files changed (1):
  1. massive.py (+267, -0)
massive.py ADDED
# coding=utf-8

"""MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""

import json
import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
MASSIVE is a parallel dataset of > 1M utterances across 51 languages with annotations
for the Natural Language Understanding tasks of intent prediction and slot annotation.
Utterances span 60 intents and include 55 slot types. MASSIVE was created by localizing
the SLURP dataset, composed of general Intelligent Voice Assistant single-shot interactions.
"""

_CITATION = """\
@article{fitzgerald2022massive,
  title={MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages},
  author={FitzGerald, Jack and others},
  journal={arXiv preprint arXiv:2204.08582},
  year={2022}
}
"""

# The MASSIVE dataset is distributed under CC BY 4.0 (see the alexa/massive repository).
_LICENSE = "CC BY 4.0"

_URL = "https://amazon-massive-nlu-dataset.s3.amazonaws.com/amazon-massive-dataset-1.0.tar.gz"

_LANGUAGES = [
    "af-ZA",
    "am-ET",
    "ar-SA",
    "az-AZ",
    "bn-BD",
    "cy-GB",
    "da-DK",
    "de-DE",
    "el-GR",
    "en-US",
    "es-ES",
    "fa-IR",
    "fi-FI",
    "fr-FR",
    "he-IL",
    "hi-IN",
    "hu-HU",
    "hy-AM",
    "id-ID",
    "is-IS",
    "it-IT",
    "ja-JP",
    "jv-ID",
    "ka-GE",
    "km-KH",
    "kn-IN",
    "ko-KR",
    "lv-LV",
    "ml-IN",
    "mn-MN",
    "ms-MY",
    "my-MM",
    "nb-NO",
    "nl-NL",
    "pl-PL",
    "pt-PT",
    "ro-RO",
    "ru-RU",
    "sl-SL",
    "sq-AL",
    "sv-SE",
    "sw-KE",
    "ta-IN",
    "te-IN",
    "th-TH",
    "tl-PH",
    "tr-TR",
    "ur-PK",
    "vi-VN",
    "zh-CN",
    "zh-TW",
]
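
# Each locale above doubles as a BuilderConfig name (see BUILDER_CONFIGS below),
# so e.g. "en-US" or "fr-FR" selects the corresponding localization.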

_SCENARIOS = [
    "social",
    "transport",
    "calendar",
    "play",
    "news",
    "datetime",
    "recommendation",
    "email",
    "iot",
    "general",
    "audio",
    "lists",
    "qa",
    "cooking",
    "takeaway",
    "music",
    "alarm",
    "weather",
]

_INTENTS = [
    "datetime_query",
    "iot_hue_lightchange",
    "transport_ticket",
    "takeaway_query",
    "qa_stock",
    "general_greet",
    "recommendation_events",
    "music_dislikeness",
    "iot_wemo_off",
    "cooking_recipe",
    "qa_currency",
    "transport_traffic",
    "general_quirky",
    "weather_query",
    "audio_volume_up",
    "email_addcontact",
    "takeaway_order",
    "email_querycontact",
    "iot_hue_lightup",
    "recommendation_locations",
    "play_audiobook",
    "lists_createoradd",
    "news_query",
    "alarm_query",
    "iot_wemo_on",
    "general_joke",
    "qa_definition",
    "social_query",
    "music_settings",
    "audio_volume_other",
    "calendar_remove",
    "iot_hue_lightdim",
    "calendar_query",
    "email_sendemail",
    "iot_cleaning",
    "audio_volume_down",
    "play_radio",
    "cooking_query",
    "datetime_convert",
    "qa_maths",
    "iot_hue_lightoff",
    "iot_hue_lighton",
    "transport_query",
    "music_likeness",
    "email_query",
    "play_music",
    "audio_volume_mute",
    "social_post",
    "alarm_set",
    "qa_factoid",
    "calendar_set",
    "play_game",
    "alarm_remove",
    "lists_remove",
    "transport_taxi",
    "recommendation_movies",
    "iot_coffee",
    "music_query",
    "play_podcasts",
    "lists_query",
]
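
# Intent names are prefixed with their scenario, e.g. "alarm_set" and "alarm_query"
# both belong to the "alarm" scenario.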


class MASSIVE(datasets.GeneratorBasedBuilder):
    """MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            version=datasets.Version("1.0.0"),
            description=f"The MASSIVE corpus for {name}",
        )
        for name in _LANGUAGES
    ]

    DEFAULT_CONFIG_NAME = "en-US"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "locale": datasets.Value("string"),
                    "partition": datasets.Value("string"),
                    "scenario": datasets.features.ClassLabel(names=_SCENARIOS),
                    "intent": datasets.features.ClassLabel(names=_INTENTS),
                    "utt": datasets.Value("string"),
                    "annot_utt": datasets.Value("string"),
                    "worker_id": datasets.Value("string"),
                    "slot_method": datasets.Sequence(
                        {
                            "slot": datasets.Value("string"),
                            "method": datasets.Value("string"),
                        }
                    ),
                    "judgments": datasets.Sequence(
                        {
                            "worker_id": datasets.Value("string"),
                            "intent_score": datasets.Value("int8"),
                            "slots_score": datasets.Value("int8"),
                            "grammar_score": datasets.Value("int8"),
                            "spelling_score": datasets.Value("int8"),
                            "language_identification": datasets.Value("string"),
                        }
                    ),
                },
            ),
            supervised_keys=None,
            homepage="https://github.com/alexa/massive",
            citation=_CITATION,
            license=_LICENSE,
        )
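
    # Illustrative shape of one yielded example (hypothetical values, shown with
    # ClassLabel fields as strings before encoding):
    # {
    #     "id": "0",
    #     "locale": "en-US",
    #     "partition": "train",
    #     "scenario": "alarm",
    #     "intent": "alarm_set",
    #     "utt": "wake me up at nine am on friday",
    #     "annot_utt": "wake me up at [time : nine am] on [date : friday]",
    #     "worker_id": "1",
    #     "slot_method": [{"slot": "time", "method": "translation"}],
    #     "judgments": [],
    # }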

    def _split_generators(self, dl_manager):

        # Download the archive without extracting it; members are streamed via
        # iter_archive(). Each split gets its own iterator, because iter_archive()
        # returns a one-shot generator that a single split would otherwise exhaust.
        archive_path = dl_manager.download(_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "train",
                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "dev",  # MASSIVE names its validation partition "dev"
                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "test",
                    "lang": self.config.name,
                },
            ),
        ]

    def _generate_examples(self, files, split, lang):

        filepath = "1.0/data/" + lang + ".jsonl"

        logger.info("⏳ Generating examples from = %s", filepath)

        key_ = 0
        for path, f in files:
            if path != filepath:
                continue

            # The file holds one JSON object per line, covering every partition;
            # keep only the rows belonging to the requested split.
            for line in f:
                data = json.loads(line)

                if data["partition"] != split:
                    continue

                # Yield the full record so it matches the features declared in _info().
                # "slot_method" and "judgments" are absent from some rows, so they
                # default to empty lists.
                yield key_, {
                    "id": data["id"],
                    "locale": data["locale"],
                    "partition": data["partition"],
                    "scenario": data["scenario"],
                    "intent": data["intent"],
                    "utt": data["utt"],
                    "annot_utt": data["annot_utt"],
                    "worker_id": data["worker_id"],
                    "slot_method": data.get("slot_method", []),
                    "judgments": data.get("judgments", []),
                }
                key_ += 1

            break  # the target .jsonl occurs once in the archive
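
A minimal usage sketch (assumptions: the script above is saved locally as massive.py,
and the installed version of the datasets library still supports script-based loaders):

import datasets

# Build the French localization; the loader exposes train / validation / test splits.
ds = datasets.load_dataset("massive.py", "fr-FR")

example = ds["train"][0]
print(example["utt"])  # raw utterance text

# "scenario" and "intent" are ClassLabel features stored as integer ids;
# decode them back to strings through the split's features:
print(ds["train"].features["scenario"].int2str(example["scenario"]))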