GEM (dataset) · Modalities: Text · Languages: English · Libraries: Datasets
Commit e5d1748 by Sebastian Gehrmann · 1 parent: 8c83980
Files changed (2):
  1. dataset_infos.json +188 -0
  2. schema_guided_dialog.py +182 -0
dataset_infos.json ADDED
@@ -0,0 +1,188 @@
+ {
+   "schema_guided_dialog": {
+     "description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n",
+     "citation": "@article{gem_benchmark,\n author = {Sebastian Gehrmann and\n Tosin P. Adewumi and\n Karmanya Aggarwal and\n Pawan Sasanka Ammanamanchi and\n Aremu Anuoluwapo and\n Antoine Bosselut and\n Khyathi Raghavi Chandu and\n Miruna{-}Adriana Clinciu and\n Dipanjan Das and\n Kaustubh D. Dhole and\n Wanyu Du and\n Esin Durmus and\n Ondrej Dusek and\n Chris Emezue and\n Varun Gangal and\n Cristina Garbacea and\n Tatsunori Hashimoto and\n Yufang Hou and\n Yacine Jernite and\n Harsh Jhamtani and\n Yangfeng Ji and\n Shailza Jolly and\n Dhruv Kumar and\n Faisal Ladhak and\n Aman Madaan and\n Mounica Maddela and\n Khyati Mahajan and\n Saad Mahamood and\n Bodhisattwa Prasad Majumder and\n Pedro Henrique Martins and\n Angelina McMillan{-}Major and\n Simon Mille and\n Emiel van Miltenburg and\n Moin Nadeem and\n Shashi Narayan and\n Vitaly Nikolaev and\n Rubungo Andre Niyongabo and\n Salomey Osei and\n Ankur P. Parikh and\n Laura Perez{-}Beltrachini and\n Niranjan Ramesh Rao and\n Vikas Raunak and\n Juan Diego Rodriguez and\n Sashank Santhanam and\n Joao Sedoc and\n Thibault Sellam and\n Samira Shaikh and\n Anastasia Shimorina and\n Marco Antonio Sobrevilla Cabezudo and\n Hendrik Strobelt and\n Nishant Subramani and\n Wei Xu and\n Diyi Yang and\n Akhila Yerukola and\n Jiawei Zhou},\n title = {The {GEM} Benchmark: Natural Language Generation, its Evaluation and\n Metrics},\n journal = {CoRR},\n volume = {abs/2102.01672},\n year = {2021},\n url = {https://arxiv.org/abs/2102.01672},\n archivePrefix = {arXiv},\n eprint = {2102.01672}\n}\n",
+     "homepage": "https://gem-benchmark.github.io/",
+     "license": "CC-BY-SA-4.0",
+     "features": {
+       "gem_id": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "gem_parent_id": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "dialog_acts": [
+         {
+           "act": {
+             "num_classes": 18,
+             "names": [
+               "AFFIRM",
+               "AFFIRM_INTENT",
+               "CONFIRM",
+               "GOODBYE",
+               "INFORM",
+               "INFORM_COUNT",
+               "INFORM_INTENT",
+               "NEGATE",
+               "NEGATE_INTENT",
+               "NOTIFY_FAILURE",
+               "NOTIFY_SUCCESS",
+               "OFFER",
+               "OFFER_INTENT",
+               "REQUEST",
+               "REQUEST_ALTS",
+               "REQ_MORE",
+               "SELECT",
+               "THANK_YOU"
+             ],
+             "names_file": null,
+             "id": null,
+             "_type": "ClassLabel"
+           },
+           "slot": {
+             "dtype": "string",
+             "id": null,
+             "_type": "Value"
+           },
+           "values": [
+             {
+               "dtype": "string",
+               "id": null,
+               "_type": "Value"
+             }
+           ]
+         }
+       ],
+       "context": [
+         {
+           "dtype": "string",
+           "id": null,
+           "_type": "Value"
+         }
+       ],
+       "dialog_id": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "service": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "turn_id": {
+         "dtype": "int32",
+         "id": null,
+         "_type": "Value"
+       },
+       "prompt": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "target": {
+         "dtype": "string",
+         "id": null,
+         "_type": "Value"
+       },
+       "references": [
+         {
+           "dtype": "string",
+           "id": null,
+           "_type": "Value"
+         }
+       ]
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "builder_name": "gem",
+     "config_name": "schema_guided_dialog",
+     "version": {
+       "version_str": "1.1.0",
+       "description": null,
+       "major": 1,
+       "minor": 1,
+       "patch": 0
+     },
+     "splits": {
+       "train": {
+         "name": "train",
+         "num_bytes": 146648117,
+         "num_examples": 164982,
+         "dataset_name": "gem"
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 9376504,
+         "num_examples": 10000,
+         "dataset_name": "gem"
+       },
+       "test": {
+         "name": "test",
+         "num_bytes": 10160596,
+         "num_examples": 10000,
+         "dataset_name": "gem"
+       },
+       "challenge_train_sample": {
+         "name": "challenge_train_sample",
+         "num_bytes": 441326,
+         "num_examples": 500,
+         "dataset_name": "gem"
+       },
+       "challenge_validation_sample": {
+         "name": "challenge_validation_sample",
+         "num_bytes": 491492,
+         "num_examples": 500,
+         "dataset_name": "gem"
+       },
+       "challenge_test_backtranslation": {
+         "name": "challenge_test_backtranslation",
+         "num_bytes": 512834,
+         "num_examples": 500,
+         "dataset_name": "gem"
+       },
+       "challenge_test_bfp02": {
+         "name": "challenge_test_bfp02",
+         "num_bytes": 529404,
+         "num_examples": 500,
+         "dataset_name": "gem"
+       },
+       "challenge_test_bfp05": {
+         "name": "challenge_test_bfp05",
+         "num_bytes": 515151,
+         "num_examples": 500,
+         "dataset_name": "gem"
+       },
+       "challenge_test_nopunc": {
+         "name": "challenge_test_nopunc",
+         "num_bytes": 509332,
+         "num_examples": 500,
+         "dataset_name": "gem"
+       },
+       "challenge_test_scramble": {
+         "name": "challenge_test_scramble",
+         "num_bytes": 514644,
+         "num_examples": 500,
+         "dataset_name": "gem"
+       }
+     },
+     "download_checksums": {
+       "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_sgd_context.zip": {
+         "num_bytes": 16544230,
+         "checksum": "abb2af00031152dbead4a75275dc195a576005529cc19b7f942669f5d257ef30"
+       },
+       "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/schema_guided_dialog.zip": {
+         "num_bytes": 1282238,
+         "checksum": "79231851df998a9dc2a1298f8061cf7e9e9ad0b1ea34f7e5124eb31960a4b842"
+       }
+     },
+     "download_size": 17826468,
+     "post_processing_size": null,
+     "dataset_size": 169699400,
+     "size_in_bytes": 187525868
+   }
+ }
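
As a quick sanity check on the metadata above, the split inventory can be read directly from this file. A minimal sketch, assuming the file is saved locally as dataset_infos.json (only the JSON is needed, not the loading script):

import json

# Print the declared splits of the schema_guided_dialog config with their sizes.
with open("dataset_infos.json", encoding="utf-8") as f:
    info = json.load(f)["schema_guided_dialog"]

for name, split in info["splits"].items():
    print(f"{name}: {split['num_examples']} examples, {split['num_bytes']} bytes")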
schema_guided_dialog.py ADDED
@@ -0,0 +1,182 @@
+ import json
+ import os
+ import datasets
+
+ _CITATION = """\
+ @inproceedings{rastogi2020towards,
+     title={Towards scalable multi-domain conversational agents: The schema-guided dialogue dataset},
+     author={Rastogi, Abhinav and Zang, Xiaoxue and Sunkara, Srinivas and Gupta, Raghav and Khaitan, Pranav},
+     booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
+     volume={34},
+     number={05},
+     pages={8689--8696},
+     year={2020}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The Schema-Guided Dialogue (SGD) dataset contains 18K multi-domain task-oriented
+ dialogues between a human and a virtual assistant, which covers 17 domains
+ ranging from banks and events to media, calendar, travel, and weather. The
+ only language present in the dataset is English. The SGD dataset provides a
+ challenging testbed for a number of tasks in task-oriented dialogue, including
+ language understanding, slot filling, dialogue state tracking and response
+ generation. For the creation of the SGD dataset, the authors developed a multi-domain
+ dialogue simulator that generates dialogue outlines over an arbitrary combination
+ of APIs, dialogue states and system actions. Then, they used a crowd-sourcing
+ procedure to paraphrase these outlines to natural language utterances. This novel
+ crowd-sourcing procedure preserves all annotations obtained from the simulator and
+ does not require any extra annotations after dialogue collection.
+
+ """
+
+ _URLs = {
+     "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_sgd_context.zip",
+     "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/schema_guided_dialog.zip",
+ }
+
+ _SGD_ACTS = [
+     "AFFIRM",
+     "AFFIRM_INTENT",
+     "CONFIRM",
+     "GOODBYE",
+     "INFORM",
+     "INFORM_COUNT",
+     "INFORM_INTENT",
+     "NEGATE",
+     "NEGATE_INTENT",
+     "NOTIFY_FAILURE",
+     "NOTIFY_SUCCESS",
+     "OFFER",
+     "OFFER_INTENT",
+     "REQUEST",
+     "REQUEST_ALTS",
+     "REQ_MORE",
+     "SELECT",
+     "THANK_YOU",
+ ]
+
+
+ class SchemaGuidedDialog(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+     DEFAULT_CONFIG_NAME = "schema_guided_dialog"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "gem_id": datasets.Value("string"),
+                 "gem_parent_id": datasets.Value("string"),
+                 "dialog_acts": [
+                     {
+                         "act": datasets.ClassLabel(names=_SGD_ACTS),
+                         "slot": datasets.Value("string"),
+                         "values": [datasets.Value("string")],
+                     }
+                 ],
+                 "context": [datasets.Value("string")],
+                 "dialog_id": datasets.Value("string"),
+                 "service": datasets.Value("string"),
+                 "turn_id": datasets.Value("int32"),
+                 "prompt": datasets.Value("string"),
+                 "target": datasets.Value("string"),
+                 "references": [datasets.Value("string")],
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage="",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         dl_dir = dl_manager.download_and_extract(_URLs)
+         challenge_sets = [
+             (
+                 "challenge_train_sample",
+                 "train_schema_guided_dialog_RandomSample500_reformatted.json",
+             ),
+             (
+                 "challenge_validation_sample",
+                 "validation_schema_guided_dialog_RandomSample500_reformatted.json",
+             ),
+             (
+                 "challenge_test_backtranslation",
+                 "test_schema_guided_dialog_BackTranslation500_reformatted.json",
+             ),
+             (
+                 "challenge_test_bfp02",
+                 "test_schema_guided_dialog_ButterFingersPerturbation_p=0.02_500_reformatted.json",
+             ),
+             (
+                 "challenge_test_bfp05",
+                 "test_schema_guided_dialog_ButterFingersPerturbation_p=0.05_500_reformatted.json",
+             ),
+             (
+                 "challenge_test_nopunc",
+                 "test_schema_guided_dialog_WithoutPunctuation500_reformatted.json",
+             ),
+             (
+                 "challenge_test_scramble",
+                 "test_schema_guided_dialog_ScrambleInputStructure500_reformatted.json",
+             ),
+         ]
+         return [
+             datasets.SplitGenerator(
+                 name=spl,
+                 gen_kwargs={
+                     "filepath": os.path.join(dl_dir["data"], "gem_sgd.json"),
+                     "split": spl,
+                 },
+             )
+             for spl in ["train", "validation", "test"]
+         ] + [
+             datasets.SplitGenerator(
+                 name=challenge_split,
+                 gen_kwargs={
+                     "filepath": os.path.join(
+                         dl_dir["challenge_set"], "schema_guided_dialog", filename
+                     ),
+                     "split": challenge_split,
+                 },
+             )
+             for challenge_split, filename in challenge_sets
+         ]
+
+     def _generate_examples(self, filepath, split, filepaths=None, lang=None):
+         """Yields examples."""
+         if "challenge" in split:
+             exples = json.load(open(filepath, encoding="utf-8"))
+             if isinstance(exples, dict):
+                 assert len(exples) == 1, "multiple entries found"
+                 exples = list(exples.values())[0]
+             for id_, exple in enumerate(exples):
+                 if len(exple) == 0:
+                     continue
+                 exple["gem_parent_id"] = exple["gem_id"]
+                 exple["gem_id"] = f"schema_guided_dialog-{split}-{id_}"
+                 yield id_, exple
+         else:
+             examples = json.load(open(filepath, encoding="utf-8"))[split]
+             for id_, example in enumerate(examples):
+                 yield id_, {
+                     "gem_id": f"schema_guided_dialog-{split}-{id_}",
+                     "gem_parent_id": f"schema_guided_dialog-{split}-{id_}",
+                     "dialog_acts": [
+                         {
+                             "act": act_id,
+                             "slot": slot,
+                             "values": values,
+                         }
+                         for act_id, slot, values in example["da"]
+                     ],
+                     "context": example["context"],
+                     "dialog_id": example["dialog_id"],
+                     "service": example["service"],
+                     "turn_id": example["turn_ix"],
+                     "prompt": example["prompt"],
+                     "target": example["target"],
+                     "references": [] if split == "train" else [example["target"]],
+                 }
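
With both files in place, the config can be loaded like any other GEM config. A minimal usage sketch, not part of the commit: the builder and config names ("gem", "schema_guided_dialog") come from dataset_infos.json above, the field names from _info(), and the act labels from _SGD_ACTS; the specific record printed is illustrative only.

import datasets

# Download and build all splits, including the challenge sets registered above.
dsets = datasets.load_dataset("gem", "schema_guided_dialog")

# "act" inside dialog_acts is stored as a ClassLabel id; map it back to a name
# using the label list copied from the script above.
SGD_ACTS = [
    "AFFIRM", "AFFIRM_INTENT", "CONFIRM", "GOODBYE", "INFORM", "INFORM_COUNT",
    "INFORM_INTENT", "NEGATE", "NEGATE_INTENT", "NOTIFY_FAILURE", "NOTIFY_SUCCESS",
    "OFFER", "OFFER_INTENT", "REQUEST", "REQUEST_ALTS", "REQ_MORE", "SELECT", "THANK_YOU",
]

example = dsets["validation"][0]
print(example["prompt"])   # input prompt for the turn
print(example["target"])   # reference system response (also mirrored in "references")
for act in example["dialog_acts"]:
    print(SGD_ACTS[act["act"]], act["slot"], act["values"])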