cjziems committed
Commit 99098dc · 1 Parent(s): 1067923

Upload wikisql_VALUE.py

Files changed (1):
  1. wikisql_VALUE.py +70 -80
wikisql_VALUE.py CHANGED
@@ -30,15 +30,20 @@ _AGG_OPS = ["", "MAX", "MIN", "COUNT", "SUM", "AVG"]
 _COND_OPS = ["=", ">", "<", "OP"]
 
 
-class WikiSQL(datasets.GeneratorBasedBuilder):
-    """WikiSQL: A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
-
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
+class WikiSQLConfig(datasets.BuilderConfig):
+    """BuilderConfig for WikiSQL."""
+
+    def __init__(
+        self,
+        name,
+        description,
+        train_path,
+        dev_path,
+        test_path,
+        **kwargs,
+    ):
+        super(WikiSQLConfig, self).__init__(name=name, version=datasets.Version("0.1.0", ""), **kwargs)
+        self.text_features = datasets.Features(
                 {
                     "phase": datasets.Value("int32"),
                     "question": datasets.Value("string"),
@@ -66,7 +71,55 @@ class WikiSQL(datasets.GeneratorBasedBuilder):
                         ),
                     },
                 }
-            ),
+            )
+        self.description = description
+        self.train_path = train_path
+        self.dev_path = dev_path
+        self.test_path = test_path
+
+
+class WikiSQL(datasets.GeneratorBasedBuilder):
+    """WikiSQL: A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
+
+    VERSION = datasets.Version("0.1.0")
+
+    BUILDER_CONFIGS = [
+        WikiSQLConfig(
+            name="AppE",
+            description=textwrap.dedent(
+                """\
+                An Appalachian English variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
+            ),
+            train_path="train_AppE.jsonl",
+            dev_path="dev_AppE.jsonl",
+            test_path="test_AppE.jsonl",
+        ),
+        WikiSQLConfig(
+            name="ChcE",
+            description=textwrap.dedent(
+                """\
+                A Chicano English variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
+            ),
+            train_path="train_ChcE.jsonl",
+            dev_path="dev_ChcE.jsonl",
+            test_path="test_ChcE.jsonl",
+        ),
+        WikiSQLConfig(
+            name="CollSgE",
+            description=textwrap.dedent(
+                """\
+                A Singapore English (Singlish) variant of the large crowd-sourced dataset for developing natural language interfaces for relational databases"""
+            ),
+            train_path="train_CollSgE.jsonl",
+            dev_path="dev_CollSgE.jsonl",
+            test_path="test_CollSgE.jsonl",
+        ),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=self.config.description,
+            features=self.config.text_features,
             # If there's a common (input, target) tuple from the features,
             # specify them here. They'll be used if as_supervised=True in
             # builder.as_dataset.
@@ -83,89 +136,26 @@ class WikiSQL(datasets.GeneratorBasedBuilder):
 
         return [
            datasets.SplitGenerator(
-                name=datasets.Split.AppE.TEST,
-                gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "test_AppE.jsonl"),
-                    "tables_filepath": os.path.join(dl_dir, "test.tables.jsonl"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.AppE.VALIDATION,
-                gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "dev_AppE.jsonl"),
-                    "tables_filepath": os.path.join(dl_dir, "dev.tables.jsonl"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.AppE.TRAIN,
-                gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "train_AppE.jsonl"),
-                    "tables_filepath": os.path.join(dl_dir, "train.tables.jsonl"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.ChcE.TEST,
-                gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "test_ChcE.jsonl"),
-                    "tables_filepath": os.path.join(dl_dir, "test.tables.jsonl"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.ChcE.VALIDATION,
-                gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "dev_ChcE.jsonl"),
-                    "tables_filepath": os.path.join(dl_dir, "dev.tables.jsonl"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.ChcE.TRAIN,
-                gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "train_ChcE.jsonl"),
-                    "tables_filepath": os.path.join(dl_dir, "train.tables.jsonl"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.CollSgE.TEST,
+                name=datasets.Split.TEST,
                 gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "test_CollSgE.jsonl"),
+                    "main_filepath": os.path.join(dl_dir, self.config.test_path),
                     "tables_filepath": os.path.join(dl_dir, "test.tables.jsonl"),
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.CollSgE.VALIDATION,
+                name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "dev_CollSgE.jsonl"),
+                    "main_filepath": os.path.join(dl_dir, self.config.dev_path),
                     "tables_filepath": os.path.join(dl_dir, "dev.tables.jsonl"),
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split.CollSgE.TRAIN,
+                name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "train_CollSgE.jsonl"),
+                    "main_filepath": os.path.join(dl_dir, self.config.train_path),
                     "tables_filepath": os.path.join(dl_dir, "train.tables.jsonl"),
                 },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.SAE.TEST,
-                gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "test.jsonl"),
-                    "tables_filepath": os.path.join(dl_dir, "test.tables.jsonl"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.SAE.VALIDATION,
-                gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "dev.jsonl"),
-                    "tables_filepath": os.path.join(dl_dir, "dev.tables.jsonl"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.SAE.TRAIN,
-                gen_kwargs={
-                    "main_filepath": os.path.join(dl_dir, "train.jsonl"),
-                    "tables_filepath": os.path.join(dl_dir, "train.tables.jsonl"),
-                },
-            ),
+            )
         ]
 
     def _convert_to_human_readable(self, sel, agg, columns, conditions):
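
After this commit, each English variant is a named BuilderConfig rather than a custom split, so the standard train/validation/test split names apply to whichever variant is selected. A minimal usage sketch follows; the Hub repo ID below is an assumption for illustration, so substitute the actual dataset path that hosts wikisql_VALUE.py:

    import datasets

    # Hypothetical Hub repo ID; replace with the real path hosting wikisql_VALUE.py.
    # Recent versions of `datasets` may also require trust_remote_code=True
    # when loading script-based datasets like this one.
    appe = datasets.load_dataset("cjziems/wikisql_VALUE", name="AppE")

    # Splits now carry the standard names instead of the removed per-dialect ones.
    print(appe["train"][0]["question"])
    print(appe["validation"].num_rows, appe["test"].num_rows)

Note that the removed SAE split generators (train.jsonl, dev.jsonl, test.jsonl) have no corresponding entry in BUILDER_CONFIGS after this change, so only the AppE, ChcE, and CollSgE variants remain selectable by name.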