Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
Dask
License:
system HF staff committed on
Commit
8b90f3b
1 Parent(s): 87f3440

Update files from the datasets library (from 1.16.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2) hide show
  1. README.md +1 -0
  2. cbt.py +60 -70
README.md CHANGED
@@ -1,4 +1,5 @@
1
  ---
 
2
  annotations_creators:
3
  - machine-generated
4
  language_creators:
 
1
  ---
2
+ pretty_name: Children’s Book Test (CBT)
3
  annotations_creators:
4
  - machine-generated
5
  language_creators:
cbt.py CHANGED
@@ -15,8 +15,6 @@
15
  """Children's Book Test Dataset."""
16
 
17
 
18
- import os
19
-
20
  import datasets
21
 
22
 
@@ -126,91 +124,83 @@ class Cbt(datasets.GeneratorBasedBuilder):
126
  def _split_generators(self, dl_manager):
127
  """Returns SplitGenerators."""
128
  my_urls = ZIP_URL # Cannot download just one single type as it is a compressed file.
129
- data_dir = dl_manager.download_and_extract(my_urls)
130
  return [
131
  datasets.SplitGenerator(
132
  name=datasets.Split.TRAIN,
133
  # These kwargs will be passed to _generate_examples
134
- gen_kwargs={
135
- "filepath": os.path.join(data_dir, paths[self.config.name]["train"]),
136
- },
137
  ),
138
  datasets.SplitGenerator(
139
  name=datasets.Split.TEST,
140
  # These kwargs will be passed to _generate_examples
141
- gen_kwargs={
142
- "filepath": os.path.join(data_dir, paths[self.config.name]["test"]),
143
- },
144
  ),
145
  datasets.SplitGenerator(
146
  name=datasets.Split.VALIDATION,
147
  # These kwargs will be passed to _generate_examples
148
- gen_kwargs={
149
- "filepath": os.path.join(data_dir, paths[self.config.name]["valid"]),
150
- },
151
  ),
152
  ]
153
 
154
- def _generate_examples(
155
- self, filepath # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
156
- ):
157
  """Yields examples as (key, example) tuples."""
158
- # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
159
- # The `key` is here for legacy reason (tfds) and is not important in itself.
160
-
161
- if self.config.name != "raw":
162
- with open(filepath, encoding="utf-8") as f:
163
- sentences = []
164
- example_idx = 0
165
- for idx, line in enumerate(f):
166
- if line.strip() == "":
167
- continue
168
-
169
- elif line.split()[0] == "21":
170
- splitline = line.split("\t") # question, answer options are tab separated
171
- question = splitline[0]
172
- answer = splitline[1]
173
- options = splitline[-1]
174
- question = question[2:].strip() # The first two indices contain `21`.
175
- answer = answer.strip()
176
- options = options.strip().split("|")
177
- yield example_idx, {
178
- "sentences": sentences,
179
- "question": question,
180
- "options": options,
181
- "answer": answer,
182
- }
183
 
184
- sentences = []
185
- example_idx += 1
186
- else:
187
- if len(line.split()[0]) == 1:
188
- sentences.append(line[1:].strip())
189
- else:
190
- sentences.append(line[2:].strip())
191
- # Text might contain double spaces.
192
- else:
193
- with open(filepath, encoding="utf=8") as f:
194
- book_idx = 0
195
- book_sentences = []
196
- for idx, line in enumerate(f):
197
- if line[:12] == "_BOOK_TITLE_":
198
- if idx == 0: # First line:
199
- title = line.split(":")[1].strip()
200
  else:
201
- yield book_idx, {
202
- "title": title,
203
- "content": "".join(book_sentences),
204
- }
205
- title = line.split(":")[1].strip()
206
- book_sentences = []
207
- book_idx += 1
208
- else:
209
- book_sentences.append(line)
210
  else:
211
- yield book_idx, {
212
- "title": title,
213
- "content": "".join(book_sentences),
214
- }
215
  book_sentences = []
216
- book_idx += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  """Children's Book Test Dataset."""
16
 
17
 
 
 
18
  import datasets
19
 
20
 
 
124
  def _split_generators(self, dl_manager):
125
  """Returns SplitGenerators."""
126
  my_urls = ZIP_URL # Cannot download just one single type as it is a compressed file.
127
+ archive = dl_manager.download(my_urls)
128
  return [
129
  datasets.SplitGenerator(
130
  name=datasets.Split.TRAIN,
131
  # These kwargs will be passed to _generate_examples
132
+ gen_kwargs={"filepath": paths[self.config.name]["train"], "files": dl_manager.iter_archive(archive)},
 
 
133
  ),
134
  datasets.SplitGenerator(
135
  name=datasets.Split.TEST,
136
  # These kwargs will be passed to _generate_examples
137
+ gen_kwargs={"filepath": paths[self.config.name]["test"], "files": dl_manager.iter_archive(archive)},
 
 
138
  ),
139
  datasets.SplitGenerator(
140
  name=datasets.Split.VALIDATION,
141
  # These kwargs will be passed to _generate_examples
142
+ gen_kwargs={"filepath": paths[self.config.name]["valid"], "files": dl_manager.iter_archive(archive)},
 
 
143
  ),
144
  ]
145
 
146
+ def _generate_examples(self, filepath, files):
 
 
147
  """Yields examples as (key, example) tuples."""
148
+ for path, f in files:
149
+ if path == filepath:
150
+ if self.config.name != "raw":
151
+ sentences = []
152
+ example_idx = 0
153
+ for idx, line in enumerate(f):
154
+ line = line.decode("utf-8")
155
+ if line.strip() == "":
156
+ continue
157
+
158
+ elif line.split()[0] == "21":
159
+ splitline = line.split("\t") # question, answer options are tab separated
160
+ question = splitline[0]
161
+ answer = splitline[1]
162
+ options = splitline[-1]
163
+ question = question[2:].strip() # The first two indices contain `21`.
164
+ answer = answer.strip()
165
+ options = options.strip().split("|")
166
+ yield example_idx, {
167
+ "sentences": sentences,
168
+ "question": question,
169
+ "options": options,
170
+ "answer": answer,
171
+ }
 
172
 
173
+ sentences = []
174
+ example_idx += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
  else:
176
+ if len(line.split()[0]) == 1:
177
+ sentences.append(line[1:].strip())
178
+ else:
179
+ sentences.append(line[2:].strip())
180
+ # Text might contain double spaces.
 
 
 
 
181
  else:
182
+ book_idx = 0
 
 
 
183
  book_sentences = []
184
+ for idx, line in enumerate(f):
185
+ line = line.decode("utf-8")
186
+ if line[:12] == "_BOOK_TITLE_":
187
+ if idx == 0: # First line:
188
+ title = line.split(":")[1].strip()
189
+ else:
190
+ yield book_idx, {
191
+ "title": title,
192
+ "content": "".join(book_sentences),
193
+ }
194
+ title = line.split(":")[1].strip()
195
+ book_sentences = []
196
+ book_idx += 1
197
+ else:
198
+ book_sentences.append(line)
199
+ else:
200
+ yield book_idx, {
201
+ "title": title,
202
+ "content": "".join(book_sentences),
203
+ }
204
+ book_sentences = []
205
+ book_idx += 1
206
+ break