gabrielaltay and albertvillanova committed
Commit d1a153f
1 Parent(s): 04980fa

Support streaming (#2)

- Add data files (f3252580b72becc567f3cf388198fb7dd4d494da)
- Update loading script (de29136e9b2b21e1317c2c8feb78730dce2796a7)


Co-authored-by: Albert Villanova <[email protected]>

Files changed (4)
  1. bionlp_st_2013_gro.py +21 -23
  2. data/devel.zip +3 -0
  3. data/test.zip +3 -0
  4. data/train.zip +3 -0
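For reference, what this commit enables on the consumer side is reading the corpus in streaming mode instead of downloading and extracting the full GitHub archive. A minimal sketch, assuming it is run from a local clone of this repo and that the builder's default config can be used without an explicit name (pass a config name if the builder requires one):

    from datasets import load_dataset

    # Assumption: loading the local script from the repo root; with the data files
    # shipped as data/{train,devel,test}.zip, streaming avoids a full local extraction.
    ds = load_dataset("bionlp_st_2013_gro.py", streaming=True, split="train")
    first = next(iter(ds))
    print(first["id"])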
bionlp_st_2013_gro.py CHANGED
@@ -15,7 +15,7 @@
 
 
 from pathlib import Path
-from typing import List
+from typing import Iterable, List
 
 import datasets
 
@@ -62,8 +62,9 @@ _HOMEPAGE = "https://github.com/openbiocorpora/bionlp-st-2013-gro"
 _LICENSE = 'GENIA Project License for Annotated Corpora'
 
 _URLs = {
-    "source": "https://github.com/openbiocorpora/bionlp-st-2013-gro/archive/refs/heads/master.zip",
-    "bigbio_kb": "https://github.com/openbiocorpora/bionlp-st-2013-gro/archive/refs/heads/master.zip",
+    "train": "data/train.zip",
+    "validation": "data/devel.zip",
+    "test": "data/test.zip",
 }
 
 _SUPPORTED_TASKS = [
@@ -197,47 +198,44 @@ class bionlp_st_2013_gro(datasets.GeneratorBasedBuilder):
     def _split_generators(
         self, dl_manager: datasets.DownloadManager
     ) -> List[datasets.SplitGenerator]:
-
-        my_urls = _URLs[self.config.schema]
-        data_dir = Path(dl_manager.download_and_extract(my_urls))
-        data_files = {
-            "train": data_dir
-            / f"bionlp-st-2013-gro-master"
-            / "original-data"
-            / "train",
-            "dev": data_dir / f"bionlp-st-2013-gro-master" / "original-data" / "devel",
-            "test": data_dir / f"bionlp-st-2013-gro-master" / "original-data" / "test",
-        }
-
+        data_files = dl_manager.download_and_extract(_URLs)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"data_files": data_files["train"]},
+                gen_kwargs={"data_files": dl_manager.iter_files(data_files["train"])},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"data_files": data_files["dev"]},
+                gen_kwargs={"data_files": dl_manager.iter_files(data_files["validation"])},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"data_files": data_files["test"]},
+                gen_kwargs={"data_files": dl_manager.iter_files(data_files["test"])},
            ),
         ]
 
-    def _generate_examples(self, data_files: Path):
+    def _generate_examples(self, data_files: Iterable[str]):
         if self.config.schema == "source":
-            txt_files = list(data_files.glob("*txt"))
-            for guid, txt_file in enumerate(txt_files):
+            guid = 0
+            for data_file in data_files:
+                txt_file = Path(data_file)
+                if txt_file.suffix != ".txt":
+                    continue
                 example = parse_brat_file(txt_file)
                 example["id"] = str(guid)
                 yield guid, example
+                guid += 1
         elif self.config.schema == "bigbio_kb":
-            txt_files = list(data_files.glob("*txt"))
-            for guid, txt_file in enumerate(txt_files):
+            guid = 0
+            for data_file in data_files:
+                txt_file = Path(data_file)
+                if txt_file.suffix != ".txt":
+                    continue
                 example = brat_parse_to_bigbio_kb(
                     parse_brat_file(txt_file)
                 )
                 example["id"] = str(guid)
                 yield guid, example
+                guid += 1
         else:
             raise ValueError(f"Invalid config: {self.config.name}")
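The heart of the change is replacing Path.glob over a locally extracted archive with dl_manager.iter_files over each split's files, which behaves the same whether the zip is extracted on disk or streamed. A standalone sketch of that pattern outside the builder (assuming it is run from the repository root so the relative data/ path resolves; the counting mirrors the updated _generate_examples):

    from pathlib import Path
    import datasets

    # Standalone DownloadManager, outside the builder, just to illustrate the pattern.
    dl_manager = datasets.DownloadManager()
    data_files = dl_manager.download_and_extract({"train": "data/train.zip"})

    guid = 0
    for data_file in dl_manager.iter_files(data_files["train"]):
        # brat corpora ship .txt documents alongside .a1/.a2 annotation files,
        # so only .txt files become examples; annotations are read by parse_brat_file.
        if Path(data_file).suffix != ".txt":
            continue
        guid += 1
    print(f"{guid} documents in the train split")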
data/devel.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:633e8f535da8debcc17a4b6fb304de44238562bf2f589779cbf1799ffb6e9297
+size 116493

data/test.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6db691187c9be334faabab483b333f39576a17c12387c9295c929daba4da6a92
+size 164039

data/train.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee6ef5e0b48a7fe067c6c04dc27bafb1e7c14843bce4aecb93f884be4508fb56
+size 355702
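The three zips are stored as Git LFS pointers, so only the oid and size above are committed to the repo. A quick check that a locally downloaded archive matches its pointer (expected values copied from the data/train.zip pointer in this commit):

    import hashlib
    from pathlib import Path

    # Expected values taken from the data/train.zip LFS pointer above.
    expected_oid = "ee6ef5e0b48a7fe067c6c04dc27bafb1e7c14843bce4aecb93f884be4508fb56"
    expected_size = 355702

    blob = Path("data/train.zip").read_bytes()
    assert len(blob) == expected_size, "size does not match the LFS pointer"
    assert hashlib.sha256(blob).hexdigest() == expected_oid, "oid does not match the LFS pointer"
    print("data/train.zip matches its LFS pointer")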