albertvillanova (HF staff) committed
Commit 7f9e057
1 Parent(s): 9252910

Optimize code to use iter_files instead of globs

Files changed (1):
1. cantemist.py +87 -80
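In short: `_split_generators` now hands `_generate_examples` lazy `dl_manager.iter_files(...)` iterators (plus explicit TSV path lists) instead of `Path` objects to glob, merges the two dev sets with `itertools.chain`, reads the subtrack-3 coding TSVs with the standard-library `csv` module instead of pandas, and drops the now-unneeded `split` argument. A minimal sketch of the pattern swap, outside the loader, with a hypothetical `data_dir` standing in for the extracted archive:

import os.path
from itertools import chain

from datasets import DownloadManager

data_dir = "path/to/extracted/cantemist"  # hypothetical local copy
dl_manager = DownloadManager()

# Old pattern: eagerly glob *.txt under a Path for each sub-directory.
# txt_files = list(Path(data_dir, "dev-set1/cantemist-ner").glob("*txt"))

# New pattern: iter_files lazily yields every file under the directory;
# chain() merges the two dev sets into a single stream.
dev_ner_files = chain(
    dl_manager.iter_files(os.path.join(data_dir, "dev-set1", "cantemist-ner")),
    dl_manager.iter_files(os.path.join(data_dir, "dev-set2", "cantemist-ner")),
)

# The extension filter moves to the consumer, as in _generate_examples:
for path in dev_ner_files:
    if os.path.splitext(path)[-1] != ".txt":
        continue
    print(path)  # e.g. hand off to parse_brat_file(Path(path), ...)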
cantemist.py CHANGED
@@ -22,12 +22,14 @@ mapped by clinical experts to a controlled terminology. Every tumor morphology
 mention is linked to an eCIE-O code (the Spanish equivalent of ICD-O).
 """
 
-import os
+import csv
+import os.path
+from collections import defaultdict
+from itertools import chain
 from pathlib import Path
 from typing import Dict, List, Tuple
 
 import datasets
-import pandas as pd
 
 from .bigbiohub import kb_features
 from .bigbiohub import text_features
@@ -236,113 +238,116 @@ class CantemistDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "filepaths": {
-                        "task1": Path(
-                            os.path.join(data_dir, "train-set/cantemist-ner")
+                        "task1": dl_manager.iter_files(
+                            os.path.join(data_dir, "train-set", "cantemist-ner")
                         ),
-                        "task2": Path(
-                            os.path.join(data_dir, "train-set/cantemist-norm")
-                        ),
-                        "task3": Path(
-                            os.path.join(data_dir, "train-set/cantemist-coding")
+                        "task2": dl_manager.iter_files(
+                            os.path.join(data_dir, "train-set", "cantemist-norm")
                         ),
+                        "task3": [
+                            os.path.join(
+                                data_dir,
+                                "train-set",
+                                "cantemist-coding",
+                                "train-coding.tsv",
+                            )
+                        ],
                     },
-                    "split": "train",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
                     "filepaths": {
-                        "task1": Path(os.path.join(data_dir, "test-set/cantemist-ner")),
-                        "task2": Path(
-                            os.path.join(data_dir, "test-set/cantemist-norm")
+                        "task1": dl_manager.iter_files(
+                            os.path.join(data_dir, "test-set", "cantemist-ner")
                         ),
-                        "task3": Path(
-                            os.path.join(data_dir, "test-set/cantemist-coding")
+                        "task2": dl_manager.iter_files(
+                            os.path.join(data_dir, "test-set", "cantemist-norm")
                         ),
+                        "task3": [
+                            os.path.join(
+                                data_dir,
+                                "test-set",
+                                "cantemist-coding",
+                                "test-coding.tsv",
+                            )
+                        ],
                     },
-                    "split": "test",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
                     "filepaths": {
-                        "task1_set1": Path(
-                            os.path.join(data_dir, "dev-set1/cantemist-ner")
-                        ),
-                        "task1_set2": Path(
-                            os.path.join(data_dir, "dev-set2/cantemist-ner")
-                        ),
-                        "task2_set1": Path(
-                            os.path.join(data_dir, "dev-set1/cantemist-norm")
-                        ),
-                        "task2_set2": Path(
-                            os.path.join(data_dir, "dev-set2/cantemist-norm")
-                        ),
-                        "task3_set1": Path(
-                            os.path.join(data_dir, "dev-set1/cantemist-coding")
+                        "task1": chain(
+                            dl_manager.iter_files(
+                                os.path.join(data_dir, "dev-set1", "cantemist-ner")
+                            ),
+                            dl_manager.iter_files(
+                                os.path.join(data_dir, "dev-set2", "cantemist-ner")
+                            ),
                         ),
-                        "task3_set2": Path(
-                            os.path.join(data_dir, "dev-set2/cantemist-coding")
+                        "task2": chain(
+                            dl_manager.iter_files(
+                                os.path.join(data_dir, "dev-set1", "cantemist-norm")
+                            ),
+                            dl_manager.iter_files(
+                                os.path.join(data_dir, "dev-set2", "cantemist-norm")
+                            ),
                         ),
+                        "task3": [
+                            os.path.join(
+                                data_dir,
+                                "dev-set1",
+                                "cantemist-coding",
+                                "dev1-coding.tsv",
+                            ),
+                            os.path.join(
+                                data_dir,
+                                "dev-set2",
+                                "cantemist-coding",
+                                "dev2-coding.tsv",
+                            ),
+                        ],
                     },
-                    "split": "dev",
                 },
             ),
         ]
 
-    def _generate_examples(self, filepaths, split: str) -> Tuple[int, Dict]:
+    def _generate_examples(self, filepaths) -> Tuple[int, Dict]:
         """
         This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
         """
 
-        if split != "dev":
-            txt_files_task1 = list(filepaths["task1"].glob("*txt"))
-            txt_files_task2 = list(filepaths["task2"].glob("*txt"))
-            tsv_file_task3 = Path(
-                os.path.join(filepaths["task3"], f"{split}-coding.tsv")
-            )
-            task3_df = pd.read_csv(tsv_file_task3, sep="\t", header=None)
-        else:
-            txt_files_task1, txt_files_task2, dfs = [], [], []
-            for i in range(1, 3):
-                txt_files_task1 += list(filepaths[f"task1_set{i}"].glob("*txt"))
-                txt_files_task2 += list(filepaths[f"task2_set{i}"].glob("*txt"))
-                tsv_file_task3 = Path(
-                    os.path.join(filepaths[f"task3_set{i}"], f"{split}{i}-coding.tsv")
-                )
-                df = pd.read_csv(tsv_file_task3, sep="\t", header=0)
-                dfs.append(df)
-            task3_df = pd.concat(dfs)
-
         if self.config.schema == "source" or self.config.schema == "bigbio_text":
-            task3_dict = {}
-            for idx, row in task3_df.iterrows():
-                file, code = row[0], row[1]
-                if file not in task3_dict:
-                    task3_dict[file] = [code]
-                else:
-                    task3_dict[file] += [code]
+            task3_dict = defaultdict(list)
+            for file_path in filepaths["task3"]:
+                with open(file_path, newline="", encoding="utf-8") as f:
+                    reader = csv.DictReader(f, delimiter="\t")
+                    for row in reader:
+                        task3_dict[row["file"]].append(row["code"])
 
         if self.config.schema == "source":
-            for guid, txt_file in enumerate(txt_files_task2):
-                example = parse_brat_file(txt_file, parse_notes=True)
-                if example["document_id"] in task3_dict:
-                    example["labels"] = task3_dict[example["document_id"]]
-                else:
-                    example[
-                        "labels"
-                    ] = (
-                        []
-                    )  # few cases where subtrack 3 has no codes for the current document
+            for guid, file_path in enumerate(filepaths["task2"]):
+                if os.path.splitext(file_path)[-1] != ".txt":
+                    continue
+                example = parse_brat_file(
+                    Path(file_path), annotation_file_suffixes=[".ann"], parse_notes=True
+                )
+                # consider few cases where subtrack 3 has no codes for the current document
+                example["labels"] = task3_dict.get(example["document_id"], [])
                 example["id"] = str(guid)
                 yield guid, example
 
         elif self.config.schema == "bigbio_kb":
-            for guid, txt_file in enumerate(txt_files_task2):
-                parsed_brat = parse_brat_file(txt_file, parse_notes=True)
+            for guid, file_path in enumerate(filepaths["task2"]):
+                if os.path.splitext(file_path)[-1] != ".txt":
+                    continue
+                parsed_brat = parse_brat_file(
                    Path(file_path), annotation_file_suffixes=[".ann"], parse_notes=True
+                )
                 example = brat_parse_to_bigbio_kb(parsed_brat)
                 example["id"] = str(guid)
                 for i in range(0, len(example["entities"])):
@@ -354,14 +359,16 @@ class CantemistDataset(datasets.GeneratorBasedBuilder):
                 yield guid, example
 
         elif self.config.schema == "bigbio_text":
-            for guid, txt_file in enumerate(txt_files_task1):
-                parsed_brat = parse_brat_file(txt_file, parse_notes=False)
-                if parsed_brat["document_id"] in task3_dict:
-                    labels = task3_dict[parsed_brat["document_id"]]
-                else:
-                    labels = (
-                        []
-                    )  # few cases where subtrack 3 has no codes for the current document
+            for guid, file_path in enumerate(filepaths["task1"]):
+                if os.path.splitext(file_path)[-1] != ".txt":
+                    continue
+                parsed_brat = parse_brat_file(
+                    Path(file_path),
+                    annotation_file_suffixes=[".ann"],
+                    parse_notes=False,
+                )
+                # consider few cases where subtrack 3 has no codes for the current document
+                labels = task3_dict.get(parsed_brat["document_id"], [])
                 example = {
                     "id": str(guid),
                     "document_id": parsed_brat["document_id"],