Datasets: swda
Tasks: Text Classification
Sub-tasks: multi-label-classification
Languages: English
Size: 100K<n<1M
Commit 9d193a9
Parent(s): 19a4dbf

Support streaming swda dataset (#4914)

* Support streaming swda dataset
* Remove unused import

Commit from https://github.com/huggingface/datasets/commit/f10d38b8b60b09a633823a2fb2529c83933b9c80
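For context, a change like this is exercised by loading the dataset in streaming mode, where examples are read lazily instead of after a full download and extraction. A minimal usage sketch (assuming the dataset id "swda" on the Hub; this snippet is illustrative and not part of the commit):

from datasets import load_dataset

# Iterate the dataset lazily; nothing is extracted to disk up front.
ds = load_dataset("swda", split="train", streaming=True)
for example in ds:
    print(example)
    break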
swda.py CHANGED
@@ -26,7 +26,6 @@ the original corpus repo. Modifications are made to accommodate the HuggingFace
 import csv
 import datetime
 import glob
-import io
 import os
 import re
 
@@ -435,10 +434,8 @@ class Swda(datasets.GeneratorBasedBuilder):
         dl_dir = dl_manager.download_and_extract(_URL)
         # Use swda/ folder.
         data_dir = os.path.join(dl_dir, "swda")
-        # Handle partitions files.
-
-        # Download extract and return paths of split files.
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+        # Handle partitions files: download extract and return paths of split files.
+        downloaded_files = dl_manager.download(self._URLS)
 
         return [
             # Return whole data path and train splits file downloaded path.
@@ -476,7 +473,8 @@ class Swda(datasets.GeneratorBasedBuilder):
         """
 
         # Read in the split file.
-        split_file = io.open(file=split_file, mode="r", encoding="utf-8").read().splitlines()
+        with open(file=split_file, mode="r", encoding="utf-8") as f:
+            split_file = f.read().splitlines()
         # Read in corpus data using split files.
         corpus = CorpusReader(src_dirname=data_dir, split_file=split_file)
         # Generate examples.
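Switching from io.open to the builtin open is what makes this step stream-friendly, presumably because the streaming machinery patches the builtin open used inside loading scripts so it can also read from remote URLs, while io.open is left as-is. A minimal sketch of the resulting pattern, independent of swda.py:

# Read a small text file of split names with the builtin open(), which the
# datasets streaming machinery can patch, and return one entry per line.
def read_split_file(split_file):
    with open(file=split_file, mode="r", encoding="utf-8") as f:
        return f.read().splitlines()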