shu / shu.py
shjwudp's picture
data!: Remove duplicate books and add new ones
9f80775
raw
history blame
2.03 kB
import json
import gzip
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
shu is a chinese book dataset.
"""
_HOMEPAGE = "https://github.com/shjwudp/shu"
_DATA_URLS = {
"books": "books.jsonl.gz",
"clean text": "clean_text.jsonl.gz",
}
class ShuConfig(datasets.BuilderConfig):
    """BuilderConfig for shu."""

    def __init__(self, *args, subsets, **kwargs) -> None:
        """BuilderConfig for shu.

        Args:
            subsets: list of subset names (keys of ``_DATA_URLS``) that this
                configuration loads. The config name is derived from it so
                each combination gets a distinct, stable identifier
                (e.g. "books" or "books+clean text").
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join(subsets),
            **kwargs,
        )
        # Remember which subsets to download/generate later in the builder.
        self.subsets = subsets
class Shu(datasets.GeneratorBasedBuilder):
    """A chinese book dataset."""

    VERSION = datasets.Version("0.2.0")
    BUILDER_CONFIG_CLASS = ShuConfig
    # One single-subset config per entry in _DATA_URLS ("books", "clean text").
    BUILDER_CONFIGS = [ShuConfig(subsets=[subset]) for subset in _DATA_URLS]
    DEFAULT_CONFIG_NAME = "books"

    def _info(self):
        """Return dataset metadata: each example is a book name plus its text."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "name": datasets.Value("string"),
                "text": datasets.Value("string"),
            }),
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download the configured subsets and expose them as one train split."""
        data_urls = {subset: _DATA_URLS[subset] for subset in self.config.subsets}
        archive = dl_manager.download(data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": {
                        subset: archive[subset] for subset in self.config.subsets
                    },
                },
            ),
        ]

    def _generate_examples(self, files):
        """Yield (key, example) pairs from gzipped JSON-lines files.

        Args:
            files: mapping of subset name -> local path of its ``.jsonl.gz``
                file, as produced by ``_split_generators``.

        Yields:
            Tuples of a monotonically increasing integer key and the parsed
            JSON object for one line (expected to match the declared features).
        """
        key = 0
        for subset in files:
            filepath = files[subset]
            # Use a context manager so the gzip handle is always closed —
            # the original opened it inline in the for-statement and leaked it,
            # especially if json.loads raised mid-file.
            with gzip.open(filepath, "rt", encoding="utf-8") as fin:
                for line in fin:
                    yield key, json.loads(line)
                    key += 1