RossVermouth committed
Commit · f3477ff
Parent(s): 2042543

Create chensu_test_dataset2.py

Files changed: chensu_test_dataset2.py (+91, -0)
chensu_test_dataset2.py
ADDED
@@ -0,0 +1,91 @@
"""C4 dataset based on Common Crawl."""


import gzip
import json

import datasets


logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
A colossal, cleaned version of Common Crawl's web crawl corpus.
Based on Common Crawl dataset: "https://commoncrawl.org".
This is the processed version of Google's C4 dataset by AllenAI.
"""

_CITATION = """
@article{2019t5,
    author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
    title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
    journal = {arXiv e-prints},
    year = {2019},
    archivePrefix = {arXiv},
    eprint = {1910.10683},
}
"""

_URL = "https://github.com/allenai/allennlp/discussions/5056"

_VARIANTS = ["en", "realnewslike", "en.noblocklist", "en.noclean"]

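# Number of shards per split for each variant; these counts match the file
# layout of the allenai/c4 repository referenced by _DATA_URL below.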
_N_SHARDS_PER_SPLIT = {
    "en": {"train": 1024, "validation": 8},
    "realnewslike": {"train": 512, "validation": 1},
    "en.noblocklist": {"train": 1024, "validation": 8},
    "en.noclean": {"train": 7168, "validation": 64},
}

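# Shard URLs are pinned to a fixed git revision of the allenai/c4 dataset
# repository, so the downloaded files cannot change underneath this script.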
_DATA_URL = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/{name}/c4-{split}.{index:05d}-of-{n_shards:05d}.json.gz"


class C4(datasets.GeneratorBasedBuilder):
    """C4, a colossal, cleaned version of Common Crawl's web crawl corpus."""

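    # One BuilderConfig per C4 variant; the config name chosen by the caller
    # selects which set of shards is downloaded.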
    BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]

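    # Schema declaration: every example carries the page text, its crawl
    # timestamp, and the source URL, all stored as plain strings.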
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

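    # Build the complete shard URL list for each split, download the shards
    # through the download manager, and hand the local paths to
    # _generate_examples.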
    def _split_generators(self, dl_manager):
        data_urls = {}
        for split in ["train", "validation"]:
            n_shards = _N_SHARDS_PER_SPLIT[self.config.name][split]
            data_urls[split] = [
                _DATA_URL.format(name=self.config.name, split=split, index=index, n_shards=n_shards)
                for index in range(n_shards)
            ]
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]

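    # Each shard is a gzip-compressed JSON Lines file; every non-empty line is
    # one record whose keys match the features declared in _info.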
    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
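For reference, a minimal usage sketch for a script like this, assuming it is saved locally as chensu_test_dataset2.py and a datasets version that still supports loading local dataset scripts; the "en" variant and the streaming flag are illustrative choices, not part of the commit:

import datasets

# Streaming reads the gzip shards lazily instead of downloading all of them
# up front (the "en" train split alone has 1024 shards).
c4_en = datasets.load_dataset("./chensu_test_dataset2.py", "en", streaming=True)
example = next(iter(c4_en["train"]))
print(example["url"], example["text"][:80])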