Datasets: bertin-project/mc4-es-sampled
Modalities: Text
Languages: Spanish
Libraries: Datasets
File size: 4,595 Bytes
"""Perplexity Sampled mC4 dataset based on Common Crawl."""


import gzip
import json

import datasets


logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """
50 million documents in Spanish extracted from mC4 by applying perplexity sampling via mc4-sampling: "https://huggingface.co/datasets/bertin-project/mc4-sampling". Please refer to the BERTIN Project for details. The original dataset is the Multilingual, Colossal, Cleaned version of Common Crawl's web crawl corpus (mC4), based on the Common Crawl dataset: "https://commoncrawl.org", and processed by AllenAI.
"""

_CITATION = """
@article{2019t5,
    author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
    title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
    journal = {arXiv e-prints},
    year = {2019},
    archivePrefix = {arXiv},
    eprint = {1910.10683},
}
"""

_URL = "https://github.com/allenai/allennlp/discussions/5056"

_DATA_URL_VALIDATION = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/multilingual/c4-es-validation.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz"
_DATA_URL_TRAIN = "https://huggingface.co/datasets/bertin-project/mc4-es-sampled/resolve/main/mc4-es-train-50M-{config}-shard-{index:04d}-of-{n_shards:04d}.json.gz"

_CONFIGS = [
    "random",
    "stepwise",
    "gaussian",
]

_N_SHARDS_PER_SPLIT = {
    "random": {"train": 1024, "validation": 16},
    "stepwise": {"train": 1024, "validation": 16},
    "gaussian": {"train": 1024, "validation": 16},
}
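
# Illustration (derived from the format strings above): for the "gaussian" config,
# the first of its 1024 training shards resolves to
# https://huggingface.co/datasets/bertin-project/mc4-es-sampled/resolve/main/mc4-es-train-50M-gaussian-shard-0001-of-1024.json.gz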


class Mc4EsSampledConfig(datasets.BuilderConfig):
    """BuilderConfig for mC4."""

    def __init__(self, *args, configs, **kwargs):
        """BuilderConfig for mC4.
        Args:
            configs (:obj:`List[str]`): list of configs to load
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join(configs),
            **kwargs,
        )
        self.configs = configs
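        # Note: `name` is "+".join(configs), so passing e.g.
        # configs=["stepwise", "gaussian"] produces a single combined
        # "stepwise+gaussian" config whose splits draw shards from both
        # sampling strategies (see _split_generators below).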


class Mc4EsSampled(datasets.GeneratorBasedBuilder):
    """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""

    BUILDER_CONFIGS = [Mc4EsSampledConfig(configs=[config]) for config in _CONFIGS]
    BUILDER_CONFIG_CLASS = Mc4EsSampledConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_urls = {}
        data_urls["train"] = [
            _DATA_URL_TRAIN.format(
                config=config,
                index=index + 1,
                n_shards=_N_SHARDS_PER_SPLIT[config]["train"],
            )
            for config in self.config.configs
            for index in range(_N_SHARDS_PER_SPLIT[config]["train"])
        ]
        data_urls["validation"] = [
            _DATA_URL_VALIDATION.format(
                index=index + 1,
                n_shards=_N_SHARDS_PER_SPLIT[config]["validation"],
            )
            for config in self.config.configs
            for index in range(_N_SHARDS_PER_SPLIT[config]["validation"] - 1)  # the last validation shard is broken in the original allenai/c4 repo
        ]
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
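            # Opening the raw file first and wrapping it in gzip.open (rather than
            # passing the path to gzip.open directly) appears intended to let the
            # `datasets` streaming machinery substitute its own file opener here.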
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
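
        # Each yielded example follows the schema declared in _info():
        #   {"text": ..., "timestamp": ..., "url": ...}
        # and `id_` increases monotonically across shard files, so ids are unique
        # within a split.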