Size: 10B < n < 100B
File size: 5,380 bytes
import gzip
import json
from typing import List

import datasets
_URL = "http://nl.ijs.si/nikola/dedup_hbs/"
_URLS = [
# "macocu.hbs.translit.dedup.lines.gz",
# "hr_news.translit.dedup.lines.gz",
# "srwac.translit.dedup.lines.gz",
"riznica.translit.dedup.lines.gz",
# "mC4.sr.translit.dedup.lines.gz",
# "hrwac.translit.dedup.lines.gz",
# "cnrwac.translit.dedup.lines.gz",
# "classla-sr.translit.dedup.lines.gz",
# "classla-hr.translit.dedup.lines.gz",
# "classla-bs.translit.dedup.lines.gz",
# "cc100-sr.translit.dedup.lines.gz",
# "cc100-hr.translit.dedup.lines.gz",
# "bswac.translit.dedup.lines.gz",
]
_URLS = [_URL + i for i in _URLS]
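# With only riznica uncommented above, _URLS resolves to a single entry:
# "http://nl.ijs.si/nikola/dedup_hbs/riznica.translit.dedup.lines.gz".
# Uncomment more filenames in the list to include the other corpora.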
_DESCRIPTION = """\
Data used to train BERTić model and its successors.
"""
_CITATION = """
@inproceedings{ljubesic-lauc-2021-bertic,
title = "{BERT}i{\'c} - The Transformer Language Model for {B}osnian, {C}roatian, {M}ontenegrin and {S}erbian",
author = "Ljube{\v{s}}i{\'c}, Nikola and
Lauc, Davor",
editor = "Babych, Bogdan and
Kanishcheva, Olga and
Nakov, Preslav and
Piskorski, Jakub and
Pivovarova, Lidia and
Starko, Vasyl and
Steinberger, Josef and
Yangarber, Roman and
Marci{\'n}czuk, Micha{\l} and
Pollak, Senja and
P{\v{r}}ib{\'a}{\v{n}}, Pavel and
Robnik-{\v{S}}ikonja, Marko",
booktitle = "Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing",
month = apr,
year = "2021",
address = "Kiyv, Ukraine",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.bsnlp-1.5",
pages = "37--42",
abstract = "In this paper we describe a transformer model pre-trained on 8 billion tokens of crawled text from the Croatian, Bosnian, Serbian and Montenegrin web domains. We evaluate the transformer model on the tasks of part-of-speech tagging, named-entity-recognition, geo-location prediction and commonsense causal reasoning, showing improvements on all tasks over state-of-the-art models. For commonsense reasoning evaluation we introduce COPA-HR - a translation of the Choice of Plausible Alternatives (COPA) dataset into Croatian. The BERTi{\'c} model is made available for free usage and further task-specific fine-tuning through HuggingFace.",
}"""
class BerticDataConfig(datasets.BuilderConfig):
"""BuilderConfig for Bertic data sample."""
def __init__(self, *args, subsets, **kwargs):
"""BuilderConfig for BerticData.
        Args:
            subsets: list of subset names included in this configuration.
            **kwargs: keyword arguments forwarded to super.
"""
super(BerticDataConfig, self).__init__(**kwargs)
self.subsets = subsets
class BerticData(datasets.GeneratorBasedBuilder):
"""Bertic dataset, used for training Bertic model."""
VERSION = datasets.Version("1.0.0")
# This is an example of a dataset with multiple configurations.
# If you don't want/need to define several sub-sets in your dataset,
# just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
# If you need to make complex sub-parts in the datasets with configurable options
# You can create your own builder configuration class to store attribute, inheriting from BerticDataConfig
# BUILDER_CONFIG_CLASS = MyBuilderConfig
# You will be able to load one or the other configurations in the following list with
# data = datasets.load_dataset('my_dataset', 'first_domain')
# data = datasets.load_dataset('my_dataset', 'second_domain')
BUILDER_CONFIGS = [
BerticDataConfig(
name="default",
subsets=["arxiv", "open-web-math", "algebraic-stack"],
name="default",
subsets=["arxiv", "open-web-math", "algebraic-stack"],
version=VERSION,
description="All subsets",
)
]
description="All subsets",
)
]
def _info(self):
features = datasets.Features(
{
"text": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
            homepage=_URL,
citation=_CITATION,
)
    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        urls_to_download = {i: url for i, url in enumerate(_URLS)}
        # Download without extracting: the files are read as gzip streams
        # in _generate_examples below.
        downloaded_files = dl_manager.download(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_files": list(downloaded_files.values())},
            )
        ]
    def _generate_examples(self, data_files):
        key = 0
        for name in data_files:
            # Each file is gzip-compressed text with one document per line.
            with gzip.open(name, "rb") as f:
                for line in f:
                    yield key, {"text": line.decode("utf-8").strip()}
                    key += 1
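For reference, a minimal usage sketch (not part of the loading script itself) of how a script like this is typically consumed with the datasets library. The repository ID "your-namespace/bertic-data" is a placeholder, since the actual Hub path is not given in this file; streaming avoids downloading all corpora up front.

from datasets import load_dataset

# Placeholder repo ID -- substitute the real Hub path of this dataset script.
# Recent versions of `datasets` may also require trust_remote_code=True for
# script-based datasets such as this one.
ds = load_dataset("your-namespace/bertic-data", "default", split="train", streaming=True)

# Each example carries a single "text" field; print the first few documents.
for i, example in enumerate(ds):
    print(example["text"][:80])
    if i >= 4:
        break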