Size: 10B<n<100B
License:
import gzip
import json
from typing import List

import datasets

_URL = "http://nl.ijs.si/nikola/dedup_hbs/"
# _HOMEPAGE is referenced in _info(); the data directory is used as the homepage here.
_HOMEPAGE = _URL
_URLS = [
    # "macocu.hbs.translit.dedup.lines.gz",
    # "hr_news.translit.dedup.lines.gz",
    # "srwac.translit.dedup.lines.gz",
    "riznica.translit.dedup.lines.gz",
    # "mC4.sr.translit.dedup.lines.gz",
    # "hrwac.translit.dedup.lines.gz",
    # "cnrwac.translit.dedup.lines.gz",
    # "classla-sr.translit.dedup.lines.gz",
    # "classla-hr.translit.dedup.lines.gz",
    # "classla-bs.translit.dedup.lines.gz",
    # "cc100-sr.translit.dedup.lines.gz",
    # "cc100-hr.translit.dedup.lines.gz",
    # "bswac.translit.dedup.lines.gz",
]
_URLS = [_URL + i for i in _URLS]
_DESCRIPTION = """\ | |
Data used to train BERTić model and its successors. | |
""" | |
_CITATION = """ | |
@inproceedings{ljubesic-lauc-2021-bertic, | |
title = "{BERT}i{\'c} - The Transformer Language Model for {B}osnian, {C}roatian, {M}ontenegrin and {S}erbian", | |
author = "Ljube{\v{s}}i{\'c}, Nikola and | |
Lauc, Davor", | |
editor = "Babych, Bogdan and | |
Kanishcheva, Olga and | |
Nakov, Preslav and | |
Piskorski, Jakub and | |
Pivovarova, Lidia and | |
Starko, Vasyl and | |
Steinberger, Josef and | |
Yangarber, Roman and | |
Marci{\'n}czuk, Micha{\l} and | |
Pollak, Senja and | |
P{\v{r}}ib{\'a}{\v{n}}, Pavel and | |
Robnik-{\v{S}}ikonja, Marko", | |
booktitle = "Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing", | |
month = apr, | |
year = "2021", | |
address = "Kiyv, Ukraine", | |
publisher = "Association for Computational Linguistics", | |
url = "https://aclanthology.org/2021.bsnlp-1.5", | |
pages = "37--42", | |
abstract = "In this paper we describe a transformer model pre-trained on 8 billion tokens of crawled text from the Croatian, Bosnian, Serbian and Montenegrin web domains. We evaluate the transformer model on the tasks of part-of-speech tagging, named-entity-recognition, geo-location prediction and commonsense causal reasoning, showing improvements on all tasks over state-of-the-art models. For commonsense reasoning evaluation we introduce COPA-HR - a translation of the Choice of Plausible Alternatives (COPA) dataset into Croatian. The BERTi{\'c} model is made available for free usage and further task-specific fine-tuning through HuggingFace.", | |
}""" | |
class BerticDataConfig(datasets.BuilderConfig):
    """BuilderConfig for Bertic data sample."""

    def __init__(self, *args, subsets, **kwargs):
        """BuilderConfig for BerticData.

        Args:
            subsets: list of subset names included in this configuration.
            **kwargs: keyword arguments forwarded to super.
        """
        super(BerticDataConfig, self).__init__(**kwargs)
        self.subsets = subsets
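
# Illustrative sketch (not part of the original script): the `subsets` attribute can be
# used to define additional named configurations. The config name and subset list below
# are assumptions, shown only to demonstrate how BerticDataConfig is meant to be used.
#
# _EXAMPLE_CONFIG = BerticDataConfig(
#     name="riznica_only",
#     subsets=["riznica"],
#     version=datasets.Version("1.0.0"),
#     description="Only the riznica subset",
# )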
class BerticData(datasets.GeneratorBasedBuilder):
    """Bertic dataset, used for training the Bertic model."""

    VERSION = datasets.Version("1.0.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the datasets with configurable options,
    # you can create your own builder configuration class to store attributes, inheriting from BerticDataConfig
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        BerticDataConfig(
            name="default",
            subsets=["arxiv", "open-web-math", "algebraic-stack"],
            version=VERSION,
            description="All subsets",
        )
    ]
    def _info(self):
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        urls_to_download = {i: url for i, url in enumerate(_URLS)}
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        # All downloaded files feed a single TRAIN split; _generate_examples
        # receives the list of local file paths.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_files": list(downloaded_files.values())},
            )
        ]
    def _generate_examples(self, data_files):
        key = 0
        for name in data_files:
            with gzip.open(name, "rb") as f:
                # Stream each gzipped file line by line; every line becomes one example.
                for line in f:
                    yield key, {"text": line.decode("utf-8").strip()}
                    key += 1
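
A minimal usage sketch, assuming the script above is saved locally as bertic_data.py (the filename is an assumption; recent versions of the datasets library additionally require trust_remote_code=True for script-based datasets):

import datasets

# Builds the "default" config: downloads the active .gz files listed in _URLS
# and yields one example per line under the "text" feature.
dataset = datasets.load_dataset(
    "bertic_data.py",
    "default",
    split="train",
    trust_remote_code=True,
)
print(dataset[0]["text"])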