"""
### Spanish Poetry Dataset ###
Collection of Spanish poems retrieved by Andrea Morales and Miguel L贸pez from the website www.poemas-del-alma.com
Corpus adapted for Causal Language Modeling (CLM) to train GPT-like models. The author and title of each poem has been removed.
Note that, depending on your tokenizer, you might want to replace the <BOS>/<SEP>/<EOS> tokens by <|endoftext|> or something else.
Also note that the number of rows is slightly lower than the original dataset (andreamorgar/spanish_poetry) because a few incorrect examples have been filtered out.
"""
import datasets
_DESCRIPTION = "Collection of Spanish poems retrieved from www.poemas-del-alma.com"
_HOMEPAGE = "https://www.kaggle.com/datasets/andreamorgar/spanish-poetry-dataset"
_AUTHORS = "Andrea Morales and Miguel López"
_LICENSE = "GNU Lesser General Public License"
class Poemas(datasets.GeneratorBasedBuilder):
    def _info(self):
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_AUTHORS,  # no formal citation available; the authors string is used instead
        )
    def _split_generators(self, dl_manager):
        # poemas.txt is expected next to this script in the dataset repository;
        # the download manager resolves the relative path.
        data_file = dl_manager.download_and_extract("poemas.txt")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                    "split": "train",
                },
            )
        ]
    def _generate_examples(self, filepath, split):
        # Strip the CLM markers: drop <BOS>/<EOS>, turn <SEP> into a newline.
        to_replace = {"<BOS>": "", "<EOS>": "", "<SEP>": "\n"}
        with open(filepath, encoding="utf-8") as f:
            for key, poem in enumerate(f):  # one poem per line
                for old, new in to_replace.items():
                    poem = poem.replace(old, new)
                yield key, {"text": poem.strip()}
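
if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original script). Assumes
    # poemas.txt sits next to this file and a `datasets` release that still
    # supports script-based loading (newer releases may require
    # trust_remote_code=True or drop loading-script support entirely).
    ds = datasets.load_dataset(__file__, split="train")
    print(ds)
    print(ds[0]["text"][:200])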