mapama247 committed on
Commit 46bce12
1 Parent(s): 78f6061

init commit

Files changed (3)
  1. README.md +9 -0
  2. poemas.py +49 -0
  3. poemas.txt +0 -0
README.md CHANGED
@@ -1,3 +1,12 @@
  ---
  license: lgpl-3.0
+ language: es
  ---
+
+ # Spanish poems for GPT finetuning
+
+ Collection of Spanish poems from www.poemas-del-alma.com.
+
+ The `poemas.txt` file has been formatted with special tokens for CLM training.
+
+ Titles and author names have been removed, leaving only the poem's body.
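For quick reference, a minimal loading sketch. The hub repository id is not stated in this commit, so `mapama247/poemas` below is an assumption; adjust it to the actual path.

```python
# Minimal sketch: load the train split and look at one cleaned-up poem.
# "mapama247/poemas" is a hypothetical repo id; replace it with the real one.
from datasets import load_dataset

poems = load_dataset("mapama247/poemas", split="train")
print(poems[0]["text"][:200])  # first 200 characters of the first poem
```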
poemas.py ADDED
@@ -0,0 +1,49 @@
+ """
+ ### Spanish Poetry Dataset ###
+ Collection of Spanish poems retrieved by Andrea Morales and Miguel López from the website www.poemas-del-alma.com.
+ Corpus adapted for Causal Language Modeling (CLM) to train GPT-like models. The author and title of each poem have been removed.
+ Note that, depending on your tokenizer, you might want to replace the <BOS>/<SEP>/<EOS> tokens with <|endoftext|> or something else.
+ Also note that the number of rows is slightly lower than in the original dataset (andreamorgar/spanish_poetry) because a few incorrect examples have been filtered out.
+ """
+
+ import datasets
+
+ _DESCRIPTION = "Collection of Spanish poems retrieved from www.poemas-del-alma.com"
+ _HOMEPAGE = "https://www.kaggle.com/datasets/andreamorgar/spanish-poetry-dataset"
+ _AUTHORS = "Andrea Morales and Miguel López"
+ _LICENSE = "GNU Lesser General Public License"
+
+ class Poemas(datasets.GeneratorBasedBuilder):
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "text": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_AUTHORS,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_file = dl_manager.download_and_extract("poemas.txt")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_file,
+                     "split": "train",
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath, split):
+         to_replace = {"<BOS>": "", "<EOS>": "", "<SEP>": "\n"}
+         with open(filepath, encoding="utf-8") as f:
+             for key, poem in enumerate(f.readlines()):
+                 for old, new in to_replace.items():
+                     poem = poem.replace(old, new)
+                 yield key, {"text": poem.strip()}
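The docstring above notes that the <BOS>/<SEP>/<EOS> markers may need to be remapped depending on the tokenizer. A minimal sketch of one possible remapping for a GPT-2-style tokenizer, assuming `poemas.txt` is available locally with one tagged poem per line (the format implied by `_generate_examples`):

```python
# One possible mapping per the docstring's note: drop <BOS>, turn <SEP> into
# line breaks, and mark the end of each poem with GPT-2's <|endoftext|>.
# Assumes a local copy of poemas.txt with one tagged poem per line.
replacements = {"<BOS>": "", "<SEP>": "\n", "<EOS>": "<|endoftext|>"}

poems = []
with open("poemas.txt", encoding="utf-8") as f:
    for line in f:
        for old, new in replacements.items():
            line = line.replace(old, new)
        poems.append(line.strip())
```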
poemas.txt ADDED
The diff for this file is too large to render. See raw diff