"""
Parse all paragraphs from all *.fb2 files in the `data` directory, create a Hugging Face
Dataset, and push it to the Hub as `vldsavelyev/murakami`.
"""


import os
from pathlib import Path
from lxml import etree
import datasets

datasets.logging.set_verbosity_info()


_DESCRIPTION = """\
Russian translations of Murakami novels, intended for fine-tuning a generative language
model. The source is FB2 files from http://flibusta.is/a/8570.
"""


class Builder(datasets.GeneratorBasedBuilder):
    """Murakami novels, translated to Russian."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=datasets.Features({"text": datasets.Value("string")}),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        data_dir = Path(self.base_path) / "data"
        fb2_paths = list(data_dir.glob("*.fb2"))
        if not fb2_paths:
            raise ValueError(f"No fb2 files found in {data_dir}")
        print(f"Found {len(fb2_paths)} fb2 files in {data_dir}")

        smallest_path = min(fb2_paths, key=os.path.getsize)
        print(f"Using smallest title as a training example: {smallest_path}")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepaths": [p for p in fb2_paths if p != smallest_path],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepaths": [smallest_path],
                },
            ),
        ]

    def _generate_examples(self, filepaths):
        for fileidx, filepath in enumerate(filepaths):
            print(fileidx, filepath)
            title, text = self._extract_text_from_fb2(filepath, fileidx)
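            # The first element of the yielded tuple is the example key; the `datasets`
            # library requires keys to be unique, so book titles are assumed to be
            # unique across the fb2 files here.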
            yield title, {"text": text}

    @staticmethod
    def _extract_text_from_fb2(filepath: Path, fileidx: int) -> tuple[str, str]:
        """
        Parse an FB2 file and return the concatenation of its paragraphs, along with the title.
        """
        # Load the FB2 format file
        with filepath.open("rb") as file:
            fb2_data = file.read()

        # Print structure of the FB2 format file
        # print(etree.tostring(etree.fromstring(fb2_data), pretty_print=True))

        # Parse the FB2 format file using lxml
        root = etree.fromstring(fb2_data)

        # Get the title of the book
        title = root.xpath(
            "//fb:title-info/fb:book-title",
            namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
        )[0].text
        print(title)

        # Get all book paragraphs
        paragraphs = root.xpath(
            "//fb:p",
            namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
        )

        # UNCOMMENT THE LINE BELOW TO BUILD `START_PARAGRAPHS`:
        # Builder.helper_to_find_first_paragraphs(paragraphs, title, fileidx)

        found_paragraphs = []
        skipping = True
        for pi, p in enumerate(paragraphs):
            if p.text is None:
                continue
            if (
                fileidx in Builder.START_PARAGRAPHS
                and pi >= Builder.START_PARAGRAPHS[fileidx]
            ):
                skipping = False
            if skipping and p.text.lower() == title.lower():
                skipping = False
            if not skipping:
                found_paragraphs.append(p)
        print(f"Found {len(found_paragraphs)} paragraphs")
        text = ""
        for p in found_paragraphs:
            # The character being replaced is assumed to be a non-breaking space
            # (U+00A0), common in Russian ebook texts; normalize it to a plain space.
            text += p.text.replace("\xa0", " ") + "\n"
        text += "\n"
        return title, text

    # Index of the first <p> element to keep from each fb2 file, keyed by file index.
    # This allows skipping intros and other junk at the beginning of an fb2. The dict
    # is built semi-manually with the `helper_to_find_first_paragraphs` function.
    START_PARAGRAPHS = {
        3: 5,
        6: 27,
        7: 3,
        9: 4,
        10: 3,
        12: 11,
        18: 5,
        20: 3,
        21: 5,
    }

    @staticmethod
    def helper_to_find_first_paragraphs(paragraphs, title, book_number, n=30):
        """
        Helps to eyeball the first few paragraphs of a book, to spot junk paragraphs
        at the beginning and manually construct the `START_PARAGRAPHS` dict.
        """
        found_paragraphs = []
        skipping = True
        for i, p in enumerate(list(paragraphs)[:n]):
            if p.text is None:
                continue
            if (
                book_number in Builder.START_PARAGRAPHS
                and i >= Builder.START_PARAGRAPHS[book_number]
            ):
                skipping = False
            if skipping and p.text.lower() == title.lower():
                skipping = False
            if not skipping:
                found_paragraphs.append(f"   {i} {p.text}")

        if found_paragraphs:
            print("✅")
            print("\n".join(found_paragraphs))

        else:
            print("❌")
            for i, p in enumerate(list(paragraphs)[:30]):
                print(f"   {i} {p.text}")