"""
Parse all paragraphs from all *.fb2 files in the input directory, create a Huggingface Dataset and push it 
to the Hub as `vldsavelyev/murakami`.
"""


import os
from pathlib import Path
from lxml import etree
import datasets

datasets.logging.set_verbosity_info()


_DESCRIPTION = """\
Russian translations of Murakami novels, for fine-tuning a generative language model.
The source is FB2 files from http://flibusta.is/a/8570.
"""


class Builder(datasets.GeneratorBasedBuilder):
    """Murakami novels, translated to Russian."""

    VERSION = datasets.Version("1.1.0")

    # Small chapters are usually footnotes or the book title; they are skipped by default,
    # since they don't help capture the author's style anyway.
    MIN_CHAPTER_SIZE = 500

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=datasets.Features({"text": datasets.Value("string")}),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        fb2_dir = dl_manager.download_and_extract("data.zip")
        fb2_paths = list(Path(fb2_dir).glob("**/*.fb2"))
        if not fb2_paths:
            raise ValueError(f"No fb2 files found in {fb2_dir}")
        print(f"Found {len(fb2_paths)} fb2 files")

        smallest_path = min(fb2_paths, key=os.path.getsize)
        print(f"Using smallest title as a training example: {smallest_path}")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepaths": [p for p in fb2_paths if p != smallest_path],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepaths": [smallest_path],
                },
            ),
        ]

    def _generate_examples(self, filepaths):
        for filepath in filepaths:
            title, chapters = self._extract_text_from_fb2(filepath)
            for i, chapter in enumerate(chapters):
                yield f"{title} {i}", {"text": chapter}

    @staticmethod
    def _extract_text_from_fb2(filepath: Path) -> tuple[str, list[str]]:
        """
        Parse an FB2 file and return the book title along with its chapters.
        """
        # Load the FB2 format file
        with filepath.open("rb") as file:
            fb2_data = file.read()

        # Parse the FB2 format file using lxml
        root = etree.fromstring(fb2_data)

        # Get the title of the book
        title = root.xpath(
            "//fb:title-info/fb:book-title",
            namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
        )[0].text

        # All text is stored in <p> tags. There are also <section> tags, which have no text
        # content of their own but serve as chapter separators, so we merge the contents of
        # all <p> tags between two consecutive <section> tags.
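        # For reference, a minimal FB2 body looks roughly like this (structure assumed
        # from typical FictionBook 2.0 files; all elements live in the FB2 namespace):
        #
        #   <body>
        #     <section>
        #       <p>First paragraph of chapter one...</p>
        #       <p>Second paragraph...</p>
        #     </section>
        #     <section>
        #       <p>First paragraph of chapter two...</p>
        #     </section>
        #   </body>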
        chapters: list[str] = []

        def _add_chapter(text: str) -> None:
            if not text:
                return
            # Skip chapters below the size threshold: usually footnotes or title pages.
            if (
                Builder.MIN_CHAPTER_SIZE is not None
                and len(text) < Builder.MIN_CHAPTER_SIZE
            ):
                return
            chapters.append(text)

        chapter = ""
        for e in root.iter():
            if e.tag.endswith("}p"):
                # itertext() also captures text nested inside inline tags such as
                # <emphasis>, which `e.text` alone would miss; the tail keeps the
                # whitespace that separates consecutive paragraphs.
                chapter += "".join(e.itertext()) + (e.tail or "")
            elif e.tag.endswith("}section"):
                _add_chapter(chapter)
                chapter = ""
        _add_chapter(chapter)

        print(f'{filepath}: "{title}", found {len(chapters)} chapters')
        # print(f"Chapter sizes: {', '.join(str(len(c)) for c in chapters)}")
        # print()
        return title, chapters
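

if __name__ == "__main__":
    # A minimal sketch of running this builder end-to-end, per the module docstring:
    # build locally from `data.zip` (assumed to sit next to this script) and push the
    # result to the Hub (assumes prior authentication, e.g. via `huggingface-cli login`).
    builder = Builder()
    builder.download_and_prepare()
    dataset = builder.as_dataset()
    dataset.push_to_hub("vldsavelyev/murakami")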