"""
Parse all paragraphs from all *.fb2 files in the input directory, create a Huggingface Dataset and push it
to the Hub as `vldsavelyev/murakami`.
"""
import os
from pathlib import Path

from lxml import etree
import datasets

datasets.logging.set_verbosity_info()

_DESCRIPTION = """\
Russian translations of Murakami novels, to fine-tune a generative language model. Source is FB2 files
from http://flibusta.is/a/8570.
"""


class Builder(datasets.GeneratorBasedBuilder):
    """Murakami novels, translated to Russian."""

    VERSION = datasets.Version("1.1.0")

    # Small chapters are usually footnotes or the book title; they are
    # skipped by default, as they don't help capture the author's style
    # anyway.
    MIN_CHAPTER_SIZE = 500

    def _info(self):
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=datasets.Features({"text": datasets.Value("string")}),
)

    def _split_generators(self, dl_manager: datasets.DownloadManager):
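        # "data.zip" is a path relative to this dataset repository; the
        # DownloadManager resolves it (downloading the archive first when the
        # script is loaded remotely) and extracts it, returning a local
        # directory.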
fb2_dir = dl_manager.download_and_extract("data.zip")
fb2_paths = list(Path(fb2_dir).glob("**/*.fb2"))
        if not fb2_paths:
            raise ValueError(f"No fb2 files found in {fb2_dir}")
        print(f"Found {len(fb2_paths)} fb2 files")
        smallest_path = min(fb2_paths, key=os.path.getsize)
        print(f"Using the smallest title as the test example: {smallest_path}")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepaths": [p for p in fb2_paths if p != smallest_path],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepaths": [smallest_path],
},
),
]

    def _generate_examples(self, filepaths):
        for fileidx, filepath in enumerate(filepaths):
            title, chapters = self._extract_text_from_fb2(filepath, fileidx)
            for i, chapter in enumerate(chapters):
                # Include the file index in the key to keep keys unique even
                # if two files share a book title.
                yield f"{title} {fileidx} {i}", {"text": chapter}

    @staticmethod
def _extract_text_from_fb2(filepath: Path, fileidx: int) -> tuple[str, list[str]]:
"""
Parse a FB2 file and return book chapters, along with the book title.
"""
# Load the FB2 format file
with filepath.open("rb") as file:
fb2_data = file.read()
# Parse the FB2 format file using lxml
root = etree.fromstring(fb2_data)
# Get the title of the book
title = root.xpath(
"//fb:title-info/fb:book-title",
namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
)[0].text
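        # `book-title` sits in the <title-info> block of the FB2 header; the
        # namespace map is required because FB2 elements live in the FB2 2.0
        # namespace.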
        # UNCOMMENT THE CALL BELOW TO BUILD `START_PARAGRAPHS` (it requires a
        # `helper_to_find_first_paragraphs` function, which is not defined in
        # this file):
        # helper_to_find_first_paragraphs(root, title, fileidx)
        # All text is stored in <p> tags; <section> tags carry no text of
        # their own but act as chapter separators, so we merge the contents
        # of all <p> tags between two <section> tags into one chapter.
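        # An illustrative (hypothetical) FB2 body fragment:
        #
        #   <section>
        #     <title><p>Chapter 1</p></title>
        #     <p>First paragraph of the chapter ...</p>
        #     <p>Second paragraph ...</p>
        #   </section>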
        chapters: list[str] = []

        def _add_chapter(text: str):
            if not text:
                return
            if (
                Builder.MIN_CHAPTER_SIZE is not None
                and len(text) < Builder.MIN_CHAPTER_SIZE
            ):
                # print(f"Skipping chapter of length {len(text)}")
                return
            # print(f"Adding chapter of length {len(text)}")
            chapters.append(text)

        chapter = ""
        for e in root.iter():
            if e.tag.endswith("}p"):
                # itertext() also collects text nested in inline tags such as
                # <emphasis>, which `e.text` alone would miss.
                chapter += "".join(e.itertext()) + (e.tail or "")
elif e.tag.endswith("}section"):
_add_chapter(chapter)
chapter = ""
_add_chapter(chapter)
print(f'{filepath}: "{title}", found {len(chapters)} chapters')
# print(f"Chapter sizes: {', '.join(str(len(c)) for c in chapters)}")
# print()
return title, chapters
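

if __name__ == "__main__":
    # Minimal local smoke test (a hypothetical invocation, not used by the
    # `datasets` loader): parse one FB2 file given on the command line and
    # report its title and chapter count.
    import sys

    title, chapters = Builder._extract_text_from_fb2(Path(sys.argv[1]), 0)
    print(f"{title}: {len(chapters)} chapters")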