Move fb2 parsing into the self._generate_examples func. Use self.base_path to find fb2s in the repo
2d3e792
""" | |
Parse all paragraphs from all *.fb2 files in the input directory, create a Huggingface Dataset and push it | |
to the Hub as `vldsavelyev/murakami`. | |
""" | |
import os
from pathlib import Path

from lxml import etree
import datasets

datasets.logging.set_verbosity_info()
_DESCRIPTION = """\
Russian translations of Murakami novels, to fine-tune a generative language model. Source is FB2 files
from http://flibusta.is/a/8570.
"""


class Builder(datasets.GeneratorBasedBuilder):
    """Murakami novels, translated to Russian."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=datasets.Features({"text": datasets.Value("string")}),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
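        # `self.base_path` is set by `datasets` to the root of the dataset
        # repository, so the *.fb2 files are expected in its data/ subfolder.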
        data_dir = Path(self.base_path) / "data"
        fb2_paths = list(data_dir.glob("*.fb2"))
        if len(fb2_paths) > 0:
            print(f"Found {len(fb2_paths)} fb2 files in {data_dir}")
        else:
            raise ValueError(f"No fb2 files found in {data_dir}")
        # The smallest book is held out for the test split; the rest go to train.
        smallest_path = min(fb2_paths, key=os.path.getsize)
        print(f"Using smallest title as a test example: {smallest_path}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepaths": [p for p in fb2_paths if p != smallest_path],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepaths": [smallest_path],
                },
            ),
        ]

    def _generate_examples(self, filepaths):
        for fileidx, filepath in enumerate(filepaths):
            print(fileidx, filepath)
            title, text = self._extract_text_from_fb2(filepath, fileidx)
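            # The book title serves as the example key; `datasets` requires
            # keys to be unique, so this assumes no two books share a title.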
            yield title, {"text": text}

    @staticmethod
    def _extract_text_from_fb2(filepath: Path, fileidx: int) -> tuple[str, str]:
        """
        Parse FB2 file and return the concatenation of its paragraphs, along with the title.
        """
        # Load the FB2 format file
        with filepath.open("rb") as file:
            fb2_data = file.read()
        # Print structure of the FB2 format file:
        # print(etree.tostring(etree.fromstring(fb2_data), pretty_print=True))
        # Parse the FB2 format file using lxml
        root = etree.fromstring(fb2_data)
# Get the title of the book | |
title = root.xpath( | |
"//fb:title-info/fb:book-title", | |
namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"}, | |
)[0].text | |
print(title) | |
# Get all book paragraphs | |
paragraphs = root.xpath( | |
"//fb:p", | |
namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"}, | |
) | |
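        # Note: `//fb:p` matches every <p> element anywhere in the document,
        # so the list may start with preface/annotation junk; the loop below
        # skips ahead using `START_PARAGRAPHS` or the first title match.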
        # UNCOMMENT THE LINE BELOW TO BUILD `START_PARAGRAPHS`:
        # Builder.helper_to_find_first_paragraphs(paragraphs, title, fileidx)
        found_paragraphs = []
        skipping = True
        for pi, p in enumerate(paragraphs):
            if p.text is None:
                continue
            if (
                fileidx in Builder.START_PARAGRAPHS
                and pi >= Builder.START_PARAGRAPHS[fileidx]
            ):
                skipping = False
            if skipping and p.text.lower() == title.lower():
                skipping = False
            if not skipping:
                found_paragraphs.append(p)
        print(f"Found {len(found_paragraphs)} paragraphs")
text = "" | |
for p in found_paragraphs: | |
text += p.text.replace(" ", " ") + "\n" | |
text += "\n" | |
return title, text | |

    # Index of the first <p> element to keep from each fb2, keyed by file number.
    # This allows skipping intros and other junk at the beginning of an fb2. Built
    # semi-manually using the `helper_to_find_first_paragraphs` function below.
    START_PARAGRAPHS = {
        3: 5,
        6: 27,
        7: 3,
        9: 4,
        10: 3,
        12: 11,
        18: 5,
        20: 3,
        21: 5,
    }

    @staticmethod
    def helper_to_find_first_paragraphs(paragraphs, title, book_number, n=30):
        """
        Helps to eyeball the first few paragraphs of a book, to skip junk paragraphs
        at the beginning and manually construct the `START_PARAGRAPHS` dict.
        """
        found_paragraphs = []
        skipping = True
        for i, p in enumerate(list(paragraphs)[:n]):
            if p.text is None:
                continue
            if (
                book_number in Builder.START_PARAGRAPHS
                and i >= Builder.START_PARAGRAPHS[book_number]
            ):
                skipping = False
            if skipping and p.text.lower() == title.lower():
                skipping = False
            if not skipping:
                found_paragraphs.append(f" {i} {p.text}")
        if found_paragraphs:
            print("✅")
            print("\n".join(found_paragraphs))
        else:
            print("❌")
            for i, p in enumerate(list(paragraphs)[:n]):
                print(f" {i} {p.text}")