|
""" |
|
Parse all paragraphs from all *.fb2 files in the input directory, create a Huggingface Dataset and push it to the Hub as `vldsavelyev/murakami`. |
|
""" |
|
|
|
|
|
import os |
|
from pathlib import Path |
|
from lxml import etree |
|
import datasets |
|
from datasets import Dataset |
|
from huggingface_hub import create_repo |
|
|
|
datasets.logging.set_verbosity_info() |
|
|
|
|
|
_DESCRIPTION = """\ |
|
Russian translations of Murakami novels, to fine-tune a generative language model. Source is FB2 files from http://flibusta.is/a/8570. |
|
""" |
|
|
|
|
|
class Builder(datasets.GeneratorBasedBuilder):
    """Murakami novels, translated to Russian."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
        )

    START_PARAGRAPHS = {
        3: 5,
        6: 27,
        7: 3,
        9: 4,
        10: 3,
        12: 11,
        18: 5,
        20: 3,
        21: 5,
    }
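    # START_PARAGRAPHS maps a book's index (its position in the list of fb2
    # files enumerated in `_split_generators`) to the index of the first
    # paragraph of the actual text; earlier "junk" paragraphs are skipped
    # (see `helper_to_find_first_paragraphs` below).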
|
|
|
    @staticmethod
    def helper_to_find_first_paragraphs(paragraphs, title, book_number, n=30):
        """
        Helps to eyeball the first few paragraphs of a book, in order to skip
        junk paragraphs at the beginning and manually construct the
        `START_PARAGRAPHS` dict.
        """
        found_paragraphs = []
        skipping = True
        for i, p in enumerate(list(paragraphs)[:n]):
            if p.text is None:
                continue
            if (
                book_number in Builder.START_PARAGRAPHS
                and i >= Builder.START_PARAGRAPHS[book_number]
            ):
                skipping = False
            if skipping and p.text.lower() == title.lower():
                skipping = False
            if not skipping:
                found_paragraphs.append(f" {i} {p.text}")

        if found_paragraphs:
            print("✅")
            print("\n".join(found_paragraphs))
        else:
            print("❌")
            for i, p in enumerate(list(paragraphs)[:30]):
                print(f" {i} {p.text}")
|
|
|
    def _split_generators(self, dl_manager):
        data_dir = "data"
        text_by_name = {}
        fb2s = list(Path(data_dir).glob("*.fb2"))
        if len(fb2s) > 0:
            print(f"Found {len(fb2s)} fb2 files in {data_dir}")
        else:
            raise ValueError(f"No fb2 files found in {data_dir}")

        for bi, path in enumerate(fb2s):
            print(bi, path)

            with path.open("rb") as file:
                fb2_data = file.read()

            # FB2 is an XML format: parse it and pull out the book title and
            # all paragraph (<p>) elements.
            root = etree.fromstring(fb2_data)

            title = root.xpath(
                "//fb:title-info/fb:book-title",
                namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
            )[0].text
            print(title)

            paragraphs = root.xpath(
                "//fb:p",
                namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
            )

            # Skip junk paragraphs at the start of the book: either jump
            # straight to the hand-picked index in START_PARAGRAPHS, or wait
            # until a paragraph matches the book title.
            found_paragraphs = []
            skipping = True
            for pi, p in enumerate(paragraphs):
                if p.text is None:
                    continue
                if (
                    bi in Builder.START_PARAGRAPHS
                    and pi >= Builder.START_PARAGRAPHS[bi]
                ):
                    skipping = False
                if skipping and p.text.lower() == title.lower():
                    skipping = False
                if not skipping:
                    found_paragraphs.append(p)
            print(f"Found {len(found_paragraphs)} paragraphs")

text_by_name[title] = "" |
|
for p in found_paragraphs: |
|
text_by_name[title] += p.text.replace(" ", " ") + "\n" |
|
text_by_name[title] += "\n" |
|
|
|
print("Novel by size:") |
|
for title, text in text_by_name.items(): |
|
print(f" {title}: {len(text):,} characters") |
|
|
|
smallest_title = min(text_by_name, key=lambda k: len(text_by_name[k])) |
|
print( |
|
f"Using smallest novel {smallest_title} " |
|
f"({len(text_by_name[smallest_title]):,} characters) as a test set" |
|
) |
|
|
|
test_titles = [smallest_title] |
|
train_titles = [t for t in text_by_name if t not in test_titles] |
|
|
|
return [ |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, |
|
|
|
gen_kwargs={ |
|
"titles": train_titles, |
|
"texts": [text_by_name[t] for t in train_titles], |
|
"split": "train", |
|
}, |
|
), |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TEST, |
|
|
|
gen_kwargs={ |
|
"titles": test_titles, |
|
"texts": [text_by_name[t] for t in test_titles], |
|
"split": "test", |
|
}, |
|
), |
|
] |
|
|
|
|
|
    def _generate_examples(self, titles, texts, split):
        # One example per novel: the title is the example key, the value is
        # the full book text.
        for title, text in zip(titles, texts):
            yield title, {"text": text}
|
|