import re
import unicodedata
from pathlib import Path
from typing import List

import datasets as ds
from bs4 import BeautifulSoup

_DESCRIPTION = "Parallel passages from novels."

_CITATION = """
内山将夫,高橋真弓.(2003) 日英対訳文対応付けデータ.
Masao Utiyama and Mayumi Takahashi. (2003) English-Japanese Translation Alignment Data.
""".strip()

_HOMEPAGE = "https://www2.nict.go.jp/astrec-att/member/mutiyama/align/"

_LICENSE = None

_DOWNLOAD_URL = (
    "https://www2.nict.go.jp/astrec-att/member/mutiyama/align/download/align-070215.zip"
)


def preprocess(text: str) -> str:
    # Remove Aozora-Bunko-style annotation markers such as <注1> and [#...].
    text = re.sub(r"<注[0-9]+>", "", text.strip())
    text = re.sub(r"\[#.*?\]", "", text)
    # Strip parenthesized hiragana ruby readings, e.g. 秘密（ひみつ） -> 秘密.
    text = re.sub(r"（[\u3040-\u309F]+）", "", text)
    # Normalize spaced minus-sign asides into full-width dash pairs.
    text = re.sub(r" − (.+) − ", r"――\1――", text)
    # Unwrap _emphasis_ underscores.
    text = re.sub(r"_(.+)_", r"\1", text)
    text = re.sub(r" ``$", "''", text.strip())
    text = re.sub(r"^――", "", text.strip())
    return text.strip()
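
# Illustrative sketch of what `preprocess` strips; the example strings are
# assumed, not drawn from the corpus:
#
#   preprocess("それは<注12>[#ママ]秘密（ひみつ）だ。")  ->  "それは秘密だ。"
#   preprocess("_Really_ secret.")                       ->  "Really secret."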


def parse_html_table(path: Path):
    # The source pages are mostly Shift_JIS; fall back to UTF-8, then CP932.
    content = None
    for encoding in ("shift_jis", "utf-8", "cp932"):
        try:
            with path.open(encoding=encoding) as f:
                content = f.read()
            break
        except UnicodeDecodeError:
            continue
    if content is None:
        # Undecodable file: skip it rather than crash.
        return [], []
    soup = BeautifulSoup(content, "lxml")
    tables = soup.find_all("table")
    texts_en, texts_ja = [], []
    # Buffers that accumulate rows until both sides reach a sentence boundary.
    cur_text_en, cur_text_ja = "", ""
    # Rough balance counters for parentheses and quotes (each row contributes
    # at most 1 per side); a segment is flushed only when they match.
    cur_left_parens, cur_right_parens = 0, 0
    cur_left_quote, cur_right_quote = 0, 0
    cur_left_parens_ja, cur_right_parens_ja = 0, 0
    cur_left_parens_ja2, cur_right_parens_ja2 = 0, 0
    for table in tables:
        for tr in table.find_all("tr"):
            # Each row holds three cells; the middle cell is unused.
            text_en, _, text_ja = (preprocess(td.text) for td in tr.find_all("td"))
            text_ja = unicodedata.normalize("NFKC", text_ja)
            cur_left_parens += min(text_en.count("("), 1)
            cur_right_parens += min(text_en.count(")"), 1)
            cur_left_quote += min(len(re.findall(r"``", text_en)), 1)
            cur_right_quote += min(len(re.findall(r"''", text_en)), 1)
            cur_left_parens_ja += min(text_ja.count("「"), 1)
            cur_right_parens_ja += min(text_ja.count("」"), 1)
            cur_left_parens_ja2 += min(text_ja.count("『"), 1)
            cur_right_parens_ja2 += min(text_ja.count("』"), 1)
            if (
                text_ja.strip().endswith("。")
                and text_en.strip().endswith(".")
                and cur_left_parens == cur_right_parens
                and cur_left_quote == cur_right_quote
                and cur_left_parens_ja == cur_right_parens_ja
                and cur_left_parens_ja2 == cur_right_parens_ja2
            ):
                # Both sides end at a sentence boundary and all brackets and
                # quotes are balanced, so flush the merged segment.
                texts_en.append((cur_text_en + " " + text_en).strip())
                texts_ja.append((cur_text_ja + text_ja).strip())
                cur_text_en, cur_text_ja = "", ""
                cur_left_parens, cur_right_parens = 0, 0
                cur_left_quote, cur_right_quote = 0, 0
                cur_left_parens_ja, cur_right_parens_ja = 0, 0
                cur_left_parens_ja2, cur_right_parens_ja2 = 0, 0
            else:
                # Keep accumulating rows into the current segment.
                cur_text_en += " " + text_en
                cur_text_ja += text_ja
    # Flush whatever remains as the final (possibly partial) segment.
    texts_en.append(cur_text_en.strip())
    texts_ja.append(cur_text_ja.strip())
    return texts_en, texts_ja
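
# Minimal sketch of calling the parser directly; the path is hypothetical
# (real pages sit under align/htmPages inside the downloaded archive):
#
#   texts_en, texts_ja = parse_html_table(Path("align/htmPages/example.htm"))
#   assert len(texts_en) == len(texts_ja)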


class EnJaAlignDataset(ds.GeneratorBasedBuilder):
    VERSION = ds.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "default"
    BUILDER_CONFIGS = [
        ds.BuilderConfig(
            name="default",
            version=VERSION,
            description="",
        ),
    ]

    def _info(self) -> ds.DatasetInfo:
        if self.config.name == "default":
            features = ds.Features(
                {
                    "id": ds.Value("int64"),
                    "en": ds.Value("string"),
                    "ja": ds.Value("string"),
                    "source": ds.Value("string"),
                }
            )
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )

    def _split_generators(self, dl_manager: ds.DownloadManager):
        data_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        paths = list(Path(data_path, "align/htmPages").glob("*.htm"))
        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={"paths": paths},
            )
        ]

    def _preprocess_ja(self, text: str) -> str:
        # Remove numbering artifacts such as "1.2." and a leading dash.
        text = re.sub(r"\d+\.(\d|\.)*", "", text.strip()).strip()
        text = re.sub(r"^――", "", text).strip()
        return text

    def _preprocess_en(self, text: str) -> str:
        text = re.sub(r"\d+\.(\d|\.)*", "", text.strip()).strip()
        # Collapse stray triple backticks, then convert LaTeX-style ``quotes''
        # into plain double quotes.
        text = re.sub(r"```(.*?)'", r"``\1", text).strip()
        text = re.sub(r"``(.*?)''", r'"\1"', text).strip()
        return text
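
    # Example of the quote normalization in _preprocess_en (input assumed):
    #   ``Hello,'' she said.  ->  "Hello," she said.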

    def _generate_examples(self, paths: List[Path]):
        idx = 0
        for path in paths:
            texts_en, texts_ja = parse_html_table(path)
            # The two lists are built in lockstep; strict=True (Python 3.10+)
            # guards against silent length mismatches.
            for text_en, text_ja in zip(texts_en, texts_ja, strict=True):
                row = {
                    "id": idx,
                    "en": self._preprocess_en(text_en),
                    "ja": self._preprocess_ja(text_ja),
                    "source": path.name,
                }
                # Emit only pairs where both sides are non-empty after cleanup.
                if (
                    isinstance(row["en"], str)
                    and isinstance(row["ja"], str)
                    and len(row["en"]) > 0
                    and len(row["ja"]) > 0
                ):
                    yield idx, row
                    idx += 1
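

if __name__ == "__main__":
    # Minimal usage sketch, assuming this file is saved as a local `datasets`
    # loading script (the filename "en_ja_align.py" is illustrative) and a
    # `datasets` version that still supports script-based loading.
    dataset = ds.load_dataset("en_ja_align.py", split="train", trust_remote_code=True)
    print(dataset[0])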