# wmt_14/wmt_14.py — Hugging Face `datasets` loading script for the WMT14 translation corpora.
import datasets
import os
import json
# BibTeX citation for the dataset card; intentionally left empty here.
_CITATION = ""
# Human-readable dataset card text, surfaced via `datasets.DatasetInfo.description`.
_DESCRIPTION = """
The 2014 Workshop on Statistical Machine Translation:
https://aclanthology.org/W14-3302.pdf
The scenario consists of 5 subsets, each of which is a parallel corpus between English and another language. The
non-English languages include Czech, German, French, Hindi, and Russian.
For each language pair, the validation and test set each includes around 3,000 examples, while the training set is
usually much larger. We therefore randomly downsample the training set to speedup data processing.
Task prompt structure:
Translate {source_language} to {target_language}:
{Hypothesis} = {Reference}
Example from WMT14 Fr-En:
Hypothesis: Assemblée générale
Reference: General Assembly
"""
class WMT14(datasets.GeneratorBasedBuilder):
    """Builder for the WMT14 parallel corpora, one config per language pair."""

    VERSION = datasets.Version("1.0.0")

    # One config per language pair; each name is a "<source>-<target>" code pair.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=pair, version=datasets.Version("1.0.0"), description="")
        for pair in ["cs-en", "de-en", "fr-en", "hi-en", "ru-en"]
    ]

    def _info(self):
        """Declare one string feature per language, keyed by its language code."""
        src_lang, tgt_lang = self.config.name.split('-')
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    src_lang: datasets.Value("string"),
                    tgt_lang: datasets.Value("string"),
                }
            ),
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-pair JSONL files and map them onto the standard splits."""
        split_files = [
            (datasets.Split.TRAIN, "train.jsonl"),
            (datasets.Split.TEST, "test.jsonl"),
            (datasets.Split.VALIDATION, "validation.jsonl"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"path": dl_manager.download(os.path.join(self.config.name, filename))},
            )
            for split, filename in split_files
        ]

    # `path` is unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, path):
        """Yield (line_index, parsed JSON object) pairs from one JSONL file."""
        with open(path, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                yield idx, json.loads(line)