"""
NOTE: This file implements translation tasks using datasets from WMT conferences,
provided by sacrebleu. Traditionally they are evaluated with BLEU scores; TER
and ChrF are other options.
We defer citations and descriptions of the many translation tasks used
here to the SacreBLEU repo from which we've obtained the datasets:
https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/dataset.py
Homepage: https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/dataset.py
"""
import json
import os

import datasets
from sacrebleu import sacrebleu


_CITATION = """
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""


class SacrebleuManual(datasets.GeneratorBasedBuilder):
    """Builds sacrebleu test sets from JSONL files hosted alongside this script
    (one file per test set and language pair)."""

    VERSION = datasets.Version("1.0.0")

    # One config per (test set, language pair), e.g. "wmt14_en-de"; any "/" in a
    # test set name is replaced by "_" to form a valid config name.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=f"{name.replace('/', '_')}_{langpair}",
            version=datasets.Version("1.0.0"),
            description="",
        )
        for name in sacrebleu.get_available_testsets()
        for langpair in sacrebleu.get_langpairs_for_testset(name)
    ]

    def _info(self):
        features = datasets.Features(
            {
                "translation": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=f"Sacrebleu\n{self.config.description}",
            features=features,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Config names join the test set and language pair with underscores; map the
        # underscores back to path separators to locate "<testset>/<langpair>.jsonl".
        downloaded_files = dl_manager.download(f"{os.path.join(*self.config.name.split('_'))}.jsonl")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"path": downloaded_files},
            )
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, path):
        # Each line of the JSONL file is one example, e.g. {"translation": "..."}.
        with open(path, encoding="utf-8") as f:
            for key, row in enumerate(f):
                yield key, json.loads(row)
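

# Illustrative usage sketch, assuming the "<testset>/<langpair>.jsonl" files are
# hosted next to this script; the config name "wmt14_en-de" is one example drawn
# from sacrebleu's test set / language pair listing.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, "wmt14_en-de", split="test")
    print(dataset[0]["translation"])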