add data

Files changed:
- cs-en/test.jsonl (added)
- cs-en/train.jsonl (added)
- cs-en/validation.jsonl (added)
- data/test.json (deleted)
- data/validation.json (deleted)
- de-en/test.jsonl (added)
- de-en/train.jsonl (added)
- de-en/validation.jsonl (added)
- fr-en/test.jsonl (added)
- data/train.json → fr-en/train.jsonl (renamed)
- fr-en/validation.jsonl (added)
- hi-en/test.jsonl (added)
- hi-en/train.jsonl (added)
- hi-en/validation.jsonl (added)
- ru-en/test.jsonl (added)
- ru-en/train.jsonl (added)
- ru-en/validation.jsonl (added)
- wmt_14.py (added, +77 −0)
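The data diffs themselves are too large to render, but each per-pair JSONL file follows the two-column schema defined by wmt_14.py below: one JSON object per line, keyed by the two language codes in the config name. A minimal sketch of parsing one such line (the sentence pair is borrowed from the example in the script's docstring; the actual file contents are not shown in this diff):

import json

# Hypothetical line from fr-en/validation.jsonl; the pair comes from the
# docstring example in wmt_14.py, not from the real file.
line = '{"fr": "Assemblée générale", "en": "General Assembly"}'
record = json.loads(line)
assert set(record) == {"fr", "en"}
print(record["fr"], "->", record["en"])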
wmt_14.py
ADDED
@@ -0,0 +1,77 @@
import datasets
import os
import json

_CITATION = ""
_DESCRIPTION = """
The 2014 Workshop on Statistical Machine Translation:
https://aclanthology.org/W14-3302.pdf

The scenario consists of 5 subsets, each of which is a parallel corpus between English and another language. The
non-English languages include Czech, German, French, Hindi, and Russian.

For each language pair, the validation and test sets each include around 3,000 examples, while the training set is
usually much larger. We therefore randomly downsample the training set to speed up data processing.

Task prompt structure:

Translate {source_language} to {target_language}:
{Hypothesis} = {Reference}

Example from WMT14 Fr-En:

Hypothesis: Assemblée générale
Reference: General Assembly
"""


class WMT14(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, version=datasets.Version("1.0.0"), description="")
        # "en-en" is an identity pair; its features dict below collapses to a single "en" column.
        for name in ["cs-en", "de-en", "fr-en", "hi-en", "ru-en", "en-en"]
    ]

    def _info(self):
        # The config name (e.g. "fr-en") determines the two feature columns.
        source_language, target_language = self.config.name.split("-")
        features = datasets.Features(
            {
                source_language: datasets.Value("string"),
                target_language: datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Each config's files live in a directory named after the language pair.
        train_json = dl_manager.download(os.path.join(self.config.name, "train.jsonl"))
        test_json = dl_manager.download(os.path.join(self.config.name, "test.jsonl"))
        val_json = dl_manager.download(os.path.join(self.config.name, "validation.jsonl"))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": train_json},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"path": test_json},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"path": val_json},
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, path):
        # One JSON object per line, e.g. {"fr": "...", "en": "..."}.
        with open(path, encoding="utf-8") as f:
            for key, row in enumerate(f):
                yield key, json.loads(row)
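For completeness, a hedged usage sketch: once this commit is in a Hub dataset repo, one language pair could presumably be loaded as below. The repo id "wmt_14" is a placeholder for the actual namespace/name, and recent versions of datasets require trust_remote_code=True to run script-based builders like this one:

import datasets

# Placeholder repo id; substitute the actual repo this commit belongs to.
ds = datasets.load_dataset("wmt_14", "fr-en", trust_remote_code=True)

print(ds)                   # DatasetDict with train, test, and validation splits
print(ds["validation"][0])  # e.g. {"fr": "...", "en": "..."}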