add data
- data/test.jsonl +0 -0
- wikitext_103.py +50 -0
data/test.jsonl
ADDED
The diff for this file is too large to render.
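Each line of data/test.jsonl is expected to hold one standalone JSON object with a single "text" field containing a Wikitext-103 article: the loading script below declares only a "text" feature and passes each row straight through json.loads. A hypothetical sample line (the article text is a placeholder, not taken from the actual file):

{"text": " = Example Article Title = \n Body of the article ... "}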
wikitext_103.py
ADDED
@@ -0,0 +1,50 @@
+import datasets
+import os
+import json
+
+
+_CITATION = ""
+_DESCRIPTION = """
+Wikitext-103 dataset from this paper:
+https://arxiv.org/pdf/1609.07843.pdf
+
+Gopher's authors concatenate all the articles, set context length to n/2 (n = max_seq_len),
+and use the "closed vocabulary" variant of the dataset for evaluation.
+
+In contrast, we evaluate the model on each article independently, use single token contexts
+(except for the last sequence in each document), and use the raw dataset.
+"""
+
+class Wikitext103(datasets.GeneratorBasedBuilder):
+    VERSION = datasets.Version("1.0.0")
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "text": datasets.Value("string"),
+
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage="",
+            license="",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        test_json = dl_manager.download(os.path.join("data", "test.jsonl"))
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"path": test_json},
+            )
+        ]
+
+    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    def _generate_examples(self, path):
+        with open(path, encoding="utf-8") as f:
+            for key, row in enumerate(f):
+                yield key, json.loads(row)
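For reference, a loading script like this is normally consumed by pointing datasets.load_dataset at the file. A minimal sketch, assuming wikitext_103.py and its data/ directory sit in the working directory; the "test" split name and "text" column come from the script above, everything else is illustrative (recent versions of the `datasets` library may also require passing trust_remote_code=True to execute a local loading script):

import datasets

# Build the single TEST split defined in _split_generators.
ds = datasets.load_dataset("wikitext_103.py", split="test")

# Each example is a dict carrying the lone "text" feature declared in _info.
print(len(ds), "articles")
print(ds[0]["text"][:200])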