devrim committed
Commit fc6e90a · verified · 1 Parent(s): 3ea87b1

Upload folder using huggingface_hub
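A commit like this one is typically produced with the upload_folder helper from huggingface_hub. A minimal sketch of the uploading side, assuming a local folder named wiki_long_subset and a hypothetical repo id devrim/wiki-long-subset (neither is confirmed by the commit):

from huggingface_hub import HfApi

api = HfApi()
# Uploads every file under folder_path as a single commit; files matching the
# LFS rules in .gitattributes are stored as LFS objects automatically.
api.upload_folder(
    folder_path="wiki_long_subset",      # local folder name is an assumption
    repo_id="devrim/wiki-long-subset",   # hypothetical repo id
    repo_type="dataset",
    commit_message="Upload folder using huggingface_hub",
)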
.gitattributes CHANGED
@@ -53,3 +53,16 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+test/partition_0.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_0.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_1.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_10.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_11.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_2.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_3.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_4.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_5.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_6.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_7.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_8.jsonl filter=lfs diff=lfs merge=lfs -text
+train/partition_9.jsonl filter=lfs diff=lfs merge=lfs -text
info.json ADDED
@@ -0,0 +1,9 @@
+{
+    "n_instances": 160488,
+    "avg_num_words": 4802.806957529535,
+    "source_dataset": {
+        "path": "wikimedia/wikipedia",
+        "name": "20231101.en",
+        "split": "train"
+    }
+}
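The two statistics in info.json can be recomputed from the JSONL partitions. A minimal sketch, assuming the script runs from the repository root after the LFS files have been pulled, that each line is a JSON object with a "text" field, that avg_num_words counts whitespace-separated tokens, and that the test partition is included in the totals (none of this is stated in the commit):

import glob
import json

n_instances = 0
total_words = 0
# Walk every partition uploaded in this commit.
for path in sorted(glob.glob("train/partition_*.jsonl") + glob.glob("test/partition_*.jsonl")):
    with open(path, encoding="utf-8") as f:
        for line in f:
            article = json.loads(line)
            n_instances += 1
            # Assumption: a "word" is a whitespace-separated token of the article text.
            total_words += len(article["text"].split())

print({"n_instances": n_instances, "avg_num_words": total_words / n_instances})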
test/partition_0.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d843e1f1a1ee692844da2d8c828a09d96eebbf69561d7f8ce2e636180ad0041e
+size 384008167
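Each partition_*.jsonl entry in this commit is a Git LFS pointer file rather than the data itself: three key-value lines giving the pointer spec version, the SHA-256 digest of the real object, and its size in bytes. A minimal sketch of reading one (parse_lfs_pointer is a hypothetical helper name, not part of any library):

def parse_lfs_pointer(path):
    """Parse a Git LFS pointer file into a dict of its key-value lines."""
    pointer = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            key, _, value = line.partition(" ")
            pointer[key] = value
    return pointer

# e.g. {"version": "https://git-lfs.github.com/spec/v1",
#       "oid": "sha256:d843e1f1...", "size": "384008167"}
print(parse_lfs_pointer("test/partition_0.jsonl"))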
train/partition_0.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c27bc4e18038ddd9d979cfe9e331f3d3696f37a3b624e11b1bf1e31accc24dd
+size 384027753
train/partition_1.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2d09a2895b57c22e936fb9ead692780d2497c15497d405a06c88174a15bf018
+size 384020637
train/partition_10.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5722723ef9f4e56a68caa4af853e22f8058c858a1e752a693a0946c3503f22d5
+size 384008336
train/partition_11.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04ad71ab57615de031e7300fec4104666f2a5e479e40b798d1c6b6ee6ffcc50c
+size 317818053
train/partition_2.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d6afd9de019e00c2e053db557c4c1d0fe317ce3bed5a5d5ca648c59951c1783
+size 384017566
train/partition_3.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97cab6298866641254de913394897082862c91071105442b7d6c47c85e32f411
+size 384012697
train/partition_4.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3e53108b48ebe5f4b2252f6f408f917484913b981b7e9af8d748c61d7379d11
+size 384000482
train/partition_5.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:524971363cf18882b93ffb1c6e93877b99ddfcee76647864ddd72c3e898f2f18
+size 384019477
train/partition_6.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34243842d3c2f224783ad0548d4c44346bc43c6f16b7baffdc82e75926c42fe5
+size 384099179
train/partition_7.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:659b2f7b31822e52e78d48b0e8a214987e5e3359db692acdfb8c3cb24f3da1c1
+size 384017029
train/partition_8.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f62eee7848ec4c9ef2abf32a54c199976ded73ef767d64a2c037ad459016637
+size 384005902
train/partition_9.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0fe662d8ae57515c9b05ac0029e9e9957a9f3272ec34e7e2016edd00b5f5884
+size 384006511
wiki_long_subset.py ADDED
@@ -0,0 +1,106 @@
+# coding=utf-8
+# Copyright 2023 Devrim Cavusoglu and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Wiki Long Subset."""
+
+
+import json
+
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+
+
+_DESCRIPTION = """\
+Dataset consisting of long Wikipedia articles.
+"""
+
+_URLS = {
+    "train": [
+        "train/partition_0.jsonl",
+        "train/partition_1.jsonl",
+        "train/partition_2.jsonl",
+        "train/partition_3.jsonl",
+        "train/partition_4.jsonl",
+        "train/partition_5.jsonl",
+        "train/partition_6.jsonl",
+        "train/partition_7.jsonl",
+        "train/partition_8.jsonl",
+        "train/partition_9.jsonl",
+        "train/partition_10.jsonl",
+        "train/partition_11.jsonl",
+    ],
+    "test": "test/partition_0.jsonl",
+}
+
+
+class WikiLongDatasetConfig(datasets.BuilderConfig):
+    """BuilderConfig for Dataset."""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for Dataset.
+
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(WikiLongDatasetConfig, self).__init__(**kwargs)
+
+    @property
+    def features(self):
+        return {
+            "id": datasets.Value("string"),
+            "url": datasets.Value("string"),
+            "title": datasets.Value("string"),
+            "text": datasets.Value("string"),
+        }
+
+
+class WikiLongDataset(datasets.GeneratorBasedBuilder):
+    """Wiki Long Subset dataset. Version 1.0."""
+
+    BUILDER_CONFIGS = [
+        WikiLongDatasetConfig(
+            version=datasets.Version("1.0.0", ""),
+            description="Long Wikipedia Articles"
+        )
+    ]
+    BUILDER_CONFIG_CLASS = WikiLongDatasetConfig
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(self.config.features),
+        )
+
+    def _split_generators(self, dl_manager):
+        data_dir = dl_manager.download_and_extract(_URLS)
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"]}),
+        ]
+
+    def _generate_examples(self, filepath):
+        """This function returns the examples in the raw (text) form."""
+        logger.info("generating examples from = %s", filepath)
+        if isinstance(filepath, str):
+            filepath = [filepath]
+        key = 0
+        for path in filepath:
+            with open(path, encoding="utf-8") as data:
+                for article in data:
+                    yield key, json.loads(article)
+                    key += 1
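With the loading script in place, the subset loads through the datasets library like any Hub dataset. A minimal sketch, assuming the same hypothetical repo id devrim/wiki-long-subset as above; script-backed datasets also require trust_remote_code=True on recent datasets releases:

from datasets import load_dataset

# Hypothetical repo id; substitute the actual dataset repository.
dataset = load_dataset("devrim/wiki-long-subset", trust_remote_code=True)

print(dataset)                        # DatasetDict with "train" and "test" splits
print(dataset["train"][0]["title"])   # each example has id, url, title, text fields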