Languages: Indonesian
License: cc-by-sa-4.0
holylovenia committed
Commit 76d3709
1 parent: 12df322

Upload id_newspaper_2018.py with huggingface_hub

Files changed (1):
  1. id_newspaper_2018.py +150 -0
id_newspaper_2018.py ADDED
@@ -0,0 +1,150 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+from pathlib import Path
+from typing import Dict, List, Tuple
+
+import datasets
+
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Licenses, Tasks
+
+_CITATION = """\
+@misc{feryandi2018,
+  author = {Nurdiantoro, Feryandi},
+  title = {Dataset-Artikel},
+  year = {2018},
+  url = {https://github.com/feryandi/Dataset-Artikel},
+}
+"""
+
+_DATASETNAME = "id_newspaper_2018"
+
+_DESCRIPTION = """\
+The ID Newspapers 2018 dataset provides 500K articles from various Indonesian news sources. Articles were taken from
+7 primary sources (Detik, Kompas, Tempo, CNN Indonesia, Sindo, Republika, Poskota). The compressed files can be
+retrieved from https://huggingface.co/datasets/indonesian-nlp/id_newspapers_2018.
+"""
+
+_HOMEPAGE = "https://github.com/feryandi/Dataset-Artikel"
+
+_LANGUAGES = ["ind"]
+
+_LICENSE = Licenses.CC_BY_SA_4_0.value
+
+_LOCAL = False
+
+_URLS = "https://huggingface.co/datasets/indonesian-nlp/id_newspapers_2018/resolve/main/newspapers-json.tgz"
+
+_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
+
+_SOURCE_VERSION = "1.0.0"
+
+_SEACROWD_VERSION = "2024.06.20"
+
+
+class IDNewspapers2018Dataset(datasets.GeneratorBasedBuilder):
+    """
+    ID Newspapers 2018 is a pretraining dataset from https://huggingface.co/datasets/indonesian-nlp/id_newspapers_2018.
+    """
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+    BUILDER_CONFIGS = [
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_source",
+            version=datasets.Version(_SOURCE_VERSION),
+            description=f"{_DATASETNAME} source schema",
+            schema="source",
+            subset_id=f"{_DATASETNAME}",
+        ),
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_seacrowd_ssp",
+            version=datasets.Version(_SEACROWD_VERSION),
+            description=f"{_DATASETNAME} SEACrowd schema",
+            schema="seacrowd_ssp",
+            subset_id=f"{_DATASETNAME}",
+        ),
+    ]
+
+    def _info(self) -> datasets.DatasetInfo:
+        if self.config.schema == "source":
+            features = datasets.Features({"url": datasets.Value("string"), "date": datasets.Value("string"), "title": datasets.Value("string"), "content": datasets.Value("string")})
+        elif self.config.schema == "seacrowd_ssp":
+            features = schemas.ssp_features
+        else:
+            raise ValueError(f"Invalid schema: '{self.config.schema}'")
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        """
+        Returns SplitGenerators.
+        """
+
+        path = dl_manager.download_and_extract(_URLS)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "path": path,
+                    "split": "train",
+                },
+            )
+        ]
+
+    def _generate_examples(self, path: Path, split: str) -> Tuple[int, Dict]:
+        """
+        Yields examples as (key, example) tuples.
+        """
+        file_paths = []
+        for root, _, files in os.walk(path):
+            for name in files:
+                if name.endswith(".json"):
+                    file_paths.append(os.path.join(root, name))
+
+        for idx, file_path in enumerate(file_paths):
+            with open(file_path, "r", encoding="utf-8") as file:
+                data = json.load(file)
+
+            if self.config.schema == "source":
+                x = {
+                    "url": data["url"],
+                    "date": data["date"],
+                    "title": data["title"],
+                    "content": data["content"],
+                }
+                yield idx, x
+
+            elif self.config.schema == "seacrowd_ssp":
+                x = {
+                    "id": str(idx),
+                    "text": data["content"],
+                }
+                yield idx, x
+
+            else:
+                raise ValueError(f"Invalid schema: '{self.config.schema}'")
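
For reference, _generate_examples treats every *.json file under the extracted archive as one article object with "url", "date", "title", and "content" keys. Below is a minimal sketch of that per-file layout; the file name and all field values are invented for illustration.

import json

# Hypothetical article record; every key below is one the loader reads.
article = {
    "url": "https://example.com/berita/contoh-artikel",  # hypothetical
    "date": "2018-01-31",  # hypothetical
    "title": "Contoh Judul Berita",  # hypothetical ("Example News Headline")
    "content": "Isi artikel berita dalam bahasa Indonesia.",  # hypothetical
}

with open("article-0001.json", "w", encoding="utf-8") as f:
    json.dump(article, f, ensure_ascii=False)

# Reading it back mirrors what the loader does for each file it finds:
with open("article-0001.json", "r", encoding="utf-8") as f:
    data = json.load(f)
assert data["content"] == article["content"]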
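
And a hedged usage sketch, assuming the script above is saved locally as id_newspaper_2018.py: the config names come from BUILDER_CONFIGS, only a train split is defined, and trust_remote_code is required by recent versions of the datasets library to run a local script.

import datasets

# Source schema: one example per article with url/date/title/content.
source_ds = datasets.load_dataset(
    "id_newspaper_2018.py",
    name="id_newspaper_2018_source",
    split="train",
    trust_remote_code=True,
)

# SEACrowd ssp schema: id/text pairs for self-supervised pretraining.
ssp_ds = datasets.load_dataset(
    "id_newspaper_2018.py",
    name="id_newspaper_2018_seacrowd_ssp",
    split="train",
    trust_remote_code=True,
)

print(source_ds[0]["title"])
print(ssp_ds[0]["text"][:80])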