from pathlib import Path
from typing import List

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import DEFAULT_SEACROWD_VIEW_NAME, DEFAULT_SOURCE_VIEW_NAME, Tasks

_DATASETNAME = "id_hoax_news"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME

_LANGUAGES = ["ind"]  # We follow the ISO 639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False
_CITATION = """\
@INPROCEEDINGS{8265649,
  author={Pratiwi, Inggrid Yanuar Risca and Asmara, Rosa Andrie and Rahutomo, Faisal},
  booktitle={2017 11th International Conference on Information & Communication Technology and System (ICTS)},
  title={Study of hoax news detection using naïve bayes classifier in Indonesian language},
  year={2017},
  pages={73-78},
  doi={10.1109/ICTS.2017.8265649}}
"""

_DESCRIPTION = """\
This dataset was collected to build an automatic hoax news detector and contains 250 hoax and valid news articles in the Indonesian language.
Each sample was annotated by three reviewers, and the final label was obtained by majority voting among them.
"""

_HOMEPAGE = "https://data.mendeley.com/datasets/p3hfgr5j3m/1"

_LICENSE = "Creative Commons Attribution 4.0 International"

_URLs = {
    "train": "https://data.mendeley.com/public-files/datasets/p3hfgr5j3m/files/38bfcff2-8a32-4920-9c26-4f63b5b2dad8/file_downloaded",
}

_SUPPORTED_TASKS = [Tasks.HOAX_NEWS_CLASSIFICATION]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class IdHoaxNews(datasets.GeneratorBasedBuilder):
    """Indonesian hoax news classification dataset: 250 news articles labeled as Valid or Hoax."""

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="id_hoax_news_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="Hoax News source schema",
            schema="source",
            subset_id="id_hoax_news",
        ),
        SEACrowdConfig(
            name="id_hoax_news_seacrowd_text",
            version=datasets.Version(_SEACROWD_VERSION),
            description="Hoax News Nusantara schema",
            schema="seacrowd_text",
            subset_id="id_hoax_news",
        ),
    ]

    DEFAULT_CONFIG_NAME = "id_hoax_news_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "index": datasets.Value("string"),
                    "news": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(["Valid", "Hoax"])
        else:
            raise ValueError(f"Invalid config schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # download_and_extract returns the directory that contains the CSV file
        extracted_dir = Path(dl_manager.download_and_extract(_URLs["train"]))
        data_files = {
            "train": extracted_dir / "250 news with valid hoax label.csv",
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_files["train"]},
            ),
        ]

    def _generate_examples(self, filepath: Path):
        # The source file is a semicolon-delimited CSV in which one article can
        # span several physical lines; a line ending in ";Valid" or ";Hoax"
        # closes the current article and carries its label.
        with open(filepath, "r", encoding="ISO-8859-1") as news_file:
            lines = news_file.readlines()

        news = []
        labels = []

        curr_news = ""
        for line in lines[1:]:  # skip the header row
            line = line.replace("\n", "")
            if ";Valid" in line:
                curr_news += line.replace(";Valid", "")
                news.append(curr_news)
                labels.append("Valid")
                curr_news = ""
            elif ";Hoax" in line:
                curr_news += line.replace(";Hoax", "")
                news.append(curr_news)
                labels.append("Hoax")
                curr_news = ""
            else:
                curr_news += line + " "

        if self.config.schema == "source":
            for i in range(len(news)):
                ex = {"index": str(i), "news": news[i], "label": labels[i]}
                yield i, ex
        elif self.config.schema == "seacrowd_text":
            for i in range(len(news)):
                ex = {"id": str(i), "text": news[i], "label": labels[i]}
                yield i, ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
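

if __name__ == "__main__":
    # Minimal usage sketch, not part of the SeaCrowd loader itself: it assumes
    # a recent `datasets` release (which requires trust_remote_code=True for
    # script-based datasets) and network access to the Mendeley download URL.
    dataset = datasets.load_dataset(__file__, name="id_hoax_news_source", trust_remote_code=True)
    print(dataset["train"][0])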