from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@incollection{nguyen2021vietnamese,
  title={Vietnamese Complaint Detection on E-Commerce Websites},
  author={Nguyen, Nhung Thi-Hong and Ha, Phuong Phan-Dieu and Nguyen, Luan Thanh and Nguyen, Kiet Van and Nguyen, Ngan Luu-Thuy},
  booktitle={New Trends in Intelligent Software Methodologies, Tools and Techniques},
  pages={618--629},
  year={2021},
  publisher={IOS Press}
}
"""

_DATASETNAME = "uit_viocd"

_DESCRIPTION = """\
The UIT-ViOCD dataset comprises 5,485 reviews from e-commerce sites across four categories: fashion, cosmetics,
applications, and phones. Each review is human-annotated with a label of 1 for complaints and 0 for non-complaints.
The dataset is split into training, validation, and test sets in an approximate 80:10:10 ratio.
"""

_HOMEPAGE = "https://huggingface.co/datasets/tarudesu/ViOCD"

_LANGUAGES = ["vie"]

_LICENSE = Licenses.UNKNOWN.value

_LOCAL = False

_URLS = {
    "train": "https://huggingface.co/datasets/tarudesu/ViOCD/resolve/main/train.csv?download=true",
    "val": "https://huggingface.co/datasets/tarudesu/ViOCD/resolve/main/val.csv?download=true",
    "test": "https://huggingface.co/datasets/tarudesu/ViOCD/resolve/main/test.csv?download=true",
}

_SUPPORTED_TASKS = [Tasks.COMPLAINT_DETECTION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class UITVIOCDDataset(datasets.GeneratorBasedBuilder):
    """The UIT-ViOCD dataset includes 5,485 reviews e-commerce sites across four categories: fashion, cosmetics, applications, and phones."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # Order matters: `datasets.ClassLabel` interprets the integers yielded by
    # `_generate_examples` as class indices, so names[i] must equal raw label i
    # (0 = non-complaint, 1 = complaint). The previous ordering [1, 0] inverted
    # the label meanings.
    LABEL_CLASSES = [0, 1]

    SEACROWD_SCHEMA_NAME = "text"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "review": datasets.Value("string"),
                    "review_tokenize": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=self.LABEL_CLASSES),
                    "domain": datasets.Value("string"),
                }
            )

        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.text_features(self.LABEL_CLASSES)

        else:
            raise ValueError(f"Invalid config schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators for the train, validation, and test CSV files."""
        data_dir = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir["val"],
                },
            ),
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        """Yields (key, example) tuples read from one split's CSV file."""
        df = pd.read_csv(filepath)

        if self.config.schema == "source":
            for key, example in df.iterrows():
                yield key, {
                    "review": example["review"],
                    "review_tokenize": example["review_tokenize"],
                    "label": example["label"],
                    "domain": example["domain"],
                }

        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            for key, example in df.iterrows():
                yield key, {"id": str(key), "text": str(example["review"]), "label": int(example["label"])}