"""ViHOS - Vietnamese Hate and Offensive Spans dataset"""

import csv

import pandas as pd

import datasets

_DESCRIPTION = """\
ViHOS is a dataset of Vietnamese hate and offensive spans annotated in social media texts.
"""

_HOMEPAGE = "https://huggingface.co/datasets/phusroyal/ViHOS"

_LICENSE = "mit"

_URLS = [
    # Use the "resolve" endpoint so the raw CSV is downloaded rather than the HTML blob page.
    "https://huggingface.co/datasets/phusroyal/ViHOS/resolve/main/test/test.csv"
]

class ViHOS(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("2.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "content": datasets.Value("string"),
                    "index_spans": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir[0],
                    "split": "test",
                },
            )
        ]

    @staticmethod
    def load_csv(filename):
        """Plain-csv helper (not used by the builder, which reads with pandas)."""
        data = []
        # Read the file with the stdlib csv reader, skipping empty rows
        with open(filename, "r", encoding="utf-8") as file:
            csv_reader = csv.reader(file)
            for row in csv_reader:
                if not row:
                    continue
                data.append(row)
        return data

    def _generate_examples(self, filepath, split):
        colnames = ["id", "Content", "Span ids"]

        data = pd.read_csv(filepath, names=colnames, header=None, sep=",", on_bad_lines="skip")

        # Yield one (key, example) pair per row, matching the features declared in _info.
        for i, row in data.iterrows():
            yield i, {
                "id": row["id"],
                "content": row["Content"],
                "index_spans": row["Span ids"],
            }
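
# Usage sketch (not part of the loading script itself): a minimal example of how this
# builder is typically exercised once the script lives in the "phusroyal/ViHOS" dataset
# repository referenced by _HOMEPAGE above. Kept as comments so the module stays
# import-safe; the split name matches the one declared in _split_generators.
#
#   from datasets import load_dataset
#
#   vihos = load_dataset("phusroyal/ViHOS", split="test")
#   print(vihos[0])  # -> {"id": ..., "content": ..., "index_spans": ...}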