File size: 4,482 Bytes
f80bebc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e08abd6
f80bebc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104

import os

import datasets
from datasets import ClassLabel, Sequence, Value


_SNAP10K_CITATION = """\

"""

_SNAP10K_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

class SNAP10KConfig(datasets.BuilderConfig):
    """BuilderConfig for one SNAP10K domain.

    Carries the per-domain metadata (feature/label column names, download
    URL, citation, homepage) used by the SNAP10K builder. The version is
    fixed at 1.0.0.
    """

    def __init__(
        self,
        text_features,
        label_column,
        data_url,
        data_dir,
        citation,
        url,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        # Pin the config version; everything else is forwarded to the base class.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)

        self.text_features = text_features
        self.label_column = label_column
        self.label_classes = label_classes
        self.data_url = data_url
        self.data_dir = data_dir
        self.citation = citation
        self.url = url
        # Hook applied to raw label values; defaults to the identity.
        self.process_label = process_label

class SNAP10K(datasets.GeneratorBasedBuilder):
    """Sentiment-classification builder for SNAP Amazon reviews, one config per domain.

    Each data file is a tab-separated text file whose columns are:
    id, domain, label (POS/NEG), rank, sentence.
    """

    # Amazon product-category domains; one BuilderConfig is created per entry.
    domain_list = ['Automotive_5', 'Electronics_5', 'Industrial_and_Scientific_5', 'Kindle_Store_5', 'Cell_Phones_and_Accessories_5', 'Musical_Instruments_5', 'Office_Products_5', 'Patio_Lawn_and_Garden_5', 'Sports_and_Outdoors_5', 'Luxury_Beauty_5', 'Grocery_and_Gourmet_Food_5', 'Digital_Music_5', 'Tools_and_Home_Improvement_5', 'Pet_Supplies_5', 'Prime_Pantry_5', 'Toys_and_Games_5', 'Movies_and_TV_5', 'Home_and_Kitchen_5', 'Arts_Crafts_and_Sewing_5', 'Video_Games_5', 'CDs_and_Vinyl_5']

    BUILDER_CONFIGS = [
        SNAP10KConfig(
            name=domain_name,
            description=f'comments of {domain_name}.',
            text_features={'sentence': 'sentence', 'domain': 'domain'},
            label_classes=['POS', 'NEG'],
            label_column='label',
            citation="",
            data_dir="",
            data_url=r"https://huggingface.co/datasets/kuroneko3578/snap21/resolve/main/data.7z",
            url='https://github.com/ws719547997/LNB-DA',
        )
        for domain_name in domain_list
    ]

    def _info(self):
        """Return the DatasetInfo: feature schema, homepage and citation."""
        features = {
            'id': Value(dtype='int32', id=None),
            'domain': Value(dtype='string', id=None),
            'label': ClassLabel(num_classes=2, names=['POS', 'NEG'], names_file=None, id=None),
            'rank': Value(dtype='string', id=None),
            'sentence': Value(dtype='string', id=None),
        }

        return datasets.DatasetInfo(
            description=_SNAP10K_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _SNAP10K_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and return train/dev/test split generators.

        Fix: the original built paths with Windows-only backslash literals
        (rf'{...}\\data\\test\\...'), which do not resolve on POSIX systems;
        os.path.join is portable. A leftover debug print was also removed.
        """
        downloaded_file = dl_manager.download_and_extract(self.config.data_url)

        def _split_file(split_dir):
            # Layout inside the archive: <extracted>/data/<split>/<config name>.txt
            return os.path.join(downloaded_file, 'data', split_dir, f'{self.config.name}.txt')

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": _split_file('test'), "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_file": _split_file('dev'), "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": _split_file('train'), "split": "train"},
            ),
        ]

    def _generate_examples(self, data_file, split):
        """Yield (key, example) pairs from one tab-separated data file.

        Blank lines and malformed lines (fewer than 5 tab-separated fields)
        are skipped rather than raising.
        """
        with open(data_file, 'r', encoding='utf-8') as f:
            for line in f:
                lin = line.strip()
                if not lin:
                    continue
                lin_sp = lin.split('\t')
                if len(lin_sp) < 5:
                    continue
                # Column order in the file: id, domain, label, rank, sentence.
                yield lin_sp[0], {
                    'sentence': lin_sp[4],
                    'domain': lin_sp[1],
                    'label': lin_sp[2],
                    'id': lin_sp[0],
                    'rank': lin_sp[3],
                }