File size: 2,609 Bytes
7f2eb9f
cbccf14
a0a398d
0f86d75
4747b67
821db02
1699be0
 
0f86d75
 
cbccf14
 
0f86d75
 
cbccf14
 
 
4747b67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cbccf14
 
 
afe164d
821db02
cbccf14
afe164d
cbccf14
 
 
 
 
 
 
0f86d75
cbccf14
 
 
0f86d75
 
 
 
 
 
 
cbccf14
 
0f86d75
 
 
 
 
cbccf14
 
 
 
 
 
0f86d75
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import os
import xml.etree.ElementTree as ET
import datasets
from datasets import GeneratorBasedBuilder, DatasetInfo, Split, SplitGenerator, Features, Value, Sequence

# Google Drive direct-download link for the UzABSA XML data file.
_BASE_URL = "https://drive.google.com/uc?export=download&id=15YklLKAaNXomOAkEAMmLAIIFhS9o-ywS"


class UzABSA(GeneratorBasedBuilder):
    """Dataset builder for UzABSA: aspect-based sentiment analysis in Uzbek.

    Parses a SemEval-2014-style XML file of ``<sentence>`` elements, each
    carrying free text plus optional aspect terms (with character offsets)
    and aspect categories, and exposes them as a single TRAIN split.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="uzabsa", version=VERSION,
                               description="UZABSA dataset for sentiment analysis in Uzbek"),
    ]

    def _info(self):
        """Return the schema of one generated example."""
        return DatasetInfo(
            description="UZABSA dataset for aspect-based sentiment analysis in Uzbek",
            features=Features({
                "sentence_id": Value("string"),
                "text": Value("string"),
                # One entry per <aspectTerm>; "from"/"to" are character
                # offsets of the term within "text".
                "aspect_terms": Sequence({
                    "term": Value("string"),
                    "polarity": Value("string"),
                    "from": Value("int32"),
                    "to": Value("int32"),
                }),
                # One entry per <aspectCategory>.
                "aspect_categories": Sequence({
                    "category": Value("string"),
                    "polarity": Value("string"),
                }),
            })
        )

    def _split_generators(self, dl_manager):
        """Download (and cache) the XML file; expose it as the TRAIN split."""
        downloaded_file = dl_manager.download_and_extract(_BASE_URL)
        return [
            SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": downloaded_file}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs parsed from the XML at *filepath*.

        Keys are the enumeration index rather than the sentence's own ``ID``
        attribute: ``datasets`` requires example keys to be unique and
        hashable, and a missing or duplicated ``ID`` attribute in the XML
        would otherwise abort generation. The raw ID (possibly ``None``) is
        still carried in the ``sentence_id`` field.
        """
        tree = ET.parse(filepath)
        root = tree.getroot()

        for idx, sentence in enumerate(root.findall("sentence")):
            # NOTE(review): attribute name assumed to be uppercase "ID" as in
            # the original script — confirm against the actual XML schema.
            sentence_id = sentence.get("ID")
            text_node = sentence.find("text")
            # Guard against a malformed sentence with no <text> child.
            text = text_node.text if text_node is not None else ""

            aspect_terms = []
            for aspect_term in sentence.findall("./aspectTerms/aspectTerm"):
                aspect_terms.append({
                    "term": aspect_term.get("term"),
                    "polarity": aspect_term.get("polarity"),
                    "from": int(aspect_term.get("from")),
                    "to": int(aspect_term.get("to")),
                })

            aspect_categories = []
            for aspect_category in sentence.findall("./aspectCategories/aspectCategory"):
                aspect_categories.append({
                    "category": aspect_category.get("category"),
                    "polarity": aspect_category.get("polarity"),
                })

            yield idx, {
                "sentence_id": sentence_id,
                "text": text,
                "aspect_terms": aspect_terms,
                "aspect_categories": aspect_categories,
            }