# Loading script for the Telugu-English code-switching (transliterated) dataset
import conllu
import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """ """

_DESCRIPTION = """Telugu English POS Codeswitch dataset.
               """

_HOMEPAGE = ""

_URL = "https://huggingface.co/datasets/anishka/CodeSwitching-TE-EN/blob/main/"
_TRAINING_FILE = "TWT-train.conllu"
_DEV_FILE = "TWT-dev.conllu"
_TEST_FILE = "TWT-test.conllu"
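
# Each split is a standard CoNLL-U file: one token per line with ten
# tab-separated columns (ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL,
# DEPS, MISC) and a blank line between sentences.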


class TeEnCodeSwitchConfig(datasets.BuilderConfig):
    """ Builder config for the Ancora Ca NER dataset """

    def __init__(self, **kwargs):
        """BuilderConfig for TeEnCodeSwitch.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class TeEnCodeSwitch(datasets.GeneratorBasedBuilder):
    """ Te-En-CodeSwitch dataset."""

    BUILDER_CONFIGS = [
        TeEnCodeSwitchConfig(
            name="Te-En-CodeSwitch",
            version=datasets.Version("0.0.1"),
            description="Te-En-CodeSwitch dataset"
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "lemmas": datasets.Sequence(datasets.Value("string")),
                    # Universal POS tags; the label set matches the tags that
                    # appear in the .conllu files.
                    "upos": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "NOUN",
                                "PUNCT",
                                "ADP",
                                "NUM",
                                "SYM",
                                "SCONJ",
                                "ADJ",
                                "PART",
                                "DET",
                                "CCONJ",
                                "PROPN",
                                "PRON",
                                "X",
                                "_",
                                "ADV",
                                "INTJ",
                                "VERB",
                                "AUX",
                            ]
                        )
                    ),
                    "xpos": datasets.Sequence(datasets.Value("string")),
                    "feats": datasets.Sequence(datasets.Value("string")),
                    "head": datasets.Sequence(datasets.Value("string")),
                    "deprel": datasets.Sequence(datasets.Value("string")),
                    "deps": datasets.Sequence(datasets.Value("string")),
                    "misc": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        logger.info("Downloading files: %s", urls_to_download)
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yields one example per CoNLL-U sentence."""
        # `filepath` is the single file belonging to this split, as passed via
        # gen_kwargs in _split_generators.
        row_id = 0
        with open(filepath, "r", encoding="utf-8") as data_file:
            # parse_incr streams sentences, so the whole file is never held
            # in memory at once.
            for sent in conllu.parse_incr(data_file):
                # Prefer the sentence id recorded in the CoNLL-U metadata,
                # falling back to a running counter.
                idx = sent.metadata.get("sent_id", row_id)

                tokens = [token["form"] for token in sent]

                # Use the raw sentence text when present; otherwise rebuild it
                # from the tokens.
                txt = sent.metadata.get("text", " ".join(tokens))

                yield row_id, {
                    "idx": str(idx),
                    "text": txt,
                    "tokens": tokens,
                    "lemmas": [token["lemma"] for token in sent],
                    "upos": [token["upos"] for token in sent],
                    "xpos": [token["xpos"] for token in sent],
                    "feats": [str(token["feats"]) for token in sent],
                    "head": [str(token["head"]) for token in sent],
                    "deprel": [str(token["deprel"]) for token in sent],
                    "deps": [str(token["deps"]) for token in sent],
                    "misc": [str(token["misc"]) for token in sent],
                }
                row_id += 1
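

# Minimal usage sketch, not part of the builder: run this module directly to
# smoke-test the loader. This assumes a `datasets` release that still supports
# script-based loading and network access to the files under _URL.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__)
    print(ds)
    sample = ds["train"][0]
    print(sample["tokens"])
    print(sample["upos"])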