"""HashSet dataset."""

import json

import datasets
import pandas as pd

_CITATION = """

@article{kodali2022hashset,

  title={HashSet--A Dataset For Hashtag Segmentation},

  author={Kodali, Prashant and Bhatnagar, Akshala and Ahuja, Naman and Shrivastava, Manish and Kumaraguru, Ponnurangam},

  journal={arXiv preprint arXiv:2201.06741},

  year={2022}

}

"""

_DESCRIPTION = """

Hashset is a new dataset consisiting on 1.9k manually annotated and 3.3M loosely supervised tweets for testing the 

efficiency of hashtag segmentation models. We compare State of The Art Hashtag Segmentation models on Hashset and other 

baseline datasets (STAN and BOUN). We compare and analyse the results across the datasets to argue that HashSet can act 

as a good benchmark for hashtag segmentation tasks.



HashSet Manual: contains 1.9k manually annotated hashtags. Each row consists of the hashtag, segmented 

hashtag ,named entity annotations, a list storing whether the hashtag contains mix of hindi and english 

tokens and/or contains non-english tokens.

"""
_URL = "https://raw.githubusercontent.com/prashantkodali/HashSet/master/datasets/hashset/HashSet-Manual.csv"

class HashSetManual(datasets.GeneratorBasedBuilder):
    """Builder for the manually annotated HashSet-Manual dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    "spans": datasets.Sequence(
                        {
                            "start": datasets.Value("int32"),
                            "end": datasets.Value("int32"),
                            "text": datasets.Value("string")
                        }
                    ),
                    "source": datasets.Value("string"),
                    "gold_position": datasets.Value("int32"),
                    "mix": datasets.Value("bool"),
                    "other": datasets.Value("bool"),
                    "ner": datasets.Value("bool"),
                    "annotator_id": datasets.Value("int32"),
                    "annotation_id": datasets.Value("int32"),
                    "created_at": datasets.Value("timestamp[us]"),
                    "updated_at": datasets.Value("timestamp[us]"),
                    "lead_time": datasets.Value("float64"),
                    "rank": datasets.Sequence(
                        {
                            "position": datasets.Value("int32"),
                            "candidate": datasets.Value("string")
                        }
                    )
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/prashantkodali/HashSet/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files}),
        ]

    def _generate_examples(self, filepath):

        def read_language_labels(field):
            # Language annotations are stored as a JSON object with a
            # "choices" list; rows with a single choice hold a bare string,
            # so fall back to wrapping the raw field in a one-item list.
            mix_label = "Hashtag has a mix of english and hindi tokens"
            other_label = "Hashtag has non english token "  # trailing space matches the raw label
            ner_label = "Hashtag has named entities"
            try:
                record = json.loads(field)
            except json.decoder.JSONDecodeError:
                record = {"choices": [field]}

            mix = mix_label in record["choices"]
            other = other_label in record["choices"]
            ner = ner_label in record["choices"]
            return mix, other, ner

        def read_entities(field):
            # Named-entity spans are stored as a JSON list of objects with
            # "start", "end" and "text" keys; fields that fail to parse are
            # treated as having no spans.
            try:
                record = json.loads(field)
            except json.decoder.JSONDecodeError:
                return []
            return [
                {
                    "start": row.get("start"),
                    "end": row.get("end"),
                    "text": row.get("text"),
                }
                for row in record
            ]

        def read_rank(row):
            # Columns "1".."10" of the CSV hold the top-10 candidate
            # segmentations; positions are ints to match the int32 feature.
            return [
                {"position": i + 1, "candidate": row[str(i + 1)]}
                for i in range(10)
            ]

        def get_gold_position(field):
            # The gold rank is wrapped in "$" markers; strip them and parse.
            # Non-numeric values mean no gold position is recorded.
            try:
                return int(field.strip("$"))
            except ValueError:
                return None

        records = pd.read_csv(filepath).to_dict("records")
        for idx, row in enumerate(records):
            # "mutlitoken" is the column name as spelled in the source CSV.
            mix, other, ner = read_language_labels(row["mutlitoken"])
            yield idx, {
                "index": row["Unnamed: 0"],
                "hashtag": row["Hashtag"],
                "segmentation": row["Final Segmentation"],
                "spans": read_entities(row["charner"]),
                "source": row["Source"],
                "gold_position": get_gold_position(row["topk"]),
                "mix": mix,
                "other": other,
                "ner": ner,
                "annotator_id": int(row["annotator"]),
                "annotation_id": int(row["annotation_id"]),
                "created_at": row["created_at"],
                "updated_at": row["updated_at"],
                "lead_time": row["lead_time"],
                "rank": read_rank(row),
            }
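
# A minimal usage sketch, assuming this script is saved locally as
# `hashset_manual.py` (the file name is illustrative, not part of the script):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("hashset_manual.py", split="test")
#     example = dataset[0]
#     print(example["hashtag"], "->", example["segmentation"])
#     print("mix:", example["mix"], "spans:", example["spans"])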