ruanchaves committed
Commit 11ba7dc · 1 Parent(s): 48f6547

Upload hashset_manual.py

Files changed (1)
  1. hashset_manual.py +144 -0
hashset_manual.py ADDED
@@ -0,0 +1,144 @@
+ """HashSet dataset."""
+
+ import json
+
+ import datasets
+ import pandas as pd
+
+ _CITATION = """
+ @article{kodali2022hashset,
+   title={HashSet--A Dataset For Hashtag Segmentation},
+   author={Kodali, Prashant and Bhatnagar, Akshala and Ahuja, Naman and Shrivastava, Manish and Kumaraguru, Ponnurangam},
+   journal={arXiv preprint arXiv:2201.06741},
+   year={2022}
+ }
+ """
+
+ _DESCRIPTION = """
+ HashSet is a new dataset consisting of 1.9k manually annotated and 3.3M loosely supervised tweets for testing the
+ efficiency of hashtag segmentation models. We compare state-of-the-art hashtag segmentation models on HashSet and other
+ baseline datasets (STAN and BOUN). We compare and analyse the results across the datasets to argue that HashSet can act
+ as a good benchmark for hashtag segmentation tasks.
+
+ HashSet Manual: contains 1.9k manually annotated hashtags. Each row consists of the hashtag, the segmented
+ hashtag, named-entity annotations, and labels indicating whether the hashtag mixes Hindi and English
+ tokens and/or contains non-English tokens.
+ """
+
+ _URL = "https://raw.githubusercontent.com/prashantkodali/HashSet/master/datasets/hashset/HashSet-Manual.csv"
+
+
+ class HashSetManual(datasets.GeneratorBasedBuilder):
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "index": datasets.Value("int32"),
+                     "hashtag": datasets.Value("string"),
+                     "segmentation": datasets.Value("string"),
+                     "spans": datasets.Sequence(
+                         {
+                             "start": datasets.Value("int32"),
+                             "end": datasets.Value("int32"),
+                             "text": datasets.Value("string"),
+                         }
+                     ),
+                     "source": datasets.Value("string"),
+                     "gold_position": datasets.Value("int32"),
+                     "english": datasets.Value("bool"),
+                     "hindi": datasets.Value("bool"),
+                     "annotator_id": datasets.Value("int32"),
+                     "annotation_id": datasets.Value("int32"),
+                     "created_at": datasets.Value("timestamp[us]"),
+                     "updated_at": datasets.Value("timestamp[us]"),
+                     "lead_time": datasets.Value("float64"),
+                     "rank": datasets.Sequence(
+                         {
+                             "position": datasets.Value("int32"),
+                             "candidate": datasets.Value("string"),
+                         }
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/prashantkodali/HashSet/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_files = dl_manager.download(_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files}),
+         ]
+
+     def _generate_examples(self, filepath):
+
+         def read_language_labels(field):
+             # Language labels as exported by the annotation tool; the trailing
+             # space in `hindi_label` is kept to match the label string in the CSV.
+             mix_label = "Hashtag has a mix of english and hindi tokens"
+             hindi_label = "Hashtag has non english token "
+             try:
+                 record = json.loads(field)
+             except json.JSONDecodeError:
+                 # Plain-string fields are wrapped to mimic the JSON export.
+                 record = {"choices": [field]}
+
+             if mix_label in record["choices"]:
+                 english = True
+                 hindi = True
+             elif hindi_label in record["choices"]:
+                 english = False
+                 hindi = True
+             else:
+                 english = True
+                 hindi = False
+             return english, hindi
+
+         def read_entities(field):
+             # Named-entity spans are stored as a JSON list of dicts; fields
+             # that fail to parse are treated as having no entities.
+             try:
+                 record = json.loads(field)
+             except json.JSONDecodeError:
+                 return []
+             output = []
+             for row in record:
+                 output.append({
+                     "start": row.get("start"),
+                     "end": row.get("end"),
+                     "text": row.get("text"),
+                 })
+             return output
+
+         def read_rank(row):
+             # Candidate segmentations live in CSV columns "1" through "10".
+             output = []
+             for i in range(10):
+                 output.append({
+                     "position": i + 1,  # int, matching the int32 feature type
+                     "candidate": row[str(i + 1)],
+                 })
+             return output
+
+         def get_gold_position(field):
+             # The gold candidate's rank comes wrapped in "$" characters; a
+             # non-numeric value means the gold segmentation is not among the candidates.
+             output = field.strip("$")
+             try:
+                 return int(output)
+             except ValueError:
+                 return None
+
+         records = pd.read_csv(filepath).to_dict("records")
+         for idx, row in enumerate(records):
+             # "mutlitoken" is the column's actual (misspelled) name in the source CSV.
+             english, hindi = read_language_labels(row["mutlitoken"])
+             yield idx, {
+                 "index": row["Unnamed: 0"],  # original CSV row index
+                 "hashtag": row["Hashtag"],
+                 "segmentation": row["Final Segmentation"],
+                 "spans": read_entities(row["charner"]),
+                 "source": row["Source"],
+                 "gold_position": get_gold_position(row["topk"]),
+                 "english": english,
+                 "hindi": hindi,
+                 "annotator_id": int(row["annotator"]),
+                 "annotation_id": int(row["annotation_id"]),
+                 "created_at": row["created_at"],
+                 "updated_at": row["updated_at"],
+                 "lead_time": row["lead_time"],
+                 "rank": read_rank(row),
+             }
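
The script can then be loaded like any other Hugging Face dataset loading script. A minimal usage sketch, assuming the file above is saved locally as hashset_manual.py (recent `datasets` releases may additionally require trust_remote_code=True for script-based datasets):

    import datasets

    # load_dataset resolves the local script, downloads the CSV from _URL,
    # and materializes the single `test` split defined in _split_generators.
    dataset = datasets.load_dataset("hashset_manual.py", split="test")

    print(dataset)
    print(dataset[0]["hashtag"], "->", dataset[0]["segmentation"])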