Datasets:
ruanchaves
committed on
Commit
·
9fb2ad1
1
Parent(s):
448569e
add other, ner fields
Browse files- hashset_manual.py +16 -9
hashset_manual.py
CHANGED
@@ -48,6 +48,8 @@ class HashSetManual(datasets.GeneratorBasedBuilder):
|
|
48 |
"gold_position": datasets.Value("int32"),
|
49 |
"english": datasets.Value("bool"),
|
50 |
"hindi": datasets.Value("bool"),
|
|
|
|
|
51 |
"annotator_id": datasets.Value("int32"),
|
52 |
"annotation_id": datasets.Value("int32"),
|
53 |
"created_at": datasets.Value("timestamp[us]"),
|
@@ -76,22 +78,25 @@ class HashSetManual(datasets.GeneratorBasedBuilder):
|
|
76 |
|
77 |
def read_language_labels(field):
|
78 |
mix_label = "Hashtag has a mix of english and hindi tokens"
|
79 |
-
|
|
|
80 |
try:
|
81 |
record = json.loads(field)
|
82 |
except json.decoder.JSONDecodeError:
|
83 |
record = {"choices": [field]}
|
84 |
|
|
|
|
|
|
|
|
|
85 |
if mix_label in record["choices"]:
|
86 |
english = True
|
87 |
hindi = True
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
hindi = False
|
94 |
-
return english, hindi
|
95 |
|
96 |
def read_entities(field):
|
97 |
try:
|
@@ -125,7 +130,7 @@ class HashSetManual(datasets.GeneratorBasedBuilder):
|
|
125 |
|
126 |
records = pd.read_csv(filepath).to_dict("records")
|
127 |
for idx, row in enumerate(records):
|
128 |
-
english, hindi = read_language_labels(row["mutlitoken"])
|
129 |
yield idx, {
|
130 |
"index": row["Unnamed: 0"],
|
131 |
"hashtag": row["Hashtag"],
|
@@ -135,6 +140,8 @@ class HashSetManual(datasets.GeneratorBasedBuilder):
|
|
135 |
"gold_position": get_gold_position(row["topk"]),
|
136 |
"english": english,
|
137 |
"hindi": hindi,
|
|
|
|
|
138 |
"annotator_id": int(row["annotator"]),
|
139 |
"annotation_id": int(row["annotation_id"]),
|
140 |
"created_at": row["created_at"],
|
|
|
48 |
"gold_position": datasets.Value("int32"),
|
49 |
"english": datasets.Value("bool"),
|
50 |
"hindi": datasets.Value("bool"),
|
51 |
+
"other": datasets.Value("bool"),
|
52 |
+
"ner": datasets.Value("bool"),
|
53 |
"annotator_id": datasets.Value("int32"),
|
54 |
"annotation_id": datasets.Value("int32"),
|
55 |
"created_at": datasets.Value("timestamp[us]"),
|
|
|
78 |
|
79 |
def read_language_labels(field):
    """Decode an annotator's label field into four boolean flags.

    The field is expected to be a JSON object with a "choices" list of
    label strings; a value that fails to parse as JSON is treated as a
    single bare label. Returns a tuple
    ``(english, hindi, other, ner)`` where english/hindi are both set by
    the mixed-language label, and other/ner by their dedicated labels.
    """
    # Label strings must match the annotation tool's output exactly
    # (note the trailing space on the "non english token" label).
    mix_label = "Hashtag has a mix of english and hindi tokens"
    other_label = "Hashtag has non english token "
    ner_label = "Hashtag has named entities"

    try:
        record = json.loads(field)
    except json.decoder.JSONDecodeError:
        # Not JSON: interpret the raw string as a single selected label.
        record = {"choices": [field]}

    choices = record["choices"]
    # The mixed label implies both languages are present.
    english = hindi = mix_label in choices
    other = other_label in choices
    ner = ner_label in choices
    return english, hindi, other, ner
|
|
|
|
100 |
|
101 |
def read_entities(field):
|
102 |
try:
|
|
|
130 |
|
131 |
records = pd.read_csv(filepath).to_dict("records")
|
132 |
for idx, row in enumerate(records):
|
133 |
+
english, hindi, other, ner = read_language_labels(row["mutlitoken"])
|
134 |
yield idx, {
|
135 |
"index": row["Unnamed: 0"],
|
136 |
"hashtag": row["Hashtag"],
|
|
|
140 |
"gold_position": get_gold_position(row["topk"]),
|
141 |
"english": english,
|
142 |
"hindi": hindi,
|
143 |
+
"other": other,
|
144 |
+
"ner": ner,
|
145 |
"annotator_id": int(row["annotator"]),
|
146 |
"annotation_id": int(row["annotation_id"]),
|
147 |
"created_at": row["created_at"],
|