Tasks:
Text Classification
Modalities:
Text
Sub-tasks:
hate-speech-detection
Size:
10K - 100K
reader running for all langs

offenseval_2020.py +37 -21

offenseval_2020.py CHANGED
@@ -26,12 +26,6 @@ logger = datasets.logging.get_logger(__name__)
 
 
 _CITATION = """\
-@inproceedings{zampieri-etal-2020-semeval,
-    title = {{SemEval-2020 Task 12: Multilingual Offensive Language Identification in Social Media (OffensEval 2020)}},
-    author = {Zampieri, Marcos and Nakov, Preslav and Rosenthal, Sara and Atanasova, Pepa and Karadzhov, Georgi and Mubarak, Hamdy and Derczynski, Leon and Pitenis, Zeses and \c{C}\"{o}ltekin, \c{C}a\u{g}r{\i}},
-    booktitle = {Proceedings of SemEval},
-    year = {2020}
-}
 """
 
 _DESCRIPTION = """\
@@ -87,8 +81,9 @@ class OffensEval2020(datasets.GeneratorBasedBuilder):
             features=datasets.Features(
                 {
                     "id": datasets.Value("string"),
+                    "original_id": datasets.Value("string"),
                     "text": datasets.Value("string"),
-                    "
+                    "subtask_a": datasets.features.ClassLabel(
                         names=[
                             "OFF",
                             "NOT",
@@ -104,21 +99,42 @@ class OffensEval2020(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         train_text = dl_manager.download_and_extract(f"offenseval-{self.config.name}-training-v1.tsv")
-
-
+        test_labels = dl_manager.download_and_extract(f"offenseval-{self.config.name}-labela-v1.csv")
+        test_text = dl_manager.download_and_extract(f"offenseval-{self.config.name}-test-v1.tsv")
 
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath":
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_text, "split": 'train'}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": {'labels':test_labels, 'text':test_text}, "split": 'test'}),
         ]
 
-    def _generate_examples(self, filepath):
-
-
-
-
-
-            instance
-
-            instance.pop(
-
-
+    def _generate_examples(self, filepath, split=None):
+        if split == "train":
+            logger.info("⏳ Generating examples from = %s", filepath)
+            with open(filepath, encoding="utf-8") as f:
+                OffensEval2020_reader = csv.DictReader(f, delimiter="\t", quotechar='"')
+                guid = 0
+                for instance in OffensEval2020_reader:
+                    instance["text"] = instance.pop("tweet")
+                    instance["original_id"] = instance.pop("id")
+                    instance["id"] = str(guid)
+                    yield guid, instance
+                    guid += 1
+        elif split == 'test':
+            logger.info("⏳ Generating examples from = %s", filepath['text'])
+            labeldict = {}
+            with open(filepath['labels']) as labels:
+                for line in labels:
+                    line = line.strip().split(',')
+                    if line:
+                        labeldict[line[0]] = line[1]
+            with open(filepath['text']) as f:
+                OffensEval2020_reader = csv.DictReader(f, delimiter="\t", quotechar='"')
+                guid = 0
+                for instance in OffensEval2020_reader:
+                    instance["text"] = instance.pop("tweet")
+                    instance["original_id"] = instance.pop("id")
+                    instance["id"] = str(guid)
+                    instance["subtask_a"] = labeldict[instance["original_id"]]
+                    yield guid, instance
+                    guid += 1
+
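The train branch of `_generate_examples` renames the `tweet` column to `text`, keeps the source id as `original_id`, and re-keys examples with a running index. A self-contained sketch of that pattern on an in-memory TSV (the row below is invented for illustration; the real script reads `offenseval-<config>-training-v1.tsv`):

import csv
import io

sample_tsv = "id\ttweet\tsubtask_a\n12345\tan example tweet\tNOT\n"  # made-up row
reader = csv.DictReader(io.StringIO(sample_tsv), delimiter="\t", quotechar='"')
for guid, instance in enumerate(reader):
    instance["text"] = instance.pop("tweet")      # rename the column, as the script does
    instance["original_id"] = instance.pop("id")  # preserve the source id
    instance["id"] = str(guid)                    # running index becomes the new id
    print(guid, instance)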
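The test branch joins two downloads: a labels CSV mapping original ids to `OFF`/`NOT`, and the test TSV with the tweets. A sketch of that join on invented in-memory data:

import csv
import io

labels_csv = "13579,OFF\n24680,NOT\n"                                   # made-up labels file
test_tsv = "id\ttweet\n13579\tfirst example\n24680\tsecond example\n"   # made-up test file

labeldict = {}
for line in labels_csv.splitlines():
    parts = line.strip().split(",")
    if len(parts) == 2:
        labeldict[parts[0]] = parts[1]   # original id -> gold label

reader = csv.DictReader(io.StringIO(test_tsv), delimiter="\t", quotechar='"')
for guid, instance in enumerate(reader):
    instance["text"] = instance.pop("tweet")
    instance["original_id"] = instance.pop("id")
    instance["id"] = str(guid)
    instance["subtask_a"] = labeldict[instance["original_id"]]  # attach the gold label
    print(guid, instance)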
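With the builder in place, the dataset can be loaded through `datasets.load_dataset`; the script path and config name below are assumptions for illustration, not taken from this commit:

from datasets import load_dataset

# Hypothetical local path and config name; newer datasets versions may also
# require trust_remote_code=True to run a loading script.
ds = load_dataset("./offenseval_2020.py", "en")
print(ds["train"][0])  # keys: 'id', 'original_id', 'text', 'subtask_a' (stored as a ClassLabel integer id)
print(ds["test"][0])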