priamai committed on
Commit 8b8195b
1 Parent(s): 8543b6e

Update openthreatner.py

Files changed (1):
  openthreatner.py +164 -145
openthreatner.py CHANGED
@@ -1,146 +1,165 @@
- # coding=utf-8
- # Copyright 2020 HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """The Open Threat dataset"""
-
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
- _DESCRIPTION = """\
- TBD
- """
-
- _CITATION = """\
- TBD
- """
-
-
- # replace this url with our repository or the backend url
- _URL = "https://raw.githubusercontent.com/leondz/emerging_entities_17/master/"
- _TRAINING_FILE = "wnut17train.conll"
- _DEV_FILE = "emerging.dev.conll"
- _TEST_FILE = "emerging.test.annotated"
-
- class OurDatasetConfig(datasets.BuilderConfig):
-     """The Open NER dataset."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for Open Threat dataset.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(OurDatasetConfig, self).__init__(**kwargs)
-
-
- class OurDataset(datasets.GeneratorBasedBuilder):
-     """The Open NER dataset Entities Dataset."""
-
-     BUILDER_CONFIGS = [
-         OurDatasetConfig(
-             name="Open Threat", version=datasets.Version("1.0.0"), description="The Open Cyber Threat Entities Dataset"
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "ner_tags": datasets.Sequence(
-                         # TODO here: replace with the appropriate list
-                         datasets.features.ClassLabel(
-                             names=[
-                                 "O",
-                                 "B-corporation",
-                                 "I-corporation",
-                                 "B-creative-work",
-                                 "I-creative-work",
-                                 "B-group",
-                                 "I-group",
-                                 "B-location",
-                                 "I-location",
-                                 "B-person",
-                                 "I-person",
-                                 "B-product",
-                                 "I-product",
-                             ]
-                         )
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-             # TODO replace here with our website
-             homepage="http://noisy-text.github.io/2017/emerging-rare-entities.html",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         urls_to_download = {
-             "train": f"{_URL}{_TRAINING_FILE}",
-             "dev": f"{_URL}{_DEV_FILE}",
-             "test": f"{_URL}{_TEST_FILE}",
-         }
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         logger.info("⏳ Generating examples from = %s", filepath)
-         with open(filepath, encoding="utf-8") as f:
-             current_tokens = []
-             current_labels = []
-             sentence_counter = 0
-             for row in f:
-                 row = row.rstrip()
-                 if row:
-                     token, label = row.split("\t")
-                     current_tokens.append(token)
-                     current_labels.append(label)
-                 else:
-                     # New sentence
-                     if not current_tokens:
-                         # Consecutive empty lines will cause empty sentences
-                         continue
-                     assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
-                     sentence = (
-                         sentence_counter,
-                         {
-                             "id": str(sentence_counter),
-                             "tokens": current_tokens,
-                             "ner_tags": current_labels,
-                         },
-                     )
-                     sentence_counter += 1
-                     current_tokens = []
-                     current_labels = []
-                     yield sentence
-             # Don't forget last sentence in dataset 🧐
-             if current_tokens:
-                 yield sentence_counter, {
-                     "id": str(sentence_counter),
-                     "tokens": current_tokens,
-                     "ner_tags": current_labels,
-                 }
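
Both the removed version above and the updated version below keep the same `_generate_examples` parser: each input line is a tab-separated `token\tlabel` pair, and a blank line closes a sentence. A minimal illustration of that layout (the tokens and labels here are made up for the example; only the two-column format is what the parser requires):

```
Emotet	B-malware
targets	O
Windows	B-os
hosts	O
.	O

```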
 
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """The Open Threat dataset"""
+
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = """\
+ TBD
+ """
+
+ _CITATION = """\
+ TBD
+ """
+
+
+ _URL = "https://huggingface.co/datasets/priamai/openthreatner/raw/main/conll/"
+ _TRAINING_FILE = "text_32.conll"
+ _DEV_FILE = "text_23.conll"
+ _TEST_FILE = "text_1.conll"
+
+ class OurDatasetConfig(datasets.BuilderConfig):
+     """The Open NER dataset."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for Open Threat dataset.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(OurDatasetConfig, self).__init__(**kwargs)
+
+
+ class OurDataset(datasets.GeneratorBasedBuilder):
+     """The Open NER dataset Entities Dataset."""
+
+     BUILDER_CONFIGS = [
+         OurDatasetConfig(
+             name="Open Threat", version=datasets.Version("1.0.0"), description="The Open Cyber Threat Entities Dataset"
+         ),
+     ]
+
+     def _info(self):
+
+         names = [
+             "O",
+             "B-date",
+             "I-date",
+             "B-time",
+             "I-time",
+             "B-geo_location",
+             "I-geo_location",
+             "B-organization",
+             "I-organization",
+             "B-sector",
+             "I-sector",
+             "B-threat_actor",
+             "I-threat_actor",
+             "B-exploit_name",
+             "I-exploit_name",
+             "B-malware",
+             "I-malware",
+             "B-os",
+             "I-os",
+             "B-software",
+             "I-software",
+             "B-hardware",
+             "I-hardware",
+             "B-username",
+             "I-username",
+             "B-ttp",
+             "I-ttp",
+             "B-code_cmd",
+             "I-code_cmd",
+             "B-classification",
+             "I-classification",
+         ]
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=list(map(str.lower, names))
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://test.cti.tools/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             current_tokens = []
+             current_labels = []
+             sentence_counter = 0
+             for row in f:
+                 row = row.rstrip()
+                 if row:
+                     token, label = row.split("\t")
+                     current_tokens.append(token)
+                     current_labels.append(label)
+                 else:
+                     # New sentence
+                     if not current_tokens:
+                         # Consecutive empty lines will cause empty sentences
+                         continue
+                     assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
+                     sentence = (
+                         sentence_counter,
+                         {
+                             "id": str(sentence_counter),
+                             "tokens": current_tokens,
+                             "ner_tags": current_labels,
+                         },
+                     )
+                     sentence_counter += 1
+                     current_tokens = []
+                     current_labels = []
+                     yield sentence
+             # Don't forget last sentence in dataset 🧐
+             if current_tokens:
+                 yield sentence_counter, {
+                     "id": str(sentence_counter),
+                     "tokens": current_tokens,
+                     "ner_tags": current_labels,
+                 }
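
For reference, a minimal sketch of consuming the updated script with the `datasets` library. The repo id `priamai/openthreatner` is inferred from `_URL` above and is an assumption; adjust it if the script lives elsewhere.

```python
from datasets import load_dataset

# Loads the train/validation/test splits declared in _split_generators
# (assumes the dataset is hosted at priamai/openthreatner).
ds = load_dataset("priamai/openthreatner")

sample = ds["train"][0]
print(sample["tokens"])

# ner_tags are stored as ClassLabel ids; map them back to the
# lowercase label names declared in _info().
label_names = ds["train"].features["ner_tags"].feature.names
print([label_names[i] for i in sample["ner_tags"]])
```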