# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""The Open Threat dataset."""

import datasets


logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """\
TBD
"""

_CITATION = """\
TBD
"""

_URL = "https://huggingface.co/datasets/priamai/openthreatner/raw/main/conll/"
_TRAINING_FILE = "text_32.conll"
_DEV_FILE = "text_23.conll"
_TEST_FILE = "text_56.conll"


class OurDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the Open Threat dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the Open Threat dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(OurDatasetConfig, self).__init__(**kwargs)


class OurDataset(datasets.GeneratorBasedBuilder):
    """The Open Cyber Threat Entities dataset."""

    BUILDER_CONFIGS = [
        OurDatasetConfig(
            name="Open Threat",
            version=datasets.Version("1.0.0"),
            description="The Open Cyber Threat Entities Dataset",
        ),
    ]

    def _info(self):
        our_names = [
            "O",
            "B-date",
            "I-date",
            "B-time",
            "I-time",
            "B-geo_location",
            "I-geo_location",
            "B-organization",
            "I-organization",
            "B-sector",
            "I-sector",
            "B-threat_actor",
            "I-threat_actor",
            "B-exploit_name",
            "I-exploit_name",
            "B-malware",
            "I-malware",
            "B-os",
            "I-os",
            "B-software",
            "I-software",
            "B-hardware",
            "I-hardware",
            "B-username",
            "I-username",
            "B-ttp",
            "I-ttp",
            "B-code_cmd",
            "I-code_cmd",
            "B-classification",
            "I-classification",
        ]
        logger.info("Total names = %d", len(our_names))
        dinfo = datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=list(map(str.upper, our_names)))
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://test.cti.tools/",
            citation=_CITATION,
        )
        logger.info(dinfo)
        return dinfo

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            current_tokens = []
            current_labels = []
            sentence_counter = 0
            for row in f:
                row = row.rstrip()
                if row:
                    token, label = row.split("\t")
                    current_tokens.append(token)
                    current_labels.append(label)
                else:
                    # New sentence
                    if not current_tokens:
                        # Consecutive empty lines will cause empty sentences
                        continue
                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
                    sentence = (
                        sentence_counter,
                        {
                            "id": str(sentence_counter),
                            "tokens": current_tokens,
                            "ner_tags": current_labels,
                        },
                    )
                    sentence_counter += 1
                    current_tokens = []
                    current_labels = []
                    yield sentence
            # Don't forget last sentence in dataset 🧐
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "tokens": current_tokens,
                    "ner_tags": current_labels,
                }
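

# A minimal usage sketch (kept as a comment so nothing runs at import time):
# assuming this script is published as the "priamai/openthreatner" dataset repo on the
# Hugging Face Hub (the repo id matches _URL above, but its availability is an assumption),
# the splits defined here could be loaded roughly like this:
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("priamai/openthreatner", name="Open Threat")
#     print(dataset["train"][0]["tokens"])
#     # ClassLabel names are upper-cased in _info(), e.g. "B-MALWARE"
#     print(dataset["train"].features["ner_tags"].feature.names)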