# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script
# contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for the dataset containing tasks for genome origin detection."""

from typing import List

import datasets


# Custom function to parse lines in the dataset
def parse_text(fp):
    for line in fp:
        line = line.strip()
        if not line:  # Skip empty lines
            continue
        seq, label = line.split(",")
        yield seq, label


# Dataset metadata
_CITATION = """TBD"""

_DESCRIPTION = """TBD"""

_HOMEPAGE = "TBD"

_LICENSE = "TBD"

# Tasks available in the dataset
_TASKS = [
    "four_kingdoms",
    "plasmid_detection",
]


class GenomeOriginTasksConfig(datasets.BuilderConfig):
    """BuilderConfig for Genome Origin tasks dataset."""

    def __init__(self, *args, task: str, **kwargs):
        """BuilderConfig for Genome Origin tasks dataset.

        Args:
            task (str): Task name.
            **kwargs: Additional keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name=f"{task}",
            **kwargs,
        )
        self.task = task


class GenomeOriginTasks(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = GenomeOriginTasksConfig
    BUILDER_CONFIGS = [GenomeOriginTasksConfig(task=task) for task in _TASKS]
    DEFAULT_CONFIG_NAME = "four_kingdoms"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "sequence": datasets.Value("string"),
                    "label": datasets.Value("int32"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        train_file = dl_manager.download_and_extract(self.config.task + "/train.txt")
        test_file = dl_manager.download_and_extract(self.config.task + "/test.txt")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": train_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": test_file},
            ),
        ]

    def _generate_examples(self, file_path):
        """Generates examples from a given file."""
        key = 0
        with open(file_path, "r") as f:
            for seq, label in parse_text(f):
                yield key, {
                    "sequence": seq.upper(),  # Ensure sequences are uppercase
                    "label": int(label),  # Convert label to integer
                }
                key += 1
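

# A minimal usage sketch, not part of the loading script itself. It assumes this
# file is saved as a local loading script (e.g. genome_origin_tasks.py, a
# hypothetical filename) next to the per-task data folders containing
# <task>/train.txt and <task>/test.txt. Depending on the installed `datasets`
# version, `trust_remote_code=True` may also be required for script-based datasets.
if __name__ == "__main__":
    # Load the default "four_kingdoms" configuration directly from this script.
    dataset = datasets.load_dataset(__file__, name="four_kingdoms")
    print(dataset)  # DatasetDict with "train" and "test" splits
    print(dataset["train"].features)  # sequence: string, label: int32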