# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script
# contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for the dataset containing the "promoter_all" and "enhancers" downstream tasks from the Nucleotide
Transformer paper."""
from typing import List
import datasets
from Bio import SeqIO
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@article{dalla2023nucleotide,
title={The Nucleotide Transformer: Building and Evaluating Robust Foundation Models for Human Genomics},
author={Dalla-Torre, Hugo and Gonzalez, Liam and Mendoza-Revilla, Javier and Carranza, Nicolas Lopez and Grzywaczewski, Adam Henryk and Oteri, Francesco and Dallago, Christian and Trop, Evan and Sirelkhatim, Hassan and Richard, Guillaume and others},
journal={bioRxiv},
pages={2023--01},
year={2023},
publisher={Cold Spring Harbor Laboratory}
}
"""
# You can copy an official description
_DESCRIPTION = """\
Toy multilabel datasets used to test the Nucleotide Transformer downstream task scripts.
"""
_HOMEPAGE = "https://github.com/instadeepai/nucleotide-transformer"
_LICENSE = "https://github.com/instadeepai/nucleotide-transformer/LICENSE.md"
# toy_classification and toy_regression are two manually created configurations
# with 5 samples in each of the train and test FASTA files. They are notably
# used to test the scripts.
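# Each tuple is (task name, number of labels per sample, label dtype).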
_TASKS_NUM_LABELS_DTYPE = [
("toy_classification", 2, "int32"),
("toy_regression", 2, "float32"),
]
_TASK_NAMES = [
"toy_classification",
"toy_regression",
]
_TASK_INFO = {
"toy_classification": {"type": "binary"},
"toy_regression": {"type": "regression"},
}
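# Sequences are stored in FASTA files whose record headers carry the sample
# name and its labels separated by "|". A hypothetical record for
# toy_classification could look like:
#
#     >sequence_0|1|0
#     ATTCTGGACT...
#
# which _generate_examples parses into name="sequence_0" and labels=[1, 0].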
class NucleotideTransformerDownstreamTasksConfig(datasets.BuilderConfig):
"""BuilderConfig for The Nucleotide Transformer downstream taks dataset."""
def __init__(self, *args, task: str, **kwargs):
"""BuilderConfig downstream tasks dataset.
Args:
task (:obj:`str`): Task name.
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(
*args,
name=f"{task}",
**kwargs,
)
self.task = task
self.task_type = _TASK_INFO[self.task]["type"]
class NucleotideTransformerDownstreamTasks(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIG_CLASS = NucleotideTransformerDownstreamTasksConfig
BUILDER_CONFIGS = [
NucleotideTransformerDownstreamTasksConfig(task=task) for task in _TASK_NAMES
]
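    # Configuration used when no name is passed to datasets.load_dataset.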
DEFAULT_CONFIG_NAME = "toy_classification"
def _info(self):
        feature_dict = {
            "sequence": datasets.Value("string"),
            "name": datasets.Value("string"),
        }
        if self.config.task_type == "regression":
            feature_dict["labels"] = [datasets.Value("float32")]
        elif self.config.task_type == "binary":
            feature_dict["labels"] = [datasets.Value("int8")]
        features = datasets.Features(feature_dict)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(
self, dl_manager: datasets.DownloadManager
) -> List[datasets.SplitGenerator]:
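        # The FASTA paths are relative to the dataset repository root; the
        # download manager resolves them to local files.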
train_file = dl_manager.download_and_extract(self.config.task + "/train.fna")
test_file = dl_manager.download_and_extract(self.config.task + "/test.fna")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"file": train_file}
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"file": test_file}
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, file):
        with open(file, "r") as f:
            for key, record in enumerate(SeqIO.parse(f, "fasta")):
                # The FASTA header encodes the sample name followed by its
                # labels, separated by "|".
                split_name = record.name.split("|")
                name = split_name[0]
                # Cast the labels to the dtype declared in _info; FASTA
                # headers are parsed as strings.
                if self.config.task_type == "regression":
                    labels = [float(label) for label in split_name[1:]]
                else:
                    labels = [int(label) for label in split_name[1:]]
                # Yields examples as (key, example) tuples
                yield key, {
                    "sequence": str(record.seq),
                    "name": name,
                    "labels": labels,
                }
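# Example usage (a minimal sketch; the repository id below is an assumption
# and should point to wherever this script is actually hosted):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset(
#         "InstaDeepAI/toy_downstream_tasks_multilabel",  # hypothetical repo id
#         name="toy_classification",
#     )
#     print(dataset["train"][0])
#     # {"sequence": "ATTCTG...", "name": "sequence_0", "labels": [1, 0]}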