# nucleotide_transformer_downstream_tasks_multilabel.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script
# contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for the dataset containing the "promoter_all" and "enhancers" downstream tasks from the Nucleotide
Transformer paper."""
from typing import List
import datasets
from Bio import SeqIO
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@article{dalla2023nucleotide,
title={The Nucleotide Transformer: Building and Evaluating Robust Foundation Models for Human Genomics},
author={Dalla-Torre, Hugo and Gonzalez, Liam and Mendoza-Revilla, Javier and Carranza, Nicolas Lopez and Grzywaczewski, Adam Henryk and Oteri, Francesco and Dallago, Christian and Trop, Evan and Sirelkhatim, Hassan and Richard, Guillaume and others},
journal={bioRxiv},
pages={2023--01},
year={2023},
publisher={Cold Spring Harbor Laboratory}
}
"""
# You can copy an official description
_DESCRIPTION = """\
Multilabel datasets used in the Nucleotide Transformer paper.
"""
_HOMEPAGE = "https://github.com/instadeepai/nucleotide-transformer"
_LICENSE = "https://github.com/instadeepai/nucleotide-transformer/LICENSE.md"
# The toy_classification and toy_regression configurations are two small,
# manually created configurations whose train and test fasta files contain only
# a handful of samples. They are notably used to test the script.
_TASKS_NUM_LABELS_DTYPE = [
("deepstarr", 6, "float32"),
("toy_classification", 2, "int32"),
("toy_regression", 2, "float32"),
]
_SPLIT_SIZES = {
"deepstarr": {"train": 402034, "test": 41184},
"toy_classification": {"train": 35, "test": 35},
"toy_regression": {"train": 25, "test": 15},
}
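# A minimal usage sketch (a comment only, not executed by this script): each
# task above is exposed as a builder config, so a split can be loaded by config
# name. The repo id below is hypothetical; substitute the actual Hub path.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("user/toy_downstream_tasks_multilabel", name="deepstarr")
#   print(ds["train"][0])  # {"sequence": ..., "name": ..., "label_0": ..., ...}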
class NucleotideTransformerDownstreamTasksConfig(datasets.BuilderConfig):
"""BuilderConfig for The Nucleotide Transformer downstream taks dataset."""
def __init__(
        self, *args, task: str, num_labels: int, dtype: str = "int32", **kwargs
):
"""BuilderConfig downstream tasks dataset.
Args:
task (:obj:`str`): Task name.
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(
*args,
name=f"{task}",
**kwargs,
)
self.task = task
self.num_labels = num_labels
self.dtype = dtype
self.split_sizes = _SPLIT_SIZES[task]
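# For illustration, a config can also be instantiated directly (normally this
# is done for you via BUILDER_CONFIGS below):
#
#   config = NucleotideTransformerDownstreamTasksConfig(
#       task="deepstarr", num_labels=6, dtype="float32"
#   )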
class NucleotideTransformerDownstreamTasks(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIG_CLASS = NucleotideTransformerDownstreamTasksConfig
BUILDER_CONFIGS = [
NucleotideTransformerDownstreamTasksConfig(
task=task, num_labels=num_labels, dtype=dtype
)
for (task, num_labels, dtype) in _TASKS_NUM_LABELS_DTYPE
]
DEFAULT_CONFIG_NAME = "deepstarr"
def _info(self):
features_dict = {
"sequence": datasets.Value("string"),
"name": datasets.Value("string"),
}
labels_dict = {
f"label_{i}": datasets.Value(self.config.dtype)
for i in range(self.config.num_labels)
}
features_dict.update(labels_dict)
features = datasets.Features(features_dict)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
# Number of sequences
dataset_size=self.config.split_sizes,
)
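    # For illustration: with the "deepstarr" config (6 labels, dtype float32),
    # the features built above amount to:
    #   {"sequence": Value("string"), "name": Value("string"),
    #    "label_0": Value("float32"), ..., "label_5": Value("float32")}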
def _split_generators(
self, dl_manager: datasets.DownloadManager
) -> List[datasets.SplitGenerator]:
train_file = dl_manager.download_and_extract(self.config.task + "/train.fna")
test_file = dl_manager.download_and_extract(self.config.task + "/test.fna")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"file": train_file}
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"file": test_file}
),
]
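    # The paths above are relative to the dataset repository root, so each task
    # is expected to provide one fasta file per split, e.g.:
    #   deepstarr/train.fna
    #   deepstarr/test.fna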
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, file):
key = 0
with open(file, "rt") as f:
fasta_sequences = SeqIO.parse(f, "fasta")
            for record in fasta_sequences:
                # headers are expected to look like ">name|label_1|...|label_n":
                # everything after the first "|" is parsed as a label, cast
                # according to the dtype declared for this config
                sequence, name = str(record.seq), str(record.name)
                labels = [
                    int(float(label)) if self.config.dtype == "int32" else float(label)
                    for label in name.split("|")[1:]
                ]
sequence_name_dict = {
"sequence": sequence,
"name": name,
}
labels_dict = {
f"label_{i}": labels[i] for i in range(self.config.num_labels)
}
sequence_name_dict.update(labels_dict)
# yield example
yield key, sequence_name_dict
key += 1
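# For illustration, a (hypothetical) fasta record such as
#
#   >seq_0|1.5|0.25|0.0|2.0|0.5|1.0
#   ACGTACGT...
#
# would be yielded under the "deepstarr" config as:
#
#   {"sequence": "ACGTACGT...", "name": "seq_0|1.5|0.25|0.0|2.0|0.5|1.0",
#    "label_0": 1.5, "label_1": 0.25, ..., "label_5": 1.0}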