# GUE_public / GUE.py
# (Hugging Face Hub viewer chrome captured with the raw file: uploader
#  "fm4bio-ning", commit "dd5706a verified", "raw / history blame",
#  size "3.24 kB" — not part of the script itself.)
"""Script for the dataset containing the 28 downstream tasks from the DNABertv2 paper."""
from typing import List
import csv
import datasets
# NOTE(review): stale comment — this file defines no reimplementation of
# SeqIO's parse method; it only loads the GUE CSV tasks with the csv module.
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = ''
# You can copy an official description
_DESCRIPTION = ''
# Homepage and license are left blank on the dataset card.
_HOMEPAGE = ""
_LICENSE = ""
# Each entry names a task subdirectory on the dataset repo expected to
# contain train.csv, dev.csv and test.csv (downloaded in _split_generators).
_TASKS = [
    "splice/reconstructed",
    "mouse/0",
    "mouse/1"
]
class GUEConfig(datasets.BuilderConfig):
    """BuilderConfig for a single GUE downstream task."""

    def __init__(self, *args, task: str, **kwargs):
        """Create a builder config named after its task.

        Args:
            task (:obj:`str`): Task name; also used as the config name.
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(*args, name=f"{task}", **kwargs)
        # Remember which task this config selects; _split_generators reads it.
        self.task = task
class GUEDownstreamTasks(datasets.GeneratorBasedBuilder):
    """Builder for the GUE downstream tasks (DNABERT-2 paper).

    Each config corresponds to one task directory holding train/dev/test CSVs
    with two columns: a DNA sequence and an integer class label.
    """

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = GUEConfig
    BUILDER_CONFIGS = [GUEConfig(task=task) for task in _TASKS]
    # BUG FIX: configs are named by the full task path (see GUEConfig), so the
    # previous default "reconstructed" matched no config and made loading
    # without an explicit config name fail. Use an actual task name.
    DEFAULT_CONFIG_NAME = "splice/reconstructed"

    def _info(self):
        """Return dataset metadata: a "sequence" string and an int32 "label"."""
        features = datasets.Features(
            {
                "sequence": datasets.Value("string"),
                "label": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download the three CSV splits for the configured task."""
        train_file = dl_manager.download_and_extract(self.config.task + "/train.csv")
        valid_file = dl_manager.download_and_extract(self.config.task + "/dev.csv")
        test_file = dl_manager.download_and_extract(self.config.task + "/test.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"file": train_file}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"file": valid_file}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"file": test_file}
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, file):
        """Yield ``(key, example)`` pairs from a two-column CSV.

        Robustness fixes over the original:
        - open with ``newline=""`` as the csv module documentation requires;
        - skip blank rows (common at end of file), which previously raised
          ``ValueError`` during tuple unpacking;
        - cast the label to ``int`` so it matches the declared int32 feature.
        """
        key = 0
        with open(file, "rt", newline="") as f:
            csv_reader = csv.reader(f)
            next(csv_reader)  # skip the header row
            for row in csv_reader:
                if not row:
                    continue  # tolerate trailing blank lines
                sequence, label = row
                yield key, {
                    "sequence": sequence,
                    "label": int(label),
                }
                key += 1