import json
from typing import List, Optional
import datasets
import numpy as np
import fortepyan as ff
from tqdm import tqdm
from datasets import (
Split,
Dataset,
DatasetInfo,
BuilderConfig,
GeneratorBasedBuilder,
load_dataset,
concatenate_datasets,
)
_DESC = """
Dataset of MIDI pieces sliced into records with a fixed number of notes.
"""
class TokenizedMidiDatasetConfig(BuilderConfig):
def __init__(
self,
base_dataset_name: str = "roszcz/maestro-v1-sustain",
        extra_datasets: Optional[list[str]] = None,
sequence_length: int = 64,
sequence_step: int = 42,
**kwargs,
):
        # Version history:
        # 0.0.1: Initial version.
        super().__init__(version=datasets.Version("0.0.2"), **kwargs)
        self.base_dataset_name: str = base_dataset_name
        # Guard against the mutable-default-argument pitfall by building a
        # fresh list when no extra datasets are given.
        self.extra_datasets: list[str] = extra_datasets if extra_datasets is not None else []
self.sequence_length: int = sequence_length
self.sequence_step: int = sequence_step
class TokenizedMidiDataset(GeneratorBasedBuilder):
def _info(self) -> DatasetInfo:
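        # No explicit features schema is declared here, so column types are
        # inferred from the generated examples.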
return datasets.DatasetInfo(description=_DESC)
BUILDER_CONFIG_CLASS = TokenizedMidiDatasetConfig
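    # Each config is a named preset pairing source dataset(s) with slicing
    # parameters: the "giant-*" variants mix in GiantMIDI, and the
    # short/mid/long suffix sets the number of notes per record (32/64/128).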
BUILDER_CONFIGS = [
TokenizedMidiDatasetConfig(
base_dataset_name="roszcz/maestro-sustain-v2",
extra_datasets=["roszcz/giant-midi-sustain-v2"],
sequence_length=32,
sequence_step=16,
name="giant-short",
),
TokenizedMidiDatasetConfig(
base_dataset_name="roszcz/maestro-sustain-v2",
extra_datasets=[],
sequence_length=32,
sequence_step=16,
name="basic-short",
),
TokenizedMidiDatasetConfig(
base_dataset_name="roszcz/maestro-sustain-v2",
extra_datasets=["roszcz/giant-midi-sustain-v2"],
sequence_length=64,
sequence_step=16,
name="giant-mid",
),
TokenizedMidiDatasetConfig(
base_dataset_name="roszcz/maestro-sustain-v2",
extra_datasets=[],
sequence_length=64,
sequence_step=16,
name="basic-mid",
),
TokenizedMidiDatasetConfig(
base_dataset_name="roszcz/maestro-sustain-v2",
extra_datasets=["roszcz/giant-midi-sustain-v2"],
sequence_length=128,
sequence_step=16,
name="giant-long",
),
TokenizedMidiDatasetConfig(
base_dataset_name="roszcz/maestro-sustain-v2",
extra_datasets=[],
sequence_length=128,
sequence_step=16,
name="basic-long",
),
]
DEFAULT_CONFIG_NAME = "basic-mid"
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
base = load_dataset(self.config.base_dataset_name)
        train_datasets = [load_dataset(path, split="train") for path in self.config.extra_datasets]
        train_datasets.append(base["train"])
        dataset = concatenate_datasets(train_datasets)
        # Sharding the train split lets load_dataset() parallelize example
        # generation across processes via its num_proc argument.
n_shards = 12
train_shards = [dataset.shard(n_shards, it) for it in range(n_shards)]
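        # Only the base dataset provides test and validation splits; any
        # extra datasets augment the train split only.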
return [
datasets.SplitGenerator(name=Split.TRAIN, gen_kwargs={"dataset_shards": train_shards}),
datasets.SplitGenerator(name=Split.TEST, gen_kwargs={"dataset_shards": [base["test"]]}),
datasets.SplitGenerator(name=Split.VALIDATION, gen_kwargs={"dataset_shards": [base["validation"]]}),
]
def piece_to_records(self, piece: ff.MidiPiece) -> list[dict]:
        # A local generator with a fixed seed keeps the slicing deterministic
        # and avoids mutating the global numpy random state.
        rs = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(4)))
n_samples = 1 + (piece.size - self.config.sequence_length) // self.config.sequence_step
        # Sample start points uniformly without replacement; with this many
        # samples the piece should be covered almost entirely.
        piece_idxs = range(piece.size - self.config.sequence_length)
start_points = rs.choice(piece_idxs, size=n_samples, replace=False)
chopped_sequences = []
for start in start_points:
start = int(start)
finish = start + self.config.sequence_length
part = piece[start:finish]
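            # One record: fixed-length parallel arrays of note attributes,
            # plus the piece's source metadata encoded as a JSON string.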
sequence = {
"notes": {
"pitch": part.df.pitch.astype("int16").values.T,
"start": part.df.start.values,
"end": part.df.end.values,
"duration": part.df.duration.values,
"velocity": part.df.velocity.values,
},
"source": json.dumps(part.source),
}
chopped_sequences.append(sequence)
return chopped_sequences
def filter_pauses(self, piece: ff.MidiPiece) -> list[ff.MidiPiece]:
next_start = piece.df.start.shift(-1)
silent_distance = next_start - piece.df.end
# Seconds
distance_threshold = 4
ids = silent_distance > distance_threshold
break_idxs = np.where(ids)[0]
pieces = []
start = 0
        for break_idx in break_idxs:
            finish = break_idx.item() + 1
            piece_part = piece[start:finish]
            if piece_part.size <= self.config.sequence_length:
                continue
            pieces.append(piece_part)
            start = finish
        # Keep the remainder after the last break as well; without this,
        # the tail of every piece (and any piece with no pauses at all)
        # would be dropped entirely.
        last_part = piece[start:piece.size]
        if last_part.size > self.config.sequence_length:
            pieces.append(last_part)
        return pieces
    def _generate_examples(self, dataset_shards: list[Dataset]):
        # Takes roughly 10 minutes for GiantMIDI.
        for shard_id, dataset in enumerate(dataset_shards):
            for it, record in tqdm(enumerate(dataset), total=len(dataset)):
                piece = ff.MidiPiece.from_huggingface(dict(record))
                pieces = self.filter_pauses(piece)
                chopped_sequences = sum([self.piece_to_records(p) for p in pieces], [])
                for jt, sequence in enumerate(chopped_sequences):
                    # Example keys must be unique within a split. Using jt
                    # alone repeats across records and caused duplicate-key
                    # errors under load_dataset(num_proc=8); a composite
                    # shard/record/sequence key avoids the collisions.
                    key = f"{shard_id}_{it}_{jt}"
                    yield key, sequence
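
if __name__ == "__main__":
    # Minimal usage sketch. The repo id below is a placeholder (nothing in
    # this file states where the script is hosted); num_proc exploits the
    # 12 train shards built in _split_generators.
    dataset = load_dataset(
        "user/SlicedMidiDataset",  # hypothetical repo id
        name="basic-mid",
        split="train",
        num_proc=8,
    )
    print(dataset[0]["source"])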