import json
from typing import List
import datasets
import numpy as np
import fortepyan as ff
from tqdm import tqdm
from datasets import (
    Split,
    Dataset,
    DatasetInfo,
    BuilderConfig,
    GeneratorBasedBuilder,
    load_dataset,
    concatenate_datasets,
)
_DESC = """
Dataset of midi pieces sliced to records of fixed number of notes.
"""
class TokenizedMidiDatasetConfig(BuilderConfig):
    def __init__(
        self,
        base_dataset_name: str = "roszcz/maestro-v1-sustain",
        extra_datasets: list[str] | None = None,
        sequence_length: int = 64,
        sequence_step: int = 42,
        **kwargs,
    ):
        # Version history:
        # 0.0.1: Initial version.
        super().__init__(version=datasets.Version("0.0.2"), **kwargs)

        self.base_dataset_name: str = base_dataset_name
        # None instead of a mutable default argument, resolved to a fresh list
        self.extra_datasets: list[str] = extra_datasets if extra_datasets is not None else []
        self.sequence_length: int = sequence_length
        self.sequence_step: int = sequence_step
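
# A custom slicing config can be built the same way as the presets below;
# the name and values here are illustrative, not published configs:
#
#   config = TokenizedMidiDatasetConfig(
#       base_dataset_name="roszcz/maestro-sustain-v2",
#       sequence_length=96,
#       sequence_step=8,
#       name="custom-96",
#   )
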
class TokenizedMidiDataset(GeneratorBasedBuilder):
    def _info(self) -> DatasetInfo:
        return DatasetInfo(description=_DESC)

    BUILDER_CONFIG_CLASS = TokenizedMidiDatasetConfig
    BUILDER_CONFIGS = [
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=["roszcz/giant-midi-sustain-v2"],
            sequence_length=32,
            sequence_step=16,
            name="giant-short",
        ),
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=[],
            sequence_length=32,
            sequence_step=16,
            name="basic-short",
        ),
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=["roszcz/giant-midi-sustain-v2"],
            sequence_length=64,
            sequence_step=16,
            name="giant-mid",
        ),
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=[],
            sequence_length=64,
            sequence_step=16,
            name="basic-mid",
        ),
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=["roszcz/giant-midi-sustain-v2"],
            sequence_length=128,
            sequence_step=16,
            name="giant-long",
        ),
        TokenizedMidiDatasetConfig(
            base_dataset_name="roszcz/maestro-sustain-v2",
            extra_datasets=[],
            sequence_length=128,
            sequence_step=16,
            name="basic-long",
        ),
    ]
    DEFAULT_CONFIG_NAME = "basic-mid"
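    # "basic-mid" is used when load_dataset() is called without a config name
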
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
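        """Create the train/test/validation splits.

        The train split concatenates the base dataset with any extra
        datasets and is sharded, so that example generation can run in
        parallel across processes.
        """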
        base = load_dataset(self.config.base_dataset_name)
        other_datasets = [load_dataset(path, split="train") for path in self.config.extra_datasets]
        other_datasets.append(base["train"])

        dataset = concatenate_datasets(other_datasets)

        # Sharding the train split is what lets load_dataset() run
        # example generation with num_proc > 1
        n_shards = 12
        train_shards = [dataset.shard(n_shards, it) for it in range(n_shards)]

        return [
            datasets.SplitGenerator(name=Split.TRAIN, gen_kwargs={"dataset_shards": train_shards}),
            datasets.SplitGenerator(name=Split.TEST, gen_kwargs={"dataset_shards": [base["test"]]}),
            datasets.SplitGenerator(name=Split.VALIDATION, gen_kwargs={"dataset_shards": [base["validation"]]}),
        ]

    def piece_to_records(self, piece: ff.MidiPiece) -> list[dict]:
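        """Slice a MidiPiece into fixed-length records of notes."""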
        # A local random state is better practice than seeding the global
        # one; the fixed seed makes the slicing deterministic
        rs = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(4)))

        n_samples = 1 + (piece.size - self.config.sequence_length) // self.config.sequence_step
        # Sample start points uniformly without replacement; at this
        # density the piece should be covered almost entirely
        piece_idxs = range(piece.size - self.config.sequence_length)
        start_points = rs.choice(piece_idxs, size=n_samples, replace=False)
        chopped_sequences = []
        for start in start_points:
            start = int(start)
            finish = start + self.config.sequence_length
            part = piece[start:finish]

            sequence = {
                "notes": {
                    "pitch": part.df.pitch.astype("int16").values,
                    "start": part.df.start.values,
                    "end": part.df.end.values,
                    "duration": part.df.duration.values,
                    "velocity": part.df.velocity.values,
                },
                "source": json.dumps(part.source),
            }
            chopped_sequences.append(sequence)

        return chopped_sequences
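
    # Each yielded record looks roughly like this (values illustrative,
    # with sequence_length=4):
    #   {"notes": {"pitch": [60, 64, 67, 72], "start": [...], "end": [...],
    #              "duration": [...], "velocity": [...]},
    #    "source": "{...}"}
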
    def filter_pauses(self, piece: ff.MidiPiece) -> list[ff.MidiPiece]:
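        """Split a piece at long silences, dropping fragments too short
        to yield at least one full sequence."""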
        next_start = piece.df.start.shift(-1)
        silent_distance = next_start - piece.df.end

        # Split wherever the silence between notes is longer than 4 seconds
        distance_threshold = 4
        ids = silent_distance > distance_threshold
        break_idxs = np.where(ids)[0]

        pieces = []
        start = 0
        for break_idx in break_idxs:
            finish = break_idx.item() + 1
            piece_part = piece[start:finish]
            # Advance past the pause even when the fragment is discarded,
            # so fragments are never merged across a pause
            start = finish
            if piece_part.size <= self.config.sequence_length:
                continue
            pieces.append(piece_part)

        # Keep the tail after the last pause; without it, a piece with no
        # long pauses would be dropped entirely
        last_part = piece[start:]
        if last_part.size > self.config.sequence_length:
            pieces.append(last_part)
        return pieces

    def _generate_examples(self, dataset_shards: list[Dataset]):
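        """Yield (key, example) pairs from the given dataset shards."""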
        # Takes ~10 min for giant-midi
        for shard_id, dataset in enumerate(dataset_shards):
            for it, record in tqdm(enumerate(dataset), total=len(dataset)):
                piece = ff.MidiPiece.from_huggingface(dict(record))
                pieces = self.filter_pauses(piece)
                chopped_sequences = sum([self.piece_to_records(piece) for piece in pieces], [])

                for jt, sequence in enumerate(chopped_sequences):
                    # Keys must be unique within a generation job. With
                    # num_proc > 1 a single process can receive several
                    # shards and `it` restarts for each of them, so the
                    # shard index is part of the key as well.
                    key = f"{shard_id}_{it}_{jt}"
                    yield key, sequence
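
# Local smoke-test sketch (a guess at builder-API usage, not part of this
# script's published interface):
#
#   builder = TokenizedMidiDataset(config_name="basic-short")
#   builder.download_and_prepare()
#   ds = builder.as_dataset(split="validation")
#   print(ds[0]["notes"]["pitch"][:8])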