import random
import re
from pathlib import Path
from typing import Dict, Iterator, List, Optional, Union

import numpy as np
from datasets import (
    Audio,
    Dataset,
    DatasetDict,
    IterableDataset,
    concatenate_datasets,
    load_dataset,
    load_from_disk,
)
from pydantic import BaseModel, ConfigDict
from tqdm import tqdm

from multilingual_dataset.commonvoice_stats import STATS

# import logging
#
# logging.basicConfig(
#     level="DEBUG"
# )

FOREIGN_TOKEN = "<|muted|>"


def replace_consecutive_muted(text):
    # Regular expression to match one or more consecutive <|muted|> tokens
    pattern = fr'(\s*{re.escape(FOREIGN_TOKEN)}\s*)+'
    # Replace consecutive tokens with just one <|muted|>
    cleaned_text = re.sub(pattern, FOREIGN_TOKEN, text)
    return cleaned_text
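
# Example (illustrative, not from the original script): the surrounding
# whitespace is part of the match, so repeated placeholders collapse cleanly.
#
#   replace_consecutive_muted("Bonjour <|muted|> <|muted|> merci")
#   # -> "Bonjour<|muted|>merci"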


class AudioSample(BaseModel):
    model_config = ConfigDict(
        arbitrary_types_allowed=True
    )

    path: Optional[str]
    array: np.ndarray
    sampling_rate: int


class CommonVoiceSample(BaseModel):
    audio: AudioSample
    sentence: str
    locale: str


class MultilingualDatasetSampler:
    def __init__(self, split: str):
        self.split = split
        self.country_codes = list(STATS["locales"].keys())
        # One streaming iterator per Common Voice locale listed in STATS.
        self.datasets = {
            code: self.prepare_dataset(
                load_dataset("mozilla-foundation/common_voice_13_0", code, split=split, streaming=True))
            for code in self.country_codes}

    @staticmethod
    def prepare_dataset(dataset: IterableDataset) -> Iterator[Dict]:
        # Keep only the columns we need and resample the audio to 16 kHz.
        dataset = dataset.remove_columns(list(set(dataset.column_names) - {"sentence", "audio", "locale"}))
        dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
        return dataset.iter(1)

    def get_sample(self, is_french: bool) -> CommonVoiceSample:
        while True:
            if is_french:
                code = "fr"
            else:
                code = random.choice(self.country_codes)
            try:
                item = next(self.datasets[code])
                # .iter(1) yields batches of size one, so unwrap each column.
                item = {k: v[0] for k, v in item.items()}
                return CommonVoiceSample.model_validate(item)
            except StopIteration:
                # This locale's stream is exhausted; draw again from another one.
                # (If the "fr" stream itself runs out, this loop never terminates.)
                continue
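
# Usage sketch (hedged; assumes access to the gated Common Voice 13.0 dataset
# on the Hugging Face Hub):
#
#   sampler = MultilingualDatasetSampler(split="train")
#   fr_sample = sampler.get_sample(is_french=True)    # always locale "fr"
#   any_sample = sampler.get_sample(is_french=False)  # random locale from STATS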


def merge_samples(samples: List[CommonVoiceSample]) -> CommonVoiceSample:
    # Keep French transcripts verbatim and replace every non-French transcript
    # with the FOREIGN_TOKEN placeholder.
    sentences = []
    for sample in samples:
        if sample.locale == "fr":
            sentences.append(sample.sentence.strip())
        else:
            sentences.append(FOREIGN_TOKEN)
    return CommonVoiceSample(
        audio=AudioSample(
            path="",
            sampling_rate=16000,
            array=np.concatenate([sample.audio.array for sample in samples], axis=0)),
        locale="fr",
        sentence=replace_consecutive_muted(" ".join(sentences))
    )
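
# Behaviour sketch (hypothetical inputs): merging a French clip ("Bonjour.")
# followed by two non-French clips concatenates the three waveforms and yields
# the transcript "Bonjour.<|muted|>", since the two placeholders (and the
# surrounding spaces) collapse to a single <|muted|> token.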


def build_small_multilingual_dataset(sampler: MultilingualDatasetSampler, french_prob: float = 0.3,
                                     dataset_size: int = 10000) -> Iterator[Dict]:
    # Cap each generated example at 30 seconds of 16 kHz audio.
    max_audio_length = 16000 * 30
    for _ in range(dataset_size):
        sample_len = 0
        samples = []
        while True:
            is_french = random.random() <= french_prob
            sample = sampler.get_sample(is_french)
            sample_len += sample.audio.array.shape[0]
            if sample_len > max_audio_length:
                if samples:
                    yield merge_samples(samples).model_dump()
                break
            samples.append(sample)
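
# Note: the clip that pushes the running total past the 30-second budget is
# drawn but discarded, so every yielded example stays within max_audio_length.
# Rough smoke test (assumes a sampler constructed as in the sketch above):
#
#   first = next(build_small_multilingual_dataset(sampler, dataset_size=1))
#   # first["sentence"] mixes French text with <|muted|>;
#   # first["audio"]["array"] holds the concatenated 16 kHz waveform.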


def load_splitted_local_ds(folder: Path) -> Dataset:
    # Reassemble the shards written by save_and_split_dataset into one dataset.
    datasets = []
    for dataset_path in folder.iterdir():
        datasets.append(load_from_disk(dataset_path))
    dataset = concatenate_datasets(datasets)
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
    return dataset
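
# Note: Path.iterdir() yields shards in filesystem order, so the row order of
# the reassembled dataset is not guaranteed to match the order of writing.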


def save_and_split_dataset(dataset_size: int, split: str, save_folder: Union[str, Path]):
    split_size = 1000
    i = 0
    sampler = MultilingualDatasetSampler(split=split)
    dataset_items = []
    save_folder = Path(save_folder)

    def save():
        # Materialize the buffered items as an Arrow dataset and write one shard.
        dataset = Dataset.from_list(dataset_items)
        dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
        dataset.save_to_disk(save_folder / str(i))

    for item in tqdm(build_small_multilingual_dataset(sampler=sampler, dataset_size=dataset_size), total=dataset_size,
                     desc="building dataset"):
        dataset_items.append(item)
        if len(dataset_items) == split_size:
            save()
            i += 1
            dataset_items = []
    if dataset_items:
        save()
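
# Shards land under save_folder/0, save_folder/1, ... with up to split_size
# (1,000) examples each; load_splitted_local_ds concatenates them back together.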


if __name__ == "__main__":
    save_and_split_dataset(100000, "train", "dataset_splits_train")
    save_and_split_dataset(1000, "test", "dataset_splits_test")
    train_dataset = load_splitted_local_ds(Path("dataset_splits_train"))
    test_dataset = load_splitted_local_ds(Path("dataset_splits_test"))
    dataset = DatasetDict(
        train=train_dataset,
        test=test_dataset
    )
    dataset.push_to_hub("mixed_multilingual_commonvoice_all_languages_100k")
    # dataset.save_to_disk("mixed_multilingual_commonvoice")