mixed_multilingual_commonvoice_all_languages_100k / mixed_languages_commonvoice.py
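"""Curation script for the mixed_multilingual_commonvoice_all_languages_100k dataset.

Streams Common Voice 13.0 for every locale listed in STATS, stitches random utterances
into clips of at most 30 seconds, keeps the French transcripts and replaces every
non-French segment with the <|muted|> token. The examples are saved to disk in shards,
reloaded, assembled into a train/test DatasetDict and pushed to the Hub.
"""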
import random
import re
from pathlib import Path
from typing import Dict, Iterator, List, Optional, Union

import numpy as np
from datasets import Dataset, load_dataset, Audio, load_from_disk, DatasetDict
from datasets import concatenate_datasets
from pydantic import BaseModel, ConfigDict
from tqdm import tqdm

from multilingual_dataset.commonvoice_stats import STATS

# import logging
#
# logging.basicConfig(
# level="DEBUG"
# )
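
# Token written into the transcript wherever a non-French utterance was spliced into the audio.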
FOREIGN_TOKEN = "<|muted|>"


def replace_consecutive_muted(text):
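    """Collapse runs of FOREIGN_TOKEN (and the whitespace around them) into a single token."""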
    # Regular expression to match one or more consecutive <|muted|> tokens
    pattern = fr'(\s*{re.escape(FOREIGN_TOKEN)}\s*)+'
    # Replace consecutive tokens with just one <|muted|>
    cleaned_text = re.sub(pattern, FOREIGN_TOKEN, text)
    return cleaned_text


class AudioSample(BaseModel):
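    """A single audio clip, mirroring the dict produced by the datasets Audio feature."""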
    model_config = ConfigDict(
        arbitrary_types_allowed=True
    )
    path: Optional[str]
    array: np.ndarray
    sampling_rate: int


class CommonVoiceSample(BaseModel):
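    """One Common Voice row: the audio plus its transcript and locale."""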
    audio: AudioSample
    sentence: str
    locale: str


class MultilingualDatasetSampler:
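    """Lazily streams one Common Voice 13.0 split per locale in STATS and hands out samples on demand."""
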
    def __init__(self, split: str):
        self.split = split
        self.country_codes = list(STATS["locales"].keys())
        self.datasets = {
            code: self.prepare_dataset(
                load_dataset("mozilla-foundation/common_voice_13_0", code, split=split, streaming=True))
            for code in self.country_codes}

    @staticmethod
    def prepare_dataset(dataset: Dataset) -> Iterator[Dict]:
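        """Keep only the sentence/audio/locale columns, resample audio to 16 kHz and return a batch-size-1 iterator."""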
        dataset = dataset.remove_columns(list(set(dataset.column_names) - {"sentence", "audio", "locale"}))
        dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
        return dataset.iter(1)

    def get_sample(self, is_french: bool) -> CommonVoiceSample:
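        """Return the next sample, from the French stream or a randomly chosen locale, retrying on exhausted streams."""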
        while True:
            if is_french:
                code = "fr"
            else:
                code = random.choice(self.country_codes)
            try:
                item = next(self.datasets[code])
                # .iter(1) yields batches of size 1; unwrap them into single values
                item = {k: v[0] for k, v in item.items()}
                return CommonVoiceSample.model_validate(item)
            except StopIteration:
                continue


def merge_samples(samples: List[CommonVoiceSample]) -> CommonVoiceSample:
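    """Concatenate the audio of all samples; French transcripts are kept, any other locale becomes FOREIGN_TOKEN."""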
    sentences = []
    for sample in samples:
        if sample.locale == "fr":
            sentences.append(sample.sentence.strip())
        else:
            sentences.append(FOREIGN_TOKEN)
    return CommonVoiceSample(
        audio=AudioSample(
            path="",
            sampling_rate=16000,
            # np.concatenate works on every NumPy version (np.concat only exists from NumPy 2.0)
            array=np.concatenate([sample.audio.array for sample in samples], axis=0)),
        locale="fr",
        sentence=replace_consecutive_muted(" ".join(sentences))
    )


def build_small_multilingual_dataset(sampler: MultilingualDatasetSampler, french_prob: float = 0.3,
                                     dataset_size: int = 10000) -> Iterator[Dict]:
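    """Yield `dataset_size` merged examples, each filling at most 30 seconds of 16 kHz audio."""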
    max_audio_length = 16000 * 30  # 30 seconds at 16 kHz
    for _ in range(dataset_size):
        sample_len = 0
        samples = []
        while True:
            is_french = random.random() <= french_prob
            sample = sampler.get_sample(is_french)
            sample_len += sample.audio.array.shape[0]
            if sample_len > max_audio_length:
                # the sample that overflows the 30 s budget is discarded
                if samples:
                    # model_dump() is the pydantic v2 replacement for the deprecated .dict()
                    yield merge_samples(samples).model_dump()
                break
            samples.append(sample)


def load_splitted_local_ds(folder: Path):
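    """Reload every shard saved under `folder` and concatenate them into a single Dataset."""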
    datasets = []
    for dataset_path in folder.iterdir():
        datasets.append(load_from_disk(dataset_path))
    dataset = concatenate_datasets(datasets)
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
    return dataset


def save_and_split_dataset(dataset_size: int, split: str, save_folder: Union[str, Path]):
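    """Generate `dataset_size` merged examples and save them under `save_folder` in shards of 1000."""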
    split_size = 1000
    i = 0
    sampler = MultilingualDatasetSampler(split=split)
    dataset_items = []
    save_folder = Path(save_folder)

    def save():
        dataset = Dataset.from_list(dataset_items)
        dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
        dataset.save_to_disk(save_folder / str(i))

    for item in tqdm(build_small_multilingual_dataset(sampler=sampler, dataset_size=dataset_size), total=dataset_size,
                     desc="building dataset"):
        dataset_items.append(item)
        if len(dataset_items) == split_size:
            save()
            i += 1
            dataset_items = []
    if dataset_items:
        save()


if __name__ == "__main__":
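    # Build 100k training and 1k test examples, reload the local shards and push the DatasetDict to the Hub.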
    save_and_split_dataset(100000, "train", "dataset_splits_train")
    save_and_split_dataset(1000, "test", "dataset_splits_test")
    train_dataset = load_splitted_local_ds(Path("dataset_splits_train"))
    test_dataset = load_splitted_local_ds(Path("dataset_splits_test"))
    dataset = DatasetDict(
        train=train_dataset,
        test=test_dataset
    )
    dataset.push_to_hub("mixed_multilingual_commonvoice_all_languages_100k")
    # dataset.save_to_disk("mixed_multilingual_commonvoice")