import os
import csv
import datasets
from datasets import Audio, BuilderConfig
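# Expected on-disk layout (inferred from the paths built in _split_generators and the
# TSV columns read in _generate_examples; the exact contents are an assumption, not a
# guarantee):
#
#   Dataset_STT/
#       audio/uz/{train,test,validation}.tar         # .mp3 files named "<id>.mp3"
#       transcript/uz/{train,test,validation}.tsv    # columns: id, sentence, duration,
#                                                    #          age, gender, accents, locale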
# Configuration class: defines the language abbreviation and the directory that holds the data.
class STTConfig(BuilderConfig):
    def __init__(self, language_abbr, data_dir, **kwargs):
        """
        Args:
            language_abbr (str): For example, "uz".
            data_dir (str): Root directory of the dataset, e.g. "Dataset_STT".
            **kwargs: Remaining BuilderConfig parameters.
        """
        super().__init__(**kwargs)
        self.language_abbr = language_abbr
        self.data_dir = data_dir
# Dataset loading script
class MySTTDataset(datasets.GeneratorBasedBuilder):
    """
    Loading script for the Uzbek STT dataset:
    - Audio files are stored inside .tar archives.
    - Transcriptions are stored in TSV files.
    - In streaming mode the tar archives are read via dl_manager.iter_archive().
    - The "audio" column is declared with the Audio() feature type, so each value
      must be a dictionary of the form
          {"path": <file name inside the tar>, "bytes": <raw audio bytes>}
      which lets the Dataset Viewer show a "play" button.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        STTConfig(
            name="uz",
            version=datasets.Version("1.0.0"),
            description="Uzbek subset of the STT dataset",
            language_abbr="uz",
            data_dir="Dataset_STT",  # Name of the root data directory
        )
    ]

    DEFAULT_CONFIG_NAME = "uz"
    def _info(self):
        """
        Defines the dataset columns.
        The "audio" column uses Audio(sampling_rate=None), so audio files are
        decoded automatically and kept at their original sampling rate.
        """
        return datasets.DatasetInfo(
            description=(
                "Uzbek STT dataset: audio files are stored in tar archives and "
                "transcriptions are provided in TSV files. In streaming mode the "
                "audio files are read directly from the tar archives."
            ),
            features=datasets.Features({
                "id": datasets.Value("string"),
                "audio": Audio(sampling_rate=None),
                "sentence": datasets.Value("string"),
                "duration": datasets.Value("float"),
                "age": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "accents": datasets.Value("string"),
                "locale": datasets.Value("string"),
            }),
            supervised_keys=None,
            version=self.VERSION,
        )
    def _split_generators(self, dl_manager):
        """
        For each split, resolves the paths of the tar archive and the matching TSV file.
        The tar archives are read in streaming mode through dl_manager.iter_archive().
        """
        config = self.config
        base_dir = config.data_dir      # e.g. "Dataset_STT"
        lang = config.language_abbr     # e.g. "uz"

        # Tar archives (not extracted; read through iter_archive instead).
        # dl_manager.download() resolves the relative paths for both local and Hub-hosted data.
        train_tar = dl_manager.download(os.path.join(base_dir, "audio", lang, "train.tar"))
        test_tar = dl_manager.download(os.path.join(base_dir, "audio", lang, "test.tar"))
        val_tar = dl_manager.download(os.path.join(base_dir, "audio", lang, "validation.tar"))

        train_audio_files = dl_manager.iter_archive(train_tar)
        test_audio_files = dl_manager.iter_archive(test_tar)
        val_audio_files = dl_manager.iter_archive(val_tar)

        # TSV file paths
        train_tsv = dl_manager.download(os.path.join(base_dir, "transcript", lang, "train.tsv"))
        test_tsv = dl_manager.download(os.path.join(base_dir, "transcript", lang, "test.tsv"))
        val_tsv = dl_manager.download(os.path.join(base_dir, "transcript", lang, "validation.tsv"))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"audio_files": train_audio_files, "tsv_path": train_tsv},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"audio_files": test_audio_files, "tsv_path": test_tsv},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"audio_files": val_audio_files, "tsv_path": val_tsv},
            ),
        ]
    def _generate_examples(self, audio_files, tsv_path):
        """
        Reads the TSV file line by line and builds a metadata dictionary.
        The audio files coming from the tar archive (via the streaming iterator)
        are then joined with their matching metadata.
        Each "audio" value is built as:
            {"path": <file name inside the tar>, "bytes": <raw audio bytes>}
        which the Dataset Viewer recognizes as an Audio() value.
        """
        # Build the metadata dictionary from the TSV file; the key is the file name (e.g. "ID.mp3").
        metadata = {}
        with open(tsv_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for row in reader:
                filename = row["id"] + ".mp3"
                metadata[filename] = row

        # Files read from the tar archive through the streaming iterator
        for idx, (file_path, file_obj) in enumerate(audio_files):
            # file_path: relative path inside the tar archive
            # (e.g. "009f0d56-c7db-4de3-bd3e-92a37d6f0cb9.mp3"); use the base name so
            # entries stored under a subdirectory still match the TSV keys.
            filename = os.path.basename(file_path)
            if filename in metadata:
                row = metadata[filename]
                audio_bytes = file_obj.read()
                yield idx, {
                    "id": row["id"],
                    "audio": {"path": file_path, "bytes": audio_bytes},
                    "sentence": row.get("sentence", ""),
                    "duration": float(row.get("duration") or 0.0),
                    "age": row.get("age", ""),
                    "gender": row.get("gender", ""),
                    "accents": row.get("accents", ""),
                    "locale": row.get("locale", ""),
                }
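

# Minimal usage sketch (not part of the loading script itself). It assumes this file is
# saved as my_stt_dataset.py next to the Dataset_STT/ folder; the script file name and
# repository layout are assumptions for illustration only.
if __name__ == "__main__":
    from datasets import load_dataset

    # Stream the "uz" config and inspect the first training example.
    # trust_remote_code may be required on newer datasets versions to run a loading script.
    ds = load_dataset("my_stt_dataset.py", "uz", streaming=True, trust_remote_code=True)
    first = next(iter(ds["train"]))
    print(first["id"], first["sentence"], first["audio"]["path"])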