# new_dataset_stt/my_stt_dataset.py
import os
import csv
import datasets
from datasets import Audio
class MySTTDataset(datasets.GeneratorBasedBuilder):
"""
Common Voice uslubidagi minimal dataset skript:
- 3 ta tar fayl (train/test/validation)
- Har bir tar fayl ichida .mp3 audio
- Har bir split'ga mos TSV fayl (train.tsv, test.tsv, validation.tsv)
- Audio ustuni -> HF Viewer da "play" tugmasi
"""
VERSION = datasets.Version("1.0.0")
    # Since there is only one config, this part is kept simple.
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="uz",
version=VERSION,
description="STT dataset for Uzbek language (example).",
)
]
DEFAULT_CONFIG_NAME = "uz"
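    # Usage note (sketch): because DEFAULT_CONFIG_NAME is "uz", calling
    # load_dataset(<repo_or_script>) without a config name resolves to the same
    # config as load_dataset(<repo_or_script>, "uz").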
def _info(self):
"""
Bu yerda datasetning xususiyatlari (features) e'lon qilinadi.
'audio' ustuni Audio() turida bo'lsa, viewer pleyer ko'rsatadi.
"""
return datasets.DatasetInfo(
description="Uzbek STT dataset: audio in .tar, transcriptions in .tsv.",
features=datasets.Features(
{
"id": datasets.Value("string"),
"audio": Audio(sampling_rate=None),
"sentence": datasets.Value("string"),
"duration": datasets.Value("float"),
"age": datasets.Value("string"),
"gender": datasets.Value("string"),
"accents": datasets.Value("string"),
"locale": datasets.Value("string"),
}
),
supervised_keys=None,
version=self.VERSION,
)
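    # Note (assumption about the `datasets` Audio feature): with
    # sampling_rate=None the audio keeps its native sampling rate, and decoding
    # happens lazily when an example's "audio" field is accessed.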
def _split_generators(self, dl_manager):
"""
Har bir split uchun: tar va tsv fayllar yo'lini belgilab,
dl_manager orqali yuklab/extract qildirib, so'ng _generate_examples() ga beramiz.
"""
# local path misoli (reposingizda bo'lsa).
# Agar huggingface.co'dan yuklamoqchi bo'lsangiz, URL qilishingiz mumkin
train_tar = "Dataset_STT/audio/uz/train.tar"
train_tsv = "Dataset_STT/transcript/uz/train.tsv"
test_tar = "Dataset_STT/audio/uz/test.tar"
test_tsv = "Dataset_STT/transcript/uz/test.tsv"
val_tar = "Dataset_STT/audio/uz/validation.tar"
val_tsv = "Dataset_STT/transcript/uz/validation.tsv"
        # Download+extract these files (or, if they are local, just extract):
        # Note: if they are local, dl_manager may detect them as `is_local=True`,
        # but .extract still works.
train_tar_extracted = dl_manager.extract(train_tar)
test_tar_extracted = dl_manager.extract(test_tar)
val_tar_extracted = dl_manager.extract(val_tar)
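        # Note (alternative, not used here): for streaming support, scripts like
        # Common Voice iterate the archives with dl_manager.iter_archive(...)
        # instead of extracting them; extraction keeps this example simpler.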
        # Return a "SplitGenerator" for each split.
        # "gen_kwargs" -> the parameters passed to _generate_examples()
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"archive_dir": train_tar_extracted, # tar fayl ochilib yoyilgan papka
"tsv_path": train_tsv,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"archive_dir": test_tar_extracted,
"tsv_path": test_tsv,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"archive_dir": val_tar_extracted,
"tsv_path": val_tsv,
},
),
]
def _generate_examples(self, archive_dir, tsv_path):
"""
Ushbu metod har bir split uchun audio+transkript juftliklarini geneate qiladi.
- 'archive_dir' papkada .tar dan ochilgan .mp3 fayllar mavjud.
- 'tsv_path' faylini qatorma-qator o'qib, 'id' -> "id.mp3" yo'lini izlaymiz.
"""
# TSV ni o'qiymiz:
with open(tsv_path, "r", encoding="utf-8") as f:
reader = csv.DictReader(f, delimiter="\t")
for idx, row in enumerate(reader):
                # The tsv is expected to contain these columns:
                # id, sentence, duration, age, gender, accents, locale
audio_id = row["id"]
mp3_file = audio_id + ".mp3"
mp3_path = os.path.join(archive_dir, mp3_file)
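                # Assumption: the tar archives are flat (mp3 files at the top
                # level); if the archive contains a subfolder, adjust the join above.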
                # If the audio file exists:
if os.path.isfile(mp3_path):
yield idx, {
"id": audio_id,
"audio": mp3_path, # Audio() -> pleyer
"sentence": row.get("sentence", ""),
"duration": float(row.get("duration", 0.0)),
"age": row.get("age", ""),
"gender": row.get("gender", ""),
"accents": row.get("accents", ""),
"locale": row.get("locale", ""),
}
else:
                    # If the audio is not found, skip it (or raise an exception)
continue
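

# Minimal smoke-test sketch (assumptions: this is run from the repo root where the
# Dataset_STT/ folders exist, the file is named my_stt_dataset.py, and a recent
# `datasets` version is installed, which requires trust_remote_code=True for
# script-based datasets).
if __name__ == "__main__":
    ds = datasets.load_dataset("my_stt_dataset.py", "uz", trust_remote_code=True)
    print(ds)  # DatasetDict with train/test/validation splits
    print(ds["train"][0]["sentence"])  # first transcription of the train split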