import os
import tarfile
import csv

import datasets
from datasets import Audio


class MySTTDatasetConfig(datasets.BuilderConfig):
    """Config class (in case different versions or extra parameters are needed)."""

class MySTTDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        MySTTDatasetConfig(name="default", version=datasets.Version("1.0.0")),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description="STT dataset: audio inside .tar archives, transcripts inside .tsv files",
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "audio": Audio(sampling_rate=None),  # Audio feature type
                    "sentence": datasets.Value("string"),
                    "duration": datasets.Value("float"),
                    "age": datasets.Value("string"),
                    "gender": datasets.Value("string"),
                    "accents": datasets.Value("string"),
                    "locale": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )
    def _split_generators(self, dl_manager):
        """
        Download the remote files with dl_manager.download_and_extract(),
        or pass local paths instead.
        For example:
        https://huggingface.co/datasets/Elyordev/new_dataset_stt/resolve/main/Dataset_STT/audio/uz/train/train.tar
        https://huggingface.co/datasets/Elyordev/new_dataset_stt/resolve/main/Dataset_STT/transcript/uz/train/train.tsv
        """
        train_tar = dl_manager.download_and_extract("URL_train_tar")  # e.g. (placeholder URL)
        train_tsv = dl_manager.download("URL_train_tsv")

        val_tar = dl_manager.download_and_extract("URL_val_tar")
        val_tsv = dl_manager.download("URL_val_tsv")

        test_tar = dl_manager.download_and_extract("URL_test_tar")
        test_tsv = dl_manager.download("URL_test_tsv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "archive_path": train_tar,
                    "tsv_path": train_tsv,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "archive_path": val_tar,
                    "tsv_path": val_tsv,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "archive_path": test_tar,
                    "tsv_path": test_tsv,
                },
            ),
        ]
    def _generate_examples(self, archive_path, tsv_path):
        """
        Walk the extracted archive and yield one example per id + sentence row in the TSV.
        """
        # `archive_path` is the location returned by dl_manager.download_and_extract();
        # note that download_and_extract() unpacks the archive automatically.
        # If you do not want the .tar extracted, calling download() is enough,
        # but streaming then requires a slightly different approach.
        # Here we assume the tar file has already been extracted:
        # train.tar -> train/ (a folder that now contains the mp3 files).
        # To read directly from the .tar "on the fly", you would use
        # dl_manager.download() together with tarfile.open(...).
        # This is just an example:
        audio_base_path = archive_path  # folder path after extraction
        # It may also need to be audio_base_path = os.path.join(archive_path, "train"),
        # since the tar may contain an inner folder named "train/".
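        # Streaming alternative (a sketch only, not what this script does): keep the
        # archive unextracted with dl_manager.download() and, in _split_generators(),
        # pass dl_manager.iter_archive(train_tar) through gen_kwargs (e.g. as a
        # hypothetical "audio_files" argument). This method would then receive an
        # iterator of (path_inside_tar, file_obj) pairs:
        #
        #     for path_in_tar, f_obj in audio_files:
        #         audio_id = os.path.splitext(os.path.basename(path_in_tar))[0]
        #         example["audio"] = {"path": path_in_tar, "bytes": f_obj.read()}
        #
        # The rest of this method assumes the archive is already extracted to a folder.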
        with open(tsv_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for n, row in enumerate(reader):
                audio_id = row["id"]  # e.g. '009f0d56-c7db-4de3-bd3e-92a37d6f0cb9'
                audio_file = audio_id + ".mp3"
                # Full path to the audio file
                audio_path = os.path.join(audio_base_path, audio_file)
                # Only yield the example if the audio file exists:
                if os.path.isfile(audio_path):
                    yield n, {
                        "id": audio_id,
                        "audio": audio_path,  # giving the Audio feature just a path lets HF decode it
                        "sentence": row["sentence"],
                        "duration": float(row["duration"]),
                        "age": row["age"],
                        "gender": row["gender"],
                        "accents": row["accents"],
                        "locale": row["locale"],
                    }
                else:
                    # If the mp3 is missing, you can raise an error here
                    # or simply skip the row.
                    pass
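
# Minimal usage sketch (assumptions: this script is saved as "my_stt_dataset.py", the
# "URL_*" placeholders in _split_generators() are replaced with real download links,
# and the installed datasets version still supports loading scripts).
if __name__ == "__main__":
    from datasets import load_dataset

    # load_dataset() picks up MySTTDataset from this file and builds all three splits.
    ds = load_dataset("my_stt_dataset.py", "default")
    print(ds)
    print(ds["train"][0]["sentence"])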