import os
import csv

import datasets
from datasets import Audio


# Two configs: 'sample' and 'full'
class MySTTDatasetConfig(datasets.BuilderConfig):
    def __init__(self, limit=None, **kwargs):
        """
        limit : int or None
            How many rows to read from each split. If None, there is no limit.
        """
        super().__init__(**kwargs)
        self.limit = limit


class MySTTDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        MySTTDatasetConfig(
            name="sample",
            version=datasets.Version("1.0.0"),
            description="Show only 10k rows from each split",
            limit=10_000,  # e.g. 10 thousand
        ),
        MySTTDatasetConfig(
            name="full",
            version=datasets.Version("1.0.0"),
            description="The full dataset, without any limit",
            limit=None,
        ),
    ]
    DEFAULT_CONFIG_NAME = "sample"

    def _info(self):
        return datasets.DatasetInfo(
            description="Speech-to-text dataset (audio inside tar archives, transcripts in TSV files).",
            features=datasets.Features({
                "id": datasets.Value("string"),
                "audio": Audio(sampling_rate=None),
                "sentence": datasets.Value("string"),
                "duration": datasets.Value("float"),
                "age": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "accents": datasets.Value("string"),
                "locale": datasets.Value("string"),
            }),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        # TODO: train.tar, test.tar, validation.tar + matching TSV links or local paths
        # For now we point to local paths as an example
        train_tar = "Dataset_STT/audio/uz/train/train.tar"
        train_tsv = "Dataset_STT/transcript/uz/train/train.tsv"
        val_tar = "Dataset_STT/audio/uz/validation/validation.tar"
        val_tsv = "Dataset_STT/transcript/uz/validation/validation.tsv"
        test_tar = "Dataset_STT/audio/uz/test/test.tar"
        test_tsv = "Dataset_STT/transcript/uz/test/test.tsv"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "tar_path": train_tar,
                    "tsv_path": train_tsv,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "tar_path": val_tar,
                    "tsv_path": val_tsv,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "tar_path": test_tar,
                    "tsv_path": test_tsv,
                },
            ),
        ]

    def _generate_examples(self, tar_path, tsv_path):
        """
        With limit=10_000, only 10 thousand rows are yielded per split.
        With limit=None, every row is read.
        """
        limit = self.config.limit

        # Either extract the mp3 files from the tar beforehand, or read them on the fly.
        # Note: for the HF Viewer, the easiest option is to unpack the audio into a folder
        # or to use `dl_manager.download_and_extract(...)`.
        # Here, as an example, we assume the tar has been unpacked into a folder of audio files:
        # e.g. the audio folder: "Dataset_STT/audio/uz/train/unpacked"
        # or the full path: tar_path = "Dataset_STT/audio/uz/train/train.tar"
        # You have to create the "unpacked" folder yourself beforehand with tar -xvf,
        # or do the extraction in Python with the tarfile module.

        # For simplicity, we assume the tar has already been unpacked manually:
        audio_folder = tar_path.replace(".tar", "_unpacked")  # e.g. "Dataset_STT/audio/uz/train/train_unpacked"

        # Then read the TSV:
        with open(tsv_path, "r", encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for idx, row in enumerate(reader):
                if limit is not None and idx >= limit:
                    break  # stop once we hit the limit

                audio_id = row["id"]
                mp3_file = audio_id + ".mp3"
                mp3_path = os.path.join(audio_folder, mp3_file)

                yield idx, {
                    "id": audio_id,
                    "audio": mp3_path,
                    "sentence": row["sentence"],
                    "duration": float(row["duration"]),
                    "age": row["age"],
                    "gender": row["gender"],
                    "accents": row["accents"],
                    "locale": row["locale"],
                }
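

# The comments in `_generate_examples` mention extracting the tar either manually
# (tar -xvf) or from Python with the tarfile module. The helper below is a minimal,
# optional sketch of the Python route; it is not wired into the loader above, and it
# simply produces the "<name>_unpacked" folder layout that the loader already assumes.
import tarfile


def unpack_tar(tar_path):
    """Extract `tar_path` into a sibling "<name>_unpacked" folder, if not done already."""
    audio_folder = tar_path.replace(".tar", "_unpacked")
    if not os.path.isdir(audio_folder):
        os.makedirs(audio_folder, exist_ok=True)
        with tarfile.open(tar_path, "r") as tar:
            tar.extractall(audio_folder)
    return audio_folder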
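

# A minimal usage sketch, assuming this script is saved locally as "my_stt_dataset.py"
# (the filename below is illustrative, not part of the original):
if __name__ == "__main__":
    from datasets import load_dataset

    # "sample" config: at most 10k rows per split; "full": no limit.
    ds = load_dataset("my_stt_dataset.py", "sample")  # some datasets versions may require trust_remote_code=True
    print(ds)
    print(ds["train"][0]["sentence"])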