# Dataset_STT / dataset_stt.py
import csv
import os
import tarfile
import datasets
from tqdm import tqdm
_DESCRIPTION = """\
This dataset is designed for speech-to-text (STT) tasks. It contains audio files stored in tar archives along with their corresponding transcripts in TSV format. The audio is in Uzbek.
"""
_CITATION = """\
@misc{dataset_stt2025,
title={Dataset_STT},
author={Your Name},
year={2025}
}
"""
class DatasetSTT(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
def _info(self):
features = datasets.Features({
"id": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=16000), # Agar kerak bo'lsa, sampling_rate ni moslashtiring
"sentence": datasets.Value("string"),
"duration": datasets.Value("float"),
"age": datasets.Value("string"),
"gender": datasets.Value("string"),
"accents": datasets.Value("string"),
"locale": datasets.Value("string")
})
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage="https://huggingface.co/datasets/Elyordev/Dataset_STT",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""
        Define the files required for each split.
        Three splits are supported: TRAIN, TEST and VALIDATION.
        For each split, the audio archive and the transcript TSV file are taken from the data_files argument.
"""
data_files = self.config.data_files
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"audio_archive": data_files["train"]["audio"],
"transcript_file": data_files["train"]["transcript"],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"audio_archive": data_files["test"]["audio"],
"transcript_file": data_files["test"]["transcript"],
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"audio_archive": data_files["validation"]["audio"],
"transcript_file": data_files["validation"]["transcript"],
},
),
]
def _generate_examples(self, audio_archive, transcript_file):
"""
        Read the transcript TSV file and, for each record:
          - open the tar archive and index its audio files,
          - look up the matching audio file via the "path" column of the transcript,
          - read the audio file as raw bytes and return it as the "audio" field.
"""
        # Open the tar archive
with tarfile.open(audio_archive, "r:*") as tar:
            # Index all regular files in the archive (file name -> TarInfo)
tar_index = {os.path.basename(member.name): member for member in tar.getmembers() if member.isfile()}
            # Open the transcript TSV file (UTF-8 encoded)
with open(transcript_file, "r", encoding="utf-8") as f:
reader = csv.DictReader(f, delimiter="\t")
for row in tqdm(reader, desc="Processing transcripts"):
file_name = row["path"] # Masalan: "2cd08f62-aa25-4f5e-bb73-40cfc19a215e.mp3"
if file_name not in tar_index:
print(f"Warning: {file_name} not found in {audio_archive}")
continue
audio_member = tar.extractfile(tar_index[file_name])
if audio_member is None:
print(f"Warning: Could not extract {file_name}")
continue
audio_bytes = audio_member.read()
yield row["id"], {
"id": row["id"],
"audio": {"path": file_name, "bytes": audio_bytes},
"sentence": row["sentence"],
"duration": float(row["duration"]) if row["duration"] else 0.0,
"age": row["age"],
"gender": row["gender"],
"accents": row["accents"],
"locale": row["locale"],
}
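

if __name__ == "__main__":
    # Minimal usage sketch, not part of the builder itself. It assumes the Hub repo
    # "Elyordev/Dataset_STT" exposes this script together with data files laid out as
    # the builder expects (per-split tar archives plus TSV transcripts). Recent
    # versions of the datasets library may additionally require trust_remote_code=True
    # when loading script-based datasets.
    ds = datasets.load_dataset("Elyordev/Dataset_STT", split="train")
    first = ds[0]
    print(first["id"], first["sentence"], first["duration"])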