Elyordev committed on
Commit d2dfbf5 · verified · 1 Parent(s): 40a22e8

Upload dataset.py

Files changed (1)
  1. dataset.py +46 -220
dataset.py CHANGED
@@ -1,222 +1,48 @@
- # coding=utf-8
- # Copyright 2023
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # This file is a Common Voice-style dataset loading script: it downloads the
- # audio/uz/<split>/<split>.tar and transcript/uz/<split>/<split>.tsv files
- # and joins each audio clip with its transcription.
-
  import os
  import csv
- import json
-
- import datasets
- from datasets.utils.py_utils import size_str
-
-
- # ------------------ 1. Metadata and settings ------------------
- _CITATION = """\
- @misc{yourcitation,
-   title = {Your STT dataset title},
-   author = {You or your org},
-   year = {2023},
-   url = {https://huggingface.co/datasets/Elyordev/new_dataset_stt_audio}
- }
- """
-
- _DESCRIPTION = """\
- This dataset consists of mp3 audio files and tsv metadata files.
- The folder layout is similar to Common Voice:
- audio/uz/[train|validation|test]/*.tar and transcript/uz/[train|validation|test]/*.tsv
- """
-
- _HOMEPAGE = "https://huggingface.co/datasets/Elyordev/new_dataset_stt_audio"
- _LICENSE = "Apache License 2.0"
-
- # A single language: "uz" (more can be added if needed)
- LANGUAGES = {
-     "uz": {
-         "language_name": "Uzbek",
-         "num_clips": None,  # Optionally enter an approximate clip count
-         "num_speakers": None,
-         "validated_hr": None,
-         "total_hr": None,
-         "size_bytes": None,
-     },
- }
-
- # We assume each split has exactly one tar shard
- N_SHARDS = {
-     "uz": {
-         "train": 1,
-         "validation": 1,
-         "test": 1,
-     }
- }
-
- # Base URL used to resolve files in the repository
- _BASE_URL = "https://huggingface.co/datasets/Elyordev/new_dataset_stt_audio/resolve/main/"
-
- # Audio file path: audio/uz/<split>/<split>.tar
- _AUDIO_URL = _BASE_URL + "audio/{lang}/{split}/{split}.tar"
-
- # Transcript file path: transcript/uz/<split>/<split>.tsv
- _TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}/{split}.tsv"
-
-
- # ------------------ 2. Config class ------------------
- class NewDatasetSTTAudioConfig(datasets.BuilderConfig):
-     """A single config (e.g. 'uz'); more languages can be added if needed."""
-     def __init__(self, language, **kwargs):
-         super().__init__(**kwargs)
-         self.language = language
-         self.num_clips = LANGUAGES[language]["num_clips"]
-         self.num_speakers = LANGUAGES[language]["num_speakers"]
-         self.validated_hr = LANGUAGES[language]["validated_hr"]
-         self.total_hr = LANGUAGES[language]["total_hr"]
-         self.size_bytes = LANGUAGES[language]["size_bytes"]
-         self.size_human = size_str(self.size_bytes) if self.size_bytes else None
-
-
- # ------------------ 3. Main dataset builder ------------------
- class NewDatasetSTTAudio(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         # For example, only the "uz" config
-         NewDatasetSTTAudioConfig(
-             name="uz",
-             version=datasets.Version("1.0.0"),
-             description="Uzbek STT dataset with Common Voice-like structure",
-             language="uz",
-         ),
-     ]
-     DEFAULT_WRITER_BATCH_SIZE = 1000
-
-     def _info(self):
-         lang = self.config.language
-         # The description can be customised as needed
-         description = (
-             f"Common Voice-style dataset: language = {lang}. "
-             f"{_DESCRIPTION}"
-         )
-         features = datasets.Features(
-             {
-                 "id": datasets.Value("string"),
-                 "path": datasets.Value("string"),
-                 "audio": datasets.features.Audio(sampling_rate=16000),  # if the audio is 16 kHz
-                 "sentence": datasets.Value("string"),
-                 "age": datasets.Value("string"),
-                 "gender": datasets.Value("string"),
-                 "accents": datasets.Value("string"),
-                 "locale": datasets.Value("string"),
-                 "duration": datasets.Value("float"),  # if it is stored as a float in the tsv
-             }
-         )
-         return datasets.DatasetInfo(
-             description=description,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-             version=self.config.version,
-         )
-
-     def _split_generators(self, dl_manager):
-         """
-         As in the Common Voice example:
-         download the tar and tsv files for the train, validation and test splits.
-         """
-         lang = self.config.language
-         n_shards = N_SHARDS[lang]  # {'train': 1, 'validation': 1, 'test': 1}
-         split_generators = []
-
-         # Our splits = ["train", "validation", "test"]
-         for split in ["train", "validation", "test"]:
-             # List of audio (tar) URLs (one shard per split)
-             audio_urls = [
-                 _AUDIO_URL.format(lang=lang, split=split, shard_idx=i)
-                 for i in range(n_shards[split])
-             ]
-             # Download the .tar files
-             audio_paths = dl_manager.download(audio_urls)
-
-             # Either stream the .tar files or extract them fully
-             # Common Voice streams via 'iter_archive', but a local extraction also works
-             local_extracted_archive_paths = []
-             if not dl_manager.is_streaming:
-                 local_extracted_archive_paths = dl_manager.extract(audio_paths)
-
-             # Transcript (tsv) URL
-             transcript_url = _TRANSCRIPT_URL.format(lang=lang, split=split)
-             transcript_path = dl_manager.download_and_extract(transcript_url)
-
-             split_generators.append(
-                 datasets.SplitGenerator(
-                     name=getattr(datasets.Split, split.upper()),
-                     gen_kwargs={
-                         "archives": [
-                             dl_manager.iter_archive(path) for path in audio_paths
-                         ],
-                         "local_extracted_archive_paths": local_extracted_archive_paths,
-                         "meta_path": transcript_path,
-                     },
-                 )
-             )
-
-         return split_generators
-
-     def _generate_examples(self, archives, local_extracted_archive_paths, meta_path):
-         """
-         For each split:
-         1) read the transcript .tsv file
-         2) iterate over the files inside the audio tar via "archives"
-         3) match each audio file name against the 'path' column of the tsv
-         4) yield the resulting (key, example) pairs
-         """
-         # Read the tsv file (meta_path) and build a metadata dictionary
-         # format: { "filename.mp3": { ... columns ... } }
-         metadata = {}
-         with open(meta_path, encoding="utf-8") as f:
-             reader = csv.DictReader(f, delimiter="\t")
-             for row in reader:
-                 # Safety measure: if the path does not end in .mp3, append it
-                 if not row["path"].endswith(".mp3"):
-                     row["path"] += ".mp3"
-                 metadata[row["path"]] = row
-
-         # Now read the tar files
-         # In the Common Voice example a split may have several shards, hence the list
-         for shard_idx, archive in enumerate(archives):
-             # archive = dl_manager.iter_archive(path) => (path_in_tar, fileobj) generator
-             for path_in_tar, fileobj in archive:
-                 # For example, path_in_tar = "common_voice_uz_12345.mp3"
-                 _, filename = os.path.split(path_in_tar)
-                 if filename in metadata:
-                     # Fetch the metadata row
-                     row = metadata[filename]
-                     # local_extracted_archive_paths[shard_idx] => where the .tar file was extracted
-                     # If it was not fully extracted, working directly with the bytes is also fine
-                     # The official Common Voice example does 'result["audio"] = {"path": path, "bytes": file.read()}'
-
-                     example = dict(row)
-                     # If there is no "id" column, path_in_tar can be used as the key
-                     if "id" not in example:
-                         example["id"] = filename
-
-                     # Audio: bytes read from inside the tar file
-                     # The datasets "Audio" feature does not take raw bytes on its own,
-                     # but passing "path" + "bytes" is the pattern used in the Common Voice script
-                     # Decoding happens in a later step
-                     example["audio"] = {
-                         "path": path_in_tar,
-                         "bytes": fileobj.read(),
-                     }
-
-                     # The extra columns also come from row:
-                     # sentence, age, gender, accents, locale, duration, ...
-                     # If a column is missing, an empty value is used
-                     # We already did dict(row) above, so "example" contains every column.
-
-                     yield path_in_tar, example

  import os
  import csv
+ import tarfile
+
+ def _generate_examples(self, prompts_path, audio_tar_path):
+     """
+     Yields examples as (key, example) tuples.
+
+     Args:
+         prompts_path (str): transcript/uz/<split>/<split>.tsv – path to the metadata file.
+         audio_tar_path (str): audio/uz/<split>/<split>.tar – path to the tar archive (containing *.mp3).
+     """
+     # 1. Read the metadata from the TSV file
+     metadata_map = {}
+     with open(prompts_path, encoding="utf-8") as f:
+         reader = csv.DictReader(f, delimiter="\t")
+         for row in reader:
+             # The "path" column is expected to hold the file name, e.g. "H3H38EY38D8.mp3"
+             file_name = row["path"].strip()
+             if not file_name.endswith(".mp3"):
+                 file_name += ".mp3"
+             metadata_map[file_name] = row
+
+     # 2. Read the audio files from inside the tar archive
+     id_ = 0
+     with tarfile.open(audio_tar_path, "r") as tar:
+         for member in tar.getmembers():
+             # Keep only the base file name from the path inside the tar
+             file_name = os.path.basename(member.name)
+             if file_name in metadata_map:
+                 row = metadata_map[file_name]
+                 audio_file = tar.extractfile(member)
+                 if audio_file is None:
+                     continue  # Skip the member if it cannot be opened
+                 audio_bytes = audio_file.read()
+                 audio = {"path": file_name, "bytes": audio_bytes}
+                 yield id_, {
+                     "id": row.get("id", file_name),
+                     "path": row.get("path", file_name),
+                     "sentence": row.get("sentence", ""),
+                     "duration": float(row.get("duration", 0.0)),
+                     "age": row.get("age", ""),
+                     "gender": row.get("gender", ""),
+                     "accents": row.get("accents", ""),
+                     "locale": row.get("locale", ""),
+                     "audio": audio,
+                 }
+                 id_ += 1
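
The uploaded file only defines _generate_examples(prompts_path, audio_tar_path); it does not show the builder class or _split_generators that would supply those two paths. Below is a minimal sketch, not part of this commit, of how the new function could be wired into a GeneratorBasedBuilder. The URL templates, the "uz" prefix, and the split names are assumptions carried over from the removed script.

import datasets

# Assumed base URL, taken from the removed version of the script.
_BASE_URL = "https://huggingface.co/datasets/Elyordev/new_dataset_stt_audio/resolve/main/"

class NewDatasetSTTAudio(datasets.GeneratorBasedBuilder):
    def _info(self):
        # Features mirror the columns yielded by the new _generate_examples.
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "duration": datasets.Value("float"),
                    "age": datasets.Value("string"),
                    "gender": datasets.Value("string"),
                    "accents": datasets.Value("string"),
                    "locale": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16000),
                }
            )
        )

    def _split_generators(self, dl_manager):
        splits = []
        for split in ["train", "validation", "test"]:
            # Download the per-split tar archive and tsv transcript. This assumes
            # non-streaming use, since _generate_examples opens the archive with
            # tarfile.open on a local path.
            audio_tar_path = dl_manager.download(_BASE_URL + f"audio/uz/{split}/{split}.tar")
            prompts_path = dl_manager.download(_BASE_URL + f"transcript/uz/{split}/{split}.tsv")
            splits.append(
                datasets.SplitGenerator(
                    name=getattr(datasets.Split, split.upper()),
                    gen_kwargs={"prompts_path": prompts_path, "audio_tar_path": audio_tar_path},
                )
            )
        return splits

    # _generate_examples(self, prompts_path, audio_tar_path) as committed above

With a builder like this in place, the dataset could be loaded with something like load_dataset("Elyordev/new_dataset_stt_audio", split="train", trust_remote_code=True); on recent versions of the datasets library, script-based datasets require the trust_remote_code flag.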