Elyordev committed on
Commit
40a22e8
·
verified ·
1 Parent(s): f50b5e1

Upload dataset.py

Browse files
Files changed (1) hide show
  1. dataset.py +185 -94
dataset.py CHANGED
@@ -1,131 +1,222 @@
1
- import csv
 
 
 
 
 
 
 
 
 
2
  import os
3
- from typing import Iterator, Tuple
 
4
 
5
  import datasets
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
  _DESCRIPTION = """\
8
  Bu dataset mp3 formatdagi audio fayllar va tsv metadata fayllardan iborat.
9
- Audio fayllar .tar arxiv ichida saqlangan va tsv faylda fayl nomlari (masalan, H3H38EY38D8.mp3) keltirilgan.
10
- Katta datasetning faqat 100 tadan yozuvi olingan mini versiyasi.
11
  """
12
 
13
- _HOMEPAGE = "https://huggingface.co/datasets/Elyordev/new_dataset_stt_mini"
14
- _LICENSE = "MIT"
15
 
16
- # Har bir split uchun .tsv va .tar fayllarning repo ichidagi joylashuvi (mini variantda ham xuddi shu).
17
- _URLS = {
18
- "train": {
19
- "tsv": "train/train.tsv",
20
- "tar": "train/train.tar",
21
- },
22
- "validation": {
23
- "tsv": "validation/validation.tsv",
24
- "tar": "validation/validation.tar",
25
- },
26
- "test": {
27
- "tsv": "test/test.tsv",
28
- "tar": "test/test.tar",
29
  },
30
  }
31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
- class MyMiniDatasetSTTConfig(datasets.BuilderConfig):
34
- def __init__(self, **kwargs):
35
- super(MyMiniDatasetSTTConfig, self).__init__(**kwargs)
36
 
37
 
38
- class MyMiniDatasetSTT(datasets.GeneratorBasedBuilder):
39
- VERSION = datasets.Version("1.0.0")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  BUILDER_CONFIGS = [
41
- MyMiniDatasetSTTConfig(
42
- name="default",
43
- version=VERSION,
44
- description="Mini STT dataset with mp3 audios in tar archives (100 examples per split)",
 
 
45
  ),
46
  ]
 
47
 
48
  def _info(self):
49
- return datasets.DatasetInfo(
50
- description=_DESCRIPTION,
51
- features=datasets.Features({
 
 
 
 
 
52
  "id": datasets.Value("string"),
53
- "path": datasets.Value("string"), # Fayl nomi, masalan: H3H38EY38D8.mp3
 
54
  "sentence": datasets.Value("string"),
55
- "duration": datasets.Value("float"),
56
  "age": datasets.Value("string"),
57
  "gender": datasets.Value("string"),
58
  "accents": datasets.Value("string"),
59
  "locale": datasets.Value("string"),
60
- # Audio feature: datasets.Audio avtomatik tarzda tar URI orqali yuklaydi
61
- "audio": datasets.Audio(sampling_rate=16000),
62
- }),
 
 
 
63
  supervised_keys=None,
64
  homepage=_HOMEPAGE,
65
  license=_LICENSE,
 
 
66
  )
67
 
68
  def _split_generators(self, dl_manager):
69
  """
70
- Har bir split uchun .tsv va .tar fayllarni yuklab olamiz.
 
71
  """
72
- downloaded_files = {}
73
- for split in _URLS:
74
- downloaded_files[split] = {
75
- "tsv": dl_manager.download_and_extract(_URLS[split]["tsv"]),
76
- "tar": dl_manager.download_and_extract(_URLS[split]["tar"]),
77
- }
78
- return [
79
- datasets.SplitGenerator(
80
- name=datasets.Split.TRAIN,
81
- gen_kwargs={
82
- "tsv_path": downloaded_files["train"]["tsv"],
83
- "tar_path": downloaded_files["train"]["tar"],
84
- },
85
- ),
86
- datasets.SplitGenerator(
87
- name=datasets.Split.VALIDATION,
88
- gen_kwargs={
89
- "tsv_path": downloaded_files["validation"]["tsv"],
90
- "tar_path": downloaded_files["validation"]["tar"],
91
- },
92
- ),
93
- datasets.SplitGenerator(
94
- name=datasets.Split.TEST,
95
- gen_kwargs={
96
- "tsv_path": downloaded_files["test"]["tsv"],
97
- "tar_path": downloaded_files["test"]["tar"],
98
- },
99
- ),
100
- ]
101
-
102
- def _generate_examples(self, tsv_path: str, tar_path: str) -> Iterator[Tuple[int, dict]]:
 
 
 
 
 
 
 
 
 
103
  """
104
- Har bir .tsv fayldagi qatordan misol (example) yaratamiz.
105
- Audio faylga murojaat qilish uchun "tar://" sintaksisidan foydalanamiz:
106
- "tar://<tar fayl yo'li>#<tsv fayldagi path>".
107
-
108
- Katta datasetni cheklash uchun 100 misoldan keyin break qilamiz.
109
  """
110
- with open(tsv_path, encoding="utf-8") as f:
 
 
 
111
  reader = csv.DictReader(f, delimiter="\t")
112
- for idx, row in enumerate(reader):
113
- if idx >= 100:
114
- # faqat 100 ta misol bilan to'xtatamiz
115
- break
116
-
117
- mp3_file = row["path"]
118
- # Audio fayl uchun URI: masalan, "tar://.../train.tar#H3H38EY38D8.mp3"
119
- audio_ref = f"tar://{tar_path}#{mp3_file}"
120
-
121
- yield idx, {
122
- "id": row["id"],
123
- "path": mp3_file,
124
- "sentence": row["sentence"],
125
- "duration": float(row.get("duration", 0.0)),
126
- "age": row.get("age", ""),
127
- "gender": row.get("gender", ""),
128
- "accents": row.get("accents", ""),
129
- "locale": row.get("locale", ""),
130
- "audio": audio_ref,
131
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ #
7
+ # Ushbu fayl Common Voice uslubidagi dataset loading script bo'lib,
8
+ # audio/uz/<split>/<split>.tar va transcript/uz/<split>/<split>.tsv
9
+ # fayllarni yuklab, audio+transkriptsiyani birlashtiradi.
10
+
11
  import os
12
+ import csv
13
+ import json
14
 
15
  import datasets
16
+ from datasets.utils.py_utils import size_str
17
+
18
+
19
# ------------------ 1. Metadata and settings ------------------
# BibTeX citation surfaced through DatasetInfo.citation.
_CITATION = """\
@misc{yourcitation,
title = {Your STT dataset title},
author = {You or your org},
year = {2023},
url = {https://huggingface.co/datasets/Elyordev/new_dataset_stt_audio}
}
"""

# Dataset card description (kept in Uzbek — it is user-facing runtime text).
_DESCRIPTION = """\
Bu dataset mp3 formatdagi audio fayllar va tsv metadata fayllardan iborat.
Papka tuzilishi Common Voice uslubiga o'xshash:
audio/uz/[train|validation|test]/*.tar va transcript/uz/[train|validation|test]/*.tsv
"""

_HOMEPAGE = "https://huggingface.co/datasets/Elyordev/new_dataset_stt_audio"
_LICENSE = "Apache License 2.0"

# Single language: "uz" (extend this mapping to add more languages).
# Per-language summary statistics; None means "not provided yet".
LANGUAGES = {
    "uz": {
        "language_name": "Uzbek",
        "num_clips": None,  # optionally fill in the approximate clip count
        "num_speakers": None,
        "validated_hr": None,
        "total_hr": None,
        "size_bytes": None,
    },
}

# We assume each split ships exactly one tar shard.
N_SHARDS = {
    "uz": {
        "train": 1,
        "validation": 1,
        "test": 1,
    }
}

# Base URL used to resolve files inside the Hub repository.
_BASE_URL = "https://huggingface.co/datasets/Elyordev/new_dataset_stt_audio/resolve/main/"

# Audio archive path: audio/uz/<split>/<split>.tar
_AUDIO_URL = _BASE_URL + "audio/{lang}/{split}/{split}.tar"

# Transcript path: transcript/uz/<split>/<split>.tsv
_TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}/{split}.tsv"
 
67
 
68
 
69
# ------------------ 2. Config class ------------------
class NewDatasetSTTAudioConfig(datasets.BuilderConfig):
    """Builder configuration for one language (e.g. "uz").

    Copies the per-language summary statistics out of ``LANGUAGES`` onto the
    config instance so they are available alongside the standard
    ``BuilderConfig`` fields.
    """

    def __init__(self, language, **kwargs):
        super().__init__(**kwargs)
        stats = LANGUAGES[language]
        self.language = language
        self.num_clips = stats["num_clips"]
        self.num_speakers = stats["num_speakers"]
        self.validated_hr = stats["validated_hr"]
        self.total_hr = stats["total_hr"]
        self.size_bytes = stats["size_bytes"]
        # Human-readable size only when a byte count was actually provided.
        if self.size_bytes:
            self.size_human = size_str(self.size_bytes)
        else:
            self.size_human = None
81
+
82
+
83
# ------------------ 3. Main dataset builder ------------------
class NewDatasetSTTAudio(datasets.GeneratorBasedBuilder):
    """Common Voice-style speech-to-text dataset builder.

    For every split (train/validation/test) it downloads one or more ``.tar``
    audio shards plus a ``.tsv`` transcript file, then joins each audio file's
    bytes with its metadata row, keyed by file name.
    """

    BUILDER_CONFIGS = [
        # Only the "uz" config for now; extend LANGUAGES/N_SHARDS and add
        # more entries here to support additional languages.
        NewDatasetSTTAudioConfig(
            name="uz",
            version=datasets.Version("1.0.0"),
            description="Uzbek STT dataset with Common Voice-like structure",
            language="uz",
        ),
    ]
    DEFAULT_WRITER_BATCH_SIZE = 1000

    def _info(self):
        """Return the DatasetInfo: features, description, citation, version."""
        lang = self.config.language
        description = (
            f"Common Voice uslubidagi dataset: til = {lang}. "
            f"{_DESCRIPTION}"
        )
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "path": datasets.Value("string"),
                # Audio is decoded lazily from the stored mp3 bytes/path.
                # 16 kHz is assumed — TODO confirm the source sampling rate.
                "audio": datasets.features.Audio(sampling_rate=16000),
                "sentence": datasets.Value("string"),
                "age": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "accents": datasets.Value("string"),
                "locale": datasets.Value("string"),
                "duration": datasets.Value("float"),
            }
        )
        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):
        """Download the tar/tsv files for each split and build SplitGenerators.

        Works in both streaming and non-streaming mode: archives are always
        iterated via ``iter_archive``; in non-streaming mode they are also
        extracted so examples can reference real on-disk paths.
        """
        lang = self.config.language
        n_shards = N_SHARDS[lang]  # e.g. {'train': 1, 'validation': 1, 'test': 1}
        split_generators = []

        for split in ["train", "validation", "test"]:
            # NOTE: _AUDIO_URL has no "{shard_idx}" placeholder, so every
            # shard index would resolve to the same URL. With the current
            # one-shard-per-split layout this is fine; to support multiple
            # shards, add a shard placeholder to the URL pattern first.
            audio_urls = [
                _AUDIO_URL.format(lang=lang, split=split)
                for _ in range(n_shards[split])
            ]
            audio_paths = dl_manager.download(audio_urls)

            # Non-streaming mode: extract archives so the Audio feature can
            # point at concrete files on disk (Common Voice does the same).
            local_extracted_archive_paths = []
            if not dl_manager.is_streaming:
                local_extracted_archive_paths = dl_manager.extract(audio_paths)

            transcript_url = _TRANSCRIPT_URL.format(lang=lang, split=split)
            transcript_path = dl_manager.download_and_extract(transcript_url)

            split_generators.append(
                datasets.SplitGenerator(
                    name=getattr(datasets.Split, split.upper()),
                    gen_kwargs={
                        "archives": [
                            dl_manager.iter_archive(path) for path in audio_paths
                        ],
                        "local_extracted_archive_paths": local_extracted_archive_paths,
                        "meta_path": transcript_path,
                    },
                )
            )

        return split_generators

    @staticmethod
    def _parse_duration(value):
        """Parse a TSV duration cell into a float; missing/bad cells -> 0.0."""
        try:
            return float(value)
        except (TypeError, ValueError):
            return 0.0

    def _generate_examples(self, archives, local_extracted_archive_paths, meta_path):
        """Yield ``(key, example)`` pairs for one split.

        1) Read the transcript ``.tsv`` into a {file name -> metadata row} map.
        2) Iterate the tar archive members and look each one up in that map
           (members without a transcript row are skipped).
        3) Emit only the columns declared in ``_info`` — stray TSV columns
           would otherwise break example encoding — and cast ``duration`` to
           float to match the declared ``Value("float")`` feature.
        """
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for row in reader:
                # Defensive: ensure the path carries the .mp3 extension so it
                # matches the file names inside the tar archive.
                if not row["path"].endswith(".mp3"):
                    row["path"] += ".mp3"
                metadata[row["path"]] = row

        # A split may consist of several shards, hence the list of archives.
        for shard_idx, archive in enumerate(archives):
            # archive = dl_manager.iter_archive(path) => (member path, fileobj)
            for path_in_tar, fileobj in archive:
                _, filename = os.path.split(path_in_tar)
                row = metadata.get(filename)
                if row is None:
                    # Audio file without a transcript row — skip it.
                    continue

                # Prefer the extracted on-disk path in non-streaming mode so
                # the Audio feature can re-read the file; otherwise fall back
                # to the in-archive path plus raw bytes (Common Voice style).
                if local_extracted_archive_paths:
                    audio_path = os.path.join(
                        local_extracted_archive_paths[shard_idx], path_in_tar
                    )
                else:
                    audio_path = path_in_tar

                yield path_in_tar, {
                    "id": row.get("id", filename),
                    "path": row["path"],
                    "sentence": row.get("sentence", ""),
                    "age": row.get("age", ""),
                    "gender": row.get("gender", ""),
                    "accents": row.get("accents", ""),
                    "locale": row.get("locale", ""),
                    "duration": self._parse_duration(row.get("duration")),
                    "audio": {"path": audio_path, "bytes": fileobj.read()},
                }