Elyordev committed on
Commit
29d69d4
·
verified ·
1 Parent(s): 5adffe2

Upload dataset.py

Browse files
Files changed (1) hide show
  1. dataset.py +188 -45
dataset.py CHANGED
@@ -1,48 +1,191 @@
1
- import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import csv
 
3
  import tarfile
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
def _generate_examples(self, prompts_path: str, audio_tar_path: str):
    """
    Yields examples as (key, example) tuples.

    Reads the split's TSV metadata into a map keyed by audio file name, then
    walks the tar archive and emits one example per member that has metadata.

    Args:
        prompts_path (str): transcript/uz/<split>/<split>.tsv – path to the metadata file.
        audio_tar_path (str): audio/uz/<split>/<split>.tar – path to the tar archive (contains *.mp3).
    """
    # 1. Read the metadata from the TSV file.
    metadata_map = {}
    with open(prompts_path, encoding="utf-8") as f:
        reader = csv.DictReader(f, delimiter="\t")
        for row in reader:
            # The "path" column is expected to hold the file name, e.g. "H3H38EY38D8.mp3";
            # a missing ".mp3" suffix is appended so it matches the tar member names.
            file_name = row["path"].strip()
            if not file_name.endswith(".mp3"):
                file_name += ".mp3"
            metadata_map[file_name] = row

    # 2. Read the audio files from inside the tar archive.
    id_ = 0
    with tarfile.open(audio_tar_path, "r") as tar:
        for member in tar.getmembers():
            # Keep only the base name of the tar member (drop any directory prefix).
            file_name = os.path.basename(member.name)
            if file_name in metadata_map:
                row = metadata_map[file_name]
                audio_file = tar.extractfile(member)
                if audio_file is None:
                    continue  # Skip members that cannot be opened (e.g. directories).
                audio_bytes = audio_file.read()
                # The Audio feature accepts a {"path", "bytes"} dict and decodes the bytes.
                audio = {"path": file_name, "bytes": audio_bytes}
                yield id_, {
                    "id": row.get("id", file_name),
                    "path": row.get("path", file_name),
                    "sentence": row.get("sentence", ""),
                    "duration": float(row.get("duration", 0.0)),
                    "age": row.get("age", ""),
                    "gender": row.get("gender", ""),
                    "accents": row.get("accents", ""),
                    "locale": row.get("locale", ""),
                    "audio": audio,
                }
                id_ += 1
 
1
+ # coding=utf-8
2
+ """
3
+ new_dataset_stt_audio dataset.
4
+
5
+ This dataset consists of audio files stored in tar archives and transcript files in TSV format.
6
+ The dataset structure is as follows:
7
+
8
+ new_dataset_stt_audio/
9
+ ├── audio/
10
+ │   └── uz/
11
+ │       ├── train/
12
+ │       │   └── train.tar
13
+ │       ├── validation/
14
+ │       │   └── validation.tar
15
+ │       └── test/
16
+ │           └── test.tar
17
+ └── transcript/
18
+     └── uz/
19
+         ├── train/
20
+         │   └── train.tsv
21
+         ├── validation/
22
+         │   └── validation.tsv
23
+         └── test/
24
+             └── test.tsv
25
+
26
+ Each transcript TSV file has columns:
27
+ id, path, sentence, duration, age, gender, accents, locale.
28
+
29
+ The audio field is loaded using a tar URI, allowing streaming from the tar archive.
30
+ """
31
+
32
  import csv
33
+ import os
34
  import tarfile
35
+ from typing import Iterator, Tuple
36
+
37
+ import datasets
38
+
39
+ _CITATION = """\
40
+ @misc{yourcitation2023,
41
+ title={Your Dataset Title},
42
+ author={Your Name},
43
+ year={2023},
44
+ url={https://huggingface.co/datasets/Elyordev/new_dataset_stt_audio}
45
+ }
46
+ """
47
+
48
+ _DESCRIPTION = """\
49
+ This dataset consists of audio files and corresponding transcripts for speech-to-text tasks.
50
+ The audio files are stored in tar archives under the audio/uz folder for each split (train, validation, test),
51
+ and the transcripts are stored as TSV files under transcript/uz for each split.
52
+ The transcript TSV files have the following columns:
53
+ id, path, sentence, duration, age, gender, accents, locale.
54
+ The audio is loaded using a tar URI to enable streaming.
55
+ """
56
+
57
+ _HOMEPAGE = "https://huggingface.co/datasets/Elyordev/new_dataset_stt_audio"
58
+ _LICENSE = "MIT"
59
+
60
class NewDatasetSTTAudioConfig(datasets.BuilderConfig):
    """Configuration for the new_dataset_stt_audio builder.

    Attributes:
        language: Language subfolder to read audio/transcripts from (defaults to "uz").
    """

    def __init__(self, language="uz", **kwargs):
        """Store the language; all other keyword arguments go to BuilderConfig."""
        super().__init__(**kwargs)
        self.language = language
65
+
66
class NewDatasetSTTAudio(datasets.GeneratorBasedBuilder):
    """Speech-to-text dataset builder.

    Audio is stored in one tar archive per split (audio/<lang>/<split>/<split>.tar)
    and transcripts in one TSV per split (transcript/<lang>/<split>/<split>.tsv)
    with columns: id, path, sentence, duration, age, gender, accents, locale.
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        NewDatasetSTTAudioConfig(
            name="default",
            version=VERSION,
            description="STT dataset with audio tar archives and transcript TSV files for Uzbek language",
            language="uz",
        ),
    ]

    def _info(self):
        """Return dataset metadata: feature schema, homepage, license, citation."""
        features = datasets.Features({
            "id": datasets.Value("string"),
            "path": datasets.Value("string"),
            "sentence": datasets.Value("string"),
            "duration": datasets.Value("float"),
            "age": datasets.Value("string"),
            "gender": datasets.Value("string"),
            "accents": datasets.Value("string"),
            "locale": datasets.Value("string"),
            # Decoded lazily by the Audio feature from the raw bytes yielded below.
            "audio": datasets.Audio(sampling_rate=16000),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """
        Returns SplitGenerators.

        Expects the dataset to be provided manually via the repository.
        The manual_dir should contain the following structure:

        new_dataset_stt_audio/
            audio/<language>/{train, validation, test}/<split>.tar
            transcript/<language>/{train, validation, test}/<split>.tsv
        """
        manual_dir = dl_manager.manual_dir if dl_manager.manual_dir is not None else ""
        language = self.config.language

        def _split_paths(split: str) -> dict:
            # Both artifacts for a split live at <kind>/<language>/<split>/<split>.<ext>.
            return {
                "transcript_path": os.path.join(manual_dir, "transcript", language, split, f"{split}.tsv"),
                "audio_tar_path": os.path.join(manual_dir, "audio", language, split, f"{split}.tar"),
            }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=_split_paths("train")),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=_split_paths("validation")),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=_split_paths("test")),
        ]

    def _generate_examples(self, transcript_path: str, audio_tar_path: str) -> Iterator[Tuple[str, dict]]:
        """
        Yields examples as (key, example) tuples.

        Args:
            transcript_path (str): Path to the transcript TSV file.
            audio_tar_path (str): Path to the audio tar archive.
        """
        # 1. Read the transcript TSV file into a dict mapping audio file name -> metadata row.
        metadata_map = {}
        with open(transcript_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for row in reader:
                file_name = row["path"].strip()
                if not file_name.endswith(".mp3"):
                    file_name += ".mp3"
                metadata_map[file_name] = row

        # 2. Walk the tar archive and emit one example per member with metadata.
        id_ = 0
        with tarfile.open(audio_tar_path, "r") as tar:
            for member in tar.getmembers():
                file_name = os.path.basename(member.name)
                if file_name not in metadata_map:
                    continue
                row = metadata_map[file_name]
                audio_file = tar.extractfile(member)
                if audio_file is None:
                    continue  # Skip non-regular members (e.g. directories).
                audio_bytes = audio_file.read()
                # Empty/missing duration cells would make float() raise; default to 0.0.
                try:
                    duration = float(row.get("duration") or 0.0)
                except ValueError:
                    duration = 0.0
                yield str(id_), {
                    # .get fallbacks keep generation alive if optional columns are absent.
                    "id": row.get("id", file_name),
                    "path": row.get("path", file_name),
                    "sentence": row.get("sentence", ""),
                    "duration": duration,
                    "age": row.get("age", ""),
                    "gender": row.get("gender", ""),
                    "accents": row.get("accents", ""),
                    "locale": row.get("locale", ""),
                    # BUG FIX: the previous "tar://<archive>#<file>" string is not a URI that
                    # datasets/fsspec can open (fsspec tar chaining is "tar://member::archive").
                    # Yield the raw bytes instead; the Audio feature decodes {"path","bytes"} directly.
                    "audio": {"path": file_name, "bytes": audio_bytes},
                }
                id_ += 1