hy_asr_grqaser_script.py — custom audio-transcription dataset loading script (new file, 44 lines added in this commit).
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import pandas as pd
|
3 |
+
from datasets import Dataset, DatasetDict, Features, Value, Audio
|
4 |
+
|
5 |
+
def load_dataset_script(data_dir, sampling_rate=16_000):
    """
    Build a Hugging Face DatasetDict from a local audio-transcription folder.

    Expects ``data_dir`` to contain a ``metadata.csv`` with at least the
    columns ``file_name`` (audio file path relative to ``data_dir``) and
    ``transcription`` (the target text for that audio file).

    :param data_dir: Directory where the audio data and ``metadata.csv``
        are stored.
    :param sampling_rate: Sampling rate passed to the ``Audio`` feature
        (default 16 kHz, matching the previous hard-coded value).
    :return: A ``DatasetDict`` with a single ``'train'`` split whose rows
        have an ``'audio'`` column and a ``'sentence'`` (transcription)
        column.
    """
    # Load metadata.csv describing every (audio file, transcription) pair.
    metadata = pd.read_csv(os.path.join(data_dir, "metadata.csv"))

    # Vectorized column extraction instead of a per-row iterrows() loop.
    audio_files = [
        {'path': os.path.join(data_dir, file_name)}
        for file_name in metadata['file_name']
    ]
    transcriptions = metadata['transcription'].tolist()

    # Declare the schema so the Audio feature decodes files on access.
    features = Features({
        'audio': Audio(sampling_rate=sampling_rate),
        'sentence': Value('string')
    })

    # Create a dataset from the parallel column lists.
    dataset = Dataset.from_dict({
        'audio': audio_files,
        'sentence': transcriptions
    }, features=features)

    # Wrapped in a DatasetDict so callers can add validation/test splits later.
    return DatasetDict({'train': dataset})
|
38 |
+
|
39 |
+
|
40 |
+
# Example usage
|
41 |
+
# Example usage: point this at a folder containing metadata.csv plus audio.
if __name__ == "__main__":
    example_dir = "path/to/your/data"
    print(load_dataset_script(example_dir))
|