|
|
|
import os |
|
import pandas as pd |
|
from datasets import Dataset, DatasetDict, Features, Value, Audio |
|
|
|
|
|
def load_dataset_script(data_dir, sampling_rate=16_000):

    """
    Build a Hugging Face audio-transcription dataset from a metadata CSV.

    Expects a ``metadata.csv`` inside *data_dir* with at least the columns
    ``file_name`` (audio path relative to *data_dir*) and ``transcription``.

    :param data_dir: Directory where the audio files and metadata.csv are stored.
    :param sampling_rate: Sampling rate applied by the ``Audio`` feature
        (default 16 kHz, the previous hard-coded value).
    :return: A DatasetDict with a single ``'train'`` split.
    """

    metadata = pd.read_csv(os.path.join(data_dir, "metadata.csv"))

    # Iterate the two columns directly instead of the slow per-row
    # iterrows()/append pattern; behavior is identical.
    audio_files = [
        {'path': os.path.join(data_dir, file_name)}
        for file_name in metadata['file_name']
    ]
    transcriptions = metadata['transcription'].tolist()

    features = Features({
        'audio': Audio(sampling_rate=sampling_rate),
        'sentence': Value('string')
    })

    dataset = Dataset.from_dict({
        'audio': audio_files,
        'sentence': transcriptions
    }, features=features)

    # Wrap in a DatasetDict so callers address the data as dataset["train"].
    return DatasetDict({'train': dataset})
|
|
|
|
|
|
|
if __name__ == "__main__":

    # Local data directory containing metadata.csv and the audio files.
    source_directory = "C:\\Projects\\aeneas\\hy_asr_grqaser"

    loaded = load_dataset_script(source_directory)

    # Show one sample from the train split as a quick sanity check.
    print(loaded["train"][2])
|
|