# mypin-voice-dataset / mypin-voice-dataset.py
# (Hugging Face Hub provenance: uploaded by atulksingh, commit 0127588 verified,
#  message: "Update mypin-voice-dataset.py")
import datasets
import os
import random
import json
class MyDataset(datasets.GeneratorBasedBuilder):
    """Loading script for a small speech dataset of (sentence, audio) pairs.

    The metadata lives in a JSONL file (one record per line with at least
    ``sentence`` and ``file_name`` keys) and the audio clips in a tarball.
    Both are fetched from GitHub, and an 80/20 train/eval split is carved
    out of the single metadata file at generation time.
    """

    # Fixed seed for the pre-split shuffle. _generate_examples is called once
    # PER split; without a fixed seed each call would shuffle independently,
    # making the train/eval partitions overlap (data leakage) and the dataset
    # non-reproducible across runs.
    _SHUFFLE_SEED = 42
    # Fraction of the shuffled records assigned to the train split.
    _TRAIN_FRACTION = 0.8

    def _info(self):
        """Declare the dataset's features, homepage, and license."""
        return datasets.DatasetInfo(
            description="My dataset with text and audio.",
            features=datasets.Features({
                "sentence": datasets.Value("string"),
                # Audio feature: decoded/resampled to 16 kHz by `datasets`.
                "audio": datasets.Audio(sampling_rate=16_000),
            }),
            homepage="https://huggingface.co/datasets/my-dataset",
            license="MIT",
        )

    def _split_generators(self, dl_manager):
        """Download the audio archive and metadata, and define the splits.

        Both splits share the same metadata file and extracted audio dir;
        the actual partitioning happens in `_generate_examples` via `split`.
        """
        data_dir = dl_manager.download_and_extract("https://raw.githubusercontent.com/atulksingh011/test-voice-data/refs/heads/record-names/audio.tar.gz")
        metadata = dl_manager.download("https://github.com/atulksingh011/test-voice-data/raw/record-names/metadata.jsonl")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": metadata,
                    "audio_dir": data_dir,
                    "split": "train"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": metadata,
                    "audio_dir": data_dir,
                    "split": "eval"
                },
            )
        ]

    def _generate_examples(self, filepath, audio_dir, split):
        """Yield ``(key, example)`` pairs for the requested split.

        Args:
            filepath: Path to the downloaded metadata JSONL file.
            audio_dir: Directory containing the extracted audio files.
            split: ``"train"`` for the first 80% of the shuffled records,
                anything else (``"eval"``) for the remaining 20%.
        """
        with open(filepath, "r", encoding="utf-8") as f:
            data = [json.loads(line) for line in f]
        # Deterministic shuffle: a dedicated Random instance with a fixed seed
        # guarantees BOTH split calls see the identical permutation, so the
        # train and eval slices are complementary and never overlap.
        random.Random(self._SHUFFLE_SEED).shuffle(data)
        split_index = int(len(data) * self._TRAIN_FRACTION)
        examples = data[:split_index] if split == "train" else data[split_index:]
        for idx, record in enumerate(examples):
            yield idx, {
                "sentence": record["sentence"],
                # Audio is passed as a path; the Audio feature loads/decodes it.
                "audio": os.path.join(audio_dir, record["file_name"]),
            }