File size: 2,506 Bytes
eec5401
 
00193e9
7e4da78
eec5401
 
 
 
7e4da78
 
ac80f9d
eec5401
 
7e4da78
 
eec5401
 
 
7e4da78
20a8dfe
 
cc75534
20a8dfe
 
eec5401
 
 
 
00193e9
 
1b082c9
d4a7562
00193e9
eec5401
00193e9
3b01e6c
00193e9
 
1b082c9
d4a7562
00193e9
 
eec5401
 
d4a7562
7e4da78
 
00193e9
 
7e4da78
00193e9
 
7e4da78
d4a7562
00193e9
 
 
 
7e4da78
 
 
00193e9
 
0127588
00193e9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import datasets
import os
import random
import json

class MyDataset(datasets.GeneratorBasedBuilder):
    """Loader for a small speech dataset of (sentence, audio) pairs.

    The audio files ship in a tarball and the transcripts live in a JSONL
    metadata file with one ``{"file_name": ..., "sentence": ...}`` record
    per line. The train/test split is an 80/20 cut computed locally in
    ``_generate_examples``.
    """

    # Fixed seed so the shuffle is reproducible and — crucially — identical
    # across the separate train and eval generator calls (see
    # _generate_examples for why this matters).
    _SPLIT_SEED = 42
    _TRAIN_FRACTION = 0.8

    def _info(self):
        """Declare the dataset's features, homepage, and license."""
        return datasets.DatasetInfo(
            description="My dataset with text and audio.",
            features=datasets.Features({
                "sentence": datasets.Value("string"),
                # Audio feature: decoded lazily from the file path at 16 kHz.
                "audio": datasets.Audio(sampling_rate=16_000),
            }),
            homepage="https://huggingface.co/datasets/my-dataset",
            license="MIT",
        )

    def _split_generators(self, dl_manager):
        """Download the audio archive and metadata, then define the splits.

        Both splits receive the same metadata file and audio directory;
        ``_generate_examples`` carves out the train/eval portion
        deterministically based on the ``split`` kwarg.
        """
        data_dir = dl_manager.download_and_extract("https://raw.githubusercontent.com/atulksingh011/test-voice-data/refs/heads/record-names/audio.tar.gz")
        metadata = dl_manager.download("https://github.com/atulksingh011/test-voice-data/raw/record-names/metadata.jsonl")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": metadata,
                    "audio_dir": data_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": metadata,
                    "audio_dir": data_dir,
                    "split": "eval",
                },
            ),
        ]

    def _generate_examples(self, filepath, audio_dir, split):
        """Yield ``(index, example)`` pairs for the requested split.

        BUG FIX: the original implementation called ``random.shuffle`` on
        the global, unseeded RNG. This method runs once per split, so the
        train and eval subsets were cut from two *different* shuffles —
        the splits could overlap (train/test leakage) and split membership
        changed on every run. A dedicated fixed-seed RNG makes the
        ordering identical for both calls and fully reproducible.

        Args:
            filepath: path to the downloaded JSONL metadata file.
            audio_dir: directory the audio tarball was extracted into.
            split: ``"train"`` for the first 80% of the shuffled records,
                anything else (``"eval"``) for the remaining 20%.
        """
        with open(filepath, "r", encoding="utf-8") as f:
            # Skip blank lines so a trailing newline doesn't crash json.loads.
            data = [json.loads(line) for line in f if line.strip()]

        # Deterministic shuffle: identical order for the train and eval calls.
        random.Random(self._SPLIT_SEED).shuffle(data)

        # 80/20 train/eval cut over the (consistently) shuffled records.
        split_index = int(len(data) * self._TRAIN_FRACTION)
        examples = data[:split_index] if split == "train" else data[split_index:]

        for idx, record in enumerate(examples):
            yield idx, {
                "sentence": record["sentence"],
                # The Audio feature accepts a file path; decoding is deferred.
                "audio": os.path.join(audio_dir, record["file_name"]),
            }