## hy_asr_grqaser.py
import os
import pandas as pd
from datasets import GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split, Features, Value, Audio, Version

class HyAsrGrqaser(GeneratorBasedBuilder):
    """Armenian Audio-Transcription Dataset"""

    VERSION = Version("1.0.0")

    def _info(self):
        return DatasetInfo(
            description="This dataset contains Armenian speech and transcriptions.",
            features=Features({
                'audio': Audio(sampling_rate=16_000),  # Adjust the sampling rate as needed
                'sentence': Value('string')
            }),
            supervised_keys=("audio", "sentence"),
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The script is assumed to sit in the dataset root, next to the audio
        # files and metadata.csv, so the download manager is not needed here.
        data_dir = os.path.dirname(__file__)
        metadata_path = os.path.join(data_dir, "metadata.csv")
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"data_dir": data_dir, "metadata_path": metadata_path}
            ),
        ]

    def _generate_examples(self, data_dir, metadata_path):
        """Yields examples."""
        # Load metadata.csv
        metadata = pd.read_csv(metadata_path)

        # Generate examples
        for idx, row in metadata.iterrows():
            file_path = os.path.join(data_dir, row['file_name'])
            transcription_path = os.path.join(data_dir, row['transcription_file'])
            # Explicit UTF-8 avoids platform-dependent decoding errors on the Armenian text files
            with open(transcription_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()
            yield idx, {
                # A plain file path lets the Audio feature load and decode the recording
                'audio': file_path,
                'sentence': transcription
            }

# Testing the dataset locally
# if __name__ == "__main__":
#     from datasets import load_dataset
#     dataset = load_dataset("C:\\Projects\\aeneas\\hy_asr_grqaser\\hy_asr_grqaser.py")
#     print(dataset["train"][0])
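
# The generator above assumes a metadata.csv with a 'file_name' column (path to
# the audio file) and a 'transcription_file' column (path to the matching text
# file), both relative to the script directory. A hypothetical layout:
#
#   file_name,transcription_file
#   audio/0001.wav,text/0001.txt
#   audio/0002.wav,text/0002.txt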
## Usage
from datasets import load_dataset

dataset = load_dataset("aburnazy/hy_asr_grqaser")
print(dataset)
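
# A minimal sketch of inspecting one example, assuming the single "train" split
# defined above; the Audio feature returns the decoded waveform and sampling rate.
sample = dataset["train"][0]
print(sample["sentence"])
print(sample["audio"]["sampling_rate"], len(sample["audio"]["array"]))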