import csv
import os
import datasets
from tqdm import tqdm
import tarfile


# NOTE: minimal placeholder description; replace with the dataset card text if available.
_DESCRIPTION = """\
Kabyle automatic speech recognition dataset: audio clips paired with transcriptions
and per-clip licence information, hosted at https://huggingface.co/datasets/TifinLab/kabyle_asr.
"""

_CITATION = """\
@inproceedings{commonvoice:2020,
  author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
  title = {Common Voice: A Massively-Multilingual Speech Corpus},
  booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
  pages = {4211--4215},
  year = 2020
}
"""

_HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"

_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"

_BASE_URL = "https://huggingface.co/datasets/TifinLab/kabyle_asr/raw/main/"


_AUDIO_URL = _BASE_URL + "data/{split}.tar"

_TRANSCRIPT_URL = _BASE_URL + "text/{split}.csv"



class KabyleAsr(datasets.GeneratorBasedBuilder):
    """Builder for the TifinLab Kabyle automatic speech recognition dataset."""

    VERSION = datasets.Version("1.1.0")
    
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "id": datasets.Value("int64"),
                "path": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=48000),
                "text": datasets.Value("string"),
                "licence": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )


    def _split_generators(self, dl_manager):
        splits = {
            "train": _AUDIO_URL.format(split="train"),
            "test": _AUDIO_URL.format(split="test"),
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_paths": dl_manager.download(splits["train"]),
                    "split": "train",
                    "transcript_path": dl_manager.download_and_extract(_TRANSCRIPT_URL.format(split="train")),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_paths": dl_manager.download(splits["test"]),
                    "split": "test",
                    "transcript_path": dl_manager.download_and_extract(_TRANSCRIPT_URL.format(split="test")),
                },
            ),
        ]


    def _generate_examples(self, audio_paths, split, transcript_path):
        with open(transcript_path, encoding="utf-8") as f:
            # csv.DictReader makes each metadata row accessible by column name
            reader = csv.DictReader(f, delimiter=";", quoting=csv.QUOTE_NONE)

            # dl_manager.download already returns the local path of the .tar archive,
            # so it is opened directly; keeping it open while iterating keeps the
            # extracted file handles valid.
            with tarfile.open(audio_paths, "r") as tar:
                # Map each archived file name to its tar member for fast lookup
                audio_members = {member.name: member for member in tar.getmembers() if member.isfile()}

                # Iterate over the metadata rows and yield one example per audio clip
                for row_id, row in enumerate(tqdm(reader, desc=f"Generating {split} examples")):
                    audio_filename = row["Path"]
                    sentence = row["Text"]
                    licence = row["Licence"]

                    # Skip rows whose audio file is missing from the archive
                    if audio_filename not in audio_members:
                        continue

                    # Read the audio bytes directly from the archive
                    audio_bytes = tar.extractfile(audio_members[audio_filename]).read()

                    yield row_id, {
                        "id": row_id,
                        "path": audio_filename,
                        "audio": {"path": audio_filename, "bytes": audio_bytes},
                        "text": sentence,
                        "licence": licence,
                    }
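

# Usage sketch (not part of the loading logic): a minimal example of consuming
# this builder through the datasets library. It assumes the script is published
# in the TifinLab/kabyle_asr repository referenced by _BASE_URL above, and
# `trust_remote_code=True` may be required on recent versions of `datasets`
# to run script-based loaders like this one.
if __name__ == "__main__":
    ds = datasets.load_dataset("TifinLab/kabyle_asr", split="test", trust_remote_code=True)
    sample = ds[0]
    print(sample["path"], sample["licence"])
    print(sample["text"])
    # The Audio feature decodes each clip to an array plus its sampling rate
    print("sampling rate:", sample["audio"]["sampling_rate"])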