TifinLab committed
Commit 5f197a8 · verified · 1 parent: 3351801

Delete kabyle_asr.py

Files changed (1)
  1. kabyle_asr.py +0 -116
kabyle_asr.py DELETED
@@ -1,116 +0,0 @@
- import csv
- import os
- import tarfile
-
- import datasets
- from tqdm import tqdm
-
-
- _CITATION = """\
- @inproceedings{commonvoice:2020,
-   author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
-   title = {Common Voice: A Massively-Multilingual Speech Corpus},
-   booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
-   pages = {4211--4215},
-   year = 2020
- }
- """
-
- # Short dataset description, required by datasets.DatasetInfo below.
- _DESCRIPTION = "Kabyle automatic speech recognition (ASR) dataset."
-
- _HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"
-
- _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
-
- _BASE_URL = "https://huggingface.co/datasets/TifinLab/kabyle_asr/raw/main/"
-
- _AUDIO_URL = _BASE_URL + "data/{split}.tar"
-
- _TRANSCRIPT_URL = _BASE_URL + "text/{split}.csv"
-
-
- class KabyleAsr(datasets.GeneratorBasedBuilder):
-
-     VERSION = datasets.Version("1.1.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features({
-                 "id": datasets.Value("int64"),
-                 "path": datasets.Value("string"),
-                 "audio": datasets.features.Audio(sampling_rate=48000),
-                 "text": datasets.Value("string"),
-                 "licence": datasets.Value("string"),
-             }),
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         splits = {
-             "train": _AUDIO_URL.format(split="train"),
-             "test": _AUDIO_URL.format(split="test"),
-         }
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "audio_paths": dl_manager.download(splits["train"]),
-                     "split": "train",
-                     "transcript_path": dl_manager.download_and_extract(_TRANSCRIPT_URL.format(split="train")),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "audio_paths": dl_manager.download(splits["test"]),
-                     "split": "test",
-                     "transcript_path": dl_manager.download_and_extract(_TRANSCRIPT_URL.format(split="test")),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, audio_paths, split, transcript_path):
-         with open(transcript_path, encoding="utf-8") as f:
-             # csv.DictReader makes it easy to process each row of the CSV metadata file
-             reader = csv.DictReader(f, delimiter=";", quoting=csv.QUOTE_NONE)
-
-             # Open the downloaded audio archive; dl_manager.download already returns the local path to the tar file
-             with tarfile.open(audio_paths, "r") as tar:
-                 # Map each audio file name in the archive to its extracted file object
-                 audio_file_dict = {member.name: tar.extractfile(member) for member in tar.getmembers() if member.isfile()}
-
-                 # Iterate over each row of the metadata file to generate examples
-                 for row_id, row in enumerate(tqdm(reader, desc=f"Generating examples for {split}")):
-                     audio_filename = row["Path"]
-                     sentence = row["Text"]
-                     licenseR = row["Licence"]
-                     # Check that the corresponding audio file exists in the archive
-                     if audio_filename in audio_file_dict:
-                         # Build a temporary path to store the extracted audio file
-                         audio_extracted_path = os.path.join("temp", audio_filename)
-                         os.makedirs(os.path.dirname(audio_extracted_path), exist_ok=True)
-                         # Write out the extracted audio content
-                         with open(audio_extracted_path, "wb") as audio_out:
-                             audio_out.write(audio_file_dict[audio_filename].read())
-
-                         with open(audio_extracted_path, "rb") as audio_in:
-                             audio_bytes = audio_in.read()
-
-                         yield row_id, {
-                             "id": row_id,
-                             "path": audio_filename,
-                             "audio": {"path": audio_extracted_path, "bytes": audio_bytes},
-                             "text": sentence,
-                             "licence": licenseR,
-                         }
-
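For context, a loading script like the one removed above is normally consumed through datasets.load_dataset. A minimal usage sketch, assuming the script were still present in the repo and a datasets version that supports script-based builders via trust_remote_code:

    from datasets import load_dataset

    # Builds the train/test splits through the KabyleAsr builder defined in kabyle_asr.py
    ds = load_dataset("TifinLab/kabyle_asr", trust_remote_code=True)
    print(ds["train"][0]["text"])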