flexthink committed
Commit: af7bf66
1 Parent(s): 9159674
Remove ClassLabel encoding (to make the Arrow dataset compatible with raw JSON).

This dataset was originally designed for SpeechBrain, which has its own encoders
in the pipeline.
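For context, here is a minimal sketch of the kind of label encoding a SpeechBrain-style pipeline can apply downstream once "phn" is delivered as raw strings (hypothetical illustration, not part of this commit; the helper and example sequences are made up):

# Hypothetical downstream encoding step: the training pipeline, not the dataset
# schema, maps phoneme strings to integer ids.
def build_encoder(phoneme_sequences):
    """Collect the phoneme inventory from the data and assign integer ids."""
    inventory = sorted({p for seq in phoneme_sequences for p in seq})
    return {phoneme: idx for idx, phoneme in enumerate(inventory)}

# Example "phn" sequences as they appear in the raw JSON and in the new schema.
sequences = [["HH", "AH", "L", "OW"], ["W", "ER", "L", "D"]]
encoder = build_encoder(sequences)
encoded = [[encoder[p] for p in seq] for seq in sequences]
print(encoder)   # e.g. {'AH': 0, 'D': 1, 'ER': 2, 'HH': 3, 'L': 4, 'OW': 5, 'W': 6}
print(encoded)   # e.g. [[3, 0, 4, 5], [6, 2, 4, 1]]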
- librig2p-nostress-space.py +4 -46
librig2p-nostress-space.py  CHANGED
@@ -13,50 +13,8 @@ Grapheme-to-Phoneme training, validation and test sets
 
 _BASE_URL = "https://huggingface.co/datasets/flexthink/librig2p-nostress-space/resolve/main/dataset"
 
-_HOMEPAGE_URL = "https://
+_HOMEPAGE_URL = "https://huggingface.co/datasets/flexthink/librig2p-nostress-space"
 
-_PHONEMES = [
-    "AA",
-    "AE",
-    "AH",
-    "AO",
-    "AW",
-    "AY",
-    "B",
-    "CH",
-    "D",
-    "DH",
-    "EH",
-    "ER",
-    "EY",
-    "F",
-    "G",
-    "HH",
-    "IH",
-    "IY",
-    "JH",
-    "K",
-    "L",
-    "M",
-    "N",
-    "NG",
-    "OW",
-    "OY",
-    "P",
-    "R",
-    "S",
-    "SH",
-    "T",
-    "TH",
-    "UH",
-    "UW",
-    "V",
-    "W",
-    "Y",
-    "Z",
-    "ZH",
-    " "
-]
 _ORIGINS = ["librispeech", "librispeech-lex", "wikipedia-homograph"]
 _NA = "N/A"
 _SPLIT_TYPES = ["train", "valid", "test"]
@@ -79,9 +37,9 @@ class GraphemeToPhoneme(datasets.GeneratorBasedBuilder):
                 {
                     "id": datasets.Value("string"),
                     "speaker_id": datasets.Value("string"),
-                    "origin": datasets.
+                    "origin": datasets.Value("string"),
                     "char": datasets.Value("string"),
-                    "phn": datasets.Sequence(datasets.
+                    "phn": datasets.Sequence(datasets.Value("string")),
                     "homograph": datasets.Value("string"),
                     "homograph_wordid": datasets.Value("string"),
                     "homograph_char_start": datasets.Value("int32"),
@@ -114,7 +72,7 @@ class GraphemeToPhoneme(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, datapath, datatype):
         with open(datapath, encoding="utf-8") as f:
             data = json.load(f)
-
+        breakpoint()
         for sentence_counter, (item_id, item) in enumerate(data.items()):
             resp = {
                 "id": item_id,