Update libriheavy.py

libriheavy.py (+8 -4)
@@ -58,15 +58,15 @@ class Libriheavy(datasets.GeneratorBasedBuilder):
                 "text": datasets.Value("string"),
                 "word_segments": datasets.Sequence(
                     {
-                        "start": datasets.Value("
-                        "end": datasets.Value("
+                        "start": datasets.Value("float32"),
+                        "end": datasets.Value("float32"),
                         "word": datasets.Value("string"),
                     }
                 ),
                 "phone_segments": datasets.Sequence(
                     {
-                        "start": datasets.Value("
-                        "end": datasets.Value("
+                        "start": datasets.Value("float32"),
+                        "end": datasets.Value("float32"),
                         "phone": datasets.Value("string"),
                     }
                 ),
@@ -153,6 +153,7 @@ class Libriheavy(datasets.GeneratorBasedBuilder):
             # skip the last utterance
             if utterance_id == sorted(list(text.keys()))[-1]:
                 continue
+            print(utterance["phone_segments"])
             result = {
                 "id": chunk["speaker_id"] + "_" + utterance_id,
                 "speaker_id": chunk["speaker_id"],
@@ -179,6 +180,9 @@ class Libriheavy(datasets.GeneratorBasedBuilder):
                 "word_segments": [
                     {"start": segment[0], "end": segment[1], "word": segment[2]} for segment in utterance["word_segments"]
                 ],
+                "phone_segments": [
+                    {"start": segment[0], "end": segment[1], "word": segment[2]} for segment in utterance["phone_segments"]
+                ],
                 "mel_spectrogram": npz[str(utterance_id)].item()["mel"][0][0],
             }
             yield chunk["speaker_id"] + "_" + utterance_id, result
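
For reference, the segment-related part of the schema after this change can be exercised on its own. The following is a minimal sketch pieced together from the first hunk, not the full Features dict from libriheavy.py (which also declares id, speaker_id, mel_spectrogram and other fields that the diff does not show); the example utterance and its values are made up.

import datasets

# Segment-related features as declared after this commit: start/end are
# float32 values and each segment carries a string label.
features = datasets.Features(
    {
        "text": datasets.Value("string"),
        "word_segments": datasets.Sequence(
            {
                "start": datasets.Value("float32"),
                "end": datasets.Value("float32"),
                "word": datasets.Value("string"),
            }
        ),
        "phone_segments": datasets.Sequence(
            {
                "start": datasets.Value("float32"),
                "end": datasets.Value("float32"),
                "phone": datasets.Value("string"),
            }
        ),
    }
)

# A made-up utterance, shaped the way the generator yields it: a list of
# per-segment dicts, which datasets encodes into a dict of lists.
example = {
    "text": "hello world",
    "word_segments": [
        {"start": 0.00, "end": 0.42, "word": "hello"},
        {"start": 0.42, "end": 0.90, "word": "world"},
    ],
    "phone_segments": [
        {"start": 0.00, "end": 0.10, "phone": "HH"},
        {"start": 0.10, "end": 0.21, "phone": "AH"},
    ],
}
print(features.encode_example(example))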
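
One detail worth flagging in the last hunk: the new phone_segments comprehension reuses the key "word" for the third field, while the feature declared in the first hunk expects "phone". When datasets encodes a yielded example against a Sequence-of-dict feature it matches entries by key, so in the datasets versions we have checked this mismatch raises a KeyError at generation time. A corrected comprehension, wrapped in a hypothetical helper for illustration (the function name is ours, not from the script):

# Hypothetical helper, not part of libriheavy.py: same comprehension with
# the third key renamed to "phone" so it matches the declared feature.
def encode_phone_segments(utterance):
    return [
        {"start": segment[0], "end": segment[1], "phone": segment[2]}
        for segment in utterance["phone_segments"]
    ]

# e.g. encode_phone_segments({"phone_segments": [(0.0, 0.1, "HH")]})
# -> [{"start": 0.0, "end": 0.1, "phone": "HH"}]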