Spaces (Sleeping)
waysolong committed · Commit 1b38289 · Parent: d5cd191
中文预测 (Chinese prediction)
Browse files:
- .gitignore +0 -1
- app.py +358 -4
- deploy_model.py +40 -0
- examples.py +23 -0
- test_wavs/A2_0.wav +0 -0
- test_wavs/A2_1.wav +0 -0
- test_wavs/C7_639.wav +0 -0
.gitignore
CHANGED
@@ -5,7 +5,6 @@
 [Mm]odel_speech/
 
 __pycache__
-*.wav
 *.model_yaml
 Test_Report_*
 data/*
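(Dropping the *.wav ignore rule is what lets the three test_wavs/*.wav fixtures added below be committed.)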
app.py
CHANGED
@@ -1,7 +1,361 @@
+'''
+Author: Wxl
+Date: 2024-03-11 13:58:58
+LastEditors: waysolong [email protected]
+LastEditTime: 2024-03-11 16:22:34
+FilePath: \apeech_rec\app.py
+Description:
+Copyright (c) 2024 by ${git_name} email: ${git_email}, All Rights Reserved.
+'''
+# References:
+# https://gradio.app/docs/#dropdown
+
+import logging
+import os
+import wave
+import tempfile
+import time
+import urllib.request
+import uuid
+from datetime import datetime
+
 import gradio as gr
+import torch
+
+from examples import examples
+from deploy_model import decode, get_pretrained_model, language_to_models
+
+languages = list(language_to_models.keys())
+
+
+def convert_to_wav(in_filename: str) -> str:
+    """Convert the input audio file to a wave file"""
+    out_filename = str(uuid.uuid4())
+    out_filename = f"{in_filename}.wav"
+
+    logging.info(f"Converting '{in_filename}' to '{out_filename}'")
+    _ = os.system(f"ffmpeg -hide_banner -i '{in_filename}' -ar 16000 '{out_filename}'")
+
+    return out_filename
+
+
+def build_html_output(s: str, style: str = "result_item_success"):
+    return f"""
+    <div class='result'>
+        <div class='result_item {style}'>
+            {s}
+        </div>
+    </div>
+    """
+
+def process_url(
+    language: str,
+    repo_id: str,
+    decoding_method: str,
+    num_active_paths: int,
+    url: str,
+):
+    logging.info(f"Processing URL: {url}")
+    with tempfile.NamedTemporaryFile() as f:
+        try:
+            urllib.request.urlretrieve(url, f.name)
+
+            return process(
+                in_filename=f.name,
+                language=language,
+                repo_id=repo_id,
+                decoding_method=decoding_method,
+                num_active_paths=num_active_paths,
+            )
+        except Exception as e:
+            logging.info(str(e))
+            return "", build_html_output(str(e), "result_item_error")
+
+def process_uploaded_file(
+    language: str,
+    repo_id: str,
+    decoding_method: str,
+    num_active_paths: int,
+    in_filename: str,
+):
+    if in_filename is None or in_filename == "":
+        return "", build_html_output(
+            "Please first upload a file and then click "
+            'the button "submit for recognition"',
+            "result_item_error",
+        )
+
+    logging.info(f"Processing uploaded file: {in_filename}")
+    try:
+        return process(
+            in_filename=in_filename,
+            language=language,
+            repo_id=repo_id,
+            decoding_method=decoding_method,
+            num_active_paths=num_active_paths,
+        )
+    except Exception as e:
+        logging.info(str(e))
+        return "", build_html_output(str(e), "result_item_error")
+def process_microphone(
+    language: str,
+    repo_id: str,
+    decoding_method: str,
+    num_active_paths: int,
+    in_filename: str,
+):
+    if in_filename is None or in_filename == "":
+        return "", build_html_output(
+            "Please first click 'Record from microphone', speak, "
+            "click 'Stop recording', and then "
+            "click the button 'submit for recognition'",
+            "result_item_error",
+        )
+
+    logging.info(f"Processing microphone: {in_filename}")
+    try:
+        return process(
+            in_filename=in_filename,
+            language=language,
+            repo_id=repo_id,
+            decoding_method=decoding_method,
+            num_active_paths=num_active_paths,
+        )
+    except Exception as e:
+        logging.info(str(e))
+        return "", build_html_output(str(e), "result_item_error")
+
+@torch.no_grad()
+def process(
+    language: str,
+    repo_id: str,
+    decoding_method: str,
+    num_active_paths: int,
+    in_filename: str,
+):
+    logging.info(f"language: {language}")
+    logging.info(f"repo_id: {repo_id}")
+    logging.info(f"decoding_method: {decoding_method}")
+    logging.info(f"num_active_paths: {num_active_paths}")
+    logging.info(f"in_filename: {in_filename}")
+    filename = convert_to_wav(in_filename)
+
+    now = datetime.now()
+    date_time = now.strftime("%Y-%m-%d %H:%M:%S.%f")
+    logging.info(f"Started at {date_time}")
+
+    start = time.time()
+
+    recognizer = get_pretrained_model(
+        repo_id,
+        decoding_method=decoding_method,
+        num_active_paths=num_active_paths,
+    )
+
+    text = decode(recognizer, filename)
+
+    date_time = now.strftime("%Y-%m-%d %H:%M:%S.%f")
+    end = time.time()
+
+    wav = wave.open(filename, "rb")  # open the wav file as a stream
+    num_frame = wav.getnframes()  # number of frames
+    num_channel = wav.getnchannels()  # number of channels
+    framerate = wav.getframerate()  # frame rate (samples per second)
+    num_sample_width = wav.getsampwidth()  # sample width, i.e. bytes per frame
+    str_data = wav.readframes(num_frame)  # read all the frames
+    wav.close()  # close the stream
+    duration = num_frame / framerate
+    rtf = (end - start) / duration
+
+    logging.info(f"Finished at {date_time} s. Elapsed: {end - start: .3f} s")
+
+    info = f"""
+    Wave duration  : {duration: .3f} s <br/>
+    Processing time: {end - start: .3f} s <br/>
+    RTF: {end - start: .3f}/{duration: .3f} = {rtf:.3f} <br/>
+    """
+    if rtf > 1:
+        info += (
+            "<br/>We are loading the model for the first run. "
+            "Please run again to measure the real RTF.<br/>"
+        )
+
+    logging.info(info)
+    logging.info(f"\nrepo_id: {repo_id}\nhyp: {text}")
+
+    return text, build_html_output(info)
+title = "# Automatic Speech Recognition with Next-gen Kaldi"
+description = """
+This space shows how to do automatic speech recognition with Next-gen Kaldi.
+Please visit
+<https://k2-fsa.github.io/sherpa/ncnn/wasm/hf-spaces.html>
+for streaming speech recognition with **Next-gen Kaldi** using WebAssembly.
+It is running on CPU within a docker container provided by Hugging Face.
+See more information by visiting the following links:
+- <https://github.com/k2-fsa/icefall>
+- <https://github.com/k2-fsa/sherpa>
+- <https://github.com/k2-fsa/k2>
+- <https://github.com/lhotse-speech/lhotse>
+If you want to deploy it locally, please see
+<https://k2-fsa.github.io/sherpa/>
+"""
+
+# css style is copied from
+# https://huggingface.co/spaces/alphacep/asr/blob/main/app.py#L113
+css = """
+.result {display:flex;flex-direction:column}
+.result_item {padding:15px;margin-bottom:8px;border-radius:15px;width:100%}
+.result_item_success {background-color:mediumaquamarine;color:white;align-self:start}
+.result_item_error {background-color:#ff7070;color:white;align-self:start}
+"""
+def update_model_dropdown(language: str):
+    if language in language_to_models:
+        choices = language_to_models[language]
+        return gr.Dropdown(
+            choices=choices,
+            value=choices[0],
+            interactive=True,
+        )
+
+    raise ValueError(f"Unsupported language: {language}")
+
+
+demo = gr.Blocks(css=css)
+with demo:
+    gr.Markdown(title)
+    language_choices = list(language_to_models.keys())
+
+    language_radio = gr.Radio(
+        label="Language",
+        choices=language_choices,
+        value=language_choices[0],
+    )
+    model_dropdown = gr.Dropdown(
+        choices=language_to_models[language_choices[0]],
+        label="Select a model",
+        value=language_to_models[language_choices[0]][0],
+    )
+
+    language_radio.change(
+        update_model_dropdown,
+        inputs=language_radio,
+        outputs=model_dropdown,
+    )
+
+    decoding_method_radio = gr.Radio(
+        label="Decoding method",
+        choices=["greedy_search", "modified_beam_search"],
+        value="greedy_search",
+    )
+    num_active_paths_slider = gr.Slider(
+        minimum=1,
+        value=4,
+        step=1,
+        label="Number of active paths for modified_beam_search",
+    )
+
+    with gr.Tabs():
+        with gr.TabItem("Upload from disk"):
+            uploaded_file = gr.Audio(
+                sources=["upload"],  # Choose between "microphone", "upload"
+                type="filepath",
+                label="Upload from disk",
+            )
+            upload_button = gr.Button("Submit for recognition")
+            uploaded_output = gr.Textbox(label="Recognized speech from uploaded file")
+            uploaded_html_info = gr.HTML(label="Info")
+
+            gr.Examples(
+                examples=examples,
+                inputs=[
+                    language_radio,
+                    model_dropdown,
+                    decoding_method_radio,
+                    num_active_paths_slider,
+                    uploaded_file,
+                ],
+                outputs=[uploaded_output, uploaded_html_info],
+                fn=process_uploaded_file,
+            )
+        with gr.TabItem("Record from microphone"):
+            microphone = gr.Audio(
+                sources=["microphone"],  # Choose between "microphone", "upload"
+                type="filepath",
+                label="Record from microphone",
+            )
+
+            record_button = gr.Button("Submit for recognition")
+            recorded_output = gr.Textbox(label="Recognized speech from recordings")
+            recorded_html_info = gr.HTML(label="Info")
+
+            gr.Examples(
+                examples=examples,
+                inputs=[
+                    language_radio,
+                    model_dropdown,
+                    decoding_method_radio,
+                    num_active_paths_slider,
+                    microphone,
+                ],
+                outputs=[recorded_output, recorded_html_info],
+                fn=process_microphone,
+            )
+        with gr.TabItem("From URL"):
+            url_textbox = gr.Textbox(
+                max_lines=1,
+                placeholder="URL to an audio file",
+                label="URL",
+                interactive=True,
+            )
+
+            url_button = gr.Button("Submit for recognition")
+            url_output = gr.Textbox(label="Recognized speech from URL")
+            url_html_info = gr.HTML(label="Info")
+    upload_button.click(
+        process_uploaded_file,
+        inputs=[
+            language_radio,
+            model_dropdown,
+            decoding_method_radio,
+            num_active_paths_slider,
+            uploaded_file,
+        ],
+        outputs=[uploaded_output, uploaded_html_info],
+    )
+    record_button.click(
+        process_microphone,
+        inputs=[
+            language_radio,
+            model_dropdown,
+            decoding_method_radio,
+            num_active_paths_slider,
+            microphone,
+        ],
+        outputs=[recorded_output, recorded_html_info],
+    )
+    url_button.click(
+        process_url,
+        inputs=[
+            language_radio,
+            model_dropdown,
+            decoding_method_radio,
+            num_active_paths_slider,
+            url_textbox,
+        ],
+        outputs=[url_output, url_html_info],
+    )
+
+    gr.Markdown(description)
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+
+torch._C._jit_set_profiling_executor(False)
+torch._C._jit_set_profiling_mode(False)
+torch._C._set_graph_executor_optimize(False)
+
+if __name__ == "__main__":
+    formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
 
-def greet(name):
-    return "Hello " + name + "!!"
+    logging.basicConfig(format=formatter, level=logging.INFO)
 
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
+    demo.launch()
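Two notes on app.py as committed. First, convert_to_wav interpolates the raw filename into a shell string for os.system (and its first out_filename assignment is immediately overwritten). A minimal sketch of the same conversion through subprocess.run instead, a suggestion of mine rather than what the commit ships, assuming only that ffmpeg is on the PATH (the name convert_to_wav_safe is hypothetical):

import logging
import subprocess


def convert_to_wav_safe(in_filename: str) -> str:
    """Convert the input audio file to a 16 kHz wave file."""
    out_filename = f"{in_filename}.wav"
    logging.info(f"Converting '{in_filename}' to '{out_filename}'")
    # Passing an argument list avoids shell quoting issues with odd filenames.
    subprocess.run(
        ["ffmpeg", "-hide_banner", "-i", in_filename, "-ar", "16000", out_filename],
        check=True,  # raise CalledProcessError if ffmpeg exits non-zero
    )
    return out_filename

Second, the RTF reported by process is processing time divided by audio duration: decoding a 10 s wave in 2.5 s gives RTF = 2.5 / 10 = 0.25, and values below 1 mean faster than real time.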
deploy_model.py
ADDED
@@ -0,0 +1,40 @@
+import os
+
+from speech_model import ModelSpeech
+from model_zoo.speech_model.keras_backend import SpeechModel251BN
+from speech_features import Spectrogram
+from language_model3 import ModelLanguage
+
+os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+
+AUDIO_LENGTH = 1600
+AUDIO_FEATURE_LENGTH = 200
+CHANNELS = 1
+# The default pinyin output size is 1428, i.e. 1427 pinyin tokens + 1 blank.
+OUTPUT_SIZE = 1428
+sm251bn = SpeechModel251BN(
+    input_shape=(AUDIO_LENGTH, AUDIO_FEATURE_LENGTH, CHANNELS),
+    output_size=OUTPUT_SIZE
+)
+def get_pretrained_model(repo_id, decoding_method="greedy_search", num_active_paths=4):
+    # repo_id, decoding_method and num_active_paths are accepted to match the
+    # call in app.py; this Keras acoustic model does not use them.
+    feat = Spectrogram()
+    ms = ModelSpeech(sm251bn, feat, max_label_length=64)
+
+    ms.load_model('save_models/SpeechModel251bn/' + sm251bn.get_model_name() + '.model.h5')
+    return ms
+def decode(model, filename):
+
+    res = model.recognize_speech_from_file(filename)
+    print('*[Info] acoustic model speech recognition result:\n', res)
+    return res
+def not_use(res):
+    ml = ModelLanguage('model_language')
+    ml.load_model()
+    str_pinyin = res
+    res = ml.pinyin_to_text(str_pinyin)
+    print('Final speech recognition result:\n', res)
+chinese_models = {"chinese": 'save_models/SpeechModel251bn/' + sm251bn.get_model_name() + '.model.h5'}
+language_to_models = {
+    "Chinese": list(chinese_models.keys())}
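For orientation, a minimal sketch of driving these two helpers directly, outside the Gradio app; it assumes the .model.h5 weights exist under save_models/ and that the ASRT modules imported above are installed:

from deploy_model import decode, get_pretrained_model

# The argument mirrors app.py's call; this backend always loads the same model.
recognizer = get_pretrained_model("chinese")
pinyin = decode(recognizer, "test_wavs/A2_0.wav")  # prints and returns the pinyin hypothesis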
examples.py
ADDED
@@ -0,0 +1,23 @@
+examples = [
+    [
+        "Chinese",
+        "chinese",
+        "greedy_search",
+        4,
+        "./test_wavs/A2_0.wav",
+    ],
+    [
+        "Chinese",
+        "chinese",
+        "greedy_search",
+        4,
+        "./test_wavs/A2_1.wav",
+    ],
+    [
+        "Chinese",
+        "chinese",
+        "greedy_search",
+        4,
+        "./test_wavs/C7_639.wav",
+    ]
+]
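Each row fills app.py's `inputs` list in order: language, model, decoding method, number of active paths, audio path. A row can therefore be replayed straight through the upload handler as a quick sanity check (a sketch; it assumes the model weights and test wavs are in place):

from app import process_uploaded_file
from examples import examples

text, info_html = process_uploaded_file(*examples[0])
print(text)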
test_wavs/A2_0.wav
ADDED
Binary file (314 kB)

test_wavs/A2_1.wav
ADDED
Binary file (326 kB)

test_wavs/C7_639.wav
ADDED
Binary file (304 kB)