justest KPatrick committed · Commit 22804d3 · 0 parents

Duplicate from KPatrick/PaddleSpeechASR


Co-authored-by: KPatrick <[email protected]>

Files changed (5)
  1. .gitattributes +27 -0
  2. README.md +38 -0
  3. app.py +41 -0
  4. packages.txt +5 -0
  5. requirements.txt +9 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,38 @@
+ ---
+ title: PaddleSpeechASR
+ emoji: 🌖
+ colorFrom: green
+ colorTo: red
+ sdk: gradio
+ app_file: app.py
+ pinned: false
+ duplicated_from: KPatrick/PaddleSpeechASR
+ ---
+
+ # Configuration
+
+ `title`: _string_
+ Display title for the Space
+
+ `emoji`: _string_
+ Space emoji (emoji-only character allowed)
+
+ `colorFrom`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `colorTo`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `sdk`: _string_
+ Can be either `gradio` or `streamlit`
+
+ `sdk_version`: _string_
+ Only applicable for `streamlit` SDK.
+ See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+ `app_file`: _string_
+ Path to your main application file (which contains either `gradio` or `streamlit` Python code).
+ Path is relative to the root of the repository.
+
+ `pinned`: _boolean_
+ Whether the Space stays on top of your list.
app.py ADDED
@@ -0,0 +1,41 @@
+ import gradio as gr
+ import librosa
+ import numpy as np
+ import paddlehub as hub
+ from paddlenlp import Taskflow
+ from paddlespeech.cli import ASRExecutor
+ import soundfile as sf
+
+ # asr_model = hub.Module(name='u2_conformer_aishell')
+ asr_executor = ASRExecutor()
+ text_correct_model = Taskflow("text_correction")
+ punc_model = hub.Module(name='auto_punc')
+
+
+ def speech_recognize(file):
+     data, sr = librosa.load(file)
+     if sr != 16000:
+         data = librosa.resample(data, sr, 16000)
+     sf.write(file, data, samplerate=16000)
+
+     print(f'[Audio Input] shape: {data.shape}, dtype: {data.dtype}, file: {file}')
+     # text = asr_model.speech_recognize(file, device='cpu')
+     text = asr_executor(file)
+     text_correction = text_correct_model(text)[0]
+     cor_text, errors = text_correction['target'], text_correction['errors']
+     print(f'[Text Correction] errors: {errors}')
+     punc_text = punc_model.add_puncs(cor_text, device='cpu')[0]
+
+     ret = ''
+     ret += f'[ASR] {text}\n'
+     ret += f'[COR] {cor_text}\n'
+     ret += f'[PUN] {punc_text}'
+     return ret
+
+
+ iface = gr.Interface(
+     fn=speech_recognize,
+     inputs=gr.inputs.Audio(source="microphone", type='filepath'),
+     outputs="text",
+ )
+ iface.launch()
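
For reference, the sketch below exercises the same ASR, text correction, and punctuation pipeline outside of Gradio. It is not part of this commit: it assumes the packages from requirements.txt and packages.txt are installed, and `sample.wav` is a hypothetical 16 kHz mono recording in the working directory; the calls mirror those in app.py (`ASRExecutor`, `Taskflow("text_correction")`, `hub.Module(name='auto_punc')`).

```python
# Standalone sanity check of the pipeline used in app.py (illustration only).
# Assumes the dependencies from requirements.txt / packages.txt are installed;
# sample.wav is a hypothetical 16 kHz mono recording supplied by the user.
import paddlehub as hub
from paddlenlp import Taskflow
from paddlespeech.cli import ASRExecutor

asr = ASRExecutor()                        # speech -> raw transcription
corrector = Taskflow("text_correction")    # fix recognition errors
punctuator = hub.Module(name='auto_punc')  # restore punctuation

text = asr('sample.wav')
corrected = corrector(text)[0]['target']
punctuated = punctuator.add_puncs(corrected, device='cpu')[0]
print(punctuated)
```

Since requirements.txt leaves librosa and gradio unpinned, note that newer librosa releases (0.10+) make the resample rates keyword-only (`orig_sr=`, `target_sr=`) and newer Gradio releases drop `gr.inputs.Audio` in favor of `gr.Audio`, so app.py as written assumes the older APIs.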
packages.txt ADDED
@@ -0,0 +1,5 @@
+ libsndfile1
+ sox
+ ffmpeg
+ libsm6
+ libxext6
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ paddlepaddle==2.2.0
+ paddleaudio==0.1.0a0
+ paddlespeech==0.1.0rc0
+ paddlehub
+ paddlenlp
+ pypinyin
+ SoundFile
+ librosa
+ opencv-python-headless