qgyd2021 committed
Commit a158d14 · 1 Parent(s): 7860ecd

first commit

.gitignore ADDED
@@ -0,0 +1,16 @@
+
+ .git/
+ .idea/
+
+ **/flagged/
+ **/log/
+ **/logs/
+ **/__pycache__/
+
+ data/
+ docs/
+ dotenv/
+ trained_models/
+ temp/
+
+ **/*.xlsx
examples/evaluation/evaluation.py ADDED
@@ -0,0 +1,302 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import argparse
+ import base64
+ from datetime import datetime
+ import json
+ import os
+ from pathlib import Path
+ import sys
+
+ pwd = os.path.abspath(os.path.dirname(__file__))
+ sys.path.append(os.path.join(pwd, "../../"))
+
+ import pandas as pd
+ import requests
+ from tqdm import tqdm
+
+ from project_settings import project_path
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--host", default="127.0.0.1", type=str)
+     parser.add_argument("--port", default=2080, type=int)
+
+     parser.add_argument(
+         "--wav_dir",
+         default=(project_path / "data/wav/65").as_posix(),
+         type=str
+     )
+     parser.add_argument("--country_code", default=65, type=int)
+
+     args = parser.parse_args()
+     return args
+
+
+ # 1
+ label_map_1 = {
+     "bell": "bell",
+     "caller_id_blocked": "caller_id_blocked",
+     "can_not_be_completed": "can_not_be_completed",
+     "disconnected": "disconnected",
+     "disconnected_or_out_of_service_or_invalid_number": "disconnected_or_out_of_service_or_invalid_number",
+     "mute": "mute",
+     "not_accept_calls": "not_accept_calls",
+     "not_available": "not_available",
+     "out_of_service": "out_of_service",
+     "restricted_or_unavailable": "restricted_or_unavailable",
+ }
+
+ # 52
+ label_map_52 = {
+     "bell": "bell",
+     "mute": "mute",
+
+     "music": "music",
+     "voice": "music",
+
+     "arrears": "out_of_service",
+
+     "busy": "busy",
+
+     "invalid_number": "invalid_number",
+
+     "not_available": "not_available",
+
+     "not_available_or_out_of_service": "not_available_or_out_of_service",
+     "number_paused": "number_paused",
+     "out_of_service": "out_of_service",
+     "restricted": "out_of_service",
+
+     "power_off_or_out_of_service": "power_off_or_out_of_service",
+     "voicemail": "voicemail",
+     "voicemail_is_full": "voicemail_is_full",
+ }
+
+ # 63
+ label_map_63 = {
+     "bell": "bell",
+     "mute": "mute",
+
+     "music": "music",
+     "voice": "music",
+     "noise": "music",
+
+     "busy": "busy",
+     "call_forwarding": "call_forwarding",
+     "can_not_be_completed": "can_not_be_completed",
+     "invalid_number": "invalid_number",
+     "no_route_found": "no_route_found",
+     "not_reachable": "not_reachable",
+     "out_of_service": "out_of_service",
+     "unattended": "unattended",
+     "unattended_or_out_of_coverage_area": "unattended_or_out_of_coverage_area",
+ }
+
+ # 65
+ label_map_65 = {
+     "bell": "bell",
+     "mute": "mute",
+
+     "music": "music",
+     "voice": "music",
+     "noise": "music",
+
+     "busy": "busy",
+     "can_not_be_completed": "can_not_be_completed",
+     "invalid_number": "invalid_number",
+     "not_available": "not_available",
+     "not_responding": "not_responding",
+     "out_of_service": "out_of_service",
+ }
+
+ # 66
+ label_map_66 = {
+     "bell": "bell",
+     "mute": "mute",
+
+     "music": "music",
+     "voice": "music",
+     "noise": "music",
+
+     "invalid_number": "invalid_number",
+     "no_answer": "no_answer",
+
+     "not_reachable": "not_reachable",
+     "out_of_service": "out_of_service",
+     "voicemail": "voicemail",
+ }
+
+ # 91
+ label_map_91 = {
+     "bell": "bell",
+     "mute": "mute",
+
+     "music": "music",
+     "voice": "music",
+     "noise": "music",
+
+     "noise_music": "music",
+     "noise_mute": "music",
+     "noise_voice": "music",
+
+     "busy": "busy",
+
+     "can_not_connect": "not_reachable",
+     "forwarded": "forwarded",
+     "line_busy": "line_busy",
+     "not_available": "not_available",
+     "not_reachable": "not_reachable",
+     "invalid_number": "invalid_number",
+     "out_of_service": "out_of_service",
+     "out_of_service_area": "out_of_service_area",
+     "power_off": "power_off",
+     "unknown": "music",
+ }
+
+ # 234
+ label_map_234 = {
+     "bell": "bell",
+     "mute": "mute",
+
+     "music": "music",
+     "voice": "music",
+     "other": "music",
+
+     "busy": "busy",
+     "on_another_call": "busy",
+
+     "can_not_be_reached": "power_off",
+     "invalid_number": "invalid_number",
+     "not_available": "not_available",
+     "power_off": "power_off",
+     "power_off_or_out_of_service": "power_off",
+     "not_reachable": "power_off",
+ }
+
+ # 254
+ label_map_254 = {
+     "bell": "bell",
+     "mute": "mute",
+
+     "music": "music",
+     "voice": "music",
+
+     "busy": "busy",
+     "invalid_number": "invalid_number",
+     "not_available": "not_available",
+     "not_reachable": "power_off",
+     "out_of_service": "out_of_service",
+     "power_off": "power_off",
+ }
+
+ # 255
+ label_map_255 = {
+     "bell": "bell",
+     "mute": "mute",
+
+     "busy": "busy",
+     "can_not_be_connected": "can_not_be_connected",
+     "invalid_number": "invalid_number",
+     "no_answer": "no_answer",
+
+     "not_available": "not_available",
+     "not_reachable": "not_available",
+
+     "restricted": "restricted",
+     "switched_off_or_not_available": "switched_off_or_not_available",
+ }
+
+ # 886
+ label_map_886 = {
+     "bell": "bell",
+     "mute": "mute",
+
+     "music": "music",
+     "voice": "music",
+     "卡线": "music",
+
+     "busy": "busy",
+     "invalid_number": "invalid_number",
+     "not_available": "not_available",
+     "number_paused": "number_paused",
+
+     "power_off": "power_off",
+     "voicemail": "voicemail",
+ }
+
+ area_code2label_map = {
+     1: label_map_1,
+     52: label_map_52,
+     63: label_map_63,
+     65: label_map_65,
+     66: label_map_66,
+     91: label_map_91,
+     234: label_map_234,
+     254: label_map_254,
+     255: label_map_255,
+     886: label_map_886,
+ }
+
+
+ def main():
+     args = get_args()
+
+     wav_dir = Path(args.wav_dir)
+
+     url = "http://{host}:{port}/call_status".format(
+         host=args.host,
+         port=args.port,
+     )
+     headers = {
+         "Content-Type": "application/json"
+     }
+
+     label_map = area_code2label_map[args.country_code]
+
+     result = list()
+     for filename in tqdm(wav_dir.glob("*/*.wav")):
+         label = filename.parts[-2]
+         if label not in label_map.keys():
+             continue
+         label = label_map[label]
+
+         with open(filename, "rb") as f:
+             data = f.read()
+
+         base64string = base64.b64encode(data).decode("utf-8")
+
+         data = {
+             "country": args.country_code,
+             "record": base64string
+         }
+
+         resp = requests.post(url, headers=headers, data=json.dumps(data))
+         if resp.status_code != 200:
+             print("request failed, status_code: {}, text: {}.".format(resp.status_code, resp.text))
+             continue
+         js = resp.json()
+         predict = js["result"]["label_id"]
+         result.append({
+             "filename": filename,
+             "target": label,
+             "predict": predict,
+             "correct": 1 if label == predict else 0,
+         })
+
+     version = datetime.now().strftime("%Y%m%d_%H%M")
+     result = pd.DataFrame(result)
+     result.to_excel("evaluation_{}_{}.xlsx".format(args.country_code, version), index=False, encoding="utf_8_sig")
+     return
+
+
+ if __name__ == "__main__":
+     main()
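
Note: the /call_status service that evaluation.py queries is not part of this commit. A minimal sketch of the request/response contract the script assumes, mirroring the code above (the filename sample.wav is a placeholder):

# hypothetical client call, matching evaluation.py's assumptions
import base64
import json
import requests

with open("sample.wav", "rb") as f:
    record = base64.b64encode(f.read()).decode("utf-8")

resp = requests.post(
    "http://127.0.0.1:2080/call_status",
    headers={"Content-Type": "application/json"},
    data=json.dumps({"country": 65, "record": record}),
)
# the script expects a body shaped like {"result": {"label_id": "<predicted label>"}}
print(resp.json()["result"]["label_id"])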
examples/make_templates/step_1_wav_classification.py ADDED
@@ -0,0 +1,148 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import argparse
+ from collections import Counter, defaultdict
+ from glob import glob
+ from itertools import chain
+ import json
+ import os
+ from pathlib import Path
+ import sys
+
+ pwd = os.path.abspath(os.path.dirname(__file__))
+ sys.path.append(os.path.join(pwd, '../../'))
+
+ import numpy as np
+ from scipy.io import wavfile
+ import torch
+ import torch.nn as nn
+ import shutil
+ from tqdm import tqdm
+
+ from project_settings import project_path
+ from toolbox.cv2.misc import show_image
+ from toolbox.python_speech_features.misc import wave2spectrum_image
+
+
+ area_code = 55
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+
+     parser.add_argument(
+         "--model_dir",
+         default=(project_path / "trained_models/early_media_20220721").as_posix(),
+         type=str
+     )
+     parser.add_argument(
+         "--wav_dir",
+         default=(project_path / "data/early_media/{area_code}/wav".format(area_code=area_code)).as_posix(),
+         type=str
+     )
+
+     args = parser.parse_args()
+     return args
+
+
+ def demo1():
+     args = get_args()
+
+     model_dir = Path(args.model_dir)
+     wav_dir = Path(args.wav_dir)
+
+     # model
+     seq2seq_encoder = torch.jit.load(model_dir / "seq2seq_encoder.pth")
+     seq2vec_encoder = torch.jit.load(model_dir / "seq2vec_encoder.pth")
+     classification_layer = torch.jit.load(model_dir / "classification_layer.pth")
+     with open(model_dir / "index2token.json", "r", encoding="utf-8") as f:
+         index2token = json.load(f)
+
+     # read the wav files
+     for filename in tqdm(wav_dir.glob("*.wav")):
+         filename: Path = filename
+         # path, fn = os.path.split(filename)
+         try:
+             sample_rate, wave = wavfile.read(filename)
+         except UnboundLocalError:
+             os.remove(filename)
+             continue
+         if sample_rate != 8000:
+             raise AssertionError
+
+         if len(wave) < 1.0 * sample_rate:
+             os.remove(filename.as_posix())
+             continue
+
+         max_wave_value = 32768.0
+         wave = wave / max_wave_value
+
+         array = wave2spectrum_image(
+             wave,
+             sample_rate=8000,
+             xmax=10,
+             xmin=-50,
+             winlen=0.025,
+             winstep=0.01,
+             nfft=512,
+             n_low_freq=100,
+         )
+         # show_image(array.T)
+
+         array = np.array([array], dtype=np.float32)
+         array = torch.tensor(array, dtype=torch.float32)
+         mask: torch.IntTensor = torch.ones(size=array.shape[:-1], device=array.device, dtype=torch.int32)
+
+         array = seq2seq_encoder.forward(array, mask)
+
+         length = array.shape[-2]
+
+         m_win_size = 50
+         m_win_step = 25
+
+         labels = list()
+         idx = 0
+         while True:
+             begin = idx * m_win_step
+             end = begin + m_win_size
+             if end > length:
+                 break
+             window = array[:, begin:end, :]
+
+             window = seq2vec_encoder.forward(window)
+
+             logits = classification_layer(window)
+             probs = torch.nn.functional.softmax(logits, dim=-1)
+             label_idx = probs.argmax(dim=-1).item()
+
+             label_str = index2token[str(label_idx)]
+             labels.append(label_str)
+             idx += 1
+
+         counter = Counter(labels)
+         total = sum(counter.values())
+
+         rate_dict = defaultdict(float)
+         for k, v in counter.items():
+             rate_dict[k] = v / total
+
+         if rate_dict["voice"] > 0.1:
+             tgt = filename.parent / "voice"
+         elif rate_dict["music"] > 0.1:
+             tgt = filename.parent / "music"
+         elif rate_dict["bell"] > 0.1:
+             tgt = filename.parent / "bell"
+         else:
+             tgt = filename.parent / "mute"
+
+         tgt.mkdir(exist_ok=True)
+         try:
+             shutil.move(filename.as_posix(), tgt.as_posix())
+         except shutil.Error:
+             fn = tgt / "{}_2.wav".format(filename.stem)
+             shutil.move(filename.as_posix(), fn)
+     return
+
+
+ if __name__ == '__main__':
+     demo1()
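
The routing rule at the end of demo1 is a thresholded vote over the per-window labels. A minimal standalone sketch of just that rule (the example label list is an assumption):

# sketch of demo1's decision rule: any class whose share of the windows
# exceeds 10% wins, checked in priority order voice > music > bell, else mute
from collections import Counter

labels = ["bell", "bell", "voice", "mute"]  # example window labels
counter = Counter(labels)
total = sum(counter.values())
rates = {k: v / total for k, v in counter.items()}

for candidate in ("voice", "music", "bell"):
    if rates.get(candidate, 0.0) > 0.1:
        decision = candidate
        break
else:
    decision = "mute"
print(decision)  # "voice": its share 1/4 = 0.25 exceeds 0.1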
examples/make_templates/step_2_wav_split.py ADDED
@@ -0,0 +1,73 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ """
+ ASR:
+ https://cloud.tencent.com/product/asr#mod2
+
+ https://huggingface.co/spaces/sanchit-gandhi/whisper-large-v2
+ https://huggingface.co/spaces/hf-audio/whisper-large-v3
+ """
+ import argparse
+ from pathlib import Path
+
+ import numpy as np
+ from python_speech_features import sigproc
+ from scipy.io import wavfile
+ from tqdm import tqdm
+
+ from project_settings import project_path
+
+
+ area_code = 91
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--filename",
+         default=(project_path / "data/wav/91/91_wav/voice/91_1699977085057.wav").as_posix(),
+         type=str
+     )
+     parser.add_argument(
+         "--templates_segmented_dir",
+         default=(project_path / "data/early_media_template/{area_code}/segmented".format(area_code=area_code)).as_posix(),
+         type=str
+     )
+     parser.add_argument("--win_size", default=2.0, type=float)  # segment length, in seconds
+     parser.add_argument("--win_len", default=0.5, type=float)   # hop between segments, in seconds
+     args = parser.parse_args()
+     return args
+
+
+ def main():
+     args = get_args()
+
+     filename = Path(args.filename)
+     templates_segmented_dir = Path(args.templates_segmented_dir)
+
+     templates_segmented_dir.mkdir(parents=True, exist_ok=True)
+
+     sample_rate, signal = wavfile.read(filename)
+
+     frames = sigproc.framesig(
+         sig=signal,
+         frame_len=args.win_size * sample_rate,
+         frame_step=args.win_len * sample_rate,
+         # winfunc=np.hamming
+     )
+
+     for j, frame in enumerate(frames):
+         to_filename = templates_segmented_dir / "{}_{}.wav".format(filename.stem, j)
+
+         frame = np.array(frame, dtype=np.int16)
+         wavfile.write(
+             filename=to_filename,
+             rate=sample_rate,
+             data=frame
+         )
+
+     return
+
+
+ if __name__ == '__main__':
+     main()
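
The segment count that sigproc.framesig produces follows directly from the window and hop sizes. A small worked sketch with step_2's defaults (the 10 s recording length is an assumption):

# how many 2.0 s segments a 0.5 s hop yields on a 10 s, 8 kHz recording
import math

sample_rate = 8000
signal_len = 10 * sample_rate        # 80000 samples
frame_len = int(2.0 * sample_rate)   # 16000 samples per segment
frame_step = int(0.5 * sample_rate)  # 4000 samples hop
num_frames = 1 + math.ceil((signal_len - frame_len) / frame_step)
print(num_frames)  # 17 overlapping segments (framesig zero-pads the last one)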
examples/make_templates/step_3_move_by_template.py ADDED
@@ -0,0 +1,229 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import argparse
+ from collections import defaultdict
+ from glob import glob
+ import os
+ from pathlib import Path
+ import shutil
+ from typing import Dict, List, Callable
+
+ import cv2 as cv
+ import numpy as np
+ from python_speech_features import sigproc
+ from scipy.io import wavfile
+ from tqdm import tqdm
+
+ from project_settings import project_path
+ from toolbox.python_speech_features.misc import wave2spectrum_image
+
+
+ area_code = 55
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--templates_dir",
+         default=(project_path / "data/early_media/{area_code}/templates".format(
+             area_code=area_code
+         )).as_posix(),
+         type=str
+     )
+     parser.add_argument(
+         "--wav_dir",
+         default=(project_path / "data/early_media/{area_code}/wav".format(area_code=area_code)).as_posix(),
+         type=str
+     )
+     args = parser.parse_args()
+     return args
+
+
+ class AudioTemplateMatch(object):
+     def __init__(self,
+                  wave_to_spectrum: Callable,
+                  sample_rate: int = 8000,
+                  template_crop: float = 0.1,
+                  threshold: float = 0.01,
+                  ):
+         """
+         :param wave_to_spectrum: Callable; takes a wave, np.ndarray, shape=(n,), and returns a spectrum, np.ndarray, shape=(time_steps, n_dim)
+         :param sample_rate:
+         :param template_crop:
+         :param threshold:
+         """
+         self.wave_to_spectrum = wave_to_spectrum
+         self.sample_rate = sample_rate
+         self.template_crop = template_crop
+         self.threshold = threshold
+
+         self.dim = 100
+
+         self.label2templates: Dict[str, List[Dict[str, np.ndarray]]] = None
+         self.max_template_width: int = None
+
+     def load_template(self, path: str):
+         filename_pattern = os.path.join(path, '*/*.wav')
+         filename_list = glob(filename_pattern)
+         label2templates = defaultdict(list)
+         max_template_width = 0
+
+         print('loading templates.')
+         for filename in tqdm(filename_list):
+             path, fn = os.path.split(filename)
+             root_path, label = os.path.split(path)
+
+             # wave, sample_rate = librosa.load(filename, sr=self.sample_rate)
+             sample_rate, wave = wavfile.read(filename)
+             if sample_rate != self.sample_rate:
+                 raise AssertionError('expected sample rate: {}, instead of: {}'.format(self.sample_rate, sample_rate))
+             if wave.dtype != np.int16:
+                 raise AssertionError('expected wave dtype np.int16, instead of: {}'.format(wave.dtype))
+
+             if wave.shape[0] < self.sample_rate:
+                 raise AssertionError('wave.shape: {}'.format(wave.shape))
+
+             # dtype np.int16
+             max_wave_value = 32768.0
+             wave = wave / max_wave_value
+
+             template = self.wave_to_spectrum(wave)
+             template = template[:, :self.dim]
+
+             template_width, _ = template.shape
+             if template_width > max_template_width:
+                 max_template_width = template_width
+             label2templates[label].append({
+                 'filename': filename,
+                 'template': template,
+             })
+
+         self.label2templates = label2templates
+         self.max_template_width = max_template_width
+         return label2templates, max_template_width
+
+     def template_match_by_wave(self, wave: np.ndarray):
+         # dtype np.int16
+         max_wave_value = 32768.0
+         wave = wave / max_wave_value
+
+         spectrum = self.wave_to_spectrum(wave)
+
+         spectrum = spectrum[:, :self.dim]
+         result = self.template_match_by_spectrum(spectrum)
+         return result
+
+     def template_match_by_spectrum(self, spectrum: np.ndarray):
+         result = self._shadow_template_match(spectrum)
+         return result
+
+     def _shadow_template_match(self, spectrum):
+         matches = list()
+
+         if spectrum.shape[0] < self.max_template_width:
+             return matches
+
+         for label, templates in self.label2templates.items():
+             for templ in templates:
+                 filename = templ['filename']
+                 template = templ['template']
+
+                 tw, _ = template.shape[:2]
+                 c = int(tw * self.template_crop)
+                 template = template[c: -c]
+
+                 tw, th = template.shape[:2]
+
+                 shadow_m = 3
+                 shadow_spect = spectrum[:, :shadow_m]
+                 shadow_templ = template[:, :shadow_m]
+
+                 sqdiff_normed = cv.matchTemplate(image=shadow_spect, templ=shadow_templ, method=cv.TM_SQDIFF_NORMED)
+                 min_val, _, min_loc, _ = cv.minMaxLoc(sqdiff_normed)
+                 # print(min_val, min_loc)
+                 if min_val > self.threshold:
+                     continue
+
+                 # master
+                 _, x = min_loc
+                 match_spectrum = spectrum[x:x+tw, :]
+                 sqdiff_normed = cv.matchTemplate(image=match_spectrum, templ=template, method=cv.TM_SQDIFF_NORMED)
+
+                 min_val, _, min_loc, _ = cv.minMaxLoc(sqdiff_normed)
+                 # print(min_val, min_loc)
+                 if min_val > self.threshold:
+                     continue
+
+                 matches.append({
+                     'begin': x,
+                     'width': tw,
+                     'label': label,
+                     'filename': filename,
+                     'min_val': min_val,
+                 })
+         return matches
+
+
+ def main():
+     args = get_args()
+
+     templates_dir = Path(args.templates_dir)
+     wav_dir = Path(args.wav_dir)
+
+     def wave_to_spectrum(wave: np.ndarray):
+         spectrum = wave2spectrum_image(wave=wave, sample_rate=8000)
+         spectrum = np.array(spectrum, dtype=np.float32)
+         spectrum /= 255
+         return spectrum
+
+     audio_template_match = AudioTemplateMatch(
+         wave_to_spectrum=wave_to_spectrum,
+         sample_rate=8000,
+         template_crop=0.1,
+         threshold=0.007,
+     )
+     audio_template_match.load_template(path=args.templates_dir)
+
+     for filename in tqdm(wav_dir.glob("voice/*.wav")):
+         filename: Path = filename
+
+         sample_rate, signal = wavfile.read(filename)
+
+         if sample_rate != 8000:
+             print('sample rate not 8000, filename: {}'.format(filename))
+
+         matches = audio_template_match.template_match_by_wave(wave=signal)
+
+         if len(matches) == 0:
+             continue
+
+         labels = [match['label'] for match in matches]
+         labels_ = [label for label in labels if label not in ("music",)]
+
+         if len(set(labels_)) > 1:
+             print("more than one template class matched; please check whether the matches are correct.")
+             print(filename)
+             for match in matches:
+                 print(match)
+             continue
+
+         if len(labels_) == 0:
+             label = "music"
+         else:
+             label = labels_[0]
+
+         if filename.parts[-2] != label:
+             tgt = filename.parent.parent / label
+             os.makedirs(tgt, exist_ok=True)
+             try:
+                 shutil.move(filename.as_posix(), tgt.as_posix())
+             except shutil.Error:
+                 print(filename)
+                 print(tgt)
+                 continue
+
+     return
+
+
+ if __name__ == '__main__':
+     main()
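
The shadow match above runs cv.matchTemplate on only the first 3 frequency columns as a cheap pre-filter before the full template comparison. A minimal standalone sketch of the matching call itself (the arrays here are synthetic):

# TM_SQDIFF_NORMED scores a perfect match as 0, so a small min_val is a hit
import cv2 as cv
import numpy as np

spectrum = np.random.rand(300, 100).astype(np.float32)  # (time_steps, n_dim)
template = spectrum[120:170].copy()                     # plant a known match

scores = cv.matchTemplate(image=spectrum, templ=template, method=cv.TM_SQDIFF_NORMED)
min_val, _, min_loc, _ = cv.minMaxLoc(scores)
print(min_val, min_loc)  # ~0.0 at (x=0, y=120); the code above takes y as the time offset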
examples/make_templates/step_4_batch_wav_split.py ADDED
@@ -0,0 +1,78 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import argparse
+ from pathlib import Path
+
+ import numpy as np
+ from python_speech_features import sigproc
+ from scipy.io import wavfile
+ from tqdm import tqdm
+
+ from project_settings import project_path
+
+
+ area_code = 1
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--segmented_dir",
+         default=(project_path / "data/2s_wav/{area_code}".format(area_code=area_code)).as_posix(),
+         type=str
+     )
+     parser.add_argument(
+         "--templates_dir",
+         default=(project_path / "data/early_media_template/{area_code}".format(area_code=area_code)).as_posix(),
+         type=str
+     )
+     parser.add_argument(
+         "--wav_dir",
+         default=(project_path / "data/wav/{area_code}".format(area_code=area_code)).as_posix(),
+         type=str
+     )
+     # hop equals window size here, so the segments do not overlap
+     parser.add_argument("--win_size", default=2.0, type=float)
+     parser.add_argument("--win_len", default=2.0, type=float)
+     args = parser.parse_args()
+     return args
+
+
+ def main():
+     args = get_args()
+
+     segmented_dir = Path(args.segmented_dir)
+     templates_dir = Path(args.templates_dir)
+     wav_dir = Path(args.wav_dir)
+
+     segmented_dir.mkdir(parents=True, exist_ok=True)
+     templates_dir.mkdir(parents=True, exist_ok=True)
+
+     for filename in tqdm(wav_dir.glob("*/*.wav")):
+         # print(filename)
+
+         sample_rate, signal = wavfile.read(filename)
+         if len(signal) < args.win_size * sample_rate:
+             continue
+
+         frames = sigproc.framesig(
+             sig=signal,
+             frame_len=args.win_size * sample_rate,
+             frame_step=args.win_len * sample_rate,
+             # winfunc=np.hamming
+         )
+
+         for j, frame in enumerate(frames):
+             to_filename = segmented_dir / "{}_{}.wav".format(filename.stem, j)
+
+             frame = np.array(frame, dtype=np.int16)
+             wavfile.write(
+                 filename=to_filename,
+                 rate=sample_rate,
+                 data=frame
+             )
+
+     return
+
+
+ if __name__ == '__main__':
+     main()
project_settings.py ADDED
@@ -0,0 +1,12 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import os
+ from pathlib import Path
+
+
+ project_path = os.path.abspath(os.path.dirname(__file__))
+ project_path = Path(project_path)
+
+
+ if __name__ == '__main__':
+     pass
requirements.txt ADDED
@@ -0,0 +1,18 @@
+ setuptools_rust==1.1.2
+ gradio==2.4.6
+ opencv-contrib-python==3.4.10.37
+ flask==2.0.2
+ gevent==21.12.0
+ werkzeug==2.0.2
+ jsonschema==4.0.0
+ numpy==1.19.5
+ scipy==1.5.4
+ torch==1.10.2
+ tqdm==4.62.3
+ python_speech_features==0.6
+ scikit-learn==0.24.2
+ requests==2.26.0
+ gunicorn==20.1.0
+ pandas==1.1.5
+ xlrd==1.2.0
+ openpyxl==3.0.9
toolbox/__init__.py ADDED
@@ -0,0 +1,6 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+
+
+ if __name__ == '__main__':
+     pass
toolbox/cv2/__init__.py ADDED
@@ -0,0 +1,6 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+
+
+ if __name__ == '__main__':
+     pass
toolbox/cv2/misc.py ADDED
@@ -0,0 +1,149 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import copy
+ from typing import List, Union
+ import cv2 as cv
+
+
+ def show_image(image, win_name='input image'):
+     # cv.namedWindow(win_name, cv.WINDOW_NORMAL)
+     cv.namedWindow(win_name, cv.WINDOW_AUTOSIZE)
+
+     cv.imshow(win_name, image)
+     cv.waitKey(0)
+     cv.destroyAllWindows()
+     return
+
+
+ def erode(labels: List[Union[str, int]], erode_label: Union[str, int], n: int = 1):
+     """
+     Iterate over the labels list and erode each run of consecutive erode_label labels by n.
+     """
+     result = list()
+     in_span = False
+     count = 0
+     for idx, label in enumerate(labels):
+         if label == erode_label:
+             if not in_span:
+                 in_span = True
+                 count = 0
+             if count < n:
+                 if len(result) == 0:
+                     result.append(label)
+                 else:
+                     result.append(result[-1])
+                 count += 1
+                 continue
+             else:
+                 result.append(label)
+                 continue
+         elif label != erode_label:
+             if in_span:
+                 in_span = False
+
+                 for i in range(min(len(result), n)):
+                     result[-i-1] = label
+                 result.append(label)
+                 continue
+             else:
+                 result.append(label)
+                 continue
+
+         result.append(label)
+     return result
+
+
+ def dilate(labels: List[Union[str, int]], dilate_label: Union[str, int], n: int = 1):
+     """
+     Iterate over the labels list and dilate each run of consecutive dilate_label labels by n.
+     """
+     result = list()
+     in_span = False
+     count = float('inf')
+     for idx, label in enumerate(labels):
+         if count < n:
+             result.append(dilate_label)
+             count += 1
+             continue
+         if label == dilate_label:
+             if not in_span:
+                 in_span = True
+
+                 for i in range(min(len(result), n)):
+                     result[-i-1] = label
+                 result.append(label)
+                 continue
+             else:
+                 result.append(label)
+                 continue
+         else:
+             if in_span:
+                 in_span = False
+                 result.append(dilate_label)
+                 count = 1
+                 continue
+             else:
+                 result.append(label)
+                 continue
+
+     return result
+
+
+ def demo1():
+     labels = [
+         'voice', 'mute', 'mute', 'voice', 'voice', 'voice', 'voice', 'bell', 'bell', 'bell', 'mute', 'mute', 'mute', 'voice',
+     ]
+
+     result = erode(
+         labels=labels,
+         erode_label='voice',
+         n=1,
+     )
+     print(len(labels))
+     print(len(result))
+     print(result)
+     return
+
+
+ def demo2():
+     labels = [
+         'voice', 'mute', 'mute', 'voice', 'voice', 'voice', 'voice', 'bell', 'bell', 'bell', 'mute', 'mute', 'mute', 'voice',
+     ]
+
+     result = dilate(
+         labels=labels,
+         dilate_label='voice',
+         n=2,
+     )
+     print(len(labels))
+     print(len(result))
+     print(result)
+
+     return
+
+
+ def demo3():
+     import time
+     labels = ['mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'voice', 'bell', 'bell', 'bell', 'bell', 'bell', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'bell', 'bell', 'bell', 'bell', 'bell', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'bell', 'bell', 'bell', 'bell', 'bell', 'bell', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute']
+
+     begin = time.time()
+     labels = erode(labels, erode_label='music', n=1)
+     labels = dilate(labels, dilate_label='music', n=1)
+
+     labels = dilate(labels, dilate_label='voice', n=2)
+     labels = erode(labels, erode_label='voice', n=2)
+     labels = erode(labels, erode_label='voice', n=1)
+     labels = dilate(labels, dilate_label='voice', n=3)
+
+     cost = time.time() - begin
+     print(cost)
+     print(labels)
+     return
+
+
+ if __name__ == '__main__':
+     # demo1()
+     # demo2()
+     demo3()
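
A worked example of the two 1-D "morphology" helpers above, with n=1 (the label list is an assumption):

# erode shaves one frame off each end of every run of the target label;
# dilate grows each run by one frame on each side, overwriting its neighbours
from toolbox.cv2.misc import erode, dilate

labels = ["mute", "voice", "voice", "voice", "mute", "mute"]
print(erode(labels, erode_label="voice", n=1))
# -> ['mute', 'mute', 'voice', 'mute', 'mute', 'mute']
print(dilate(labels, dilate_label="voice", n=1))
# -> ['voice', 'voice', 'voice', 'voice', 'voice', 'mute']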
toolbox/python_speech_features/__init__.py ADDED
@@ -0,0 +1,6 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+
+
+ if __name__ == '__main__':
+     pass
toolbox/python_speech_features/misc.py ADDED
@@ -0,0 +1,104 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import os
+
+ import cv2 as cv
+ import numpy as np
+ from python_speech_features import sigproc
+ from python_speech_features import mfcc
+ from sklearn import preprocessing
+
+
+ def wave2spectrum(sample_rate, wave, winlen=0.025, winstep=0.01, nfft=512):
+     """Compute the power-spectrum image."""
+     frames = sigproc.framesig(
+         sig=wave,
+         frame_len=winlen * sample_rate,
+         frame_step=winstep * sample_rate,
+         winfunc=np.hamming
+     )
+     spectrum = sigproc.powspec(
+         frames=frames,
+         NFFT=nfft
+     )
+     spectrum = spectrum.T
+     return spectrum
+
+
+ def wave2spectrum_image(
+         wave, sample_rate,
+         xmax=10, xmin=-50,
+         winlen=0.025, winstep=0.01, nfft=512,
+         n_low_freq=None
+ ):
+     """
+     :return: numpy.ndarray, shape=(time_step, n_dim)
+     """
+     spectrum = wave2spectrum(
+         sample_rate, wave,
+         winlen=winlen,
+         winstep=winstep,
+         nfft=nfft,
+     )
+     spectrum = np.log(spectrum, out=np.zeros_like(spectrum), where=(spectrum != 0))
+     spectrum = spectrum.T
+     gray = 255 * (spectrum - xmin) / (xmax - xmin)
+     gray = np.array(gray, dtype=np.uint8)
+     if n_low_freq is not None:
+         gray = gray[:, :n_low_freq]
+
+     return gray
+
+
+ def compute_delta(specgram: np.ndarray, win_length: int = 5):
+     """
+     :param specgram: shape=[time_steps, n_mels]
+     :param win_length:
+     :return:
+     """
+     n = (win_length - 1) // 2
+
+     specgram = np.array(specgram, dtype=np.float32)
+
+     kernel = np.arange(-n, n + 1, 1)
+     kernel = np.reshape(kernel, newshape=(2 * n + 1, 1))
+     kernel = np.array(kernel, dtype=np.float32) / 10
+
+     delta = cv.filter2D(
+         src=specgram,
+         ddepth=cv.CV_32F,
+         kernel=kernel,
+     )
+     return delta
+
+
+ def delta_mfcc_feature(signal, sample_rate):
+     """
+     This code was written for a GMM-UBM speaker-verification model.
+
+     https://github.com/pventrella20/Speaker_identification_-GMM-UBM-
+     https://github.com/MChamith/Speaker_verification_gmm_ubm
+
+     :param signal: np.ndarray
+     :param sample_rate: sample rate of the audio file
+     :return:
+     """
+
+     # shape=[time_steps, n_mels]
+     mfcc_feat = mfcc(
+         signal=signal,
+         samplerate=sample_rate,
+         winlen=0.025,
+         winstep=0.01,
+         numcep=20,
+         appendEnergy=True
+     )
+
+     mfcc_feat = preprocessing.scale(mfcc_feat)
+     delta = compute_delta(mfcc_feat)
+     combined = np.hstack(tup=(mfcc_feat, delta))
+     return combined
+
+
+ if __name__ == '__main__':
+     pass
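
A quick shape check for wave2spectrum_image with the parameters used throughout this repo (the 3 s random input is an assumption):

# expected output: one row per 10 ms hop, cropped to the 100 lowest FFT bins
import numpy as np
from toolbox.python_speech_features.misc import wave2spectrum_image

wave = np.random.uniform(-1, 1, size=8000 * 3)  # 3 s of fake 8 kHz audio
gray = wave2spectrum_image(wave, sample_rate=8000, xmax=10, xmin=-50,
                           winlen=0.025, winstep=0.01, nfft=512, n_low_freq=100)
print(gray.shape, gray.dtype)  # (299, 100) uint8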
toolbox/python_speech_features/silence_detect.py ADDED
@@ -0,0 +1,81 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import numpy as np
+ from python_speech_features import sigproc
+
+
+ def calc_energy(signal, samplerate=16000, winlen=0.025, winstep=0.01):
+     """
+     Any signal can be viewed as the current I through a resistor R=1; the energy is then I^2.
+     """
+     signal = np.array(signal, dtype=np.float32)
+     power = np.square(signal)
+
+     # split into frames
+     frames = sigproc.framesig(power, winlen*samplerate, winstep*samplerate)
+     # energy of each frame (mean over the frame)
+     energy = np.mean(frames, axis=-1)
+     return energy
+
+
+ def calc_zero_crossing_rate(signal, samplerate=16000, winlen=0.025, winstep=0.01):
+     """Zero-crossing rate."""
+     signal = np.where(signal >= 0, 1, -1)
+     cross_zero = np.where(signal[1:] != signal[:-1], 1, 0)
+
+     frames = sigproc.framesig(cross_zero, winlen*samplerate, winstep*samplerate)
+     _, n = frames.shape
+     cross_zero_rate = np.mean(frames, axis=-1)
+
+     return cross_zero_rate
+
+
+ def detect_silence(signal, samplerate=16000, winlen=0.025, winstep=0.01, min_energy=0.01, min_cross_zero_rate=0.05):
+     """Silence-segment detection."""
+     energy = calc_energy(
+         signal=signal,
+         samplerate=samplerate,
+         winlen=winlen,
+         winstep=winstep,
+     )
+     cross_zero_rate = calc_zero_crossing_rate(
+         signal=signal,
+         samplerate=samplerate,
+         winlen=winlen,
+         winstep=winstep,
+     )
+     energy = energy < min_energy
+     cross_zero_rate = cross_zero_rate < min_cross_zero_rate
+     silence_signal = np.array(energy + cross_zero_rate, dtype=bool)
+     silence_signal = silence_signal.tolist()
+
+     frame_len = int(sigproc.round_half_up(winlen*samplerate))
+     frame_step = int(sigproc.round_half_up(winstep*samplerate))
+
+     silence_list = list()
+     last_s = False
+     for idx, s in enumerate(silence_signal):
+         if s is True:
+             if last_s is True:
+                 silence = silence_list.pop(-1)
+                 begin = silence[0]
+                 count = silence[1]
+                 silence_list.append([begin, count + 1])
+             else:
+                 begin = frame_step * idx
+                 silence_list.append([begin, 1])
+
+         last_s = s
+
+     result = list()
+     for silence in silence_list:
+         begin = silence[0]
+         count = silence[1]
+         end = begin + frame_step * (count - 1) + frame_len
+         result.append([begin, end])
+
+     return result
+
+
+ if __name__ == '__main__':
+     pass
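
A minimal usage sketch for detect_silence: a tone with a silent gap in the middle (the tone frequency, thresholds, and gap position are assumptions, not tuned values):

# silence is flagged where frame energy OR zero-crossing rate falls below its threshold
import numpy as np
from toolbox.python_speech_features.silence_detect import detect_silence

sr = 16000
t = np.arange(sr) / sr
signal = np.sin(2 * np.pi * 1000 * t)  # 1 s, 1 kHz tone
signal[4000:8000] = 0.0                # a quarter second of silence

spans = detect_silence(signal, samplerate=sr, min_energy=0.01,
                       min_cross_zero_rate=0.05)
print(spans)  # one [begin, end] span covering roughly samples 4000-8000 (edges are frame-quantized)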
toolbox/python_speech_features/wave_features.py ADDED
@@ -0,0 +1,111 @@
+ #!/usr/bin/python3
+ # -*- coding: utf-8 -*-
+ import numpy as np
+
+ from toolbox.python_speech_features.silence_detect import detect_silence
+
+
+ def calc_wave_features(signal, sample_rate):
+     assert signal.dtype == np.int16
+     assert sample_rate == 8000
+
+     signal = np.array(signal, dtype=np.float32)
+     # plt.plot(signal)
+     # plt.show()
+
+     l = len(signal)
+
+     # mean
+     mean = np.mean(signal)
+
+     # variance
+     var = np.var(signal)
+
+     # percentiles
+     per = np.percentile(signal, q=[1, 25, 50, 75, 99])
+     per1, per25, per50, per75, per99 = per
+
+     # fraction of the signal that is silent
+     silences = detect_silence(
+         signal=signal,
+         samplerate=sample_rate,
+         min_energy=120,
+         min_cross_zero_rate=0.01
+     )
+     silence_total = 0
+     for silence in silences:
+         li = silence[1] - silence[0]
+         silence_total += li
+     silence_rate = silence_total / l
+
+     # variance of the non-silent segments
+     last_e = 0
+     non_silences = list()
+     for silence in silences:
+         b, e = silence
+         if b > last_e:
+             non_silences.append([last_e, b])
+         last_e = e
+     else:
+         if l > last_e:
+             non_silences.append([last_e, l])
+
+     # number of non-silent segments
+     silence_count = len(non_silences)
+
+     if silence_count == 0:
+         mean_non_silence = 0
+         var_non_silence = 0
+         var_var_non_silence = 0
+         var_non_silence_rate = 1
+     else:
+         signal_non_silences = list()
+         for non_silence in non_silences:
+             b, e = non_silence
+             signal_non_silences.append(signal[b: e])
+
+         # variance of the per-segment variances of the non-silent segments
+         v = list()
+         for signal_non_silence in signal_non_silences:
+             v.append(np.var(signal_non_silence))
+         var_var_non_silence = np.var(v)
+
+         signal_non_silences = np.concatenate(signal_non_silences)
+         # overall mean of the non-silent segments
+         mean_non_silence = np.mean(signal_non_silences)
+         # overall variance of the non-silent segments
+         var_non_silence = np.var(signal_non_silences)
+         # overall non-silence variance divided by the overall variance
+         var_non_silence_rate = var_non_silence / var
+
+     # whole signal: variance of the per-chunk variances
+     # (array_split tolerates lengths not divisible by 20; np.split would raise)
+     sub_signal_list = np.array_split(signal, 20)
+
+     whole_var = list()
+     for sub_signal in sub_signal_list:
+         sub_var = np.var(sub_signal)
+         whole_var.append(sub_var)
+     var_var_whole = np.var(whole_var)
+
+     result = {
+         'mean': mean,
+         'var': var,
+         'per1': per1,
+         'per25': per25,
+         'per50': per50,
+         'per75': per75,
+         'per99': per99,
+         'silence_rate': silence_rate,
+         'mean_non_silence': mean_non_silence,
+         'silence_count': silence_count,
+         'var_var_non_silence': var_var_non_silence,
+         'var_non_silence': var_non_silence,
+         'var_non_silence_rate': var_non_silence_rate,
+         'var_var_whole': var_var_whole,
+     }
+     return result
+
+
+ if __name__ == '__main__':
+     pass
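
A minimal usage sketch for calc_wave_features on synthetic input (the random 2 s signal is an assumption):

# the function returns a flat dict of 14 scalar statistics
import numpy as np
from toolbox.python_speech_features.wave_features import calc_wave_features

sr = 8000
signal = (np.random.uniform(-1, 1, size=sr * 2) * 3000).astype(np.int16)
features = calc_wave_features(signal, sample_rate=sr)
print(sorted(features.keys()))
# ['mean', 'mean_non_silence', 'per1', ..., 'var_non_silence_rate', 'var_var_whole']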