update
- data/call_monitor_examples_wav.zip +2 -2
- examples/channel_dual_to_single.py +56 -0
- examples/make_early_media_speech_wav/step_1_make_media_speech.py +220 -0
- examples/make_early_media_speech_wav/step_2_make_annotations.py +79 -0
- examples/make_full_user_wav/step_1_make_full_uer_wav.py +92 -0
- examples/make_templates/step_1_wav_classification.py +1 -1
- examples/make_templates/step_2_wav_split.py +3 -3
- examples/make_templates/step_3_move_by_template.py +3 -3
- examples/seach_early_media_wav.py +43 -0
- toolbox/torch/__init__.py +6 -0
- toolbox/torch/utils/__init__.py +6 -0
- toolbox/torch/utils/data/__init__.py +6 -0
- toolbox/torch/utils/data/vocabulary.py +211 -0
data/call_monitor_examples_wav.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ac3448d2c7a017270dc9e9dc304d66e8bff6caccc7f011bdcbe5febeddbf316c
+size 2129737
examples/channel_dual_to_single.py
ADDED
@@ -0,0 +1,56 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import os
from pathlib import Path

from scipy.io import wavfile
from tqdm import tqdm

from project_settings import project_path


def get_args():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--wav_dir",
        default=(project_path / "data/early_media/60/wav").as_posix(),
        type=str
    )

    args = parser.parse_args()
    return args


def main():
    args = get_args()

    wav_dir = Path(args.wav_dir)
    for filename in tqdm(wav_dir.glob("*.wav")):
        print(filename)

        try:
            sample_rate, signal = wavfile.read(filename)
        except UnboundLocalError:
            os.remove(filename)
            continue
        if sample_rate != 8000:
            raise AssertionError

        if len(signal) < 1.0 * sample_rate:
            os.remove(filename.as_posix())
            continue

        # print(signal.shape)
        signal = signal[:, 0]
        # print(signal.shape)

        wavfile.write(filename.as_posix(), rate=8000, data=signal)
        # wavfile.write("temp2.wav", rate=8000, data=signal)
        # exit(0)
    return


if __name__ == '__main__':
    main()
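For reference, a minimal sketch of the transformation this script applies (file names and signal data here are illustrative, not from the dataset): scipy reads a dual-channel wav as an (n_samples, n_channels) int16 array, and the script keeps channel 0 and rewrites the file at 8 kHz.

import numpy as np
from scipy.io import wavfile

sample_rate = 8000
# two channels of 2 s of silence, standing in for a real dual-channel recording
stereo = np.zeros(shape=(sample_rate * 2, 2), dtype=np.int16)
mono = stereo[:, 0]  # keep channel 0 only, as channel_dual_to_single.py does
wavfile.write("mono_example.wav", rate=sample_rate, data=mono)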
examples/make_early_media_speech_wav/step_1_make_media_speech.py
ADDED
@@ -0,0 +1,220 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
from pathlib import Path

import numpy as np
from scipy.io import wavfile
import torch
from tqdm import tqdm
from typing import List

from project_settings import project_path
from toolbox.cv2.misc import show_image, erode, dilate
from toolbox.python_speech_features.misc import wave2spectrum_image


area_code = 60


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_dir",
        default=(project_path / "trained_models/early_media_20220721").as_posix(),
        type=str
    )
    parser.add_argument(
        "--wav_dir",
        default=(project_path / "data/early_media/{area_code}/wav".format(area_code=area_code)).as_posix(),
        type=str
    )
    parser.add_argument(
        "--media_speech_dir",
        default=(project_path / "data/early_media/{area_code}/media_speech".format(area_code=area_code)).as_posix(),
        type=str
    )
    parser.add_argument("--win_size", default=2, type=int)
    parser.add_argument("--win_step", default=1, type=int)
    args = parser.parse_args()
    return args


class PyTorchSpectrumTagger(object):
    def __init__(self,
                 seq2seq_encoder_pth: str,
                 seq2vec_encoder_pth: str,
                 classification_layer_pth: str,
                 index2token_json: str,
                 win_size: int = 50,
                 win_step: int = 25,
                 ):
        self.seq2seq_encoder_pth = seq2seq_encoder_pth
        self.seq2vec_encoder_pth = seq2vec_encoder_pth
        self.classification_layer_pth = classification_layer_pth
        self.index2token_json = index2token_json
        self.win_size = win_size
        self.win_step = win_step

        self.seq2seq_encoder = torch.jit.load(self.seq2seq_encoder_pth)
        self.seq2vec_encoder = torch.jit.load(self.seq2vec_encoder_pth)
        self.classification_layer = torch.jit.load(self.classification_layer_pth)
        with open(self.index2token_json, "r", encoding="utf-8") as f:
            index2token = json.load(f)
        self.index2token = index2token

    def tag(self, spectrum: np.ndarray):
        array = np.expand_dims(spectrum, axis=0)
        array = torch.tensor(array, dtype=torch.float32)
        mask: torch.IntTensor = torch.ones(size=array.shape[:-1], device=array.device, dtype=torch.int32)

        array = self.seq2seq_encoder.forward(array, mask=mask)

        length = array.shape[-2]

        result = list()
        idx = 0
        while True:
            begin = idx * self.win_step
            end = begin + self.win_size
            if end > length:
                break
            window = array[:, begin:end, :]
            window = self.seq2vec_encoder.forward(window)
            logits = self.classification_layer(window)
            probs = torch.nn.functional.softmax(logits, dim=-1)
            label_idx = probs.argmax(dim=-1).item()
            label_str = self.index2token[str(label_idx)]
            result.append(label_str)
            idx += 1

        return result


def correct_labels(labels: List[str]):
    """
    (1)
    First remove scattered `music` labels.

    (2)
    1. Dilate: connect scattered `voice` labels.
    2. Erode: restore the original extent and remove scattered `voice` labels.
    3. Dilate: expand the `voice` labels so that template matching has enough context.
    :param labels:
    :return:
    """
    labels = erode(labels, erode_label="music", n=1)
    labels = dilate(labels, dilate_label="music", n=1)

    labels = dilate(labels, dilate_label="voice", n=2)
    labels = erode(labels, erode_label="voice", n=2)
    labels = erode(labels, erode_label="voice", n=1)
    labels = dilate(labels, dilate_label="voice", n=3)

    return labels


def split_signal_by_labels(signal: np.ndarray, labels: List[str]):
    l = len(labels)

    voice = list()
    begin = None
    for idx, label in enumerate(labels):
        if label == "voice":
            if begin is None:
                begin = idx
        elif label != "voice":
            if begin is not None:
                voice.append((begin, idx))
                begin = None
            else:
                pass
    else:
        if begin is not None:
            voice.append((begin, l))

    result = list()

    win_size = signal.shape[0] / l
    for begin, end in voice:
        begin = int(begin * win_size)
        end = int(end * win_size)

        sub_signal = signal[begin: end + 1]
        result.append({
            "begin": begin,
            "sub_signal": sub_signal,
        })

    return result


def main():
    args = get_args()

    max_wave_value = 32768.0

    model_dir = Path(args.model_dir)
    wav_dir = Path(args.wav_dir)
    media_speech_dir = Path(args.media_speech_dir)

    spectrum_tagger = PyTorchSpectrumTagger(
        seq2seq_encoder_pth=model_dir / "seq2seq_encoder.pth",
        seq2vec_encoder_pth=model_dir / "seq2vec_encoder.pth",
        classification_layer_pth=model_dir / "classification_layer.pth",
        index2token_json=model_dir / "index2token.json",
    )

    for filename in tqdm(wav_dir.glob("*/*.wav")):
        if filename.parts[-2] in ("bell", "music"):
            continue
        sample_rate, signal = wavfile.read(filename)

        if len(signal) < 1.0 * sample_rate:
            continue

        signal_ = signal / max_wave_value

        spectrum = wave2spectrum_image(
            signal_,
            sample_rate=8000,
            xmax=10,
            xmin=-50,
            winlen=0.025,
            winstep=0.01,
            nfft=512,
            n_low_freq=100,
        )
        # show_image(array.T)

        labels = spectrum_tagger.tag(spectrum)
        labels = correct_labels(labels)

        if "voice" not in labels:
            continue

        sub_signal_list = split_signal_by_labels(signal, labels)

        for i, sub_signal_group in enumerate(sub_signal_list):
            to_dir = media_speech_dir / filename.parts[-2]
            to_dir.mkdir(exist_ok=True)
            to_filename = to_dir / "{}_{}.wav".format(filename.stem, i)

            sub_signal = sub_signal_group["sub_signal"]

            sub_signal = np.array(sub_signal, dtype=np.int16)
            if len(sub_signal) < sample_rate * 4:
                continue

            wavfile.write(
                filename=to_filename,
                rate=sample_rate,
                data=sub_signal
            )

    return


if __name__ == '__main__':
    main()
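A minimal sketch (all numbers illustrative) of the two index computations in this script: PyTorchSpectrumTagger.tag slides a win_size-frame window every win_step frames over the encoded sequence, producing one label per window, and split_signal_by_labels maps each label back onto signal.shape[0] / len(labels) samples.

length = 8               # encoded frames, illustrative
win_size, win_step = 2, 1

windows = []
idx = 0
while True:
    begin = idx * win_step
    end = begin + win_size
    if end > length:
        break
    windows.append((begin, end))
    idx += 1
print(windows)           # [(0, 2), (1, 3), ..., (6, 8)]: one label per window

labels = ["music", "voice", "voice", "music"]  # illustrative label sequence
n_samples = 8000                               # 1 s of 8 kHz audio
samples_per_label = n_samples / len(labels)    # 2000.0 samples per label
begin, end = 1, 3                              # the contiguous "voice" run
print(int(begin * samples_per_label), int(end * samples_per_label))  # 2000 6000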
examples/make_early_media_speech_wav/step_2_make_annotations.py
ADDED
@@ -0,0 +1,79 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
from pathlib import Path

import numpy as np
import pandas as pd
from scipy.io import wavfile
import torch
from tqdm import tqdm
from typing import List

from project_settings import project_path
from toolbox.cv2.misc import show_image, erode, dilate
from toolbox.python_speech_features.misc import wave2spectrum_image


area_code = 60


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--media_speech_dir",
        default=(project_path / "data/early_media/{area_code}/media_speech".format(area_code=area_code)).as_posix(),
        type=str
    )
    parser.add_argument(
        "--annotations_file",
        default=(project_path / "data/early_media/{area_code}/media_speech/annotations.xlsx".format(area_code=area_code)).as_posix(),
        type=str
    )
    args = parser.parse_args()
    return args


def main():
    args = get_args()

    media_speech_dir = Path(args.media_speech_dir)
    annotations_file = Path(args.annotations_file)

    name_to_row = dict()
    if annotations_file.exists():
        df = pd.read_excel(annotations_file.as_posix())
        for i, row in df.iterrows():
            row = dict(row)
            name = row["filename"]
            name_to_row[name] = row

    result = list()
    for filename in tqdm(media_speech_dir.glob("*/*.wav")):
        filename = Path(filename)
        relative_name = filename.relative_to(media_speech_dir)

        name = relative_name.as_posix()
        row = name_to_row.get(name)
        if row is None:
            text = ""
        else:
            text = row["text"]

        result.append({
            "filename": name,
            "text": text
        })

    result = pd.DataFrame(result)
    result.to_excel(
        annotations_file.as_posix(),
        index=False,
        encoding="utf_8_sig"
    )
    return


if __name__ == '__main__':
    main()
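As a sketch of the merge behaviour (data illustrative): rows whose files already carry a "text" annotation keep it, and newly discovered files get an empty string, so re-running the script does not erase existing transcriptions.

import pandas as pd

existing = pd.DataFrame([{"filename": "voice/a_0.wav", "text": "hello"}])
name_to_row = {row["filename"]: dict(row) for _, row in existing.iterrows()}

found = ["voice/a_0.wav", "voice/b_0.wav"]  # files found on disk, illustrative
merged = pd.DataFrame([
    {"filename": name, "text": name_to_row.get(name, {}).get("text", "")}
    for name in found
])
print(merged)  # a_0 keeps "hello"; b_0 gets an empty text cell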
examples/make_full_user_wav/step_1_make_full_uer_wav.py
ADDED
@@ -0,0 +1,92 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import os
from pathlib import Path

import numpy as np
from scipy.io import wavfile
from tqdm import tqdm

from project_settings import project_path


area_code = 60


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--calling_wav_dir",
        default=(project_path / "data/calling/{area_code}/wav".format(area_code=area_code)).as_posix(),
        type=str
    )
    parser.add_argument(
        "--early_media_wav_dir",
        default=(project_path / "data/early_media/{area_code}/wav".format(area_code=area_code)).as_posix(),
        type=str
    )
    parser.add_argument(
        "--output_dir",
        default=(project_path / "data/call_monitor_examples_wav/ms-MY").as_posix(),
        type=str
    )
    parser.add_argument(
        "--query",
        default="2b626374-8f27-4b50-90b7-47cbfa10accc",
        type=str
    )
    parser.add_argument(
        "--expected_sample_rate",
        default=8000,
        type=int
    )
    args = parser.parse_args()
    return args


def main():
    args = get_args()

    calling_wav_dir = Path(args.calling_wav_dir)
    early_media_wav_dir = Path(args.early_media_wav_dir)
    output_dir = Path(args.output_dir)

    # calling
    calling_signal = np.zeros(shape=(0,), dtype=np.int16)
    for filename in tqdm(calling_wav_dir.glob("**/*.wav")):
        basename = filename.stem
        # print(basename)
        if str(basename).__contains__(args.query):
            print(filename)

            sample_rate, signal = wavfile.read(filename)
            if sample_rate != args.expected_sample_rate:
                raise AssertionError
            calling_signal = signal[:, 0]

    # early media
    early_media_signal = np.zeros(shape=(0,), dtype=np.int16)
    for filename in tqdm(early_media_wav_dir.glob("**/*.wav")):
        basename = filename.stem
        # print(basename)
        if str(basename).__contains__(args.query):
            print(filename)
            sample_rate, signal = wavfile.read(filename)
            if sample_rate != args.expected_sample_rate:
                raise AssertionError
            early_media_signal = signal

    on_answer_ts = int(len(early_media_signal) / args.expected_sample_rate * 1000)
    print("on_answer_ts: {}".format(on_answer_ts))

    signal = np.concatenate([early_media_signal, calling_signal], axis=0)

    to_filename = output_dir / "{}.wav".format(args.query)
    wavfile.write(to_filename.as_posix(), rate=8000, data=signal)

    return


if __name__ == '__main__':
    main()
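A small sketch of the timeline arithmetic (signals illustrative): the answer timestamp in milliseconds is just the duration of the early-media segment, and the full recording is the early media followed by the in-call audio.

import numpy as np

sample_rate = 8000
early_media_signal = np.zeros(shape=(sample_rate * 3,), dtype=np.int16)  # 3 s
calling_signal = np.zeros(shape=(sample_rate * 5,), dtype=np.int16)      # 5 s

on_answer_ts = int(len(early_media_signal) / sample_rate * 1000)
print(on_answer_ts)  # 3000 ms: the call is answered where early media ends

full = np.concatenate([early_media_signal, calling_signal], axis=0)
print(len(full) / sample_rate)  # 8.0 s in total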
examples/make_templates/step_1_wav_classification.py
CHANGED
@@ -24,7 +24,7 @@ from toolbox.cv2.misc import show_image
 from toolbox.python_speech_features.misc import wave2spectrum_image


-area_code =
+area_code = 60


 def get_args():
examples/make_templates/step_2_wav_split.py
CHANGED
@@ -18,19 +18,19 @@ from tqdm import tqdm
 from project_settings import project_path


-area_code =
+area_code = 60


 def get_args():
     parser = argparse.ArgumentParser()
     parser.add_argument(
         "--filename",
-        default=(project_path / "data/
+        default=(project_path / "data/early_media/60/wav/voice/early_vm_e4543473-eab8-4240-8e9c-d5249a1137b5.wav").as_posix(),
         type=str
     )
     parser.add_argument(
         "--templates_segmented_dir",
-        default=(project_path / "data/
+        default=(project_path / "data/early_media/{area_code}/temp".format(area_code=area_code)).as_posix(),
         type=str
     )
     parser.add_argument("--win_size", default=2.0, type=float)
examples/make_templates/step_3_move_by_template.py
CHANGED
@@ -18,7 +18,7 @@ from project_settings import project_path
 from toolbox.python_speech_features.misc import wave2spectrum_image


-area_code =
+area_code = 60


 def get_args():
@@ -203,8 +203,8 @@ def main():
         if len(set(labels_)) > 1:
             print("More than one template class was matched; please check whether the matches are correct.")
             print(filename)
-            for match in matches:
-                print(match)
+            # for match in matches:
+            #     print(match)
             continue

         if len(labels_) == 0:
examples/seach_early_media_wav.py
ADDED
@@ -0,0 +1,43 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import os
from pathlib import Path

from scipy.io import wavfile
from tqdm import tqdm

from project_settings import project_path


def get_args():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--wav_dir",
        default=(project_path / "data/early_media/60/wav").as_posix(),
        type=str
    )
    parser.add_argument(
        "--query",
        default="e9b985ea-aca6-4e04-87ab-e51271503f4c",
        type=str
    )
    args = parser.parse_args()
    return args


def main():
    args = get_args()

    wav_dir = Path(args.wav_dir)
    for filename in tqdm(wav_dir.glob("*/*.wav")):
        basename = filename.stem
        # print(basename)
        if str(basename).__contains__(args.query):
            print(filename)
    return


if __name__ == '__main__':
    main()
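The search reduces to a substring test on the file stem; str(basename).__contains__(args.query) behaves the same as the more idiomatic args.query in basename. A standalone sketch (paths hypothetical):

from pathlib import Path

wav_dir = Path("data/early_media/60/wav")
query = "e9b985ea"
for filename in wav_dir.glob("*/*.wav"):
    if query in filename.stem:  # equivalent to str(...).__contains__(query)
        print(filename)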
toolbox/torch/__init__.py
ADDED
@@ -0,0 +1,6 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-


if __name__ == '__main__':
    pass
toolbox/torch/utils/__init__.py
ADDED
@@ -0,0 +1,6 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-


if __name__ == '__main__':
    pass
toolbox/torch/utils/data/__init__.py
ADDED
@@ -0,0 +1,6 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-


if __name__ == '__main__':
    pass
toolbox/torch/utils/data/vocabulary.py
ADDED
@@ -0,0 +1,211 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from collections import defaultdict, OrderedDict
import os
from typing import Any, Callable, Dict, Iterable, List, Set


def namespace_match(pattern: str, namespace: str):
    """
    Matches a namespace pattern against a namespace string. For example, ``*tags`` matches
    ``passage_tags`` and ``question_tags`` and ``tokens`` matches ``tokens`` but not
    ``stemmed_tokens``.
    """
    if pattern[0] == '*' and namespace.endswith(pattern[1:]):
        return True
    elif pattern == namespace:
        return True
    return False


class _NamespaceDependentDefaultDict(defaultdict):
    def __init__(self,
                 non_padded_namespaces: Set[str],
                 padded_function: Callable[[], Any],
                 non_padded_function: Callable[[], Any]) -> None:
        self._non_padded_namespaces = set(non_padded_namespaces)
        self._padded_function = padded_function
        self._non_padded_function = non_padded_function
        super(_NamespaceDependentDefaultDict, self).__init__()

    def __missing__(self, key: str):
        if any(namespace_match(pattern, key) for pattern in self._non_padded_namespaces):
            value = self._non_padded_function()
        else:
            value = self._padded_function()
        dict.__setitem__(self, key, value)
        return value

    def add_non_padded_namespaces(self, non_padded_namespaces: Set[str]):
        # add non_padded_namespaces which weren't already present
        self._non_padded_namespaces.update(non_padded_namespaces)


class _TokenToIndexDefaultDict(_NamespaceDependentDefaultDict):
    def __init__(self, non_padded_namespaces: Set[str], padding_token: str, oov_token: str) -> None:
        super(_TokenToIndexDefaultDict, self).__init__(non_padded_namespaces,
                                                       lambda: {padding_token: 0, oov_token: 1},
                                                       lambda: {})


class _IndexToTokenDefaultDict(_NamespaceDependentDefaultDict):
    def __init__(self, non_padded_namespaces: Set[str], padding_token: str, oov_token: str) -> None:
        super(_IndexToTokenDefaultDict, self).__init__(non_padded_namespaces,
                                                       lambda: {0: padding_token, 1: oov_token},
                                                       lambda: {})


DEFAULT_NON_PADDED_NAMESPACES = ("*tags", "*labels")
DEFAULT_PADDING_TOKEN = '[PAD]'
DEFAULT_OOV_TOKEN = '[UNK]'
NAMESPACE_PADDING_FILE = 'non_padded_namespaces.txt'


class Vocabulary(object):
    def __init__(self, non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES):
        self._non_padded_namespaces = set(non_padded_namespaces)
        self._padding_token = DEFAULT_PADDING_TOKEN
        self._oov_token = DEFAULT_OOV_TOKEN
        self._token_to_index = _TokenToIndexDefaultDict(self._non_padded_namespaces,
                                                        self._padding_token,
                                                        self._oov_token)
        self._index_to_token = _IndexToTokenDefaultDict(self._non_padded_namespaces,
                                                        self._padding_token,
                                                        self._oov_token)

    def add_token_to_namespace(self, token: str, namespace: str = 'tokens') -> int:
        if token not in self._token_to_index[namespace]:
            index = len(self._token_to_index[namespace])
            self._token_to_index[namespace][token] = index
            self._index_to_token[namespace][index] = token
            return index
        else:
            return self._token_to_index[namespace][token]

    def get_index_to_token_vocabulary(self, namespace: str = 'tokens') -> Dict[int, str]:
        return self._index_to_token[namespace]

    def get_token_to_index_vocabulary(self, namespace: str = 'tokens') -> Dict[str, int]:
        return self._token_to_index[namespace]

    def get_token_index(self, token: str, namespace: str = 'tokens') -> int:
        if token in self._token_to_index[namespace]:
            return self._token_to_index[namespace][token]
        else:
            return self._token_to_index[namespace][self._oov_token]

    def get_token_from_index(self, index: int, namespace: str = 'tokens'):
        return self._index_to_token[namespace][index]

    def get_vocab_size(self, namespace: str = 'tokens') -> int:
        return len(self._token_to_index[namespace])

    def save_to_files(self, directory: str):
        os.makedirs(directory, exist_ok=True)
        with open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'w', encoding='utf-8') as f:
            for namespace_str in self._non_padded_namespaces:
                f.write('{}\n'.format(namespace_str))

        for namespace, token_to_index in self._token_to_index.items():
            filename = os.path.join(directory, '{}.txt'.format(namespace))
            with open(filename, 'w', encoding='utf-8') as f:
                for token, _ in token_to_index.items():
                    f.write('{}\n'.format(token))

    @classmethod
    def from_files(cls, directory: str) -> 'Vocabulary':
        with open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'r', encoding='utf-8') as f:
            non_padded_namespaces = [namespace_str.strip() for namespace_str in f]

        vocab = cls(non_padded_namespaces=non_padded_namespaces)

        for namespace_filename in os.listdir(directory):
            if namespace_filename == NAMESPACE_PADDING_FILE:
                continue
            if namespace_filename.startswith("."):
                continue
            namespace = namespace_filename.replace('.txt', '')
            if any(namespace_match(pattern, namespace) for pattern in non_padded_namespaces):
                is_padded = False
            else:
                is_padded = True
            filename = os.path.join(directory, namespace_filename)
            vocab.set_from_file(filename, is_padded, namespace=namespace)

        return vocab

    def set_from_file(self,
                      filename: str,
                      is_padded: bool = True,
                      oov_token: str = DEFAULT_OOV_TOKEN,
                      namespace: str = "tokens"
                      ):
        if is_padded:
            self._token_to_index[namespace] = {self._padding_token: 0}
            self._index_to_token[namespace] = {0: self._padding_token}
        else:
            self._token_to_index[namespace] = {}
            self._index_to_token[namespace] = {}

        with open(filename, 'r', encoding='utf-8') as f:
            index = 1 if is_padded else 0
            for row in f:
                token = str(row).strip()
                if token == oov_token:
                    token = self._oov_token
                self._token_to_index[namespace][token] = index
                self._index_to_token[namespace][index] = token
                index += 1

    def convert_tokens_to_ids(self, tokens: List[str], namespace: str = "tokens"):
        result = list()
        for token in tokens:
            idx = self._token_to_index[namespace].get(token)
            if idx is None:
                idx = self._token_to_index[namespace][self._oov_token]
            result.append(idx)
        return result

    def convert_ids_to_tokens(self, ids: List[int], namespace: str = "tokens"):
        result = list()
        for idx in ids:
            idx = self._index_to_token[namespace][idx]
            result.append(idx)
        return result

    def pad_or_truncate_ids_by_max_length(self, ids: List[int], max_length: int, namespace: str = "tokens"):
        pad_idx = self._token_to_index[namespace][self._padding_token]

        length = len(ids)
        if length > max_length:
            result = ids[:max_length]
        else:
            result = ids + [pad_idx] * (max_length - length)
        return result


def demo1():
    import jieba

    vocabulary = Vocabulary()
    vocabulary.add_token_to_namespace('白天', 'tokens')
    vocabulary.add_token_to_namespace('晚上', 'tokens')

    text = '不是在白天, 就是在晚上'
    tokens = jieba.lcut(text)

    print(tokens)

    ids = vocabulary.convert_tokens_to_ids(tokens)
    print(ids)

    padded_idx = vocabulary.pad_or_truncate_ids_by_max_length(ids, 10)
    print(padded_idx)

    tokens = vocabulary.convert_ids_to_tokens(padded_idx)
    print(tokens)
    return


if __name__ == '__main__':
    demo1()
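A short usage sketch for the new Vocabulary class (the directory name is hypothetical): because "labels" matches the non-padded pattern "*labels", its indices start at 0 with no [PAD]/[UNK] entries, and save_to_files / from_files round-trip the mapping through plain text files.

from toolbox.torch.utils.data.vocabulary import Vocabulary

vocab = Vocabulary()
vocab.add_token_to_namespace("voice", namespace="labels")  # index 0
vocab.add_token_to_namespace("music", namespace="labels")  # index 1

vocab.save_to_files("vocab_dir")  # writes labels.txt and non_padded_namespaces.txt
restored = Vocabulary.from_files("vocab_dir")
assert restored.get_token_index("music", namespace="labels") == 1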