#!/usr/bin/python3
# -*- coding: utf-8 -*-
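"""
Sort early-media wav recordings into voice / music / bell / mute
sub-directories.

Each 8 kHz wav file is converted to a spectrogram, run through a TorchScript
seq2seq encoder, and classified over sliding windows by a seq2vec encoder and
classification layer; the file is then moved according to how often each
label wins a window.
"""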
import argparse
from collections import Counter, defaultdict
import json
import os
from pathlib import Path
import shutil
import sys

pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, '../../'))

import numpy as np
from scipy.io import wavfile
import torch
from tqdm import tqdm

from project_settings import project_path
from toolbox.cv2.misc import show_image
from toolbox.python_speech_features.misc import wave2spectrum_image

area_code = 234
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_dir",
        default=(project_path / "trained_models/early_media_20220721").as_posix(),
        type=str,
    )
    parser.add_argument(
        "--wav_dir",
        default=(project_path / "data/early_media/{area_code}/wav".format(area_code=area_code)).as_posix(),
        type=str,
    )
    args = parser.parse_args()
    return args
def demo1():
    args = get_args()

    model_dir = Path(args.model_dir)
    wav_dir = Path(args.wav_dir)

    # load the TorchScript model components
    seq2seq_encoder = torch.jit.load(model_dir / "seq2seq_encoder.pth")
    seq2vec_encoder = torch.jit.load(model_dir / "seq2vec_encoder.pth")
    classification_layer = torch.jit.load(model_dir / "classification_layer.pth")

    with open(model_dir / "index2token.json", "r", encoding="utf-8") as f:
        index2token = json.load(f)
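    # index2token maps class indices (serialized as string keys) to label
    # names, e.g. "voice", "music", "bell"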

    # read the wav files
    for filename in tqdm(wav_dir.glob("*.wav")):
        filename: Path = filename
        try:
            sample_rate, wave = wavfile.read(filename)
        except UnboundLocalError:
            # unreadable / corrupt wav file
            os.remove(filename)
            continue
        if sample_rate != 8000:
            raise AssertionError("expected a sample rate of 8000 Hz, got {}".format(sample_rate))
        if len(wave) < 1.0 * sample_rate:
            # discard clips shorter than one second
            os.remove(filename.as_posix())
            continue

        # scale int16 samples to [-1, 1)
        max_wave_value = 32768.0
        wave = wave / max_wave_value
        # convert the waveform to a spectrogram image
        array = wave2spectrum_image(
            wave,
            sample_rate=8000,
            xmax=10,
            xmin=-50,
            winlen=0.025,
            winstep=0.01,
            nfft=512,
            n_low_freq=100,
        )
        # show_image(array.T)

        # add a batch dimension and build an all-ones frame mask
        array = np.array([array], dtype=np.float32)
        array = torch.tensor(array, dtype=torch.float32)
        mask: torch.IntTensor = torch.ones(size=array.shape[:-1], device=array.device, dtype=torch.int32)
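
        # run the whole utterance through the sequence encoder before windowing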
        array = seq2seq_encoder.forward(array, mask)

        # classify sliding windows of 50 frames with a step of 25 frames
        length = array.shape[-2]
        m_win_size = 50
        m_win_step = 25

        labels = list()
        idx = 0
        while True:
            begin = idx * m_win_step
            end = begin + m_win_size
            if end > length:
                break
            window = array[:, begin:end, :]
            window = seq2vec_encoder.forward(window)
            logits = classification_layer(window)
            probs = torch.nn.functional.softmax(logits, dim=-1)
            label_idx = probs.argmax(dim=-1).item()
            label_str = index2token[str(label_idx)]
            labels.append(label_str)
            idx += 1
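
        # majority statistics: fraction of windows assigned to each label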
        counter = Counter(labels)
        total = sum(counter.values())
        rate_dict = defaultdict(float)
        for k, v in counter.items():
            rate_dict[k] = v / total

        # route the file to a sub-directory according to the label rates
        if rate_dict["voice"] > 0.1:
            tgt = filename.parent / "voice"
        elif rate_dict["music"] > 0.1:
            tgt = filename.parent / "music"
        elif rate_dict["bell"] > 0.1:
            tgt = filename.parent / "bell"
        else:
            tgt = filename.parent / "mute"

        tgt.mkdir(exist_ok=True)
        try:
            shutil.move(filename.as_posix(), tgt.as_posix())
        except shutil.Error:
            # a file with the same name already exists in the target directory
            fn = tgt / "{}_2.wav".format(filename.stem)
            shutil.move(filename.as_posix(), fn.as_posix())
    return
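

# Running demo1() moves every wav under --wav_dir into voice/, music/,
# bell/ or mute/ sub-directories created next to the source files.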
if __name__ == '__main__':
    demo1()