Commit 0382342 by Katock
Parent: f85ad87
Message: debug
Files changed:
- models/carl/cover.png +0 -0
- models/ryder/cover.png +0 -0
- models/sweet/cover.png +0 -0
- utils.py +0 -33
- vencoder/CNHubertLarge.py +33 -0
- vencoder/ContentVec256L12_Onnx.py +28 -0
- vencoder/ContentVec256L9.py +35 -0
- vencoder/ContentVec256L9_Onnx.py +28 -0
- vencoder/ContentVec768L12.py +34 -0
- vencoder/ContentVec768L12_Onnx.py +28 -0
- vencoder/ContentVec768L9_Onnx.py +28 -0
- vencoder/DPHubert.py +26 -0
- vencoder/HubertSoft.py +24 -0
- vencoder/HubertSoft_Onnx.py +28 -0
- vencoder/WhisperPPG.py +30 -0
- vencoder/WhisperPPGLarge.py +30 -0
- vencoder/__init__.py +0 -0
- vencoder/dphubert/__init__.py +0 -0
- vencoder/dphubert/components.py +1410 -0
- vencoder/dphubert/hardconcrete.py +122 -0
- vencoder/dphubert/model.py +966 -0
- vencoder/dphubert/pruning_utils.py +51 -0
- vencoder/dphubert/utils/__init__.py +0 -0
- vencoder/dphubert/utils/import_huggingface_wavlm.py +129 -0
- vencoder/encoder.py +12 -0
- vencoder/hubert/__init__.py +0 -0
- vencoder/hubert/hubert_model.py +222 -0
- vencoder/hubert/hubert_model_onnx.py +217 -0
- vencoder/whisper/__init__.py +0 -0
- vencoder/whisper/audio.py +125 -0
- vencoder/whisper/decoding.py +712 -0
- vencoder/whisper/model.py +269 -0
- vencoder/whisper/tokenizer.py +331 -0
- vencoder/whisper/utils.py +163 -0
models/carl/cover.png
ADDED (binary image)

models/ryder/cover.png
ADDED (binary image)

models/sweet/cover.png
ADDED (binary image)
utils.py
CHANGED
@@ -15,7 +15,6 @@ from scipy.io.wavfile import read
 import torch
 from torch.nn import functional as F
 from modules.commons import sequence_mask
-import faiss
 import tqdm
 
 MATPLOTLIB_FLAG = False
@@ -428,38 +427,6 @@ def change_rms(data1, sr1, data2, sr2, rate):  # 1 is the input audio, 2 is the output audio
     )
     return data2
 
-def train_index(spk_name, root_dir="dataset/44k/"):  # from: RVC https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI
-    print("The feature index is constructing.")
-    exp_dir = os.path.join(root_dir, spk_name)
-    listdir_res = []
-    for file in os.listdir(exp_dir):
-        if ".wav.soft.pt" in file:
-            listdir_res.append(os.path.join(exp_dir, file))
-    if len(listdir_res) == 0:
-        raise Exception("You need to run preprocess_hubert_f0.py!")
-    npys = []
-    for name in sorted(listdir_res):
-        phone = torch.load(name)[0].transpose(-1, -2).numpy()
-        npys.append(phone)
-    big_npy = np.concatenate(npys, 0)
-    big_npy_idx = np.arange(big_npy.shape[0])
-    np.random.shuffle(big_npy_idx)
-    big_npy = big_npy[big_npy_idx]
-    n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
-    index = faiss.index_factory(big_npy.shape[1], "IVF%s,Flat" % n_ivf)
-    index_ivf = faiss.extract_index_ivf(index)
-    index_ivf.nprobe = 1
-    index.train(big_npy)
-    batch_size_add = 8192
-    for i in range(0, big_npy.shape[0], batch_size_add):
-        index.add(big_npy[i : i + batch_size_add])
-    # faiss.write_index(
-    #     index,
-    #     f"added_{spk_name}.index"
-    # )
-    print("Successfully built index")
-    return index
-
 
 class HParams():
     def __init__(self, **kwargs):
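For context on the deletion: `train_index` built an RVC-style faiss retrieval index over per-frame content features. A minimal sketch of how such an IVF index is built and queried (illustrative only; it is not part of this commit, which removes the faiss dependency, and the toy feature matrix is made up):

import numpy as np
import faiss  # dependency removed by this commit

# Toy stand-in for the concatenated ".wav.soft.pt" features: 10k frames, 256 dims.
feats = np.random.rand(10_000, 256).astype(np.float32)

n_ivf = min(int(16 * np.sqrt(feats.shape[0])), feats.shape[0] // 39)
index = faiss.index_factory(feats.shape[1], "IVF%s,Flat" % n_ivf)
index.train(feats)                          # learn the coarse quantizer centroids
index.add(feats)                            # store the vectors
faiss.extract_index_ivf(index).nprobe = 1   # probe a single cluster per query

# Retrieval: nearest stored frames for each query frame.
distances, ids = index.search(feats[:4], k=8)
print(ids.shape)  # (4, 8)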
vencoder/CNHubertLarge.py
ADDED
@@ -0,0 +1,33 @@
+from vencoder.encoder import SpeechEncoder
+import torch
+from fairseq import checkpoint_utils
+
+class CNHubertLarge(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/chinese-hubert-large-fairseq-ckpt.pt", device=None):
+        print("load model(s) from {}".format(vec_path))
+        self.hidden_dim = 1024
+        models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+            [vec_path],
+            suffix="",
+        )
+        if device is None:
+            self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        else:
+            self.dev = torch.device(device)
+        self.model = models[0].to(self.dev)
+        self.model.eval()
+
+    def encoder(self, wav):
+        feats = wav
+        if feats.dim() == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats.view(1, -1)
+        padding_mask = torch.BoolTensor(feats.shape).fill_(False)
+        inputs = {
+            "source": feats.to(wav.device),
+            "padding_mask": padding_mask.to(wav.device)
+        }
+        with torch.no_grad():
+            logits = self.model.extract_features(**inputs)
+        return logits[0].transpose(1, 2)
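All of the new `vencoder` classes share the same two-step interface: construct with a checkpoint path and device, then call `encoder(wav)` on a mono waveform tensor to get features shaped `(1, hidden_dim, frames)`. A minimal usage sketch, assuming the default checkpoint exists and a 16 kHz mono input:

import torch
from vencoder.CNHubertLarge import CNHubertLarge

enc = CNHubertLarge(device="cpu")   # loads pretrain/chinese-hubert-large-fairseq-ckpt.pt
wav = torch.randn(16000)            # one second of 16 kHz mono audio, on the model's device
units = enc.encoder(wav)
print(units.shape)                  # expected (1, 1024, n_frames)

Note that `encoder()` moves its intermediate tensors to `wav.device`, not `self.dev`, so the caller is responsible for placing the waveform on the same device as the model.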
vencoder/ContentVec256L12_Onnx.py
ADDED
@@ -0,0 +1,28 @@
+from vencoder.encoder import SpeechEncoder
+import onnxruntime
+import torch
+
+class ContentVec256L12_Onnx(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/vec-256-layer-12.onnx", device=None):
+        print("load model(s) from {}".format(vec_path))
+        self.hidden_dim = 256
+        if device is None:
+            self.dev = torch.device("cpu")
+        else:
+            self.dev = torch.device(device)
+        if device == 'cpu' or device == torch.device("cpu") or device is None:
+            providers = ['CPUExecutionProvider']
+        elif device == 'cuda' or device == torch.device("cuda"):
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
+
+    def encoder(self, wav):
+        feats = wav
+        if feats.dim() == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats.view(1, -1)
+        feats = feats.unsqueeze(0).cpu().detach().numpy()
+        onnx_input = {self.model.get_inputs()[0].name: feats}
+        logits = self.model.run(None, onnx_input)
+        return torch.tensor(logits[0]).transpose(1, 2).to(self.dev)
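The ONNX variants choose execution providers from the `device` argument. onnxruntime silently falls back to CPU when a requested provider is unavailable, so a quick sanity check can be useful (a hedged aside, not part of the commit):

import onnxruntime

# Lists the providers this onnxruntime build actually supports,
# e.g. ['CUDAExecutionProvider', 'CPUExecutionProvider'].
print(onnxruntime.get_available_providers())

Note also that the provider branch above covers only `cpu`, `cuda`, and `None`; any other device string (e.g. `cuda:1`) leaves `providers` unbound and would raise a `NameError` when the session is created.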
vencoder/ContentVec256L9.py
ADDED
@@ -0,0 +1,35 @@
+from vencoder.encoder import SpeechEncoder
+import torch
+from fairseq import checkpoint_utils
+
+class ContentVec256L9(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/checkpoint_best_legacy_500.pt", device=None):
+        print("load model(s) from {}".format(vec_path))
+        models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+            [vec_path],
+            suffix="",
+        )
+        self.hidden_dim = 256
+        if device is None:
+            self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        else:
+            self.dev = torch.device(device)
+        self.model = models[0].to(self.dev)
+        self.model.eval()
+
+    def encoder(self, wav):
+        feats = wav
+        if feats.dim() == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats.view(1, -1)
+        padding_mask = torch.BoolTensor(feats.shape).fill_(False)
+        inputs = {
+            "source": feats.to(wav.device),
+            "padding_mask": padding_mask.to(wav.device),
+            "output_layer": 9,  # layer 9
+        }
+        with torch.no_grad():
+            logits = self.model.extract_features(**inputs)
+            feats = self.model.final_proj(logits[0])
+        return feats.transpose(1, 2)
vencoder/ContentVec256L9_Onnx.py
ADDED
@@ -0,0 +1,28 @@
+from vencoder.encoder import SpeechEncoder
+import onnxruntime
+import torch
+
+class ContentVec256L9_Onnx(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/vec-256-layer-9.onnx", device=None):
+        print("load model(s) from {}".format(vec_path))
+        self.hidden_dim = 256
+        if device is None:
+            self.dev = torch.device("cpu")
+        else:
+            self.dev = torch.device(device)
+        if device == 'cpu' or device == torch.device("cpu") or device is None:
+            providers = ['CPUExecutionProvider']
+        elif device == 'cuda' or device == torch.device("cuda"):
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
+
+    def encoder(self, wav):
+        feats = wav
+        if feats.dim() == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats.view(1, -1)
+        feats = feats.unsqueeze(0).cpu().detach().numpy()
+        onnx_input = {self.model.get_inputs()[0].name: feats}
+        logits = self.model.run(None, onnx_input)
+        return torch.tensor(logits[0]).transpose(1, 2).to(self.dev)
vencoder/ContentVec768L12.py
ADDED
@@ -0,0 +1,34 @@
+from vencoder.encoder import SpeechEncoder
+import torch
+from fairseq import checkpoint_utils
+
+class ContentVec768L12(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/checkpoint_best_legacy_500.pt", device=None):
+        print("load model(s) from {}".format(vec_path))
+        self.hidden_dim = 768
+        models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+            [vec_path],
+            suffix="",
+        )
+        if device is None:
+            self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        else:
+            self.dev = torch.device(device)
+        self.model = models[0].to(self.dev)
+        self.model.eval()
+
+    def encoder(self, wav):
+        feats = wav
+        if feats.dim() == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats.view(1, -1)
+        padding_mask = torch.BoolTensor(feats.shape).fill_(False)
+        inputs = {
+            "source": feats.to(wav.device),
+            "padding_mask": padding_mask.to(wav.device),
+            "output_layer": 12,  # layer 12
+        }
+        with torch.no_grad():
+            logits = self.model.extract_features(**inputs)
+        return logits[0].transpose(1, 2)
vencoder/ContentVec768L12_Onnx.py
ADDED
@@ -0,0 +1,28 @@
+from vencoder.encoder import SpeechEncoder
+import onnxruntime
+import torch
+
+class ContentVec768L12_Onnx(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/vec-768-layer-12.onnx", device=None):
+        print("load model(s) from {}".format(vec_path))
+        self.hidden_dim = 768
+        if device is None:
+            self.dev = torch.device("cpu")
+        else:
+            self.dev = torch.device(device)
+        if device == 'cpu' or device == torch.device("cpu") or device is None:
+            providers = ['CPUExecutionProvider']
+        elif device == 'cuda' or device == torch.device("cuda"):
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
+
+    def encoder(self, wav):
+        feats = wav
+        if feats.dim() == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats.view(1, -1)
+        feats = feats.unsqueeze(0).cpu().detach().numpy()
+        onnx_input = {self.model.get_inputs()[0].name: feats}
+        logits = self.model.run(None, onnx_input)
+        return torch.tensor(logits[0]).transpose(1, 2).to(self.dev)
vencoder/ContentVec768L9_Onnx.py
ADDED
@@ -0,0 +1,28 @@
+from vencoder.encoder import SpeechEncoder
+import onnxruntime
+import torch
+
+class ContentVec768L9_Onnx(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/vec-768-layer-9.onnx", device=None):
+        print("load model(s) from {}".format(vec_path))
+        self.hidden_dim = 768
+        if device is None:
+            self.dev = torch.device("cpu")
+        else:
+            self.dev = torch.device(device)
+        if device == 'cpu' or device == torch.device("cpu") or device is None:
+            providers = ['CPUExecutionProvider']
+        elif device == 'cuda' or device == torch.device("cuda"):
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
+
+    def encoder(self, wav):
+        feats = wav
+        if feats.dim() == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats.view(1, -1)
+        feats = feats.unsqueeze(0).cpu().detach().numpy()
+        onnx_input = {self.model.get_inputs()[0].name: feats}
+        logits = self.model.run(None, onnx_input)
+        return torch.tensor(logits[0]).transpose(1, 2).to(self.dev)
vencoder/DPHubert.py
ADDED
@@ -0,0 +1,26 @@
+from vencoder.encoder import SpeechEncoder
+import torch
+from vencoder.dphubert.model import wav2vec2_model
+
+class DPHubert(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/DPHuBERT-sp0.75.pth", device=None):
+        print("load model(s) from {}".format(vec_path))
+        if device is None:
+            self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        else:
+            self.dev = torch.device(device)
+        ckpt = torch.load(vec_path)
+        self.hidden_dim = 768
+        self.model = wav2vec2_model(**ckpt["config"]).to(self.dev)
+        self.model.load_state_dict(ckpt["state_dict"], strict=False)
+
+    def encoder(self, wav):
+        feats = wav
+        if feats.dim() == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats[None, :]
+        with torch.no_grad():
+            with torch.inference_mode():
+                units = self.model(feats)[0]
+        return units.transpose(1, 2)
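`load_state_dict(..., strict=False)` is what allows the pruned DPHuBERT checkpoint to populate a freshly constructed `wav2vec2_model` even when the two key sets do not match exactly. A toy illustration of the flag's behavior (the module and key names here are illustrative):

import torch
from torch import nn

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
state = model.state_dict()
state.pop("2.bias")  # simulate a checkpoint that lacks one key

result = model.load_state_dict(state, strict=False)
print(result.missing_keys)  # ['2.bias'] is reported, not raised as an error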
vencoder/HubertSoft.py
ADDED
@@ -0,0 +1,24 @@
+from vencoder.encoder import SpeechEncoder
+import torch
+from vencoder.hubert import hubert_model
+class HubertSoft(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/hubert-soft-0d54a1f4.pt", device=None):
+        print("load model(s) from {}".format(vec_path))
+        hubert_soft = hubert_model.hubert_soft(vec_path)
+        if device is None:
+            self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        else:
+            self.dev = torch.device(device)
+        self.hidden_dim = 256
+        self.model = hubert_soft.to(self.dev)
+
+    def encoder(self, wav):
+        feats = wav
+        if feats.dim() == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats[None, None, :]
+        with torch.no_grad():
+            with torch.inference_mode():
+                units = self.model.units(feats)
+        return units.transpose(1, 2)
vencoder/HubertSoft_Onnx.py
ADDED
@@ -0,0 +1,28 @@
+from vencoder.encoder import SpeechEncoder
+import onnxruntime
+import torch
+
+class HubertSoft_Onnx(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/hubert-soft.onnx", device=None):
+        print("load model(s) from {}".format(vec_path))
+        self.hidden_dim = 256
+        if device is None:
+            self.dev = torch.device("cpu")
+        else:
+            self.dev = torch.device(device)
+        if device == 'cpu' or device == torch.device("cpu") or device is None:
+            providers = ['CPUExecutionProvider']
+        elif device == 'cuda' or device == torch.device("cuda"):
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
+
+    def encoder(self, wav):
+        feats = wav
+        if feats.dim() == 2:  # double channels
+            feats = feats.mean(-1)
+        assert feats.dim() == 1, feats.dim()
+        feats = feats.view(1, -1)
+        feats = feats.unsqueeze(0).cpu().detach().numpy()
+        onnx_input = {self.model.get_inputs()[0].name: feats}
+        logits = self.model.run(None, onnx_input)
+        return torch.tensor(logits[0]).transpose(1, 2).to(self.dev)
vencoder/WhisperPPG.py
ADDED
@@ -0,0 +1,30 @@
+from vencoder.encoder import SpeechEncoder
+import torch
+
+from vencoder.whisper.model import Whisper, ModelDimensions
+from vencoder.whisper.audio import pad_or_trim, log_mel_spectrogram
+
+
+class WhisperPPG(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/medium.pt", device=None):
+        if device is None:
+            self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        else:
+            self.dev = torch.device(device)
+        checkpoint = torch.load(vec_path, map_location=device)
+        dims = ModelDimensions(**checkpoint["dims"])
+        model = Whisper(dims)
+        model.load_state_dict(checkpoint["model_state_dict"])
+        self.hidden_dim = dims
+        self.model = model.to(self.dev)
+
+    def encoder(self, wav):
+        audio = wav
+        audln = audio.shape[0]
+        ppgln = audln // 320
+        audio = pad_or_trim(audio)
+        mel = log_mel_spectrogram(audio).to(self.dev)
+        with torch.no_grad():
+            ppg = self.model.encoder(mel.unsqueeze(0)).squeeze().data.cpu().float().numpy()
+            ppg = torch.FloatTensor(ppg[:ppgln]).to(self.dev)
+        return ppg[None, :, :].transpose(1, 2)
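The `audln // 320` frame count follows from Whisper's geometry at 16 kHz: the log-mel front end uses a 160-sample hop and the encoder's convolutions downsample time by a factor of 2, so each encoder frame covers 320 input samples; slicing to `ppgln` trims the frames that `pad_or_trim` added. A small worked check (constants assumed from the standard Whisper preprocessing):

SAMPLE_RATE = 16_000
HOP_LENGTH = 160     # mel-spectrogram hop in the Whisper front end
ENCODER_STRIDE = 2   # temporal downsampling inside the Whisper encoder

audln = 4 * SAMPLE_RATE                 # 4 seconds of input audio
mel_frames = audln // HOP_LENGTH        # 400 mel frames before padding
ppg_frames = mel_frames // ENCODER_STRIDE
assert ppg_frames == audln // 320       # 200 PPG frames kept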
vencoder/WhisperPPGLarge.py
ADDED
@@ -0,0 +1,30 @@
+from vencoder.encoder import SpeechEncoder
+import torch
+
+from vencoder.whisper.model import Whisper, ModelDimensions
+from vencoder.whisper.audio import pad_or_trim, log_mel_spectrogram
+
+
+class WhisperPPGLarge(SpeechEncoder):
+    def __init__(self, vec_path="pretrain/large-v2.pt", device=None):
+        if device is None:
+            self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        else:
+            self.dev = torch.device(device)
+        checkpoint = torch.load(vec_path, map_location=device)
+        dims = ModelDimensions(**checkpoint["dims"])
+        model = Whisper(dims)
+        model.load_state_dict(checkpoint["model_state_dict"])
+        self.hidden_dim = dims
+        self.model = model.to(self.dev)
+
+    def encoder(self, wav):
+        audio = wav
+        audln = audio.shape[0]
+        ppgln = audln // 320
+        audio = pad_or_trim(audio)
+        mel = log_mel_spectrogram(audio).to(self.dev)
+        with torch.no_grad():
+            ppg = self.model.encoder(mel.unsqueeze(0)).squeeze().data.cpu().float().numpy()
+            ppg = torch.FloatTensor(ppg[:ppgln]).to(self.dev)
+        return ppg[None, :, :].transpose(1, 2)
vencoder/__init__.py
ADDED
File without changes
vencoder/dphubert/__init__.py
ADDED
File without changes
vencoder/dphubert/components.py
ADDED
@@ -0,0 +1,1410 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Building blocks for speech SSL models supporting pruning.
|
2 |
+
|
3 |
+
Originally from:
|
4 |
+
https://github.com/pytorch/audio/blob/main/torchaudio/models/wav2vec2/components.py
|
5 |
+
|
6 |
+
"""
|
7 |
+
|
8 |
+
from collections import defaultdict
|
9 |
+
from typing import List, Optional, Tuple
|
10 |
+
import math
|
11 |
+
|
12 |
+
import torch
|
13 |
+
from torch import nn, Tensor
|
14 |
+
from torch.nn import Module, Parameter
|
15 |
+
|
16 |
+
from .hardconcrete import HardConcrete
|
17 |
+
from .pruning_utils import (
|
18 |
+
prune_linear_layer,
|
19 |
+
prune_conv1d_layer,
|
20 |
+
prune_layer_norm,
|
21 |
+
)
|
22 |
+
|
23 |
+
|
24 |
+
def _init_transformer_params(module):
|
25 |
+
"""
|
26 |
+
Initialize the weights of Transformer module in Wav2Vec2/HuBERT.
|
27 |
+
|
28 |
+
If the module is ``nn.Linear``, normalize the weight with mean 0 and standard deviation 0.02.
|
29 |
+
If ``bias`` is set to ``True`` in the module, set ``bias`` to 0.
|
30 |
+
|
31 |
+
If the module is ``nn.Embedding``, normalize the weight with mean 0 and standard deviation 0.02.
|
32 |
+
If ``padding_idx`` is not None, set the weight of padding to 0.
|
33 |
+
|
34 |
+
Note:
|
35 |
+
Ths method corresponds to
|
36 |
+
`init_bert_params
|
37 |
+
<https://github.com/facebookresearch/fairseq/blob/main/fairseq/modules/transformer_sentence_encoder.py#L21>`__
|
38 |
+
in the original ``fairseq`` implementation.
|
39 |
+
"""
|
40 |
+
|
41 |
+
def normal_(data):
|
42 |
+
data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
|
43 |
+
|
44 |
+
if isinstance(module, nn.Linear):
|
45 |
+
normal_(module.weight.data)
|
46 |
+
if module.bias is not None:
|
47 |
+
module.bias.data.zero_()
|
48 |
+
if isinstance(module, nn.Embedding):
|
49 |
+
normal_(module.weight.data)
|
50 |
+
if module.padding_idx is not None:
|
51 |
+
module.weight.data[module.padding_idx].zero_()
|
52 |
+
|
53 |
+
|
54 |
+
class LayerNorm(nn.LayerNorm):
|
55 |
+
"""Layer norm with transpose"""
|
56 |
+
|
57 |
+
def forward(self, input: Tensor) -> Tensor:
|
58 |
+
x = input.transpose(-2, -1)
|
59 |
+
x = nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
|
60 |
+
x = x.transpose(-2, -1)
|
61 |
+
return x
|
62 |
+
|
63 |
+
|
64 |
+
class ConvLayerBlock(Module):
|
65 |
+
"""Convolution unit of FeatureExtractor"""
|
66 |
+
|
67 |
+
def __init__(
|
68 |
+
self,
|
69 |
+
in_channels: int,
|
70 |
+
out_channels: int,
|
71 |
+
kernel_size: int,
|
72 |
+
stride: int,
|
73 |
+
bias: bool,
|
74 |
+
layer_norm: Optional[Module],
|
75 |
+
prune_conv_channels: bool = False,
|
76 |
+
):
|
77 |
+
super().__init__()
|
78 |
+
self.kernel_size = kernel_size
|
79 |
+
self.stride = stride
|
80 |
+
self.layer_norm = layer_norm
|
81 |
+
self.conv = nn.Conv1d(
|
82 |
+
in_channels=in_channels,
|
83 |
+
out_channels=out_channels,
|
84 |
+
kernel_size=kernel_size,
|
85 |
+
stride=stride,
|
86 |
+
bias=bias,
|
87 |
+
)
|
88 |
+
|
89 |
+
if prune_conv_channels:
|
90 |
+
self.hard_concrete = HardConcrete(n_in=out_channels, init_mean=0.01)
|
91 |
+
else:
|
92 |
+
self.hard_concrete = None
|
93 |
+
|
94 |
+
def forward(
|
95 |
+
self,
|
96 |
+
x: Tensor,
|
97 |
+
length: Optional[Tensor],
|
98 |
+
) -> Tuple[Tensor, Optional[Tensor]]:
|
99 |
+
"""
|
100 |
+
Args:
|
101 |
+
x (Tensor): Shape: ``[batch, in_channels, in_frame]``.
|
102 |
+
length (Tensor or None, optional): Shape ``[batch, ]``.
|
103 |
+
Returns:
|
104 |
+
Tensor: Shape ``[batch, out_channels, out_frames]``.
|
105 |
+
Optional[Tensor]: Shape ``[batch, ]``.
|
106 |
+
"""
|
107 |
+
x = self.conv(x)
|
108 |
+
if self.layer_norm is not None:
|
109 |
+
x = self.layer_norm(x)
|
110 |
+
x = nn.functional.gelu(x)
|
111 |
+
|
112 |
+
if self.hard_concrete is not None:
|
113 |
+
channel_mask = self.hard_concrete() # hard concrete mask, (out_channels,)
|
114 |
+
x = x * channel_mask.unsqueeze(-1)
|
115 |
+
|
116 |
+
if length is not None:
|
117 |
+
length = torch.div(length - self.kernel_size, self.stride, rounding_mode="floor") + 1
|
118 |
+
# When input length is 0, the resulting length can be negative. So fix it here.
|
119 |
+
length = torch.max(torch.zeros_like(length), length)
|
120 |
+
return x, length
|
121 |
+
|
122 |
+
def get_num_params_and_out_channels(self, in_channels):
|
123 |
+
if self.hard_concrete is not None:
|
124 |
+
out_channels = self.hard_concrete.l0_norm()
|
125 |
+
else:
|
126 |
+
out_channels = self.conv.out_channels
|
127 |
+
|
128 |
+
num_params = in_channels * out_channels * self.kernel_size
|
129 |
+
if self.conv.bias is not None:
|
130 |
+
num_params += out_channels
|
131 |
+
if self.layer_norm is not None:
|
132 |
+
num_params += out_channels * 2
|
133 |
+
|
134 |
+
return num_params, out_channels
|
135 |
+
|
136 |
+
|
137 |
+
class FeatureExtractor(Module):
|
138 |
+
"""Extract features from audio
|
139 |
+
|
140 |
+
Args:
|
141 |
+
conv_layers (nn.ModuleList):
|
142 |
+
convolution layers
|
143 |
+
"""
|
144 |
+
|
145 |
+
def __init__(
|
146 |
+
self,
|
147 |
+
conv_layers: nn.ModuleList,
|
148 |
+
):
|
149 |
+
super().__init__()
|
150 |
+
self.conv_layers = conv_layers
|
151 |
+
|
152 |
+
# NOTE: a dummy weight used to save the soft mask of the last conv layer
|
153 |
+
self.dummy_weight = nn.Parameter(
|
154 |
+
torch.ones(conv_layers[-1].conv.out_channels, dtype=torch.float32),
|
155 |
+
requires_grad=False
|
156 |
+
)
|
157 |
+
|
158 |
+
def forward(
|
159 |
+
self,
|
160 |
+
x: Tensor,
|
161 |
+
length: Optional[Tensor],
|
162 |
+
) -> Tuple[Tensor, Optional[Tensor]]:
|
163 |
+
"""
|
164 |
+
Args:
|
165 |
+
x (Tensor):
|
166 |
+
Input Tensor representing a batch of audio,
|
167 |
+
shape: ``[batch, time]``.
|
168 |
+
length (Tensor or None, optional):
|
169 |
+
Valid length of each input sample. shape: ``[batch, ]``.
|
170 |
+
|
171 |
+
Returns:
|
172 |
+
Tensor:
|
173 |
+
The resulting feature, shape: ``[batch, frame, feature]``
|
174 |
+
Optional[Tensor]:
|
175 |
+
Valid length of each output sample. shape: ``[batch, ]``.
|
176 |
+
"""
|
177 |
+
if x.ndim != 2:
|
178 |
+
raise ValueError("Expected the input Tensor to be 2D (batch, time), " "but received {list(x.shape)}")
|
179 |
+
|
180 |
+
x = x.unsqueeze(1) # (batch, channel==1, frame)
|
181 |
+
for layer in self.conv_layers:
|
182 |
+
x, length = layer(x, length) # (batch, feature, frame)
|
183 |
+
x = x.transpose(1, 2) # (batch, frame, feature)
|
184 |
+
x = x * self.dummy_weight
|
185 |
+
return x, length
|
186 |
+
|
187 |
+
def get_num_params_and_final_out_channels(self):
|
188 |
+
in_channels = 1
|
189 |
+
num_params = 0
|
190 |
+
for layer in self.conv_layers:
|
191 |
+
layer_params, in_channels = layer.get_num_params_and_out_channels(in_channels)
|
192 |
+
num_params += layer_params
|
193 |
+
|
194 |
+
num_params += in_channels # dummy weight
|
195 |
+
|
196 |
+
return num_params, in_channels
|
197 |
+
|
198 |
+
def prune(self):
|
199 |
+
""""Prune conv layers and dummy weight based on hardconcrete parameters.
|
200 |
+
This is an in-place operation.
|
201 |
+
"""
|
202 |
+
new_config = [] # [(output_channel, kernel_size, stride), ...]
|
203 |
+
for idx, layer in enumerate(self.conv_layers):
|
204 |
+
if layer.hard_concrete is not None:
|
205 |
+
assert not layer.hard_concrete.training
|
206 |
+
mask = layer.hard_concrete() # (out_features,)
|
207 |
+
index = mask.nonzero().squeeze(-1) # 2D -> 1D
|
208 |
+
assert len(index) > 0, f"Conv channels pruned to zero at index {idx}"
|
209 |
+
new_config.append(
|
210 |
+
(len(index), layer.kernel_size, layer.stride)
|
211 |
+
)
|
212 |
+
|
213 |
+
# prune the current layer
|
214 |
+
prune_conv1d_layer(layer.conv, index, "output")
|
215 |
+
if layer.layer_norm is not None:
|
216 |
+
prune_layer_norm(layer.layer_norm, index)
|
217 |
+
|
218 |
+
# prune the next layer
|
219 |
+
if idx == len(self.conv_layers) - 1:
|
220 |
+
self.dummy_weight.data *= mask
|
221 |
+
self.dummy_weight = nn.Parameter(
|
222 |
+
self.dummy_weight.index_select(0, index).clone().detach(), requires_grad=False
|
223 |
+
)
|
224 |
+
else:
|
225 |
+
self.conv_layers[idx+1].conv.weight.data *= mask.unsqueeze(-1)
|
226 |
+
prune_conv1d_layer(self.conv_layers[idx+1].conv, index, dim="input")
|
227 |
+
|
228 |
+
layer.hard_concrete = None
|
229 |
+
else:
|
230 |
+
new_config.append(
|
231 |
+
(layer.conv.out_channels, layer.kernel_size, layer.stride)
|
232 |
+
)
|
233 |
+
index = torch.arange(layer.conv.out_channels, dtype=torch.long)
|
234 |
+
|
235 |
+
return new_config, index
|
236 |
+
|
237 |
+
|
238 |
+
class FeatureProjection(Module):
|
239 |
+
"""Layer that connects FeatureExtractor and Encoder
|
240 |
+
|
241 |
+
Projects features to encoder dimension.
|
242 |
+
|
243 |
+
Args:
|
244 |
+
in_features (int): Input feature dim.
|
245 |
+
out_features (int): Output feature dim.
|
246 |
+
dropout (float): Dropout probability.
|
247 |
+
"""
|
248 |
+
|
249 |
+
def __init__(
|
250 |
+
self,
|
251 |
+
in_features: int,
|
252 |
+
out_features: int,
|
253 |
+
dropout: float,
|
254 |
+
):
|
255 |
+
super().__init__()
|
256 |
+
self.layer_norm = nn.LayerNorm(in_features)
|
257 |
+
self.projection = nn.Linear(
|
258 |
+
in_features,
|
259 |
+
out_features,
|
260 |
+
)
|
261 |
+
self.dropout = nn.Dropout(dropout)
|
262 |
+
|
263 |
+
def forward(self, x):
|
264 |
+
"""
|
265 |
+
Args:
|
266 |
+
x (Tensor):
|
267 |
+
Feature Tensor. shape: ``[batch, frame, in_feature]``
|
268 |
+
Returns:
|
269 |
+
Tensor: Projected features. ``[batch, frame, out_feature]``.
|
270 |
+
"""
|
271 |
+
x = self.layer_norm(x)
|
272 |
+
x = self.projection(x)
|
273 |
+
x = self.dropout(x)
|
274 |
+
return x
|
275 |
+
|
276 |
+
def get_num_params(self, in_features):
|
277 |
+
return in_features * 2 + (in_features + 1) * self.projection.out_features
|
278 |
+
|
279 |
+
|
280 |
+
class ConvolutionalPositionalEmbedding(Module):
|
281 |
+
"""Positional embedding which is placed at the beginning of Transformer.
|
282 |
+
|
283 |
+
Args:
|
284 |
+
embed_dim (int): Feature dimension of the input Tensor.
|
285 |
+
kernel_size (int): The number of frames to be use.
|
286 |
+
groups (int): The number of groups in feature dimensions.
|
287 |
+
"""
|
288 |
+
|
289 |
+
def __init__(
|
290 |
+
self,
|
291 |
+
embed_dim: int,
|
292 |
+
kernel_size: int,
|
293 |
+
groups: int,
|
294 |
+
):
|
295 |
+
super().__init__()
|
296 |
+
self.embed_dim = embed_dim
|
297 |
+
self.kernel_size = kernel_size
|
298 |
+
self.conv = nn.Conv1d(
|
299 |
+
in_channels=embed_dim,
|
300 |
+
out_channels=embed_dim,
|
301 |
+
kernel_size=kernel_size,
|
302 |
+
padding=kernel_size // 2,
|
303 |
+
groups=groups,
|
304 |
+
)
|
305 |
+
|
306 |
+
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
|
307 |
+
self.num_remove: int = 1 if kernel_size % 2 == 0 else 0
|
308 |
+
|
309 |
+
def __prepare_scriptable__(self):
|
310 |
+
for hook in self.conv._forward_pre_hooks.values():
|
311 |
+
# The hook we want to remove is an instance of WeightNorm class, so
|
312 |
+
# normally we would do `if isinstance(...)` but this class is not accessible
|
313 |
+
# because of shadowing, so we check the module name directly.
|
314 |
+
# https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
|
315 |
+
if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm":
|
316 |
+
torch.nn.utils.remove_weight_norm(self.conv)
|
317 |
+
return self
|
318 |
+
|
319 |
+
def forward(self, x):
|
320 |
+
"""
|
321 |
+
Args:
|
322 |
+
x (Tensor): shape ``[batch, frame, feature]``.
|
323 |
+
|
324 |
+
Returns:
|
325 |
+
Tensor: The resulting feature. Shape ``[batch, frame, feature]``.
|
326 |
+
"""
|
327 |
+
x = x.transpose(-2, -1)
|
328 |
+
x = self.conv(x)
|
329 |
+
if self.num_remove > 0:
|
330 |
+
x = x[..., : -self.num_remove]
|
331 |
+
x = torch.nn.functional.gelu(x)
|
332 |
+
x = x.transpose(-2, -1)
|
333 |
+
return x
|
334 |
+
|
335 |
+
|
336 |
+
class SelfAttention(Module):
|
337 |
+
"""Multihead Self Attention module
|
338 |
+
|
339 |
+
Args:
|
340 |
+
embed_dim (int): Total dimension of the model.
|
341 |
+
num_heads (int): The number of heads.
|
342 |
+
dropout (float, optional):
|
343 |
+
Dropout probability on attn_output_weights. Default: ``0.0``
|
344 |
+
"""
|
345 |
+
|
346 |
+
def __init__(
|
347 |
+
self,
|
348 |
+
embed_dim: int,
|
349 |
+
num_heads: int,
|
350 |
+
head_dim: int,
|
351 |
+
dropout: float = 0.0,
|
352 |
+
prune_heads: bool = False, # whether to prune attention heads
|
353 |
+
prune_layer: bool = False, # whether to prune entire attention layers
|
354 |
+
):
|
355 |
+
super().__init__()
|
356 |
+
|
357 |
+
self.embed_dim = embed_dim
|
358 |
+
self.num_heads = num_heads
|
359 |
+
self.head_dim = head_dim
|
360 |
+
self.dropout = torch.nn.Dropout(dropout)
|
361 |
+
|
362 |
+
self.scaling = self.head_dim**-0.5
|
363 |
+
|
364 |
+
self.k_proj = nn.Linear(embed_dim, num_heads * head_dim, bias=True)
|
365 |
+
self.v_proj = nn.Linear(embed_dim, num_heads * head_dim, bias=True)
|
366 |
+
self.q_proj = nn.Linear(embed_dim, num_heads * head_dim, bias=True)
|
367 |
+
self.out_proj = nn.Linear(num_heads * head_dim, embed_dim, bias=True)
|
368 |
+
|
369 |
+
if prune_heads:
|
370 |
+
self.hard_concrete_for_heads = HardConcrete(n_in=num_heads, init_mean=0.01)
|
371 |
+
else:
|
372 |
+
self.hard_concrete_for_heads = None
|
373 |
+
|
374 |
+
if prune_layer:
|
375 |
+
self.hard_concrete_for_layer = HardConcrete(n_in=1, init_mean=0.01)
|
376 |
+
else:
|
377 |
+
self.hard_concrete_for_layer = None
|
378 |
+
|
379 |
+
def forward(
|
380 |
+
self,
|
381 |
+
x: Tensor,
|
382 |
+
attention_mask: Optional[Tensor] = None,
|
383 |
+
position_bias: Optional[Tensor] = None,
|
384 |
+
key_padding_mask: Optional[Tensor] = None,
|
385 |
+
) -> Tuple[Tensor, Optional[Tensor]]:
|
386 |
+
"""
|
387 |
+
Args:
|
388 |
+
x (Tensor): shape: ``[batch_size, sequence_length, embed_dim]``.
|
389 |
+
attention_mask (Tensor or ``None``, optional):
|
390 |
+
shape: ``[batch_size, 1, sequence_length, sequence_length]``
|
391 |
+
position_bias: Not used. Only for the compatibility with :py:class:`WavLMSelfAttention`.
|
392 |
+
key_padding_mask (Tensor or ``None``): Not used. Only for the compatibility with
|
393 |
+
:py:class:`WavLMSelfAttention`.
|
394 |
+
Returns:
|
395 |
+
(Tensor, ``None``): The resulting attention output and ``None`` (necessary for compatibility
|
396 |
+
with :py:class:`WavLMSelAttention`).
|
397 |
+
Attention output shape: ``[batch, sequence_length, embed_dim]``.
|
398 |
+
"""
|
399 |
+
if x.ndim != 3 or x.shape[2] != self.embed_dim:
|
400 |
+
raise ValueError(
|
401 |
+
f"The expected input shape is (batch, sequence, embed_dim=={self.embed_dim}). " f"Found {x.shape}."
|
402 |
+
)
|
403 |
+
batch_size, length, embed_dim = x.size()
|
404 |
+
|
405 |
+
shape = (batch_size, length, self.num_heads, self.head_dim)
|
406 |
+
q = self.q_proj(x).view(*shape).transpose(2, 1) # B, nH, L, Hd
|
407 |
+
k = self.k_proj(x).view(*shape).permute(0, 2, 3, 1) # B, nH, Hd, L
|
408 |
+
v = self.v_proj(x).view(*shape).transpose(2, 1) # B, nH, L, Hd
|
409 |
+
|
410 |
+
# scale down q to avoid value overflow.
|
411 |
+
weights = (self.scaling * q) @ k # B, nH, L, L
|
412 |
+
if attention_mask is not None:
|
413 |
+
weights += attention_mask
|
414 |
+
# subtracting a constant value from the tensor won't change the output of softmax.
|
415 |
+
# apply the subtraction to avoid value overflow in torch.nn.functional.softmax.
|
416 |
+
# for more details, please see Equation 7 in https://arxiv.org/abs/2112.08778
|
417 |
+
weights = weights - weights.max(dim=-1, keepdim=True)[0]
|
418 |
+
|
419 |
+
weights = torch.nn.functional.softmax(weights, dim=-1)
|
420 |
+
weights = self.dropout(weights)
|
421 |
+
|
422 |
+
output = weights @ v # B, nH, L, Hd
|
423 |
+
|
424 |
+
if self.hard_concrete_for_heads is not None:
|
425 |
+
head_mask = self.hard_concrete_for_heads() # (nH,)
|
426 |
+
output = output * head_mask.unsqueeze(-1).unsqueeze(-1)
|
427 |
+
|
428 |
+
output = output.transpose(2, 1).reshape(batch_size, length, self.num_heads * self.head_dim)
|
429 |
+
|
430 |
+
output = self.out_proj(output)
|
431 |
+
|
432 |
+
if self.hard_concrete_for_layer is not None:
|
433 |
+
layer_mask = self.hard_concrete_for_layer() # (1,)
|
434 |
+
output = output * layer_mask
|
435 |
+
|
436 |
+
return output, None # Necessary for compatibility with WavLMSelAttention
|
437 |
+
|
438 |
+
def get_num_params(self):
|
439 |
+
if self.hard_concrete_for_heads is not None:
|
440 |
+
num_heads = self.hard_concrete_for_heads.l0_norm()
|
441 |
+
else:
|
442 |
+
num_heads = self.num_heads
|
443 |
+
num_params = (self.embed_dim + 1) * num_heads * self.head_dim * 3 \
|
444 |
+
+ (num_heads * self.head_dim + 1) * self.embed_dim
|
445 |
+
|
446 |
+
if self.hard_concrete_for_layer is not None:
|
447 |
+
num_params *= self.hard_concrete_for_layer.l0_norm()
|
448 |
+
|
449 |
+
return num_params
|
450 |
+
|
451 |
+
def prune(self):
|
452 |
+
new_config = {
|
453 |
+
"use_attention": True,
|
454 |
+
"num_heads": self.num_heads,
|
455 |
+
}
|
456 |
+
if self.hard_concrete_for_layer is not None:
|
457 |
+
assert not self.hard_concrete_for_layer.training
|
458 |
+
layer_mask = self.hard_concrete_for_layer() # (1,)
|
459 |
+
self.out_proj.weight.data *= layer_mask
|
460 |
+
self.out_proj.bias.data *= layer_mask
|
461 |
+
if layer_mask == 0:
|
462 |
+
new_config["use_attention"] = False
|
463 |
+
self.hard_concrete_for_layer = None
|
464 |
+
|
465 |
+
if self.hard_concrete_for_heads is not None:
|
466 |
+
assert not self.hard_concrete_for_heads.training
|
467 |
+
head_mask = self.hard_concrete_for_heads() # (num_heads,)
|
468 |
+
new_config["num_heads"] = len(head_mask.nonzero())
|
469 |
+
if new_config["num_heads"] == 0:
|
470 |
+
new_config["use_attention"] = False
|
471 |
+
else:
|
472 |
+
full_mask = head_mask.repeat_interleave(self.head_dim)
|
473 |
+
full_index = full_mask.nonzero().squeeze(-1) # 1D
|
474 |
+
|
475 |
+
prune_linear_layer(self.k_proj, full_index, "output")
|
476 |
+
prune_linear_layer(self.v_proj, full_index, "output")
|
477 |
+
prune_linear_layer(self.q_proj, full_index, "output")
|
478 |
+
|
479 |
+
self.out_proj.weight.data *= full_mask
|
480 |
+
prune_linear_layer(self.out_proj, full_index, "input")
|
481 |
+
self.hard_concrete_for_heads = None
|
482 |
+
|
483 |
+
return new_config
|
484 |
+
|
485 |
+
|
486 |
+
class WavLMSelfAttention(SelfAttention):
|
487 |
+
"""Multi-headed self-attention for WavLM model :cite:`chen2022wavlm`.
|
488 |
+
|
489 |
+
Args:
|
490 |
+
embed_dim (int): Total dimension of the model.
|
491 |
+
num_heads (int): The number of heads.
|
492 |
+
dropout (float, optional): Dropout probability on attn_output_weights. (Default: to ``0.0``)
|
493 |
+
bias (bool, optional): If ``True``, add bias to input / output projection layers. (Default: ``True``)
|
494 |
+
has_relative_attention_bias (bool, optional): If ``True``, apply relative position embedding.
|
495 |
+
Necessary in the first encoder layer, but not in the subsequent ones. (Default: ``False``)
|
496 |
+
num_buckets (int, optional): Number of buckets for relative position embedding. (Default: ``32``)
|
497 |
+
max_distance (int, optional): Naximum distance for relative position embedding. (Default: ``128``)
|
498 |
+
gru_rel_pos (bool, optional): If ``True``, apply gated relative position embedding. (Default: ``False``)
|
499 |
+
"""
|
500 |
+
|
501 |
+
def __init__(
|
502 |
+
self,
|
503 |
+
embed_dim: int,
|
504 |
+
total_num_heads: int,
|
505 |
+
remaining_heads: Optional[List[int]] = None,
|
506 |
+
dropout: float = 0.0,
|
507 |
+
bias: bool = True,
|
508 |
+
has_relative_attention_bias: bool = False,
|
509 |
+
num_buckets: int = 32,
|
510 |
+
max_distance: int = 128,
|
511 |
+
gru_rel_pos: bool = True,
|
512 |
+
prune_heads: bool = False,
|
513 |
+
prune_layer: bool = False,
|
514 |
+
):
|
515 |
+
self.total_num_heads = total_num_heads
|
516 |
+
if remaining_heads is None:
|
517 |
+
self.remaining_heads = list(range(total_num_heads))
|
518 |
+
else:
|
519 |
+
self.remaining_heads = remaining_heads # list of indices
|
520 |
+
|
521 |
+
self.head_dim = embed_dim // total_num_heads
|
522 |
+
|
523 |
+
super().__init__(embed_dim, len(self.remaining_heads), self.head_dim, dropout, prune_heads, prune_layer)
|
524 |
+
|
525 |
+
self.has_relative_attention_bias = has_relative_attention_bias
|
526 |
+
self.num_buckets = num_buckets
|
527 |
+
self.max_distance = max_distance
|
528 |
+
|
529 |
+
if has_relative_attention_bias:
|
530 |
+
self.rel_attn_embed = nn.Embedding(num_buckets, total_num_heads)
|
531 |
+
else:
|
532 |
+
self.rel_attn_embed = None
|
533 |
+
|
534 |
+
# override linear layers to customize bias
|
535 |
+
self.k_proj = nn.Linear(embed_dim, len(self.remaining_heads) * self.head_dim, bias=bias)
|
536 |
+
self.v_proj = nn.Linear(embed_dim, len(self.remaining_heads) * self.head_dim, bias=bias)
|
537 |
+
self.q_proj = nn.Linear(embed_dim, len(self.remaining_heads) * self.head_dim, bias=bias)
|
538 |
+
self.out_proj = nn.Linear(len(self.remaining_heads) * self.head_dim, embed_dim, bias=bias)
|
539 |
+
|
540 |
+
self.gru_rel_pos = gru_rel_pos
|
541 |
+
if self.gru_rel_pos:
|
542 |
+
self.gru_rel_pos_linear = nn.Linear(self.head_dim, 8)
|
543 |
+
self.gru_rel_pos_const = nn.Parameter(torch.ones(1, total_num_heads, 1, 1))
|
544 |
+
self.has_position_bias = True
|
545 |
+
|
546 |
+
def compute_bias(self, query_length: int, key_length: int) -> Tensor:
|
547 |
+
"""Compute relative position embeddings for WavLM model.
|
548 |
+
Args:
|
549 |
+
query_length (int): Query position can take values between 0 and ``query_length - 1``.
|
550 |
+
key_length (int): Key position can take values between 0 and ``key_length - 1``.
|
551 |
+
Returns:
|
552 |
+
Tensor of shape `(num_heads, query_length, key_length)`, relative positions embeddings
|
553 |
+
"""
|
554 |
+
context_position = torch.arange(query_length, dtype=torch.long)[:, None]
|
555 |
+
memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
|
556 |
+
relative_position = memory_position - context_position # Shape (query_length, key_length)
|
557 |
+
relative_position_bucket = self._relative_positions_bucket(relative_position, bidirectional=True)
|
558 |
+
relative_position_bucket = relative_position_bucket.to(self.rel_attn_embed.weight.device)
|
559 |
+
values = self.rel_attn_embed(relative_position_bucket) # Shape (query_length, key_length, num_heads)
|
560 |
+
values = values.permute([2, 0, 1])
|
561 |
+
return values
|
562 |
+
|
563 |
+
def _relative_positions_bucket(self, relative_positions: Tensor, bidirectional: bool = True):
|
564 |
+
"""Compute relative position buckets for WavLM model. Computation similar to formula (5) in WavLM
|
565 |
+
paper :cite:`chen2022wavlm`.
|
566 |
+
Args:
|
567 |
+
relative_positions (Tensor): Relative offsets between query and key positions,
|
568 |
+
of shape ``(query_length, key_length)``.
|
569 |
+
bidirectional (bool): If ``True``, values will be filled both above and below the diagonal in the resulting
|
570 |
+
matrix. If ``False``, the elements above the diagonal (i.e. with negative relative offsets) will be set
|
571 |
+
to zero. (Default ``True``)
|
572 |
+
Returns:
|
573 |
+
Tensor of shape ``(query_length, key_length)`` filled bucketed values of with relative positions.
|
574 |
+
"""
|
575 |
+
num_buckets = self.num_buckets
|
576 |
+
max_distance = self.max_distance
|
577 |
+
# Shape (query_length, key_length)
|
578 |
+
relative_buckets = torch.zeros_like(relative_positions, dtype=torch.long)
|
579 |
+
|
580 |
+
if bidirectional:
|
581 |
+
num_buckets = num_buckets // 2
|
582 |
+
relative_buckets += (relative_positions > 0).to(torch.long) * num_buckets
|
583 |
+
relative_positions = torch.abs(relative_positions)
|
584 |
+
else:
|
585 |
+
relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions))
|
586 |
+
|
587 |
+
max_exact = num_buckets // 2
|
588 |
+
is_small = relative_positions < max_exact
|
589 |
+
|
590 |
+
relative_postion_if_large = max_exact + (
|
591 |
+
torch.log(relative_positions.float() / max_exact)
|
592 |
+
/ math.log(max_distance / max_exact)
|
593 |
+
* (num_buckets - max_exact)
|
594 |
+
).to(torch.long)
|
595 |
+
relative_postion_if_large = torch.min(
|
596 |
+
relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1)
|
597 |
+
)
|
598 |
+
|
599 |
+
relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large)
|
600 |
+
return relative_buckets
|
601 |
+
|
602 |
+
def forward(
|
603 |
+
self,
|
604 |
+
query: Tensor,
|
605 |
+
attention_mask: Optional[Tensor] = None,
|
606 |
+
position_bias: Optional[Tensor] = None,
|
607 |
+
key_padding_mask: Optional[Tensor] = None,
|
608 |
+
) -> Tuple[Tensor, Optional[Tensor]]:
|
609 |
+
"""
|
610 |
+
Args:
|
611 |
+
query (Tensor): Input of shape ``(batch_size, src_len, embed_dim)``.
|
612 |
+
key_padding_mask (Tensor or None, optional): Mask to exclude keys that are pads, of shape
|
613 |
+
`(batch, src_len)`, where padding elements are indicated by 1s. (Default: ``None``)
|
614 |
+
attn_mask: Needs to be ``None``. The argument exists for compatibility with
|
615 |
+
``EncoderLayer``. (Default: ``None``)
|
616 |
+
position_bias (Tensor or None, optional): Position bias of shape
|
617 |
+
``(batch_size * num_heads, src_len, src_len)``. When used inside WavLM model encoder, will be
|
618 |
+
generated in the first layer and then passed from each encoder layer to the next one.
|
619 |
+
(Default: ``None``)
|
620 |
+
Returns:
|
621 |
+
attn_output (Tensor): Attention output of shape ``(batch_size, src_len, embed_dim)``.
|
622 |
+
position_bias (Tensor or None): Position bias of shape ``(batch_size * num_heads, src_len, src_len)``.
|
623 |
+
"""
|
624 |
+
bsz, seq_len, embed_dim = query.size()
|
625 |
+
assert embed_dim == self.embed_dim
|
626 |
+
assert key_padding_mask is None
|
627 |
+
|
628 |
+
# only for the first layer
|
629 |
+
if self.rel_attn_embed is not None and position_bias is None:
|
630 |
+
position_bias = self.compute_bias(seq_len, seq_len)
|
631 |
+
position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.total_num_heads, seq_len, seq_len)
|
632 |
+
|
633 |
+
attn_mask_rel_pos: Optional[Tensor] = None
|
634 |
+
if position_bias is not None:
|
635 |
+
attn_mask_rel_pos = position_bias
|
636 |
+
if self.gru_rel_pos: # Apply gating on relative position bias
|
637 |
+
query_layer = query.view(bsz, seq_len, self.total_num_heads, -1)
|
638 |
+
query_layer = query_layer.permute(0, 2, 1, 3)
|
639 |
+
|
640 |
+
gate_a, gate_b = torch.sigmoid(
|
641 |
+
self.gru_rel_pos_linear(query_layer).view(bsz, self.total_num_heads, seq_len, 2, 4).sum(-1, keepdim=False)
|
642 |
+
).chunk(2, dim=-1)
|
643 |
+
gate_a_1 = gate_a * (gate_b * self.gru_rel_pos_const - 1.0) + 2.0
|
644 |
+
            attn_mask_rel_pos = gate_a_1.view(bsz * self.total_num_heads, -1, 1) * position_bias

        attn_mask_rel_pos = attn_mask_rel_pos.view((-1, seq_len, seq_len))
        attn_mask_rel_pos = attn_mask_rel_pos.reshape(bsz, self.total_num_heads, seq_len, seq_len)[:, self.remaining_heads, :, :]

        attn_mask = attn_mask_rel_pos
        if attention_mask is not None:
            attn_mask = attn_mask + attention_mask
        if key_padding_mask is not None:
            attn_mask = attn_mask.masked_fill(
                key_padding_mask.reshape(bsz, 1, 1, seq_len),
                float("-inf")
            )
        attn_output, _ = super().forward(query, attention_mask=attn_mask)

        return attn_output, position_bias

    def prune(self):
        new_config = {
            "use_attention": True,
            "remaining_heads": self.remaining_heads,
        }
        if self.hard_concrete_for_layer is not None:
            assert not self.hard_concrete_for_layer.training
            layer_mask = self.hard_concrete_for_layer()  # (1,)
            self.out_proj.weight.data *= layer_mask
            self.out_proj.bias.data *= layer_mask
            if layer_mask == 0:
                new_config["use_attention"] = False
            self.hard_concrete_for_layer = None

        if self.hard_concrete_for_heads is not None:
            assert not self.hard_concrete_for_heads.training
            head_mask = self.hard_concrete_for_heads()  # (num_heads,)
            new_config["remaining_heads"] = head_mask.nonzero().squeeze(-1).tolist()
            if len(new_config["remaining_heads"]) == 0:
                new_config["use_attention"] = False
            else:
                full_mask = head_mask.repeat_interleave(self.head_dim)
                full_index = full_mask.nonzero().squeeze(-1)  # 1D

                prune_linear_layer(self.k_proj, full_index, "output")
                prune_linear_layer(self.v_proj, full_index, "output")
                prune_linear_layer(self.q_proj, full_index, "output")

                self.out_proj.weight.data *= full_mask
                prune_linear_layer(self.out_proj, full_index, "input")
            self.hard_concrete_for_heads = None

        return new_config


class FeedForward(Module):
    """Layer that follows attention layer in encoder layer."""

    def __init__(
        self,
        io_features: int,
        intermediate_features: int,
        intermediate_dropout: float,
        output_dropout: float,
        prune_intermediate: bool = False,
        prune_layer: bool = False,
    ):
        super().__init__()
        self.intermediate_dense = nn.Linear(io_features, intermediate_features)
        self.intermediate_dropout = nn.Dropout(intermediate_dropout)
        self.output_dense = nn.Linear(intermediate_features, io_features)
        self.output_dropout = nn.Dropout(output_dropout)

        if prune_intermediate:
            self.hard_concrete_for_intermediate = HardConcrete(
                n_in=intermediate_features, init_mean=0.5
            )
        else:
            self.hard_concrete_for_intermediate = None

        if prune_layer:
            self.hard_concrete_for_layer = HardConcrete(n_in=1, init_mean=0.01)
        else:
            self.hard_concrete_for_layer = None

    def forward(self, x):
        """
        Args:
            x (Tensor): shape: `(batch, sequence_length, io_features)`
        Returns:
            x (Tensor): shape: `(batch, sequence_length, io_features)`
        """
        x = self.intermediate_dense(x)
        x = torch.nn.functional.gelu(x)
        x = self.intermediate_dropout(x)

        if self.hard_concrete_for_intermediate is not None:
            intermediate_mask = self.hard_concrete_for_intermediate()  # (intermediate_features,)
            x = x * intermediate_mask

        x = self.output_dense(x)
        x = self.output_dropout(x)

        if self.hard_concrete_for_layer is not None:
            layer_mask = self.hard_concrete_for_layer()  # (1,)
            x = x * layer_mask

        return x

    def get_num_params(self):
        io_features = self.intermediate_dense.in_features
        if self.hard_concrete_for_intermediate is not None:
            intermediate_features = self.hard_concrete_for_intermediate.l0_norm()
        else:
            intermediate_features = self.intermediate_dense.out_features
        num_params = (io_features + 1) * intermediate_features + (intermediate_features + 1) * io_features

        if self.hard_concrete_for_layer is not None:
            num_params *= self.hard_concrete_for_layer.l0_norm()

        return num_params

    def prune(self):
        new_config = {
            "use_feed_forward": True,
            "ff_interm_features": self.intermediate_dense.out_features
        }
        if self.hard_concrete_for_layer is not None:
            assert not self.hard_concrete_for_layer.training
            layer_mask = self.hard_concrete_for_layer()
            self.output_dense.weight.data *= layer_mask
            self.output_dense.bias.data *= layer_mask
            if layer_mask == 0:
                new_config["use_feed_forward"] = False
            self.hard_concrete_for_layer = None

        if self.hard_concrete_for_intermediate is not None:
            assert not self.hard_concrete_for_intermediate.training
            interm_mask = self.hard_concrete_for_intermediate()
            interm_index = interm_mask.nonzero().squeeze(-1)  # NOTE: must specify dim=-1
            new_config["ff_interm_features"] = len(interm_index)
            if new_config["ff_interm_features"] == 0:
                new_config["use_feed_forward"] = False
            else:
                prune_linear_layer(self.intermediate_dense, interm_index, "output")

                self.output_dense.weight.data *= interm_mask
                prune_linear_layer(self.output_dense, interm_index, "input")
            self.hard_concrete_for_intermediate = None

        return new_config

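The hard-concrete gates above are multiplicative during training and folded into the weights at prune time. Below is a minimal sketch of that round trip, assuming only the FeedForward and HardConcrete classes defined in this file; values printed depend on the (untrained) gate initialization.

# Illustrative sketch: train-time gating vs. post-training structured pruning.
# Assumes FeedForward / HardConcrete from this module are importable.
import torch

ff = FeedForward(
    io_features=768, intermediate_features=3072,
    intermediate_dropout=0.0, output_dropout=0.0,
    prune_intermediate=True,
)
x = torch.randn(2, 50, 768)
ff.train()
y = ff(x)  # a stochastic hard-concrete mask gates the 3072 hidden units

ff.eval()  # prune() asserts the gates are in eval mode (deterministic mask)
config = ff.prune()  # shrinks the two Linear layers in place
print(config["ff_interm_features"])  # the surviving hidden width
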
class EncoderLayer(Module):
    """A layer unit in encoder. Combines multihead self attention and feed forward."""

    def __init__(
        self,
        attention: Optional[Module],  # can be None if the entire layer is pruned
        dropout: float,
        layer_norm_first: bool,
        feed_forward: Optional[Module],  # can be None if the entire layer is pruned
        embed_dim: int,
    ):
        super().__init__()
        self.attention = attention
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(embed_dim)
        self.layer_norm_first = layer_norm_first
        self.feed_forward = feed_forward
        self.final_layer_norm = nn.LayerNorm(embed_dim)
        self.embed_dim = embed_dim

    def forward(
        self,
        x: Tensor,
        attention_mask: Optional[Tensor] = None,
        position_bias: Optional[Tensor] = None,
        key_padding_mask: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """
        Args:
            x (Tensor): Input of shape ``(batch, sequence_length, embed_dim)``.
            attention_mask (Tensor or ``None``, optional): attention mask
                of shape ``(batch, 1, sequence_length, sequence_length)``. (Default: ``None``)
            position_bias (Tensor or ``None``, optional): position bias of shape
                ``(batch_size * num_heads, src_len, src_len)``.
                Only necessary for WavLM model, ``None`` otherwise. (Default: ``None``)
            key_padding_mask (Tensor or ``None``, optional): key padding mask of shape ``(batch_size, src_len)``.
                Only used for WavLM model, ignored otherwise. (Default: ``None``)
        Returns:
            (x, position_bias): Shapes are the same as in the input. Position bias is only relevant for the WavLM
                model, ``None`` otherwise.
        """
        if self.attention is not None:
            residual = x

            if self.layer_norm_first:
                x = self.layer_norm(x)

            x, position_bias = self.attention(
                x, attention_mask=attention_mask, position_bias=position_bias, key_padding_mask=key_padding_mask
            )

            x = self.dropout(x)
            x = residual + x

        if self.layer_norm_first:
            if self.feed_forward is not None:
                x = x + self.feed_forward(self.final_layer_norm(x))
        else:
            # NOTE: for post norm, the layer norms should always be applied even if the layers are pruned.
            x = self.layer_norm(x)
            if self.feed_forward is not None:
                x = x + self.feed_forward(x)
            x = self.final_layer_norm(x)
        return x, position_bias

    def get_num_params(self):
        num_params = self.embed_dim * 2 * 2  # two layer norms
        if self.attention is not None:
            num_params += self.attention.get_num_params()
        if self.feed_forward is not None:
            num_params += self.feed_forward.get_num_params()
        return num_params

class Transformer(Module):
    def __init__(
        self,
        pos_conv_embed: Module,
        dropout: float,
        layers: Module,
        layer_norm_first: bool,
        layer_drop: float,
    ):
        super().__init__()
        self.pos_conv_embed = pos_conv_embed
        self.layer_norm = nn.LayerNorm(pos_conv_embed.embed_dim)
        self.layer_norm_first = layer_norm_first
        self.layer_drop = layer_drop
        self.dropout = nn.Dropout(dropout)
        self.layers = layers

    def _preprocess(self, x: Tensor):
        x = x + self.pos_conv_embed(x)

        if self.layer_norm_first:
            x = self.layer_norm(x)

        x = self.dropout(x)
        return x

    def forward(
        self,
        x: Tensor,
        attention_mask: Optional[Tensor] = None,
        position_bias: Optional[Tensor] = None,
    ) -> Tensor:
        x = self._preprocess(x)
        for layer in self.layers:
            if not (self.training and torch.rand(1).item() <= self.layer_drop):
                x, position_bias = layer(x, attention_mask, position_bias=position_bias)

        if not self.layer_norm_first:
            x = self.layer_norm(x)
        return x

    def get_intermediate_outputs(
        self,
        x: Tensor,
        attention_mask: Optional[Tensor] = None,
        num_layers: Optional[int] = None,
        position_bias: Optional[Tensor] = None,
    ) -> List[Tensor]:
        if num_layers is not None:
            if not 0 < num_layers <= len(self.layers):
                raise ValueError(f"`num_layers` must be between [1, {len(self.layers)}]")

        ret: List[Tensor] = []
        x = self._preprocess(x)
        for layer in self.layers:
            x, position_bias = layer(x, attention_mask, position_bias=position_bias)
            ret.append(x)
            if num_layers is not None and len(ret) >= num_layers:
                return ret
        return ret

    def get_num_params(self):
        # pos_conv_embed and layer_norm
        num_params = sum(p.numel() for p in self.pos_conv_embed.parameters()) + self.pos_conv_embed.embed_dim * 2
        for layer in self.layers:
            num_params += layer.get_num_params()
        return num_params

    def prune(self):
        new_config = defaultdict(list)
        for layer in self.layers:
            attention_config = layer.attention.prune()
            new_config["use_attention"].append(attention_config["use_attention"])
            if "remaining_heads" in attention_config:
                new_config["remaining_heads"].append(attention_config["remaining_heads"])
            else:
                new_config["num_heads"].append(attention_config["num_heads"])

            if not attention_config["use_attention"]:
                layer.attention = None

            ff_config = layer.feed_forward.prune()
            new_config["use_feed_forward"].append(ff_config["use_feed_forward"])
            new_config["ff_interm_features"].append(ff_config["ff_interm_features"])
            if not ff_config["use_feed_forward"]:
                layer.feed_forward = None

        return new_config

class Encoder(Module):
    def __init__(
        self,
        feature_projection: Module,
        transformer: Module,
    ):
        super().__init__()
        self.feature_projection = feature_projection
        self.transformer = transformer

    def _preprocess(
        self,
        features: Tensor,
        lengths: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        x = self.feature_projection(features)

        mask: Optional[Tensor] = None
        if lengths is not None:
            batch_size, max_len, _ = x.shape
            # create mask for padded elements and zero-out them
            mask = torch.arange(max_len, device=lengths.device).expand(batch_size, max_len) >= lengths[:, None]
            x[mask] = 0.0
            # extend the mask to attention shape and set weight
            mask = -10000.0 * mask[:, None, None, :].to(dtype=features.dtype)
            mask = mask.expand(batch_size, 1, max_len, max_len)
        return x, mask

    def forward(
        self,
        features: Tensor,
        lengths: Optional[Tensor] = None,
    ) -> Tensor:
        x, mask = self._preprocess(features, lengths)
        x = self.transformer(x, attention_mask=mask)
        return x

    def extract_features(
        self,
        features: Tensor,
        lengths: Optional[Tensor] = None,
        num_layers: Optional[int] = None,
    ) -> List[Tensor]:
        x, masks = self._preprocess(features, lengths)
        interm = self.transformer.get_intermediate_outputs(x, attention_mask=masks, num_layers=num_layers)
        return [x] + interm

    def get_num_params(self, in_features):
        """Calculate the current model size."""
        feature_projection_size = self.feature_projection.get_num_params(in_features)
        transformer_size = self.transformer.get_num_params()
        return feature_projection_size + transformer_size

    def prune(self, conv_out_index):
        """In-place pruning of submodules."""
        prune_layer_norm(self.feature_projection.layer_norm, conv_out_index)
        prune_linear_layer(self.feature_projection.projection, conv_out_index, "input")
        transformer_config = self.transformer.prune()
        return transformer_config

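The `_preprocess` step above is where variable-length batches become additive attention masks: padded positions get a large negative value so they receive near-zero attention weight. A standalone sketch of the same construction (shapes made up for illustration):

import torch

lengths = torch.tensor([4, 2])            # valid frames per utterance
batch_size, max_len = 2, 4
pad = torch.arange(max_len).expand(batch_size, max_len) >= lengths[:, None]
# pad marks padded positions: [[F, F, F, F], [F, F, T, T]]
attn_mask = -10000.0 * pad[:, None, None, :].float()
attn_mask = attn_mask.expand(batch_size, 1, max_len, max_len)
# adding attn_mask to attention logits drives padded keys toward zero weight
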
################################################################################
def _get_feature_extractor(
    norm_mode: str,
    shapes: List[Tuple[int, int, int]],
    bias: bool,
    prune_conv_channels: bool = False,
) -> FeatureExtractor:
    """
    Args:
        norm_mode (str):
            Either "group_norm" or "layer_norm".
            If "group_norm", then a single normalization is applied
            in the first convolution block. Otherwise, all the convolution
            blocks will have layer normalization.
            This option corresponds to "extractor_mode" from fairseq.
            Expected values are "group_norm" for Base arch, and
            "layer_norm" for Large arch.
        shapes (list of tuple of int):
            Configuration of convolution layers. List of convolution configuration,
            i.e. ``[(output_channel, kernel_size, stride), ...]``
            This option corresponds to "conv_feature_layers" from fairseq.
            Expected values are
            ``[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2``
            for all the architectures.
        bias (bool):
            Whether to include bias term to each convolution operation.
            This option corresponds to "conv_bias" from fairseq.
            Expected values are False for Base arch, and True for Large arch.

    See Also:
        * Original implementation
          https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L666-L733
        * "extractor_mode"
          - Def and base:
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L38-L45
          - Large:
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L52
        * "conv_feature_layers"
          - Def, base and large:
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L94-L100
        * "conv_bias"
          - Def and base:
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L101-L103
          - Large:
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L61
    """
    if norm_mode not in ["group_norm", "layer_norm"]:
        raise ValueError("Invalid norm mode")
    blocks = []
    in_channels = 1
    for i, (out_channels, kernel_size, stride) in enumerate(shapes):
        normalization = None
        if norm_mode == "group_norm" and i == 0:
            normalization = nn.GroupNorm(
                num_groups=out_channels,
                num_channels=out_channels,
                affine=True,
            )
        elif norm_mode == "layer_norm":
            normalization = LayerNorm(
                normalized_shape=out_channels,
                elementwise_affine=True,
            )
        blocks.append(
            ConvLayerBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                bias=bias,
                layer_norm=normalization,
                prune_conv_channels=prune_conv_channels,
            )
        )
        in_channels = out_channels
    return FeatureExtractor(nn.ModuleList(blocks))

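For orientation, a hedged sketch of building the default wav2vec 2.0 extractor with this factory; it assumes `FeatureExtractor.forward` returns `(features, lengths)` as in the torchaudio code this file derives from:

import torch

shapes = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2
extractor = _get_feature_extractor("group_norm", shapes, bias=False)

wave = torch.randn(1, 16000)  # 1 second at 16 kHz
feats, lengths = extractor(wave, torch.tensor([16000]))
print(feats.shape)  # roughly (1, 49, 512): the strides multiply to a 320-sample (20 ms) hop
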
def _get_encoder(
    in_features: int,
    embed_dim: int,
    dropout_input: float,
    pos_conv_kernel: int,
    pos_conv_groups: int,
    num_layers: int,
    use_attention: List[bool],
    use_feed_forward: List[bool],
    num_heads: List[int],
    head_dim: int,
    attention_dropout: float,
    ff_interm_features: List[int],
    ff_interm_dropout: float,
    dropout: float,
    layer_norm_first: bool,
    layer_drop: float,
    prune_attention_heads: bool = False,
    prune_attention_layer: bool = False,
    prune_feed_forward_intermediate: bool = False,
    prune_feed_forward_layer: bool = False,
) -> Encoder:
    """
    Args:
        in_features (int): The number of input features.
        embed_dim (int):
            The dimension of embedding.
            This option corresponds to "encoder_embed_dim" from fairseq.
            Expected values are 768 for Base arch, and 1024 for Large arch.
        dropout_input (float):
            The dropout probability applied after the input feature is projected
            to ``embed_dim``.
            This option corresponds to "dropout_input" from fairseq.
            Expected values are 0.1 for both Base and Large arch.
        pos_conv_kernel (int):
            The kernel size of convolutional positional embeddings.
            This option corresponds to "conv_pos" from fairseq.
            Expected values are 128 for both Base and Large arch.
        pos_conv_groups (int):
            The number of groups of convolutional positional embeddings.
            This option corresponds to "conv_pos_groups" from fairseq.
            Expected values are 16 for both Base and Large arch.
        num_layers (int):
            The number of self attention layers in transformer block.
            This option corresponds to "encoder_layers" from fairseq.
            Expected values are 12 for Base and 24 for Large arch.
        num_heads (int):
            The number of heads in self attention layers.
            This option corresponds to "encoder_attention_heads" from fairseq.
            Expected values are 12 for Base and 16 for Large arch.
        attention_dropout (float):
            The dropout probability applied after softmax in self-attention layer.
            This option corresponds to "attention_dropout" from fairseq.
            Expected values are 0.1 for Base and 0.0 for Large arch.
        ff_interm_features (int):
            The dimension of hidden features in feed forward layer.
            This option corresponds to "encoder_ffn_embed_dim" from fairseq.
            Expected values are 3072 for Base and 4096 for Large arch.
        ff_interm_dropout (float):
            The dropout probability applied in feedforward layer.
            This option corresponds to "activation_dropout" from fairseq.
            Expected values are 0.1 for both Base and Large arch.
        dropout (float):
            The dropout probability applied at the end of feed forward layer.
            This option corresponds to "dropout" from fairseq.
            Expected values are 0.1 for Base and 0.0 for Large arch.
        layer_norm_first (bool):
            Control the order of layer norm in transformer layer and each encoder layer.
            If True, in transformer layer, layer norm is applied before features are fed
            to encoder layers. In encoder layer, two layer norms are applied before and after
            self attention.
            If False, in transformer layer, layer norm is applied after features are fed
            to encoder layers. In encoder layer, two layer norms are applied after self
            attention, before and after feed forward.
            This option corresponds to "layer_norm_first" from fairseq.
            Expected values are False for Base and True for Large arch.
        layer_drop (float):
            Probability to drop each encoder layer during training.
            This option corresponds to "layerdrop" from fairseq.
            Expected values are 0.1 for both Base and Large arch.

    See Also:
        * "encoder_embed_dim"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L49-L51
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L64
        * "dropout_input"
          - Def, base and large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L75-L78
        * "conv_pos"
          - Def, base and large
            NOTE: The description is wrong.
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L204-L207
          - Usage
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L756
        * "conv_pos_groups"
          - Def, base and large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L208-L211
        * "encoder_layers"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L46-L48
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L63
        * "encoder_attention_heads"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L55-L57
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L66
        * "attention_dropout"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L66-L68
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L60
        * "encoder_ffn_embed_dim"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L52-L54
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L65
        * "activation_dropout"
          - Def
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L69-L71
          - Base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L55
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L55
        * "dropout"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L63-L65
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L59
        * "layer_norm_first"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L91-L93
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L53
        * "layerdrop"
          - Def
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L72-L74
          - Base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L54
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L54
    """
    feature_projection = FeatureProjection(in_features, embed_dim, dropout_input)
    pos_conv = ConvolutionalPositionalEmbedding(embed_dim, pos_conv_kernel, pos_conv_groups)

    # Original impl
    # https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L768-L782
    encoder_layers = nn.ModuleList()
    for idx in range(num_layers):
        if use_attention[idx]:
            attention = SelfAttention(
                embed_dim=embed_dim,
                num_heads=num_heads[idx],
                head_dim=head_dim,
                dropout=attention_dropout,
                prune_heads=prune_attention_heads,
                prune_layer=prune_attention_layer,
            )
        else:
            attention = None
        if use_feed_forward[idx]:
            feed_forward = FeedForward(
                io_features=embed_dim,
                intermediate_features=ff_interm_features[idx],
                intermediate_dropout=ff_interm_dropout,
                output_dropout=dropout,
                prune_intermediate=prune_feed_forward_intermediate,
                prune_layer=prune_feed_forward_layer,
            )
        else:
            feed_forward = None
        encoder_layers.append(
            EncoderLayer(
                attention=attention,
                dropout=dropout,
                layer_norm_first=layer_norm_first,
                feed_forward=feed_forward,
                embed_dim=embed_dim,
            )
        )
    transformer = Transformer(
        pos_conv_embed=pos_conv,
        dropout=dropout,
        layers=encoder_layers,
        layer_norm_first=not layer_norm_first,
        layer_drop=layer_drop,
    )
    return Encoder(feature_projection, transformer)

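Unlike the torchaudio original, `use_attention`, `use_feed_forward`, `num_heads`, and `ff_interm_features` are per-layer lists, which is what lets a pruned, heterogeneous stack be rebuilt. A toy two-layer sketch with illustrative values:

encoder = _get_encoder(
    in_features=512, embed_dim=768, dropout_input=0.1,
    pos_conv_kernel=128, pos_conv_groups=16,
    num_layers=2,
    use_attention=[True, True], use_feed_forward=[True, True],
    num_heads=[12, 12], head_dim=64,
    attention_dropout=0.1,
    ff_interm_features=[3072, 1536],  # per-layer FFN widths may differ after pruning
    ff_interm_dropout=0.1, dropout=0.1,
    layer_norm_first=False, layer_drop=0.1,
)
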
def _get_wavlm_encoder(
    in_features: int,
    embed_dim: int,
    dropout_input: float,
    pos_conv_kernel: int,
    pos_conv_groups: int,
    num_layers: int,
    use_attention: List[bool],
    use_feed_forward: List[bool],
    total_num_heads: List[int],
    remaining_heads: List[List[int]],
    num_buckets: int,
    max_distance: int,
    attention_dropout: float,
    ff_interm_features: List[int],
    ff_interm_dropout: float,
    dropout: float,
    layer_norm_first: bool,
    layer_drop: float,
    prune_attention_heads: bool = False,
    prune_attention_layer: bool = False,
    prune_feed_forward_intermediate: bool = False,
    prune_feed_forward_layer: bool = False,
) -> Encoder:
    """
    Construct encoder for WavLM model :cite:`chen2022wavlm`. The structure of the encoder and most of the arguments
    are the same as in :py:func:`_get_encoder`, so refer there for documentation. The only difference from the
    Wav2Vec2 encoder is the usage of `WavLMSelfAttention` instead of `SelfAttention` and two additional parameters:
    `num_buckets` and `max_distance`.
    Args:
        in_features (int): See :py:func:`_get_encoder`.
        embed_dim (int): See :py:func:`_get_encoder`.
        dropout_input (float): See :py:func:`_get_encoder`.
        pos_conv_kernel (int): See :py:func:`_get_encoder`.
        pos_conv_groups (int): See :py:func:`_get_encoder`.
        num_layers (int): See :py:func:`_get_encoder`.
        total_num_heads (List[int]): Total number of attention heads per layer, before any head pruning.
        remaining_heads (List[List[int]]): Indices of the attention heads kept in each layer.
        num_buckets (int): Number of buckets for relative position embedding.
        max_distance (int): Maximum distance for relative position embedding.
        attention_dropout (float): See :py:func:`_get_encoder`.
        ff_interm_features (int): See :py:func:`_get_encoder`.
        ff_interm_dropout (float): See :py:func:`_get_encoder`.
        dropout (float): See :py:func:`_get_encoder`.
        layer_norm_first (bool): See :py:func:`_get_encoder`.
        layer_drop (float): See :py:func:`_get_encoder`.

    """
    feature_projection = FeatureProjection(in_features, embed_dim, dropout_input)
    pos_conv = ConvolutionalPositionalEmbedding(embed_dim, pos_conv_kernel, pos_conv_groups)

    # Original impl
    # https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L768-L782
    encoder_layers = nn.ModuleList()
    for i in range(num_layers):
        if use_attention[i]:
            attention = WavLMSelfAttention(
                embed_dim=embed_dim,
                total_num_heads=total_num_heads[i],
                remaining_heads=remaining_heads[i],
                dropout=attention_dropout,
                has_relative_attention_bias=(i == 0),  # Position embedding is only necessary in the first layer.
                num_buckets=num_buckets,
                max_distance=max_distance,
                prune_heads=prune_attention_heads,
                prune_layer=prune_attention_layer,
            )
        else:
            attention = None
        if use_feed_forward[i]:
            feed_forward = FeedForward(
                io_features=embed_dim,
                intermediate_features=ff_interm_features[i],
                intermediate_dropout=ff_interm_dropout,
                output_dropout=dropout,
                prune_intermediate=prune_feed_forward_intermediate,
                prune_layer=prune_feed_forward_layer,
            )
        else:
            feed_forward = None
        encoder_layers.append(
            EncoderLayer(
                attention=attention,
                dropout=dropout,
                layer_norm_first=layer_norm_first,
                feed_forward=feed_forward,
                embed_dim=embed_dim,
            )
        )
    transformer = Transformer(
        pos_conv_embed=pos_conv,
        dropout=dropout,
        layers=encoder_layers,
        layer_norm_first=not layer_norm_first,
        layer_drop=layer_drop,
    )
    return Encoder(feature_projection, transformer)


def _get_padding_mask(input: Tensor, lengths: Tensor) -> Tensor:
    """Generate the padding mask given the padded input and the lengths Tensors.
    Args:
        input (Tensor): The padded Tensor of dimension `[batch, max_len, frequency]`.
        lengths (Tensor): The lengths Tensor of dimension `[batch,]`.

    Returns:
        (Tensor): The padding mask.
    """
    batch_size, max_len, _ = input.shape
    mask = torch.arange(max_len, device=lengths.device).expand(batch_size, max_len) >= lengths[:, None]
    return mask


class GradMultiply(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, scale):
        ctx.scale = scale
        res = x.new(x)
        return res

    @staticmethod
    def backward(ctx, grad):
        return grad * ctx.scale, None
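`GradMultiply` is the identity in the forward pass and scales only the gradient; wav2vec 2.0-style training uses it to down-weight the feature extractor's gradient relative to the encoder's. A minimal check:

import torch

x = torch.randn(3, requires_grad=True)
y = GradMultiply.apply(x, 0.1)  # forward returns x unchanged
y.sum().backward()
print(x.grad)  # all entries are 0.1: gradients scaled by the given factor
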
vencoder/dphubert/hardconcrete.py
ADDED
@@ -0,0 +1,122 @@
"""Implementation of the hard Concrete distribution.

Originally from:
https://github.com/asappresearch/flop/blob/master/flop/hardconcrete.py

"""

import math

import torch
import torch.nn as nn


class HardConcrete(nn.Module):
    """A HardConcrete module.
    Use this module to create a mask of size N, which you can
    then use to perform L0 regularization.

    To obtain a mask, simply run a forward pass through the module
    with no input data. The mask is sampled in training mode, and
    fixed during evaluation mode, e.g.:

    >>> module = HardConcrete(n_in=100)
    >>> mask = module()
    >>> norm = module.l0_norm()
    """

    def __init__(
        self,
        n_in: int,
        init_mean: float = 0.5,
        init_std: float = 0.01,
        temperature: float = 2/3,  # from CoFi
        stretch: float = 0.1,
        eps: float = 1e-6
    ) -> None:
        """Initialize the HardConcrete module.
        Parameters
        ----------
        n_in : int
            The number of hard concrete variables in this mask.
        init_mean : float, optional
            Initial drop rate for hard concrete parameter,
            by default 0.5.
        init_std : float, optional
            Used to initialize the hard concrete parameters,
            by default 0.01.
        temperature : float, optional
            Temperature used to control the sharpness of the
            distribution, by default 2/3.
        stretch : float, optional
            Stretch the sampled value from [0, 1] to the interval
            [-stretch, 1 + stretch], by default 0.1.
        """
        super().__init__()

        self.n_in = n_in
        self.limit_l = -stretch
        self.limit_r = 1.0 + stretch
        self.log_alpha = nn.Parameter(torch.zeros(n_in))
        self.beta = temperature
        self.init_mean = init_mean
        self.init_std = init_std
        self.bias = -self.beta * math.log(-self.limit_l / self.limit_r)

        self.eps = eps
        self.compiled_mask = None
        self.reset_parameters()

    def reset_parameters(self):
        """Reset the parameters of this module."""
        self.compiled_mask = None
        mean = math.log(1 - self.init_mean) - math.log(self.init_mean)
        self.log_alpha.data.normal_(mean, self.init_std)

    def l0_norm(self) -> torch.Tensor:
        """Compute the expected L0 norm of this mask.
        Returns
        -------
        torch.Tensor
            The expected L0 norm.
        """
        return (self.log_alpha + self.bias).sigmoid().sum()

    def forward(self) -> torch.Tensor:
        """Sample a hard concrete mask.
        Returns
        -------
        torch.Tensor
            The sampled binary mask
        """
        if self.training:
            # Reset the compiled mask
            self.compiled_mask = None
            # Sample mask dynamically
            u = self.log_alpha.new(self.n_in).uniform_(self.eps, 1 - self.eps)
            s = torch.sigmoid((torch.log(u / (1 - u)) + self.log_alpha) / self.beta)
            s = s * (self.limit_r - self.limit_l) + self.limit_l
            mask = s.clamp(min=0., max=1.)

        else:
            # Compile new mask if not cached
            if self.compiled_mask is None:
                # Get expected sparsity
                expected_num_zeros = self.n_in - self.l0_norm().item()
                num_zeros = round(expected_num_zeros)
                # Approximate expected value of each mask variable z;
                # We use an empirically validated magic number 0.8
                soft_mask = torch.sigmoid(self.log_alpha / self.beta * 0.8)
                # Prune small values to set to 0
                _, indices = torch.topk(soft_mask, k=num_zeros, largest=False)
                soft_mask[indices] = 0.
                self.compiled_mask = soft_mask
            mask = self.compiled_mask

        return mask

    def extra_repr(self) -> str:
        return str(self.n_in)

    def __repr__(self) -> str:
        return "{}({})".format(self.__class__.__name__, self.extra_repr())
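Putting the pieces together, the usual training pattern suggested by the docstring is: sample a fresh mask each step, gate the parameters with it, and add the expected L0 norm to the loss so the optimizer is pushed to close gates. A minimal sketch (the 0.05 penalty weight is an arbitrary illustrative value):

import torch

gate = HardConcrete(n_in=8)
weight = torch.nn.Parameter(torch.randn(8))
opt = torch.optim.SGD(list(gate.parameters()) + [weight], lr=0.1)

gate.train()
for _ in range(100):
    mask = gate()                              # stochastic mask in training mode
    out = (weight * mask).sum()
    loss = out ** 2 + 0.05 * gate.l0_norm()    # task loss + expected-L0 penalty
    opt.zero_grad()
    loss.backward()
    opt.step()

gate.eval()
print(gate())  # deterministic compiled mask; the weakest gates are exactly 0
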
vencoder/dphubert/model.py
ADDED
@@ -0,0 +1,966 @@
"""Speech SSL models supporting pruning.

Originally from:
https://github.com/pytorch/audio/blob/main/torchaudio/models/wav2vec2/model.py

"""

import math
from typing import List, Optional, Tuple

import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Module

from . import components


class Wav2Vec2Model(Module):
    """Acoustic model used in *wav2vec 2.0* :cite:`baevski2020wav2vec`.

    Note:
        To build the model, please use one of the factory functions.
        :py:func:`wav2vec2_model`, :py:func:`wav2vec2_base`, :py:func:`wav2vec2_large`,
        :py:func:`wav2vec2_large_lv60k`, :py:func:`hubert_base`, :py:func:`hubert_large`,
        and :py:func:`hubert_xlarge`.

    See Also:
        * :class:`torchaudio.pipelines.Wav2Vec2Bundle`: Pretrained models (without fine-tuning)
        * :class:`torchaudio.pipelines.Wav2Vec2ASRBundle`: ASR pipelines with pretrained models.

    Args:
        feature_extractor (torch.nn.Module):
            Feature extractor that extracts feature vectors from raw audio Tensor.

        encoder (torch.nn.Module):
            Encoder that converts the audio features into the sequence of probability
            distribution (in negative log-likelihood) over labels.

        aux (torch.nn.Module or None, optional):
            Auxiliary module. If provided, the output from encoder is passed to this module.
    """  # noqa: E501

    def __init__(
        self,
        normalize_waveform: bool,
        feature_extractor: Module,
        encoder: Module,
        aux: Optional[Module] = None,
    ):
        super().__init__()
        self.normalize_waveform = normalize_waveform
        self.feature_extractor = feature_extractor
        self.encoder = encoder
        self.aux = aux

    @torch.jit.export
    def extract_features(
        self,
        waveforms: Tensor,
        lengths: Optional[Tensor] = None,
        num_layers: Optional[int] = None,
    ) -> Tuple[List[Tensor], Optional[Tensor]]:
        """Extract feature vectors from raw waveforms

        This returns the list of outputs from the intermediate layers of
        transformer block in encoder.

        Args:
            waveforms (Tensor): Audio tensor of shape `(batch, frames)`.
            lengths (Tensor or None, optional):
                Indicates the valid length of each audio in the batch.
                Shape: `(batch, )`.
                When the ``waveforms`` contains audios with different durations,
                by providing ``lengths`` argument, the model will compute
                the corresponding valid output lengths and apply proper mask in
                transformer attention layer.
                If ``None``, it is assumed that the entire audio waveform
                length is valid.
            num_layers (int or None, optional):
                If given, limit the number of intermediate layers to go through.
                Providing `1` will stop the computation after going through one
                intermediate layer. If not given, the outputs from all the
                intermediate layers are returned.

        Returns:
            (List[Tensor], Optional[Tensor]):
            List of Tensors
                Features from requested layers.
                Each Tensor is of shape: `(batch, time frame, feature dimension)`
            Tensor or None
                If ``lengths`` argument was provided, a Tensor of shape `(batch, )`
                is returned.
                It indicates the valid length in time axis of each feature Tensor.
        """
        if self.normalize_waveform:
            if lengths is not None:
                waveforms = [
                    F.layer_norm(wave[:length], (length,)) for wave, length in zip(waveforms, lengths)
                ]
                waveforms = torch.nn.utils.rnn.pad_sequence(waveforms, batch_first=True)
            else:
                waveforms = F.layer_norm(waveforms, waveforms.shape[-1:])

        x, lengths = self.feature_extractor(waveforms, lengths)
        x = self.encoder.extract_features(x, lengths, num_layers)  # (num_layers+1,), including the input
        return x, lengths

    def get_num_params(self):
        """Calculate the current size."""
        feature_extractor_size, encoder_in_features = self.feature_extractor.get_num_params_and_final_out_channels()
        encoder_size = self.encoder.get_num_params(encoder_in_features)
        return feature_extractor_size + encoder_size

    def prune(self):
        self.eval()  # must be in eval mode
        conv_config, conv_out_index = self.feature_extractor.prune()  # [(output_channel, kernel_size, stride), ...]
        transformer_config = self.encoder.prune(conv_out_index)  # NOTE: this is a defaultdict(list)
        use_attention = transformer_config["use_attention"]
        use_feed_forward = transformer_config["use_feed_forward"]
        num_heads = transformer_config["num_heads"]  # can be []
        remaining_heads = transformer_config["remaining_heads"]  # can be []
        ff_interm_features = transformer_config["ff_interm_features"]

        return conv_config, use_attention, use_feed_forward, num_heads, remaining_heads, ff_interm_features

    def forward(
        self,
        waveforms: Tensor,
        lengths: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Compute the sequence of probability distribution over labels.

        Args:
            waveforms (Tensor): Audio tensor of shape `(batch, frames)`.
            lengths (Tensor or None, optional):
                Indicates the valid length of each audio in the batch.
                Shape: `(batch, )`.
                When the ``waveforms`` contains audios with different durations,
                by providing ``lengths`` argument, the model will compute
                the corresponding valid output lengths and apply proper mask in
                transformer attention layer.
                If ``None``, it is assumed that all the audio in ``waveforms``
                have valid length. Default: ``None``.

        Returns:
            (Tensor, Optional[Tensor]):
            Tensor
                The sequences of probability distribution (in logit) over labels.
                Shape: `(batch, frames, num labels)`.
            Tensor or None
                If ``lengths`` argument was provided, a Tensor of shape `(batch, )`
                is returned.
                It indicates the valid length in time axis of the output Tensor.
        """
        if self.normalize_waveform:
            if lengths is not None:
                waveforms = [
                    F.layer_norm(wave[:length], (length,)) for wave, length in zip(waveforms, lengths)
                ]
                waveforms = torch.nn.utils.rnn.pad_sequence(waveforms, batch_first=True)
            else:
                waveforms = F.layer_norm(waveforms, waveforms.shape[-1:])

        x, lengths = self.feature_extractor(waveforms, lengths)
        x = self.encoder(x, lengths)
        if self.aux is not None:
            x = self.aux(x)
        return x, lengths

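A hedged end-to-end sketch of the feature-extraction path documented above. The config mirrors the Base architecture values from the docstrings, with per-layer options expanded into lists as this file's builders expect; exact outputs depend on the random weights.

import torch

model = wav2vec2_model(
    extractor_mode="group_norm",
    extractor_conv_layer_config=None,
    extractor_conv_bias=False,
    encoder_embed_dim=768,
    encoder_projection_dropout=0.1,
    encoder_pos_conv_kernel=128,
    encoder_pos_conv_groups=16,
    encoder_num_layers=12,
    encoder_use_attention=[True] * 12,
    encoder_use_feed_forward=[True] * 12,
    encoder_num_heads=[12] * 12,
    encoder_head_dim=64,
    encoder_attention_dropout=0.1,
    encoder_ff_interm_features=[3072] * 12,
    encoder_ff_interm_dropout=0.1,
    encoder_dropout=0.1,
    encoder_layer_norm_first=False,
    encoder_layer_drop=0.1,
    aux_num_out=None,
    normalize_waveform=False,
)
model.eval()

waveforms = torch.randn(2, 16000)        # two 1-second clips at 16 kHz
lengths = torch.tensor([16000, 12000])   # the second clip is partly padding
with torch.no_grad():
    feats, out_lengths = model.extract_features(waveforms, lengths, num_layers=4)
# len(feats) == 5: the projected conv features plus four transformer layers,
# each of shape (batch, frames, 768); out_lengths gives the valid frame counts.
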
def wav2vec2_model(**configs) -> Wav2Vec2Model:
    """Wraps the original wav2vec2_model and wavlm_model."""

    if "encoder_remaining_heads" in configs:
        return wavlm_model(**configs)

    return wav2vec2_model_original(**configs)


def wav2vec2_model_original(
    extractor_mode: str,
    extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]],
    extractor_conv_bias: bool,
    encoder_embed_dim: int,
    encoder_projection_dropout: float,
    encoder_pos_conv_kernel: int,
    encoder_pos_conv_groups: int,
    encoder_num_layers: int,
    encoder_use_attention: List[bool],
    encoder_use_feed_forward: List[bool],
    encoder_num_heads: List[int],
    encoder_head_dim: int,
    encoder_attention_dropout: float,
    encoder_ff_interm_features: List[int],
    encoder_ff_interm_dropout: float,
    encoder_dropout: float,
    encoder_layer_norm_first: bool,
    encoder_layer_drop: float,
    aux_num_out: Optional[int],
    normalize_waveform: bool,
    extractor_prune_conv_channels: bool = False,
    encoder_prune_attention_heads: bool = False,
    encoder_prune_attention_layer: bool = False,
    encoder_prune_feed_forward_intermediate: bool = False,
    encoder_prune_feed_forward_layer: bool = False,
) -> Wav2Vec2Model:
    """Builds custom :class:`~torchaudio.models.Wav2Vec2Model`.

    Note:
        The "feature extractor" below corresponds to
        `ConvFeatureExtractionModel <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L736>`__
        in the original ``fairseq`` implementation.
        This is referred to as "(convolutional) feature encoder" in the *wav2vec 2.0*
        :cite:`baevski2020wav2vec` paper.

        The "encoder" below corresponds to `TransformerEncoder <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L817>`__,
        and this is referred to as "Transformer" in the paper.

    Args:
        extractor_mode (str): Operation mode of feature extractor.
            Valid values are ``"group_norm"`` or ``"layer_norm"``.
            If ``"group_norm"``, then a single normalization is applied
            in the first convolution block. Otherwise, all the convolution
            blocks will have layer normalization.

            This option corresponds to ``extractor_mode`` from ``fairseq``.
        extractor_conv_layer_config (list of integer tuples or None):
            Configuration of convolution layers in feature extractor.
            List of convolution configuration,
            i.e. ``[(output_channel, kernel_size, stride), ...]``

            If ``None`` is provided, then the following default value is used.

            .. code-block:: python

                [
                    (512, 10, 5),
                    (512, 3, 2),
                    (512, 3, 2),
                    (512, 3, 2),
                    (512, 3, 2),
                    (512, 2, 2),
                    (512, 2, 2),
                ]

            This option corresponds to ``conv_feature_layers`` from ``fairseq``.

        extractor_conv_bias (bool):
            Whether to include bias term to each convolution operation.

            This option corresponds to ``conv_bias`` from ``fairseq``.

        encoder_embed_dim (int):
            The dimension of embedding in encoder.

            This option corresponds to ``encoder_embed_dim`` from ``fairseq``.

        encoder_projection_dropout (float):
            The dropout probability applied after the input feature is projected
            to ``encoder_embed_dim``.

            This option corresponds to ``dropout_input`` from ``fairseq``.

        encoder_pos_conv_kernel (int):
            The kernel size of convolutional positional embeddings.

            This option corresponds to ``conv_pos`` from ``fairseq``.

        encoder_pos_conv_groups (int):
            The number of groups of convolutional positional embeddings.

            This option corresponds to ``conv_pos_groups`` from ``fairseq``.

        encoder_num_layers (int):
            The number of self attention layers in transformer block.

            This option corresponds to ``encoder_layers`` from ``fairseq``.

        encoder_num_heads (int):
            The number of heads in self attention layers.

            This option corresponds to ``encoder_attention_heads`` from ``fairseq``.

        encoder_attention_dropout (float):
            The dropout probability applied after softmax in self-attention layer.

            This option corresponds to ``attention_dropout`` from ``fairseq``.

        encoder_ff_interm_features (int):
            The dimension of hidden features in feed forward layer.

            This option corresponds to ``encoder_ffn_embed_dim`` from ``fairseq``.

        encoder_ff_interm_dropout (float):
            The dropout probability applied in feedforward layer.

            This option corresponds to ``activation_dropout`` from ``fairseq``.

        encoder_dropout (float):
            The dropout probability applied at the end of feed forward layer.

            This option corresponds to ``dropout`` from ``fairseq``.

        encoder_layer_norm_first (bool):
            Control the order of layer norm in transformer layer and each encoder layer.
            If True, in transformer layer, layer norm is applied before features are fed
            to encoder layers. In encoder layer, two layer norms are applied before and after
            self attention.
            If False, in transformer layer, layer norm is applied after features are fed
            to encoder layers. In encoder layer, two layer norms are applied after self
            attention, before and after feed forward.

            This option corresponds to ``layer_norm_first`` from ``fairseq``.

        encoder_layer_drop (float):
            Probability to drop each encoder layer during training.

            This option corresponds to ``layerdrop`` from ``fairseq``.

        aux_num_out (int or None):
            When provided, attach an extra linear layer on top of encoder, which can be
            used for fine-tuning.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """  # noqa: E501
    if extractor_conv_layer_config is None:
        extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2

    feature_extractor = components._get_feature_extractor(
        extractor_mode, extractor_conv_layer_config, extractor_conv_bias,
        prune_conv_channels=extractor_prune_conv_channels,
    )
    encoder = components._get_encoder(
        in_features=extractor_conv_layer_config[-1][0],
        embed_dim=encoder_embed_dim,
        dropout_input=encoder_projection_dropout,
        pos_conv_kernel=encoder_pos_conv_kernel,
        pos_conv_groups=encoder_pos_conv_groups,
        num_layers=encoder_num_layers,
        use_attention=encoder_use_attention,
        use_feed_forward=encoder_use_feed_forward,
        num_heads=encoder_num_heads,
        head_dim=encoder_head_dim,
        attention_dropout=encoder_attention_dropout,
        ff_interm_features=encoder_ff_interm_features,
        ff_interm_dropout=encoder_ff_interm_dropout,
        dropout=encoder_dropout,
        layer_norm_first=encoder_layer_norm_first,
        layer_drop=encoder_layer_drop,
        prune_attention_heads=encoder_prune_attention_heads,
        prune_attention_layer=encoder_prune_attention_layer,
        prune_feed_forward_intermediate=encoder_prune_feed_forward_intermediate,
        prune_feed_forward_layer=encoder_prune_feed_forward_layer,
    )
    aux = None
    if aux_num_out is not None:
        aux = torch.nn.Linear(in_features=encoder_embed_dim, out_features=aux_num_out)
    return Wav2Vec2Model(normalize_waveform, feature_extractor, encoder, aux)

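Note that the convenience factories that follow (`wav2vec2_base`, `wav2vec2_large`) still pass scalar `encoder_num_heads` and `encoder_ff_interm_features`, so for a prunable model the explicit `wav2vec2_model` call with per-layer lists appears to be the reliable route. A sketch of the prune round trip, assuming `SelfAttention.prune()` earlier in components.py mirrors the WavLM variant shown there:

# Sketch: build a small gated model, then materialize the pruned architecture.
cfg = dict(
    extractor_mode="group_norm", extractor_conv_layer_config=None, extractor_conv_bias=False,
    encoder_embed_dim=768, encoder_projection_dropout=0.0,
    encoder_pos_conv_kernel=128, encoder_pos_conv_groups=16,
    encoder_num_layers=2,
    encoder_use_attention=[True] * 2, encoder_use_feed_forward=[True] * 2,
    encoder_num_heads=[12] * 2, encoder_head_dim=64,
    encoder_attention_dropout=0.0, encoder_ff_interm_features=[3072] * 2,
    encoder_ff_interm_dropout=0.0, encoder_dropout=0.0,
    encoder_layer_norm_first=False, encoder_layer_drop=0.0,
    aux_num_out=None, normalize_waveform=False,
)
model = wav2vec2_model(
    **cfg,
    encoder_prune_attention_heads=True,
    encoder_prune_feed_forward_intermediate=True,
)
# ... distillation / sparsity training on the HardConcrete gates goes here ...
model.eval()  # prune() requires deterministic gates
(conv_config, use_attention, use_feed_forward,
 num_heads, remaining_heads, ff_interm_features) = model.prune()
# The modules are now shrunk in place; the returned per-layer lists describe the
# new architecture and can be fed back through wav2vec2_model to rebuild it.
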
def wav2vec2_base(
    encoder_projection_dropout: float = 0.1,
    encoder_attention_dropout: float = 0.1,
    encoder_ff_interm_dropout: float = 0.1,
    encoder_dropout: float = 0.1,
    encoder_layer_drop: float = 0.1,
    aux_num_out: Optional[int] = None,
    extractor_prune_conv_channels: bool = False,
    encoder_prune_attention_heads: bool = False,
    encoder_prune_attention_layer: bool = False,
    encoder_prune_feed_forward_intermediate: bool = False,
    encoder_prune_feed_forward_layer: bool = False,
) -> Wav2Vec2Model:
    """Builds "base" :class:`~torchaudio.models.Wav2Vec2Model` from *wav2vec 2.0* :cite:`baevski2020wav2vec`

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """  # noqa: E501
    return wav2vec2_model(
        extractor_mode="group_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=768,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=12,
        encoder_num_heads=12,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=3072,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=False,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
        extractor_prune_conv_channels=extractor_prune_conv_channels,
        encoder_prune_attention_heads=encoder_prune_attention_heads,
        encoder_prune_attention_layer=encoder_prune_attention_layer,
        encoder_prune_feed_forward_intermediate=encoder_prune_feed_forward_intermediate,
        encoder_prune_feed_forward_layer=encoder_prune_feed_forward_layer,
    )

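A minimal usage sketch, not part of the commit: it assumes the builder is importable from ``vencoder.dphubert.model``, that the per-layer arguments of ``wav2vec2_model`` have sensible defaults, and that the model keeps torchaudio's ``extract_features`` interface.

import torch
from vencoder.dphubert.model import wav2vec2_base

model = wav2vec2_base().eval()      # randomly initialised, no pretrained weights
waveform = torch.randn(1, 16000)    # one second of 16 kHz audio
with torch.no_grad():
    features, _ = model.extract_features(waveform)   # assumed torchaudio-style API
print(len(features), features[-1].shape)             # per-layer features; last is (1, frames, 768)
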
def wav2vec2_large(
    encoder_projection_dropout: float = 0.1,
    encoder_attention_dropout: float = 0.1,
    encoder_ff_interm_dropout: float = 0.1,
    encoder_dropout: float = 0.1,
    encoder_layer_drop: float = 0.1,
    aux_num_out: Optional[int] = None,
    extractor_prune_conv_channels: bool = False,
    encoder_prune_attention_heads: bool = False,
    encoder_prune_attention_layer: bool = False,
    encoder_prune_feed_forward_intermediate: bool = False,
    encoder_prune_feed_forward_layer: bool = False,
) -> Wav2Vec2Model:
    """Builds "large" :class:`~torchaudio.models.Wav2Vec2Model` from *wav2vec 2.0* :cite:`baevski2020wav2vec`

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """  # noqa: E501
    return wav2vec2_model(
        extractor_mode="group_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=1024,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=24,
        encoder_num_heads=16,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=4096,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=False,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
        extractor_prune_conv_channels=extractor_prune_conv_channels,
        encoder_prune_attention_heads=encoder_prune_attention_heads,
        encoder_prune_attention_layer=encoder_prune_attention_layer,
        encoder_prune_feed_forward_intermediate=encoder_prune_feed_forward_intermediate,
        encoder_prune_feed_forward_layer=encoder_prune_feed_forward_layer,
    )

def wav2vec2_large_lv60k(
    encoder_projection_dropout: float = 0.1,
    encoder_attention_dropout: float = 0.0,
    encoder_ff_interm_dropout: float = 0.1,
    encoder_dropout: float = 0.0,
    encoder_layer_drop: float = 0.1,
    aux_num_out: Optional[int] = None,
    extractor_prune_conv_channels: bool = False,
    encoder_prune_attention_heads: bool = False,
    encoder_prune_attention_layer: bool = False,
    encoder_prune_feed_forward_intermediate: bool = False,
    encoder_prune_feed_forward_layer: bool = False,
) -> Wav2Vec2Model:
    """Builds "large lv-60k" :class:`~torchaudio.models.Wav2Vec2Model` from *wav2vec 2.0* :cite:`baevski2020wav2vec`

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """  # noqa: E501
    return wav2vec2_model(
        extractor_mode="layer_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=True,
        encoder_embed_dim=1024,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=24,
        encoder_num_heads=16,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=4096,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=True,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
        extractor_prune_conv_channels=extractor_prune_conv_channels,
        encoder_prune_attention_heads=encoder_prune_attention_heads,
        encoder_prune_attention_layer=encoder_prune_attention_layer,
        encoder_prune_feed_forward_intermediate=encoder_prune_feed_forward_intermediate,
        encoder_prune_feed_forward_layer=encoder_prune_feed_forward_layer,
    )

def hubert_base(
    encoder_projection_dropout: float = 0.1,
    encoder_attention_dropout: float = 0.1,
    encoder_ff_interm_dropout: float = 0.0,
    encoder_dropout: float = 0.1,
    encoder_layer_drop: float = 0.05,
    aux_num_out: Optional[int] = None,
    extractor_prune_conv_channels: bool = False,
    encoder_prune_attention_heads: bool = False,
    encoder_prune_attention_layer: bool = False,
    encoder_prune_feed_forward_intermediate: bool = False,
    encoder_prune_feed_forward_layer: bool = False,
) -> Wav2Vec2Model:
    """Builds "base" :class:`HuBERT <torchaudio.models.Wav2Vec2Model>` from *HuBERT* :cite:`hsu2021hubert`

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """  # noqa: E501
    return wav2vec2_model(
        extractor_mode="group_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=768,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=12,
        encoder_use_attention=[True] * 12,
        encoder_use_feed_forward=[True] * 12,
        encoder_num_heads=[12] * 12,
        encoder_head_dim=64,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=[3072] * 12,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=False,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
        extractor_prune_conv_channels=extractor_prune_conv_channels,
        encoder_prune_attention_heads=encoder_prune_attention_heads,
        encoder_prune_attention_layer=encoder_prune_attention_layer,
        encoder_prune_feed_forward_intermediate=encoder_prune_feed_forward_intermediate,
        encoder_prune_feed_forward_layer=encoder_prune_feed_forward_layer,
    )

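The ``prune_*`` flags are what distinguish this copy from stock torchaudio: they install the HardConcrete gates (see ``vencoder/dphubert/hardconcrete.py`` in this commit) used for DPHuBERT-style structured pruning. A hedged sketch of enabling them (import path assumed from the repository layout):

from vencoder.dphubert.model import hubert_base

# Enable learnable pruning gates on every structured component.
model = hubert_base(
    extractor_prune_conv_channels=True,
    encoder_prune_attention_heads=True,
    encoder_prune_attention_layer=True,
    encoder_prune_feed_forward_intermediate=True,
    encoder_prune_feed_forward_layer=True,
)
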
def hubert_large(
    encoder_projection_dropout: float = 0.0,
    encoder_attention_dropout: float = 0.0,
    encoder_ff_interm_dropout: float = 0.0,
    encoder_dropout: float = 0.0,
    encoder_layer_drop: float = 0.0,
    aux_num_out: Optional[int] = None,
    extractor_prune_conv_channels: bool = False,
    encoder_prune_attention_heads: bool = False,
    encoder_prune_attention_layer: bool = False,
    encoder_prune_feed_forward_intermediate: bool = False,
    encoder_prune_feed_forward_layer: bool = False,
) -> Wav2Vec2Model:
    """Builds "large" :class:`HuBERT <torchaudio.models.Wav2Vec2Model>` from *HuBERT* :cite:`hsu2021hubert`

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """  # noqa: E501
    return wav2vec2_model(
        extractor_mode="layer_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=1024,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=24,
        encoder_num_heads=16,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=4096,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=True,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
        extractor_prune_conv_channels=extractor_prune_conv_channels,
        encoder_prune_attention_heads=encoder_prune_attention_heads,
        encoder_prune_attention_layer=encoder_prune_attention_layer,
        encoder_prune_feed_forward_intermediate=encoder_prune_feed_forward_intermediate,
        encoder_prune_feed_forward_layer=encoder_prune_feed_forward_layer,
    )

def hubert_xlarge(
    encoder_projection_dropout: float = 0.0,
    encoder_attention_dropout: float = 0.0,
    encoder_ff_interm_dropout: float = 0.0,
    encoder_dropout: float = 0.0,
    encoder_layer_drop: float = 0.0,
    aux_num_out: Optional[int] = None,
    extractor_prune_conv_channels: bool = False,
    encoder_prune_attention_heads: bool = False,
    encoder_prune_attention_layer: bool = False,
    encoder_prune_feed_forward_intermediate: bool = False,
    encoder_prune_feed_forward_layer: bool = False,
) -> Wav2Vec2Model:
    """Builds "extra large" :class:`HuBERT <torchaudio.models.Wav2Vec2Model>` from *HuBERT* :cite:`hsu2021hubert`

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int or None, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """  # noqa: E501
    return wav2vec2_model(
        extractor_mode="layer_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=1280,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=48,
        encoder_num_heads=16,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=5120,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=True,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
        extractor_prune_conv_channels=extractor_prune_conv_channels,
        encoder_prune_attention_heads=encoder_prune_attention_heads,
        encoder_prune_attention_layer=encoder_prune_attention_layer,
        encoder_prune_feed_forward_intermediate=encoder_prune_feed_forward_intermediate,
        encoder_prune_feed_forward_layer=encoder_prune_feed_forward_layer,
    )

def _init_hubert_pretrain_model(module):
    if isinstance(module, components.LayerNorm):
        torch.nn.init.kaiming_normal_(module.conv.weight)
    elif isinstance(module, components.ConvolutionalPositionalEmbedding):
        # normalize the weight to normal distribution.
        std = math.sqrt(4.0 / (module.embed_dim * module.kernel_size))
        torch.nn.init.normal_(module.conv.weight, mean=0.0, std=std)
        torch.nn.init.constant_(module.conv.bias, 0.0)
    elif isinstance(module, components.SelfAttention):
        # normalize the query, key, value, and out_proj parameters in self attention module.
        torch.nn.init.xavier_uniform_(module.k_proj.weight, gain=1 / math.sqrt(2))
        torch.nn.init.xavier_uniform_(module.v_proj.weight, gain=1 / math.sqrt(2))
        torch.nn.init.xavier_uniform_(module.q_proj.weight, gain=1 / math.sqrt(2))
        torch.nn.init.xavier_uniform_(module.out_proj.weight)
        torch.nn.init.constant_(module.out_proj.bias, 0.0)
    elif isinstance(module, components.Transformer):
        module.apply(components._init_transformer_params)
    else:
        pass

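The initializer above is written to be passed to ``torch.nn.Module.apply``; a one-line sketch (model construction assumed from the builders above):

model = hubert_base()
model.apply(_init_hubert_pretrain_model)  # recursively re-initialises matching submodules
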
def wavlm_model(
    extractor_mode: str,
    extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]],
    extractor_conv_bias: bool,
    encoder_embed_dim: int,
    encoder_projection_dropout: float,
    encoder_pos_conv_kernel: int,
    encoder_pos_conv_groups: int,
    encoder_num_layers: int,
    encoder_use_attention: List[bool],
    encoder_use_feed_forward: List[bool],
    encoder_total_num_heads: List[int],
    encoder_remaining_heads: List[List[int]],
    encoder_num_buckets: int,
    encoder_max_distance: int,
    encoder_attention_dropout: float,
    encoder_ff_interm_features: List[int],
    encoder_ff_interm_dropout: float,
    encoder_dropout: float,
    encoder_layer_norm_first: bool,
    encoder_layer_drop: float,
    aux_num_out: Optional[int],
    normalize_waveform: bool,
    extractor_prune_conv_channels: bool = False,
    encoder_prune_attention_heads: bool = False,
    encoder_prune_attention_layer: bool = False,
    encoder_prune_feed_forward_intermediate: bool = False,
    encoder_prune_feed_forward_layer: bool = False,
) -> Wav2Vec2Model:
    """Builds custom WavLM model :cite:`chen2022wavlm`. The architecture is compatible
    with Wav2Vec2 model :cite:`baevski2020wav2vec`, and so the output object is
    :class:`~torchaudio.models.Wav2Vec2Model`. Most of the arguments have the same meaning
    as in :py:func:`wav2vec2_model` so please refer there for documentation.

    Args:
        extractor_mode (str): Operation mode of feature extractor.
            See :py:func:`wav2vec2_model`.

        extractor_conv_layer_config (list of integer tuples or None):
            See :py:func:`wav2vec2_model`.

        extractor_conv_bias (bool):
            See :py:func:`wav2vec2_model`.

        encoder_embed_dim (int):
            See :py:func:`wav2vec2_model`.

        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.

        encoder_pos_conv_kernel (int):
            See :py:func:`wav2vec2_model`.

        encoder_pos_conv_groups (int):
            See :py:func:`wav2vec2_model`.

        encoder_num_layers (int):
            See :py:func:`wav2vec2_model`.

        encoder_use_attention (list of bool):
            Whether the attention block is kept in each encoder layer.
        encoder_use_feed_forward (list of bool):
            Whether the feed-forward block is kept in each encoder layer.
        encoder_total_num_heads (list of int):
            Total number of attention heads in each encoder layer.
        encoder_remaining_heads (list of list of int):
            Indices of the attention heads kept in each encoder layer after pruning.

        encoder_num_buckets (int):
            Number of buckets for relative position embedding.
        encoder_max_distance (int):
            Maximum distance for relative position embedding.

        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.

        encoder_ff_interm_features (list of int):
            See :py:func:`wav2vec2_model`.

        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.

        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.

        encoder_layer_norm_first (bool):
            See :py:func:`wav2vec2_model`.

        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.

        aux_num_out (int or None):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """
    if extractor_conv_layer_config is None:
        extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2

    feature_extractor = components._get_feature_extractor(
        extractor_mode, extractor_conv_layer_config, extractor_conv_bias,
        prune_conv_channels=extractor_prune_conv_channels,
    )
    encoder = components._get_wavlm_encoder(
        in_features=extractor_conv_layer_config[-1][0],
        embed_dim=encoder_embed_dim,
        dropout_input=encoder_projection_dropout,
        pos_conv_kernel=encoder_pos_conv_kernel,
        pos_conv_groups=encoder_pos_conv_groups,
        num_layers=encoder_num_layers,
        use_attention=encoder_use_attention,
        use_feed_forward=encoder_use_feed_forward,
        total_num_heads=encoder_total_num_heads,
        remaining_heads=encoder_remaining_heads,
        num_buckets=encoder_num_buckets,
        max_distance=encoder_max_distance,
        attention_dropout=encoder_attention_dropout,
        ff_interm_features=encoder_ff_interm_features,
        ff_interm_dropout=encoder_ff_interm_dropout,
        dropout=encoder_dropout,
        layer_norm_first=encoder_layer_norm_first,
        layer_drop=encoder_layer_drop,
        prune_attention_heads=encoder_prune_attention_heads,
        prune_attention_layer=encoder_prune_attention_layer,
        prune_feed_forward_intermediate=encoder_prune_feed_forward_intermediate,
        prune_feed_forward_layer=encoder_prune_feed_forward_layer,
    )
    aux = None
    if aux_num_out is not None:
        aux = torch.nn.Linear(in_features=encoder_embed_dim, out_features=aux_num_out)
    return Wav2Vec2Model(normalize_waveform, feature_extractor, encoder, aux)

def wavlm_base(
    encoder_projection_dropout: float = 0.1,
    encoder_attention_dropout: float = 0.1,
    encoder_ff_interm_dropout: float = 0.1,
    encoder_dropout: float = 0.1,
    encoder_layer_drop: float = 0.1,
    aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
    """Builds "base" WavLM model :cite:`chen2022wavlm`. The architecture is compatible
    with Wav2Vec2 model :cite:`baevski2020wav2vec`, and so the output class is
    :class:`~torchaudio.models.Wav2Vec2Model`.

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """
    # NOTE: the per-layer list arguments and ``normalize_waveform`` below mirror
    # ``_get_config_wavlm`` in the Hugging Face importer; the committed version
    # passed scalar ``encoder_num_heads``/``encoder_ff_interm_features``, which
    # ``wavlm_model`` does not accept.
    return wavlm_model(
        extractor_mode="group_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=768,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=12,
        encoder_use_attention=[True] * 12,
        encoder_use_feed_forward=[True] * 12,
        encoder_total_num_heads=[12] * 12,
        encoder_remaining_heads=[list(range(12)) for _ in range(12)],
        encoder_num_buckets=320,
        encoder_max_distance=800,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=[3072] * 12,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=False,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
        normalize_waveform=False,
    )

def wavlm_large(
    encoder_projection_dropout: float = 0.1,
    encoder_attention_dropout: float = 0.1,
    encoder_ff_interm_dropout: float = 0.0,
    encoder_dropout: float = 0.1,
    encoder_layer_drop: float = 0.1,
    aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
    """Builds "large" WavLM model :cite:`chen2022wavlm`. The architecture is compatible
    with Wav2Vec2 model :cite:`baevski2020wav2vec`, and so the output class is
    :class:`~torchaudio.models.Wav2Vec2Model`.

    Args:
        encoder_projection_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_attention_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_dropout (float):
            See :py:func:`wav2vec2_model`.
        encoder_layer_drop (float):
            See :py:func:`wav2vec2_model`.
        aux_num_out (int, optional):
            See :py:func:`wav2vec2_model`.

    Returns:
        Wav2Vec2Model:
            The resulting model.
    """
    # NOTE: per-layer lists and ``normalize_waveform`` mirror ``_get_config_wavlm``,
    # as in ``wavlm_base`` above.
    return wavlm_model(
        extractor_mode="layer_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=1024,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=24,
        encoder_use_attention=[True] * 24,
        encoder_use_feed_forward=[True] * 24,
        encoder_total_num_heads=[16] * 24,
        encoder_remaining_heads=[list(range(16)) for _ in range(24)],
        encoder_num_buckets=320,
        encoder_max_distance=800,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_features=[4096] * 24,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_norm_first=True,
        encoder_layer_drop=encoder_layer_drop,
        aux_num_out=aux_num_out,
        normalize_waveform=True,
    )
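A hedged sketch of building the WavLM topologies above (randomly initialised; pretrained weights come from the Hugging Face importer in ``import_huggingface_wavlm.py`` later in this diff):

from vencoder.dphubert.model import wavlm_base, wavlm_large

base = wavlm_base().eval()     # 12 layers, 768-dim
large = wavlm_large().eval()   # 24 layers, 1024-dim
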
vencoder/dphubert/pruning_utils.py
ADDED
@@ -0,0 +1,51 @@
"""Utility functions for pruning."""

from typing import Union

import torch
import torch.nn as nn


def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: str):
    """Prune linear layer in place."""
    # NOTE: weight: (out_features, in_features), bias: (out_features,)
    if dim == "input":
        dim = 1
        layer.in_features = len(index)
    elif dim == "output":
        dim = 0
        layer.out_features = len(index)
    else:
        raise ValueError

    layer.weight = nn.Parameter(layer.weight.index_select(dim, index).clone().detach())
    if layer.bias is not None and dim == 0:
        layer.bias = nn.Parameter(layer.bias.index_select(0, index).clone().detach())


def prune_conv1d_layer(layer: nn.Conv1d, index: torch.LongTensor, dim: str):
    """Prune conv1d in place."""
    # NOTE: weight: (out_channels, in_channels, kernel_size), bias: (out_channels,)
    if dim == "input":
        dim = 1
        layer.in_channels = len(index)
    elif dim == "output":
        dim = 0
        layer.out_channels = len(index)
    else:
        raise ValueError

    layer.weight = nn.Parameter(layer.weight.index_select(dim, index).clone().detach())
    if layer.bias is not None and dim == 0:
        layer.bias = nn.Parameter(layer.bias.index_select(0, index).clone().detach())


def prune_layer_norm(layernorm: Union[nn.LayerNorm, nn.GroupNorm], index: torch.LongTensor):
    """Prune layer norm or group norm in place."""
    layernorm.weight = nn.Parameter(layernorm.weight.index_select(0, index).clone().detach())
    layernorm.bias = nn.Parameter(layernorm.bias.index_select(0, index).clone().detach())
    if isinstance(layernorm, nn.LayerNorm):
        layernorm.normalized_shape = (len(index),)
    elif isinstance(layernorm, nn.GroupNorm):
        layernorm.num_groups = len(index)
        layernorm.num_channels = len(index)
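A toy sketch of what these helpers do to a layer's shape (illustrative, not from the commit):

import torch
import torch.nn as nn

layer = nn.Linear(8, 4)
keep = torch.tensor([0, 2, 5], dtype=torch.long)
prune_linear_layer(layer, keep, dim="input")   # keep 3 of the 8 input features
assert layer.in_features == 3 and layer.weight.shape == (4, 3)
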
vencoder/dphubert/utils/__init__.py
ADDED
File without changes
|
vencoder/dphubert/utils/import_huggingface_wavlm.py
ADDED
@@ -0,0 +1,129 @@
"""Import Hugging Face Transformers' wav2vec 2.0 pretrained weights into torchaudio's format.

Originally from:
https://github.com/pytorch/audio/blob/main/torchaudio/models/wav2vec2/utils/import_huggingface.py

"""

import logging
from typing import Any, Dict

from torch.nn import Module

from ..model import wav2vec2_model, Wav2Vec2Model, wavlm_model

_LG = logging.getLogger(__name__)


def _get_config(cfg):
    config = {
        "extractor_mode": f"{cfg.feat_extract_norm}_norm",
        "extractor_conv_layer_config": list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)),
        "extractor_conv_bias": cfg.conv_bias,
        "encoder_embed_dim": cfg.hidden_size,
        "encoder_projection_dropout": cfg.feat_proj_dropout,
        "encoder_pos_conv_kernel": cfg.num_conv_pos_embeddings,
        "encoder_pos_conv_groups": cfg.num_conv_pos_embedding_groups,
        "encoder_num_layers": cfg.num_hidden_layers,
        "encoder_num_heads": cfg.num_attention_heads,
        "encoder_attention_dropout": cfg.attention_dropout,
        "encoder_ff_interm_features": cfg.intermediate_size,
        "encoder_ff_interm_dropout": cfg.activation_dropout,
        "encoder_dropout": cfg.hidden_dropout,
        "encoder_layer_norm_first": cfg.do_stable_layer_norm,
        "encoder_layer_drop": cfg.layerdrop,
    }
    return config


def _get_config_wavlm(cfg):
    config = {
        "extractor_mode": f"{cfg.feat_extract_norm}_norm",
        "extractor_conv_layer_config": list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)),
        "extractor_conv_bias": cfg.conv_bias,
        "encoder_embed_dim": cfg.hidden_size,
        "encoder_projection_dropout": cfg.feat_proj_dropout,
        "encoder_pos_conv_kernel": cfg.num_conv_pos_embeddings,
        "encoder_pos_conv_groups": cfg.num_conv_pos_embedding_groups,
        "encoder_num_layers": cfg.num_hidden_layers,
        "encoder_use_attention": [True] * cfg.num_hidden_layers,
        "encoder_use_feed_forward": [True] * cfg.num_hidden_layers,
        "encoder_total_num_heads": [cfg.num_attention_heads for _ in range(cfg.num_hidden_layers)],
        "encoder_remaining_heads": [list(range(cfg.num_attention_heads)) for _ in range(cfg.num_hidden_layers)],
        "encoder_num_buckets": cfg.num_buckets,
        "encoder_max_distance": cfg.max_bucket_distance,
        "encoder_attention_dropout": cfg.attention_dropout,
        "encoder_ff_interm_features": [cfg.intermediate_size for _ in range(cfg.num_hidden_layers)],
        "encoder_ff_interm_dropout": cfg.activation_dropout,
        "encoder_dropout": cfg.hidden_dropout,
        "encoder_layer_norm_first": cfg.do_stable_layer_norm,
        "encoder_layer_drop": cfg.layerdrop,
        "normalize_waveform": cfg.feat_extract_norm == "layer",
    }
    return config


def _build(config, original):
    is_for_ctc = original.__class__.__name__ in ["Wav2Vec2ForCTC", "WavLMForCTC"]
    if is_for_ctc:
        aux_num_out = original.config.vocab_size
        wav2vec2 = original.wav2vec2
    else:
        _LG.warning(
            "The model is not an instance of Wav2Vec2ForCTC or WavLMForCTC. " '"lm_head" module is not imported.'
        )
        aux_num_out = None
        wav2vec2 = original
    is_wavlm = original.__class__.__name__ in ["WavLMModel", "WavLMForCTC"]
    if is_wavlm:
        imported = wavlm_model(**config, aux_num_out=aux_num_out)
    else:
        imported = wav2vec2_model(**config, aux_num_out=aux_num_out)
    print(imported.feature_extractor.load_state_dict(wav2vec2.feature_extractor.state_dict(), strict=False))
    print(imported.encoder.feature_projection.load_state_dict(wav2vec2.feature_projection.state_dict(), strict=False))
    encoder_state_dict = wav2vec2.encoder.state_dict()
    if is_wavlm:  # Rename parameters of linear transformations for compatibility with the HF model
        transform_wavlm_encoder_state(encoder_state_dict, config["encoder_num_layers"])
    print(imported.encoder.transformer.load_state_dict(encoder_state_dict, strict=False))
    if is_for_ctc:
        imported.aux.load_state_dict(original.lm_head.state_dict())
    return imported


def transform_wavlm_encoder_state(state: Dict[str, Any], encoder_num_layers: int):
    """Converts WavLM encoder state from HuggingFace format. In particular, concatenates linear projection weights and
    biases to align with the structure of ``torch.nn.MultiheadAttention``.
    """
    pass  # left as a no-op in this version; the state dict above is loaded with strict=False


def import_huggingface_model(original: Module) -> Wav2Vec2Model:
    """Builds :class:`Wav2Vec2Model` from the corresponding model object of
    `Transformers <https://huggingface.co/transformers/>`_.

    Args:
        original (torch.nn.Module): An instance of ``Wav2Vec2ForCTC`` from ``transformers``.

    Returns:
        Wav2Vec2Model: Imported model.

    Example
        >>> from torchaudio.models.wav2vec2.utils import import_huggingface_model
        >>>
        >>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
        >>> model = import_huggingface_model(original)
        >>>
        >>> waveforms, _ = torchaudio.load("audio.wav")
        >>> logits, _ = model(waveforms)
    """
    _LG.info("Importing model.")
    _LG.info("Loading model configuration.")
    is_wavlm = original.__class__.__name__ in ["WavLMModel", "WavLMForCTC"]
    if is_wavlm:
        config = _get_config_wavlm(original.config)
    else:
        config = _get_config(original.config)
    _LG.debug("  - config: %s", config)
    _LG.info("Building model.")
    imported = _build(config, original)
    return imported
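A hedged sketch of the WavLM import path, mirroring the wav2vec2 example in the docstring above (the checkpoint name is illustrative). Note that ``transform_wavlm_encoder_state`` is a no-op in this version, so encoder weights load through ``strict=False``:

from transformers import WavLMModel

original = WavLMModel.from_pretrained("microsoft/wavlm-base")
model = import_huggingface_model(original).eval()
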
vencoder/encoder.py
ADDED
@@ -0,0 +1,12 @@
class SpeechEncoder(object):
    def __init__(self, vec_path="pretrain/checkpoint_best_legacy_500.pt", device=None):
        self.model = None  # the underlying encoder model instance
        self.hidden_dim = 768

    def encoder(self, wav):
        """
        input: wav: [batchsize, signal_length]
        output: embedding: [batchsize, hidden_dim, wav_frame]
        """
        pass
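The class above is an informal interface; each encoder under ``vencoder/`` subclasses it. A minimal illustrative subclass (names and shapes assumed from the docstring, not from the commit):

import torch

class ZeroEncoder(SpeechEncoder):
    def __init__(self, vec_path="", device=None):
        super().__init__(vec_path, device)
        self.hidden_dim = 768

    def encoder(self, wav):
        # One 768-dim frame per 320 input samples, as a shape-compatible stand-in.
        batch, length = wav.shape
        return torch.zeros(batch, self.hidden_dim, length // 320)
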
vencoder/hubert/__init__.py
ADDED
File without changes
|
vencoder/hubert/hubert_model.py
ADDED
@@ -0,0 +1,222 @@
import copy
import random
from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as t_func
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present


class Hubert(nn.Module):
    def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
        super().__init__()
        self._mask = mask
        self.feature_extractor = FeatureExtractor()
        self.feature_projection = FeatureProjection()
        self.positional_embedding = PositionalConvEmbedding()
        self.norm = nn.LayerNorm(768)
        self.dropout = nn.Dropout(0.1)
        self.encoder = TransformerEncoder(
            nn.TransformerEncoderLayer(
                768, 12, 3072, activation="gelu", batch_first=True
            ),
            12,
        )
        self.proj = nn.Linear(768, 256)

        self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
        self.label_embedding = nn.Embedding(num_label_embeddings, 256)

    def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        mask = None
        if self.training and self._mask:
            mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
            x[mask] = self.masked_spec_embed.to(x.dtype)
        return x, mask

    def encode(
        self, x: torch.Tensor, layer: Optional[int] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        x = self.feature_extractor(x)
        x = self.feature_projection(x.transpose(1, 2))
        x, mask = self.mask(x)
        x = x + self.positional_embedding(x)
        x = self.dropout(self.norm(x))
        x = self.encoder(x, output_layer=layer)
        return x, mask

    def logits(self, x: torch.Tensor) -> torch.Tensor:
        logits = torch.cosine_similarity(
            x.unsqueeze(2),
            self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
            dim=-1,
        )
        return logits / 0.1

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        x, mask = self.encode(x)
        x = self.proj(x)
        logits = self.logits(x)
        return logits, mask


class HubertSoft(Hubert):
    def __init__(self):
        super().__init__()

    @torch.inference_mode()
    def units(self, wav: torch.Tensor) -> torch.Tensor:
        wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
        x, _ = self.encode(wav)
        return self.proj(x)


class FeatureExtractor(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
        self.norm0 = nn.GroupNorm(512, 512)
        self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
        self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
        self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
        self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
        self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
        self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = t_func.gelu(self.norm0(self.conv0(x)))
        x = t_func.gelu(self.conv1(x))
        x = t_func.gelu(self.conv2(x))
        x = t_func.gelu(self.conv3(x))
        x = t_func.gelu(self.conv4(x))
        x = t_func.gelu(self.conv5(x))
        x = t_func.gelu(self.conv6(x))
        return x


class FeatureProjection(nn.Module):
    def __init__(self):
        super().__init__()
        self.norm = nn.LayerNorm(512)
        self.projection = nn.Linear(512, 768)
        self.dropout = nn.Dropout(0.1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.norm(x)
        x = self.projection(x)
        x = self.dropout(x)
        return x


class PositionalConvEmbedding(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv1d(
            768,
            768,
            kernel_size=128,
            padding=128 // 2,
            groups=16,
        )
        self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.conv(x.transpose(1, 2))
        x = t_func.gelu(x[:, :, :-1])
        return x.transpose(1, 2)


class TransformerEncoder(nn.Module):
    def __init__(
        self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
    ) -> None:
        super(TransformerEncoder, self).__init__()
        self.layers = nn.ModuleList(
            [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
        )
        self.num_layers = num_layers

    def forward(
        self,
        src: torch.Tensor,
        mask: torch.Tensor = None,
        src_key_padding_mask: torch.Tensor = None,
        output_layer: Optional[int] = None,
    ) -> torch.Tensor:
        output = src
        for layer in self.layers[:output_layer]:
            output = layer(
                output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
            )
        return output


def _compute_mask(
    shape: Tuple[int, int],
    mask_prob: float,
    mask_length: int,
    device: torch.device,
    min_masks: int = 0,
) -> torch.Tensor:
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}"
        )

    # compute number of masked spans in batch
    num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
    num_masked_spans = max(num_masked_spans, min_masks)

    # make sure num masked indices <= sequence_length
    if num_masked_spans * mask_length > sequence_length:
        num_masked_spans = sequence_length // mask_length

    # SpecAugment mask to fill
    mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)

    # uniform distribution to sample from, make sure that offset samples are < sequence_length
    uniform_dist = torch.ones(
        (batch_size, sequence_length - (mask_length - 1)), device=device
    )

    # get random indices to mask
    mask_indices = torch.multinomial(uniform_dist, num_masked_spans)

    # expand masked indices to masked spans
    mask_indices = (
        mask_indices.unsqueeze(dim=-1)
        .expand((batch_size, num_masked_spans, mask_length))
        .reshape(batch_size, num_masked_spans * mask_length)
    )
    offsets = (
        torch.arange(mask_length, device=device)[None, None, :]
        .expand((batch_size, num_masked_spans, mask_length))
        .reshape(batch_size, num_masked_spans * mask_length)
    )
    mask_idxs = mask_indices + offsets

    # scatter indices to mask
    mask = mask.scatter(1, mask_idxs, True)

    return mask


def hubert_soft(
    path: str,
) -> HubertSoft:
    r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
    Args:
        path (str): path of a pretrained model
    """
    hubert = HubertSoft()
    checkpoint = torch.load(path)
    consume_prefix_in_state_dict_if_present(checkpoint, "module.")
    hubert.load_state_dict(checkpoint)
    hubert.eval()
    return hubert
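A hedged usage sketch for the loader above (checkpoint path illustrative; the input is mono 16 kHz audio shaped ``(batch, 1, samples)``):

import torch

hubert = hubert_soft("pretrain/hubert-soft.pt")   # illustrative checkpoint path
wav = torch.randn(1, 1, 16000)                    # one second of 16 kHz audio
units = hubert.units(wav)                         # (1, frames, 256) soft speech units
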
vencoder/hubert/hubert_model_onnx.py
ADDED
@@ -0,0 +1,217 @@
import copy
import random
from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as t_func
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present


class Hubert(nn.Module):
    def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
        super().__init__()
        self._mask = mask
        self.feature_extractor = FeatureExtractor()
        self.feature_projection = FeatureProjection()
        self.positional_embedding = PositionalConvEmbedding()
        self.norm = nn.LayerNorm(768)
        self.dropout = nn.Dropout(0.1)
        self.encoder = TransformerEncoder(
            nn.TransformerEncoderLayer(
                768, 12, 3072, activation="gelu", batch_first=True
            ),
            12,
        )
        self.proj = nn.Linear(768, 256)

        self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
        self.label_embedding = nn.Embedding(num_label_embeddings, 256)

    def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        mask = None
        if self.training and self._mask:
            mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
            x[mask] = self.masked_spec_embed.to(x.dtype)
        return x, mask

    def encode(
        self, x: torch.Tensor, layer: Optional[int] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        x = self.feature_extractor(x)
        x = self.feature_projection(x.transpose(1, 2))
        x, mask = self.mask(x)
        x = x + self.positional_embedding(x)
        x = self.dropout(self.norm(x))
        x = self.encoder(x, output_layer=layer)
        return x, mask

    def logits(self, x: torch.Tensor) -> torch.Tensor:
        logits = torch.cosine_similarity(
            x.unsqueeze(2),
            self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
            dim=-1,
        )
        return logits / 0.1


class HubertSoft(Hubert):
    def __init__(self):
        super().__init__()

    def units(self, wav: torch.Tensor) -> torch.Tensor:
        wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
        x, _ = self.encode(wav)
        return self.proj(x)

    def forward(self, x):
        return self.units(x)


class FeatureExtractor(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
        self.norm0 = nn.GroupNorm(512, 512)
        self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
        self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
        self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
        self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
        self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
        self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = t_func.gelu(self.norm0(self.conv0(x)))
        x = t_func.gelu(self.conv1(x))
        x = t_func.gelu(self.conv2(x))
        x = t_func.gelu(self.conv3(x))
        x = t_func.gelu(self.conv4(x))
        x = t_func.gelu(self.conv5(x))
        x = t_func.gelu(self.conv6(x))
        return x


class FeatureProjection(nn.Module):
    def __init__(self):
        super().__init__()
        self.norm = nn.LayerNorm(512)
        self.projection = nn.Linear(512, 768)
        self.dropout = nn.Dropout(0.1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.norm(x)
        x = self.projection(x)
        x = self.dropout(x)
        return x


class PositionalConvEmbedding(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv1d(
            768,
            768,
            kernel_size=128,
            padding=128 // 2,
            groups=16,
        )
        self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.conv(x.transpose(1, 2))
        x = t_func.gelu(x[:, :, :-1])
        return x.transpose(1, 2)


class TransformerEncoder(nn.Module):
    def __init__(
        self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
    ) -> None:
        super(TransformerEncoder, self).__init__()
        self.layers = nn.ModuleList(
            [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
        )
        self.num_layers = num_layers

    def forward(
        self,
        src: torch.Tensor,
        mask: torch.Tensor = None,
        src_key_padding_mask: torch.Tensor = None,
        output_layer: Optional[int] = None,
    ) -> torch.Tensor:
        output = src
        for layer in self.layers[:output_layer]:
            output = layer(
                output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
            )
        return output


def _compute_mask(
    shape: Tuple[int, int],
    mask_prob: float,
    mask_length: int,
    device: torch.device,
    min_masks: int = 0,
) -> torch.Tensor:
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}"
        )

    # compute number of masked spans in batch
    num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
    num_masked_spans = max(num_masked_spans, min_masks)

    # make sure num masked indices <= sequence_length
    if num_masked_spans * mask_length > sequence_length:
        num_masked_spans = sequence_length // mask_length

    # SpecAugment mask to fill
    mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)

    # uniform distribution to sample from, make sure that offset samples are < sequence_length
    uniform_dist = torch.ones(
        (batch_size, sequence_length - (mask_length - 1)), device=device
    )

    # get random indices to mask
    mask_indices = torch.multinomial(uniform_dist, num_masked_spans)

    # expand masked indices to masked spans
    mask_indices = (
        mask_indices.unsqueeze(dim=-1)
        .expand((batch_size, num_masked_spans, mask_length))
        .reshape(batch_size, num_masked_spans * mask_length)
    )
    offsets = (
        torch.arange(mask_length, device=device)[None, None, :]
        .expand((batch_size, num_masked_spans, mask_length))
        .reshape(batch_size, num_masked_spans * mask_length)
    )
    mask_idxs = mask_indices + offsets

    # scatter indices to mask
    mask = mask.scatter(1, mask_idxs, True)

    return mask


def hubert_soft(
    path: str,
) -> HubertSoft:
    r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
    Args:
        path (str): path of a pretrained model
    """
    hubert = HubertSoft()
    checkpoint = torch.load(path)
    consume_prefix_in_state_dict_if_present(checkpoint, "module.")
    hubert.load_state_dict(checkpoint)
    hubert.eval()
    return hubert
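This file differs from ``hubert_model.py`` mainly in that ``units`` loses its ``torch.inference_mode`` decorator and ``forward`` maps straight to it, which makes the module traceable for export. A hedged export sketch (checkpoint and output paths illustrative):

import torch

model = hubert_soft("pretrain/hubert-soft.pt")
dummy = torch.randn(1, 1, 16000)
torch.onnx.export(
    model, dummy, "hubert_soft.onnx",
    input_names=["source"], output_names=["embed"],
    dynamic_axes={"source": {2: "n_samples"}},
)
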
vencoder/whisper/__init__.py
ADDED
File without changes
|
vencoder/whisper/audio.py
ADDED
@@ -0,0 +1,125 @@
1 |
+
import os
|
2 |
+
from functools import lru_cache
|
3 |
+
from typing import Union
|
4 |
+
|
5 |
+
import ffmpeg
|
6 |
+
import numpy as np
|
7 |
+
import torch
|
8 |
+
import torch.nn.functional as F
|
9 |
+
|
10 |
+
from .utils import exact_div
|
11 |
+
|
12 |
+
from librosa.filters import mel as librosa_mel_fn
|
13 |
+
|
14 |
+
# hard-coded audio hyperparameters
|
15 |
+
SAMPLE_RATE = 16000
|
16 |
+
N_FFT = 400
|
17 |
+
N_MELS = 80
|
18 |
+
HOP_LENGTH = 160
|
19 |
+
CHUNK_LENGTH = 30
|
20 |
+
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
|
21 |
+
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH)  # 3000: number of frames in a mel spectrogram input


def load_audio(file: str, sr: int = SAMPLE_RATE):
    """
    Open an audio file and read it as a mono waveform, resampling as necessary

    Parameters
    ----------
    file: str
        The audio file to open

    sr: int
        The sample rate to resample the audio to, if necessary

    Returns
    -------
    A NumPy array containing the audio waveform, in float32 dtype.
    """
    try:
        # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
        # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
        out, _ = (
            ffmpeg.input(file, threads=0)
            .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
            .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
        )
    except ffmpeg.Error as e:
        raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e

    return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0


def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
    """
    Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
    """
    if torch.is_tensor(array):
        if array.shape[axis] > length:
            array = array.index_select(dim=axis, index=torch.arange(length, device=array.device))

        if array.shape[axis] < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
    else:
        if array.shape[axis] > length:
            array = array.take(indices=range(length), axis=axis)

        if array.shape[axis] < length:
            pad_widths = [(0, 0)] * array.ndim
            pad_widths[axis] = (0, length - array.shape[axis])
            array = np.pad(array, pad_widths)

    return array


@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
    """
    Load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
    Allows decoupling the librosa dependency; saved using:

        np.savez_compressed(
            "mel_filters.npz",
            mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
        )
    """
    assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
    return torch.from_numpy(librosa_mel_fn(sr=SAMPLE_RATE, n_fft=N_FFT, n_mels=n_mels)).to(device)


def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
    """
    Compute the log-Mel spectrogram of the given audio

    Parameters
    ----------
    audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to the audio file, or a NumPy array or Tensor containing the audio waveform at 16 kHz

    n_mels: int
        The number of Mel-frequency filters; only 80 is supported

    Returns
    -------
    torch.Tensor, shape = (80, n_frames)
        A Tensor that contains the Mel spectrogram
    """
    if not torch.is_tensor(audio):
        if isinstance(audio, str):
            audio = load_audio(audio)
        audio = torch.from_numpy(audio)

    window = torch.hann_window(N_FFT).to(audio.device)
    stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
    magnitudes = stft[..., :-1].abs() ** 2

    filters = mel_filters(audio.device, n_mels)
    mel_spec = filters @ magnitudes

    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    return log_spec
vencoder/whisper/decoding.py
ADDED
@@ -0,0 +1,712 @@
from dataclasses import dataclass, field
from typing import Dict, List, Tuple, Iterable, Optional, Sequence, Union, TYPE_CHECKING

import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.distributions import Categorical

from .audio import CHUNK_LENGTH
from .tokenizer import Tokenizer, get_tokenizer
from .utils import compression_ratio

if TYPE_CHECKING:
    from .model import Whisper


@torch.no_grad()
def detect_language(model: "Whisper", mel: Tensor, tokenizer: Tokenizer = None) -> Tuple[Tensor, List[dict]]:
    """
    Detect the spoken language in the audio, and return it as a list of strings, along with the ids
    of the most probable language tokens and the probability distribution over all language tokens.
    This is performed outside the main decode loop in order to not interfere with kv-caching.

    Returns
    -------
    language_tokens : Tensor, shape = (n_audio,)
        ids of the most probable language tokens, which appear after the startoftranscript token.
    language_probs : List[Dict[str, float]], length = n_audio
        list of dictionaries containing the probability distribution over all languages.
    """
    if tokenizer is None:
        tokenizer = get_tokenizer(model.is_multilingual)
    if tokenizer.language is None or tokenizer.language_token not in tokenizer.sot_sequence:
        raise ValueError("This model doesn't have language tokens so it can't perform lang id")

    single = mel.ndim == 2
    if single:
        mel = mel.unsqueeze(0)

    # skip encoder forward pass if already-encoded audio features were given
    if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state):
        mel = model.encoder(mel)

    # forward pass using a single token, startoftranscript
    n_audio = mel.shape[0]
    x = torch.tensor([[tokenizer.sot]] * n_audio).to(mel.device)  # [n_audio, 1]
    logits = model.logits(x, mel)[:, 0]

    # collect detected languages; suppress all non-language tokens
    mask = torch.ones(logits.shape[-1], dtype=torch.bool)
    mask[list(tokenizer.all_language_tokens)] = False
    logits[:, mask] = -np.inf
    language_tokens = logits.argmax(dim=-1)
    language_token_probs = logits.softmax(dim=-1).cpu()
    language_probs = [
        {
            c: language_token_probs[i, j].item()
            for j, c in zip(tokenizer.all_language_tokens, tokenizer.all_language_codes)
        }
        for i in range(n_audio)
    ]

    if single:
        language_tokens = language_tokens[0]
        language_probs = language_probs[0]

    return language_tokens, language_probs

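
# Hypothetical usage sketch (not part of the original file): language
# identification for one clip. Assumes `model` is an already-loaded
# multilingual Whisper instance (see model.py further down) in float32;
# "speech.wav" is a placeholder path.
from vencoder.whisper.audio import N_FRAMES, load_audio, pad_or_trim, log_mel_spectrogram

mel = pad_or_trim(log_mel_spectrogram(load_audio("speech.wav")), N_FRAMES).to(model.device)
language_tokens, language_probs = model.detect_language(mel)
print(max(language_probs, key=language_probs.get))  # most probable language code, e.g. "en"
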
@dataclass(frozen=True)
class DecodingOptions:
    task: str = "transcribe"  # whether to perform X->X "transcribe" or X->English "translate"
    language: Optional[str] = None  # language that the audio is in; uses detected language if None

    # sampling-related options
    temperature: float = 0.0
    sample_len: Optional[int] = None  # maximum number of tokens to sample
    best_of: Optional[int] = None  # number of independent samples to collect, when t > 0
    beam_size: Optional[int] = None  # number of beams in beam search, when t == 0
    patience: Optional[float] = None  # patience in beam search (https://arxiv.org/abs/2204.05424)

    # options for ranking generations (either beams or best-of-N samples)
    length_penalty: Optional[float] = None  # "alpha" in Google NMT, None defaults to length norm

    # prompt, prefix, and token suppression
    prompt: Optional[Union[str, List[int]]] = None  # text or tokens for the previous context
    prefix: Optional[Union[str, List[int]]] = None  # text or tokens to prefix the current context
    suppress_blank: bool = True  # this will suppress blank outputs

    # list of token ids (or comma-separated token ids) to suppress
    # "-1" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()`
    suppress_tokens: Optional[Union[str, Iterable[int]]] = "-1"

    # timestamp sampling options
    without_timestamps: bool = False  # use <|notimestamps|> to sample text tokens only
    max_initial_timestamp: Optional[float] = 1.0  # the initial timestamp cannot be later than this

    # implementation details
    fp16: bool = True  # use fp16 for most of the calculation

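
# Illustrative only (not part of the original file): the defaults above give
# greedy transcription; a non-default configuration looks like this. The
# dataclass is frozen, so all options are fixed at construction time.
options = DecodingOptions(
    task="translate",         # X -> English instead of X -> X transcription
    language="de",            # skip language detection for known-German audio
    beam_size=5,              # beam search; temperature must stay at 0.0
    without_timestamps=True,  # sample text tokens only
    fp16=False,               # full-precision inference, e.g. on CPU
)
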
@dataclass(frozen=True)
class DecodingResult:
    audio_features: Tensor
    language: str
    language_probs: Optional[Dict[str, float]] = None
    tokens: List[int] = field(default_factory=list)
    text: str = ""
    avg_logprob: float = np.nan
    no_speech_prob: float = np.nan
    temperature: float = np.nan
    compression_ratio: float = np.nan


class Inference:
    def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor:
        """Perform a forward pass on the decoder and return per-token logits"""
        raise NotImplementedError

    def rearrange_kv_cache(self, source_indices) -> None:
        """Update the key-value cache according to the updated beams"""
        raise NotImplementedError

    def cleanup_caching(self) -> None:
        """Clean up any resources or hooks after decoding is finished"""
        pass


class PyTorchInference(Inference):
    def __init__(self, model: "Whisper", initial_token_length: int):
        self.model: "Whisper" = model
        self.initial_token_length = initial_token_length
        self.kv_cache = {}
        self.hooks = []

    def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor:
        if not self.kv_cache:
            self.kv_cache, self.hooks = self.model.install_kv_cache_hooks()

        if tokens.shape[-1] > self.initial_token_length:
            # only need to use the last token except in the first forward pass
            tokens = tokens[:, -1:]

        return self.model.decoder(tokens, audio_features, kv_cache=self.kv_cache)

    def cleanup_caching(self):
        for hook in self.hooks:
            hook.remove()

        self.kv_cache = {}
        self.hooks = []

    def rearrange_kv_cache(self, source_indices):
        for module, tensor in self.kv_cache.items():
            # update the key/value cache to contain the selected sequences
            self.kv_cache[module] = tensor[source_indices].detach()


class SequenceRanker:
    def rank(self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]]) -> List[int]:
        """
        Given a list of groups of samples and their cumulative log probabilities,
        return the indices of the samples in each group to select as the final result
        """
        raise NotImplementedError


class MaximumLikelihoodRanker(SequenceRanker):
    """
    Select the sample with the highest log probabilities, penalized using either
    a simple length normalization or Google NMT paper's length penalty
    """

    def __init__(self, length_penalty: Optional[float]):
        self.length_penalty = length_penalty

    def rank(self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]]):
        def scores(logprobs, lengths):
            result = []
            for logprob, length in zip(logprobs, lengths):
                if self.length_penalty is None:
                    penalty = length
                else:
                    # from the Google NMT paper
                    penalty = ((5 + length) / 6) ** self.length_penalty
                result.append(logprob / penalty)
            return result

        # get the sequence with the highest score
        lengths = [[len(t) for t in s] for s in tokens]
        return [np.argmax(scores(p, l)) for p, l in zip(sum_logprobs, lengths)]


class TokenDecoder:
    def reset(self):
        """Initialize any stateful variables for decoding a new sequence"""

    def update(self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor) -> Tuple[Tensor, bool]:
        """Specify how to select the next token, based on the current trace and logits

        Parameters
        ----------
        tokens : Tensor, shape = (n_batch, current_sequence_length)
            all tokens in the context so far, including the prefix and sot_sequence tokens

        logits : Tensor, shape = (n_batch, vocab_size)
            per-token logits of the probability distribution at the current step

        sum_logprobs : Tensor, shape = (n_batch)
            cumulative log probabilities for each sequence

        Returns
        -------
        tokens : Tensor, shape = (n_batch, current_sequence_length + 1)
            the tokens, appended with the selected next token

        completed : bool
            True if all sequences have reached the end of text

        """
        raise NotImplementedError

    def finalize(
        self, tokens: Tensor, sum_logprobs: Tensor
    ) -> Tuple[Sequence[Sequence[Tensor]], List[List[float]]]:
        """Finalize search and return the final candidate sequences

        Parameters
        ----------
        tokens : Tensor, shape = (n_audio, n_group, current_sequence_length)
            all tokens in the context so far, including the prefix and sot_sequence

        sum_logprobs : Tensor, shape = (n_audio, n_group)
            cumulative log probabilities for each sequence

        Returns
        -------
        tokens : Sequence[Sequence[Tensor]], length = n_audio
            sequence of Tensors containing candidate token sequences, for each audio input

        sum_logprobs : List[List[float]], length = n_audio
            sequence of cumulative log probabilities corresponding to the above

        """
        raise NotImplementedError


class GreedyDecoder(TokenDecoder):
    def __init__(self, temperature: float, eot: int):
        self.temperature = temperature
        self.eot = eot

    def update(self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor) -> Tuple[Tensor, bool]:
        temperature = self.temperature
        if temperature == 0:
            next_tokens = logits.argmax(dim=-1)
        else:
            next_tokens = Categorical(logits=logits / temperature).sample()

        logprobs = F.log_softmax(logits.float(), dim=-1)
        current_logprobs = logprobs[torch.arange(logprobs.shape[0]), next_tokens]
        sum_logprobs += current_logprobs * (tokens[:, -1] != self.eot)

        next_tokens[tokens[:, -1] == self.eot] = self.eot
        tokens = torch.cat([tokens, next_tokens[:, None]], dim=-1)

        completed = (tokens[:, -1] == self.eot).all()
        return tokens, completed

    def finalize(self, tokens: Tensor, sum_logprobs: Tensor):
        # make sure each sequence has at least one EOT token at the end
        tokens = F.pad(tokens, (0, 1), value=self.eot)
        return tokens, sum_logprobs.tolist()

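
# Standalone sketch of GreedyDecoder's selection rule on dummy logits (not
# part of the original file): argmax at temperature 0, otherwise a sample from
# the temperature-scaled categorical distribution.
example_logits = torch.tensor([[2.0, 1.0, 0.5], [0.1, 0.2, 3.0]])
example_temperature = 0.7
if example_temperature == 0:
    example_next = example_logits.argmax(dim=-1)  # deterministic choice
else:
    example_next = Categorical(logits=example_logits / example_temperature).sample()
print(example_next)  # one sampled token id per batch row
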
class BeamSearchDecoder(TokenDecoder):
    def __init__(self, beam_size: int, eot: int, inference: Inference, patience: Optional[float] = None):
        self.beam_size = beam_size
        self.eot = eot
        self.inference = inference
        self.patience = patience or 1.0
        self.max_candidates: int = round(beam_size * self.patience)
        self.finished_sequences = None

        assert self.max_candidates > 0, f"Invalid beam size ({beam_size}) or patience ({patience})"

    def reset(self):
        self.finished_sequences = None

    def update(self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor) -> Tuple[Tensor, bool]:
        if tokens.shape[0] % self.beam_size != 0:
            raise ValueError(f"{tokens.shape}[0] % {self.beam_size} != 0")

        n_audio = tokens.shape[0] // self.beam_size
        if self.finished_sequences is None:  # for the first update
            self.finished_sequences = [{} for _ in range(n_audio)]

        logprobs = F.log_softmax(logits.float(), dim=-1)
        next_tokens, source_indices, finished_sequences = [], [], []
        for i in range(n_audio):
            scores, sources, finished = {}, {}, {}

            # STEP 1: calculate the cumulative log probabilities for possible candidates
            for j in range(self.beam_size):
                idx = i * self.beam_size + j
                prefix = tokens[idx].tolist()
                for logprob, token in zip(*logprobs[idx].topk(self.beam_size + 1)):
                    new_logprob = (sum_logprobs[idx] + logprob).item()
                    sequence = tuple(prefix + [token.item()])
                    scores[sequence] = new_logprob
                    sources[sequence] = idx

            # STEP 2: rank the candidates and keep the top beam_size sequences for each audio
            saved = 0
            for sequence in sorted(scores, key=scores.get, reverse=True):
                if sequence[-1] == self.eot:
                    finished[sequence] = scores[sequence]
                else:
                    sum_logprobs[len(next_tokens)] = scores[sequence]
                    next_tokens.append(sequence)
                    source_indices.append(sources[sequence])

                    saved += 1
                    if saved == self.beam_size:
                        break

            finished_sequences.append(finished)

        tokens = torch.tensor(next_tokens, device=tokens.device)
        self.inference.rearrange_kv_cache(source_indices)

        # add newly finished sequences to self.finished_sequences
        assert len(self.finished_sequences) == len(finished_sequences)
        for previously_finished, newly_finished in zip(self.finished_sequences, finished_sequences):
            for seq in sorted(newly_finished, key=newly_finished.get, reverse=True):
                if len(previously_finished) >= self.max_candidates:
                    break  # the candidate list is full
                previously_finished[seq] = newly_finished[seq]

        # mark as completed if all audio has enough number of samples
        completed = all(
            len(sequences) >= self.max_candidates for sequences in self.finished_sequences
        )
        return tokens, completed

    def finalize(self, preceding_tokens: Tensor, sum_logprobs: Tensor):
        # collect all finished sequences, including patience, and add unfinished ones if not enough
        sum_logprobs = sum_logprobs.cpu()
        for i, sequences in enumerate(self.finished_sequences):
            if len(sequences) < self.beam_size:  # when not enough sequences are finished
                for j in list(np.argsort(sum_logprobs[i]))[::-1]:
                    sequence = preceding_tokens[i, j].tolist() + [self.eot]
                    sequences[tuple(sequence)] = sum_logprobs[i][j].item()
                    if len(sequences) >= self.beam_size:
                        break

        tokens: List[List[Tensor]] = [
            [torch.tensor(seq) for seq in sequences.keys()] for sequences in self.finished_sequences
        ]
        sum_logprobs: List[List[float]] = [
            list(sequences.values()) for sequences in self.finished_sequences
        ]
        return tokens, sum_logprobs


class LogitFilter:
    def apply(self, logits: Tensor, tokens: Tensor) -> None:
        """Apply any filtering or masking to logits in-place

        Parameters
        ----------
        logits : Tensor, shape = (n_batch, vocab_size)
            per-token logits of the probability distribution at the current step

        tokens : Tensor, shape = (n_batch, current_sequence_length)
            all tokens in the context so far, including the prefix and sot_sequence tokens

        """
        raise NotImplementedError


class SuppressBlank(LogitFilter):
    def __init__(self, tokenizer: Tokenizer, sample_begin: int):
        self.tokenizer = tokenizer
        self.sample_begin = sample_begin

    def apply(self, logits: Tensor, tokens: Tensor):
        if tokens.shape[1] == self.sample_begin:
            logits[:, self.tokenizer.encode(" ") + [self.tokenizer.eot]] = -np.inf


class SuppressTokens(LogitFilter):
    def __init__(self, suppress_tokens: Sequence[int]):
        self.suppress_tokens = list(suppress_tokens)

    def apply(self, logits: Tensor, tokens: Tensor):
        logits[:, self.suppress_tokens] = -np.inf


class ApplyTimestampRules(LogitFilter):
    def __init__(
        self, tokenizer: Tokenizer, sample_begin: int, max_initial_timestamp_index: Optional[int]
    ):
        self.tokenizer = tokenizer
        self.sample_begin = sample_begin
        self.max_initial_timestamp_index = max_initial_timestamp_index

    def apply(self, logits: Tensor, tokens: Tensor):
        # suppress <|notimestamps|> which is handled by without_timestamps
        if self.tokenizer.no_timestamps is not None:
            logits[:, self.tokenizer.no_timestamps] = -np.inf

        # timestamps have to appear in pairs, except directly before EOT; mask logits accordingly
        for k in range(tokens.shape[0]):
            seq = tokens[k, self.sample_begin :].tolist()
            last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.tokenizer.timestamp_begin
            penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.tokenizer.timestamp_begin

            if last_was_timestamp:
                if penultimate_was_timestamp:  # has to be non-timestamp
                    logits[k, self.tokenizer.timestamp_begin :] = -np.inf
                else:  # cannot be normal text tokens
                    logits[k, : self.tokenizer.eot] = -np.inf

        if tokens.shape[1] == self.sample_begin:
            # suppress generating non-timestamp tokens at the beginning
            logits[:, : self.tokenizer.timestamp_begin] = -np.inf

            # apply the `max_initial_timestamp` option
            if self.max_initial_timestamp_index is not None:
                last_allowed = self.tokenizer.timestamp_begin + self.max_initial_timestamp_index
                logits[:, last_allowed + 1 :] = -np.inf

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = F.log_softmax(logits.float(), dim=-1)
        for k in range(tokens.shape[0]):
            timestamp_logprob = logprobs[k, self.tokenizer.timestamp_begin :].logsumexp(dim=-1)
            max_text_token_logprob = logprobs[k, : self.tokenizer.timestamp_begin].max()
            if timestamp_logprob > max_text_token_logprob:
                logits[k, : self.tokenizer.timestamp_begin] = -np.inf

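
# Standalone demonstration (not part of the original file) of the in-place
# suppression all the LogitFilter subclasses above rely on, using a dummy
# (n_batch=1, vocab_size=5) tensor.
demo_logits = torch.zeros(1, 5)
demo_logits[:, [1, 3]] = -np.inf    # forbid token ids 1 and 3
print(demo_logits.softmax(dim=-1))  # suppressed ids get exactly zero probability
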
class DecodingTask:
    inference: Inference
    sequence_ranker: SequenceRanker
    decoder: TokenDecoder
    logit_filters: List[LogitFilter]

    def __init__(self, model: "Whisper", options: DecodingOptions):
        self.model = model

        language = options.language or "en"
        tokenizer = get_tokenizer(model.is_multilingual, language=language, task=options.task)
        self.tokenizer: Tokenizer = tokenizer
        self.options: DecodingOptions = self._verify_options(options)

        self.n_group: int = options.beam_size or options.best_of or 1
        self.n_ctx: int = model.dims.n_text_ctx
        self.sample_len: int = options.sample_len or model.dims.n_text_ctx // 2

        self.sot_sequence: Tuple[int] = tokenizer.sot_sequence
        if self.options.without_timestamps:
            self.sot_sequence = tokenizer.sot_sequence_including_notimestamps

        self.initial_tokens: Tuple[int] = self._get_initial_tokens()
        self.sample_begin: int = len(self.initial_tokens)
        self.sot_index: int = self.initial_tokens.index(tokenizer.sot)

        # inference: implements the forward pass through the decoder, including kv caching
        self.inference = PyTorchInference(model, len(self.initial_tokens))

        # sequence ranker: implements how to rank a group of sampled sequences
        self.sequence_ranker = MaximumLikelihoodRanker(options.length_penalty)

        # decoder: implements how to select the next tokens, given the autoregressive distribution
        if options.beam_size is not None:
            self.decoder = BeamSearchDecoder(
                options.beam_size, tokenizer.eot, self.inference, options.patience
            )
        else:
            self.decoder = GreedyDecoder(options.temperature, tokenizer.eot)

        # logit filters: applies various rules to suppress or penalize certain tokens
        self.logit_filters = []
        if self.options.suppress_blank:
            self.logit_filters.append(SuppressBlank(self.tokenizer, self.sample_begin))
        if self.options.suppress_tokens:
            self.logit_filters.append(SuppressTokens(self._get_suppress_tokens()))
        if not options.without_timestamps:
            precision = CHUNK_LENGTH / model.dims.n_audio_ctx  # usually 0.02 seconds
            max_initial_timestamp_index = None
            if options.max_initial_timestamp:
                max_initial_timestamp_index = round(self.options.max_initial_timestamp / precision)
            self.logit_filters.append(
                ApplyTimestampRules(tokenizer, self.sample_begin, max_initial_timestamp_index)
            )

    def _verify_options(self, options: DecodingOptions) -> DecodingOptions:
        if options.beam_size is not None and options.best_of is not None:
            raise ValueError("beam_size and best_of can't be given together")
        if options.temperature == 0:
            if options.best_of is not None:
                raise ValueError("best_of with greedy sampling (T=0) is not compatible")
        if options.patience is not None and options.beam_size is None:
            raise ValueError("patience requires beam_size to be given")
        if options.length_penalty is not None and not (0 <= options.length_penalty <= 1):
            raise ValueError("length_penalty (alpha) should be a value between 0 and 1")

        return options

    def _get_initial_tokens(self) -> Tuple[int]:
        tokens = list(self.sot_sequence)
        prefix = self.options.prefix
        prompt = self.options.prompt

        if prefix:
            prefix_tokens = (
                self.tokenizer.encode(" " + prefix.strip()) if isinstance(prefix, str) else prefix
            )
            if self.sample_len is not None:
                max_prefix_len = self.n_ctx // 2 - self.sample_len
                prefix_tokens = prefix_tokens[-max_prefix_len:]
            tokens = tokens + prefix_tokens

        if prompt:
            prompt_tokens = (
                self.tokenizer.encode(" " + prompt.strip()) if isinstance(prompt, str) else prompt
            )
            tokens = [self.tokenizer.sot_prev] + prompt_tokens[-(self.n_ctx // 2 - 1) :] + tokens

        return tuple(tokens)

    def _get_suppress_tokens(self) -> Tuple[int]:
        suppress_tokens = self.options.suppress_tokens

        if isinstance(suppress_tokens, str):
            suppress_tokens = [int(t) for t in suppress_tokens.split(",")]

        if -1 in suppress_tokens:
            suppress_tokens = [t for t in suppress_tokens if t >= 0]
            suppress_tokens.extend(self.tokenizer.non_speech_tokens)
        elif suppress_tokens is None or len(suppress_tokens) == 0:
            suppress_tokens = []  # interpret empty string as an empty list
        else:
            assert isinstance(suppress_tokens, list), "suppress_tokens must be a list"

        suppress_tokens.extend(
            [self.tokenizer.sot, self.tokenizer.sot_prev, self.tokenizer.sot_lm]
        )
        if self.tokenizer.no_speech is not None:
            # no-speech probability is collected separately
            suppress_tokens.append(self.tokenizer.no_speech)

        return tuple(sorted(set(suppress_tokens)))

    def _get_audio_features(self, mel: Tensor):
        if self.options.fp16:
            mel = mel.half()

        if mel.shape[-2:] == (self.model.dims.n_audio_ctx, self.model.dims.n_audio_state):
            # encoded audio features are given; skip audio encoding
            print("encoded audio features are given; skip audio encoding")
            audio_features = mel
        else:
            print(mel.shape)
            print("===============================")
            audio_features = self.model.encoder(mel)

        if audio_features.dtype != (torch.float16 if self.options.fp16 else torch.float32):
            raise TypeError(f"audio_features has an incorrect dtype: {audio_features.dtype}")

        return audio_features

    def _detect_language(self, audio_features: Tensor, tokens: Tensor):
        languages = [self.options.language] * audio_features.shape[0]
        lang_probs = None

        if self.options.language is None or self.options.task == "lang_id":
            lang_tokens, lang_probs = self.model.detect_language(audio_features, self.tokenizer)
            languages = [max(probs, key=probs.get) for probs in lang_probs]
            if self.options.language is None:
                tokens[:, self.sot_index + 1] = lang_tokens  # write language tokens

        return languages, lang_probs

    def _main_loop(self, audio_features: Tensor, tokens: Tensor):
        assert audio_features.shape[0] == tokens.shape[0]
        n_batch = tokens.shape[0]
        sum_logprobs: Tensor = torch.zeros(n_batch, device=audio_features.device)
        no_speech_probs = [np.nan] * n_batch

        try:
            for i in range(self.sample_len):
                logits = self.inference.logits(tokens, audio_features)

                if i == 0 and self.tokenizer.no_speech is not None:  # save no_speech_probs
                    probs_at_sot = logits[:, self.sot_index].float().softmax(dim=-1)
                    no_speech_probs = probs_at_sot[:, self.tokenizer.no_speech].tolist()

                # now we need to consider the logits at the last token only
                logits = logits[:, -1]

                # apply the logit filters, e.g. for suppressing or penalizing certain tokens
                for logit_filter in self.logit_filters:
                    logit_filter.apply(logits, tokens)

                # expand the tokens tensor with the selected next tokens
                tokens, completed = self.decoder.update(tokens, logits, sum_logprobs)

                if completed or tokens.shape[-1] > self.n_ctx:
                    break
        finally:
            self.inference.cleanup_caching()

        return tokens, sum_logprobs, no_speech_probs

    @torch.no_grad()
    def run(self, mel: Tensor) -> List[DecodingResult]:
        self.decoder.reset()
        tokenizer: Tokenizer = self.tokenizer
        n_audio: int = mel.shape[0]

        audio_features: Tensor = self._get_audio_features(mel)  # encoder forward pass
        tokens: Tensor = torch.tensor([self.initial_tokens]).repeat(n_audio, 1)

        # detect language if requested, overwriting the language token
        languages, language_probs = self._detect_language(audio_features, tokens)
        if self.options.task == "lang_id":
            return [
                DecodingResult(audio_features=features, language=language, language_probs=probs)
                for features, language, probs in zip(audio_features, languages, language_probs)
            ]

        # repeat the audio & text tensors by the group size, for beam search or best-of-n sampling
        audio_features = audio_features.repeat_interleave(self.n_group, dim=0)
        tokens = tokens.repeat_interleave(self.n_group, dim=0).to(audio_features.device)

        # call the main sampling loop
        tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features, tokens)

        # reshape the tensors to have (n_audio, n_group) as the first two dimensions
        audio_features = audio_features[:: self.n_group]
        no_speech_probs = no_speech_probs[:: self.n_group]
        assert audio_features.shape[0] == len(no_speech_probs) == n_audio

        tokens = tokens.reshape(n_audio, self.n_group, -1)
        sum_logprobs = sum_logprobs.reshape(n_audio, self.n_group)

        # get the final candidates for each group, and slice between the first sampled token and EOT
        tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs)
        tokens: List[List[Tensor]] = [
            [t[self.sample_begin : (t == tokenizer.eot).nonzero()[0, 0]] for t in s] for s in tokens
        ]

        # select the top-ranked sample in each group
        selected = self.sequence_ranker.rank(tokens, sum_logprobs)
        tokens: List[List[int]] = [t[i].tolist() for i, t in zip(selected, tokens)]
        texts: List[str] = [tokenizer.decode(t).strip() for t in tokens]

        sum_logprobs: List[float] = [lp[i] for i, lp in zip(selected, sum_logprobs)]
        avg_logprobs: List[float] = [lp / (len(t) + 1) for t, lp in zip(tokens, sum_logprobs)]

        fields = (texts, languages, tokens, audio_features, avg_logprobs, no_speech_probs)
        if len(set(map(len, fields))) != 1:
            raise RuntimeError(f"inconsistent result lengths: {list(map(len, fields))}")

        return [
            DecodingResult(
                audio_features=features,
                language=language,
                tokens=tokens,
                text=text,
                avg_logprob=avg_logprob,
                no_speech_prob=no_speech_prob,
                temperature=self.options.temperature,
                compression_ratio=compression_ratio(text),
            )
            for text, language, tokens, features, avg_logprob, no_speech_prob in zip(*fields)
        ]


@torch.no_grad()
def decode(model: "Whisper", mel: Tensor, options: DecodingOptions = DecodingOptions()) -> Union[DecodingResult, List[DecodingResult]]:
    """
    Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).

    Parameters
    ----------
    model: Whisper
        the Whisper model instance

    mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
        A tensor containing the Mel spectrogram(s)

    options: DecodingOptions
        A dataclass that contains all necessary options for decoding 30-second segments

    Returns
    -------
    result: Union[DecodingResult, List[DecodingResult]]
        The result(s) of decoding contained in `DecodingResult` dataclass instance(s)
    """
    single = mel.ndim == 2
    if single:
        mel = mel.unsqueeze(0)
    result = DecodingTask(model, options).run(mel)

    if single:
        result = result[0]

    return result
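
# Hypothetical end-to-end sketch (not part of the original file); `model` is
# an already-loaded Whisper instance and "speech.wav" a placeholder path.
from vencoder.whisper.audio import N_FRAMES, load_audio, pad_or_trim, log_mel_spectrogram

mel = pad_or_trim(log_mel_spectrogram(load_audio("speech.wav")), N_FRAMES).to(model.device)
result = decode(model, mel, DecodingOptions(fp16=False))
print(result.language, round(result.avg_logprob, 3))
print(result.text)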
vencoder/whisper/model.py
ADDED
@@ -0,0 +1,269 @@
from dataclasses import dataclass
from typing import Dict, Iterable, Optional

import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch import nn

from .decoding import detect_language as detect_language_function, decode as decode_function


@dataclass
class ModelDimensions:
    n_mels: int
    n_audio_ctx: int
    n_audio_state: int
    n_audio_head: int
    n_audio_layer: int
    n_vocab: int
    n_text_ctx: int
    n_text_state: int
    n_text_head: int
    n_text_layer: int


class LayerNorm(nn.LayerNorm):
    def forward(self, x: Tensor) -> Tensor:
        return super().forward(x.float()).type(x.dtype)


class Linear(nn.Linear):
    def forward(self, x: Tensor) -> Tensor:
        return F.linear(
            x, self.weight.to(x.dtype), None if self.bias is None else self.bias.to(x.dtype)
        )


class Conv1d(nn.Conv1d):
    def _conv_forward(self, x: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor:
        return super()._conv_forward(
            x, weight.to(x.dtype), None if bias is None else bias.to(x.dtype)
        )


def sinusoids(length, channels, max_timescale=10000):
    """Returns sinusoids for positional embedding"""
    assert channels % 2 == 0
    log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
    inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2))
    scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
    return torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)

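
# Quick standalone check of the sinusoidal table above (not part of the
# original file): sin features fill the first half of the channel dimension
# and cos features the second half.
table = sinusoids(length=1500, channels=384)
print(table.shape)                              # torch.Size([1500, 384])
print(table[0, 0].item(), table[0, -1].item())  # sin(0) = 0.0, cos(0) = 1.0
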
class MultiHeadAttention(nn.Module):
    def __init__(self, n_state: int, n_head: int):
        super().__init__()
        self.n_head = n_head
        self.query = Linear(n_state, n_state)
        self.key = Linear(n_state, n_state, bias=False)
        self.value = Linear(n_state, n_state)
        self.out = Linear(n_state, n_state)

    def forward(
        self,
        x: Tensor,
        xa: Optional[Tensor] = None,
        mask: Optional[Tensor] = None,
        kv_cache: Optional[dict] = None,
    ):
        q = self.query(x)

        if kv_cache is None or xa is None or self.key not in kv_cache:
            # hooks, if installed (i.e. kv_cache is not None), will prepend the cached kv tensors;
            # otherwise, perform key/value projections for self- or cross-attention as usual.
            k = self.key(x if xa is None else xa)
            v = self.value(x if xa is None else xa)
        else:
            # for cross-attention, calculate keys and values once and reuse in subsequent calls.
            k = kv_cache[self.key]
            v = kv_cache[self.value]

        wv, qk = self.qkv_attention(q, k, v, mask)
        return self.out(wv), qk

    def qkv_attention(self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None):
        n_batch, n_ctx, n_state = q.shape
        scale = (n_state // self.n_head) ** -0.25
        q = q.view(*q.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) * scale
        k = k.view(*k.shape[:2], self.n_head, -1).permute(0, 2, 3, 1) * scale
        v = v.view(*v.shape[:2], self.n_head, -1).permute(0, 2, 1, 3)

        qk = q @ k
        if mask is not None:
            qk = qk + mask[:n_ctx, :n_ctx]
        qk = qk.float()

        w = F.softmax(qk, dim=-1).to(q.dtype)
        return (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2), qk.detach()


class ResidualAttentionBlock(nn.Module):
    def __init__(self, n_state: int, n_head: int, cross_attention: bool = False):
        super().__init__()

        self.attn = MultiHeadAttention(n_state, n_head)
        self.attn_ln = LayerNorm(n_state)

        self.cross_attn = MultiHeadAttention(n_state, n_head) if cross_attention else None
        self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None

        n_mlp = n_state * 4
        self.mlp = nn.Sequential(Linear(n_state, n_mlp), nn.GELU(), Linear(n_mlp, n_state))
        self.mlp_ln = LayerNorm(n_state)

    def forward(
        self,
        x: Tensor,
        xa: Optional[Tensor] = None,
        mask: Optional[Tensor] = None,
        kv_cache: Optional[dict] = None,
    ):
        x = x + self.attn(self.attn_ln(x), mask=mask, kv_cache=kv_cache)[0]
        if self.cross_attn:
            x = x + self.cross_attn(self.cross_attn_ln(x), xa, kv_cache=kv_cache)[0]
        x = x + self.mlp(self.mlp_ln(x))
        return x


class AudioEncoder(nn.Module):
    def __init__(self, n_mels: int, n_ctx: int, n_state: int, n_head: int, n_layer: int):
        super().__init__()
        self.conv1 = Conv1d(n_mels, n_state, kernel_size=3, padding=1)
        self.conv2 = Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1)
        self.register_buffer("positional_embedding", sinusoids(n_ctx, n_state))

        self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList(
            [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)]
        )
        self.ln_post = LayerNorm(n_state)

    def forward(self, x: Tensor):
        """
        x : torch.Tensor, shape = (batch_size, n_mels, n_ctx)
            the mel spectrogram of the audio
        """
        x = F.gelu(self.conv1(x))
        x = F.gelu(self.conv2(x))
        x = x.permute(0, 2, 1)

        len_x = x.shape[1]
        len_e = self.positional_embedding.shape[0]
        assert len_x <= len_e, "incorrect audio shape"
        pos_e = self.positional_embedding[:len_x, :]
        x = (x + pos_e).to(x.dtype)

        for block in self.blocks:
            x = block(x)

        x = self.ln_post(x)
        return x


class TextDecoder(nn.Module):
    def __init__(self, n_vocab: int, n_ctx: int, n_state: int, n_head: int, n_layer: int):
        super().__init__()

        self.token_embedding = nn.Embedding(n_vocab, n_state)
        self.positional_embedding = nn.Parameter(torch.empty(n_ctx, n_state))

        self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList(
            [ResidualAttentionBlock(n_state, n_head, cross_attention=True) for _ in range(n_layer)]
        )
        self.ln = LayerNorm(n_state)

        mask = torch.empty(n_ctx, n_ctx).fill_(-np.inf).triu_(1)
        self.register_buffer("mask", mask, persistent=False)

    def forward(self, x: Tensor, xa: Tensor, kv_cache: Optional[dict] = None):
        """
        x : torch.LongTensor, shape = (batch_size, <= n_ctx)
            the text tokens
        xa : torch.Tensor, shape = (batch_size, n_audio_ctx, n_audio_state)
            the encoded audio features to be attended on
        """
        offset = next(iter(kv_cache.values())).shape[1] if kv_cache else 0
        x = self.token_embedding(x) + self.positional_embedding[offset : offset + x.shape[-1]]
        x = x.to(xa.dtype)

        for block in self.blocks:
            x = block(x, xa, mask=self.mask, kv_cache=kv_cache)

        x = self.ln(x)
        logits = (x @ torch.transpose(self.token_embedding.weight.to(x.dtype), 0, 1)).float()

        return logits


class Whisper(nn.Module):
    def __init__(self, dims: ModelDimensions):
        super().__init__()
        self.dims = dims
        self.encoder = AudioEncoder(
            self.dims.n_mels,
            self.dims.n_audio_ctx,
            self.dims.n_audio_state,
            self.dims.n_audio_head,
            self.dims.n_audio_layer,
        )
        self.decoder = TextDecoder(
            self.dims.n_vocab,
            self.dims.n_text_ctx,
            self.dims.n_text_state,
            self.dims.n_text_head,
            self.dims.n_text_layer,
        )

    def embed_audio(self, mel: torch.Tensor):
        return self.encoder(mel)

    def logits(self, tokens: torch.Tensor, audio_features: torch.Tensor):
        return self.decoder(tokens, audio_features)

    def forward(self, mel: torch.Tensor, tokens: torch.Tensor) -> Dict[str, torch.Tensor]:
        return self.decoder(tokens, self.encoder(mel))

    @property
    def device(self):
        return next(self.parameters()).device

    @property
    def is_multilingual(self):
        return self.dims.n_vocab == 51865

    def install_kv_cache_hooks(self, cache: Optional[dict] = None):
        """
        The `MultiHeadAttention` module optionally accepts `kv_cache` which stores the key and value
        tensors calculated for the previous positions. This method returns a dictionary that stores
        all caches, and the necessary hooks for the key and value projection modules that save the
        intermediate tensors to be reused during later calculations.

        Returns
        -------
        cache : Dict[nn.Module, torch.Tensor]
            A dictionary object mapping the key/value projection modules to their cache
        hooks : List[RemovableHandle]
            List of PyTorch RemovableHandle objects to stop the hooks from being called
        """
        cache = {**cache} if cache is not None else {}
        hooks = []

        def save_to_cache(module, _, output):
            if module not in cache or output.shape[1] > self.decoder.positional_embedding.shape[0]:
                cache[module] = output  # save as-is, for the first token or cross attention
            else:
                cache[module] = torch.cat([cache[module], output], dim=1).detach()
            return cache[module]

        def install_hooks(layer: nn.Module):
            if isinstance(layer, MultiHeadAttention):
                hooks.append(layer.key.register_forward_hook(save_to_cache))
                hooks.append(layer.value.register_forward_hook(save_to_cache))

        self.decoder.apply(install_hooks)
        return cache, hooks

    detect_language = detect_language_function
    decode = decode_function
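
# Hypothetical sketch of incremental decoding with the kv cache (not part of
# the original file), mirroring PyTorchInference in decoding.py; `model` and
# precomputed `audio_features` are assumed, and 50258 is used here only as an
# illustrative <|startoftranscript|> id.
cache, hooks = model.install_kv_cache_hooks()

prompt = torch.tensor([[50258]])
logits = model.decoder(prompt, audio_features, kv_cache=cache)      # full prompt pass
next_token = logits[:, -1].argmax(dim=-1, keepdim=True)
logits = model.decoder(next_token, audio_features, kv_cache=cache)  # single-token pass

for hook in hooks:  # always remove the hooks once decoding is finished
    hook.remove()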
vencoder/whisper/tokenizer.py
ADDED
@@ -0,0 +1,331 @@
1 |
+
import os
|
2 |
+
from dataclasses import dataclass
|
3 |
+
from functools import lru_cache
|
4 |
+
from typing import List, Optional, Tuple, Union
|
5 |
+
|
6 |
+
import numpy as np
|
7 |
+
import torch
|
8 |
+
from transformers import GPT2TokenizerFast
|
9 |
+
|
10 |
+
LANGUAGES = {
|
11 |
+
"en": "english",
|
12 |
+
"zh": "chinese",
|
13 |
+
"de": "german",
|
14 |
+
"es": "spanish",
|
15 |
+
"ru": "russian",
|
16 |
+
"ko": "korean",
|
17 |
+
"fr": "french",
|
18 |
+
"ja": "japanese",
|
19 |
+
"pt": "portuguese",
|
20 |
+
"tr": "turkish",
|
21 |
+
"pl": "polish",
|
22 |
+
"ca": "catalan",
|
23 |
+
"nl": "dutch",
|
24 |
+
"ar": "arabic",
|
25 |
+
"sv": "swedish",
|
26 |
+
"it": "italian",
|
27 |
+
"id": "indonesian",
|
28 |
+
"hi": "hindi",
|
29 |
+
"fi": "finnish",
|
30 |
+
"vi": "vietnamese",
|
31 |
+
"he": "hebrew",
|
32 |
+
"uk": "ukrainian",
|
33 |
+
"el": "greek",
|
34 |
+
"ms": "malay",
|
35 |
+
"cs": "czech",
|
36 |
+
"ro": "romanian",
|
37 |
+
"da": "danish",
|
38 |
+
"hu": "hungarian",
|
39 |
+
"ta": "tamil",
|
40 |
+
"no": "norwegian",
|
41 |
+
"th": "thai",
|
42 |
+
"ur": "urdu",
|
43 |
+
"hr": "croatian",
|
44 |
+
"bg": "bulgarian",
|
45 |
+
"lt": "lithuanian",
|
46 |
+
"la": "latin",
|
47 |
+
"mi": "maori",
|
48 |
+
"ml": "malayalam",
|
49 |
+
"cy": "welsh",
|
50 |
+
"sk": "slovak",
|
51 |
+
"te": "telugu",
|
52 |
+
"fa": "persian",
|
53 |
+
"lv": "latvian",
|
54 |
+
"bn": "bengali",
|
55 |
+
"sr": "serbian",
|
56 |
+
"az": "azerbaijani",
|
57 |
+
"sl": "slovenian",
|
58 |
+
"kn": "kannada",
|
59 |
+
"et": "estonian",
|
60 |
+
"mk": "macedonian",
|
61 |
+
"br": "breton",
|
62 |
+
"eu": "basque",
|
63 |
+
"is": "icelandic",
|
64 |
+
"hy": "armenian",
|
65 |
+
"ne": "nepali",
|
66 |
+
"mn": "mongolian",
|
67 |
+
"bs": "bosnian",
|
68 |
+
"kk": "kazakh",
|
69 |
+
"sq": "albanian",
|
70 |
+
"sw": "swahili",
|
71 |
+
"gl": "galician",
|
72 |
+
"mr": "marathi",
|
73 |
+
"pa": "punjabi",
|
74 |
+
"si": "sinhala",
|
75 |
+
"km": "khmer",
|
76 |
+
"sn": "shona",
|
77 |
+
"yo": "yoruba",
|
78 |
+
"so": "somali",
|
79 |
+
"af": "afrikaans",
|
80 |
+
"oc": "occitan",
|
81 |
+
"ka": "georgian",
|
82 |
+
"be": "belarusian",
|
83 |
+
"tg": "tajik",
|
84 |
+
"sd": "sindhi",
|
85 |
+
"gu": "gujarati",
|
86 |
+
"am": "amharic",
|
87 |
+
"yi": "yiddish",
|
88 |
+
"lo": "lao",
|
89 |
+
"uz": "uzbek",
|
90 |
+
"fo": "faroese",
|
91 |
+
"ht": "haitian creole",
|
92 |
+
"ps": "pashto",
|
93 |
+
"tk": "turkmen",
|
94 |
+
"nn": "nynorsk",
|
95 |
+
"mt": "maltese",
|
96 |
+
"sa": "sanskrit",
|
97 |
+
"lb": "luxembourgish",
|
98 |
+
"my": "myanmar",
|
99 |
+
"bo": "tibetan",
|
100 |
+
"tl": "tagalog",
|
101 |
+
"mg": "malagasy",
|
102 |
+
"as": "assamese",
|
103 |
+
"tt": "tatar",
|
104 |
+
"haw": "hawaiian",
|
105 |
+
"ln": "lingala",
|
106 |
+
"ha": "hausa",
|
107 |
+
"ba": "bashkir",
|
108 |
+
"jw": "javanese",
|
109 |
+
"su": "sundanese",
|
110 |
+
}
|
111 |
+
|
112 |
+
# language code lookup by name, with a few language aliases
|
113 |
+
TO_LANGUAGE_CODE = {
|
114 |
+
**{language: code for code, language in LANGUAGES.items()},
|
115 |
+
"burmese": "my",
|
116 |
+
"valencian": "ca",
|
117 |
+
"flemish": "nl",
|
118 |
+
"haitian": "ht",
|
119 |
+
"letzeburgesch": "lb",
|
120 |
+
"pushto": "ps",
|
121 |
+
"panjabi": "pa",
|
122 |
+
"moldavian": "ro",
|
123 |
+
"moldovan": "ro",
|
124 |
+
"sinhalese": "si",
|
125 |
+
"castilian": "es",
|
126 |
+
}
|
127 |
+
|
128 |
+
|
129 |
+
@dataclass(frozen=True)
|
130 |
+
class Tokenizer:
|
131 |
+
"""A thin wrapper around `GPT2TokenizerFast` providing quick access to special tokens"""
|
132 |
+
|
133 |
+
tokenizer: "GPT2TokenizerFast"
|
134 |
+
language: Optional[str]
|
135 |
+
sot_sequence: Tuple[int]
|
136 |
+
|
137 |
+
def encode(self, text, **kwargs):
|
138 |
+
return self.tokenizer.encode(text, **kwargs)
|
139 |
+
|
140 |
+
def decode(self, token_ids: Union[int, List[int], np.ndarray, torch.Tensor], **kwargs):
|
141 |
+
return self.tokenizer.decode(token_ids, **kwargs)
|
142 |
+
|
143 |
+
def decode_with_timestamps(self, tokens) -> str:
|
144 |
+
"""
|
145 |
+
Timestamp tokens are above the special tokens' id range and are ignored by `decode()`.
|
146 |
+
This method decodes given tokens with timestamps tokens annotated, e.g. "<|1.08|>".
|
147 |
+
"""
|
148 |
+
outputs = [[]]
|
149 |
+
for token in tokens:
|
150 |
+
if token >= self.timestamp_begin:
|
151 |
+
timestamp = f"<|{(token - self.timestamp_begin) * 0.02:.2f}|>"
|
152 |
+
outputs.append(timestamp)
|
153 |
+
outputs.append([])
|
154 |
+
else:
|
155 |
+
outputs[-1].append(token)
|
156 |
+
outputs = [s if isinstance(s, str) else self.tokenizer.decode(s) for s in outputs]
|
157 |
+
return "".join(outputs)
|
158 |
+
|
159 |
+
@property
|
160 |
+
@lru_cache()
|
161 |
+
def eot(self) -> int:
|
162 |
+
return self.tokenizer.eos_token_id
|
163 |
+
|
164 |
+
@property
|
165 |
+
@lru_cache()
|
166 |
+
def sot(self) -> int:
|
167 |
+
return self._get_single_token_id("<|startoftranscript|>")
|
168 |
+
|
169 |
+
@property
|
170 |
+
@lru_cache()
|
171 |
+
def sot_lm(self) -> int:
|
172 |
+
return self._get_single_token_id("<|startoflm|>")
|
173 |
+
|
174 |
+
@property
|
175 |
+
@lru_cache()
|
176 |
+
def sot_prev(self) -> int:
|
177 |
+
return self._get_single_token_id("<|startofprev|>")
|
178 |
+
|
179 |
+
@property
|
180 |
+
@lru_cache()
|
181 |
+
def no_speech(self) -> int:
|
182 |
+
return self._get_single_token_id("<|nospeech|>")
|
183 |
+
|
184 |
+
@property
|
185 |
+
@lru_cache()
|
186 |
+
def no_timestamps(self) -> int:
|
187 |
+
return self._get_single_token_id("<|notimestamps|>")
|
188 |
+
|
189 |
+
@property
|
190 |
+
@lru_cache()
|
191 |
+
def timestamp_begin(self) -> int:
|
192 |
+
return self.tokenizer.all_special_ids[-1] + 1
|
193 |
+
|
194 |
+
@property
|
195 |
+
@lru_cache()
|
196 |
+
def language_token(self) -> int:
|
197 |
+
"""Returns the token id corresponding to the value of the `language` field"""
|
198 |
+
if self.language is None:
|
199 |
+
raise ValueError(f"This tokenizer does not have language token configured")
|
200 |
+
|
201 |
+
additional_tokens = dict(
|
202 |
+
zip(
|
203 |
+
self.tokenizer.additional_special_tokens,
|
204 |
+
self.tokenizer.additional_special_tokens_ids,
|
205 |
+
)
|
206 |
+
)
|
207 |
+
candidate = f"<|{self.language}|>"
|
208 |
+
if candidate in additional_tokens:
|
209 |
+
return additional_tokens[candidate]
|
210 |
+
|
211 |
+
raise KeyError(f"Language {self.language} not found in tokenizer.")
|
212 |
+
|
213 |
+
@property
|
214 |
+
@lru_cache()
|
215 |
+
def all_language_tokens(self) -> Tuple[int]:
|
216 |
+
result = []
|
217 |
+
for token, token_id in zip(
|
218 |
+
self.tokenizer.additional_special_tokens,
|
219 |
+
self.tokenizer.additional_special_tokens_ids,
|
220 |
+
):
|
221 |
+
if token.strip("<|>") in LANGUAGES:
|
222 |
+
result.append(token_id)
|
223 |
+
return tuple(result)
|
224 |
+
|
225 |
+
@property
|
226 |
+
@lru_cache()
|
227 |
+
def all_language_codes(self) -> Tuple[str]:
|
228 |
+
return tuple(self.decode([l]).strip("<|>") for l in self.all_language_tokens)
|
229 |
+
|
230 |
+
@property
|
231 |
+
@lru_cache()
|
232 |
+
def sot_sequence_including_notimestamps(self) -> Tuple[int]:
|
233 |
+
return tuple(list(self.sot_sequence) + [self.no_timestamps])
|
234 |
+
|
235 |
+
+    @property
+    @lru_cache()
+    def non_speech_tokens(self) -> Tuple[int]:
+        """
+        Returns the list of tokens to suppress in order to avoid any speaker tags or non-speech
+        annotations, to prevent sampling texts that are not actually spoken in the audio, e.g.
+
+        - ♪♪♪
+        - ( SPEAKING FOREIGN LANGUAGE )
+        - [DAVID] Hey there,
+
+        keeping basic punctuations like commas, periods, question marks, exclamation points, etc.
+        """
+        symbols = list("\"#()*+/:;<=>@[\\]^_`{|}~「」『』")
+        symbols += "<< >> <<< >>> -- --- -( -[ (' (\" (( )) ((( ))) [[ ]] {{ }} ♪♪ ♪♪♪".split()
+
+        # symbols that may be a single token or multiple tokens depending on the tokenizer.
+        # In case they're multiple tokens, suppress the first token, which is safe because:
+        # These are between U+2640 and U+267F miscellaneous symbols that are okay to suppress
+        # in generations, and in the 3-byte UTF-8 representation they share the first two bytes.
+        miscellaneous = set("♩♪♫♬♭♮♯")
+        assert all(0x2640 <= ord(c) <= 0x267F for c in miscellaneous)
+
+        # allow hyphens "-" and single quotes "'" between words, but not at the beginning of a word
+        result = {self.tokenizer.encode(" -")[0], self.tokenizer.encode(" '")[0]}
+        for symbol in symbols + list(miscellaneous):
+            for tokens in [self.tokenizer.encode(symbol), self.tokenizer.encode(" " + symbol)]:
+                if len(tokens) == 1 or symbol in miscellaneous:
+                    result.add(tokens[0])
+
+        return tuple(sorted(result))
+
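These ids are intended for the decoder's suppress list, so the listed symbols are never sampled. A minimal sketch, assuming the logit-filtering machinery in the accompanying decoding.py:

    tok = get_tokenizer(multilingual=True)
    suppress = list(tok.non_speech_tokens)  # ids for "♪", "(", "[", "--", ...
    # the decoder then masks these out, effectively logits[:, suppress] = -inf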
+    def _get_single_token_id(self, text) -> int:
+        tokens = self.tokenizer.encode(text)
+        assert len(tokens) == 1, f"{text} is not encoded as a single token"
+        return tokens[0]
+
+
+@lru_cache(maxsize=None)
+def build_tokenizer(name: str = "gpt2"):
+    os.environ["TOKENIZERS_PARALLELISM"] = "false"
+    path = os.path.join(os.path.dirname(__file__), "assets", name)
+    tokenizer = GPT2TokenizerFast.from_pretrained(path)
+
+    specials = [
+        "<|startoftranscript|>",
+        *[f"<|{lang}|>" for lang in LANGUAGES.keys()],
+        "<|translate|>",
+        "<|transcribe|>",
+        "<|startoflm|>",
+        "<|startofprev|>",
+        "<|nospeech|>",
+        "<|notimestamps|>",
+    ]
+
+    tokenizer.add_special_tokens(dict(additional_special_tokens=specials))
+    return tokenizer
+
+
+@lru_cache(maxsize=None)
+def get_tokenizer(
+    multilingual: bool,
+    *,
+    task: Optional[str] = None,  # Literal["transcribe", "translate", None]
+    language: Optional[str] = None,
+) -> Tokenizer:
+    if language is not None:
+        language = language.lower()
+        if language not in LANGUAGES:
+            if language in TO_LANGUAGE_CODE:
+                language = TO_LANGUAGE_CODE[language]
+            else:
+                raise ValueError(f"Unsupported language: {language}")
+
+    if multilingual:
+        tokenizer_name = "multilingual"
+        task = task or "transcribe"
+        language = language or "en"
+    else:
+        tokenizer_name = "gpt2"
+        task = None
+        language = None
+
+    tokenizer = build_tokenizer(name=tokenizer_name)
+    all_special_ids: List[int] = tokenizer.all_special_ids
+    sot: int = all_special_ids[1]
+    translate: int = all_special_ids[-6]
+    transcribe: int = all_special_ids[-5]
+
+    langs = tuple(LANGUAGES.keys())
+    sot_sequence = [sot]
+    if language is not None:
+        sot_sequence.append(sot + 1 + langs.index(language))
+    if task is not None:
+        sot_sequence.append(transcribe if task == "transcribe" else translate)
+
+    return Tokenizer(tokenizer=tokenizer, language=language, sot_sequence=tuple(sot_sequence))
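A short usage sketch, assuming the `gpt2`/`multilingual` assets ship under `assets/` next to this module. Note that `get_tokenizer` recovers `sot`, `translate`, and `transcribe` positionally from `all_special_ids`, so it silently depends on the exact ordering of `specials` in `build_tokenizer` above:

    tok = get_tokenizer(multilingual=True, task="transcribe", language="zh")
    tok.sot_sequence     # (sot, <|zh|> token, <|transcribe|> token)
    tok.language_token   # id of <|zh|>
    tok.encode("hello")  # plain BPE ids, no special tokens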
vencoder/whisper/utils.py
ADDED
@@ -0,0 +1,163 @@
+import json
+import os
+import sys
+import zlib
+from typing import Callable, TextIO
+
+system_encoding = sys.getdefaultencoding()
+
+if system_encoding != "utf-8":
+    def make_safe(string):
+        # replaces any character not representable using the system default encoding with an '?',
+        # avoiding UnicodeEncodeError (https://github.com/openai/whisper/discussions/729).
+        return string.encode(system_encoding, errors="replace").decode(system_encoding)
+else:
+    def make_safe(string):
+        # utf-8 can encode any Unicode code point, so no need to do the round-trip encoding
+        return string
+
+
+def exact_div(x, y):
+    assert x % y == 0
+    return x // y
+
+
+def str2bool(string):
+    str2val = {"True": True, "False": False}
+    if string in str2val:
+        return str2val[string]
+    else:
+        raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
+
+
+def optional_int(string):
+    return None if string == "None" else int(string)
+
+
+def optional_float(string):
+    return None if string == "None" else float(string)
+
+
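`str2bool`, `optional_int`, and `optional_float` read like argparse type converters. A hypothetical wiring sketch (the flag names are illustrative, not taken from this repo):

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--fp16", type=str2bool, default=True)        # accepts "True"/"False"
    parser.add_argument("--beam_size", type=optional_int, default=5)  # "None" -> None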
+def compression_ratio(text) -> float:
+    text_bytes = text.encode("utf-8")
+    return len(text_bytes) / len(zlib.compress(text_bytes))
+
+
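`compression_ratio` is a repetition heuristic: text that zlib can squeeze far below its original size is likely a degenerate, looping transcription. A quick worked example:

    compression_ratio("I am a transcript. " * 50)   # repetitive: ratio far above 2
    compression_ratio("A varied, natural sentence.")  # near 1, or below it on short
                                                      # inputs due to zlib overhead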
+def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
+    assert seconds >= 0, "non-negative timestamp expected"
+    milliseconds = round(seconds * 1000.0)
+
+    hours = milliseconds // 3_600_000
+    milliseconds -= hours * 3_600_000
+
+    minutes = milliseconds // 60_000
+    milliseconds -= minutes * 60_000
+
+    seconds = milliseconds // 1_000
+    milliseconds -= seconds * 1_000
+
+    hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
+    return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
+
+
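Worked examples, including the comma-separated SRT variant used below:

    format_timestamp(7.25)      # "00:07.250"
    format_timestamp(3661.5)    # "01:01:01.500"
    format_timestamp(7.25, always_include_hours=True, decimal_marker=",")  # "00:00:07,250"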
+class ResultWriter:
+    extension: str
+
+    def __init__(self, output_dir: str):
+        self.output_dir = output_dir
+
+    def __call__(self, result: dict, audio_path: str):
+        audio_basename = os.path.basename(audio_path)
+        output_path = os.path.join(self.output_dir, audio_basename + "." + self.extension)
+
+        with open(output_path, "w", encoding="utf-8") as f:
+            self.write_result(result, file=f)
+
+    def write_result(self, result: dict, file: TextIO):
+        raise NotImplementedError
+
+
+class WriteTXT(ResultWriter):
+    extension: str = "txt"
+
+    def write_result(self, result: dict, file: TextIO):
+        for segment in result["segments"]:
+            print(segment['text'].strip(), file=file, flush=True)
+
+
+class WriteVTT(ResultWriter):
+    extension: str = "vtt"
+
+    def write_result(self, result: dict, file: TextIO):
+        print("WEBVTT\n", file=file)
+        for segment in result["segments"]:
+            print(
+                f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
+                f"{segment['text'].strip().replace('-->', '->')}\n",
+                file=file,
+                flush=True,
+            )
+
+
+class WriteSRT(ResultWriter):
+    extension: str = "srt"
+
+    def write_result(self, result: dict, file: TextIO):
+        for i, segment in enumerate(result["segments"], start=1):
+            # write srt lines
+            print(
+                f"{i}\n"
+                f"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> "
+                f"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\n"
+                f"{segment['text'].strip().replace('-->', '->')}\n",
+                file=file,
+                flush=True,
+            )
+
+
+class WriteTSV(ResultWriter):
+    """
+    Write a transcript to a file in TSV (tab-separated values) format containing lines like:
+    <start time in integer milliseconds>\t<end time in integer milliseconds>\t<transcript text>
+
+    Using integer milliseconds as start and end times means there's no chance of interference from
+    an environment setting a language encoding that causes the decimal in a floating point number
+    to appear as a comma; also is faster and more efficient to parse & store, e.g., in C++.
+    """
+    extension: str = "tsv"
+
+    def write_result(self, result: dict, file: TextIO):
+        print("start", "end", "text", sep="\t", file=file)
+        for segment in result["segments"]:
+            print(round(1000 * segment['start']), file=file, end="\t")
+            print(round(1000 * segment['end']), file=file, end="\t")
+            print(segment['text'].strip().replace("\t", " "), file=file, flush=True)
+
+
+class WriteJSON(ResultWriter):
+    extension: str = "json"
+
+    def write_result(self, result: dict, file: TextIO):
+        json.dump(result, file)
+
+
+def get_writer(output_format: str, output_dir: str) -> Callable[[dict, str], None]:
+    writers = {
+        "txt": WriteTXT,
+        "vtt": WriteVTT,
+        "srt": WriteSRT,
+        "tsv": WriteTSV,
+        "json": WriteJSON,
+    }
+
+    if output_format == "all":
+        all_writers = [writer(output_dir) for writer in writers.values()]
+
+        def write_all(result: dict, audio_path: str):
+            for writer in all_writers:
+                writer(result, audio_path)
+
+        return write_all
+
+    return writers[output_format](output_dir)
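A minimal end-to-end sketch of the writer API; the `result` shape below matches what the writers read (`segments` with `start`/`end`/`text`), and the output file name is derived from the audio path:

    result = {"segments": [{"start": 0.0, "end": 2.5, "text": "hello world"}]}
    writer = get_writer("srt", ".")  # a WriteSRT instance
    writer(result, "clip.wav")       # writes ./clip.wav.srt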