repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (stringclasses 1: py)
---|---|---|---|---|---|---
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-lstmsum-sig/05/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
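# Illustrative sketch (added, not part of the original file): an ASVspoof2019
# protocol line has the rough form
#   LA_0079 LA_T_1138215 - - bonafide
# so row[1] is the trial name and row[-1] the key; with such a file,
#   parser = protocol_parse('protocol.txt')   # hypothetical path
#   parser['LA_T_1138215']                    # -> 1 (bonafide) or 0 (spoof)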
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
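# Note (added sketch): inside m_transform below, this layer receives the
# spectrogram roughly as (batch, 1, frame_num, fft_n//2+1); the filter bank,
# initialized from a linear filter bank, acts on the last axis, and the forward
# above returns log10 of the power plus eps, i.e. approximately a
# (batch, 1, frame_num, filter_num) trainable log filter-bank feature.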
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
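# Rough bookkeeping for the configuration above (added note, assuming 16 kHz
# audio): hop 160 samples = 10 ms, frame 320 samples = 20 ms, 512-point FFT
# -> 257 bins, reduced to spec_fb_dim = 60 by the trainable filter bank; the
# four 2x2 max-pooling layers below shrink the feature axis to 60 // 16 = 3
# with 32 channels after the last MaxFeatureMap2D, so
# (spec_fb_dim // 16) * 32 = 96 is the per-frame feature size consumed by the
# BLSTM layers and the output linear layer.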
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32),
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim // 16) * 32, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end / sub-model configuration
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
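# Note (added): the Spectrogram front-end is wrapped in torch.no_grad() and is
# kept fixed; the trainable part of the front-end is the TrainableLinearFb
# layer placed at the start of m_transform.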
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling: pass through the BLSTM layers; the residual sum and
#    frame averaging happen in step 5 below
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).mean(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
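# Shape sketch for the default single sub-model (added, assuming a 4 s
# utterance at 16 kHz): spectrogram (batch, ~400, 257) -> filter bank + LCNN
# (batch, 32, ~25, 3) -> flatten to (batch, ~25, 96) -> BLSTM + residual ->
# mean over frames -> linear -> (batch, 1); the embeddings of all sub-models
# are stacked along the first axis, giving output_emb of shape
# (batch * v_submodels, v_emd_dim).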
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
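# Note (added): during training the sigmoid probabilities feed BCELoss below,
# while at inference the raw pre-sigmoid activations are returned and used
# directly as detection scores.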
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
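# Minimal usage sketch (added, hypothetical driver code): during training the
# wrapper is used roughly as
#   loss_wrapper = Loss(args)
#   outputs = model(wav, fileinfo)        # -> [scores, target_vec, True]
#   loss = loss_wrapper.compute(outputs, None)
# i.e. outputs[0] are the sigmoid scores and outputs[1] the 0/1 targets
# consumed by BCELoss.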
if __name__ == "__main__":
print("Definition of model")
| 15,026 | 33.865429 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-lstmsum-sig/04/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32),
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim // 16) * 32, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end / sub-model configuration
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling: pass through the BLSTM layers; the residual sum and
#    frame averaging happen in step 5 below
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).mean(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,026 | 33.865429 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-lstmsum-sig/01/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32),
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim // 16) * 32, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end / sub-model configuration
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling: pass through the BLSTM layers; the residual sum and
#    frame averaging happen in step 5 below
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).mean(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,026 | 33.865429 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-lstmsum-sig/06/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32),
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim // 16) * 32, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end / sub-model configuration
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling: pass through the BLSTM layers; the residual sum and
#    frame averaging happen in step 5 below
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).mean(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,025 | 33.944186 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-lstmsum-sig/03/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32),
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim // 16) * 32, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end / sub-model configuration
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling: pass through the BLSTM layers; the residual sum and
#    frame averaging happen in step 5 below
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).mean(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,026 | 33.865429 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-lstmsum-sig/02/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
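            # note on the LCNN block above: each MaxFeatureMap2D halves the
            # channel dimension (64 -> 32, 96 -> 48, 128 -> 64), and the four
            # 2x2 max-pooling layers shrink both the frame and the feature
            # axes by a factor of 16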
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32),
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32)
)
)
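            # the BLSTM layers keep the per-frame feature dimension,
            # (spec_fb_dim // 16) * 32 = 96, unchanged so that a residual
            # connection can be added before averaging over frames
            # (see _compute_embedding below)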
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim // 16) * 32, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
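            # the Spectrogram module returns spectrogram frames; the
            # TrainableLinearFb layer placed at the head of m_transform then
            # maps each frame to spec_fb_dim (60) trainable filter-bank
            # channels and applies log10 (see TrainableLinearFb.forward above)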
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end featues, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pooling: pass the features through the BLSTM layers
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. add the residual connection, average over the frame axis,
            #    and pass the result through the output layer
            tmp_emb = m_output((hidden_features_lstm + hidden_features).mean(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,026 | 33.865429 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-sig/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # number of frames kept per trial (random crop / repeat padding in
        # _front_end); with the 160-point frame hop this is 750 frames,
        # i.e. roughly 7.5 seconds at 16 kHz
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
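                # -> 20 * 3 = 60 coefficients per frame
                #    (static + delta + delta-delta)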
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
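            # the flattened LCNN output has (trunc_len // 16) * (lfcc_dim // 16)
            # * 32 elements; MaxFeatureMap2D halves the 160-unit hidden layer
            # to 80 before the final projection to the 1-dim embedding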
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
#self.m_a_softmax.append(
# nii_a_softmax.AngleLayer(self.v_emd_dim, self.v_out_class)
#)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
#self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
            # roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 13,817 | 34.430769 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-sig/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # number of frames kept per trial (random crop / repeat padding in
        # _front_end); with the 160-point frame hop this is 750 frames,
        # i.e. roughly 7.5 seconds at 16 kHz
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
#self.m_a_softmax.append(
# nii_a_softmax.AngleLayer(self.v_emd_dim, self.v_out_class)
#)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
#self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
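        # utterances longer than trunc_len frames are randomly cropped;
        # shorter utterances are tiled by repetition until trunc_len frames
        # are available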
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
            # roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 13,817 | 34.430769 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-sig/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # number of frames kept per trial (random crop / repeat padding in
        # _front_end); with the 160-point frame hop this is 750 frames,
        # i.e. roughly 7.5 seconds at 16 kHz
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
#self.m_a_softmax.append(
# nii_a_softmax.AngleLayer(self.v_emd_dim, self.v_out_class)
#)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
#self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
            # roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
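            #    (flattening yields (trunc_len // 16) * (lfcc_dim // 16) * 32
            #    values per trial, matching the first Linear layer in
            #    m_output_act)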
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 13,817 | 34.430769 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-sig/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # number of frames kept per trial (random crop / repeat padding in
        # _front_end); with the 160-point frame hop this is 750 frames,
        # i.e. roughly 7.5 seconds at 16 kHz
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
#self.m_a_softmax.append(
# nii_a_softmax.AngleLayer(self.v_emd_dim, self.v_out_class)
#)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
#self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
            # roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
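            # sub-model scores are concatenated along the batch axis, so the
            # 0/1 targets are repeated v_submodels times to stay aligned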
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 13,817 | 34.430769 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-sig/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # number of frames kept per trial (random crop / repeat padding in
        # _front_end); with the 160-point frame hop this is 750 frames,
        # i.e. roughly 7.5 seconds at 16 kHz
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
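            # the LFCC front-end extracts 20 base coefficients per frame
            # (with_energy=True); with delta and delta-delta appended this
            # gives the 60-dim frame features assumed by the LCNN layer
            # sizes above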
#self.m_a_softmax.append(
# nii_a_softmax.AngleLayer(self.v_emd_dim, self.v_out_class)
#)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
#self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
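    # Illustration of _front_end (hypothetical numbers): with trunc_len = 750,
    # a trial with ~900 usable frames is randomly cropped to 750 frames, while
    # a trial with ~500 frames is tiled twice to 1000 frames and then cut back
    # to the first 750, exactly as the loop above does.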
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
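    # Note: during training the raw network output is squashed with sigmoid so
    # that it matches torch_nn.BCELoss in the Loss wrapper below; at inference
    # the un-squashed value is used directly as the detection score (sigmoid is
    # monotonic, so the ranking of trials is unchanged).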
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
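# Minimal usage sketch (hypothetical tensors, not part of the training recipe):
#
#   model = Model(in_dim, out_dim, args)          # assumes args/in_dim/out_dim
#   outputs = model(x, fileinfo)                  # [scores, target_vec, True]
#   loss = Loss(args).compute(outputs, None)      # BCELoss(scores, target_vec)
#
# scores and target_vec are 1-D tensors of length batch_size * v_submodels.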
if __name__ == "__main__":
print("Definition of model")
| 13,817 | 34.430769 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-sig/02/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
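# Example protocol row (made-up trial name and label, shown only to illustrate
# the expected format):
#
#   LA_0069 LA_D_1105538 - A01 spoof
#
# protocol_parse() keys the returned dict by the 2nd column (the trial name)
# and maps 'bonafide' rows to 1, everything else to 0.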
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # number of frames to keep for each trial by truncation
        # (750 frames for the 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
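            # Note: the first Linear input size follows from the four
            # MaxPool2d([2, 2], [2, 2]) layers above, which shrink both the
            # time and feature axes by 2**4 = 16, with 32 channels left after
            # the last max-feature-map; the 160 -> 80 step is again the
            # max-feature-map halving.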
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
#self.m_a_softmax.append(
# nii_a_softmax.AngleLayer(self.v_emd_dim, self.v_out_class)
#)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
#self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 13,817 | 34.430769 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-oc/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
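            # Note: SelfWeightedPooling is an attentive pooling layer over the
            # frame axis; judging from the "* 2" in the Linear layer above, it
            # appears to return two concatenated statistics (e.g. a weighted
            # mean and deviation), doubling the per-frame feature dimension.
            # OCAngleLayer then maps the 64-dim embedding to the angular
            # scores used by the one-class softmax loss.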
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
            # where N (= 16) comes from the 2x2 max-pooling layers
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
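    # Note: during training both score branches are returned and consumed by
    # OCSoftmaxWithLoss (see Loss below); at inference only out_score_neg is
    # kept and its mean is printed as the detection score in forward().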
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,153 | 33.519362 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-oc/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
            # where N (= 16) comes from the 2x2 max-pooling layers
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
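# Note: input_data[0] is the (out_score_neg, out_score_pos) tuple produced by
# Model._compute_score() in training mode, and input_data[1] is the 0/1 target
# vector; the exact one-class softmax formulation lives in
# core_modules/oc_softmax.py, not in this file.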
if __name__ == "__main__":
print("Definition of model")
| 15,153 | 33.519362 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-oc/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
            # where N (= 16) comes from the 2x2 max-pooling layers
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
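    # Shape walk-through (hypothetical batch size 2, single sub-model):
    #   LFCC front-end:        (2, frame_num, 60)   # 20 static + deltas
    #   unsqueeze + LCNN:      (2, 32, frame_num // 16, 60 // 16)
    #   permute + view:        (2, frame_num // 16, 32 * 3) = (2, T, 96)
    #   SelfWeightedPooling:   (2, 192)   # assuming it doubles the dim
    #   output Linear:         (2, 64) = (batch, v_emd_dim)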
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,153 | 33.519362 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-oc/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
            #    where N (16 in this model) comes from the four 2x2 max-pooling layers
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
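    # Note on the evaluation branch above (comment added for clarity): each
    # evaluation trial is written to stdout by the print() call in the form
    #   Output, <trial name>, <target label>, <score>
    # e.g. "Output, LA_E_1000001, 0, -0.123456" (trial name and values are
    # made-up examples); downstream scoring scripts are assumed to collect
    # these lines rather than read a separate score file.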
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,153 | 33.519362 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-oc/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
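# Illustrative example (added for clarity; the exact column layout is an
# assumption based on the ASVspoof2019 protocol convention
# "<speaker> <trial name> <-> <attack id> <key>"):
# a protocol line such as
#   LA_0069 LA_T_1138215 - - bonafide
# is parsed by protocol_parse() into
#   data_buffer['LA_T_1138215'] = 1
# and any line whose last field is not 'bonafide' maps the trial name to 0.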
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
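            # (With frame_lens = [320], frame_hops = [160] and the 16 kHz
            # target sampling rate set above, this is the common 20 ms
            # window / 10 ms frame-shift LFCC configuration.)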
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
            #    where N (16 in this model) comes from the four 2x2 max-pooling layers
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
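    # Shape walk-through for _compute_embedding (added for clarity; the
    # concrete numbers assume the default single front-end above, i.e.
    # 20-dim LFCC with deltas = 60-dim features, and an utterance of T frames):
    #   x_sp_amp                 : (batch, T, 60)
    #   after unsqueeze(1)       : (batch, 1, T, 60)
    #   after m_trans            : (batch, 32, T//16, 3)   # four 2x2 max-poolings
    #   after permute + view     : (batch, T//16, 96)
    #   after SelfWeightedPooling: (batch, 192)  # assumed to concatenate the
    #                                            # weighted mean and std, which
    #                                            # matches Linear(192, 64) above
    #   after m_output           : (batch, 64)   # = v_emd_dim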
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
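# Minimal usage sketch (added for illustration only; the real training and
# inference loops live in the project's core_scripts, and the objects below
# -- args, wav_batch, fileinfo_list -- are hypothetical placeholders):
#
#   model = Model(in_dim=1, out_dim=1, args=args, mean_std=None)
#   loss_wrapper = Loss(args)
#   model.train()
#   scores_and_target = model(wav_batch, fileinfo_list)  # [scores, target_vec]
#   loss = loss_wrapper.compute(scores_and_target, None)
#   loss.backward()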
if __name__ == "__main__":
print("Definition of model")
| 15,153 | 33.519362 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-oc/02/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
            #    where N (16 in this model) comes from the four 2x2 max-pooling layers
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
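    # Note (comment added for clarity): with one-class softmax, only
    # out_score_neg is used as the detection score at inference time (the
    # evaluation branch of forward() below prints its mean); the
    # (neg, pos) pair is only needed by OCSoftmaxWithLoss during training.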
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,153 | 33.519362 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-p2s/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
        # recurrent layers applied before temporal pooling
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size of the input mini-batch
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
            #    where N (16 in this model) comes from the four 2x2 max-pooling layers
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
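    # Shape walk-through for _compute_embedding (added for clarity; the
    # concrete numbers assume the default single front-end above, i.e.
    # 60-dim LFB features and an utterance of T frames):
    #   x_sp_amp                    : (batch, T, 60)
    #   after m_trans               : (batch, 32, T//16, 3)   # four 2x2 max-poolings
    #   after permute + view        : (batch, T//16, 96)
    #   after the two BLSTM layers  : (batch, T//16, 96)
    #   residual add + sum over time: (batch, 96)
    #   after m_output              : (batch, 64)   # = v_emd_dim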
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
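# Note (added for clarity; this is an assumption based on the layer names, not
# on code shown in this file): P2SGradLoss is expected to expand the labels in
# outputs[1] into one-hot vectors and compare them with the cosine-based
# scores in outputs[0] using an MSE-style objective, following the P2SGrad
# formulation.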
if __name__ == "__main__":
print("Definition of model")
| 15,081 | 33.751152 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-p2s/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
        # recurrent layers applied before temporal pooling
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size of the input mini-batch
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
            #    where N (16 in this model) comes from the four 2x2 max-pooling layers
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
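    # Illustrative use of the inference branch above (not part of the original
    # code): out_score[:, 1] is the score of the positive (bona fide) class,
    # so a hypothetical decision rule would be
    #   decision = 'bonafide' if score > threshold else 'spoof'
    # with the threshold tuned on the development set (e.g. at the EER point).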
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,081 | 33.751152 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-p2s/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
        # recurrent layers applied before temporal pooling
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
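# A rough shape walk through _compute_embedding for the default single
# front-end (an illustrative sketch only; sizes are symbolic and taken
# from the layer definitions above, and exact frame counts depend on the
# STFT padding inside the front-end):
#
# x_sp_amp : (batch, frame_num, lfb_dim)
# unsqueeze(1) -> (batch, 1, frame_num, lfb_dim)
# LCNN stack -> (batch, 32, frame_num//16, lfb_dim//16)
# permute + view -> (batch, frame_num//16, 32 * (lfb_dim//16))
# BLSTM x 2 with residual, sum over time -> (batch, 32 * (lfb_dim//16))
# Linear -> (batch, v_emd_dim)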
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,081 | 33.751152 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-p2s/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
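# A minimal illustration of the expected protocol content (hypothetical
# entries following the ASVspoof2019 CM protocol layout, in which the 2nd
# column is the trial name and the last column is the key):
#
# LA_0069 LA_T_0000001 - - bonafide
# LA_0069 LA_T_0000002 - A01 spoof
#
# protocol_parse() on such a file would return
# {'LA_T_0000001': 1, 'LA_T_0000002': 0}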
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end configuration / sub-model
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
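# With the configuration above (16 kHz input, frame hop of 160 points,
# LFB dimension 60, no delta features), x_sp_amp is roughly
# (batch, num_waveform_points // 160, 60), i.e. about 100 feature frames
# per second of audio; the exact frame count depends on the padding
# convention inside nii_front_end.LFB.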
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,081 | 33.751152 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-p2s/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end configuration / sub-model
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
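# A minimal sketch of how Model and Loss are typically combined during
# training (illustrative only; the surrounding training script and
# prj_conf are assumed to supply args, the mini-batch x and fileinfo):
#
# model = Model(in_dim, out_dim, args, mean_std)
# loss_wrapper = Loss(args)
# outputs = model(x, fileinfo) # [scores, target_vec, True] in training mode
# loss = loss_wrapper.compute(outputs, None) # the target argument is unused
# loss.backward()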
if __name__ == "__main__":
print("Definition of model")
| 15,081 | 33.751152 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-p2s/02/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end configuration / sub-model
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
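# Illustration of the score layout produced above (a sketch; it assumes
# P2SActivationLayer returns one cosine-similarity-style value per class):
#
# x : (batch * v_submodels, 64) embedding vectors
# out_score : (batch * v_submodels, 2), column 0 = spoof, column 1 = bona fide
# inference : out_score[:, 1] is used as the detection score, so a larger
# value indicates the trial is closer to the bona fide class.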
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,081 | 33.751152 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-am/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
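# A small usage sketch for TrainableLinearFb (illustrative; it assumes
# nii_front_end.linear_fb returns a (fft_n//2+1, filter_num) filter-bank
# matrix used to initialize the underlying linear layer):
#
# fb = TrainableLinearFb(fn=512, sr=16000, filter_num=60)
# spec : (batch, 1, frame_num, 257) amplitude spectrogram
# out = fb(spec) # (batch, 1, frame_num, 60), log of the squared filter output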
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16)*32*2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end configuration / sub-model
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
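# At inference the detection score is the softmax probability of the
# bona fide class computed from out_score_neg. A small worked example
# (values are illustrative and rounded):
#
# out_score_neg row : [2.0, 4.0]
# softmax -> [0.12, 0.88]
# returned score : 0.88 (larger means closer to the bona fide class)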
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,576 | 33.615556 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-am/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16)*32*2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end configuration / sub-model
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
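# A hedged sketch of how Model and Loss are combined during training; the actual
# calls are issued by the training wrapper in core_scripts, and the variable
# names below are illustrative only:
#
#   scores, target_vec = model(wav, fileinfo)  # scores = (out_score_neg, out_score_pos)
#   loss = Loss(args).compute([scores, target_vec], None)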
if __name__ == "__main__":
print("Definition of model")
| 15,576 | 33.615556 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-am/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
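# A hedged illustration (the speaker and trial names below are made up, not taken
# from any real protocol): given a protocol line such as
#   LA_0001 LA_T_0000001 - - bonafide
# this parser stores data_buffer['LA_T_0000001'] = 1, because row[1] holds the
# trial name and row[-1] holds the bonafide/spoof label.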
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16)*32*2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,576 | 33.615556 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-am/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16)*32*2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,576 | 33.615556 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-am/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16)*32*2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,576 | 33.615556 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-am/02/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16)*32*2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,576 | 33.615556 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient for training a model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 7,862 | 35.915493 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/config_train_asvspoof2019_bc10.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = ['asvspoof2019_trn', 'bc2019_trn']
val_set_name = ['asvspoof2019_val', 'bc2019_val']
# for convenience
tmp1 = os.path.dirname(__file__) + '/../../../DATA/asvspoof2019_LA'
tmp2 = os.path.dirname(__file__) + '/../../../DATA/bc_release'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of file lists for the training set
trn_list = [tmp1 + '/scp/train.lst', tmp2 + '/scp/train.lst']
# val_list: list of file lists for the validation set. It can be None
val_list = [tmp1 + '/scp/val.lst', tmp2 + '/scp/val.lst']
# Directories for input features
# input_dirs = [[path_of_feature_1_trainset_1, path_of_feature_2_trainset_1.. ]
# [path_of_feature_1_trainset_2, path_of_feature_2_trainset_2..]]
# len(input_dirs) should = len(trn_list) = len(val_list)
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp1 + '/train_dev'], [tmp2 + '/train_dev']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
# len(input_dims) should be len(input_dirs[0])
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = [[] for x in input_dirs]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 64000
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 8000
# Optional argument
# Just a buffer for convenience
# It can contain anything
# Here we use it to hold the path to the protocol.txt
# They will be loaded by model.py
optional_argument = [tmp1 + '/protocol.txt', tmp2 + '/protocol.txt']
#import augment
#input_trans_fns = [[augment.wav_aug]]
#output_trans_fns = [[]]
#########################################################
## Configuration for inference stage (place holder)
#########################################################
# Please use config_test_*.py for inference
# This part is just a place holder
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2
| 3,891 | 32.843478 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/config_train_asvspoof2019_esp.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = ['asvspoof2019_trn', 'espnet_trn']
val_set_name = ['asvspoof2019_val', 'espnet_val']
# for convenience
tmp1 = os.path.dirname(__file__) + '/../../../DATA/asvspoof2019_LA'
tmp2 = os.path.dirname(__file__) + '/../../../DATA/espnet_release'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of file lists for the training set
trn_list = [tmp1 + '/scp/train.lst', tmp2 + '/scp/train.lst']
# val_list: list of file lists for the validation set. It can be None
val_list = [tmp1 + '/scp/val.lst', tmp2 + '/scp/val.lst']
# Directories for input features
# input_dirs = [[path_of_feature_1_trainset_1, path_of_feature_2_trainset_1.. ]
# [path_of_feature_1_trainset_2, path_of_feature_2_trainset_2..]]
# len(input_dirs) should = len(trn_list) = len(val_list)
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp1 + '/train_dev'], [tmp2 + '/train_dev']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
# len(input_dims) should be len(input_dirs[0])
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = [[] for x in input_dirs]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 64000
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 8000
# Optional argument
# Just a buffer for convenience
# It can contain anything
# Here we use it to hold the path to the protocol.txt
# They will be loaded by model.py
optional_argument = [tmp1 + '/protocol.txt', tmp2 + '/protocol.txt']
#import augment
#input_trans_fns = [[augment.wav_aug]]
#output_trans_fns = [[]]
#########################################################
## Configuration for inference stage (place holder)
#########################################################
# Please use config_test_*.py for inference
# This part is just a place holder
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2
| 3,895 | 32.878261 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/config_test_asvspoof2019.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Not necessary to change this part
# Parameters like input_dims, input_exts will be used for inference too
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = ['asvspoof2019_trn']
val_set_name = ['asvspoof2019_val']
# for convenience
tmp = ''
# File lists (text file, one data name per line, without name extension)
# trn_list: list of file lists for the training set
trn_list = [tmp + '/scp/train.lst']
# val_list: list of file lists for the validation set. It can be None
val_list = [tmp + '/scp/val.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = [[] for x in input_dirs]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
#import augment
#input_trans_fns = [[augment.wav_aug]]
#output_trans_fns = [[]]
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
# path to the test set directory
tmp = os.path.dirname(__file__) + '/../../../DATA/asvspoof2019_LA'
test_set_name = ['asvspoof2019_test']
# List of test set data
# for convenience, you may directly load test_set list here
test_list = [tmp + '/scp/test.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = [[tmp + '/eval']]
# Directories for output features, which are []
test_output_dirs = [[]]
# Optional argument
# Just a buffer for convenience
# It can contain anything
# Here we use it to hold the path to the protocol.txt
# They will be loaded by model.py
optional_argument = [tmp + '/protocol.txt']
| 3,592 | 30.243478 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/config_train_asvspoof2019.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = ['asvspoof2019_trn']
val_set_name = ['asvspoof2019_val']
# for convenience
tmp = os.path.dirname(__file__) + '/../../../DATA/asvspoof2019_LA'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of file lists for the training set
trn_list = [tmp + '/scp/train.lst']
# val_list: list of file lists for the validation set. It can be None
val_list = [tmp + '/scp/val.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = [[] for x in input_dirs]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 64000
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 8000
# Optional argument
# Just a buffer for convenience
# It can contain anything
# Here we use it to hold the path to the protocol.txt
# They will be loaded by model.py
optional_argument = [tmp + '/protocol.txt']
#import augment
#input_trans_fns = [[augment.wav_aug]]
#output_trans_fns = [[]]
#########################################################
## Configuration for inference stage (place holder)
#########################################################
# Please use config_test_*.py for inference
# This part is just a place holder
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2
| 3,496 | 30.504505 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/config_test_vcc.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Not necessary to change this part
# Parameters like input_dims, input_exts will be used for inference too
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = ['asvspoof2019_trn']
val_set_name = ['asvspoof2019_val']
# for convenience
tmp = ''
# File lists (text file, one data name per line, without name extension)
# trn_list: list of file lists for the training set
trn_list = [tmp + '/scp/train.lst']
# val_list: list of file lists for the validation set. It can be None
val_list = [tmp + '/scp/val.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = [[] for x in input_dirs]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
#import augment
#input_trans_fns = [[augment.wav_aug]]
#output_trans_fns = [[]]
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
# path to the test set directory
tmp2 = os.path.dirname(__file__) + '/../../../DATA/vcc_release'
test_set_name = ['vcc_test']
# List of test set data
# for convenience, you may directly load test_set list here
test_list = [tmp2 + '/scp/test.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = [[tmp2 + '/eval']]
# Directories for output features, which are []
test_output_dirs = [[]]
# Optional argument
# Just a buffer for convenience
# It can contain anything
# Here we use it to hold the path to the protocol.txt
# They will be loaded by model.py
optional_argument = [tmp2 + '/protocol.txt']
| 3,584 | 29.905172 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/Softmax-maxprob/config_train_asvspoof2019/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
g_attack_map = {'-': 1,
'A01': 1, 'A02': 1, 'A03': 1, 'A04': 1, 'A05': 1, 'A06': 1,
'A07': 1, 'A08': 1, 'A09': 1, 'A10': 1, 'A11': 1, 'A12': 1,
'A13': 1, 'A14': 1, 'A15': 1, 'A16': 1, 'A17': 1, 'A18': 1,
'A19': 1}
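# Note on the map above: the bonafide tag '-' and all ASVspoof2019 LA attacks
# A01-A19 are given the same value 1, i.e. they are all treated as
# in-distribution data. A tag that is not in this map falls into the KeyError
# branch of protocol_parse_general below and is stored as False, which the
# model later treats as out-of-distribution.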
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
      protocol_filepaths: string or list of strings, path(s) to the protocol file(s)
      g_map: dict, mapping the protocol tag to a label value
      sep: string, default ' ', column separator used in the protocol file
      target_row: int, default -1, use line[target_row] as the tag column
    output:
    -------
      data_buffer: dict, data_buffer[filename] -> value from g_map,
                   or False if the tag is not found in g_map
"""
data_buffer = {}
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
try:
                    data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
return data_buffer
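# A minimal usage sketch of the parser above (the path is hypothetical; the
# trial names are the ones from the docstring example), mirroring how it is
# called in Model.__init__ below:
#
# in_out_map = protocol_parse_general(['protocol.txt'], g_attack_map, ' ', -2)
# in_out_map['LA_E_5849185'] -> 1 (bonafide, in-distribution)
# in_out_map['LA_E_5932896'] -> 1 (A13, a known ASVspoof2019 attack)
# a trial whose tag is missing from g_attack_map -> False (out-of-distribution)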
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
        # floor for log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
        # here, the embedding is the pooled activation before the output layer
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
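                # with delta and delta-delta appended, the per-frame feature
                # dimension seen by the CNN becomes lfcc_dim * 3
                # (20 * 3 = 60 for the single front-end configured above)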
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (lfcc_dim // 16) * 32
else:
assert self.v_emd_dim == (lfcc_dim//16) * 32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass through the BLSTM layers
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. add the residual connection and average over frames (pooling)
            tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
        # output_act is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
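        # Note: with two output classes the returned value is the bonafide
        # logit minus the spoof logit, i.e. a log-odds style detection score;
        # with more classes only the last (bonafide) logit is used.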
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
# for baseline, get the conf_score through softmax
conf_scores_new = torch_nn_func.softmax(logits, dim=1)
conf_scores_new, _ = conf_scores_new.max(dim=1)
for filename, target, score, conf in \
zip(filenames, targets, scores, conf_scores_new):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), conf.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,568 | 34.302281 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/AM-softmax-energy/config_train_asvspoof2019/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
g_attack_map = {'-': 1,
'A01': 1, 'A02': 1, 'A03': 1, 'A04': 1, 'A05': 1, 'A06': 1,
'A07': 1, 'A08': 1, 'A09': 1, 'A10': 1, 'A11': 1, 'A12': 1,
'A13': 1, 'A14': 1, 'A15': 1, 'A16': 1, 'A17': 1, 'A18': 1,
'A19': 1}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
      protocol_filepaths: string or list of strings, path(s) to the protocol file(s)
      g_map: dict, mapping the protocol tag to a label value
      sep: string, default ' ', column separator used in the protocol file
      target_row: int, default -1, use line[target_row] as the tag column
    output:
    -------
      data_buffer: dict, data_buffer[filename] -> value from g_map,
                   or False if the tag is not found in g_map
"""
data_buffer = {}
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
try:
                    data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
        # floor for log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
        # here, the embedding is the activation before the AM-softmax output layer
self.v_emd_dim = 128
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
self.m_after_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_after_pooling.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_output_act.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_after_pooling = torch_nn.ModuleList(self.m_after_pooling)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.0
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_a_pool) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_after_pooling)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass through the BLSTM layers
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. add the residual, average over frames, then the post-pooling linear layer
            tmp_emb = m_a_pool((hidden_features_lstm + hidden_features).mean(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act_pos = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
output_act_neg = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
tmp_neg, tmp_pos = m_output(tmp_emb)
output_act_pos[idx*batch_size : (idx+1)*batch_size] = tmp_pos
output_act_neg[idx*batch_size : (idx+1)*batch_size] = tmp_neg
        # output_act_pos / output_act_neg are [batch * submodel, output-class]
return output_act_pos, output_act_neg
def _compose_logit(self, logits, targets):
"""
"""
logits_pos = logits[0]
logits_neg = logits[1]
with torch.no_grad():
index = torch.zeros_like(logits_pos).bool()
index.scatter_(1, targets.data.view(-1, 1), 1)
logits_new = logits_neg * 1.0
logits_new[index] = logits_pos[index]
return logits_new
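    # Note on _compose_logit above: AMAngleLayer returns two logit branches,
    # which _compute_logit unpacks as (neg, pos). The composed matrix keeps the
    # "pos" branch (presumably the margin-adjusted cosine scores) only at each
    # trial's target class and the "neg" branch elsewhere, which is how the
    # additive-margin softmax input to cross-entropy is usually assembled.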
def _compute_score(self, logits):
"""
"""
return logits[1][:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
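    # A small numerical sketch of the energy score above (toy values, not from
    # the model): with m_temp = 1 and logits = [2.0, 0.0],
    #   energy = -log(exp(2.0) + exp(0.0)) ~= -2.13
    # Larger (more confident) logits give lower energy, which is why -energy is
    # printed as the confidence value in the inference branch of forward() below.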
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
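    # The two squared-hinge terms above follow an energy-based OOD fine-tuning
    # recipe: energies of in-distribution trials are pushed below m_e_m_in and
    # energies of out-of-distribution trials are pushed above m_e_m_out. Note
    # that m_lambda is set to 0.0 in __init__, so in this configuration only
    # the cross-entropy term on in-distribution data is active unless m_lambda
    # is changed.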
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
composed_logit = self._compose_logit(logits, target_vec)
energy = self._energy(composed_logit)
# loss
loss = self._loss(composed_logit, target_vec, energy,
in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
energy = self._energy(logits[1])
# for baseline, get the conf_score through softmax
#conf_scores_new = torch_nn_func.softmax(logits[1], dim=1)
#conf_scores_new, _ = conf_scores_new.max(dim=1)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,396 | 34.460695 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/AM-softmax-conf/config_train_asvspoof2019/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
        # floor for log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
        # here, the embedding is the activation before the AM-softmax output layer
self.v_emd_dim = 128
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
self.m_after_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_after_pooling.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_output_act.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_conf.append(
torch_nn.Sequential(
torch_nn.Linear(self.v_emd_dim, 128),
torch_nn.Tanh(),
torch_nn.Linear(128, 1),
torch_nn.Sigmoid()
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
self.m_after_pooling = torch_nn.ModuleList(self.m_after_pooling)
self.m_conf = torch_nn.ModuleList(self.m_conf)
# output
self.m_loss = torch_nn.NLLLoss()
self.m_lambda = torch_nn.Parameter(torch.tensor([0.1]),
requires_grad=False)
self.m_budget = 0.5
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_a_pool) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_after_pooling)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass through the BLSTM layers
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. add the residual, average over frames, then the post-pooling linear layer
            tmp_emb = m_a_pool((hidden_features_lstm + hidden_features).mean(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
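    # A minimal shape sketch of _compute_embedding above (dims are symbolic;
    # the LCNN details are assumed to match the sibling models in this repo,
    # i.e. four 2x2 max-pooling stages and 32 channels after the last
    # max-feature-map):
    #   x_sp_amp:                 (batch, T, D)   front-end feature matrix
    #   after m_trans:            (batch, 32, T//16, D//16)
    #   after permute + view:     (batch, T//16, 32 * (D//16))
    #   after m_be_pool (BLSTM):  same shape, feature dim preserved
    #   (lstm + input).mean(1):   (batch, 32 * (D//16))
    #   after m_a_pool (assumed Linear, as in the sibling models): (batch, v_emd_dim)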
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act_pos = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
output_act_neg = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
        # compute scores for each sub-model
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
tmp_neg, tmp_pos = m_output(tmp_emb)
output_act_pos[idx*batch_size : (idx+1)*batch_size] = tmp_pos
output_act_neg[idx*batch_size : (idx+1)*batch_size] = tmp_neg
        # output_act_pos / output_act_neg are [batch * submodel, output-class]
return output_act_pos, output_act_neg
def _compose_logit(self, logits, targets):
"""
"""
logits_pos = logits[0]
logits_neg = logits[1]
with torch.no_grad():
index = torch.zeros_like(logits_pos).bool()
index.scatter_(1, targets.data.view(-1, 1), 1)
logits_new = logits_neg * 1.0
logits_new[index] = logits_pos[index]
return logits_new
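    # A small made-up sketch (not from the original source) of how
    # _compose_logit merges the two branches for a batch of two trials:
    #   logits_pos = [[2.0, 5.0],   logits_neg = [[3.0, 6.0],   targets = [1, 0]
    #                 [4.0, 1.0]]                 [5.0, 2.0]]
    # index marks the target class of each row, so the composed logits take
    # the positive-branch value at the target class and the negative-branch
    # value elsewhere:
    #   logits_new = [[3.0, 5.0],
    #                 [4.0, 2.0]]
    # The positive branch typically carries the margin-adjusted logits (as in
    # the AM-softmax layers used elsewhere in this repo), so the margin is
    # applied only to the ground-truth class during training.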
def _compute_score(self, logits):
"""
"""
        # score: the last column (bonafide class) of the negative-branch logits
return logits[1][:, -1]
def _compute_conf(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
conf_score = torch.zeros(
[batch_size * self.v_submodels, 1],
device=feature_vec.device, dtype=feature_vec.dtype)
        # compute scores for each sub-model
for idx, m_output in enumerate(self.m_conf):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
tmp_emb = torch_nn_func.normalize(tmp_emb, p=2, dim=1)
conf_score[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
return conf_score.squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
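    # Example (not in the original source): _clamp_prob maps 0.0 -> 1e-12 and
    # 1.0 -> 1.0 - 1e-12, keeping the torch.log calls in _loss below finite.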
def _loss(self, logits, targets, conf_scores):
"""
"""
with torch.no_grad():
index = torch.zeros_like(logits)
index.scatter_(1, targets.data.view(-1, 1), 1)
        # clamp the probability and conf scores
prob = self._clamp_prob(torch_nn_func.softmax(logits, dim=1))
conf_tmp = self._clamp_prob(conf_scores)
        # mixed log probability
log_prob = torch.log(
prob * conf_tmp.view(-1, 1) + (1 - conf_tmp.view(-1, 1)) * index)
loss = self.m_loss(log_prob, targets)
loss2 = -torch.log(conf_scores).mean()
loss = loss + self.m_lambda * loss2
if self.m_budget > loss2:
self.m_lambda.data = self.m_lambda / 1.01
else:
self.m_lambda.data = self.m_lambda / 0.99
return loss.mean()
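    # A rough numeric sketch (made-up values) of the confidence-weighted loss
    # implemented above, for a single target-0 trial:
    #   prob  = [0.6, 0.4], conf c = 0.8
    #   mixed = prob * c + (1 - c) * onehot(0) = [0.68, 0.32]
    #   loss  = -log(0.68) ~ 0.386          (NLL on the interpolated prob)
    #   loss2 = -log(0.8)  ~ 0.223          (confidence penalty)
    #   total = loss + m_lambda * loss2
    # m_lambda is then divided by 1.01 while loss2 stays under m_budget and by
    # 0.99 otherwise, so the network cannot simply request hints all the time.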
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
# for training, do balanced batch
if self.training:
target = self._get_target(filenames)
bona_indx = np.argwhere(np.array(target) > 0)[:, 0]
spoof_indx = np.argwhere(np.array(target) == 0)[:, 0]
num = np.min([len(bona_indx), len(spoof_indx)])
trial_indx = np.concatenate([bona_indx[0:num], spoof_indx[0:num]])
if len(trial_indx) == 0:
x_ = x
flag_no_data = True
else:
filenames = [filenames[x] for x in trial_indx]
datalength = [datalength[x] for x in trial_indx]
x_ = x[trial_indx]
flag_no_data = False
else:
x_ = x
feature_vec = self._compute_embedding(x_, datalength)
logits = self._compute_logit(feature_vec)
conf_scores = self._compute_conf(feature_vec)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# randomly set half of the conf data to 1
b = torch.bernoulli(torch.zeros_like(conf_scores).uniform_(0, 1))
conf_scores = conf_scores * b + (1 - b)
composed_logit = self._compose_logit(logits, target_vec)
loss = self._loss(composed_logit, target_vec, conf_scores)
if flag_no_data:
return loss * 0
else:
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, conf in \
zip(filenames, targets, scores, conf_scores):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), conf.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,307 | 34.821892 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/AM-softmax-maxprob/config_train_asvspoof2019/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
g_attack_map = {'-': 1,
'A01': 1, 'A02': 1, 'A03': 1, 'A04': 1, 'A05': 1, 'A06': 1,
'A07': 1, 'A08': 1, 'A09': 1, 'A10': 1, 'A11': 1, 'A12': 1,
'A13': 1, 'A14': 1, 'A15': 1, 'A16': 1, 'A17': 1, 'A18': 1,
'A19': 1}
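# Note: every tag listed above maps to 1, so with this particular map both
# bonafide ('-') and all known attacks count as in-distribution; a tag that is
# missing from the map (e.g. a hypothetical 'A20') falls through the KeyError
# branch in protocol_parse_general below and is stored as False, i.e. treated
# as out-of-distribution.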
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
      protocol_filepaths: string or list of strings, path(s) to protocol file(s)
      g_map: dict, mapping from the protocol tag to the target label
      sep: string, column separator, default ' '
      target_row: int, default -1, use line[target_row] as the target label
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
try:
data_buffer[cols[1]] = g_attack_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
return data_buffer
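# A hedged usage sketch of protocol_parse_general above (path and rows are
# illustrative only):
#   >>> pp = protocol_parse_general('protocol.txt', g_attack_map, ' ', -2)
#   given the row "LA_0031 LA_E_5932896 - A13 spoof", cols[-2] is 'A13',
#   so pp['LA_E_5932896'] == g_attack_map['A13'] == 1;
#   a row whose tag is missing from g_attack_map is stored as False instead.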
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 128
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
self.m_after_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
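            # Note on the channel counts above: nii_nn.MaxFeatureMap2D takes the
            # element-wise max over the two halves of the channel dimension, so a
            # Conv2d producing 64 channels yields 32 channels after MFM (hence the
            # following Conv2d(32, ...)). A rough shape sketch for a hypothetical
            # (batch, 1, T, lfcc_dim) input:
            #   Conv2d(1, 64) + MFM     -> (batch, 32, T, lfcc_dim)
            #   four MaxPool2d([2, 2])  -> time and feature dims divided by 16
            #   final MFM output        -> (batch, 32, T//16, lfcc_dim//16)
            # which matches the (lfcc_dim//16) * 32 input size of the BLSTM below.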
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_after_pooling.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_output_act.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_after_pooling = torch_nn.ModuleList(self.m_after_pooling)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.0
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_a_pool) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_after_pooling)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass through the BLSTM layers (first half of pooling)
hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual sum, frame average, then the output layer
tmp_emb = m_a_pool((hidden_features_lstm + hidden_features).mean(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act_pos = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
output_act_neg = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
        # compute scores for each sub-model
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
tmp_neg, tmp_pos = m_output(tmp_emb)
output_act_pos[idx*batch_size : (idx+1)*batch_size] = tmp_pos
output_act_neg[idx*batch_size : (idx+1)*batch_size] = tmp_neg
        # output_act_pos / output_act_neg are [batch * submodel, output-class]
return output_act_pos, output_act_neg
def _compose_logit(self, logits, targets):
"""
"""
logits_pos = logits[0]
logits_neg = logits[1]
with torch.no_grad():
index = torch.zeros_like(logits_pos).bool()
index.scatter_(1, targets.data.view(-1, 1), 1)
logits_new = logits_neg * 1.0
logits_new[index] = logits_pos[index]
return logits_new
def _compute_score(self, logits):
"""
"""
return logits[1][:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
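    # A small numeric sketch of the free-energy score above (made-up logits,
    # m_temp = 1):
    #   logits = [3.0, -1.0]
    #   energy = -logsumexp([3.0, -1.0]) = -(3.0 + log(1 + exp(-4.0))) ~ -3.02
    # Confident predictions (one dominant, large logit) give a low, i.e. very
    # negative, energy; the margin-based terms in _loss below operate on this
    # scale.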
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
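    # A rough sketch of the two margin terms above (made-up energies, with
    # m_e_m_in = -25, m_e_m_out = -7 and assuming a non-zero m_lambda):
    #   in-dist.  energy = -20 -> relu(-20 - (-25))^2 = 25  (penalised: too high)
    #   in-dist.  energy = -30 -> relu(-30 - (-25))^2 = 0   (below the margin)
    #   out-dist. energy = -10 -> relu(-7 - (-10))^2  = 9   (penalised: too low)
    # Note that this configuration sets m_lambda = 0.0, so both energy terms
    # are effectively switched off and only the cross-entropy term is trained.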
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
composed_logit = self._compose_logit(logits, target_vec)
energy = self._energy(composed_logit)
# loss
loss = self._loss(composed_logit, target_vec, energy,
in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
energy = self._energy(logits[1])
# for baseline, get the conf_score through softmax
conf_scores_new = torch_nn_func.softmax(logits[1], dim=1)
conf_scores_new, _ = conf_scores_new.max(dim=1)
for filename, target, score, conf in \
zip(filenames, targets, scores, conf_scores_new):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), conf.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,392 | 34.453382 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/Softmax-conf/config_train_asvspoof2019/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (lfcc_dim // 16) * 32
else:
assert self.v_emd_dim == (lfcc_dim//16) * 32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_out_class)
)
self.m_conf.append(
torch_nn.Sequential(
torch_nn.Linear((lfcc_dim // 16) * 32, 128),
torch_nn.Tanh(),
torch_nn.Linear(128, 1),
torch_nn.Sigmoid()
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
self.m_conf = torch_nn.ModuleList(self.m_conf)
# output
self.m_loss = torch_nn.NLLLoss()
self.m_lambda = torch_nn.Parameter(torch.tensor([0.1]),
requires_grad=False)
self.m_budget = 0.5
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass through the BLSTM layers (first half of pooling)
hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual sum and frame average to get the utterance embedding
tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
        # compute scores for each sub-model
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
        # output_act is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
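    # Example of the scoring rule above (made-up logits for one trial):
    #   logits = [-1.2, 2.3]  for (spoof, bonafide)
    #   score  = 2.3 - (-1.2) = 3.5
    # i.e. a log-likelihood-ratio-style score where larger values mean "more
    # bonafide"; with more than two classes the last logit is used as-is.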
def _compute_conf(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
conf_score = torch.zeros(
[batch_size * self.v_submodels, 1],
device=feature_vec.device, dtype=feature_vec.dtype)
        # compute scores for each sub-model
for idx, m_output in enumerate(self.m_conf):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
conf_score[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
return conf_score.squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _loss(self, logits, targets, conf_scores):
"""
"""
with torch.no_grad():
index = torch.zeros_like(logits)
index.scatter_(1, targets.data.view(-1, 1), 1)
        # clamp the probability and conf scores
prob = self._clamp_prob(torch_nn_func.softmax(logits, dim=1))
conf_tmp = self._clamp_prob(conf_scores)
        # mixed log probability
log_prob = torch.log(
prob * conf_tmp.view(-1, 1) + (1 - conf_tmp.view(-1, 1)) * index)
loss = self.m_loss(log_prob, targets)
loss2 = -torch.log(conf_scores).mean()
loss = loss + self.m_lambda * loss2
if self.m_budget > loss2:
self.m_lambda.data = self.m_lambda / 1.01
else:
self.m_lambda.data = self.m_lambda / 0.99
return loss.mean()
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
# for training, do balanced batch
if self.training:
target = self._get_target(filenames)
bona_indx = np.argwhere(np.array(target) > 0)[:, 0]
spoof_indx = np.argwhere(np.array(target) == 0)[:, 0]
num = np.min([len(bona_indx), len(spoof_indx)])
trial_indx = np.concatenate([bona_indx[0:num], spoof_indx[0:num]])
if len(trial_indx) == 0:
x_ = x
flag_no_data = True
else:
filenames = [filenames[x] for x in trial_indx]
datalength = [datalength[x] for x in trial_indx]
x_ = x[trial_indx]
flag_no_data = False
else:
x_ = x
feature_vec = self._compute_embedding(x_, datalength)
logits = self._compute_logit(feature_vec)
conf_scores = self._compute_conf(feature_vec)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# randomly set half of the conf data to 1
b = torch.bernoulli(torch.zeros_like(conf_scores).uniform_(0, 1))
conf_scores = conf_scores * b + (1 - b)
loss = self._loss(logits, target_vec, conf_scores)
if flag_no_data:
return loss * 0
else:
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, conf in \
zip(filenames, targets, scores, conf_scores):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), conf.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,469 | 34.587669 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/Softmax-energy/config_train_asvspoof2019/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
g_attack_map = {'-': 1,
'A01': 1, 'A02': 1, 'A03': 1, 'A04': 1, 'A05': 1, 'A06': 1,
'A07': 1, 'A08': 1, 'A09': 1, 'A10': 1, 'A11': 1, 'A12': 1,
'A13': 1, 'A14': 1, 'A15': 1, 'A16': 1, 'A17': 1, 'A18': 1,
'A19': 1}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
      protocol_filepaths: string or list of strings, path(s) to protocol file(s)
      g_map: dict, mapping from the protocol tag to the target label
      sep: string, column separator, default ' '
      target_row: int, default -1, use line[target_row] as the target label
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
try:
data_buffer[cols[1]] = g_attack_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (lfcc_dim // 16) * 32
else:
assert self.v_emd_dim == (lfcc_dim//16) * 32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass through the BLSTM layers (first half of pooling)
hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual sum and frame average to get the utterance embedding
tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
        # compute scores for each sub-model
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
        # output_act is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
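    # Example (made-up filenames): if in_out_parser maps 'LA_E_0001' to 1 and
    # 'LA_E_0002' to False, then _get_in_out_indx(['LA_E_0001', 'LA_E_0002'])
    # returns (array([0]), array([1])), i.e. the indices of in-distribution
    # and out-of-distribution trials within the mini-batch.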
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,378 | 34.276392 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/Softmax-energy/config_train_asvspoof2019_bc10/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
g_attack_map = {'-': 1,
'A01': 1, 'A02': 1, 'A03': 1, 'A04': 1, 'A05': 1, 'A06': 1,
'A07': 1, 'A08': 1, 'A09': 1, 'A10': 1, 'A11': 1, 'A12': 1,
'A13': 1, 'A14': 1, 'A15': 1, 'A16': 1, 'A17': 1, 'A18': 1,
'A19': 1}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
      protocol_filepaths: string or list of strings, path(s) to protocol file(s)
      g_map: dict, mapping from the protocol tag to the target label
      sep: string, column separator, default ' '
      target_row: int, default -1, use line[target_row] as the target label
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
try:
data_buffer[cols[1]] = g_attack_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (lfcc_dim // 16) * 32
else:
assert self.v_emd_dim == (lfcc_dim//16) * 32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.1
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass through the BLSTM layers (LSTM branch)
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. add the residual connection and average over frames;
            #    the linear output layer is applied later in _compute_logit()
            tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
# feature_vec is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
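    # Illustrative note (added; not part of the original implementation):
    # with m_temp = 1 and two classes, the energy of logits [l0, l1] is
    #   E(x) = -log(exp(l0) + exp(l1))
    # e.g. logits = torch.tensor([[2.0, -1.0]]) gives E(x) ~= -2.05;
    # lower (more negative) energy is treated as more in-distribution below.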
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
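    # Summary of the objective above (added for clarity; lambda, m_in, m_out
    # denote self.m_lambda, self.m_e_m_in, self.m_e_m_out):
    #   L = CE(logits_in, y_in)
    #       + lambda * mean(relu(E_in  - m_in )^2)
    #       + lambda * mean(relu(m_out - E_out)^2)
    # i.e. energies of in-dist. trials are pushed below m_in and energies of
    # out-of-dist. trials are pushed above m_out.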
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, conf in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energy.item()))
# don't write output score as a single file
return None
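        # Note (added): in evaluation mode the score lines printed above are
        # presumably collected from the training/inference log by an external
        # script (e.g. for EER computation); this method returns None here.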
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,371 | 34.262956 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/06-asvspoof-ood/Softmax-energy/config_train_asvspoof2019_esp/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
g_attack_map = {'-': 1,
'A01': 1, 'A02': 1, 'A03': 1, 'A04': 1, 'A05': 1, 'A06': 1,
'A07': 1, 'A08': 1, 'A09': 1, 'A10': 1, 'A11': 1, 'A12': 1,
'A13': 1, 'A14': 1, 'A15': 1, 'A16': 1, 'A17': 1, 'A18': 1,
'A19': 1}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
    protocol_filepaths: string or list of strings, path(s) to protocol file(s)
    g_map: dict, map from attack tag (e.g. 'A01', '-') to the target label
    sep: string, default ' ', column separator used in the protocol file
    target_row: int, default -1, index of the column used as the target label
output:
-------
data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
try:
                    data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
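            # Clarifying comment (added): nii_nn.MaxFeatureMap2D is assumed to
            # halve the channel dimension (max-feature-map activation), which
            # is why each Conv2d above outputs twice as many channels as the
            # following BatchNorm2d / Conv2d consumes (e.g. 64 -> 32).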
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
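            # Dimension note (added): after four [2, 2] max-pooling layers the
            # feature axis shrinks to lfcc_dim // 16, and the final MFM layer
            # leaves 32 channels, hence the (lfcc_dim // 16) * 32 size used for
            # the BLSTM layers above and the output layer below.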
if self.v_emd_dim is None:
self.v_emd_dim = (lfcc_dim // 16) * 32
else:
assert self.v_emd_dim == (lfcc_dim//16) * 32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.1
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
      x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass through the BLSTM layers (LSTM branch)
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. add the residual connection and average over frames;
            #    the linear output layer is applied later in _compute_logit()
            tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
# feature_vec is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
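    # Note (added): self.in_out_parser maps trials whose attack tag appears in
    # g_attack_map to 1 (treated as in-distribution) and everything else
    # (KeyError in protocol_parse_general) to False, i.e. out-of-distribution.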
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, conf in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energy.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,371 | 34.262956 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/blow/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
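# Usage sketch (added for illustration; the flag names are assumed from the
# args.* attributes used above and from core_scripts.config_parse.arg_parse --
# check `python main.py --help` for the authoritative list):
#   python main.py --module-config config --module-model model     # training
#   python main.py --inference --trained-model trained_network.pt  # conversion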
| 7,819 | 35.886792 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/blow/model.py | #!/usr/bin/env python
"""
model.py for Blow
version: 1
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import time
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.other_tools.display as nii_warn
import sandbox.block_nn as nii_nn
import sandbox.block_blow as nii_blow
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
########
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
#################
## must-have
#################
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# a flag for debugging (by default False)
self.model_debug = False
#################
## model config
#################
# waveform sampling rate
self.sample_rate = prj_conf.wav_samp_rate
# load speaker map
self.speaker_map = prj_conf.options['speaker_map']
self.speaker_num = self.speaker_map.num()
if 'conversion_map' in prj_conf.options:
self.conversion_map = prj_conf.options['conversion_map']
else:
self.conversion_map = None
self.cond_dim = 128
self.num_block = 8
self.num_flow_steps_perblock = 12
self.num_conv_channel_size = 512
self.num_conv_conv_kernel = 3
self.m_spk_emd = torch.nn.Embedding(self.speaker_num, self.cond_dim)
self.m_blow = nii_blow.Blow(
self.cond_dim, self.num_block,
self.num_flow_steps_perblock,
self.num_conv_channel_size,
self.num_conv_conv_kernel)
# only used for synthesis
self.m_overlap = nii_blow.OverlapAdder(4096, 4096//4, False)
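        # Note (added): with these arguments the waveform is assumed to be cut
        # into 4096-sample frames with a 1024-sample hop (75% overlap); frames
        # are converted independently and recombined by overlap-add in
        # inference() and convert().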
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, wav, fileinfo):
"""loss = forward(self, input_feat, wav)
input
-----
wav: tensor, target waveform (batchsize, length2, 1)
             it should be raw waveform, float valued, between (-1, 1)
fileinfo: list, file information for each data in the batch
output
------
loss: tensor / scalar,
Note: returned loss can be directly used as the loss value
no need to write Loss()
"""
# prepare speaker IDs
# (batch, )
speaker_ids = torch.tensor(
[self.speaker_map.parse(x) for x in fileinfo],
dtype=torch.long, device=wav.device)
# convert to embeddings
# (batch, 1, cond_dim)
speaker_emd = self.m_spk_emd(speaker_ids).unsqueeze(1)
        # normalize the conditioning features (not used here)
#input_feat = self.normalize_input(input_feat)
# compute
z, neg_logp, logp_z, log_detjac = self.m_blow(wav, speaker_emd)
return [[-logp_z, -log_detjac], [True, True]]
def inference(self, wav, fileinfo):
"""wav = inference(mels)
input
-----
wav: tensor, target waveform (batchsize, length2, 1)
output
------
wav_new: tensor, same shape
"""
# framing the input waveform into frames
#
# framed_wav (batchsize, frame_num, frame_length)
framed_wav = self.m_overlap(wav)
batch, frame_num, frame_len = framed_wav.shape
# framed_Wav (batchsize * frame_num, frame_length, 1)
framed_wav = framed_wav.view(-1, frame_len).unsqueeze(-1)
# get the speaker IDs
# (batch, )
speaker_ids = torch.tensor(
[self.speaker_map.parse(x) for x in fileinfo],
dtype=torch.long, device=wav.device)
# (batch * frame_num)
speaker_ids = speaker_ids.repeat_interleave(frame_num)
# if conversion map is defined, change the speaker identity following
# the conversion map. Otherwise, specify target speaker ID
# through environment variable TEMP_TARGET_SPEAKER
if self.conversion_map:
target_speaker = torch.tensor(
[self.speaker_map.get_idx(
self.conversion_map[self.speaker_map.parse(x, False)]) \
for x in fileinfo],
device=wav.device, dtype=torch.long)
target_speaker = target_speaker.repeat_interleave(frame_num)
else:
# target speaker ID
target_speaker = torch.tensor(
[int(os.getenv('TEMP_TARGET_SPEAKER'))] * speaker_ids.shape[0],
device=wav.device, dtype=torch.long)
# if it is intended to swap the source / target speaker ID
# this can be used to recover the original waveform given converted
# speech
flag_reverse = os.getenv('TEMP_FLAG_REVERSE')
if flag_reverse and int(flag_reverse):
# if this is for reverse conversion
# swap the IDs
print("revert IDs")
tmp = speaker_ids
speaker_ids = target_speaker
target_speaker = tmp
# print some information
for idx in range(len(speaker_ids)):
if idx % frame_num == 0:
print("From ID {:3d} to {:3d}, ".format(
speaker_ids[idx], target_speaker[idx]), end=' ')
# get embeddings (batch * frame_num, 1, cond_dim)
speaker_emd = self.m_spk_emd(speaker_ids).unsqueeze(1)
target_speaker_emb = self.m_spk_emd(target_speaker).unsqueeze(1)
# compute
z, neg_logp, logp_z, log_detjac = self.m_blow(framed_wav, speaker_emd)
# output_framed (batch * frame, frame_length, 1)
output_framed = self.m_blow.reverse(z, target_speaker_emb)
# overlap and add
# view -> (batch, frame_num, frame_length)
return self.m_overlap.reverse(output_framed.view(batch, -1, frame_len),
True)
def convert(self, wav, src_id, tar_id):
"""wav = inference(mels)
input
-----
wav: tensor, target waveform (batchsize, length2, 1)
src_id: int, ID of source speaker
tar_id: int, ID of target speaker
output
------
wav_new: tensor, same shape
"""
# framing the input waveform into frames
# m_overlap.forward does framing
# framed_wav (batchsize, frame_num, frame_length)
framed_wav = self.m_overlap(wav)
batch, frame_num, frame_len = framed_wav.shape
# change frames into batch
# framed_Wav (batchsize * frame_num, frame_length, 1)
framed_wav = framed_wav.view(-1, frame_len).unsqueeze(-1)
# source speaker IDs
# (batch, )
speaker_ids = torch.tensor([src_id for x in wav],
dtype=torch.long, device=wav.device)
# (batch * frame_num)
speaker_ids = speaker_ids.repeat_interleave(frame_num)
# get embeddings (batch * frame_num, 1, cond_dim)
speaker_emd = self.m_spk_emd(speaker_ids).unsqueeze(1)
# target speaker IDs
tar_speaker_ids = torch.tensor([tar_id for x in wav],
dtype=torch.long, device=wav.device)
# (batch * frame_num)
tar_speaker_ids = tar_speaker_ids.repeat_interleave(frame_num)
target_speaker_emb = self.m_spk_emd(tar_speaker_ids).unsqueeze(1)
# analysis
z, _, _, _ = self.m_blow(framed_wav, speaker_emd)
# synthesis
# output_framed (batch * frame, frame_length, 1)
output_framed = self.m_blow.reverse(z, target_speaker_emb)
# overlap and add
# view -> (batch, frame_num, frame_length)
return self.m_overlap.reverse(
output_framed.view(batch, -1, frame_len), True)
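    # Usage sketch (added for illustration; `model`, `wav`, and the speaker
    # indices below are hypothetical and not defined in this file):
    #   with torch.no_grad():
    #       # wav: (batch, length, 1), float waveform in (-1, 1)
    #       converted = model.convert(wav, src_id=0, tar_id=3)
    #   # src_id / tar_id must be valid indices in the speaker map loaded from
    #   # prj_conf.options['speaker_map']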
# Loss is returned by model.forward(), no need to specify
# just a place holder so that the output of model.forward() can be
# sent to the optimizer
class Loss():
def __init__(self, args):
return
def compute(self, output, target):
return output
if __name__ == "__main__":
print("Definition of model")
| 10,716 | 33.795455 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/waveglow/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 6,776 | 35.047872 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/waveglow/model.py | #!/usr/bin/env python
"""
model.py
waveglow is defined in sandbox.block_waveglow.
This file wraps waveglow inside Model() so that it can be used by the script.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import time
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.other_tools.display as nii_warn
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_nn_frontend
import sandbox.block_waveglow as nii_waveglow
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
#################
## must-have
#################
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# a flag for debugging (by default False)
self.model_debug = False
#################
## model config
#################
# whether z-normalize the input features?
# the pre-trained model was trained without normalizing feat
self.flag_normalize_input = False
# waveform sampling rate
self.sample_rate = prj_conf.wav_samp_rate
# up-sample rate
self.up_sample = prj_conf.input_reso[0]
# configuration for WaveGlow
# number of waveglow blocks
self.num_waveglow_blocks = 3
# number of flow steps in each block
self.num_flow_steps_perblock = 4
# number of wavenet layers in one flow step
self.num_wn_blocks_perflow = 8
# channel size of the dilated conv in wavenet layer
self.num_wn_channel_size = 256
# kernel size of the dilated conv in wavenet layer
self.num_wn_conv_kernel = 3
# use affine transformation? (if False, use a + b)
self.flag_affine = True
# dimension of the early extracted latent z
self.early_z_feature_dim = 2
# use legacy implementation of affine (default False)
# the difference is the wavenet core to compute the affine
# parameter. For more details, check sandbox.block_waveglow
self.flag_affine_legacy_implementation = False
self.m_waveglow = nii_waveglow.WaveGlow(
in_dim, self.up_sample,
self.num_waveglow_blocks,
self.num_flow_steps_perblock,
self.num_wn_blocks_perflow,
self.num_wn_channel_size,
self.num_wn_conv_kernel,
self.flag_affine,
self.early_z_feature_dim,
self.flag_affine_legacy_implementation)
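        # Structure note (added): with the settings above the model is assumed
        # to stack 3 blocks x 4 flow steps = 12 flows, each flow using an
        # 8-layer WaveNet-like coupling network, and to factor out
        # early_z_feature_dim latent channels after each block (the multi-scale
        # structure of the original WaveGlow).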
# buffer for noise in inference
self.bg_noise = None
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, input_feat, wav):
"""loss = forward(self, input_feat, wav)
input
-----
input_feat: tensor, input features (batchsize, length1, input_dim)
wav: tensor, target waveform (batchsize, length2, 1)
             it should be raw waveform, float valued, between (-1, 1)
output
------
loss: tensor / scalar
Note: returned loss can be directly used as the loss value
no need to write Loss()
"""
        # normalize the conditioning features
if self.flag_normalize_input:
input_feat = self.normalize_input(input_feat)
# compute
z_bags, neg_logp, logp_z, log_detjac = self.m_waveglow(wav, input_feat)
return [[-logp_z, -log_detjac], [True, True]]
def inference(self, input_feat):
"""wav = inference(mels)
input
-----
input_feat: tensor, input features (batchsize, length1, input_dim)
output
------
wav: tensor, target waveform (batchsize, length2, 1)
Note: length2 will be = length1 * self.up_sample
"""
#normalize input
if self.flag_normalize_input:
input_feat = self.normalize_input(input_feat)
length = input_feat.shape[1] * self.up_sample
noise = self.m_waveglow.get_z_noises(length, noise_std=0.6,
batchsize=input_feat.shape[0])
output = self.m_waveglow.reverse(noise, input_feat)
        # estimate the background noise by running the reverse flow with
        # zero-valued conditioning features (computed once and cached)
        if self.bg_noise is None:
            self.bg_noise = self.m_waveglow.reverse(noise, input_feat * 0)
        # do post-filtering (spectral subtraction) to suppress that noise
        output = nii_nn_frontend.spectral_substraction(output,self.bg_noise,1.0)
return output
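    # Usage sketch (added for illustration; `model` and `mel` are hypothetical
    # and assume the Model was constructed with matching in_dim / prj_conf):
    #   mel = torch.randn(1, 100, in_dim)       # (batch, frames, feat_dim)
    #   wav = model.inference(mel)              # (batch, 100 * up_sample, 1)
    # where up_sample equals prj_conf.input_reso[0] (waveform points per frame).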
# Loss is returned by model.forward(), no need to specify
# just a place holder so that the output of model.forward() can be
# sent to the optimizer
class Loss():
def __init__(self, args):
return
def compute(self, output, target):
return output
if __name__ == "__main__":
print("Definition of model")
| 7,385 | 33.353488 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/waveglow-2/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 6,776 | 35.047872 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/waveglow-2/model.py | #!/usr/bin/env python
"""
model.py
waveglow is defined in sandbox.block_waveglow.
This file wraps waveglow inside Model() so that it can be used by the script.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import time
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.other_tools.display as nii_warn
import sandbox.block_nn as nii_nn
import sandbox.block_waveglow as nii_waveglow
import sandbox.util_frontend as nii_nn_frontend
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
#################
## must-have
#################
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# a flag for debugging (by default False)
self.model_debug = False
#################
## model config
#################
# waveform sampling rate
self.sample_rate = prj_conf.wav_samp_rate
# up-sample rate
self.up_sample = prj_conf.input_reso[0]
# configuration for WaveGlow
# number of waveglow blocks
self.num_waveglow_blocks = 3
# number of flow steps in each block
self.num_flow_steps_perblock = 4
# number of wavenet layers in one flow step
self.num_wn_blocks_perflow = 8
# channel size of the dilated conv in wavenet layer
self.num_wn_channel_size = 256
# kernel size of the dilated conv in wavenet layer
self.num_wn_conv_kernel = 3
# use affine transformation? (if False, use a + b)
self.flag_affine = True
# dimension of the early extracted latent z
self.early_z_feature_dim = 2
# use legacy implementation of affine (default False)
# the difference is the wavenet core to compute the affine
# parameter. For more details, check sandbox.block_waveglow
self.flag_affine_legacy_implementation = True
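        # Note (added): this project appears to differ from the plain waveglow
        # project mainly in using the legacy affine-coupling implementation
        # (the flag above is True here); see sandbox.block_waveglow for what
        # that flag changes.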
self.m_waveglow = nii_waveglow.WaveGlow(
in_dim, self.up_sample,
self.num_waveglow_blocks,
self.num_flow_steps_perblock,
self.num_wn_blocks_perflow,
self.num_wn_channel_size,
self.num_wn_conv_kernel,
self.flag_affine,
self.early_z_feature_dim,
self.flag_affine_legacy_implementation)
# buffer for noise in inference
self.bg_noise = None
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, input_feat, wav):
"""loss = forward(self, input_feat, wav)
input
-----
input_feat: tensor, input features (batchsize, length1, input_dim)
wav: tensor, target waveform (batchsize, length2, 1)
             it should be raw waveform, float valued, between (-1, 1)
output
------
loss: tensor / scalar
Note: returned loss can be directly used as the loss value
no need to write Loss()
"""
        # normalize the conditioning features (disabled)
#input_feat = self.normalize_input(input_feat)
# compute
z_bags, neg_logp, logp_z, log_detjac = self.m_waveglow(wav, input_feat)
return [[-logp_z, -log_detjac], [True, True]]
def inference(self, input_feat):
"""wav = inference(mels)
input
-----
input_feat: tensor, input features (batchsize, length1, input_dim)
output
------
wav: tensor, target waveform (batchsize, length2, 1)
Note: length2 will be = length1 * self.up_sample
"""
#normalize input
#input_feat = self.normalize_input(input_feat)
length = input_feat.shape[1] * self.up_sample
noise = self.m_waveglow.get_z_noises(length, noise_std=0.6,
batchsize=input_feat.shape[0])
output = self.m_waveglow.reverse(noise, input_feat)
        # estimate the background noise by running the reverse flow with
        # zero-valued conditioning features (computed once and cached)
        if self.bg_noise is None:
            self.bg_noise = self.m_waveglow.reverse(noise, input_feat * 0)
        # do post-filtering (spectral subtraction) to suppress that noise
        output = nii_nn_frontend.spectral_substraction(output,self.bg_noise,1.0)
return output
# Loss is returned by model.forward(), no need to specify
# just a place holder so that the output of model.forward() can be
# sent to the optimizer
class Loss():
def __init__(self, args):
return
def compute(self, output, target):
return output
if __name__ == "__main__":
print("Definition of model")
| 7,095 | 33.280193 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/wavenet/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 7,819 | 35.886792 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/wavenet/model.py | #!/usr/bin/env python
"""
model.py
wavenet is defined in sandbox.block_wavenet,
This file wrapps wavenet inside model() so that it can be used by the script.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import time
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.other_tools.display as nii_warn
import sandbox.block_nn as nii_nn
import sandbox.block_dist as nii_dist
import sandbox.util_dsp as nii_dsp
import sandbox.block_wavenet as nii_wavenet
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
#
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
#################
## must-have
#################
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# a flag for debugging (by default False)
self.model_debug = False
#################
## model config
#################
# number of bits for mu-law
self.num_bits = 10
self.up_sample = prj_conf.input_reso[0]
# model
# Most of the configurations are fixed in sandbox.block_wavenet.py
self.m_wavenet = nii_wavenet.WaveNet_v1(in_dim,
self.up_sample,
self.num_bits)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
    def _waveform_encode_target(self, target_wav):
        # the number of mu-law levels is 2 ** num_bits
        return nii_dsp.mulaw_encode(target_wav, 2 ** self.num_bits)
    def _waveform_decode_target(self, gen_wav):
        return nii_dsp.mulaw_decode(gen_wav, 2 ** self.num_bits)
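    # (note on mu-law, for reference) encoding applies the standard companding
    # f(x) = sign(x) * log(1 + mu*|x|) / log(1 + mu) with mu = 2**num_bits - 1,
    # then quantizes f(x) into 2**num_bits levels; decoding inverts the mapping.
    # As used elsewhere in this repository, the second argument of
    # mulaw_encode/decode is the number of quantization levels.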
def forward(self, input_feat, wav):
"""loss = forward(self, input_feat, wav)
input
-----
input_feat: tensor, input features (batchsize, length1, input_dim)
wav: tensor, target waveform (batchsize, length2, 1)
               it should be a raw waveform, float valued, between (-1, 1)
the code will do mu-law conversion
output
------
loss: tensor / scalar
Note: returned loss can be directly used as the loss value
no need to write Loss()
"""
# normalize input features
input_feat = self.normalize_input(input_feat)
# compute cross-entropy using wavenet
return self.m_wavenet.forward(input_feat, wav)
def inference(self, input_feat):
"""wav = inference(mels)
input
-----
input_feat: tensor, input features (batchsize, length1, input_dim)
output
------
wav: tensor, target waveform (batchsize, length2, 1)
Note: length2 will be = length1 * self.up_sample
"""
# normalize the input
input_feat = self.normalize_input(input_feat)
# get the output waveform
wave = self.m_wavenet.inference(input_feat)
return wave
# Loss is returned by model.forward(), no need to specify
# just a place holder so that the output of model.forward() can be
# sent to the optimizer
class Loss():
def __init__(self, args):
return
def compute(self, output, target):
return output
if __name__ == "__main__":
print("Definition of model")
| 5,750 | 31.491525 | 77 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/ilpcnet/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train a model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 7,819 | 35.886792 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/ilpcnet/block_lpcnet.py | #!/usr/bin/env python
"""
"""
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import torch.nn.init as torch_init
import core_scripts.data_io.dsp_tools as nii_dsp_np
import sandbox.block_nn as nii_nn
import sandbox.block_dist as nii_dist
import sandbox.util_dsp as nii_dsp
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
#######################################
# Numpy utilities for feature extraction
#######################################
def get_excit(wav, frame_length, frame_shift, lpc_order):
lpc_handler = nii_dsp_np.LPClite(frame_length, frame_shift, lpc_order)
lpc_coef, _, rc, gain, _, _ = lpc_handler.analysis(wav)
return [wav, lpc_coef, rc, gain]
#######################################
# Network component
#######################################
class LPCLitePytorch(torch_nn.Module):
"""LPCLitePytorch
    This API is only used to do AR and MA operations, not LPC analysis
Example:
length = 1000
frame_l = 320
frame_s = 80
wav = torch.randn([1, length, 1])
frame_num = (length - frame_l) // frame_s + 1
lpc_coef = torch.tensor([[1, -0.97]] * frame_num).unsqueeze(0)
m_lpc = LPCLitePytorch(320, 80, 2)
with torch.no_grad():
# analysis
wavp, excit = m_lpc.LPCAnalsysisPytorch(wav, lpc_coef)
# synthesis
output = torch.zeros_like(wavp)
for idx in range(output.shape[1]):
lpc_coef_tmp = lpc_coef[:, idx // frame_s, :].unsqueeze(1)
output[:, idx:idx+1, :] = m_lpc.LPCSynthesisPytorch(
excit[:, idx:idx+1, :], lpc_coef_tmp, idx)
print(torch.mean(output[0] - wav[0][0:720]))
"""
def __init__(self, framelen, frameshift, lpc_order,
flag_emph=True, emph_coef=0.97, noise_std=0, noise_bit=8,
flag_emph_ignore_start=True, flag_win_begining=True):
super(LPCLitePytorch, self).__init__()
self.fl = framelen
self.fs = frameshift
self.lpc_order = lpc_order
self.emph_coef = emph_coef
self.noise_std = noise_std
self.noise_mulaw_q = 2 ** noise_bit
self.flag_emph = flag_emph
self.flag_emph_ignore_start = flag_emph_ignore_start
self.flag_win_begining = flag_win_begining
self.m_winbuf = None
self.m_buf = None
return
def _preemphasis(self, wav):
"""
input
-----
wav: tensor, (batch, length, 1)
output
------
wav: tensor, (batch, length, 1)
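        Note: this is a first-order FIR pre-emphasis,
        wav[n] <- wav[n] - emph_coef * wav[n-1]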
"""
wav_tmp = torch.zeros_like(wav) + wav
wav_tmp[:, 1:] = wav_tmp[:, 1:] - self.emph_coef * wav_tmp[:, :-1]
if self.flag_emph_ignore_start:
wav_tmp[:, 0] = wav_tmp[:, 1]
return wav_tmp
def _deemphasis(self, wav):
"""
input
-----
wav: tensor, (batch, length, 1)
output
------
wav: tensor, (batch, length, 1)
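        Note: this is the inverse (IIR) filter of the pre-emphasis,
        out[n] = wav[n] + emph_coef * out[n-1]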
"""
out = torch.zeros_like(wav) + wav
for idx in range(1, wav.shape[1]):
out[:, idx] = out[:, idx] + self.emph_coef * out[:, idx-1]
return out
def deemphasis(self, wav):
"""
input
-----
wav: tensor, (batch, length, 1)
output
------
wav: tensor, (batch, length, 1)
"""
if self.flag_emph:
return self._deemphasis(wav)
else:
return wav
def preemphasis(self, wav):
"""
input
-----
wav: tensor, (batch, length, 1)
output
------
wav: tensor, (batch, length, 1)
"""
if self.flag_emph:
return self._preemphasis(wav)
else:
return wav
def LPCAnalysisPytorch(self, wav, lpc_coef, gain=None):
"""
input
-----
wav: tensor, (batch, length, 1)
lpc_coef, tensor, (batch, frame, order)
gain: tensor, (batch, frame, 1), or None
output
------
wavp: tensor, (batch, length2, 1)
excit: tensor, (batch, length2, 1)
wav_input: tensor, (batch, length2, 1), pre-processed wav
Note that length2 depends on the frame length, shift
length2 = min(
((length - frame_length) // frame_shift + 1) * frame_shift,
lpc_coef.shape[1] * frame_shift)
"""
if self.flag_emph:
wav_input = self._preemphasis(wav)
else:
wav_input = torch.zeros_like(wav) + wav
if self.flag_win_begining:
if self.m_winbuf is None:
self.m_winbuf = torch.hann_window(
self.fl, dtype=wav.dtype, device=wav.device)
wav_input[:, 0:self.fl//2, 0] *= self.m_winbuf[0:self.fl//2]
if self.noise_std > 0:
wav_mulaw_noised = nii_dsp.mulaw_encode(
wav, self.noise_mulaw_q, scale_to_int=True).to(dtype=wav.dtype)
wav_mulaw_noised += (torch.rand_like(wav) - 0.5) * self.noise_std
wav_mulaw_noised = wav_mulaw_noised.clamp(0, self.noise_mulaw_q-1)
wav_feedback = nii_dsp.mulaw_decode(
wav_mulaw_noised, self.noise_mulaw_q, input_int=True)
wav_mulaw_noised_quan = wav_mulaw_noised.to(torch.int64)
else:
wav_mulaw_noised_quan = None
wav_feedback = wav_input
#
batch = lpc_coef.shape[0]
        # LPC order + 1
poly_order = lpc_coef.shape[2]
# take the minimum length
wavlen = np.min([((wav.shape[1] - self.fl) // self.fs + 1) * self.fs,
lpc_coef.shape[1] * self.fs])
# to pad
pad_zero = torch.zeros(
[batch, poly_order-1], dtype=wav.dtype, device=wav.device)
# (batch, length + poly_order - 1)
# flip wavf = [x[n], x[n-1], ..., x[0], 0, 0, 0]
wavf = torch.flip(
torch.cat([pad_zero, wav_feedback.squeeze(-1)], dim=1), dims=[1])
# (batch, length, poly_order)
# unfold [[x[n], x[n-1], x[n-2], ], [x[n-1], x[n-2], x[n-3], ]]
# flip back [[x[0], 0, 0...], ..., [x[n], x[n-1], x[n-2], ], ]
# wavf[i, n, :] = [wav[n], wav[n-1], wav[n-2], ...m wav[n-order+1]]
# this can be used for LPC analysis for the n-th time step
wavf = torch.flip(wavf.unfold(dimension=1, size=poly_order, step=1),
dims=[1])[:, 0:wavlen, :]
# duplicate lpc coefficients for each time step
# (batch, length, poly_order)
        # lpcf[i, n, :] = [1, a_1, ..., a_order] for the n-th step in the i-th
        # sequence; the frame-level coefficients are repeated to waveform level
lpcf = lpc_coef.repeat_interleave(self.fs, dim=1)[:, 0:wavlen, :]
# excitation
# wavf[i, n, :] * lpcf[i, n, :] = wav[n] * 1 + wav[n-1] * a_1 ...
excit = torch.sum(wavf * lpcf, dim=-1).unsqueeze(-1)
# predicted_wav = wav - excit
wavp = wav_input[:, 0:wavlen, :] - excit
if gain is not None:
gain_tmp = gain.repeat_interleave(self.fs, dim=1)[:, 0:wavlen, :]
excit = excit / gain_tmp
return wavp, excit, wav_mulaw_noised_quan, wav_input
def LPCSynthesisPytorchCore(self, lpc_coef, buf):
""" predicted = LPCSynthesisPytorchCore(self, lpc_coef, buf):
input
-----
lpc_coef: tensor, (batch, 1, poly_order)
buf: tensor, (batch, order - 1, 1)
output
------
output: tensor, (batch, 1, 1)
Compute (x[n-1] * a[2] + ...) for LP synthesis
This should be combined with excitation excit[n] - (x[n-1] * a[2] + ...)
as the final output waveform
"""
batch = lpc_coef.shape[0]
poly_order = lpc_coef.shape[-1]
# (batch, poly_order - 1)
# flip so that data in order [x[n-1], ..., x[n-order+1]]
pre_sample = torch.flip(buf, dims=[1]).squeeze(-1)
# (batch, )
# (x[n-1] * a[2] + ...)
predicted = torch.sum(lpc_coef[:, 0, 1:] * pre_sample, dim=1)
# (batch, 1, 1)
# -(x[n-1] * a[2] + ...)
return -1 * predicted.unsqueeze(-1).unsqueeze(-1)
def LPCSynthesisPytorchStep(self, excit, lpc_coef, stepidx, gain=None):
"""
input
-----
excit: tensor, (batch, 1, 1)
lpc_coef: tensor, (batch, 1, poly_order)
stepidx: int, which time step is this
gain: tensor, (batch, 1, 1)
output
------
output: tensor, (batch, 1, 1)
"""
batch = lpc_coef.shape[0]
poly_order = lpc_coef.shape[-1]
        # at the first time step, create a buffer to store previous output samples
if stepidx == 0:
# save as [n-order-1, ..., n-1]
self.m_buf = torch.zeros(
[batch, poly_order - 1, 1],
dtype=lpc_coef.dtype, device=lpc_coef.device)
pre = self.LPCSynthesisPytorchCore(lpc_coef, self.m_buf)
# excit[n] + [- (x[n-1] * a[2] + ...)]
if gain is None:
output = excit + pre
else:
output = excit * gain + pre
# save the previous value
# roll and save as [x[n-order+2], ..., x[n-1], x[n]]
self.m_buf = torch.roll(self.m_buf, -1, dims=1)
self.m_buf[:, -1, :] = output[:, 0, :]
return output
def LPCSynthesisPytorch(self, excit, lpc_coef, gain=None):
"""
input
-----
wav: tensor, (batch, length, 1)
lpc_coef, tensor, (batch, frame, order)
gain: tensor, (batch, frame, 1), or None
output
------
wavp: tensor, (batch, length2, 1)
excit: tensor, (batch, length2, 1)
Note that length2 depends on the frame length, shift
length2 = min(
((length - frame_length) // frame_shift + 1) * frame_shift,
lpc_coef.shape[1] * frame_shift)
"""
# synthesis
output = torch.zeros_like(excit)
for idx in range(output.shape[1]):
if gain is None:
gain_tmp = None
else:
gain_tmp = gain[:, idx // self.fs, :].unsqueeze(1)
output[:, idx:idx+1, :] = self.LPCSynthesisPytorchStep(
excit[:, idx:idx+1, :],
lpc_coef[:, idx // self.fs, :].unsqueeze(1), idx,
gain_tmp)
if self.flag_emph:
output = self._deemphasis(output)
return output
def LPC_rc2lpc(self, rc_input):
"""from reflection coefficients to LPC coefficients
Based on numpy version in core_scripts/data_io/dsp_tools.py
input
-----
rc: tensor, (batch, length, poly_order - 1)
output
------
lpc: tensor, (batch, length, poly_order)
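        Example (a sketch): with rc of shape (batch, length, 2), the returned
        lpc has shape (batch, length, 3) and lpc[..., 0] == 1.0, i.e., the
        polynomial is stored as [1, a_1, ..., a_order]
        (the standard step-up recursion from reflection to LPC coefficients)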
"""
# from (batch, length, poly_order - 1) to (batch * length, poly_order-1)
batch, frame_num, order = rc_input.shape
rc = rc_input.view(-1, order)
# (frame_num, order)
frame_num, order = rc.shape
polyOrder = order + 1
lpc_coef = torch.zeros([frame_num, 2, polyOrder], dtype=rc_input.dtype,
device=rc_input.device)
lpc_coef[:, 0, 0] = 1.0
for index in np.arange(1, polyOrder):
lpc_coef[:, 1, index] = 1.0
gamma = rc[:, index-1]
lpc_coef[:, 1, 0] = -1.0 * gamma
if index > 1:
lpc_coef[:, 1, 1:index] = lpc_coef[:, 0, 0:index-1] \
+ lpc_coef[:, 1, 0:1] * torch.flip(lpc_coef[:, 0, 0:index-1],
dims=[1])
lpc_coef[:, 0, :] = lpc_coef[:, 1,:]
lpc_coef = torch.flip(lpc_coef[:, 0, :], dims=[1])
return lpc_coef.view(batch, -1, order+1)
class CondNetwork(torch_nn.Module):
"""CondNetwork(input_dim, output_dim)
Predict reflection coefficients and gain factor from the input
Args
----
input_dim: int, input feature dimension,
output_dim: int, output feature dimension,
                = dimension of reflection coefficients + 1 for gain
"""
def __init__(self, input_dim, output_dim):
super(CondNetwork, self).__init__()
self.m_layer1 = nii_nn.Conv1dKeepLength(input_dim, input_dim, 1, 5)
self.m_layer2 = nii_nn.Conv1dKeepLength(input_dim, input_dim, 1, 5)
self.m_layer3 = torch_nn.Sequential(
nii_nn.GRULayer(input_dim, 64, True),
torch_nn.Linear(128, 128),
torch_nn.Tanh(),
torch_nn.Linear(128, output_dim)
)
self.m_act_1 = torch_nn.Tanh()
return
def forward(self, x):
""" rc_coef, gain = CondNetwork(x)
input
-----
x: tensor, (batch, length, input_dim)
output
------
rc_coef: tensor, reflection coefficients, (batch, length, out_dim - 1)
gain: tensor, gain, (batch, length, 1)
Length denotes the number of frames
"""
x_tmp = self.m_layer3(self.m_layer2(self.m_layer1(x)) + x)
part1, part2 = x_tmp.split([x_tmp.shape[-1]-1, 1], dim=-1)
# self.m_act_1(part1) is the predicted reflection coefficients
# torch.exp(part2) is the gain.
return self.m_act_1(part1), torch.exp(part2)
class FrameRateNet(torch_nn.Module):
"""FrameRateNet(input_dim, output_dim)
Args
----
input_dim: int, input feature dimension,
output_dim: int, output feature dimension,
"""
def __init__(self, input_dim, output_dim):
super(FrameRateNet, self).__init__()
self.m_layer1 = nii_nn.Conv1dKeepLength(input_dim, 128, 1, 3)
self.m_layer2 = nii_nn.Conv1dKeepLength(128, 128, 1, 3)
self.m_layer3 = torch_nn.Sequential(
torch_nn.Linear(128, 128),
torch_nn.Tanh(),
torch_nn.Linear(128, output_dim),
torch_nn.Tanh())
return
def forward(self, x):
"""y = FrameRateNet(x)
input
-----
x: tensor, (batch, length, input_dim)
output
------
y: tensor, (batch, length, out_dim)
Length denotes the number of frames
"""
return self.m_layer3(self.m_layer2(self.m_layer1(x)))
class OutputNet(torch_nn.Module):
"""OutputNet(cond_dim, feedback_dim, out_dim)
Args
----
cond_dim: int, dimension of input condition feature
feedback_dim: int, dimension of feedback feature
out_dim: int, output dimension
"""
def __init__(self, cond_dim, feedback_dim, out_dim):
super(OutputNet, self).__init__()
# Use the wrapper of GRULayer in nii_nn
self.m_gru1 = nii_nn.GRULayer(cond_dim + feedback_dim, 256)
self.m_gru2 = nii_nn.GRULayer(256 + feedback_dim, 16)
self.m_outfc = torch_nn.Sequential(
torch_nn.utils.weight_norm(
torch_nn.Linear(16, 2, bias=False), name='weight')
)
self.m_stdact = torch_nn.ReLU()
return
def _forward(self, cond, feedback):
"""Forward for training stage
"""
tmp = self.m_gru1(torch.cat([cond, feedback], dim=-1))
tmp = self.m_gru2(torch.cat([tmp, feedback], dim=-1))
return self.m_outfc(tmp)
def _forward_step(self, cond, feedback, stepidx):
"""Forward for inference stage
"""
tmp = self.m_gru1(torch.cat([cond, feedback], dim=-1), stepidx)
tmp = self.m_gru2(torch.cat([tmp, feedback], dim=-1), stepidx)
return self.m_outfc(tmp)
def forward(self, cond, feedback, stepidx=None):
"""mean, std = forward(cond, feedback, stepidx=Non)
input
-----
cond: tensor, input condition feature, (batch, length, input_dim)
feedback: tensor, feedback feature, (batch, length, feedback_dim)
stepidx: int or None, the index of the time step, starting from 0
output
------
mean: tensor, mean of the Gaussian dist., (batch, length, out_dim//2)
std: tensor, std of the Gaussian dist., (batch, length, out_dim//2)
If stepidx is None, length is equal to the waveform sequence length,
and the forward() method is in the training mode (self._forward)
If stepidx is from 0 to T, length is equal to 1, and forward() only
computes output for the t=stepidx. (self._forward_step)
The nii_nn.GRULayer will save the hidden states of GRULayer.
Note that self._forward_step must be called from stepidx=0 to stepidx=T.
"""
if stepidx is None:
output = self._forward(cond, feedback)
else:
output = self._forward_step(cond, feedback, stepidx)
mean, log_std = output.chunk(2, dim=-1)
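        # ReLU(log_std + 9) - 9 lower-bounds the log-std at -9, i.e.,
        # std >= exp(-9), which avoids numerically unstable small std values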
return mean, torch.exp(self.m_stdact(log_std + 9) - 9)
class LPCNetV1(torch_nn.Module):
"""LPCNetV1(framelen, frameshift, lpc_order, cond_dim, flag_fix_cond)
Args
----
framelen: int, frame length for LP ana/syn, in number of waveform points
frameshift: int, frame shift for LP ana/syn, in number of waveform points
lpc_order: int, LP order
cond_dim: input condition feature dimension
flag_fix_cond: bool, whether fix the condition network or not
    In training, we can use a two-stage training approach: train the condition
    network first, then fix the condition network and train the iLPCNet.
flag_fix_cond is used to indicate the stage
"""
def __init__(self, framelen, frameshift, lpc_order, cond_dim,
flag_fix_cond):
super(LPCNetV1, self).__init__()
# =====
# options
# =====
self.m_fl = framelen
self.m_fs = frameshift
self.m_lpc_order = lpc_order
# dimension of input conditional feature
self.m_cond_dim = cond_dim
# =====
# hyper-parameters
# =====
# dimension of output of frame-rate network
self.m_hid_dim = 256
# dimension of waveform
self.m_wav_dim = 1
# number of discrete pitch classes
        # We could directly feed the pitch to the network, but in LPCNet the pitch
# is quantized and embedded
self.m_pitch_cat = 256
self.m_pitch_emb = 64
self.m_emb_f0 = torch_nn.Embedding(self.m_pitch_cat, self.m_pitch_emb)
# =====
# network definition
# =====
        # LPC analyzer / synthesizer
self.m_lpc = LPCLitePytorch(framelen, frameshift, lpc_order)
# frame rate network
self.m_net_framerate = FrameRateNet(cond_dim + self.m_pitch_emb,
self.m_hid_dim)
# output network
self.m_net_out = OutputNet(self.m_hid_dim, self.m_wav_dim,
self.m_wav_dim * 2)
# condition network to convert conditional feature to rc coef and gain
# (although we will not use gain)
self.m_cond_net = CondNetwork(cond_dim, lpc_order + 1)
# loss for condition network
self.m_cond_loss = torch_nn.MSELoss()
# fix
self.flag_fix_cond = flag_fix_cond
return
def _negloglikelihood(self, data, mean, std):
""" nll = self._negloglikelihood(data, mean, std)
neg log likelihood of normal distribution on the data
input
-----
data: tensor, data, (batch, length, dim)
mean: tensor, mean of dist., (batch, length, dim)
std: tensor, std of dist., (batch, length, dim)
output
------
nll: scalar, neg log likelihood
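        Per element, nll = 0.5*log(2*pi) + log(std) + 0.5*((data-mean)/std)**2,
        averaged over batch, length, and dim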
"""
return (0.5 * np.log(2 * np.pi) \
+ torch.log(std) + 0.5 * (((data - mean)/std) ** 2)).mean()
def _convert_pitch(self, pitch_value):
""" output = self._convert_pitch(pitch_value)
input
-----
pitch_value: tensor, any shape
output
------
output: tensor in int64, quantized pitch
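        Example (a sketch): F0 = 100 Hz is mapped to bin (100 - 33) // 2 = 33;
        the result is clamped to [0, m_pitch_cat - 1]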
"""
return torch.clamp((pitch_value - 33) // 2, 0,
self.m_pitch_cat-1).to(torch.int64)
def forward(self, cond_feat, cond_feat_normed,
lpc_coef, rc_coef, gain, wav):
""" loss_wav, loss_cond = forward(cond_feat, cond_feat_normed,
lpc_coef, rc_coef, gain, wav)
input
-----
cond_feat: tensor, condition feature, unnormed
(batch, frame_num1, cond_dim)
cond_feat_normed: tensor, condition feature, normed
(batch, frame_num1, cond_dim)
lpc_coef: tensor, LP coefficients, (batch, frame_num2, lpc_order + 1)
rc_coef: tensor, reflection coeffs (batch, frame_num2, lpc_order)
gain: tensor, gain (batch, frame_num2, 1)
wav: tensor, target waveform, (batch, length, 1)
output
------
loss_wav: scalar, loss for the waveform modeling (neg log likelihood)
loss_cond: scalar, loss for the condition network
"""
# == step 1 ==
# network to convert cond_feat predict
# cond -> rc_coef
# we can compute the loss for condition network if necessary
cond_len = np.min([rc_coef.shape[1], cond_feat.shape[1]])
if self.flag_fix_cond:
with torch.no_grad():
rc_coef_pre, gain_pre = self.m_cond_net(cond_feat[:, :cond_len])
loss_cond = (self.m_cond_loss(rc_coef, rc_coef_pre)
+ self.m_cond_loss(gain_pre, gain)) * 0
else:
rc_coef_pre, gain_pre = self.m_cond_net(cond_feat[:, :cond_len])
loss_cond = (self.m_cond_loss(rc_coef, rc_coef_pre)
+ self.m_cond_loss(gain_pre, gain))
# == step 2 ==
# do LP analysis given the LP coeffs (from predicted reflection coefs)
with torch.no_grad():
# convert predicted rc_coef to LP coef
lpc_coef_pre = self.m_lpc.LPC_rc2lpc(rc_coef_pre)
# LP analysis given LPC coefficients
# wav_pre is the LP prediction
# wav_err is the LP residual
            # wav is the waveform pre-processed by the LP analyzer.
# here wav_err + wav_pre = wav
wav_pre, wav_err, _, wav = self.m_lpc.LPCAnalysisPytorch(
wav, lpc_coef_pre)
# quantize pitch, we put this line here just for convenience
pitch_quantized = self._convert_pitch(cond_feat[:, :cond_len, -1])
# == step 3 ==
# frame-rate network
# embedding F0
pitch_emb = self.m_emb_f0(pitch_quantized)
# cond -> feature
lpccond_feat = self.m_net_framerate(
torch.cat([cond_feat_normed[:, :cond_len, :], pitch_emb], dim=-1))
# duplicate (upsampling) to waveform level
lpccond_feat = lpccond_feat.repeat_interleave(self.m_fs, dim=1)
# == step 4 ==
# waveform generation network
#
# get the minimum length
wavlen = np.min([wav.shape[1],wav_pre.shape[1],lpccond_feat.shape[1]])
        # feedback waveform (with added noise); 16384 = np.power(2, 14)
noise1 = torch.randn_like(wav[:, :wavlen, :]) / 16384
feedback_wav = torch.roll(wav[:, :wavlen, :], 1, dims=1)
feedback_wav[:, 0, :] *= 0
feedback_wav += noise1
# compute LP residual mean and std
mean, std = self.m_net_out(lpccond_feat[:, :wavlen, :], feedback_wav)
# compute wav dist. mean and std
mean_wav = mean + wav_pre[:, :wavlen]
std_wav = std
# get likelihood
loss_wav = self._negloglikelihood(wav[:, :wavlen], mean_wav, std_wav)
if self.flag_fix_cond:
loss_cond = loss_cond * 0
else:
loss_wav = loss_wav * 0
return loss_cond, loss_wav
def inference(self, cond_feat, cond_feat_normed):
""" wav = inference(cond_feat)
input
-----
cond_feat: tensor, condition feature, unnormed
(batch, frame_num1, cond_dim)
cond_feat_normed, tensor, condition feature, unnormed
(batch, frame_num1, cond_dim)
output
------
wav, tensor, (batch, frame_num1 * frame_shift, 1)
"""
# prepare
batch, frame_num, _ = cond_feat.shape
xtyp = cond_feat.dtype
xdev = cond_feat.device
# quantize F0 and F0 embedding
pitch_quantized = self._convert_pitch(cond_feat[:, :, -1])
pitch_emb = self.m_emb_f0(pitch_quantized)
# == step1. ==
# predict reflection coeff from cond_feat
# rc_coef_pre (batch, frame_num, lpc_order)
rc_coef_pre, gain_pre = self.m_cond_net(cond_feat)
# == step2. ==
# from reflection coeff to LPC coef
# (batch, frame_num, lpc_order + 1)
lpc_coef_pre = self.m_lpc.LPC_rc2lpc(rc_coef_pre)
# == step3. ==
# frame rate network
lpccond_feat = self.m_net_framerate(
torch.cat([cond_feat_normed, pitch_emb], dim=-1))
# == step4. ==
# step-by-step generation
# waveform length = frame num * up-sampling rate
wavlen = frame_num * self.m_fs
# many buffers
        # buf to store the output waveform
wavbuf = torch.zeros([batch, wavlen, 1], dtype=xtyp, device=xdev)
# buf to store excitation signal
exibuf = torch.zeros_like(wavbuf)
# buf to store LPC predicted wave
prebuf = torch.zeros_like(wavbuf)
# buf to store LPC input x[n-1], ... x[n - poly_order+1]
lpcbuf = torch.zeros([batch, self.m_lpc_order, 1],
dtype=xtyp, device=xdev)
# mean and std buf
meanbuf = torch.zeros_like(wavbuf)
stdbuf = torch.zeros_like(wavbuf)
# loop
for idx in range(0, wavlen):
if idx % 1000 == 0:
print(idx, end=' ', flush=True)
frame_idx = idx // self.m_fs
# 4.1 LP predicted wav
# [- (x[n-1] * a_1 + x[n-2] * a_2 ... )]
pre_raw = self.m_lpc.LPCSynthesisPytorchCore(
lpc_coef_pre[:, frame_idx : frame_idx+1, :], lpcbuf)
# save it (for debugging)
prebuf[:, idx:idx+1, :] = pre_raw
# 4.2 predict excitation
if idx == 0:
wav_raw = torch.zeros_like(pre_raw)
else:
pass
# mean, std
mean, std = self.m_net_out(lpccond_feat[:, frame_idx:frame_idx+1],
wav_raw, idx)
meanbuf[:, idx:idx+1, :]= mean
stdbuf[:, idx:idx+1, :] = std
# sampling
exi_raw = torch.randn_like(mean) * std * 0.7 + mean
# save excit (for debugging)
exibuf[:, idx:idx+1,:] = exi_raw
# 4.3 waveform output
# excit[n] + [-(x[n-1] * a_1 + x[n-2] * a_2 ... )]
wav_raw = exi_raw + pre_raw
# save waveform
wavbuf[:, idx:idx+1,:] = wav_raw
# save it to the LPC buffer.
# It will be used by LPCSynthesisPytorchCore
lpcbuf = torch.roll(lpcbuf, -1, dims=1)
lpcbuf[:, -1, :] = wav_raw[:, 0, :]
return self.m_lpc.deemphasis(wavbuf)
if __name__ == "__main__":
print("Components of LPCNet")
| 28,731 | 33.575211 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/ilpcnet/model.py | #!/usr/bin/env python
"""
model.py for iLPCNet
version: 1
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import time
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.other_tools.display as nii_warn
import sandbox.block_nn as nii_nn
import block_lpcnet as nii_lpcnet
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
########
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
#################
## must-have
#################
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# a flag for debugging (by default False)
self.model_debug = False
#################
## model config
#################
# waveform sampling rate
self.sample_rate = prj_conf.wav_samp_rate
self.fl = prj_conf.options['framelength']
self.fs = prj_conf.options['frameshift']
self.lpc_order = prj_conf.options['lpc_order']
self.feat_reso = prj_conf.input_reso[0]
if self.fs % self.feat_reso == 0:
if self.fs >= self.feat_reso:
self.feat_upsamp = self.fs // self.feat_reso
else:
print("Adjust condition feature resolution not supported ")
sys.exit(1)
self.flag_fix_cond = args.temp_flag == 'stage2'
self.m_lpcnet = nii_lpcnet.LPCNetV1(
self.fl, self.fs, self.lpc_order, in_dim, self.flag_fix_cond)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
#return (y - self.output_mean) / self.output_std
# now the target features will be a list of features
return y
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, cond_feat, target):
"""
input
-----
output
------
loss: tensor / scalar
Note: returned loss can be directly used as the loss value
no need to write Loss()
"""
#
wav = target[0]
# (batch, frame_num, lpc_order)
lpc_coef = target[1]
# (batch, framge_num, lpc_order - 1)
rc = target[2]
# (batch, framge_num, 1)
gain = target[3]
# condition feature adjust
cond_feat_tmp = cond_feat[:, ::self.feat_upsamp]
loss_cond, loss_lpcnet = self.m_lpcnet(
cond_feat_tmp, self.normalize_input(cond_feat_tmp),
lpc_coef, rc, gain, wav)
return [[loss_cond, loss_lpcnet], [True, True]]
def inference(self, cond_feat):
"""wav = inference(mels)
input
-----
output
------
wav_new: tensor, same shape
"""
# condition feature adjust
cond_feat_tmp = cond_feat[:, ::self.feat_upsamp]
return self.m_lpcnet.inference(
cond_feat_tmp, self.normalize_input(cond_feat_tmp))
# Loss is returned by Model.forward(), no need to specify
# just a place holder
class Loss():
def __init__(self, args):
return
def compute(self, output, target):
return output
if __name__ == "__main__":
print("Definition of model")
| 5,573 | 28.807487 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/ilpcnet/config.py | #!/usr/bin/env python
"""
config.py
To merge different corpora (or just one corpus),
*_set_name are lists
*_list are lists of lists
*_dirs are lists of lists
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = ['cmu_all_trn']
val_set_name = ['cmu_all_val']
# for convenience
tmp1 = '../DATA/cmu-arctic-data-set'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = [tmp1 + '/scp/train.lst']
# val_list: list of files for the validation set. It can be None
val_list = [tmp1 + '/scp/val.lst']
# Directories for input features
# input_dirs = [[path_of_feature_1, path_of_feature_2, ..., ],
# [path_of_feature_1, path_of_feature_2, ..., ], ...]
# len(input_dirs) should be equal to len(trn_set_name) and len(val_set_name)
# input_dirs[N] is the path for trn_set_name[N] and val_set_name[N]
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp1 + '/5ms/melspec', tmp1 + '/5ms/f0']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [80, 1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.mfbsp', '.f0']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [80, 80]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [True, True]
# Similar configurations for output features
output_dirs = [[tmp1 + '/wav_16k_norm']]
output_dims = [1]
output_exts = ['.wav']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 2400
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 2400
# Other optional arguments, the definition of which depends on a specific model
# This may be imported by model.py
#
# Here, we specify the framelenth, frameshift, and lpc_order for LPC analysis
# Note that these configurations can be different from what we used to extract
# the input acoustic features, but they should be compatible.
#
# For example, input acoustic features are extracted with a frameshift of 80
# then, we can use frameshift 160 here. Since frameshift for LPC features
# is x2 of input features, the model will down-sample the input features.
# See line 148 and 167 in model.py
options = {'framelength': 160, 'frameshift': 160, 'lpc_order': 15}
# input_trans_fns and output_trans_fns are used by the DataSet.
# When loading the data from the disk, the input data will be transformed
# by input_trans_fns, output will be transformed by output_trans_fns.
# This is done before converting the data into a pytorch tensor, which
# is more convenient.
#
# They are used by ../../../core_scripts/data_io/default_data_io.py:
# f_post_data_process()
# If you want to debug into these functions, remember turn off multi workers
# $: python -m pdb main.py --num-workers 0
#
# These are the rules:
# len(input_trans_fns) == len(output_trans_fns) == len(trn_set_name)
# input_trans_fns[N] is for trn_set_name[N] and val_set_name[N]
# len(input_trans_fns[N]) == len(input_exts)
# len(output_trans_fns[N]) == len(output_exts)
# input_trans_fns[N][M] is for the input_exts[M] of trn_set_name[N]
# ...
import block_lpcnet
input_trans_fns = [[]]
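# get_excit (defined in block_lpcnet.py) returns [wav, lpc_coef, rc, gain] for
# each target waveform; the frame length (160), frame shift (160), and LPC
# order (15) below are expected to match the 'options' dictionary above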
output_trans_fns = [[lambda x: block_lpcnet.get_excit(x, 160, 160, 15)]]
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = ['cmu_all_test_tiny']
# List of test set data (in the same way as trn_list or val_list)
# Or, for convenience, you may directly load test_set list
test_list = [['slt_arctic_b0474']]
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = [[tmp1 + '/5ms/melspec', tmp1 + '/5ms/f0']]
# Directories for output features, which are [[]]
test_output_dirs = [[]]
#
#test_output_trans_fns = output_trans_fns
| 5,067 | 34.690141 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/hifigan/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.nn_manager.nn_manager_GAN as nii_nn_wrapper_GAN
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler,
'pin_memory': True}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
inout_trans_fns = prj_conf.input_output_trans_fn \
if hasattr(prj_conf, 'input_output_trans_fn') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
else:
val_set = None
# initialize the model and loss function
model_G = prj_model.ModelGenerator(
trn_set.get_in_dim(), trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
model_D = prj_model.ModelDiscriminator(
trn_set.get_in_dim(), trn_set.get_out_dim(),
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = None
# initialize the optimizer
optimizer_G_wrap = nii_op_wrapper.OptimizerWrapper(model_G, args)
optimizer_D_wrap = nii_op_wrapper.OptimizerWrapper(model_D, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint_G = None
checkpoint_D = None
else:
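            # --trained-model may be a comma-separated pair "G_ckpt,D_ckpt"
            # so that both generator and discriminator checkpoints are resumed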
tmp_str = args.trained_model.split(",")
checkpoint_G = torch.load(tmp_str[0])
if len(tmp_str) > 1:
checkpoint_D = torch.load(tmp_str[1])
else:
checkpoint_D = None
# start training
nii_nn_wrapper_GAN.f_train_wrapper_GAN(
args, model_G, model_D,
loss_wrapper, device,
optimizer_G_wrap, optimizer_D_wrap,
trn_set, val_set,
checkpoint_G, checkpoint_D)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
inout_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'test_input_output_trans_fn') \
else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq = None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
# initialize model
model = prj_model.ModelGenerator(
test_set.get_in_dim(), test_set.get_out_dim(), args, prj_conf)
if args.trained_model == "":
print("Please provide ---trained-model")
sys.exit(1)
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(
args, model, device, test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 8,338 | 35.574561 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/05-nn-vocoders/hifigan/model.py | #!/usr/bin/env python
"""
model.py for HiFiGAN
HifiGAN is based on code from https://github.com/jik876/hifi-gan
HiFi-GAN: Generative Adversarial Networks for Efficient and
High Fidelity Speech Synthesis
By Jungil Kong, Jaehyeon Kim, Jaekyoung Bae
MIT License
Copyright (c) 2020 Jungil Kong
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import sandbox.util_frontend as nii_frontend
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
#########
## Loss definition
#########
class LossMel(torch_nn.Module):
""" Wrapper to define loss function
"""
def __init__(self, sr):
super(LossMel, self).__init__()
"""
"""
        # mel-spectrogram extractor
fl = 1024
fs = 256
fn = 1024
num_mel = 80
self.m_frontend = nii_frontend.MFCC(
fl, fs, fn, sr, num_mel,
with_emphasis=False, with_delta=False, flag_for_MelSpec=True)
# loss function
self.loss_weight = 45
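        # (note) this weight corresponds to lambda_mel = 45 in the HiFi-GAN paper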
self.loss = torch_nn.L1Loss()
return
def forward(self, outputs, target):
with torch.no_grad():
# (batch, length, 1) -> (batch, length) -> (batch, length, dim)
target_mel = self.m_frontend(target.squeeze(-1))
output_mel = self.m_frontend(outputs.squeeze(-1))
# done
return self.loss(output_mel, target_mel) * self.loss_weight
#####
## Model Generator definition
#####
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
def get_padding(kernel_size, dilation=1):
# L_out = (L_in + 2*pad - dila * (ker - 1) - 1) // stride + 1
# stride -> 1
# L_out = L_in + 2*pad - dila * (ker - 1)
# L_out == L_in ->
# 2 * pad = dila * (ker - 1)
return int((kernel_size*dilation - dilation)/2)
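# For example, kernel_size=3, dilation=1 -> padding=1, and kernel_size=7,
# dilation=1 -> padding=3 (the "same" padding used by conv_pre / conv_post below)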
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
class ResBlock1(torch_nn.Module):
"""
"""
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.leaky_relu_slope = 0.1
self.convs1 = torch_nn.ModuleList([
weight_norm(
torch_nn.Conv1d(
channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(
torch_nn.Conv1d(
channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(
torch_nn.Conv1d(
channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
# initialize the weight
self.convs1.apply(init_weights)
self.convs2 = torch_nn.ModuleList([
weight_norm(
torch_nn.Conv1d(
channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(
torch_nn.Conv1d(
channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(
torch_nn.Conv1d(
channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
return
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = torch_nn_func.leaky_relu(x, self.leaky_relu_slope)
xt = c1(xt)
xt = torch_nn_func.leaky_relu(xt, self.leaky_relu_slope)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class Generator(torch_nn.Module):
"""
"""
def __init__(self, in_dim,
resblock_kernel_sizes, resblock_dilation_sizes,
upsample_rates, upsample_kernel_sizes,
upsample_init_channel):
super(Generator, self).__init__()
self.leaky_relu_slope = 0.1
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.num_kernels = len(resblock_kernel_sizes)
self.upsample_rates = upsample_rates
self.upsample_kernel_sizes = upsample_kernel_sizes
self.num_upsamples = len(upsample_rates)
self.upsample_init_channel = upsample_init_channel
self.conv_pre = weight_norm(
torch_nn.Conv1d(in_dim, upsample_init_channel, 7, 1, padding=3))
self.ups = torch_nn.ModuleList()
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
# L_out = (L_in - 1) * stride - 2 * pad + dila * (ker - 1) + 1
# dilation = 1 ->
# L_out = (L_in - 1) * stride - 2 * pad + ker
# L_out = L_in * stride - 2 * pad + (ker - stride)
self.ups.append(weight_norm(
torch_nn.ConvTranspose1d(
upsample_init_channel//(2**i),
upsample_init_channel//(2**(i+1)),
k, u, padding=(k-u)//2)))
#
resblock = ResBlock1
self.resblocks = torch_nn.ModuleList()
for i in range(len(self.ups)):
ch = upsample_init_channel//(2**(i+1))
for j, (k, d) in enumerate(
zip(resblock_kernel_sizes, resblock_dilation_sizes)):
self.resblocks.append(resblock(ch, k, d))
self.conv_post = weight_norm(torch_nn.Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
return
def forward(self, x):
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = torch_nn_func.leaky_relu(x, self.leaky_relu_slope)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
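            # average the outputs of the parallel residual blocks
            # (multi-receptive-field fusion over the different kernel sizes)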
x = xs / self.num_kernels
x = torch_nn_func.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class ModelGenerator(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(ModelGenerator, self).__init__()
########## basic config ########
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
###############################
######
## model definition
######
h = prj_conf.options['hifigan_config']
self.m_gen = Generator(in_dim,
h['resblock_kernel_sizes'], h['resblock_dilation_sizes'],
h['upsample_rates'], h['upsample_kernel_sizes'],
h['upsample_initial_channel'])
self.m_mel_loss = LossMel(prj_conf.wav_samp_rate)
self.flag_removed_weight_norm = False
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, x):
if not self.training and not self.flag_removed_weight_norm:
self.m_gen.remove_weight_norm()
self.flag_removed_weight_norm = True
x = self.normalize_input(x)
gen_output = self.m_gen(x.permute(0, 2, 1)).permute(0, 2, 1)
return gen_output
def loss_aux(self, nat_wav, gen_wav, data_in):
return self.m_mel_loss(gen_wav, nat_wav)
#########
## Model Discriminator definition
#########
class DiscriminatorP(torch_nn.Module):
def __init__(self, period,
kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.leaky_relu_slope = 0.1
self.period = period
        norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = torch_nn.ModuleList([
norm_f(
torch_nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1),
padding=(get_padding(5, 1), 0))),
norm_f(
torch_nn.Conv2d(32, 128, (kernel_size, 1), (stride, 1),
padding=(get_padding(5, 1), 0))),
norm_f(
torch_nn.Conv2d(128, 512, (kernel_size, 1), (stride, 1),
padding=(get_padding(5, 1), 0))),
norm_f(
torch_nn.Conv2d(512, 1024, (kernel_size, 1), (stride, 1),
padding=(get_padding(5, 1), 0))),
norm_f(
torch_nn.Conv2d(1024, 1024, (kernel_size, 1), 1,
padding=(2, 0))),
])
self.conv_post = norm_f(
torch_nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
return
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = torch_nn_func.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
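        # e.g., (b, 1, 16000) with period 5 -> (b, 1, 3200, 5)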
for l in self.convs:
x = l(x)
x = torch_nn_func.leaky_relu(x, self.leaky_relu_slope)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiPeriodDiscriminator(torch_nn.Module):
def __init__(self):
super(MultiPeriodDiscriminator, self).__init__()
self.discriminators = torch_nn.ModuleList([
DiscriminatorP(2),
DiscriminatorP(3),
DiscriminatorP(5),
DiscriminatorP(7),
DiscriminatorP(11),
])
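        # the periods are primes, following the HiFi-GAN configuration, so
        # that the sub-discriminators capture different periodic structures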
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(torch_nn.Module):
def __init__(self, use_spectral_norm=False):
super(DiscriminatorS, self).__init__()
self.leaky_relu_slope = 0.1
        norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = torch_nn.ModuleList([
norm_f(
torch_nn.Conv1d(1, 128, 15, 1, padding=7)),
norm_f(
torch_nn.Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(
torch_nn.Conv1d(128, 256, 41, 2, groups=16, padding=20)),
norm_f(
torch_nn.Conv1d(256, 512, 41, 4, groups=16, padding=20)),
norm_f(
torch_nn.Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
norm_f(
torch_nn.Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
norm_f(
torch_nn.Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(torch_nn.Conv1d(1024, 1, 3, 1, padding=1))
return
def forward(self, x):
fmap = []
for l in self.convs:
x = l(x)
x = torch_nn_func.leaky_relu(x, self.leaky_relu_slope)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(torch_nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = torch_nn.ModuleList([
DiscriminatorS(use_spectral_norm=True),
DiscriminatorS(),
DiscriminatorS(),
])
self.meanpools = torch_nn.ModuleList([
torch_nn.AvgPool1d(4, 2, padding=2),
torch_nn.AvgPool1d(4, 2, padding=2)
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class ModelDiscriminator(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(ModelDiscriminator, self).__init__()
self.m_mpd = MultiPeriodDiscriminator()
self.m_msd = MultiScaleDiscriminator()
# done
return
def _feature_loss(self, fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
def _discriminator_loss(self, disc_real_outputs, disc_generated_outputs):
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
def _generator_loss(self, disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
def loss_for_D(self, nat_wav, gen_wav_detached, input_feat):
# gen_wav has been detached
nat_wav_tmp = nat_wav.permute(0, 2, 1)
gen_wav_tmp = gen_wav_detached.permute(0, 2, 1)
# MPD
y_df_hat_r, y_df_hat_g, _, _ = self.m_mpd(nat_wav_tmp, gen_wav_tmp)
loss_disc_f, _, _ = self._discriminator_loss(y_df_hat_r, y_df_hat_g)
# MSD
y_ds_hat_r, y_ds_hat_g, _, _ = self.m_msd(nat_wav_tmp, gen_wav_tmp)
loss_disc_s, _, _ = self._discriminator_loss(y_ds_hat_r, y_ds_hat_g)
return loss_disc_f + loss_disc_s
def loss_for_G(self, nat_wav, gen_wav, input_feat):
nat_wav_tmp = nat_wav.permute(0, 2, 1)
gen_wav_tmp = gen_wav.permute(0, 2, 1)
# MPD
y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = self.m_mpd(nat_wav_tmp,
gen_wav_tmp)
# MSD
y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = self.m_msd(nat_wav_tmp,
gen_wav_tmp)
loss_fm_f = self._feature_loss(fmap_f_r, fmap_f_g)
loss_fm_s = self._feature_loss(fmap_s_r, fmap_s_g)
loss_gen_f, _ = self._generator_loss(y_df_hat_g)
loss_gen_s, _ = self._generator_loss(y_ds_hat_g)
return loss_fm_f + loss_fm_s + loss_gen_f + loss_gen_s
if __name__ == "__main__":
print("Definition of model")
| 19,494 | 33.565603 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/config_train_toyset_ID_2.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
import pandas as pd
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# this will be used as the name of cache files created for each set
#
# Name for the seed training set, in case you merge multiple data sets as
# a single training set, just specify the name for each subset.
# Here we only have 1 training subset
trn_set_name = ['asvspoof2019_toyset_vocoded_trn']
val_set_name = ['asvspoof2019_toyset_vocoded_val']
# For convenience, specify a path to the toy data set
# because config*.py will be copied into model-*/config_AL_train_toyset/NN
# we need to use ../../../
tmp = os.path.dirname(__file__) + '/../../../DATA/toy_example_vocoded'
# File list for training and development sets
# (text file, one file name per line, without name extension)
# we need to provide one lst for each subset
# trn_list[n] will correspond to trn_set_name[n]
# for the training set, the baseline method directly loads
# all bonafide and spoofed data
trn_list = [tmp + '/scp/train.lst']
# for development set
val_list = [tmp + '/scp/dev.lst']
# Directories for input data
# We need to provide the path to the directory that saves the input data.
# We assume waveforms for training and development of one subset
# are stored in the same directory.
# Hence, input_dirs[n] is for trn_set_name[n] and val_set_name[n]
#
# If you need to specify a separate val_input_dirs
# val_input_dirs = [[PATH_TO_DEVELOPMENT_SET]]
#
# Each input_dirs[n] is a list,
# for example, input_dirs[n] = [wav, speaker_label, augmented_wav, ...]
#
# Here, input for each file is a single waveform
input_dirs = [[tmp + '/train_dev']]
val_input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# What is the dimension of the input feature
# len(input_dims) should be equal to len(input_dirs[n])
#
# Here, input for each file is a single waveform, dimension is 1
input_dims = [1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# len(input_exts) should be equal to len(input_dirs[n])
#
# Here, input file extension is .wav
# We use .wav not .flac
input_exts = ['.wav']
# Temporal resolution for input features
# This is not relevant for CM but for other projects
# len(input_reso) should be equal to len(input_dirs[n])
# Here, it is 1 for waveform
input_reso = [1]
# Whether input features should be z-normalized
# This is not relevant for CM but for other projects
# len(input_norm) should be equal to len(input_dirs[n])
# Here, it is False for waveform
# We don't normalize the waveform
input_norm = [False]
# Similar configurations for output features
# Here, we set output to empty because we will load
# the target labels from protocol rather than output feature
# '.bin' is also a place holder
output_dirs = [[] for x in input_dirs]
val_output_dirs = [[]]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# ===
# Waveform configuration
# ===
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
# Here, we don't use truncate_seq included in data_io, but we will do
# truncation in data_augmentation functions
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# This is used to load the protocol file(s)
# Multiple protocol files can be specified in the list
#
# Note that these protocols should cover all the
# training, development, and pool set data.
# Otherwise, the code will raise an error
#
# Here, this protocol will cover all the data in the toy set
optional_argument = [tmp + '/protocol.txt']
# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
#
# path to the SSL model (it is downloaded by 01_download.sh)
ssl_front_end_path = os.path.dirname(__file__) \
+ '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
# this must be provided.
ssl_front_end_out_dim = 1024
# ===
# data augmentation option
# ===
# for training with aligned bonafide-spoofed mini-batches,
# we have to use this customized function to make sure that
# we can load the aligned files
# We will use function in data_augment.py to process the loaded mini-batch
import data_augment
# path to the waveform directory (the same as input_dirs above)
wav_path = None
# path to the protocol of spoofed data ( the same as optional_argument)
protocol_path = None
# configuration to use Pandas to parse the protocol
protocol_cols = None
# length to truncate the waveform
trim_len = 64000
####
# wrapper of data augmentation functions
# the wrapper calls the data_augmentation function defined in
# data_augment.py.
# these wrapper will be called in data_io when loading the data
# from disk
####
# wrapper for training set
input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len)],
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len)]]
output_trans_fns = [[], []]
# wrapper for development set
# development does nothing but simply truncate the waveforms
val_input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len)],
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len)]]
val_output_trans_fns = [[], []]
#########################################################
## Configuration for inference stage
#########################################################
# This part is not used in this project
# They are place holders
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2
| 6,753 | 31.009479 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train a model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
val_in_trans_fns = prj_conf.val_input_trans_fns \
if hasattr(prj_conf, 'val_input_trans_fns') else None
val_out_trans_fns = prj_conf.val_output_trans_fns \
if hasattr(prj_conf, 'val_output_trans_fns') else None
if val_in_trans_fns is None:
val_in_trans_fns = in_trans_fns
if val_out_trans_fns is None:
val_out_trans_fns = out_trans_fns
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if hasattr(prj_conf, 'val_input_dirs'):
val_input_dirs = prj_conf.val_input_dirs
else:
val_input_dirs = prj_conf.input_dirs
if hasattr(prj_conf, 'val_output_dirs'):
val_output_dirs = prj_conf.val_output_dirs
else:
val_output_dirs = prj_conf.output_dirs
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
val_input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
val_output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = val_in_trans_fns,
output_augment_funcs = val_out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done with training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 8,626 | 36.34632 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/config_train_toyset_ID_4.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
import pandas as pd
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# this will be used as the name of cache files created for each set
#
# Name for the seed training set, in case you merge multiple data sets as
# a single training set, just specify the name for each subset.
# Here we only have 1 training subset
trn_set_name = ['asvspoof2019_toyset_vocoded_trn_bona']
val_set_name = ['asvspoof2019_toyset_vocoded_val']
# For convenience, specify a path to the toy data set
# because config*.py will be copied into model-*/config_AL_train_toyset/NN
# we need to use ../../../
tmp = os.path.dirname(__file__) + '/../../../DATA/toy_example_vocoded'
# File list for training and development sets
# (text file, one file name per line, without name extension)
# we need to provide one lst for each subset
# trn_list[n] will correspond to trn_set_name[n]
# for training set, we only load bonafide through default data IO
# then, (paired) spoofed data will be loaded through data augmentation function
trn_list = [tmp + '/scp/train_bonafide.lst']
# for development set
val_list = [tmp + '/scp/dev.lst']
# Directories for input data
# We need to provide the path to the directory that saves the input data.
# We assume waveforms for training and development of one subset
# are stored in the same directory.
# Hence, input_dirs[n] is for trn_set_name[n] and val_set_name[n]
#
# If you need to specify a separate val_input_dirs
# val_input_dirs = [[PATH_TO_DEVELOPMENT_SET]]
#
# Each input_dirs[n] is a list,
# for example, input_dirs[n] = [wav, speaker_label, augmented_wav, ...]
#
# Here, input for each file is a single waveform
input_dirs = [[tmp + '/train_dev']]
val_input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# What is the dimension of the input feature
# len(input_dims) should be equal to len(input_dirs[n])
#
# Here, input for each file is a single waveform, dimension is 1
input_dims = [1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# len(input_exts) should be equal to len(input_dirs[n])
#
# Here, input file extension is .wav
# We use .wav not .flac
input_exts = ['.wav']
# Temporal resolution for input features
# This is not relevant for CM but for other projects
# len(input_reso) should be equal to len(input_dirs[n])
# Here, it is 1 for waveform
input_reso = [1]
# Whether input features should be z-normalized
# This is not relevant for CM but for other projects
# len(input_norm) should be equal to len(input_dirs[n])
# Here, it is False for waveform
# We don't normalize the waveform
input_norm = [False]
# Similar configurations for output features
# Here, we set output to empty because we will load
# the target labels from protocol rather than output feature
# '.bin' is also a place holder
output_dirs = [[] for x in input_dirs]
val_output_dirs = [[]]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# ===
# Waveform configuration
# ===
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
# Here, we don't use truncate_seq included in data_io, but we will do
# truncation in data_augmentation functions
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# This is used to load the protocol file(s)
# Multiple protocol files can be specified in the list
#
# Note that these protocols should cover all the
# training, development, and pool set data.
# Otherwise, the code will raise an error
#
# Here, this protocol will cover all the data in the toy set
optional_argument = [tmp + '/protocol.txt']
# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
#
# path to the SSL model (it is downloaded by 01_download.sh)
ssl_front_end_path = os.path.dirname(__file__) \
+ '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
# this must be provided.
ssl_front_end_out_dim = 1024
# ===
# data augmentation option
# ===
# for training with aligned bonafide-spoofed mini-batches,
# we have to use this customized function to make sure that
# we can load the aligned files
# We will use function in data_augment.py to process the loaded mini-batch
import data_augment
# path to the waveform directory (the same as input_dirs above)
wav_path = tmp + '/train_dev'
# path to the protocol of spoofed data ( the same as optional_argument)
protocol_path = tmp + '/protocol.txt'
# configuration to use Pandas to parse the protocol
protocol_cols = ['voice', 'trial', '-', 'attack', 'label']
# length to truncate the waveform
trim_len = 64000
# load protocol
protocol_pd = pd.read_csv(protocol_path, sep = ' ', names = protocol_cols)
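# each protocol line is expected to hold five space-separated columns
# (voice, trial, -, attack, label); hypothetical examples:
# LA_0001 LA_T_1000001 - - bonafide
# LA_0001 vocoder1_LA_T_1000001 - vocoder1 spoof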
# how many number of spoofed trials exist for every bonafide
# this toy dataset has four vocoded (spoofed) versions of each bonafide trial
num_spoofed_trials = 4
# whether this database provides aligned bonafide and spoofed pairs
flag_database_aligned = False
def spoof_name_loader(bona_name,
wav_path=wav_path,
data_pd = protocol_pd,
num_spoofed = num_spoofed_trials,
flag_database_aligned = flag_database_aligned):
"""
"""
# get the spoofed data list that contains the name of the bona fide
# spoofs can be empty for non-aligned database, in that case,
if flag_database_aligned:
# if aligned we assume the spoofed and bonafide audio file names have
# common part, thus, we can select those trials from the list
# in toy dataset
# bonafide trial has name LA_X_YYYY
# spoofed trials are named as vocoder1_LA_X_YYYY, vocoder2_LA_X_YYYY
idx = data_pd.query('label == "spoof"')['trial'].str.contains(bona_name)
spoofs = data_pd.query('label == "spoof"')[idx]['trial'].to_list()
if len(spoofs) == 0:
print("Warn: cannot find spoofed data for {:s}".format(bona_name))
print("Possible reason: {:s} is spoofed trial".format(bona_name))
print("How-to-resolve: make sure that trn_list={:s}".format(
str(trn_list)
))
print(" in config only contains bona fide data")
else:
        # if not aligned, we randomly sample spoofed trials
idx = data_pd.query('label == "spoof"')['trial'].str.contains('LA_T')
tmp_pd = data_pd.query('label == "spoof"')[idx].sample(n=num_spoofed)
spoofs = tmp_pd['trial'].to_list()
spoof_paths = []
for spoof_name in spoofs:
filepath = wav_path + '/' + spoof_name + '.wav'
if not os.path.isfile(filepath):
print("Cannot find {:s}".format(filepath))
else:
spoof_paths.append(filepath)
return spoof_paths
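# Illustrative example (trial names are hypothetical): for a bona fide trial
# 'LA_T_1000001', spoof_name_loader returns up to num_spoofed_trials wav paths,
# e.g. [wav_path + '/vocoder1_LA_T_1002345.wav', ...]; because
# flag_database_aligned is False here, the spoofed trials are sampled at
# random from the spoofed LA_T_* entries in the protocol.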
####
# wrapper of data augmentation functions
# the wrapper calls the data_augmentation function defined in
# data_augment.py.
# these wrapper will be called in data_io when loading the data
# from disk
####
# wrapper for training set
input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
output_trans_fns = [[], []]
# wrapper for development set
# development does nothing but simply truncate the waveforms
val_input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
val_output_trans_fns = [[], []]
#########################################################
## Configuration for inference stage
#########################################################
# This part is not used in this project
# They are place holders
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2
| 9,125 | 34.235521 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/config_train_toyset_ID_6.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
import pandas as pd
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# this will be used as the name of cache files created for each set
#
# Name for the seed training set, in case you merge multiple data sets as
# a single training set, just specify the name for each subset.
# Here we only have 1 training subset
trn_set_name = ['asvspoof2019_toyset_vocoded_trn_bona']
val_set_name = ['asvspoof2019_toyset_vocoded_val']
# For convenience, specify a path to the toy data set
# because config*.py will be copied into model-*/config_AL_train_toyset/NN
# we need to use ../../../
tmp = os.path.dirname(__file__) + '/../../../DATA/toy_example_vocoded'
# File list for training and development sets
# (text file, one file name per line, without name extension)
# we need to provide one lst for each subset
# trn_list[n] will correspond to trn_set_name[n]
# for training set, we only load bonafide through default data IO
# then, (paired) spoofed data will be loaded through data augmentation function
trn_list = [tmp + '/scp/train_bonafide.lst']
# for development set
val_list = [tmp + '/scp/dev.lst']
# Directories for input data
# We need to provide the path to the directory that saves the input data.
# We assume waveforms for training and development of one subset
# are stored in the same directory.
# Hence, input_dirs[n] is for trn_set_name[n] and val_set_name[n]
#
# If you need to specify a separate val_input_dirs
# val_input_dirs = [[PATH_TO_DEVELOPMENT_SET]]
#
# Each input_dirs[n] is a list,
# for example, input_dirs[n] = [wav, speaker_label, augmented_wav, ...]
#
# Here, input for each file is a single waveform
input_dirs = [[tmp + '/train_dev']]
val_input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# What is the dimension of the input feature
# len(input_dims) should be equal to len(input_dirs[n])
#
# Here, input for each file is a single waveform, dimension is 1
input_dims = [1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# len(input_exts) should be equal to len(input_dirs[n])
#
# Here, input file extension is .wav
# We use .wav not .flac
input_exts = ['.wav']
# Temporal resolution for input features
# This is not relevant for CM but for other projects
# len(input_reso) should be equal to len(input_dirs[n])
# Here, it is 1 for waveform
input_reso = [1]
# Whether input features should be z-normalized
# This is not relevant for CM but for other projects
# len(input_norm) should be equal to len(input_dirs[n])
# Here, it is False for waveform
# We don't normalize the waveform
input_norm = [False]
# Similar configurations for output features
# Here, we set output to empty because we will load
# the target labels from protocol rather than output feature
# '.bin' is also a place holder
output_dirs = [[] for x in input_dirs]
val_output_dirs = [[]]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# ===
# Waveform configuration
# ===
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
# Here, we don't use truncate_seq included in data_io, but we will do
# truncation in data_augmentation functions
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# This is used to load the protocol file(s)
# Multiple protocol files can be specified in the list
#
# Note that these protocols should cover all the
# training, development, and pool set data.
# Otherwise, the code will raise an error
#
# Here, this protocol will cover all the data in the toy set
optional_argument = [tmp + '/protocol.txt']
# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
#
# path to the SSL model (it is downloaded by 01_download.sh)
ssl_front_end_path = os.path.dirname(__file__) \
+ '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
# this must be provided.
ssl_front_end_out_dim = 1024
# ===
# data augmentation option
# ===
# for training with aligned bonafide-spoofed mini-batches,
# we have to use this customized function to make sure that
# we can load the aligned files
# We will use function in data_augment.py to process the loaded mini-batch
import data_augment
# path to the waveform directory (the same as input_dirs above)
wav_path = tmp + '/train_dev'
# path to the protocol of spoofed data ( the same as optional_argument)
protocol_path = tmp + '/protocol.txt'
# configuration to use Pandas to parse the protocol
protocol_cols = ['voice', 'trial', '-', 'attack', 'label']
# length to truncate the waveform
trim_len = 64000
# load protocol
protocol_pd = pd.read_csv(protocol_path, sep = ' ', names = protocol_cols)
# how many number of spoofed trials exist for every bonafide
# this toy dataset has four vocoded (spoofed) versions of each bonafide trial
num_spoofed_trials = 4
# whether this database provides aligned bonafide and spoofed pairs
flag_database_aligned = False
def spoof_name_loader(bona_name,
wav_path=wav_path,
data_pd = protocol_pd,
num_spoofed = num_spoofed_trials,
flag_database_aligned = flag_database_aligned):
"""
"""
# get the spoofed data list that contains the name of the bona fide
# spoofs can be empty for non-aligned database, in that case,
if flag_database_aligned:
# if aligned we assume the spoofed and bonafide audio file names have
# common part, thus, we can select those trials from the list
# in toy dataset
# bonafide trial has name LA_X_YYYY
# spoofed trials are named as vocoder1_LA_X_YYYY, vocoder2_LA_X_YYYY
idx = data_pd.query('label == "spoof"')['trial'].str.contains(bona_name)
spoofs = data_pd.query('label == "spoof"')[idx]['trial'].to_list()
if len(spoofs) == 0:
print("Warn: cannot find spoofed data for {:s}".format(bona_name))
print("Possible reason: {:s} is spoofed trial".format(bona_name))
print("How-to-resolve: make sure that trn_list={:s}".format(
str(trn_list)
))
print(" in config only contains bona fide data")
else:
        # if not aligned, we randomly sample spoofed trials
idx = data_pd.query('label == "spoof"')['trial'].str.contains('LA_T')
tmp_pd = data_pd.query('label == "spoof"')[idx].sample(n=num_spoofed)
spoofs = tmp_pd['trial'].to_list()
spoof_paths = []
for spoof_name in spoofs:
filepath = wav_path + '/' + spoof_name + '.wav'
if not os.path.isfile(filepath):
print("Cannot find {:s}".format(filepath))
else:
spoof_paths.append(filepath)
return spoof_paths
####
# wrapper of data augmentation functions
# the wrapper calls the data_augmentation function defined in
# data_augment.py.
# these wrapper will be called in data_io when loading the data
# from disk
####
# wrapper for training set
input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
output_trans_fns = [[], []]
# wrapper for development set
# development does nothing but simply truncate the waveforms
val_input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
val_output_trans_fns = [[], []]
#########################################################
## Configuration for inference stage
#########################################################
# This part is not used in this project
# They are place holders
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2
| 9,125 | 34.235521 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/config_train_toyset_ID_7.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
import pandas as pd
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# this will be used as the name of cache files created for each set
#
# Name for the seed training set, in case you merge multiple data sets as
# a single training set, just specify the name for each subset.
# Here we only have 1 training subset
trn_set_name = ['asvspoof2019_toyset_vocoded_trn_bona']
val_set_name = ['asvspoof2019_toyset_vocoded_val']
# For convenience, specify a path to the toy data set
# because config*.py will be copied into model-*/config_AL_train_toyset/NN
# we need to use ../../../
tmp = os.path.dirname(__file__) + '/../../../DATA/toy_example_vocoded'
# File list for training and development sets
# (text file, one file name per line, without name extension)
# we need to provide one lst for each subset
# trn_list[n] will correspond to trn_set_name[n]
# for training set, we only load bonafide through default data IO
# then, (paired) spoofed data will be loaded through data augmentation function
trn_list = [tmp + '/scp/train_bonafide.lst']
# for development set
val_list = [tmp + '/scp/dev.lst']
# Directories for input data
# We need to provide the path to the directory that saves the input data.
# We assume waveforms for training and development of one subset
# are stored in the same directory.
# Hence, input_dirs[n] is for trn_set_name[n] and val_set_name[n]
#
# If you need to specify a separate val_input_dirs
# val_input_dirs = [[PATH_TO_DEVELOPMENT_SET]]
#
# Each input_dirs[n] is a list,
# for example, input_dirs[n] = [wav, speaker_label, augmented_wav, ...]
#
# Here, input for each file is a single waveform
input_dirs = [[tmp + '/train_dev']]
val_input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# What is the dimension of the input feature
# len(input_dims) should be equal to len(input_dirs[n])
#
# Here, input for each file is a single waveform, dimension is 1
input_dims = [1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# len(input_exts) should be equal to len(input_dirs[n])
#
# Here, input file extension is .wav
# We use .wav not .flac
input_exts = ['.wav']
# Temporal resolution for input features
# This is not relevant for CM but for other projects
# len(input_reso) should be equal to len(input_dirs[n])
# Here, it is 1 for waveform
input_reso = [1]
# Whether input features should be z-normalized
# This is not relevant for CM but for other projects
# len(input_norm) should be equal to len(input_dirs[n])
# Here, it is False for waveform
# We don't normalize the waveform
input_norm = [False]
# Similar configurations for output features
# Here, we set output to empty because we will load
# the target labels from protocol rather than output feature
# '.bin' is also a place holder
output_dirs = [[] for x in input_dirs]
val_output_dirs = [[]]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# ===
# Waveform configuration
# ===
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
# Here, we don't use truncate_seq included in data_io, but we will do
# truncation in data_augmentation functions
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# This is used to load the protocol file(s)
# Multiple protocol files can be specified in the list
#
# Note that these protocols should cover all the
# training, development, and pool set data.
# Otherwise, the code will raise an error
#
# Here, this protocol will cover all the data in the toy set
optional_argument = [tmp + '/protocol.txt']
# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
#
# path to the SSL model (it is downloaded by 01_download.sh)
ssl_front_end_path = os.path.dirname(__file__) \
+ '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
# this must be provided.
ssl_front_end_out_dim = 1024
# ===
# data augmentation option
# ===
# for training with aligned bonafide-spoofed mini-batches,
# we have to use this customized function to make sure that
# we can load the aligned files
# We will use function in data_augment.py to process the loaded mini-batch
import data_augment
# path to the waveform directory (the same as input_dirs above)
wav_path = tmp + '/train_dev'
# path to the protocol of spoofed data ( the same as optional_argument)
protocol_path = tmp + '/protocol.txt'
# configuration to use Pandas to parse the protocol
protocol_cols = ['voice', 'trial', '-', 'attack', 'label']
# length to truncate the waveform
trim_len = 64000
# load protocol
protocol_pd = pd.read_csv(protocol_path, sep = ' ', names = protocol_cols)
# how many number of spoofed trials exist for every bonafide
# this toy dataset has four vocoded (spoofed) versions of each bonafide trial
num_spoofed_trials = 4
# whether this database provides aligned bonafide and spoofed pairs
flag_database_aligned = True
def spoof_name_loader(bona_name,
wav_path=wav_path,
data_pd = protocol_pd,
num_spoofed = num_spoofed_trials,
flag_database_aligned = flag_database_aligned):
"""
"""
# get the spoofed data list that contains the name of the bona fide
# spoofs can be empty for non-aligned database, in that case,
if flag_database_aligned:
# if aligned we assume the spoofed and bonafide audio file names have
# common part, thus, we can select those trials from the list
# in toy dataset
# bonafide trial has name LA_X_YYYY
# spoofed trials are named as vocoder1_LA_X_YYYY, vocoder2_LA_X_YYYY
idx = data_pd.query('label == "spoof"')['trial'].str.contains(bona_name)
spoofs = data_pd.query('label == "spoof"')[idx]['trial'].to_list()
if len(spoofs) == 0:
print("Warn: cannot find spoofed data for {:s}".format(bona_name))
print("Possible reason: {:s} is spoofed trial".format(bona_name))
print("How-to-resolve: make sure that trn_list={:s}".format(
str(trn_list)
))
print(" in config only contains bona fide data")
else:
        # if not aligned, we randomly sample spoofed trials
idx = data_pd.query('label == "spoof"')['trial'].str.contains('LA_T')
tmp_pd = data_pd.query('label == "spoof"')[idx].sample(n=num_spoofed)
spoofs = tmp_pd['trial'].to_list()
spoof_paths = []
for spoof_name in spoofs:
filepath = wav_path + '/' + spoof_name + '.wav'
if not os.path.isfile(filepath):
print("Cannot find {:s}".format(filepath))
else:
spoof_paths.append(filepath)
return spoof_paths
####
# wrapper of data augmentation functions
# the wrapper calls the data_augmentation function defined in
# data_augment.py.
# these wrapper will be called in data_io when loading the data
# from disk
####
# wrapper for training set
input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
output_trans_fns = [[], []]
# wrapper for development set
# development does nothing but simply truncate the waveforms
val_input_trans_fns = [
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)],
[lambda x, y: data_augment.wav_aug_wrapper_val(
x, y, wav_samp_rate, trim_len, spoof_name_loader)]]
val_output_trans_fns = [[], []]
#########################################################
## Configuration for inference stage
#########################################################
# This part is not used in this project
# They are place holders
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2
| 9,124 | 34.23166 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-2/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_metric
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
          input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
# put the model to GPU if it not there
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav, flag_fix_ssl):
""" output = front_end(wav, datalength)
input:
------
wav: tensor, (batch, length, 1)
datalength: list of int, length of wav in the mini-batch
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
# if SSL is not fine-tuned
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
# if SSL model is fine-tuned
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
# a simple full-connected network for frame-level feature processing
self.m_frame_level = torch_nn.Sequential(
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.out_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag))
# linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
feat_ = self.m_frame_level(feat)
# average pooling -> (batch, self.out_dim)
feat_utt = feat_.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Here we use supervised contrastive feature loss
"""
def __init__(self):
super(FeatLossModule, self).__init__()
# similarity metric for two sequences
self.sim_metric_seq = lambda mat1, mat2: torch.bmm(
mat1.permute(1, 0, 2), mat2.permute(1, 2, 0)).mean(0)
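        # given mat1 and mat2 of shape (batch, length, dim), the bmm of the
        # permuted tensors gives (length, batch, batch) frame-wise scores;
        # .mean(0) averages over frames -> a (batch, batch) similarity matrix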
return
def forward(self, data, target):
"""
"""
if data.ndim > 3:
# loss between frame-level feature sequences
return nii_loss_metric.supcon_loss(
data, target, sim_metric=self.sim_metric_seq, length_norm=True)
else:
# loss between embeddings
return nii_loss_metric.supcon_loss(data, target, length_norm=True)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian Dropout parameter
# This is not for contrastive loss or vocoded data
# This is for uncertainty estimation.
####
# dropout rate
self.m_mcdp_rate = 0.5
# whether use Bayesian dropout during inference
self.m_mcdp_flag = True
        # How many samples to draw for Bayesian uncertainty estimation.
        # Naively, we could duplicate one input 144 times in a single
        # mini-batch and thus obtain 144 dropout samples for each input
        # trial in one pass.
        # But such a mini-batch may be too large in size.
        # Hence, we do the sampling three times, each time drawing 48 samples
        #
        # If your GPU memory is insufficient, try to reduce these numbers
self.m_mcdropout_num = [48, 48, 48]
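        # in total, 48 + 48 + 48 = 144 Bayesian samples per input trial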
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
# back end
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag)
#####
# Loss function
#####
        # we do not use the mixup loss in the experiments.
        # it is kept here for compatibility.
self.m_ce_loss = torch_nn.CrossEntropyLoss()
# feature loss module
self.m_cr_loss = FeatLossModule()
# weight for the contrastive feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # skip utterances that are too short
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
# back end
        # The following is not related to the vocoded training data or the
        # contrastive loss; it comes from another project on uncertainty
        # estimation, and the code is reused as it is.
#
# Bayesian dropout sampling
logits_bag = []
for dropnum in self.m_mcdropout_num:
            # Simulate Bayesian sampling during inference.
            # To draw multiple samples, we duplicate the SSL output,
            # pass the copies through dropout, and obtain a different
            # dropout result for each copy.
# (dropnum * batch, frame, frame_dim)
feat_tmp = feat_vec.repeat(dropnum, 1, 1)
# (dropnum * batch, 2)
logi_tmp, _ = self.m_back_end(feat_tmp)
logits_bag.append(logi_tmp)
# (total_dropnum * batch, 2)
bs = x.shape[0]
total_samp_num = sum(self.m_mcdropout_num)
logits = torch.cat(logits_bag, dim=0).view(total_samp_num, bs, -1)
# compute CM scores, model uncertainty, data uncertainty
# we need to use CM scores to compute EER
scores, epss, ales = nii_bayesian.compute_llr_eps_ale(logits)
targets = self._get_target(filenames)
for filename, target, score, eps, ale in \
zip(filenames, targets, scores, epss, ales):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, score.item(), eps.item(), ale.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# not used in this project
flag_fix_ssl = self.temp_flag == 's2'
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
        # target vectors are created by _get_target_vec
        # this one is for the CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this one is for the contrastive loss (augmentation is ignored)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new, flag_fix_ssl)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
        # a standard cross-entropy loss is computed over all views
loss_ce = self.m_ce_loss(logits, target)
# feature loss
if self.m_feat:
# not implemented for baseline method
return loss_ce
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and type(x) is list and x[0].shape[2] > 1:
# if training with multi-view data
# contrastive feature loss requires multi-view of data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,097 | 33.018797 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-2/data_augment.py | #!/usr/bin/env python
"""Functions for data augmentation
These functions are written using Numpy.
They should be used before data are cast into torch.tensor.
For example, use them in config.py:input_trans_fns or output_trans_fns
"""
import os
import sys
import numpy as np
from core_scripts.other_tools import debug as nii_debug
from core_scripts.data_io import wav_tools as nii_wav_tools
from core_scripts.data_io import wav_augmentation as nii_wav_aug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def wav_aug_wrapper(input_data,
name_info,
wav_samp_rate,
length):
# use this API to randomly trim the input length
input_data = nii_wav_aug.batch_pad_for_multiview(
[input_data], wav_samp_rate, length, random_trim_nosil=True)[0]
return input_data
def wav_aug_wrapper_val(input_data,
name_info,
wav_samp_rate,
length):
    # use this API to pad / trim the input to the target length
    # (no random trimming for the validation set)
input_data = nii_wav_aug.batch_pad_for_multiview(
[input_data], wav_samp_rate, length, random_trim_nosil=False)[0]
return input_data
if __name__ == "__main__":
print("Tools for data augmentation")
| 1,311 | 29.511628 | 72 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-6/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_metric
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
        input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # move the model to the input's device / dtype if it is not there yet
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav, flag_fix_ssl):
""" output = front_end(wav, datalength)
input:
------
wav: tensor, (batch, length, 1)
        flag_fix_ssl: bool, whether to fix the SSL model; note that the
                      branch below checks self.flag_fix_ssl instead
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
# if SSL is not fine-tuned
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
# if SSL model is fine-tuned
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
# a simple full-connected network for frame-level feature processing
self.m_frame_level = torch_nn.Sequential(
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.out_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag))
# linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
        feat: tensor, (batch, frame_num, feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
feat_ = self.m_frame_level(feat)
# average pooling -> (batch, self.out_dim)
feat_utt = feat_.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
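
def _back_end_shape_sketch():
    """A minimal sketch (illustration only, with made-up sizes): pass a random
    frame-level feature tensor through BackEnd to see the output shapes.
    """
    back_end = BackEnd(input_dim=128, out_dim=128, num_classes=2,
                       dropout_rate=0.5, dropout_flag=False)
    feat = torch.randn(4, 100, 128)   # (batch, frame_num, feat_dim)
    logits, emb_vec = back_end(feat)
    # logits: (4, 2), emb_vec: (4, 128)
    return logits.shape, emb_vec.shape
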
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Here we use supervised contrastive feature loss
"""
def __init__(self):
super(FeatLossModule, self).__init__()
# similarity metric for two sequences
self.sim_metric_seq = lambda mat1, mat2: torch.bmm(
mat1.permute(1, 0, 2), mat2.permute(1, 2, 0)).mean(0)
return
def forward(self, data, target):
"""
"""
if data.ndim > 3:
# loss between frame-level feature sequences
return nii_loss_metric.supcon_loss(
data, target, sim_metric=self.sim_metric_seq, length_norm=True)
else:
# loss between embeddings
return nii_loss_metric.supcon_loss(data, target, length_norm=True)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian Dropout parameter
# This is not for contrastive loss or vocoded data
# This is for uncertainty estimation.
####
# dropout rate
self.m_mcdp_rate = 0.5
# whether use Bayesian dropout during inference
self.m_mcdp_flag = True
        # How many samples to draw for Bayesian uncertainty estimation.
        # Naively, we could duplicate each input 144 times and create a
        # mini-batch; by doing so, we simultaneously do sampling and get
        # 144 samples for each input datum.
        # But such a mini-batch may be too large in size.
        # Hence, we do the sampling three times, each time drawing 48 samples
        #
        # If your GPU memory is insufficient, try to reduce these numbers
self.m_mcdropout_num = [48, 48, 48]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
# back end
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag)
#####
# Loss function
#####
        # we do not use the mixup loss in these experiments;
        # MixUpCE is used here only for compatibility (gamma is set to 1.0).
self.m_ce_loss = nii_loss_metric.MixUpCE()
# feature loss module
self.m_cr_loss = FeatLossModule()
# weight for the contrastive feature loss
self.m_feat = 1.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # skip utterances that are too short
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
# back end
        # The following is not related to the vocoded training data or the
        # contrastive loss; it comes from another project on uncertainty
        # estimation, and the code is reused as it is.
#
# Bayesian dropout sampling
logits_bag = []
for dropnum in self.m_mcdropout_num:
            # Simulate Bayesian sampling during inference.
            # To draw multiple samples, we duplicate the SSL output,
            # pass the copies through dropout, and obtain a different
            # dropout result for each copy.
# (dropnum * batch, frame, frame_dim)
feat_tmp = feat_vec.repeat(dropnum, 1, 1)
# (dropnum * batch, 2)
logi_tmp, _ = self.m_back_end(feat_tmp)
logits_bag.append(logi_tmp)
# (total_dropnum * batch, 2)
bs = x.shape[0]
total_samp_num = sum(self.m_mcdropout_num)
logits = torch.cat(logits_bag, dim=0).view(total_samp_num, bs, -1)
# compute CM scores, model uncertainty, data uncertainty
# we need to use CM scores to compute EER
scores, epss, ales = nii_bayesian.compute_llr_eps_ale(logits)
targets = self._get_target(filenames)
for filename, target, score, eps, ale in \
zip(filenames, targets, scores, epss, ales):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, score.item(), eps.item(), ale.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x_tuple, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# not used in this project
flag_fix_ssl = self.temp_flag == 's2'
        # input will be [input_tensor, target_1, target_2, gamma, feat_class, ...]
x = x_tuple[0]
tar1 = x_tuple[1]
tar2 = x_tuple[2]
gamma = x_tuple[3]
feat_class = x_tuple[4]
        # input_tensor is (batchsize, length, num_systems, num_views),
        # assembled in data_augment
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_view = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
        # target vectors are loaded from data_augment;
        # we don't use _get_target_vec here
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target1 = tar1.flatten().to(torch.long)
target2 = tar2.flatten().to(torch.long)
gamma = gamma.flatten()
# front-end & back-end
feat_vec = self.m_front_end(x_new, flag_fix_ssl)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
        # although the loss is implemented as a mixup CE,
        # gamma is set to 1.0, so it is equivalent to a normal
        # CE loss without mixup
loss_ce = self.m_ce_loss(logits, target1, target2, gamma)
# feature loss
if self.m_feat:
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(
bat_siz, num_sys, num_view, -1, feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_view, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx], feat_class[bat_idx])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx], feat_class[bat_idx])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and type(x) is list and x[0].shape[2] > 1:
# if training with multi-view data
# contrastive feature loss requires multi-view of data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,709 | 33.142336 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-6/data_augment.py | #!/usr/bin/env python
"""Functions for data augmentation
These functions are written using Numpy.
They should be used before data are cast into torch.tensor.
For example, use them in config.py:input_trans_fns or output_trans_fns
"""
import os
import sys
import numpy as np
from core_scripts.other_tools import debug as nii_debug
from core_scripts.data_io import wav_tools as nii_wav_tools
from core_scripts.data_io import wav_augmentation as nii_wav_aug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def load_spoof_trials(bona_name, spoof_loader):
"""spoof_audios = load_spoof_utterance
"""
# load the spoofed data
spoofs = spoof_loader(bona_name)
spoof_list = []
for filepath in spoofs:
if os.path.isfile(filepath):
_, data = nii_wav_tools.waveReadAsFloat(filepath)
else:
print("Cannot find {:s}".format(filepath))
sys.exit(1)
# make the shape (length, 1)
spoof_list.append(np.expand_dims(data, axis=1))
return spoof_list
def mixup_wav(input_wav_list, target_list, alpha, beta, mixup_method='wav'):
"""
"""
# output buffer
out_buf, out_tar1, out_tar2, out_gamma = [], [], [], []
# assume input_wav_list[0] is bonafide
bona_idx = 0
bona_wav = input_wav_list[bona_idx]
# mixed bonafide and spoofed
for spoof_idx, spoof_wav in enumerate(input_wav_list):
if spoof_idx == 0:
# this is bonafide
continue
else:
# mixup
gamma = np.random.beta(alpha, beta)
mixed_wav = nii_wav_aug.morph_wavform(
bona_wav, spoof_wav, gamma, method=mixup_method)
out_buf.append(mixed_wav)
out_tar1.append(target_list[bona_idx])
out_tar2.append(target_list[spoof_idx])
out_gamma.append(gamma)
# return
return out_buf, out_tar1, out_tar2, out_gamma
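
def _mixup_concept_sketch(bona_wav, spoof_wav, alpha=1.0, beta=1.0):
    """A minimal sketch (illustration only) of the idea behind mixup_wav:
    draw gamma from Beta(alpha, beta) and morph the two waveforms. A plain
    linear interpolation is shown here as a stand-in; mixup_wav itself
    delegates the morphing to nii_wav_aug.morph_wavform.
    """
    gamma = np.random.beta(alpha, beta)
    mixed_wav = gamma * bona_wav + (1.0 - gamma) * spoof_wav
    return mixed_wav, gamma
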
def wav_aug_wrapper(input_data,
name_info,
wav_samp_rate,
length,
spoof_loader):
# load spoofed trials
spoof_trials = load_spoof_trials(
name_info.seq_name, spoof_loader)
# use this API to randomly trim the input length
batch_data = [input_data] + spoof_trials
batch_data = nii_wav_aug.batch_pad_for_multiview(
batch_data, wav_samp_rate, length, random_trim_nosil=True)
# first target is for bonafide
# the rest are for spoofed
orig_target = [0] * len(batch_data)
orig_target[0] = 1
# rawboost to create multiple view data
aug_list = [nii_wav_aug.RawBoostWrapper12(x) for x in batch_data]
# to be compatible with the rest of the code,
tar_list_1, tar_list_2 = orig_target, orig_target
# gamma_list=1.0 will be equivalent to normal CE
gamma_list = [1.0] * len(batch_data)
# assign bonafide and spoofed data to different feature classes
# for supervised contrastive learning loss
new_feat_class = [2] * len(batch_data)
new_feat_class[0] = 1
# merge the original data and mixed data
new_wav_list = [np.concatenate(batch_data, axis=1),
np.concatenate(aug_list, axis=1)]
new_tar_list_1 = [np.array(orig_target), np.array(tar_list_1)]
new_tar_list_2 = [np.array(orig_target), np.array(tar_list_2)]
new_gamma_list = [np.array([1.0] * len(batch_data)), np.array(gamma_list)]
# waveform stack to (length, num_of_wav, num_of_aug)
output = np.stack(new_wav_list, axis=2)
# label stack to (num_of_wav, num_of_aug)
new_tar_1 = np.stack(new_tar_list_1, axis=1)
new_tar_2 = np.stack(new_tar_list_2, axis=1)
# gamma the same shape as label
new_gamma = np.stack(new_gamma_list, axis=1)
# (num_of_wav)
new_feat_class = np.array(new_feat_class)
return [output, new_tar_1, new_tar_2, new_gamma,
new_feat_class, new_feat_class]
def wav_aug_wrapper_val(input_data,
name_info,
wav_samp_rate,
length,
spoof_name_loader):
    # use this API to pad / trim the input to the target length
    # (no random trimming for the validation set)
input_data = nii_wav_aug.batch_pad_for_multiview(
[input_data], wav_samp_rate, length, random_trim_nosil=False)[0]
return input_data
if __name__ == "__main__":
print("Tools for data augmentation")
| 4,445 | 31.452555 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-4/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_metric
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
        input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # move the model to the input's device / dtype if it is not there yet
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav, flag_fix_ssl):
""" output = front_end(wav, datalength)
input:
------
wav: tensor, (batch, length, 1)
        flag_fix_ssl: bool, whether to fix the SSL model; note that the
                      branch below checks self.flag_fix_ssl instead
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
# if SSL is not fine-tuned
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
# if SSL model is fine-tuned
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
# a simple full-connected network for frame-level feature processing
self.m_frame_level = torch_nn.Sequential(
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.out_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag))
# linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
        feat: tensor, (batch, frame_num, feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
feat_ = self.m_frame_level(feat)
# average pooling -> (batch, self.out_dim)
feat_utt = feat_.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Here we use supervised contrastive feature loss
"""
def __init__(self):
super(FeatLossModule, self).__init__()
# similarity metric for two sequences
self.sim_metric_seq = lambda mat1, mat2: torch.bmm(
mat1.permute(1, 0, 2), mat2.permute(1, 2, 0)).mean(0)
return
def forward(self, data, target):
"""
"""
if data.ndim > 3:
# loss between frame-level feature sequences
return nii_loss_metric.supcon_loss(
data, target, sim_metric=self.sim_metric_seq, length_norm=True)
else:
# loss between embeddings
return nii_loss_metric.supcon_loss(data, target, length_norm=True)
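
def _mc_dropout_reshape_sketch():
    """A minimal sketch (illustration only, with made-up sizes): how the
    Bayesian-dropout inference in Model duplicates features and regroups the
    resulting logits into (total_samples, batch, num_class). A random tensor
    stands in for the back-end output.
    """
    bs, frames, dim, num_class = 2, 50, 128, 2
    mcdropout_num = [3, 3]
    feat_vec = torch.randn(bs, frames, dim)
    logits_bag = []
    for dropnum in mcdropout_num:
        # (dropnum * batch, frames, dim)
        feat_tmp = feat_vec.repeat(dropnum, 1, 1)
        # stand-in for the back-end logits, (dropnum * batch, num_class)
        logi_tmp = torch.randn(feat_tmp.shape[0], num_class)
        logits_bag.append(logi_tmp)
    total_samp_num = sum(mcdropout_num)
    logits = torch.cat(logits_bag, dim=0).view(total_samp_num, bs, -1)
    # logits: (6, 2, 2)
    return logits.shape
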
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian Dropout parameter
# This is not for contrastive loss or vocoded data
# This is for uncertainty estimation.
####
# dropout rate
self.m_mcdp_rate = 0.5
# whether use Bayesian dropout during inference
self.m_mcdp_flag = True
        # How many samples to draw for Bayesian uncertainty estimation.
        # Naively, we could duplicate each input 144 times and create a
        # mini-batch; by doing so, we simultaneously do sampling and get
        # 144 samples for each input datum.
        # But such a mini-batch may be too large in size.
        # Hence, we do the sampling three times, each time drawing 48 samples
        #
        # If your GPU memory is insufficient, try to reduce these numbers
self.m_mcdropout_num = [48, 48, 48]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
# back end
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag)
#####
# Loss function
#####
        # we do not use the mixup loss in these experiments;
        # MixUpCE is used here only for compatibility (gamma is set to 1.0).
self.m_ce_loss = nii_loss_metric.MixUpCE()
# feature loss module
self.m_cr_loss = FeatLossModule()
# weight for the contrastive feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # skip utterances that are too short
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
# back end
        # The following is not related to the vocoded training data or the
        # contrastive loss; it comes from another project on uncertainty
        # estimation, and the code is reused as it is.
#
# Bayesian dropout sampling
logits_bag = []
for dropnum in self.m_mcdropout_num:
            # Simulate Bayesian sampling during inference.
            # To draw multiple samples, we duplicate the SSL output,
            # pass the copies through dropout, and obtain a different
            # dropout result for each copy.
# (dropnum * batch, frame, frame_dim)
feat_tmp = feat_vec.repeat(dropnum, 1, 1)
# (dropnum * batch, 2)
logi_tmp, _ = self.m_back_end(feat_tmp)
logits_bag.append(logi_tmp)
# (total_dropnum * batch, 2)
bs = x.shape[0]
total_samp_num = sum(self.m_mcdropout_num)
logits = torch.cat(logits_bag, dim=0).view(total_samp_num, bs, -1)
# compute CM scores, model uncertainty, data uncertainty
# we need to use CM scores to compute EER
scores, epss, ales = nii_bayesian.compute_llr_eps_ale(logits)
targets = self._get_target(filenames)
for filename, target, score, eps, ale in \
zip(filenames, targets, scores, epss, ales):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, score.item(), eps.item(), ale.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x_tuple, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# not used in this project
flag_fix_ssl = self.temp_flag == 's2'
        # input will be [input_tensor, target_1, target_2, gamma, feat_class, ...]
x = x_tuple[0]
tar1 = x_tuple[1]
tar2 = x_tuple[2]
gamma = x_tuple[3]
feat_class = x_tuple[4]
        # input_tensor is (batchsize, length, num_systems, num_views),
        # assembled in data_augment
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_view = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
        # target vectors are loaded from data_augment;
        # we don't use _get_target_vec here
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target1 = tar1.flatten().to(torch.long)
target2 = tar2.flatten().to(torch.long)
gamma = gamma.flatten()
# front-end & back-end
feat_vec = self.m_front_end(x_new, flag_fix_ssl)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
        # although the loss is implemented as a mixup CE,
        # gamma is set to 1.0, so it is equivalent to a normal
        # CE loss without mixup
loss_ce = self.m_ce_loss(logits, target1, target2, gamma)
# feature loss
if self.m_feat:
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(
bat_siz, num_sys, num_view, -1, feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_view, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx], feat_class[bat_idx])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx], feat_class[bat_idx])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and type(x) is list and x[0].shape[2] > 1:
# if training with multi-view data
# contrastive feature loss requires multi-view of data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,709 | 33.142336 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-4/data_augment.py | #!/usr/bin/env python
"""Functions for data augmentation
These functions are written using Numpy.
They should be used before data are cast into torch.tensor.
For example, use them in config.py:input_trans_fns or output_trans_fns
"""
import os
import sys
import numpy as np
from core_scripts.other_tools import debug as nii_debug
from core_scripts.data_io import wav_tools as nii_wav_tools
from core_scripts.data_io import wav_augmentation as nii_wav_aug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def load_spoof_trials(bona_name, spoof_loader):
"""spoof_audios = load_spoof_utterance
"""
# load the spoofed data
spoofs = spoof_loader(bona_name)
spoof_list = []
for filepath in spoofs:
if os.path.isfile(filepath):
_, data = nii_wav_tools.waveReadAsFloat(filepath)
else:
print("Cannot find {:s}".format(filepath))
sys.exit(1)
# make the shape (length, 1)
spoof_list.append(np.expand_dims(data, axis=1))
return spoof_list
def mixup_wav(input_wav_list, target_list, alpha, beta, mixup_method='wav'):
"""
"""
# output buffer
out_buf, out_tar1, out_tar2, out_gamma = [], [], [], []
# assume input_wav_list[0] is bonafide
bona_idx = 0
bona_wav = input_wav_list[bona_idx]
# mixed bonafide and spoofed
for spoof_idx, spoof_wav in enumerate(input_wav_list):
if spoof_idx == 0:
# this is bonafide
continue
else:
# mixup
gamma = np.random.beta(alpha, beta)
mixed_wav = nii_wav_aug.morph_wavform(
bona_wav, spoof_wav, gamma, method=mixup_method)
out_buf.append(mixed_wav)
out_tar1.append(target_list[bona_idx])
out_tar2.append(target_list[spoof_idx])
out_gamma.append(gamma)
# return
return out_buf, out_tar1, out_tar2, out_gamma
def wav_aug_wrapper(input_data,
name_info,
wav_samp_rate,
length,
spoof_loader):
# load spoofed trials
spoof_trials = load_spoof_trials(
name_info.seq_name, spoof_loader)
# use this API to randomly trim the input length
batch_data = [input_data] + spoof_trials
batch_data = nii_wav_aug.batch_pad_for_multiview(
batch_data, wav_samp_rate, length, random_trim_nosil=True)
# first target is for bonafide
# the rest are for spoofed
orig_target = [0] * len(batch_data)
orig_target[0] = 1
# rawboost to create multiple view data
aug_list = [nii_wav_aug.RawBoostWrapper12(x) for x in batch_data]
# to be compatible with the rest of the code,
tar_list_1, tar_list_2 = orig_target, orig_target
# gamma_list=1.0 will be equivalent to normal CE
gamma_list = [1.0] * len(batch_data)
# assign bonafide and spoofed data to different feature classes
# for supervised contrastive learning loss
new_feat_class = [2] * len(batch_data)
new_feat_class[0] = 1
# merge the original data and mixed data
new_wav_list = [np.concatenate(batch_data, axis=1),
np.concatenate(aug_list, axis=1)]
new_tar_list_1 = [np.array(orig_target), np.array(tar_list_1)]
new_tar_list_2 = [np.array(orig_target), np.array(tar_list_2)]
new_gamma_list = [np.array([1.0] * len(batch_data)), np.array(gamma_list)]
# waveform stack to (length, num_of_wav, num_of_aug)
output = np.stack(new_wav_list, axis=2)
# label stack to (num_of_wav, num_of_aug)
new_tar_1 = np.stack(new_tar_list_1, axis=1)
new_tar_2 = np.stack(new_tar_list_2, axis=1)
# gamma the same shape as label
new_gamma = np.stack(new_gamma_list, axis=1)
# (num_of_wav)
new_feat_class = np.array(new_feat_class)
return [output, new_tar_1, new_tar_2, new_gamma,
new_feat_class, new_feat_class]
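
def _stacked_output_shape_sketch():
    """A minimal sketch (illustration only, with made-up sizes): the array
    layout returned by wav_aug_wrapper for 1 bonafide + 3 spoofed waveforms
    of 64000 samples and 2 views (original and RawBoost-augmented).
    """
    num_wav, length = 4, 64000
    batch_data = [np.random.randn(length, 1) for _ in range(num_wav)]
    aug_list = [np.random.randn(length, 1) for _ in range(num_wav)]
    output = np.stack([np.concatenate(batch_data, axis=1),
                       np.concatenate(aug_list, axis=1)], axis=2)
    # output: (length, num_of_wav, num_of_views) = (64000, 4, 2)
    target = np.stack([np.array([1, 0, 0, 0]), np.array([1, 0, 0, 0])], axis=1)
    # target: (num_of_wav, num_of_views) = (4, 2)
    return output.shape, target.shape
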
def wav_aug_wrapper_val(input_data,
name_info,
wav_samp_rate,
length,
spoof_name_loader):
    # use this API to pad / trim the input to the target length
    # (no random trimming for the validation set)
input_data = nii_wav_aug.batch_pad_for_multiview(
[input_data], wav_samp_rate, length, random_trim_nosil=False)[0]
return input_data
if __name__ == "__main__":
print("Tools for data augmentation")
| 4,445 | 31.452555 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-7/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_metric
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
        input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # move the model to the input's device / dtype if it is not there yet
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav, flag_fix_ssl):
""" output = front_end(wav, datalength)
input:
------
wav: tensor, (batch, length, 1)
        flag_fix_ssl: bool, whether to fix the SSL model; note that the
                      branch below checks self.flag_fix_ssl instead
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
# if SSL is not fine-tuned
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
# if SSL model is fine-tuned
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
# a simple full-connected network for frame-level feature processing
self.m_frame_level = torch_nn.Sequential(
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.in_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag),
torch_nn.Linear(self.in_dim, self.out_dim),
torch_nn.LeakyReLU(),
nii_nn.DropoutForMC(self.m_mcdp_rate,self.m_mcdp_flag))
# linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
        feat: tensor, (batch, frame_num, feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
feat_ = self.m_frame_level(feat)
# average pooling -> (batch, self.out_dim)
feat_utt = feat_.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Here we use supervised contrastive feature loss
"""
def __init__(self):
super(FeatLossModule, self).__init__()
# similarity metric for two sequences
self.sim_metric_seq = lambda mat1, mat2: torch.bmm(
mat1.permute(1, 0, 2), mat2.permute(1, 2, 0)).mean(0)
return
def forward(self, data, target):
"""
"""
if data.ndim > 3:
# loss between frame-level feature sequences
return nii_loss_metric.supcon_loss(
data, target, sim_metric=self.sim_metric_seq, length_norm=True)
else:
# loss between embeddings
return nii_loss_metric.supcon_loss(data, target, length_norm=True)
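
def _multi_view_reshape_sketch():
    """A minimal sketch (illustration only, with made-up sizes): how the
    multi-view forward pass in Model regroups flattened frame-level features
    and embeddings before computing the supervised contrastive loss.
    """
    bat_siz, num_sys, num_view, frames, dim = 2, 4, 2, 50, 128
    feat_vec = torch.randn(bat_siz * num_sys * num_view, frames, dim)
    emb_vec = torch.randn(bat_siz * num_sys * num_view, dim)
    feat_vec_ = feat_vec.view(
        bat_siz, num_sys, num_view, -1, feat_vec.shape[-1])
    emb_vec_ = emb_vec.view(bat_siz, num_sys, num_view, -1)
    # feat_vec_[i]: (num_sys, num_view, frames, dim) -> frame-level loss
    # emb_vec_[i]:  (num_sys, num_view, dim)         -> embedding loss
    return feat_vec_.shape, emb_vec_.shape
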
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian Dropout parameter
# This is not for contrastive loss or vocoded data
# This is for uncertainty estimation.
####
# dropout rate
self.m_mcdp_rate = 0.5
# whether use Bayesian dropout during inference
self.m_mcdp_flag = True
        # How many samples to draw for Bayesian uncertainty estimation.
        # Naively, we could duplicate each input 144 times and create a
        # mini-batch; by doing so, we simultaneously do sampling and get
        # 144 samples for each input datum.
        # But such a mini-batch may be too large in size.
        # Hence, we do the sampling three times, each time drawing 48 samples
        #
        # If your GPU memory is insufficient, try to reduce these numbers
self.m_mcdropout_num = [48, 48, 48]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
# back end
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag)
#####
# Loss function
#####
        # we do not use the mixup loss in these experiments;
        # MixUpCE is used here only for compatibility (gamma is set to 1.0).
self.m_ce_loss = nii_loss_metric.MixUpCE()
# feature loss module
self.m_cr_loss = FeatLossModule()
# weight for the contrastive feature loss
self.m_feat = 1.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # skip utterances that are too short
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
# back end
        # The following is not related to the vocoded training data or the
        # contrastive loss; it comes from another project on uncertainty
        # estimation, and the code is reused as it is.
#
# Bayesian dropout sampling
logits_bag = []
for dropnum in self.m_mcdropout_num:
            # Simulate Bayesian sampling during inference.
            # To draw multiple samples, we duplicate the SSL output,
            # pass the copies through dropout, and obtain a different
            # dropout result for each copy.
# (dropnum * batch, frame, frame_dim)
feat_tmp = feat_vec.repeat(dropnum, 1, 1)
# (dropnum * batch, 2)
logi_tmp, _ = self.m_back_end(feat_tmp)
logits_bag.append(logi_tmp)
# (total_dropnum * batch, 2)
bs = x.shape[0]
total_samp_num = sum(self.m_mcdropout_num)
logits = torch.cat(logits_bag, dim=0).view(total_samp_num, bs, -1)
# compute CM scores, model uncertainty, data uncertainty
# we need to use CM scores to compute EER
scores, epss, ales = nii_bayesian.compute_llr_eps_ale(logits)
targets = self._get_target(filenames)
for filename, target, score, eps, ale in \
zip(filenames, targets, scores, epss, ales):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, score.item(), eps.item(), ale.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x, flag_fix_ssl=True)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x_tuple, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# not used in this project
flag_fix_ssl = self.temp_flag == 's2'
        # input will be [input_tensor, target_1, target_2, gamma, feat_class, ...]
x = x_tuple[0]
tar1 = x_tuple[1]
tar2 = x_tuple[2]
gamma = x_tuple[3]
feat_class = x_tuple[4]
        # input_tensor is (batchsize, length, num_systems, num_views),
        # assembled in data_augment
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_view = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
        # target vectors are loaded from data_augment;
        # we don't use _get_target_vec here
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target1 = tar1.flatten().to(torch.long)
target2 = tar2.flatten().to(torch.long)
gamma = gamma.flatten()
# front-end & back-end
feat_vec = self.m_front_end(x_new, flag_fix_ssl)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
# although the loss is implemented in mixup
# we set gamma to 1.0, so mixup becomes equivalent to a normal
# loss without mixup
loss_ce = self.m_ce_loss(logits, target1, target2, gamma)
# feature loss
if self.m_feat:
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(
bat_siz, num_sys, num_view, -1, feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_view, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx], feat_class[bat_idx])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx], feat_class[bat_idx])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and type(x) is list and x[0].shape[2] > 1:
# if training with multi-view data
# contrastive feature loss requires multi-view of data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,709 | 33.142336 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/09-asvspoof-vocoded-trn/model-ID-7/data_augment.py | #!/usr/bin/env python
"""Functions for data augmentation
These functions are written using Numpy.
They should be used before data are casted into torch.tensor.
For example, use them in config.py:input_trans_fns or output_trans_fns
"""
import os
import sys
import numpy as np
from core_scripts.other_tools import debug as nii_debug
from core_scripts.data_io import wav_tools as nii_wav_tools
from core_scripts.data_io import wav_augmentation as nii_wav_aug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def load_spoof_trials(bona_name, spoof_loader):
"""spoof_audios = load_spoof_utterance
"""
# load the spoofed data
spoofs = spoof_loader(bona_name)
spoof_list = []
for filepath in spoofs:
if os.path.isfile(filepath):
_, data = nii_wav_tools.waveReadAsFloat(filepath)
else:
print("Cannot find {:s}".format(filepath))
sys.exit(1)
# make the shape (length, 1)
spoof_list.append(np.expand_dims(data, axis=1))
return spoof_list
def mixup_wav(input_wav_list, target_list, alpha, beta, mixup_method='wav'):
"""
"""
# output buffer
out_buf, out_tar1, out_tar2, out_gamma = [], [], [], []
# assume input_wav_list[0] is bonafide
bona_idx = 0
bona_wav = input_wav_list[bona_idx]
# mixed bonafide and spoofed
for spoof_idx, spoof_wav in enumerate(input_wav_list):
if spoof_idx == 0:
# this is bonafide
continue
else:
# mixup
gamma = np.random.beta(alpha, beta)
mixed_wav = nii_wav_aug.morph_wavform(
bona_wav, spoof_wav, gamma, method=mixup_method)
out_buf.append(mixed_wav)
out_tar1.append(target_list[bona_idx])
out_tar2.append(target_list[spoof_idx])
out_gamma.append(gamma)
# return
return out_buf, out_tar1, out_tar2, out_gamma
def wav_aug_wrapper(input_data,
name_info,
wav_samp_rate,
length,
spoof_loader):
# load spoofed trials
spoof_trials = load_spoof_trials(
name_info.seq_name, spoof_loader)
# use this API to randomly trim the input length
batch_data = [input_data] + spoof_trials
batch_data = nii_wav_aug.batch_pad_for_multiview(
batch_data, wav_samp_rate, length, random_trim_nosil=True)
# first target is for bonafide
# the rest are for spoofed
orig_target = [0] * len(batch_data)
orig_target[0] = 1
# rawboost to create multiple view data
aug_list = [nii_wav_aug.RawBoostWrapper12(x) for x in batch_data]
# to be compatible with the rest of the code,
tar_list_1, tar_list_2 = orig_target, orig_target
# gamma_list=1.0 will be equivalent to normal CE
gamma_list = [1.0] * len(batch_data)
# assign bonafide and spoofed data to different feature classes
# for supervised contrastive learning loss
new_feat_class = [2] * len(batch_data)
new_feat_class[0] = 1
# merge the original data and mixed data
new_wav_list = [np.concatenate(batch_data, axis=1),
np.concatenate(aug_list, axis=1)]
new_tar_list_1 = [np.array(orig_target), np.array(tar_list_1)]
new_tar_list_2 = [np.array(orig_target), np.array(tar_list_2)]
new_gamma_list = [np.array([1.0] * len(batch_data)), np.array(gamma_list)]
# waveform stack to (length, num_of_wav, num_of_aug)
output = np.stack(new_wav_list, axis=2)
# label stack to (num_of_wav, num_of_aug)
new_tar_1 = np.stack(new_tar_list_1, axis=1)
new_tar_2 = np.stack(new_tar_list_2, axis=1)
# gamma the same shape as label
new_gamma = np.stack(new_gamma_list, axis=1)
# (num_of_wav)
new_feat_class = np.array(new_feat_class)
return [output, new_tar_1, new_tar_2, new_gamma,
new_feat_class, new_feat_class]
def wav_aug_wrapper_val(input_data,
name_info,
wav_samp_rate,
length,
spoof_name_loader):
# use this API to randomly trim the input length
input_data = nii_wav_aug.batch_pad_for_multiview(
[input_data], wav_samp_rate, length, random_trim_nosil=False)[0]
return input_data
if __name__ == "__main__":
print("Tools for data augmentation")
| 4,445 | 31.452555 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_modules/a_softmax.py | #!/usr/bin/env python
"""
a_softmax layers
copied from https://github.com/Joyako/SphereFace-pytorch
"""
from __future__ import print_function
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_f
from torch.nn import Parameter
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
###################
class AngleLayer(torch_nn.Module):
""" Output layer to produce activation for Angular softmax layer
AngleLayer(in_dim, output_dim, m=4):
in_dim: dimension of input feature vectors
output_dim: dimension of output feature vectors
(i.e., number of classes)
m: angular-softmax paramter
Method: (|x|cos, phi) = forward(x)
x: (batchsize, input_dim)
cos: (batchsize, output_dim)
phi: (batchsize, output_dim)
Note:
cos[i, j]: cos(\theta) where \theta is the angle between
input feature vector x[i, :] and weight vector w[j, :]
      phi[i, j]: (-1)^k cos(m \theta) - 2k
"""
def __init__(self, in_planes, out_planes, m=4):
super(AngleLayer, self).__init__()
self.in_planes = in_planes
self.out_planes = out_planes
self.weight = Parameter(torch.Tensor(in_planes, out_planes))
self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
self.m = m
# cos(m \theta) = f(cos(\theta))
self.cos_val = [
lambda x: x**0,
lambda x: x**1,
lambda x: 2*x**2-1,
lambda x: 4*x**3-3*x,
lambda x: 8*x**4-8*x**2+1,
lambda x: 16*x**5-20*x**3+5*x,
]
def forward(self, input, flag_angle_only=False):
"""
Compute a-softmax activations
input:
------
input tensor (batchsize, input_dim)
flag_angle_only: true: return cos(\theta), phi(\theta)
false: return |x|cos(\theta), |x|phi(\theta)
default: false
output:
-------
tuple of tensor ((batchsize, output_dim), (batchsize, output_dim))
"""
# w (feature_dim, output_dim)
w = self.weight.renorm(2, 1, 1e-5).mul(1e5)
# x_modulus (batchsize)
        # sum over feature dim -> x_modulus in shape (batchsize)
x_modulus = input.pow(2).sum(1).pow(0.5)
        # w_modulus (output_dim)
        # w_modulus should be 1, since w has been normalized
w_modulus = w.pow(2).sum(0).pow(0.5)
        # w^T x = ||w|| * ||x|| * cos(\theta)
# inner_wx (batchsize, output_dim)
inner_wx = input.mm(w)
# cos_theta (batchsize, output_dim)
cos_theta = inner_wx / x_modulus.view(-1, 1) \
/ w_modulus.view(1, -1)
cos_theta = cos_theta.clamp(-1, 1)
# cos(m \theta)
cos_m_theta = self.cos_val[self.m](cos_theta)
with torch.no_grad():
            # theta (batchsize, output_dim)
theta = cos_theta.acos()
# k is deterministic here
# k * pi / m <= theta <= (k + 1) * pi / m
k = (self.m * theta / 3.14159265).floor()
minus_one = k * 0.0 - 1
# phi_theta (batchsize, output_dim)
        # Phi(\theta_yi) = (-1)**k * cos(m * \theta_yi) - 2 * k
phi_theta = (minus_one ** k) * cos_m_theta - 2 * k
if flag_angle_only:
cos_x = cos_theta
phi_x = phi_theta
else:
cos_x = cos_theta * x_modulus.view(-1, 1)
phi_x = phi_theta * x_modulus.view(-1, 1)
        # ((batchsize, output_dim), (batchsize, output_dim))
return cos_x, phi_x
class AngularSoftmaxWithLoss(torch_nn.Module):
"""
AngularSoftmaxWithLoss()
This is a loss function.
Method:
loss = forward(input, target)
input: a pair of cos(\theta) and phi(\theta),
calculated by AngularLinear
cos(\theta) and phi(\theta) shape: (batchsize, class_num)
target: target labels (batchsize)
"""
def __init__(self, gamma=0):
super(AngularSoftmaxWithLoss, self).__init__()
self.gamma = gamma
self.iter = 0
self.lambda_min = 5.0
self.lambda_max = 1500.0
self.lamb = 1500.0
def forward(self, input, target):
"""
"""
self.iter += 1
# target (batchsize, 1)
target = target.long().view(-1, 1)
with torch.no_grad():
index = torch.zeros_like(input[0])
# index[i][target[i][j]] = 1
index.scatter_(1, target.data.view(-1, 1), 1)
index = index.bool()
# output (batchsize, output_dim)
# Tricks
# output(\theta_yi)
# = (lambda*cos(\theta_yi) + ((-1)**k * cos(m * \theta_yi) - 2*k))
# /(1 + lambda)
# = cos(\theta_yi)
# - cos(\theta_yi) / (1 + lambda) + Phi(\theta_yi) / (1 + lambda)
self.lamb = max(self.lambda_min,
self.lambda_max / (1 + 0.1 * self.iter))
output = input[0] * 1.0
output[index] -= input[0][index] * 1.0 / (1 + self.lamb)
output[index] += input[1][index] * 1.0 / (1 + self.lamb)
# softmax loss
logit = torch_f.log_softmax(output, dim=1)
# select the ones specified by target
logit = logit.gather(1, target).view(-1)
# additional
pt = logit.data.exp()
loss = -1 * (1 - pt) ** self.gamma * logit
loss = loss.mean()
return loss
if __name__ == "__main__":
print("Definition of A-softmax loss")
| 5,663 | 30.120879 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_modules/p2sgrad.py | #!/usr/bin/env python
"""
P2sGrad:
Zhang, X. et al. P2sgrad: Refined gradients for optimizing deep face models.
in Proc. CVPR 9906-9914, 2019
I think the gradient defined in Eq.(11) is equivalent to defining an MSE loss
with 0 or 1 as target:
\mathcal{L}_i = \sum_{j=1}^{K} (\cos\theta_{i,j} - \delta(j == y_i))^2
The difference from a common MSE is that the network output is the cosine of
an angle.
"""
from __future__ import print_function
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_f
from torch.nn import Parameter
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
###################
class P2SActivationLayer(torch_nn.Module):
""" Output layer that produces cos\theta between activation vector x
and class vector w_j
in_dim: dimension of input feature vectors
output_dim: dimension of output feature vectors
(i.e., number of classes)
Usage example:
batchsize = 64
input_dim = 10
class_num = 5
l_layer = P2SActivationLayer(input_dim, class_num)
l_loss = P2SGradLoss()
data = torch.rand(batchsize, input_dim, requires_grad=True)
target = (torch.rand(batchsize) * class_num).clamp(0, class_num-1)
target = target.to(torch.long)
scores = l_layer(data)
loss = l_loss(scores, target)
loss.backward()
"""
def __init__(self, in_dim, out_dim):
super(P2SActivationLayer, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.weight = Parameter(torch.Tensor(in_dim, out_dim))
self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
return
def forward(self, input_feat):
"""
Compute P2sgrad activation
input:
------
input_feat: tensor (batchsize, input_dim)
output:
-------
tensor (batchsize, output_dim)
"""
# normalize the weight (again)
# w (feature_dim, output_dim)
w = self.weight.renorm(2, 1, 1e-5).mul(1e5)
# normalize the input feature vector
# x_modulus (batchsize)
        # sum over feature dim -> x_modulus in shape (batchsize)
x_modulus = input_feat.pow(2).sum(1).pow(0.5)
        # w_modulus (output_dim)
        # w_modulus should be 1, since w has been normalized
w_modulus = w.pow(2).sum(0).pow(0.5)
        # w^T x = ||w|| * ||x|| * cos(\theta)
# inner_wx (batchsize, output_dim)
inner_wx = input_feat.mm(w)
# cos_theta (batchsize, output_dim)
cos_theta = inner_wx / x_modulus.view(-1, 1)
cos_theta = cos_theta.clamp(-1, 1)
# done
return cos_theta
class P2SGradLoss(torch_nn.Module):
"""P2SGradLoss() MSE loss between output and target one-hot vectors
See usage in __doc__ of P2SActivationLayer
"""
def __init__(self):
super(P2SGradLoss, self).__init__()
self.m_loss = torch_nn.MSELoss()
def forward(self, input_score, target):
"""
input
-----
input_score: tensor (batchsize, class_num)
cos\theta given by P2SActivationLayer(input_feat)
target: tensor (batchsize)
target[i] is the target class index of the i-th sample
output
------
        loss: scalar
"""
# target (batchsize, 1)
target = target.long() #.view(-1, 1)
# filling in the target
# index (batchsize, class_num)
with torch.no_grad():
index = torch.zeros_like(input_score)
# index[i][target[i][j]] = 1
index.scatter_(1, target.data.view(-1, 1), 1)
# MSE between \cos\theta and one-hot vectors
loss = self.m_loss(input_score, index)
return loss
if __name__ == "__main__":
print("Definition of P2SGrad Loss")
| 3,964 | 27.52518 | 77 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_modules/am_softmax.py | #!/usr/bin/env python
"""
additive margin softmax layers
Wang, F., Cheng, J., Liu, W. & Liu, H.
Additive margin softmax for face verification. IEEE Signal Process. Lett. 2018
"""
from __future__ import print_function
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_f
from torch.nn import Parameter
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
###################
class AMAngleLayer(torch_nn.Module):
""" Output layer to produce activation for Angular softmax layer
AMAngleLayer(in_dim, output_dim, s=20, m=0.9):
in_dim: dimension of input feature vectors
output_dim: dimension of output feature vectors
(i.e., number of classes)
s: scaler
m: margin
Method: (|x|cos, phi) = forward(x)
x: (batchsize, input_dim)
cos: (batchsize, output_dim)
phi: (batchsize, output_dim)
Note:
cos[i, j]: cos(\theta) where \theta is the angle between
input feature vector x[i, :] and weight vector w[j, :]
      phi[i, j]: s * (cos(\theta) - m)
Usage example:
batchsize = 64
input_dim = 10
class_num = 2
l_layer = AMAngleLayer(input_dim, class_num)
l_loss = AMSoftmaxWithLoss()
data = torch.rand(batchsize, input_dim, requires_grad=True)
target = (torch.rand(batchsize) * class_num).clamp(0, class_num-1)
target = target.to(torch.long)
scores = l_layer(data)
loss = l_loss(scores, target)
loss.backward()
"""
def __init__(self, in_planes, out_planes, s=20, m=0.9):
super(AMAngleLayer, self).__init__()
self.in_planes = in_planes
self.out_planes = out_planes
self.weight = Parameter(torch.Tensor(in_planes, out_planes))
self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
self.m = m
self.s = s
def forward(self, input, flag_angle_only=False):
"""
Compute am-softmax activations
input:
------
input tensor (batchsize, input_dim)
flag_angle_only: true: return cos(\theta), phi(\theta)
false: return |x|cos(\theta), |x|phi(\theta)
default: false
output:
-------
tuple of tensor ((batchsize, output_dim), (batchsize, output_dim))
"""
# w (feature_dim, output_dim)
w = self.weight.renorm(2, 1, 1e-5).mul(1e5)
# x_modulus (batchsize)
        # sum over feature dim -> x_modulus in shape (batchsize)
x_modulus = input.pow(2).sum(1).pow(0.5)
        # w_modulus (output_dim)
        # w_modulus should be 1, since w has been normalized
w_modulus = w.pow(2).sum(0).pow(0.5)
        # w^T x = ||w|| * ||x|| * cos(\theta)
# inner_wx (batchsize, output_dim)
inner_wx = input.mm(w)
# cos_theta (batchsize, output_dim)
cos_theta = inner_wx / x_modulus.view(-1, 1)
cos_theta = cos_theta.clamp(-1, 1)
if flag_angle_only:
cos_x = cos_theta
phi_x = cos_theta
else:
cos_x = self.s * cos_theta
phi_x = self.s * (cos_theta - self.m)
        # ((batchsize, output_dim), (batchsize, output_dim))
return cos_x, phi_x
class AMSoftmaxWithLoss(torch_nn.Module):
"""
AMSoftmaxWithLoss()
See usage in __doc__ of AMAngleLayer
"""
def __init__(self):
super(AMSoftmaxWithLoss, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
def forward(self, input, target):
"""
input:
------
          input: tuple of tensors ((batchsize, out_dim), (batchsize, out_dim))
output from AMAngleLayer
target: tensor (batchsize)
tensor of target index
output:
------
loss: scalar
"""
# target (batchsize)
target = target.long() #.view(-1, 1)
# create an index matrix, i.e., one-hot vectors
with torch.no_grad():
index = torch.zeros_like(input[0])
# index[i][target[i][j]] = 1
index.scatter_(1, target.data.view(-1, 1), 1)
index = index.bool()
# use the one-hot vector as index to select
# input[0] -> cos
# input[1] -> phi
        # if target_i = j, output[i][j] = phi[i][j], otherwise cos[i][j]
#
output = input[0] * 1.0
output[index] -= input[0][index] * 1.0
output[index] += input[1][index] * 1.0
# cross entropy loss
loss = self.m_loss(output, target)
return loss
if __name__ == "__main__":
print("Definition of Am-softmax loss")
| 4,893 | 28.305389 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_modules/grad_rev.py | #!/usr/bin/env python
"""
grad_rev.py
Definition of gradient reverse layer
Copied from https://cyberagent.ai/blog/research/11863/
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#
class GradientReversalFunction(torch.autograd.Function):
""" https://cyberagent.ai/blog/research/11863/
"""
@staticmethod
def forward(ctx, x, scale):
ctx.save_for_backward(scale)
return x
@staticmethod
def backward(ctx, grad):
scale, = ctx.saved_tensors
return scale * -grad, None
class GradientReversal(torch_nn.Module):
""" https://cyberagent.ai/blog/research/11863/
"""
def __init__(self, scale: float):
super(GradientReversal, self).__init__()
self.scale = torch.tensor(scale)
def forward(self, x):
return GradientReversalFunction.apply(x, self.scale)
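if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): gradients flowing back through
    # GradientReversal into the feature tensor are multiplied by -scale, as in
    # domain-adversarial training; the toy linear head below is an assumption,
    # not part of this module.
    feat = torch.randn(4, 8, requires_grad=True)
    head = torch_nn.Linear(8, 2)
    rev = GradientReversal(scale=1.0)
    head(rev(feat)).sum().backward()
    print(feat.grad.shape)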
| 1,078 | 22.977778 | 60 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_modules/oc_softmax.py | #!/usr/bin/env python
"""
one class
One-class learning towards generalized voice spoofing detection
Zhang, You and Jiang, Fei and Duan, Zhiyao
arXiv preprint arXiv:2010.13995
"""
from __future__ import print_function
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_f
from torch.nn import Parameter
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
###################
class OCAngleLayer(torch_nn.Module):
""" Output layer to produce activation for one-class softmax
Usage example:
batchsize = 64
input_dim = 10
class_num = 2
l_layer = OCAngleLayer(input_dim)
l_loss = OCSoftmaxWithLoss()
data = torch.rand(batchsize, input_dim, requires_grad=True)
target = (torch.rand(batchsize) * class_num).clamp(0, class_num-1)
target = target.to(torch.long)
scores = l_layer(data)
loss = l_loss(scores, target)
loss.backward()
"""
def __init__(self, in_planes, w_posi=0.9, w_nega=0.2, alpha=20.0):
super(OCAngleLayer, self).__init__()
self.in_planes = in_planes
self.w_posi = w_posi
self.w_nega = w_nega
self.out_planes = 1
self.weight = Parameter(torch.Tensor(in_planes, self.out_planes))
#self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
torch_nn.init.kaiming_uniform_(self.weight, 0.25)
self.weight.data.renorm_(2,1,1e-5).mul_(1e5)
self.alpha = alpha
def forward(self, input, flag_angle_only=False):
"""
Compute oc-softmax activations
input:
------
input tensor (batchsize, input_dim)
output:
-------
tuple of tensor ((batchsize, output_dim), (batchsize, output_dim))
"""
# w (feature_dim, output_dim)
w = self.weight.renorm(2, 1, 1e-5).mul(1e5)
# x_modulus (batchsize)
        # sum over feature dim -> x_modulus in shape (batchsize)
x_modulus = input.pow(2).sum(1).pow(0.5)
        # w_modulus (output_dim)
        # w_modulus should be 1, since w has been normalized
# w_modulus = w.pow(2).sum(0).pow(0.5)
        # w^T x = ||w|| * ||x|| * cos(\theta)
# inner_wx (batchsize, 1)
inner_wx = input.mm(w)
# cos_theta (batchsize, output_dim)
cos_theta = inner_wx / x_modulus.view(-1, 1)
cos_theta = cos_theta.clamp(-1, 1)
if flag_angle_only:
pos_score = cos_theta
neg_score = cos_theta
else:
pos_score = self.alpha * (self.w_posi - cos_theta)
neg_score = -1 * self.alpha * (self.w_nega - cos_theta)
#
return pos_score, neg_score
class OCSoftmaxWithLoss(torch_nn.Module):
"""
OCSoftmaxWithLoss()
"""
def __init__(self):
super(OCSoftmaxWithLoss, self).__init__()
self.m_loss = torch_nn.Softplus()
def forward(self, inputs, target):
"""
input:
------
          input: tuple of tensors ((batchsize, out_dim), (batchsize, out_dim))
output from OCAngle
inputs[0]: positive class score
inputs[1]: negative class score
target: tensor (batchsize)
tensor of target index
output:
------
loss: scalar
"""
        # Assume target is binary, positive = 1, negative = 0
#
        # Equivalent to selecting the scores using if-else:
# if target = 1, use inputs[0]
# else, use inputs[1]
output = inputs[0] * target.view(-1, 1) + \
inputs[1] * (1-target.view(-1, 1))
loss = self.m_loss(output).mean()
return loss
if __name__ == "__main__":
print("Definition of Am-softmax loss")
| 3,886 | 28.007463 | 76 | py |
matscipy | matscipy-master/matscipy/cli/electrochemistry/pnp.py | #!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2019) Johannes Hoermann, University of Freiburg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
"""Solves 1D Poisson-Nernst-Planck system
Copyright 2019 IMTEK Simulation
University of Freiburg
Authors:
Johannes Hoermann <[email protected]>
"""
import argparse
import datetime
import logging
import os
import sys
import numpy as np
def main():
"""Solve Poisson-Nernst-Planck system and store distribution.
Specify quantities in SI units at this command line interface."""
# in order to have both:
# * preformatted help text and ...
# * automatic display of defaults
class ArgumentDefaultsAndRawDescriptionHelpFormatter(
argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=ArgumentDefaultsAndRawDescriptionHelpFormatter)
parser.add_argument('outfile', metavar='OUT', default=None, nargs='?',
help='binary numpy .npz or plain text .txt output file')
# physical system parameters
parser.add_argument('--concentrations', '-c',
default=[0.1, 0.1], type=float, nargs='+',
metavar='c', required=False, dest="concentrations",
help='Ion species concentrations c (mol m^-3, mM)')
parser.add_argument('--charges', '-z',
default=[1, -1], type=float, nargs='+',
metavar='z', required=False, dest="charges",
help='Ion species number charges z')
parser.add_argument('--potential', '-u',
default=0.05, type=float,
metavar='U', required=False, dest="potential",
help='Potential drop from left to right dU (V)')
parser.add_argument('--length', '-l',
default=100.0e-9, type=float,
metavar='L', required=False, dest="length",
help='Domain length (m)')
parser.add_argument('--temperature', '-T',
default=298.15, type=float,
metavar='T', required=False, dest="temperature",
help='Temperature (K)')
parser.add_argument('--relative-permittivity', '--epsilon-r', '--eps',
default=79.0, type=float,
metavar='eps', required=False,
dest="relative_permittivity",
help='Relative permittivity')
parser.add_argument('--compact-layer', '--stern-layer', '--lambda-s',
default=0.0, type=float,
metavar='L', required=False,
dest="lambda_S",
help='Stern or compact layer thickness (for Robin BC)')
parser.add_argument('--boundary-conditions', '-bc',
default='cell', type=str,
metavar='BC', required=False,
dest="boundary_conditions",
choices=(
'interface', # open half-space
                            'cell',  # 1D electrochemical cell with zero flux BC
'cell-stern', # 1D cell with linear compact layer regime
                            'cell-robin',  # 1D cell with implicit compact layer by Robin BC
),
help='Boundary conditions')
# technical settings
parser.add_argument('--segments', '-N',
default=200, type=int,
metavar='N', required=False,
dest="segments",
help='Number of discretization segments')
parser.add_argument('--maximum-iterations', '--maxit',
default=20, type=int,
metavar='N', required=False,
dest="maxit",
help='Maximum number of Newton iterations')
parser.add_argument('--absolute-tolerance',
default=1e-8, type=float,
metavar='e', required=False,
dest="absolute_tolerance",
help='Absolute tolerance Newton solver convergence criterion')
parser.add_argument('--convergence-stats', default=False, required=False,
action='store_true', dest="convergence",
help='Record and store Newton solver convergence statistics')
parser.add_argument('--debug', default=False, required=False,
action='store_true', dest="debug", help='debug flag')
parser.add_argument('--verbose', default=False, required=False,
action='store_true', dest="verbose", help='verbose flag')
parser.add_argument('--log', required=False, nargs='?', dest="log",
default=None, const='pnp.log', metavar='LOG',
help='Write log file pnp.log, optionally specify log file name')
try:
import argcomplete
argcomplete.autocomplete(parser)
# This supports bash autocompletion. To enable this, pip install
# argcomplete, activate global completion, or add
# eval "$(register-python-argcomplete lpad)"
# into your .bash_profile or .bashrc
except ImportError:
pass
args = parser.parse_args()
if args.debug:
loglevel = logging.DEBUG
elif args.verbose:
loglevel = logging.INFO
else:
loglevel = logging.WARNING
# PoissonNernstPlanckSystem makes extensive use of Python's logging module
logformat = "[ %(filename)s:%(lineno)s - %(funcName)s() ]: %(message)s"
logging.basicConfig(level=loglevel,
format=logformat)
# explicitly modify the root logger (necessary?)
logger = logging.getLogger()
logger.setLevel(loglevel)
# remove all handlers
for h in logger.handlers:
logger.removeHandler(h)
# create and append custom handles
ch = logging.StreamHandler()
formatter = logging.Formatter(logformat)
ch.setFormatter(formatter)
ch.setLevel(loglevel)
logger.addHandler(ch)
if args.log:
fh = logging.FileHandler(args.log)
fh.setFormatter(formatter)
fh.setLevel(loglevel)
logger.addHandler(fh)
# use FEniCS finite element solver if available,
# otherwise own controlled volume scheme
try:
import fenics
from matscipy.electrochemistry.poisson_nernst_planck_solver_fenics \
import PoissonNernstPlanckSystemFEniCS as PoissonNernstPlanckSystem
import dolfin
dolfin.cpp.log.set_log_level(loglevel)
logger.info("Will use FEniCS finite element solver.")
except ModuleNotFoundError:
logger.warning(
"No FEniCS finite element solver found,"
" falling back to internal controlled-volume implementation."
" ATTENTION: Number conservation not exact.")
from matscipy.electrochemistry import PoissonNernstPlanckSystem
# set up system
pnp = PoissonNernstPlanckSystem(
c=np.array(args.concentrations, dtype=float),
z=np.array(args.charges, dtype=float),
L=float(args.length),
T=float(args.temperature),
delta_u=float(args.potential),
lambda_S=float(args.lambda_S),
N=args.segments,
maxit=args.maxit,
e=args.absolute_tolerance,
relative_permittivity=float(args.relative_permittivity))
    if args.boundary_conditions == 'cell-robin' and float(args.lambda_S) > 0:
pnp.useSternLayerCellBC()
    elif ((args.boundary_conditions == 'cell-robin' and float(args.lambda_S) == 0)
or args.boundary_conditions == 'cell'):
pnp.useStandardCellBC()
elif args.boundary_conditions == 'interface':
pnp.useStandardInterfaceBC()
else:
raise ValueError("Boundary conditions '{}' not implemented!".format(
args.boundary_conditions))
pnp.solve()
extra_kwargs = {}
if args.convergence:
extra_kwargs.update({
'convergence_step_absolute': pnp.convergenceStepAbsolute,
'convergence_step_relative': pnp.convergenceStepRelative,
'convergence_residual_absolute': pnp.convergenceResidualAbsolute})
if not args.outfile:
outfile = sys.stdout
format = 'txt'
else:
outfile = args.outfile
_, format = os.path.splitext(outfile)
if format == '.npz':
np.savez(file=outfile,
x=pnp.grid, u=pnp.potential, c=pnp.concentration, **extra_kwargs)
else: # elif format == '.txt'
comment = '\n'.join((
'Poisson-Nernst-Planck system, generated on {:s}, {:s}'.format(
os.uname().nodename,
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
'All quantities in SI units.',
'grid x (m), potential u (V), concentration c (mM)'))
header = comment + '\n' + '{:20s} {:22s} '.format('x', 'u') + ' '.join(
['{:22s}'.format('c{:02d}'.format(k)) for k in range(pnp.M)])
data = np.column_stack([pnp.grid, pnp.potential, pnp.concentration.T])
np.savetxt(outfile, data, fmt='%22.15e', header=header)
# write out final state as usual, but mark process failed if not converged
# if not pnp.converged:
# sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
| 10,487 | 38.577358 | 91 | py |
matscipy | matscipy-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# matscipy documentation build configuration file, created by
# sphinx-quickstart on Sun May 17 16:40:20 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Fix for readthedocs which does not support numpy
#import mock
#
#MOCK_MODULES = ['numpy', 'numpy.linalg']
#for mod_name in MOCK_MODULES:
# sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'myst_nb',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'matscipy'
copyright = u'2015, James Kermode, Lars Pastewka'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'devel'
# The full version, including alpha/beta/rc tags.
release = 'devel'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'matscipydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'matscipy.tex', u'matscipy Documentation',
u'James Kermode, Lars Pastewka', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'matscipy', u'matscipy Documentation',
[u'James Kermode, Lars Pastewka'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'matscipy', u'matscipy Documentation',
u'James Kermode, Lars Pastewka', 'matscipy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Extension configuration -------------------------------------------------
autodoc_default_options = {
'members': True,
'member-order': 'bysource',
'undoc-members': True,
'show-inheritance': True,
'inherited-members': True,
'special-members': '__init__',
}
myst_enable_extensions = [
"dollarmath"]
| 8,929 | 29.793103 | 83 | py |
EfficientDet | EfficientDet-master/initializers.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import keras
from tensorflow import keras
import numpy as np
import math
class PriorProbability(keras.initializers.Initializer):
""" Apply a prior probability to the weights.
"""
def __init__(self, probability=0.01):
self.probability = probability
def get_config(self):
return {
'probability': self.probability
}
def __call__(self, shape, dtype=None):
# set bias to -log((1 - p)/p) for foreground
result = np.ones(shape, dtype=np.float32) * -math.log((1 - self.probability) / self.probability)
return result
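if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the initializer fills a bias
    # vector with -log((1 - p) / p), roughly -4.6 for p = 0.01, and can be
    # passed as the bias_initializer of a classification head; the filter
    # count below is an arbitrary assumption.
    init = PriorProbability(probability=0.01)
    print(init((3,)))
    head = keras.layers.Conv2D(filters=9, kernel_size=3, padding='same',
                               bias_initializer=init)
    print(head.bias_initializer.get_config())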
| 1,177 | 27.731707 | 104 | py |
EfficientDet | EfficientDet-master/losses.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import keras
from tensorflow import keras
import tensorflow as tf
def focal(alpha=0.25, gamma=1.5):
"""
Create a functor for computing the focal loss.
Args
alpha: Scale the focal weight with alpha.
gamma: Take the power of the focal weight with gamma.
Returns
A functor that computes the focal loss using the alpha and gamma.
"""
def _focal(y_true, y_pred):
"""
Compute the focal loss given the target tensor and the predicted tensor.
As defined in https://arxiv.org/abs/1708.02002
Args
y_true: Tensor of target data from the generator with shape (B, N, num_classes).
y_pred: Tensor of predicted data from the network with shape (B, N, num_classes).
Returns
The focal loss of y_pred w.r.t. y_true.
"""
labels = y_true[:, :, :-1]
# -1 for ignore, 0 for background, 1 for object
anchor_state = y_true[:, :, -1]
classification = y_pred
# filter out "ignore" anchors
indices = tf.where(keras.backend.not_equal(anchor_state, -1))
labels = tf.gather_nd(labels, indices)
classification = tf.gather_nd(classification, indices)
# compute the focal loss
alpha_factor = keras.backend.ones_like(labels) * alpha
alpha_factor = tf.where(keras.backend.equal(labels, 1), alpha_factor, 1 - alpha_factor)
# (1 - 0.99) ** 2 = 1e-4, (1 - 0.9) ** 2 = 1e-2
focal_weight = tf.where(keras.backend.equal(labels, 1), 1 - classification, classification)
focal_weight = alpha_factor * focal_weight ** gamma
cls_loss = focal_weight * keras.backend.binary_crossentropy(labels, classification)
# compute the normalizer: the number of positive anchors
normalizer = tf.where(keras.backend.equal(anchor_state, 1))
normalizer = keras.backend.cast(keras.backend.shape(normalizer)[0], keras.backend.floatx())
normalizer = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer)
return keras.backend.sum(cls_loss) / normalizer
return _focal
def smooth_l1(sigma=3.0):
"""
Create a smooth L1 loss functor.
Args
sigma: This argument defines the point where the loss changes from L2 to L1.
Returns
A functor for computing the smooth L1 loss given target data and predicted data.
"""
sigma_squared = sigma ** 2
def _smooth_l1(y_true, y_pred):
""" Compute the smooth L1 loss of y_pred w.r.t. y_true.
Args
y_true: Tensor from the generator of shape (B, N, 5). The last value for each box is the state of the anchor (ignore, negative, positive).
y_pred: Tensor from the network of shape (B, N, 4).
Returns
The smooth L1 loss of y_pred w.r.t. y_true.
"""
# separate target and state
regression = y_pred
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
# filter out "ignore" anchors
indices = tf.where(keras.backend.equal(anchor_state, 1))
regression = tf.gather_nd(regression, indices)
regression_target = tf.gather_nd(regression_target, indices)
# compute smooth L1 loss
# f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
# |x| - 0.5 / sigma / sigma otherwise
regression_diff = regression - regression_target
regression_diff = keras.backend.abs(regression_diff)
regression_loss = tf.where(
keras.backend.less(regression_diff, 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff, 2),
regression_diff - 0.5 / sigma_squared
)
# compute the normalizer: the number of positive anchors
normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])
normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())
return keras.backend.sum(regression_loss) / normalizer
return _smooth_l1
def smooth_l1_quad(sigma=3.0):
"""
Create a smooth L1 loss functor.
Args
sigma: This argument defines the point where the loss changes from L2 to L1.
Returns
A functor for computing the smooth L1 loss given target data and predicted data.
"""
sigma_squared = sigma ** 2
def _smooth_l1(y_true, y_pred):
""" Compute the smooth L1 loss of y_pred w.r.t. y_true.
Args
            y_true: Tensor from the generator of shape (B, N, 10). The last value for each box is the state of the anchor (ignore, negative, positive).
            y_pred: Tensor from the network of shape (B, N, 9).
Returns
The smooth L1 loss of y_pred w.r.t. y_true.
"""
# separate target and state
regression = y_pred
regression = tf.concat([regression[..., :4], tf.sigmoid(regression[..., 4:9])], axis=-1)
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
# filter out "ignore" anchors
indices = tf.where(keras.backend.equal(anchor_state, 1))
regression = tf.gather_nd(regression, indices)
regression_target = tf.gather_nd(regression_target, indices)
# compute smooth L1 loss
# f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
# |x| - 0.5 / sigma / sigma otherwise
regression_diff = regression - regression_target
regression_diff = keras.backend.abs(regression_diff)
box_regression_loss = tf.where(
keras.backend.less(regression_diff[..., :4], 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff[..., :4], 2),
regression_diff[..., :4] - 0.5 / sigma_squared
)
alpha_regression_loss = tf.where(
keras.backend.less(regression_diff[..., 4:8], 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff[..., 4:8], 2),
regression_diff[..., 4:8] - 0.5 / sigma_squared
)
ratio_regression_loss = tf.where(
keras.backend.less(regression_diff[..., 8], 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff[..., 8], 2),
regression_diff[..., 8] - 0.5 / sigma_squared
)
# compute the normalizer: the number of positive anchors
normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])
normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())
box_regression_loss = tf.reduce_sum(box_regression_loss) / normalizer
alpha_regression_loss = tf.reduce_sum(alpha_regression_loss) / normalizer
ratio_regression_loss = tf.reduce_sum(ratio_regression_loss) / normalizer
return box_regression_loss + alpha_regression_loss + 16 * ratio_regression_loss
return _smooth_l1
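if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the loss closures are meant to
    # be passed to model.compile, e.g.
    #   model.compile(optimizer='adam',
    #                 loss={'regression': smooth_l1(), 'classification': focal()})
    # Below, smooth_l1 is evaluated on dummy tensors with no positive anchors,
    # which yields zero loss (assumes eager execution, i.e. TF2).
    import numpy as np
    y_true = np.zeros((1, 3, 5), dtype=np.float32)  # 4 box targets + anchor state
    y_pred = np.zeros((1, 3, 4), dtype=np.float32)
    print(smooth_l1()(tf.constant(y_true), tf.constant(y_pred)))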
| 7,520 | 39.005319 | 150 | py |
EfficientDet | EfficientDet-master/callbacks.py | from tensorflow.keras.callbacks import Callback
import tensorflow.keras.backend as K
import numpy as np
class CosineAnnealingScheduler(Callback):
def __init__(self, cycle_iterations, min_lr, t_mu=2, start_iteration=0):
self.iteration_id = 0
self.start_iteration = start_iteration
self.cycle_iteration_id = 0
self.lrs = []
self.min_lr = min_lr
self.cycle_iterations = cycle_iterations
self.t_mu = t_mu
super(CosineAnnealingScheduler, self).__init__()
def on_batch_end(self, batch, logs):
if self.iteration_id > self.start_iteration:
# (1, 0)
cosine_decay = 0.5 * (1 + np.cos(np.pi * (self.cycle_iteration_id / self.cycle_iterations)))
decayed_lr = (self.max_lr - self.min_lr) * cosine_decay + self.min_lr
K.set_value(self.model.optimizer.lr, decayed_lr)
if self.cycle_iteration_id == self.cycle_iterations:
self.cycle_iteration_id = 0
self.cycle_iterations = int(self.cycle_iterations * self.t_mu)
else:
self.cycle_iteration_id = self.cycle_iteration_id + 1
self.lrs.append(decayed_lr)
elif self.iteration_id == self.start_iteration:
self.max_lr = K.get_value(self.model.optimizer.lr)
self.iteration_id += 1
def on_train_begin(self, logs={}):
self.max_lr = K.get_value(self.model.optimizer.lr)
class ExponentialScheduler(Callback):
def __init__(self, min_lr, max_lr, iterations):
self.factor = np.exp(np.log(max_lr / min_lr) / iterations)
self.min_lr = min_lr
self.max_lr = max_lr
# debug
self.lrs = []
self.losses = []
def on_batch_end(self, batch, logs):
lr = K.get_value(self.model.optimizer.lr)
self.lrs.append(lr)
self.losses.append(logs["loss"])
K.set_value(self.model.optimizer.lr, lr * self.factor)
def on_train_begin(self, logs={}):
K.set_value(self.model.optimizer.lr, self.min_lr)
class LinearWarmUpScheduler(Callback):
def __init__(self, iterations, min_lr):
self.iterations = iterations
self.min_lr = min_lr
self.iteration_id = 0
# debug
self.lrs = []
def on_batch_begin(self, batch, logs):
if self.iteration_id < self.iterations:
lr = (self.max_lr - self.min_lr) / self.iterations * (self.iteration_id + 1) + self.min_lr
K.set_value(self.model.optimizer.lr, lr)
self.iteration_id += 1
self.lrs.append(K.get_value(self.model.optimizer.lr))
def on_train_begin(self, logs={}):
self.max_lr = K.get_value(self.model.optimizer.lr)
K.set_value(self.model.optimizer.lr, self.min_lr)
self.lrs.append(K.get_value(self.model.optimizer.lr))
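if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): both schedulers adjust the
    # optimizer learning rate per batch and are passed to model.fit via the
    # callbacks argument; the iteration counts and bounds below are
    # assumptions, and the model/data are defined elsewhere.
    warmup = LinearWarmUpScheduler(iterations=1000, min_lr=1e-6)
    annealing = CosineAnnealingScheduler(cycle_iterations=5000, min_lr=1e-6)
    # model.fit(train_data, epochs=10, callbacks=[warmup, annealing])
    print(type(warmup).__name__, type(annealing).__name__)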
| 2,830 | 36.746667 | 104 | py |
EfficientDet | EfficientDet-master/efficientnet.py | # Copyright 2019 The TensorFlow Authors, Pavel Yakubovskiy, Björn Barz. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for EfficientNet model.
[1] Mingxing Tan, Quoc V. Le
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
ICML'19, https://arxiv.org/abs/1905.11946
"""
# Code of this model implementation is mostly written by
# Björn Barz ([@Callidior](https://github.com/Callidior))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import math
import string
import collections
import numpy as np
from six.moves import xrange
from keras_applications.imagenet_utils import _obtain_input_shape
from keras_applications.imagenet_utils import decode_predictions
from keras_applications.imagenet_utils import preprocess_input as _preprocess_input
from utils import get_submodules_from_kwargs
from layers import BatchNormalization
backend = None
layers = None
models = None
keras_utils = None
BASE_WEIGHTS_PATH = (
'https://github.com/Callidior/keras-applications/'
'releases/download/efficientnet/')
WEIGHTS_HASHES = {
'efficientnet-b0': ('163292582f1c6eaca8e7dc7b51b01c61'
'5b0dbc0039699b4dcd0b975cc21533dc',
'c1421ad80a9fc67c2cc4000f666aa507'
'89ce39eedb4e06d531b0c593890ccff3'),
'efficientnet-b1': ('d0a71ddf51ef7a0ca425bab32b7fa7f1'
'6043ee598ecee73fc674d9560c8f09b0',
'75de265d03ac52fa74f2f510455ba64f'
'9c7c5fd96dc923cd4bfefa3d680c4b68'),
'efficientnet-b2': ('bb5451507a6418a574534aa76a91b106'
'f6b605f3b5dde0b21055694319853086',
'433b60584fafba1ea3de07443b74cfd3'
'2ce004a012020b07ef69e22ba8669333'),
'efficientnet-b3': ('03f1fba367f070bd2545f081cfa7f3e7'
'6f5e1aa3b6f4db700f00552901e75ab9',
'c5d42eb6cfae8567b418ad3845cfd63a'
'a48b87f1bd5df8658a49375a9f3135c7'),
'efficientnet-b4': ('98852de93f74d9833c8640474b2c698d'
'b45ec60690c75b3bacb1845e907bf94f',
'7942c1407ff1feb34113995864970cd4'
'd9d91ea64877e8d9c38b6c1e0767c411'),
'efficientnet-b5': ('30172f1d45f9b8a41352d4219bf930ee'
'3339025fd26ab314a817ba8918fefc7d',
'9d197bc2bfe29165c10a2af8c2ebc675'
'07f5d70456f09e584c71b822941b1952'),
'efficientnet-b6': ('f5270466747753485a082092ac9939ca'
'a546eb3f09edca6d6fff842cad938720',
'1d0923bb038f2f8060faaf0a0449db4b'
'96549a881747b7c7678724ac79f427ed'),
'efficientnet-b7': ('876a41319980638fa597acbbf956a82d'
'10819531ff2dcb1a52277f10c7aefa1a',
'60b56ff3a8daccc8d96edfd40b204c11'
'3e51748da657afd58034d54d3cec2bac')
}
BlockArgs = collections.namedtuple('BlockArgs', [
'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
'expand_ratio', 'id_skip', 'strides', 'se_ratio'
])
# defaults will be a public argument for namedtuple in Python 3.7
# https://docs.python.org/3/library/collections.html#collections.namedtuple
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
DEFAULT_BLOCKS_ARGS = [
BlockArgs(kernel_size=3, num_repeat=1, input_filters=32, output_filters=16,
expand_ratio=1, id_skip=True, strides=[1, 1], se_ratio=0.25),
BlockArgs(kernel_size=3, num_repeat=2, input_filters=16, output_filters=24,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=5, num_repeat=2, input_filters=24, output_filters=40,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=3, num_repeat=3, input_filters=40, output_filters=80,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=5, num_repeat=3, input_filters=80, output_filters=112,
expand_ratio=6, id_skip=True, strides=[1, 1], se_ratio=0.25),
BlockArgs(kernel_size=5, num_repeat=4, input_filters=112, output_filters=192,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=3, num_repeat=1, input_filters=192, output_filters=320,
expand_ratio=6, id_skip=True, strides=[1, 1], se_ratio=0.25)
]
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
# EfficientNet actually uses an untruncated normal distribution for
# initializing conv layers, but keras.initializers.VarianceScaling use
# a truncated distribution.
# We decided against a custom initializer for better serializability.
'distribution': 'normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1. / 3.,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
def preprocess_input(x, **kwargs):
kwargs = {k: v for k, v in kwargs.items() if k in ['backend', 'layers', 'models', 'utils']}
return _preprocess_input(x, mode='torch', **kwargs)
def get_swish(**kwargs):
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
def swish(x):
"""Swish activation function: x * sigmoid(x).
Reference: [Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
"""
if backend.backend() == 'tensorflow':
try:
# The native TF implementation has a more
# memory-efficient gradient implementation
return backend.tf.nn.swish(x)
except AttributeError:
pass
return x * backend.sigmoid(x)
return swish
def get_dropout(**kwargs):
"""Wrapper over custom dropout. Fix problem of ``None`` shape for tf.keras.
It is not possible to define FixedDropout class as global object,
because we do not have modules for inheritance at first time.
Issue:
https://github.com/tensorflow/tensorflow/issues/30946
"""
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
class FixedDropout(layers.Dropout):
def _get_noise_shape(self, inputs):
if self.noise_shape is None:
return self.noise_shape
symbolic_shape = backend.shape(inputs)
noise_shape = [symbolic_shape[axis] if shape is None else shape
for axis, shape in enumerate(self.noise_shape)]
return tuple(noise_shape)
return FixedDropout
def round_filters(filters, width_coefficient, depth_divisor):
"""Round number of filters based on width multiplier."""
filters *= width_coefficient
new_filters = int(filters + depth_divisor / 2) // depth_divisor * depth_divisor
new_filters = max(depth_divisor, new_filters)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += depth_divisor
return int(new_filters)
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
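# Worked example (illustrative values): round_filters(32, 1.2, 8) scales the
# base width to 38.4 and snaps it to the nearest multiple of depth_divisor=8,
# giving 40; round_repeats(2, 1.2) rounds 2 * 1.2 = 2.4 up to 3 block repeats.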
def mb_conv_block(inputs, block_args, activation, drop_rate=None, prefix='', freeze_bn=False):
"""Mobile Inverted Residual Bottleneck."""
has_se = (block_args.se_ratio is not None) and (0 < block_args.se_ratio <= 1)
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
# workaround over non working dropout with None in noise_shape in tf.keras
Dropout = get_dropout(
backend=backend,
layers=layers,
models=models,
utils=keras_utils
)
# Expansion phase
filters = block_args.input_filters * block_args.expand_ratio
if block_args.expand_ratio != 1:
x = layers.Conv2D(filters, 1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'expand_conv')(inputs)
# x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name=prefix + 'expand_bn')(x)
x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'expand_bn')(x)
x = layers.Activation(activation, name=prefix + 'expand_activation')(x)
else:
x = inputs
# Depthwise Convolution
x = layers.DepthwiseConv2D(block_args.kernel_size,
strides=block_args.strides,
padding='same',
use_bias=False,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'dwconv')(x)
# x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name=prefix + 'bn')(x)
x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'bn')(x)
x = layers.Activation(activation, name=prefix + 'activation')(x)
# Squeeze and Excitation phase
if has_se:
num_reduced_filters = max(1, int(
block_args.input_filters * block_args.se_ratio
))
se_tensor = layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze')(x)
target_shape = (1, 1, filters) if backend.image_data_format() == 'channels_last' else (filters, 1, 1)
se_tensor = layers.Reshape(target_shape, name=prefix + 'se_reshape')(se_tensor)
se_tensor = layers.Conv2D(num_reduced_filters, 1,
activation=activation,
padding='same',
use_bias=True,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'se_reduce')(se_tensor)
se_tensor = layers.Conv2D(filters, 1,
activation='sigmoid',
padding='same',
use_bias=True,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'se_expand')(se_tensor)
if backend.backend() == 'theano':
# For the Theano backend, we have to explicitly make
# the excitation weights broadcastable.
pattern = ([True, True, True, False] if backend.image_data_format() == 'channels_last'
else [True, False, True, True])
se_tensor = layers.Lambda(
lambda x: backend.pattern_broadcast(x, pattern),
name=prefix + 'se_broadcast')(se_tensor)
x = layers.multiply([x, se_tensor], name=prefix + 'se_excite')
# Output phase
x = layers.Conv2D(block_args.output_filters, 1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'project_conv')(x)
# x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name=prefix + 'project_bn')(x)
x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'project_bn')(x)
if block_args.id_skip and all(
s == 1 for s in block_args.strides
) and block_args.input_filters == block_args.output_filters:
if drop_rate and (drop_rate > 0):
x = Dropout(drop_rate,
noise_shape=(None, 1, 1, 1),
name=prefix + 'drop')(x)
x = layers.add([x, inputs], name=prefix + 'add')
return x
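# Summary of mb_conv_block: 1x1 expansion conv (skipped when expand_ratio == 1),
# depthwise conv, optional squeeze-and-excitation (when 0 < se_ratio <= 1), 1x1
# projection conv, and an identity skip with drop-connect (FixedDropout with
# noise_shape=(None, 1, 1, 1)) when all strides are 1 and input_filters == output_filters.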
def EfficientNet(width_coefficient,
depth_coefficient,
default_resolution,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
blocks_args=DEFAULT_BLOCKS_ARGS,
model_name='efficientnet',
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
freeze_bn=False,
**kwargs):
"""Instantiates the EfficientNet architecture using given scaling coefficients.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
default_resolution: int, default input image size.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: int.
blocks_args: A list of BlockArgs to construct block modules.
model_name: string, model name.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False.
            It should have exactly 3 input channels.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
        A list of backbone feature tensors (this variant returns intermediate
        features for detection instead of a full classification model).
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
features = []
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=default_resolution,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if backend.backend() == 'tensorflow':
from tensorflow.python.keras.backend import is_keras_tensor
else:
is_keras_tensor = backend.is_keras_tensor
if not is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
activation = get_swish(**kwargs)
# Build stem
x = img_input
x = layers.Conv2D(round_filters(32, width_coefficient, depth_divisor), 3,
strides=(2, 2),
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='stem_conv')(x)
# x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name='stem_bn')(x)
x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
x = layers.Activation(activation, name='stem_activation')(x)
# Build blocks
num_blocks_total = sum(block_args.num_repeat for block_args in blocks_args)
block_num = 0
for idx, block_args in enumerate(blocks_args):
assert block_args.num_repeat > 0
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters,
width_coefficient, depth_divisor),
output_filters=round_filters(block_args.output_filters,
width_coefficient, depth_divisor),
num_repeat=round_repeats(block_args.num_repeat, depth_coefficient))
# The first block needs to take care of stride and filter size increase.
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
x = mb_conv_block(x, block_args,
activation=activation,
drop_rate=drop_rate,
prefix='block{}a_'.format(idx + 1),
freeze_bn=freeze_bn
)
block_num += 1
if block_args.num_repeat > 1:
# pylint: disable=protected-access
block_args = block_args._replace(
input_filters=block_args.output_filters, strides=[1, 1])
# pylint: enable=protected-access
        for bidx in range(block_args.num_repeat - 1):
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
block_prefix = 'block{}{}_'.format(
idx + 1,
string.ascii_lowercase[bidx + 1]
)
x = mb_conv_block(x, block_args,
activation=activation,
drop_rate=drop_rate,
prefix=block_prefix,
freeze_bn=freeze_bn
)
block_num += 1
if idx < len(blocks_args) - 1 and blocks_args[idx + 1].strides[0] == 2:
features.append(x)
elif idx == len(blocks_args) - 1:
features.append(x)
return features
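# Note: this EfficientNet variant returns the list of intermediate feature maps
# collected above (one taken just before every stride-2 stage plus the final stage,
# five in total) rather than a full classification model; efficientdet() in model.py
# unpacks the last three of them as C3-C5.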
def EfficientNetB0(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.0, 1.0, 224, 0.2,
model_name='efficientnet-b0',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB1(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.0, 1.1, 240, 0.2,
model_name='efficientnet-b1',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.1, 1.2, 260, 0.3,
model_name='efficientnet-b2',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.2, 1.4, 300, 0.3,
model_name='efficientnet-b3',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB4(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.4, 1.8, 380, 0.4,
model_name='efficientnet-b4',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB5(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.6, 2.2, 456, 0.4,
model_name='efficientnet-b5',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB6(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.8, 2.6, 528, 0.5,
model_name='efficientnet-b6',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB7(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(2.0, 3.1, 600, 0.5,
model_name='efficientnet-b7',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
setattr(EfficientNetB0, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB1, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB2, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB3, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB4, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB5, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB6, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB7, '__doc__', EfficientNet.__doc__)
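# Usage sketch (mirrors how model.py in this repo calls the backbone; the 512x512
# input size and the tf.keras environment are assumptions, not fixed requirements):
#
#     from tfkeras import EfficientNetB0
#     image_input = tf.keras.layers.Input((512, 512, 3))
#     C1, C2, C3, C4, C5 = EfficientNetB0(input_tensor=image_input, freeze_bn=False)
#
# Each returned tensor is one of the five backbone feature maps; the detector keeps
# C3-C5 and derives P6/P7 from C5.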
| 24,768 | 42.001736 | 109 | py |
EfficientDet | EfficientDet-master/model.py | from functools import reduce
# from keras import layers
# from keras import initializers
# from keras import models
# from keras_ import EfficientNetB0, EfficientNetB1, EfficientNetB2
# from keras_ import EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import initializers
from tensorflow.keras import models
from tfkeras import EfficientNetB0, EfficientNetB1, EfficientNetB2
from tfkeras import EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6
from layers import ClipBoxes, RegressBoxes, FilterDetections, wBiFPNAdd, BatchNormalization
from initializers import PriorProbability
from utils.anchors import anchors_for_shape
import numpy as np
w_bifpns = [64, 88, 112, 160, 224, 288, 384]
d_bifpns = [3, 4, 5, 6, 7, 7, 8]
d_heads = [3, 3, 3, 4, 4, 4, 5]
image_sizes = [512, 640, 768, 896, 1024, 1280, 1408]
backbones = [EfficientNetB0, EfficientNetB1, EfficientNetB2,
EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6]
MOMENTUM = 0.997
EPSILON = 1e-4
def SeparableConvBlock(num_channels, kernel_size, strides, name, freeze_bn=False):
f1 = layers.SeparableConv2D(num_channels, kernel_size=kernel_size, strides=strides, padding='same',
use_bias=True, name=f'{name}/conv')
f2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{name}/bn')
# f2 = BatchNormalization(freeze=freeze_bn, name=f'{name}/bn')
return reduce(lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)), (f1, f2))
def ConvBlock(num_channels, kernel_size, strides, name, freeze_bn=False):
f1 = layers.Conv2D(num_channels, kernel_size=kernel_size, strides=strides, padding='same',
use_bias=True, name='{}_conv'.format(name))
f2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name='{}_bn'.format(name))
# f2 = BatchNormalization(freeze=freeze_bn, name='{}_bn'.format(name))
f3 = layers.ReLU(name='{}_relu'.format(name))
return reduce(lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)), (f1, f2, f3))
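# Note: SeparableConvBlock and ConvBlock return composed callables built with
# functools.reduce, so e.g. SeparableConvBlock(64, 3, 1, name='node')(x) applies the
# separable convolution and then its batch normalization to x in a single call.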
def build_wBiFPN(features, num_channels, id, freeze_bn=False):
if id == 0:
_, _, C3, C4, C5 = features
P3_in = C3
P4_in = C4
P5_in = C5
P6_in = layers.Conv2D(num_channels, kernel_size=1, padding='same', name='resample_p6/conv2d')(C5)
P6_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name='resample_p6/bn')(P6_in)
# P6_in = BatchNormalization(freeze=freeze_bn, name='resample_p6/bn')(P6_in)
P6_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p6/maxpool')(P6_in)
P7_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p7/maxpool')(P6_in)
P7_U = layers.UpSampling2D()(P7_in)
P6_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P5_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/conv2d')(P5_in)
P5_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
# P5_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in_1, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P4_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/conv2d')(P4_in)
P4_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
# P4_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in_1, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P3_in = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/conv2d')(P3_in)
P3_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
# P3_in = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P4_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/conv2d')(P4_in)
P4_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
# P4_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in_2, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P5_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/conv2d')(P5_in)
P5_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
# P5_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in_2, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
else:
P3_in, P4_in, P5_in, P6_in, P7_in = features
P7_U = layers.UpSampling2D()(P7_in)
P6_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
return P3_out, P4_td, P5_td, P6_td, P7_out
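# Summary of build_wBiFPN: in the first cell (id == 0) the backbone maps C3-C5 are
# projected to num_channels, and P6/P7 are derived from C5 (1x1 conv + BN, then strided
# max pooling). Every cell then runs a top-down pass (P7 -> P3, upsampling + fusion)
# followed by a bottom-up pass (P3 -> P7, max-pool downsampling + fusion), where each
# fusion node is a wBiFPNAdd followed by swish and a SeparableConvBlock. The five
# returned maps feed the next cell, or the BoxNet/ClassNet heads after the last cell.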
def build_BiFPN(features, num_channels, id, freeze_bn=False):
if id == 0:
_, _, C3, C4, C5 = features
P3_in = C3
P4_in = C4
P5_in = C5
P6_in = layers.Conv2D(num_channels, kernel_size=1, padding='same', name='resample_p6/conv2d')(C5)
P6_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name='resample_p6/bn')(P6_in)
# P6_in = BatchNormalization(freeze=freeze_bn, name='resample_p6/bn')(P6_in)
P6_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p6/maxpool')(P6_in)
P7_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p7/maxpool')(P6_in)
P7_U = layers.UpSampling2D()(P7_in)
P6_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P5_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/conv2d')(P5_in)
P5_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
# P5_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in_1, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P4_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/conv2d')(P4_in)
P4_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
# P4_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in_1, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P3_in = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/conv2d')(P3_in)
P3_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
# P3_in = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P4_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/conv2d')(P4_in)
P4_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
# P4_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in_2, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P5_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/conv2d')(P5_in)
P5_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
# P5_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in_2, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
else:
P3_in, P4_in, P5_in, P6_in, P7_in = features
P7_U = layers.UpSampling2D()(P7_in)
P6_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
return P3_out, P4_td, P5_td, P6_td, P7_out
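# Note: build_BiFPN mirrors build_wBiFPN above, with plain layers.Add in place of the
# learned wBiFPNAdd weights (unweighted feature fusion).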
class BoxNet(models.Model):
def __init__(self, width, depth, num_anchors=9, separable_conv=True, freeze_bn=False, detect_quadrangle=False, **kwargs):
super(BoxNet, self).__init__(**kwargs)
self.width = width
self.depth = depth
self.num_anchors = num_anchors
self.separable_conv = separable_conv
self.detect_quadrangle = detect_quadrangle
num_values = 9 if detect_quadrangle else 4
options = {
'kernel_size': 3,
'strides': 1,
'padding': 'same',
'bias_initializer': 'zeros',
}
if separable_conv:
kernel_initializer = {
'depthwise_initializer': initializers.VarianceScaling(),
'pointwise_initializer': initializers.VarianceScaling(),
}
options.update(kernel_initializer)
self.convs = [layers.SeparableConv2D(filters=width, name=f'{self.name}/box-{i}', **options) for i in
range(depth)]
self.head = layers.SeparableConv2D(filters=num_anchors * num_values,
name=f'{self.name}/box-predict', **options)
else:
kernel_initializer = {
'kernel_initializer': initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
}
options.update(kernel_initializer)
self.convs = [layers.Conv2D(filters=width, name=f'{self.name}/box-{i}', **options) for i in range(depth)]
self.head = layers.Conv2D(filters=num_anchors * num_values, name=f'{self.name}/box-predict', **options)
self.bns = [
[layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/box-{i}-bn-{j}') for j in
range(3, 8)]
for i in range(depth)]
# self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/box-{i}-bn-{j}') for j in range(3, 8)]
# for i in range(depth)]
self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
self.reshape = layers.Reshape((-1, num_values))
self.level = 0
def call(self, inputs, **kwargs):
feature, level = inputs
for i in range(self.depth):
feature = self.convs[i](feature)
feature = self.bns[i][self.level](feature)
feature = self.relu(feature)
outputs = self.head(feature)
outputs = self.reshape(outputs)
self.level += 1
return outputs
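# Note on BoxNet (ClassNet below follows the same pattern): the convolution weights are
# shared across pyramid levels, while a separate set of BatchNormalization layers is kept
# per level (self.bns[i][level]). call() unpacks but never uses the incoming `level`
# tensor and instead advances the internal self.level counter, so each head instance has
# to be called exactly once per pyramid level, in order, as efficientdet() does below.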
class ClassNet(models.Model):
def __init__(self, width, depth, num_classes=20, num_anchors=9, separable_conv=True, freeze_bn=False, **kwargs):
super(ClassNet, self).__init__(**kwargs)
self.width = width
self.depth = depth
self.num_classes = num_classes
self.num_anchors = num_anchors
self.separable_conv = separable_conv
options = {
'kernel_size': 3,
'strides': 1,
'padding': 'same',
}
if self.separable_conv:
kernel_initializer = {
'depthwise_initializer': initializers.VarianceScaling(),
'pointwise_initializer': initializers.VarianceScaling(),
}
options.update(kernel_initializer)
self.convs = [layers.SeparableConv2D(filters=width, bias_initializer='zeros', name=f'{self.name}/class-{i}',
**options)
for i in range(depth)]
self.head = layers.SeparableConv2D(filters=num_classes * num_anchors,
bias_initializer=PriorProbability(probability=0.01),
name=f'{self.name}/class-predict', **options)
else:
kernel_initializer = {
'kernel_initializer': initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
}
options.update(kernel_initializer)
self.convs = [layers.Conv2D(filters=width, bias_initializer='zeros', name=f'{self.name}/class-{i}',
**options)
for i in range(depth)]
self.head = layers.Conv2D(filters=num_classes * num_anchors,
bias_initializer=PriorProbability(probability=0.01),
                                      name=f'{self.name}/class-predict', **options)
self.bns = [
[layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/class-{i}-bn-{j}') for j
in range(3, 8)]
for i in range(depth)]
# self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/class-{i}-bn-{j}') for j in range(3, 8)]
# for i in range(depth)]
self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
self.reshape = layers.Reshape((-1, num_classes))
self.activation = layers.Activation('sigmoid')
self.level = 0
def call(self, inputs, **kwargs):
feature, level = inputs
for i in range(self.depth):
feature = self.convs[i](feature)
feature = self.bns[i][self.level](feature)
feature = self.relu(feature)
outputs = self.head(feature)
outputs = self.reshape(outputs)
outputs = self.activation(outputs)
self.level += 1
return outputs
def efficientdet(phi, num_classes=20, num_anchors=9, weighted_bifpn=False, freeze_bn=False,
score_threshold=0.01, detect_quadrangle=False, anchor_parameters=None, separable_conv=True):
assert phi in range(7)
input_size = image_sizes[phi]
input_shape = (input_size, input_size, 3)
image_input = layers.Input(input_shape)
w_bifpn = w_bifpns[phi]
d_bifpn = d_bifpns[phi]
w_head = w_bifpn
d_head = d_heads[phi]
backbone_cls = backbones[phi]
features = backbone_cls(input_tensor=image_input, freeze_bn=freeze_bn)
if weighted_bifpn:
fpn_features = features
for i in range(d_bifpn):
fpn_features = build_wBiFPN(fpn_features, w_bifpn, i, freeze_bn=freeze_bn)
else:
fpn_features = features
for i in range(d_bifpn):
fpn_features = build_BiFPN(fpn_features, w_bifpn, i, freeze_bn=freeze_bn)
box_net = BoxNet(w_head, d_head, num_anchors=num_anchors, separable_conv=separable_conv, freeze_bn=freeze_bn,
detect_quadrangle=detect_quadrangle, name='box_net')
class_net = ClassNet(w_head, d_head, num_classes=num_classes, num_anchors=num_anchors,
separable_conv=separable_conv, freeze_bn=freeze_bn, name='class_net')
classification = [class_net([feature, i]) for i, feature in enumerate(fpn_features)]
classification = layers.Concatenate(axis=1, name='classification')(classification)
regression = [box_net([feature, i]) for i, feature in enumerate(fpn_features)]
regression = layers.Concatenate(axis=1, name='regression')(regression)
model = models.Model(inputs=[image_input], outputs=[classification, regression], name='efficientdet')
# apply predicted regression to anchors
anchors = anchors_for_shape((input_size, input_size), anchor_params=anchor_parameters)
anchors_input = np.expand_dims(anchors, axis=0)
boxes = RegressBoxes(name='boxes')([anchors_input, regression[..., :4]])
boxes = ClipBoxes(name='clipped_boxes')([image_input, boxes])
# filter detections (apply NMS / score threshold / select top-k)
if detect_quadrangle:
detections = FilterDetections(
name='filtered_detections',
score_threshold=score_threshold,
detect_quadrangle=True
)([boxes, classification, regression[..., 4:8], regression[..., 8]])
else:
detections = FilterDetections(
name='filtered_detections',
score_threshold=score_threshold
)([boxes, classification])
prediction_model = models.Model(inputs=[image_input], outputs=detections, name='efficientdet_p')
return model, prediction_model
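# Note: `model` ends at the raw head outputs (classification, regression) and is the
# graph trained with the focal / smooth-L1 losses, while `prediction_model` appends
# anchor decoding (RegressBoxes), clipping (ClipBoxes) and score/NMS filtering
# (FilterDetections) and is the graph to use for inference and evaluation.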
if __name__ == '__main__':
x, y = efficientdet(1)
| 29,621 | 61.362105 | 125 | py |
EfficientDet | EfficientDet-master/layers.py | # import keras
from tensorflow import keras
import tensorflow as tf
class BatchNormalization(keras.layers.BatchNormalization):
"""
Identical to keras.layers.BatchNormalization, but adds the option to freeze parameters.
"""
def __init__(self, freeze, *args, **kwargs):
self.freeze = freeze
super(BatchNormalization, self).__init__(*args, **kwargs)
# set to non-trainable if freeze is true
self.trainable = not self.freeze
def call(self, inputs, training=None, **kwargs):
# return super.call, but set training
if not training:
return super(BatchNormalization, self).call(inputs, training=False)
else:
return super(BatchNormalization, self).call(inputs, training=(not self.freeze))
def get_config(self):
config = super(BatchNormalization, self).get_config()
config.update({'freeze': self.freeze})
return config
class wBiFPNAdd(keras.layers.Layer):
def __init__(self, epsilon=1e-4, **kwargs):
super(wBiFPNAdd, self).__init__(**kwargs)
self.epsilon = epsilon
def build(self, input_shape):
num_in = len(input_shape)
self.w = self.add_weight(name=self.name,
shape=(num_in,),
initializer=keras.initializers.constant(1 / num_in),
trainable=True,
dtype=tf.float32)
def call(self, inputs, **kwargs):
w = keras.activations.relu(self.w)
x = tf.reduce_sum([w[i] * inputs[i] for i in range(len(inputs))], axis=0)
x = x / (tf.reduce_sum(w) + self.epsilon)
return x
def compute_output_shape(self, input_shape):
return input_shape[0]
def get_config(self):
config = super(wBiFPNAdd, self).get_config()
config.update({
'epsilon': self.epsilon
})
return config
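# Note: wBiFPNAdd implements the "fast normalized fusion" of the EfficientDet paper:
# out = sum_i(relu(w_i) * x_i) / (sum_i(relu(w_i)) + epsilon), with one learnable scalar
# per input initialised to 1/num_inputs, so the layer starts out as a plain average.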
def bbox_transform_inv(boxes, deltas, scale_factors=None):
cxa = (boxes[..., 0] + boxes[..., 2]) / 2
cya = (boxes[..., 1] + boxes[..., 3]) / 2
wa = boxes[..., 2] - boxes[..., 0]
ha = boxes[..., 3] - boxes[..., 1]
ty, tx, th, tw = deltas[..., 0], deltas[..., 1], deltas[..., 2], deltas[..., 3]
if scale_factors:
ty *= scale_factors[0]
tx *= scale_factors[1]
th *= scale_factors[2]
tw *= scale_factors[3]
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
cy = ty * ha + cya
cx = tx * wa + cxa
ymin = cy - h / 2.
xmin = cx - w / 2.
ymax = cy + h / 2.
xmax = cx + w / 2.
return tf.stack([xmin, ymin, xmax, ymax], axis=-1)
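# Note: bbox_transform_inv decodes regression deltas (ty, tx, th, tw) against anchors
# given as (x1, y1, x2, y2): the anchor centre is shifted by (tx * wa, ty * ha), the
# width and height are scaled by exp(tw) and exp(th), and the result is converted back
# to corner coordinates. scale_factors, if provided, rescale the raw deltas first.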
class ClipBoxes(keras.layers.Layer):
def call(self, inputs, **kwargs):
image, boxes = inputs
shape = keras.backend.cast(keras.backend.shape(image), keras.backend.floatx())
height = shape[1]
width = shape[2]
x1 = tf.clip_by_value(boxes[:, :, 0], 0, width - 1)
y1 = tf.clip_by_value(boxes[:, :, 1], 0, height - 1)
x2 = tf.clip_by_value(boxes[:, :, 2], 0, width - 1)
y2 = tf.clip_by_value(boxes[:, :, 3], 0, height - 1)
return keras.backend.stack([x1, y1, x2, y2], axis=2)
def compute_output_shape(self, input_shape):
return input_shape[1]
class RegressBoxes(keras.layers.Layer):
def __init__(self, *args, **kwargs):
super(RegressBoxes, self).__init__(*args, **kwargs)
def call(self, inputs, **kwargs):
anchors, regression = inputs
return bbox_transform_inv(anchors, regression)
def compute_output_shape(self, input_shape):
return input_shape[0]
def get_config(self):
config = super(RegressBoxes, self).get_config()
return config
def filter_detections(
boxes,
classification,
alphas=None,
ratios=None,
class_specific_filter=True,
nms=True,
score_threshold=0.01,
max_detections=100,
nms_threshold=0.5,
detect_quadrangle=False,
):
"""
Filter detections using the boxes and classification values.
Args
boxes: Tensor of shape (num_boxes, 4) containing the boxes in (x1, y1, x2, y2) format.
classification: Tensor of shape (num_boxes, num_classes) containing the classification scores.
        alphas, ratios: Optional tensors of shape (num_boxes, ...) filtered along with the boxes when detect_quadrangle is True.
class_specific_filter: Whether to perform filtering per class, or take the best scoring class and filter those.
nms: Flag to enable/disable non maximum suppression.
score_threshold: Threshold used to prefilter the boxes with.
max_detections: Maximum number of detections to keep.
        nms_threshold: Threshold for the IoU value to determine when a box should be suppressed.
        detect_quadrangle: If True, also filter and return the quadrangle alphas and ratios.
Returns
        A list of [boxes, scores, labels], or [boxes, scores, alphas, ratios, labels] when detect_quadrangle is True.
boxes is shaped (max_detections, 4) and contains the (x1, y1, x2, y2) of the non-suppressed boxes.
scores is shaped (max_detections,) and contains the scores of the predicted class.
labels is shaped (max_detections,) and contains the predicted label.
        In case there are fewer than max_detections detections, the tensors are padded with -1's.
"""
def _filter_detections(scores_, labels_):
# threshold based on score
# (num_score_keeps, 1)
indices_ = tf.where(keras.backend.greater(scores_, score_threshold))
if nms:
# (num_score_keeps, 4)
filtered_boxes = tf.gather_nd(boxes, indices_)
# In [4]: scores = np.array([0.1, 0.5, 0.4, 0.2, 0.7, 0.2])
# In [5]: tf.greater(scores, 0.4)
# Out[5]: <tf.Tensor: id=2, shape=(6,), dtype=bool, numpy=array([False, True, False, False, True, False])>
# In [6]: tf.where(tf.greater(scores, 0.4))
# Out[6]:
# <tf.Tensor: id=7, shape=(2, 1), dtype=int64, numpy=
# array([[1],
# [4]])>
#
# In [7]: tf.gather(scores, tf.where(tf.greater(scores, 0.4)))
# Out[7]:
# <tf.Tensor: id=15, shape=(2, 1), dtype=float64, numpy=
# array([[0.5],
# [0.7]])>
filtered_scores = keras.backend.gather(scores_, indices_)[:, 0]
# perform NMS
# filtered_boxes = tf.concat([filtered_boxes[..., 1:2], filtered_boxes[..., 0:1],
# filtered_boxes[..., 3:4], filtered_boxes[..., 2:3]], axis=-1)
nms_indices = tf.image.non_max_suppression(filtered_boxes, filtered_scores, max_output_size=max_detections,
iou_threshold=nms_threshold)
# filter indices based on NMS
# (num_score_nms_keeps, 1)
indices_ = keras.backend.gather(indices_, nms_indices)
# add indices to list of all indices
# (num_score_nms_keeps, )
labels_ = tf.gather_nd(labels_, indices_)
# (num_score_nms_keeps, 2)
indices_ = keras.backend.stack([indices_[:, 0], labels_], axis=1)
return indices_
if class_specific_filter:
all_indices = []
# perform per class filtering
for c in range(int(classification.shape[1])):
scores = classification[:, c]
labels = c * tf.ones((keras.backend.shape(scores)[0],), dtype='int64')
all_indices.append(_filter_detections(scores, labels))
# concatenate indices to single tensor
# (concatenated_num_score_nms_keeps, 2)
indices = keras.backend.concatenate(all_indices, axis=0)
else:
scores = keras.backend.max(classification, axis=1)
labels = keras.backend.argmax(classification, axis=1)
indices = _filter_detections(scores, labels)
# select top k
scores = tf.gather_nd(classification, indices)
labels = indices[:, 1]
scores, top_indices = tf.nn.top_k(scores, k=keras.backend.minimum(max_detections, keras.backend.shape(scores)[0]))
# filter input using the final set of indices
indices = keras.backend.gather(indices[:, 0], top_indices)
boxes = keras.backend.gather(boxes, indices)
labels = keras.backend.gather(labels, top_indices)
# zero pad the outputs
pad_size = keras.backend.maximum(0, max_detections - keras.backend.shape(scores)[0])
boxes = tf.pad(boxes, [[0, pad_size], [0, 0]], constant_values=-1)
scores = tf.pad(scores, [[0, pad_size]], constant_values=-1)
labels = tf.pad(labels, [[0, pad_size]], constant_values=-1)
labels = keras.backend.cast(labels, 'int32')
# set shapes, since we know what they are
boxes.set_shape([max_detections, 4])
scores.set_shape([max_detections])
labels.set_shape([max_detections])
if detect_quadrangle:
alphas = keras.backend.gather(alphas, indices)
ratios = keras.backend.gather(ratios, indices)
alphas = tf.pad(alphas, [[0, pad_size], [0, 0]], constant_values=-1)
ratios = tf.pad(ratios, [[0, pad_size]], constant_values=-1)
alphas.set_shape([max_detections, 4])
ratios.set_shape([max_detections])
return [boxes, scores, alphas, ratios, labels]
else:
return [boxes, scores, labels]
class FilterDetections(keras.layers.Layer):
"""
Keras layer for filtering detections using score threshold and NMS.
"""
def __init__(
self,
nms=True,
class_specific_filter=True,
nms_threshold=0.5,
score_threshold=0.01,
max_detections=100,
parallel_iterations=32,
detect_quadrangle=False,
**kwargs
):
"""
Filters detections using score threshold, NMS and selecting the top-k detections.
Args
nms: Flag to enable/disable NMS.
class_specific_filter: Whether to perform filtering per class, or take the best scoring class and filter those.
nms_threshold: Threshold for the IoU value to determine when a box should be suppressed.
score_threshold: Threshold used to prefilter the boxes with.
max_detections: Maximum number of detections to keep.
            parallel_iterations: Number of batch items to process in parallel.
            detect_quadrangle: If True, also filter and return the quadrangle alphas and ratios.
"""
self.nms = nms
self.class_specific_filter = class_specific_filter
self.nms_threshold = nms_threshold
self.score_threshold = score_threshold
self.max_detections = max_detections
self.parallel_iterations = parallel_iterations
self.detect_quadrangle = detect_quadrangle
super(FilterDetections, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
"""
Constructs the NMS graph.
Args
inputs : List of [boxes, classification, other[0], other[1], ...] tensors.
"""
boxes = inputs[0]
classification = inputs[1]
if self.detect_quadrangle:
alphas = inputs[2]
ratios = inputs[3]
# wrap nms with our parameters
def _filter_detections(args):
boxes_ = args[0]
classification_ = args[1]
alphas_ = args[2] if self.detect_quadrangle else None
ratios_ = args[3] if self.detect_quadrangle else None
return filter_detections(
boxes_,
classification_,
alphas_,
ratios_,
nms=self.nms,
class_specific_filter=self.class_specific_filter,
score_threshold=self.score_threshold,
max_detections=self.max_detections,
nms_threshold=self.nms_threshold,
detect_quadrangle=self.detect_quadrangle,
)
# call filter_detections on each batch item
if self.detect_quadrangle:
outputs = tf.map_fn(
_filter_detections,
elems=[boxes, classification, alphas, ratios],
dtype=['float32', 'float32', 'float32', 'float32', 'int32'],
parallel_iterations=self.parallel_iterations
)
else:
outputs = tf.map_fn(
_filter_detections,
elems=[boxes, classification],
dtype=['float32', 'float32', 'int32'],
parallel_iterations=self.parallel_iterations
)
return outputs
def compute_output_shape(self, input_shape):
"""
Computes the output shapes given the input shapes.
Args
input_shape : List of input shapes [boxes, classification].
Returns
List of tuples representing the output shapes:
[filtered_boxes.shape, filtered_scores.shape, filtered_labels.shape, filtered_other[0].shape, filtered_other[1].shape, ...]
"""
if self.detect_quadrangle:
return [
(input_shape[0][0], self.max_detections, 4),
(input_shape[1][0], self.max_detections),
(input_shape[1][0], self.max_detections, 4),
(input_shape[1][0], self.max_detections),
(input_shape[1][0], self.max_detections),
]
else:
return [
(input_shape[0][0], self.max_detections, 4),
(input_shape[1][0], self.max_detections),
(input_shape[1][0], self.max_detections),
]
def compute_mask(self, inputs, mask=None):
"""
This is required in Keras when there is more than 1 output.
"""
return (len(inputs) + 1) * [None]
def get_config(self):
"""
Gets the configuration of this layer.
Returns
Dictionary containing the parameters of this layer.
"""
config = super(FilterDetections, self).get_config()
config.update({
'nms': self.nms,
'class_specific_filter': self.class_specific_filter,
'nms_threshold': self.nms_threshold,
'score_threshold': self.score_threshold,
'max_detections': self.max_detections,
'parallel_iterations': self.parallel_iterations,
})
return config
| 14,393 | 36.978892 | 135 | py |
EfficientDet | EfficientDet-master/tfkeras.py | from utils import inject_tfkeras_modules, init_tfkeras_custom_objects
import efficientnet as model
EfficientNetB0 = inject_tfkeras_modules(model.EfficientNetB0)
EfficientNetB1 = inject_tfkeras_modules(model.EfficientNetB1)
EfficientNetB2 = inject_tfkeras_modules(model.EfficientNetB2)
EfficientNetB3 = inject_tfkeras_modules(model.EfficientNetB3)
EfficientNetB4 = inject_tfkeras_modules(model.EfficientNetB4)
EfficientNetB5 = inject_tfkeras_modules(model.EfficientNetB5)
EfficientNetB6 = inject_tfkeras_modules(model.EfficientNetB6)
EfficientNetB7 = inject_tfkeras_modules(model.EfficientNetB7)
preprocess_input = inject_tfkeras_modules(model.preprocess_input)
init_tfkeras_custom_objects()
| 694 | 42.4375 | 69 | py |
EfficientDet | EfficientDet-master/freeze_model.py | from model import efficientdet
import cv2
import os
import numpy as np
import time
from utils import preprocess_image
import tensorflow as tf
from tensorflow.keras import backend as K
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
graph = session.graph
with graph.as_default():
freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = tf.graph_util.convert_variables_to_constants(
session, input_graph_def, output_names, freeze_var_names)
return frozen_graph
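# Note: freeze_session relies on TF1-style graph APIs (tf.global_variables,
# tf.graph_util.convert_variables_to_constants), so this script assumes a TensorFlow 1.x
# session / graph-mode environment (or the tf.compat.v1 equivalents under TensorFlow 2).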
def main():
phi = 1
weighted_bifpn = False
model_path = 'checkpoints/2019-12-03/pascal_05_0.6283_1.1975_0.8029.h5'
image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
image_size = image_sizes[phi]
classes = [
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor',
]
num_classes = len(classes)
score_threshold = 0.5
model, prediction_model = efficientdet(phi=phi,
weighted_bifpn=weighted_bifpn,
num_classes=num_classes,
score_threshold=score_threshold)
prediction_model.load_weights(model_path, by_name=True)
frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in prediction_model.outputs])
tf.train.write_graph(frozen_graph, "./checkpoints/2019-12-03/", "pascal_05.pb", as_text=False)
if __name__ == '__main__':
main()
| 2,011 | 38.45098 | 122 | py |
EfficientDet | EfficientDet-master/train.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from datetime import date
import os
import sys
import tensorflow as tf
# import keras
# import keras.preprocessing.image
# import keras.backend as K
# from keras.optimizers import Adam, SGD
from tensorflow import keras
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam, SGD
from augmentor.color import VisualEffect
from augmentor.misc import MiscEffect
from model import efficientdet
from losses import smooth_l1, focal, smooth_l1_quad
from efficientnet import BASE_WEIGHTS_PATH, WEIGHTS_HASHES
def makedirs(path):
# Intended behavior: try to create the directory,
    # pass if the directory exists already, fail otherwise.
# Meant for Python 2.7/3.n compatibility.
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def get_session():
"""
Construct a modified tf session.
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def create_callbacks(training_model, prediction_model, validation_generator, args):
"""
Creates the callbacks to use during training.
Args
training_model: The model that is used for training.
prediction_model: The model that should be used for validation.
validation_generator: The generator for creating validation data.
args: parseargs args object.
Returns:
A list of callbacks used for training.
"""
callbacks = []
tensorboard_callback = None
if args.tensorboard_dir:
if tf.version.VERSION > '2.0.0':
file_writer = tf.summary.create_file_writer(args.tensorboard_dir)
file_writer.set_as_default()
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir=args.tensorboard_dir,
histogram_freq=0,
batch_size=args.batch_size,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None
)
callbacks.append(tensorboard_callback)
if args.evaluation and validation_generator:
if args.dataset_type == 'coco':
from eval.coco import Evaluate
# use prediction model for evaluation
evaluation = Evaluate(validation_generator, prediction_model, tensorboard=tensorboard_callback)
else:
from eval.pascal import Evaluate
evaluation = Evaluate(validation_generator, prediction_model, tensorboard=tensorboard_callback)
callbacks.append(evaluation)
# save the model
if args.snapshots:
# ensure directory created first; otherwise h5py will error after epoch.
makedirs(args.snapshot_path)
checkpoint = keras.callbacks.ModelCheckpoint(
os.path.join(
args.snapshot_path,
f'{args.dataset_type}_{{epoch:02d}}_{{loss:.4f}}_{{val_loss:.4f}}.h5' if args.compute_val_loss
else f'{args.dataset_type}_{{epoch:02d}}_{{loss:.4f}}.h5'
),
verbose=1,
save_weights_only=True,
# save_best_only=True,
# monitor="mAP",
# mode='max'
)
callbacks.append(checkpoint)
# callbacks.append(keras.callbacks.ReduceLROnPlateau(
# monitor='loss',
# factor=0.1,
# patience=2,
# verbose=1,
# mode='auto',
# min_delta=0.0001,
# cooldown=0,
# min_lr=0
# ))
return callbacks
def create_generators(args):
"""
Create generators for training and validation.
Args
args: parseargs object containing configuration for generators.
preprocess_image: Function that preprocesses an image for the network.
"""
common_args = {
'batch_size': args.batch_size,
'phi': args.phi,
'detect_text': args.detect_text,
'detect_quadrangle': args.detect_quadrangle
}
# create random transform generator for augmenting training data
if args.random_transform:
misc_effect = MiscEffect()
visual_effect = VisualEffect()
else:
misc_effect = None
visual_effect = None
if args.dataset_type == 'pascal':
from generators.pascal import PascalVocGenerator
train_generator = PascalVocGenerator(
args.pascal_path,
'trainval',
skip_difficult=True,
misc_effect=misc_effect,
visual_effect=visual_effect,
**common_args
)
validation_generator = PascalVocGenerator(
args.pascal_path,
'val',
skip_difficult=True,
shuffle_groups=False,
**common_args
)
elif args.dataset_type == 'csv':
from generators.csv_ import CSVGenerator
train_generator = CSVGenerator(
args.annotations_path,
args.classes_path,
misc_effect=misc_effect,
visual_effect=visual_effect,
**common_args
)
if args.val_annotations_path:
validation_generator = CSVGenerator(
args.val_annotations_path,
args.classes_path,
shuffle_groups=False,
**common_args
)
else:
validation_generator = None
elif args.dataset_type == 'coco':
# import here to prevent unnecessary dependency on cocoapi
from generators.coco import CocoGenerator
train_generator = CocoGenerator(
args.coco_path,
'train2017',
misc_effect=misc_effect,
visual_effect=visual_effect,
group_method='random',
**common_args
)
validation_generator = CocoGenerator(
args.coco_path,
'val2017',
shuffle_groups=False,
**common_args
)
else:
raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
return train_generator, validation_generator
def check_args(parsed_args):
"""
Function to check for inherent contradictions within parsed arguments.
For example, batch_size < num_gpus
Intended to raise errors prior to backend initialisation.
Args
parsed_args: parser.parse_args()
Returns
parsed_args
"""
if parsed_args.gpu and parsed_args.batch_size < len(parsed_args.gpu.split(',')):
raise ValueError(
"Batch size ({}) must be equal to or higher than the number of GPUs ({})".format(parsed_args.batch_size,
len(parsed_args.gpu.split(
','))))
return parsed_args
def parse_args(args):
"""
Parse the arguments.
"""
today = str(date.today())
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
subparsers.required = True
coco_parser = subparsers.add_parser('coco')
coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')
pascal_parser = subparsers.add_parser('pascal')
pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')
csv_parser = subparsers.add_parser('csv')
csv_parser.add_argument('annotations_path', help='Path to CSV file containing annotations for training.')
csv_parser.add_argument('classes_path', help='Path to a CSV file containing class label mapping.')
csv_parser.add_argument('--val-annotations-path',
help='Path to CSV file containing annotations for validation (optional).')
parser.add_argument('--detect-quadrangle', help='If to detect quadrangle.', action='store_true', default=False)
parser.add_argument('--detect-text', help='If is text detection task.', action='store_true', default=False)
parser.add_argument('--snapshot', help='Resume training from a snapshot.')
parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
parser.add_argument('--freeze-bn', help='Freeze training of BatchNormalization layers.', action='store_true')
parser.add_argument('--weighted-bifpn', help='Use weighted BiFPN', action='store_true')
parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
parser.add_argument('--phi', help='Hyper parameter phi', default=0, type=int, choices=(0, 1, 2, 3, 4, 5, 6))
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50)
parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)
parser.add_argument('--snapshot-path',
help='Path to store snapshots of models during training',
default='checkpoints/{}'.format(today))
parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output',
default='logs/{}'.format(today))
parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')
parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation',
action='store_false')
parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
parser.add_argument('--compute-val-loss', help='Compute validation loss during training', dest='compute_val_loss',
action='store_true')
# Fit generator arguments
parser.add_argument('--multiprocessing', help='Use multiprocessing in fit_generator.', action='store_true')
parser.add_argument('--workers', help='Number of generator workers.', type=int, default=1)
parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit_generator.', type=int,
default=10)
    parsed_args = parser.parse_args(args)
    print(vars(parsed_args))
    return check_args(parsed_args)
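# Hedged usage sketch (added for illustration, not part of the original script): how the
# 'pascal' sub-command defined above can be parsed programmatically. The dataset path and
# option values below are assumptions chosen only for the example.
def _example_parse_args():
    return parse_args(['pascal', '/tmp/VOCdevkit', '--phi', '0',
                       '--batch-size', '2', '--steps', '100'])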
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# create the generators
train_generator, validation_generator = create_generators(args)
num_classes = train_generator.num_classes()
num_anchors = train_generator.num_anchors
# optionally choose specific GPU
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# K.set_session(get_session())
model, prediction_model = efficientdet(args.phi,
num_classes=num_classes,
num_anchors=num_anchors,
weighted_bifpn=args.weighted_bifpn,
freeze_bn=args.freeze_bn,
detect_quadrangle=args.detect_quadrangle
)
# load pretrained weights
if args.snapshot:
if args.snapshot == 'imagenet':
model_name = 'efficientnet-b{}'.format(args.phi)
file_name = '{}_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'.format(model_name)
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = keras.utils.get_file(file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path, by_name=True)
else:
print('Loading model, this may take a second...')
model.load_weights(args.snapshot, by_name=True)
# freeze backbone layers
if args.freeze_backbone:
# 227, 329, 329, 374, 464, 566, 656
for i in range(1, [227, 329, 329, 374, 464, 566, 656][args.phi]):
model.layers[i].trainable = False
if args.gpu and len(args.gpu.split(',')) > 1:
model = keras.utils.multi_gpu_model(model, gpus=list(map(int, args.gpu.split(','))))
# compile model
model.compile(optimizer=Adam(lr=1e-3), loss={
'regression': smooth_l1_quad() if args.detect_quadrangle else smooth_l1(),
'classification': focal()
}, )
# print(model.summary())
# create the callbacks
callbacks = create_callbacks(
model,
prediction_model,
validation_generator,
args,
)
if not args.compute_val_loss:
validation_generator = None
elif args.compute_val_loss and validation_generator is None:
raise ValueError('When you have no validation data, you should not specify --compute-val-loss.')
# start training
return model.fit_generator(
generator=train_generator,
steps_per_epoch=args.steps,
initial_epoch=0,
epochs=args.epochs,
verbose=1,
callbacks=callbacks,
workers=args.workers,
use_multiprocessing=args.multiprocessing,
max_queue_size=args.max_queue_size,
validation_data=validation_generator
)
if __name__ == '__main__':
main()
| 14,180 | 36.123037 | 120 | py |
EfficientDet | EfficientDet-master/keras_.py | from utils import inject_keras_modules, init_keras_custom_objects
import efficientnet as model
EfficientNetB0 = inject_keras_modules(model.EfficientNetB0)
EfficientNetB1 = inject_keras_modules(model.EfficientNetB1)
EfficientNetB2 = inject_keras_modules(model.EfficientNetB2)
EfficientNetB3 = inject_keras_modules(model.EfficientNetB3)
EfficientNetB4 = inject_keras_modules(model.EfficientNetB4)
EfficientNetB5 = inject_keras_modules(model.EfficientNetB5)
EfficientNetB6 = inject_keras_modules(model.EfficientNetB6)
EfficientNetB7 = inject_keras_modules(model.EfficientNetB7)
preprocess_input = inject_keras_modules(model.preprocess_input)
init_keras_custom_objects()
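# Hedged usage sketch (added for illustration, not part of the original module): building a
# backbone through the injected constructors above. weights=None and the 512x512x3 input
# shape are assumptions; they avoid downloading pretrained weights for this example.
def _example_backbone():
    return EfficientNetB0(include_top=False, weights=None, input_shape=(512, 512, 3))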
| 670 | 40.9375 | 65 | py |
EfficientDet | EfficientDet-master/generators/common.py | import numpy as np
import random
import warnings
import cv2
from tensorflow import keras
from utils.anchors import anchors_for_shape, anchor_targets_bbox, AnchorParameters
class Generator(keras.utils.Sequence):
"""
Abstract generator class.
"""
def __init__(
self,
phi=0,
image_sizes=(512, 640, 768, 896, 1024, 1280, 1408),
misc_effect=None,
visual_effect=None,
batch_size=1,
group_method='random', # one of 'none', 'random', 'ratio'
shuffle_groups=True,
detect_text=False,
detect_quadrangle=False,
):
"""
Initialize Generator object.
Args:
batch_size: The size of the batches to generate.
            group_method: Determines how images are grouped together (defaults to 'random', one of ('none', 'random', 'ratio')).
shuffle_groups: If True, shuffles the groups each epoch.
image_sizes:
"""
self.misc_effect = misc_effect
self.visual_effect = visual_effect
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.detect_text = detect_text
self.detect_quadrangle = detect_quadrangle
self.image_size = image_sizes[phi]
self.groups = None
self.anchor_parameters = AnchorParameters.default if not self.detect_text else AnchorParameters(
ratios=(0.25, 0.5, 1., 2.),
sizes=(16, 32, 64, 128, 256))
self.anchors = anchors_for_shape((self.image_size, self.image_size), anchor_params=self.anchor_parameters)
self.num_anchors = self.anchor_parameters.num_anchors()
# Define groups
self.group_images()
# Shuffle when initializing
if self.shuffle_groups:
random.shuffle(self.groups)
def on_epoch_end(self):
if self.shuffle_groups:
random.shuffle(self.groups)
def size(self):
"""
Size of the dataset.
"""
raise NotImplementedError('size method not implemented')
def get_anchors(self):
"""
loads the anchors from a txt file
"""
with open(self.anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
# (N, 2), wh
return np.array(anchors).reshape(-1, 2)
def num_classes(self):
"""
Number of classes in the dataset.
"""
raise NotImplementedError('num_classes method not implemented')
def has_label(self, label):
"""
Returns True if label is a known label.
"""
raise NotImplementedError('has_label method not implemented')
def has_name(self, name):
"""
Returns True if name is a known class.
"""
raise NotImplementedError('has_name method not implemented')
def name_to_label(self, name):
"""
Map name to label.
"""
raise NotImplementedError('name_to_label method not implemented')
def label_to_name(self, label):
"""
Map label to name.
"""
raise NotImplementedError('label_to_name method not implemented')
def image_aspect_ratio(self, image_index):
"""
Compute the aspect ratio for an image with image_index.
"""
raise NotImplementedError('image_aspect_ratio method not implemented')
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
raise NotImplementedError('load_image method not implemented')
def load_annotations(self, image_index):
"""
Load annotations for an image_index.
"""
raise NotImplementedError('load_annotations method not implemented')
def load_annotations_group(self, group):
"""
Load annotations for all images in group.
"""
annotations_group = [self.load_annotations(image_index) for image_index in group]
for annotations in annotations_group:
assert (isinstance(annotations,
dict)), '\'load_annotations\' should return a list of dictionaries, received: {}'.format(
type(annotations))
assert (
'labels' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
assert (
'bboxes' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
return annotations_group
def filter_annotations(self, image_group, annotations_group, group):
"""
Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.
"""
# test all annotations
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
invalid_indices = np.where(
(annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) |
(annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1]) |
(annotations['bboxes'][:, 0] < 0) |
(annotations['bboxes'][:, 1] < 0) |
(annotations['bboxes'][:, 2] <= 0) |
(annotations['bboxes'][:, 3] <= 0) |
(annotations['bboxes'][:, 2] > image.shape[1]) |
(annotations['bboxes'][:, 3] > image.shape[0])
)[0]
# delete invalid indices
if len(invalid_indices):
warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(
group[index],
image.shape,
annotations['bboxes'][invalid_indices, :]
))
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)
# if annotations['bboxes'].shape[0] == 0:
# warnings.warn('Image with id {} (shape {}) contains no valid boxes before transform'.format(
# group[index],
# image.shape,
# ))
return image_group, annotations_group
def clip_transformed_annotations(self, image_group, annotations_group, group):
"""
Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.
"""
# test all annotations
filtered_image_group = []
filtered_annotations_group = []
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
image_height = image.shape[0]
image_width = image.shape[1]
# x1
annotations['bboxes'][:, 0] = np.clip(annotations['bboxes'][:, 0], 0, image_width - 2)
# y1
annotations['bboxes'][:, 1] = np.clip(annotations['bboxes'][:, 1], 0, image_height - 2)
# x2
annotations['bboxes'][:, 2] = np.clip(annotations['bboxes'][:, 2], 1, image_width - 1)
# y2
annotations['bboxes'][:, 3] = np.clip(annotations['bboxes'][:, 3], 1, image_height - 1)
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
small_indices = np.where(
(annotations['bboxes'][:, 2] - annotations['bboxes'][:, 0] < 3) |
(annotations['bboxes'][:, 3] - annotations['bboxes'][:, 1] < 3)
)[0]
# delete invalid indices
if len(small_indices):
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], small_indices, axis=0)
# import cv2
# for invalid_index in small_indices:
# x1, y1, x2, y2 = annotations['bboxes'][invalid_index]
# label = annotations['labels'][invalid_index]
# class_name = self.labels[label]
# print('width: {}'.format(x2 - x1))
# print('height: {}'.format(y2 - y1))
# cv2.rectangle(image, (int(round(x1)), int(round(y1))), (int(round(x2)), int(round(y2))), (0, 255, 0), 2)
# cv2.putText(image, class_name, (int(round(x1)), int(round(y1))), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 1)
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', image)
# cv2.waitKey(0)
filtered_image_group.append(image)
filtered_annotations_group.append(annotations_group[index])
return filtered_image_group, filtered_annotations_group
def load_image_group(self, group):
"""
Load images for all images in a group.
"""
return [self.load_image(image_index) for image_index in group]
def random_visual_effect_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
# apply visual effect
image = self.visual_effect(image)
return image, annotations
def random_visual_effect_group(self, image_group, annotations_group):
"""
Randomly apply visual effect on each image.
"""
assert (len(image_group) == len(annotations_group))
if self.visual_effect is None:
# do nothing
return image_group, annotations_group
for index in range(len(image_group)):
# apply effect on a single group entry
image_group[index], annotations_group[index] = self.random_visual_effect_group_entry(
image_group[index], annotations_group[index]
)
return image_group, annotations_group
def random_misc_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
# randomly transform both image and annotations
image, annotations = self.misc_effect(image, annotations)
return image, annotations
def random_misc_group(self, image_group, annotations_group):
"""
Randomly transforms each image and its annotations.
"""
assert (len(image_group) == len(annotations_group))
if self.misc_effect is None:
return image_group, annotations_group
for index in range(len(image_group)):
# transform a single group entry
image_group[index], annotations_group[index] = self.random_misc_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def preprocess_group_entry(self, image, annotations):
"""
Preprocess image and its annotations.
"""
# preprocess the image
image, scale = self.preprocess_image(image)
# apply resizing to annotations too
annotations['bboxes'] *= scale
if self.detect_quadrangle:
annotations['quadrangles'] *= scale
return image, annotations
def preprocess_group(self, image_group, annotations_group):
"""
Preprocess each image and its annotations in its group.
"""
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
# preprocess a single group entry
image_group[index], annotations_group[index] = self.preprocess_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def group_images(self):
"""
Order the images according to self.order and makes groups of self.batch_size.
"""
# determine the order of the images
order = list(range(self.size()))
if self.group_method == 'random':
random.shuffle(order)
elif self.group_method == 'ratio':
order.sort(key=lambda x: self.image_aspect_ratio(x))
# divide into groups, one group = one batch
self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in
range(0, len(order), self.batch_size)]
def compute_inputs(self, image_group, annotations_group):
"""
Compute inputs for the network using an image_group.
"""
batch_images = np.array(image_group).astype(np.float32)
return [batch_images]
def compute_alphas_and_ratios(self, annotations_group):
for i, annotations in enumerate(annotations_group):
quadrangles = annotations['quadrangles']
alphas = np.zeros((quadrangles.shape[0], 4), dtype=np.float32)
xmin = np.min(quadrangles, axis=1)[:, 0]
ymin = np.min(quadrangles, axis=1)[:, 1]
xmax = np.max(quadrangles, axis=1)[:, 0]
ymax = np.max(quadrangles, axis=1)[:, 1]
# alpha1, alpha2, alpha3, alpha4
alphas[:, 0] = (quadrangles[:, 0, 0] - xmin) / (xmax - xmin)
alphas[:, 1] = (quadrangles[:, 1, 1] - ymin) / (ymax - ymin)
alphas[:, 2] = (xmax - quadrangles[:, 2, 0]) / (xmax - xmin)
alphas[:, 3] = (ymax - quadrangles[:, 3, 1]) / (ymax - ymin)
annotations['alphas'] = alphas
# ratio
area1 = 0.5 * alphas[:, 0] * (1 - alphas[:, 3])
area2 = 0.5 * alphas[:, 1] * (1 - alphas[:, 0])
area3 = 0.5 * alphas[:, 2] * (1 - alphas[:, 1])
area4 = 0.5 * alphas[:, 3] * (1 - alphas[:, 2])
annotations['ratios'] = 1 - area1 - area2 - area3 - area4
def compute_targets(self, image_group, annotations_group):
"""
Compute target outputs for the network using images and their annotations.
"""
"""
Compute target outputs for the network using images and their annotations.
"""
batches_targets = anchor_targets_bbox(
self.anchors,
image_group,
annotations_group,
num_classes=self.num_classes(),
detect_quadrangle=self.detect_quadrangle
)
return list(batches_targets)
def compute_inputs_targets(self, group, debug=False):
"""
Compute inputs and target outputs for the network.
"""
# load images and annotations
# list
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly apply visual effect
image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)
# randomly transform data
# image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
# randomly apply misc effect
image_group, annotations_group = self.random_misc_group(image_group, annotations_group)
# perform preprocessing steps
image_group, annotations_group = self.preprocess_group(image_group, annotations_group)
# check validity of annotations
image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)
assert len(image_group) != 0
assert len(image_group) == len(annotations_group)
if self.detect_quadrangle:
# compute alphas and ratio for targets
self.compute_alphas_and_ratios(annotations_group)
# compute network inputs
inputs = self.compute_inputs(image_group, annotations_group)
# compute network targets
targets = self.compute_targets(image_group, annotations_group)
if debug:
return inputs, targets, annotations_group
return inputs, targets
def __len__(self):
"""
Number of batches for generator.
"""
return len(self.groups)
def __getitem__(self, index):
"""
Keras sequence method for generating batches.
"""
group = self.groups[index]
inputs, targets = self.compute_inputs_targets(group)
return inputs, targets
def preprocess_image(self, image):
# image, RGB
image_height, image_width = image.shape[:2]
if image_height > image_width:
scale = self.image_size / image_height
resized_height = self.image_size
resized_width = int(image_width * scale)
else:
scale = self.image_size / image_width
resized_height = int(image_height * scale)
resized_width = self.image_size
image = cv2.resize(image, (resized_width, resized_height))
image = image.astype(np.float32)
image /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
image -= mean
image /= std
pad_h = self.image_size - resized_height
pad_w = self.image_size - resized_width
image = np.pad(image, [(0, pad_h), (0, pad_w), (0, 0)], mode='constant')
return image, scale
def get_augmented_data(self, group):
"""
Compute inputs and target outputs for the network.
"""
# load images and annotations
# list
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly apply visual effect
# image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)
# randomly transform data
# image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
# randomly apply misc effect
# image_group, annotations_group = self.random_misc_group(image_group, annotations_group)
# perform preprocessing steps
image_group, annotations_group = self.preprocess_group(image_group, annotations_group)
# check validity of annotations
image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)
assert len(image_group) != 0
assert len(image_group) == len(annotations_group)
# compute alphas for targets
self.compute_alphas_and_ratios(annotations_group)
return image_group, annotations_group
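# A self-contained sketch (added for illustration, not part of the original class): the same
# keep-aspect-ratio resize, ImageNet normalisation and bottom/right zero padding performed by
# Generator.preprocess_image above, on plain numpy/cv2 values. The 480x640 dummy image and
# the phi=0 image size of 512 are assumptions for the example.
def _example_preprocess_image(image_size=512):
    image = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy RGB image, height 480, width 640
    image_height, image_width = image.shape[:2]
    if image_height > image_width:
        scale = image_size / image_height
        resized_height, resized_width = image_size, int(image_width * scale)
    else:
        scale = image_size / image_width
        resized_height, resized_width = int(image_height * scale), image_size
    image = cv2.resize(image, (resized_width, resized_height)).astype(np.float32) / 255.
    image = (image - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]
    pad_h, pad_w = image_size - resized_height, image_size - resized_width
    image = np.pad(image, [(0, pad_h), (0, pad_w), (0, 0)], mode='constant')
    # returns a (512, 512, 3) array plus the scale needed to map boxes back to the original image
    return image, scale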
| 18,975 | 38.045267 | 145 | py |
EfficientDet | EfficientDet-master/generators/csv_.py | """
Copyright 2017-2018 yhenon (https://github.com/yhenon/)
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from generators.common import Generator
import cv2
import numpy as np
from PIL import Image
from six import raise_from
import csv
import sys
import os.path as osp
from collections import OrderedDict
def _parse(value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
    Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _read_classes(csv_reader):
"""
Parse the classes file given by csv_reader.
"""
result = OrderedDict()
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def _read_quadrangle_annotations(csv_reader, classes, detect_text=False):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2, x3, y3, x4, y4, class_name = row[:10]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, x3, y3, x4, y4, class_name) == ('', '', '', '', '', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
x3 = _parse(x3, int, 'line {}: malformed x3: {{}}'.format(line))
y3 = _parse(y3, int, 'line {}: malformed y3: {{}}'.format(line))
x4 = _parse(x4, int, 'line {}: malformed x4: {{}}'.format(line))
y4 = _parse(y4, int, 'line {}: malformed y4: {{}}'.format(line))
# check if the current class name is correctly present
if detect_text:
if class_name == '###':
continue
else:
class_name = 'text'
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,x3,y3,x4,y4,class_name\' or \'img_file,,,,,\''),
None)
return result
def _read_annotations(csv_reader, classes):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
            img_file, x1, y1, x2, y2, class_name = row[:6]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''),
None)
return result
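# Hedged usage sketch (added for illustration, not part of the original module): parsing the
# two CSV layouts the functions above expect, from in-memory strings. The class names, image
# names and box coordinates are assumptions chosen only to show the expected columns.
def _example_read_csv_annotations():
    from io import StringIO
    classes = _read_classes(csv.reader(StringIO('car,0\nperson,1\n'), delimiter=','))
    annotations = _read_annotations(
        csv.reader(StringIO('img_001.jpg,10,20,110,220,car\nimg_002.jpg,,,,,\n'), delimiter=','),
        classes)
    # classes -> OrderedDict([('car', 0), ('person', 1)])
    # annotations -> {'img_001.jpg': [{'x1': 10, 'y1': 20, 'x2': 110, 'y2': 220, 'class': 'car'}],
    #                 'img_002.jpg': []}
    return classes, annotations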
def _open_for_csv(path):
"""
Open a file with flags suitable for csv.reader.
    For python2 this means opening in mode 'rb'; for python3 it means 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
class CSVGenerator(Generator):
"""
Generate data for a custom CSV dataset.
See https://github.com/fizyr/keras-retinanet#csv-datasets for more information.
"""
def __init__(
self,
csv_data_file,
csv_class_file,
base_dir=None,
detect_quadrangle=False,
detect_text=False,
**kwargs
):
"""
Initialize a CSV data generator.
Args
csv_data_file: Path to the CSV annotations file.
csv_class_file: Path to the CSV classes file.
detect_text: if do text detection
base_dir: Directory w.r.t. where the files are to be searched (defaults to the directory containing the csv_data_file).
"""
self.image_names = []
self.image_data = {}
self.base_dir = base_dir
self.detect_quadrangle = detect_quadrangle
self.detect_text = detect_text
# Take base_dir from annotations file if not explicitly specified.
if self.base_dir is None:
if osp.exists(csv_data_file):
self.base_dir = ''
else:
self.base_dir = osp.dirname(csv_data_file)
# parse the provided class file
try:
with _open_for_csv(csv_class_file) as file:
# class_name --> class_id
self.classes = _read_classes(csv.reader(file, delimiter=','))
except ValueError as e:
raise_from(ValueError('invalid CSV class file: {}: {}'.format(csv_class_file, e)), None)
self.labels = {}
# class_id --> class_name
for key, value in self.classes.items():
self.labels[value] = key
# csv with img_path, x1, y1, x2, y2, x3, y3, x4, y4, class_name
try:
with _open_for_csv(csv_data_file) as file:
# {'img_path1':[{'x1':xx,'y1':xx,'x2':xx,'y2':xx,'x3':xx,'y3':xx,'x4':xx,'y4':xx, 'class':xx}...],...}
if self.detect_quadrangle:
self.image_data = _read_quadrangle_annotations(csv.reader(file, delimiter=','), self.classes,
self.detect_text)
else:
self.image_data = _read_annotations(csv.reader(file, delimiter=','), self.classes)
except ValueError as e:
raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(csv_data_file, e)), None)
self.image_names = list(self.image_data.keys())
super(CSVGenerator, self).__init__(detect_text=detect_text, detect_quadrangle=detect_quadrangle, **kwargs)
def size(self):
"""
Size of the dataset.
"""
return len(self.image_names)
def num_classes(self):
"""
Number of classes in the dataset.
"""
return max(self.classes.values()) + 1
def has_label(self, label):
"""
Return True if label is a known label.
"""
return label in self.labels
def has_name(self, name):
"""
Returns True if name is a known class.
"""
return name in self.classes
def name_to_label(self, name):
"""
Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
"""
Map label to name.
"""
return self.labels[label]
def image_path(self, image_index):
"""
Returns the image path for image_index.
"""
return osp.join(self.base_dir, self.image_names[image_index])
def image_aspect_ratio(self, image_index):
"""
Compute the aspect ratio for an image with image_index.
"""
# PIL is fast for metadata
image = Image.open(self.image_path(image_index))
return float(image.width) / float(image.height)
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
image = cv2.imread(self.image_path(image_index))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def load_annotations(self, image_index):
"""
Load annotations for an image_index.
"""
path = self.image_names[image_index]
annotations = {'labels': np.empty((0,), dtype=np.int32),
'bboxes': np.empty((0, 4), dtype=np.float32),
'quadrangles': np.empty((0, 4, 2), dtype=np.float32),
}
for idx, annot in enumerate(self.image_data[path]):
annotations['labels'] = np.concatenate((annotations['labels'], [self.name_to_label(annot['class'])]))
if self.detect_quadrangle:
quadrangle = np.array([[float(annot['x1']), float(annot['y1'])],
[float(annot['x2']), float(annot['y2'])],
[float(annot['x3']), float(annot['y3'])],
[float(annot['x4']), float(annot['y4'])]])
ordered_quadrangle = self.reorder_vertexes(quadrangle)
annotations['quadrangles'] = np.concatenate((annotations['quadrangles'], ordered_quadrangle[None]))
annotations['bboxes'] = np.concatenate((annotations['bboxes'], [[
float(min(annot['x1'], annot['x2'], annot['x3'], annot['x4'])),
float(min(annot['y1'], annot['y2'], annot['y3'], annot['y4'])),
float(max(annot['x1'], annot['x2'], annot['x3'], annot['x4'])),
float(max(annot['y1'], annot['y2'], annot['y3'], annot['y4'])),
]]))
else:
annotations['bboxes'] = np.concatenate((annotations['bboxes'], [[
float(annot['x1']),
float(annot['y1']),
float(annot['x2']),
float(annot['y2']),
]]))
return annotations
def reorder_vertexes(self, vertexes):
"""
reorder vertexes as the paper shows, (top, right, bottom, left)
Args:
vertexes:
Returns:
"""
assert vertexes.shape == (4, 2)
xmin, ymin = np.min(vertexes, axis=0)
xmax, ymax = np.max(vertexes, axis=0)
# determine the first point with the smallest y,
        # if two vertexes have the same y, choose the one with the smaller x,
ordered_idxes = np.argsort(vertexes, axis=0)
ymin1_idx = ordered_idxes[0, 1]
ymin2_idx = ordered_idxes[1, 1]
if vertexes[ymin1_idx, 1] == vertexes[ymin2_idx, 1]:
if vertexes[ymin1_idx, 0] <= vertexes[ymin2_idx, 0]:
first_vertex_idx = ymin1_idx
else:
first_vertex_idx = ymin2_idx
else:
first_vertex_idx = ymin1_idx
ordered_idxes = [(first_vertex_idx + i) % 4 for i in range(4)]
ordered_vertexes = vertexes[ordered_idxes]
# drag the point to the corresponding edge
ordered_vertexes[0, 1] = ymin
ordered_vertexes[1, 0] = xmax
ordered_vertexes[2, 1] = ymax
ordered_vertexes[3, 0] = xmin
return ordered_vertexes
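# A small sketch (added for illustration, not part of the original class): the vertex
# re-ordering above applied to one hand-made quadrangle. reorder_vertexes does not use self,
# so it can be called as a plain function here; the coordinates are assumptions chosen to make
# the (top, right, bottom, left) ordering visible.
def _example_reorder_vertexes():
    quadrangle = np.array([[20., 60.],
                           [5., 30.],
                           [30., 10.],
                           [50., 40.]])
    # returns [[30, 10], [50, 40], [20, 60], [5, 30]], i.e. top, right, bottom, left
    return CSVGenerator.reorder_vertexes(None, quadrangle)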
| 13,347 | 36.389356 | 131 | py |
EfficientDet | EfficientDet-master/eval/pascal.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import keras
from tensorflow import keras
import tensorflow as tf
from eval.common import evaluate
class Evaluate(keras.callbacks.Callback):
"""
Evaluation callback for arbitrary datasets.
"""
def __init__(
self,
generator,
model,
iou_threshold=0.5,
score_threshold=0.01,
max_detections=100,
save_path=None,
tensorboard=None,
weighted_average=False,
verbose=1
):
"""
Evaluate a given dataset using a given model at the end of every epoch during training.
Args:
generator: The generator that represents the dataset to evaluate.
iou_threshold: The threshold used to consider when a detection is positive or negative.
score_threshold: The score confidence threshold to use for detections.
max_detections: The maximum number of detections to use per image.
save_path: The path to save images with visualized detections to.
tensorboard: Instance of keras.callbacks.TensorBoard used to log the mAP value.
weighted_average: Compute the mAP using the weighted average of precisions among classes.
verbose: Set the verbosity level, by default this is set to 1.
"""
self.generator = generator
self.iou_threshold = iou_threshold
self.score_threshold = score_threshold
self.max_detections = max_detections
self.save_path = save_path
self.tensorboard = tensorboard
self.weighted_average = weighted_average
self.verbose = verbose
self.active_model = model
super(Evaluate, self).__init__()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
# run evaluation
average_precisions = evaluate(
self.generator,
self.active_model,
iou_threshold=self.iou_threshold,
score_threshold=self.score_threshold,
max_detections=self.max_detections,
visualize=False
)
# compute per class average precision
total_instances = []
precisions = []
for label, (average_precision, num_annotations) in average_precisions.items():
if self.verbose == 1:
print('{:.0f} instances of class'.format(num_annotations),
self.generator.label_to_name(label), 'with average precision: {:.4f}'.format(average_precision))
total_instances.append(num_annotations)
precisions.append(average_precision)
if self.weighted_average:
self.mean_ap = sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)
else:
self.mean_ap = sum(precisions) / sum(x > 0 for x in total_instances)
if self.tensorboard is not None:
if tf.version.VERSION < '2.0.0' and self.tensorboard.writer is not None:
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = self.mean_ap
summary_value.tag = "mAP"
self.tensorboard.writer.add_summary(summary, epoch)
else:
tf.summary.scalar('mAP', self.mean_ap, epoch)
logs['mAP'] = self.mean_ap
if self.verbose == 1:
print('mAP: {:.4f}'.format(self.mean_ap))
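# A self-contained sketch (added for illustration, not part of the original callback): the two
# ways on_epoch_end above aggregates per-class average precisions into a single mAP. The AP
# values and instance counts are made-up numbers.
def _example_map_aggregation():
    total_instances = [120, 30, 0]   # instances per class; the last class has no annotations
    precisions = [0.80, 0.40, 0.00]  # per-class average precision
    weighted = sum(a * b for a, b in zip(total_instances, precisions)) / sum(total_instances)
    unweighted = sum(precisions) / sum(x > 0 for x in total_instances)
    return weighted, unweighted  # 0.72 and 0.6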
| 3,989 | 36.641509 | 118 | py |
EfficientDet | EfficientDet-master/eval/coco.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import keras
from tensorflow import keras
import tensorflow as tf
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
from tqdm import trange
import cv2
from generators.coco import CocoGenerator
def evaluate(generator, model, threshold=0.01):
"""
Use the pycocotools to evaluate a COCO model on a dataset.
Args
generator: The generator for generating the evaluation data.
model: The model to evaluate.
threshold: The score threshold to use.
"""
# start collecting results
results = []
image_ids = []
for index in trange(generator.size(), desc='COCO evaluation: '):
image = generator.load_image(index)
src_image = image.copy()
h, w = image.shape[:2]
image, scale = generator.preprocess_image(image)
# run network
boxes, scores, labels = model.predict_on_batch([np.expand_dims(image, axis=0)])
boxes /= scale
boxes[:, :, 0] = np.clip(boxes[:, :, 0], 0, w - 1)
boxes[:, :, 1] = np.clip(boxes[:, :, 1], 0, h - 1)
boxes[:, :, 2] = np.clip(boxes[:, :, 2], 0, w - 1)
boxes[:, :, 3] = np.clip(boxes[:, :, 3], 0, h - 1)
# change to (x, y, w, h) (MS COCO standard)
boxes[:, :, 2] -= boxes[:, :, 0]
boxes[:, :, 3] -= boxes[:, :, 1]
# select indices which have a score above the threshold
indices = np.where(scores[0, :] > threshold)[0]
boxes = boxes[0, indices]
scores = scores[0, indices]
class_ids = labels[0, indices]
# compute predicted labels and scores
for box, score, class_id in zip(boxes, scores, class_ids):
# append detection for each positively labeled class
image_result = {
'image_id': generator.image_ids[index],
'category_id': int(class_id) + 1,
'score': float(score),
'bbox': box.tolist(),
}
# append detection to results
results.append(image_result)
# box = np.round(box).astype(np.int32)
# class_name = generator.label_to_name(generator.coco_label_to_label(class_id + 1))
# ret, baseline = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
# cv2.rectangle(src_image, (box[0], box[1]), (box[0] + box[2], box[1] + box[3]), (0, 255, 0), 1)
# cv2.putText(src_image, class_name, (box[0], box[1] + box[3] - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
# (0, 0, 0), 1)
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', src_image)
# cv2.waitKey(0)
# append image to list of processed images
image_ids.append(generator.image_ids[index])
if not len(results):
return
# write output
json.dump(results, open('{}_bbox_results.json'.format(generator.set_name), 'w'), indent=4)
json.dump(image_ids, open('{}_processed_image_ids.json'.format(generator.set_name), 'w'), indent=4)
# # load results in COCO evaluation tool
# coco_true = generator.coco
# coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(generator.set_name))
#
# # run COCO evaluation
# coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
# coco_eval.params.imgIds = image_ids
# coco_eval.evaluate()
# coco_eval.accumulate()
# coco_eval.summarize()
# return coco_eval.stats
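# A minimal sketch (added for illustration, not part of the original module): the
# (x1, y1, x2, y2) -> (x, y, w, h) conversion and the per-detection dictionary that evaluate()
# above accumulates before writing the results JSON. The box, score and ids are made-up values.
def _example_coco_result_entry():
    box = np.array([50., 40., 150., 120.])  # x1, y1, x2, y2 in image coordinates
    box[2] -= box[0]  # width
    box[3] -= box[1]  # height
    class_id = 3  # assumed predicted label
    return {
        'image_id': 42,  # assumed COCO image id
        'category_id': int(class_id) + 1,  # shift to the 1-based COCO category id
        'score': 0.87,
        'bbox': box.tolist(),  # [50.0, 40.0, 100.0, 80.0]
    }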
class Evaluate(keras.callbacks.Callback):
""" Performs COCO evaluation on each epoch.
"""
def __init__(self, generator, model, tensorboard=None, threshold=0.01):
""" Evaluate callback initializer.
Args
generator : The generator used for creating validation data.
model: prediction model
tensorboard : If given, the results will be written to tensorboard.
threshold : The score threshold to use.
"""
self.generator = generator
self.active_model = model
self.threshold = threshold
self.tensorboard = tensorboard
super(Evaluate, self).__init__()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
coco_tag = ['AP @[ IoU=0.50:0.95 | area= all | maxDets=100 ]',
'AP @[ IoU=0.50 | area= all | maxDets=100 ]',
'AP @[ IoU=0.75 | area= all | maxDets=100 ]',
'AP @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
'AP @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
'AP @[ IoU=0.50:0.95 | area= large | maxDets=100 ]',
'AR @[ IoU=0.50:0.95 | area= all | maxDets= 1 ]',
'AR @[ IoU=0.50:0.95 | area= all | maxDets= 10 ]',
'AR @[ IoU=0.50:0.95 | area= all | maxDets=100 ]',
'AR @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
'AR @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
'AR @[ IoU=0.50:0.95 | area= large | maxDets=100 ]']
coco_eval_stats = evaluate(self.generator, self.active_model, self.threshold)
if coco_eval_stats is not None and self.tensorboard is not None:
if tf.version.VERSION < '2.0.0' and self.tensorboard.writer is not None:
summary = tf.Summary()
for index, result in enumerate(coco_eval_stats):
summary_value = summary.value.add()
summary_value.simple_value = result
summary_value.tag = '{}. {}'.format(index + 1, coco_tag[index])
self.tensorboard.writer.add_summary(summary, epoch)
logs[coco_tag[index]] = result
else:
for index, result in enumerate(coco_eval_stats):
tag = '{}. {}'.format(index + 1, coco_tag[index])
tf.summary.scalar(tag, result, epoch)
if __name__ == '__main__':
from model import efficientdet
import os
from generators.coco import CocoGenerator
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
phi = 2
weighted_bifpn = True
model_path = 'efficientdet-d2.h5'
common_args = {
'batch_size': 1,
'phi': phi,
}
test_generator = CocoGenerator(
'datasets/coco',
'test-dev2017',
shuffle_groups=False,
**common_args
)
num_classes = test_generator.num_classes()
model, prediction_model = efficientdet(phi=phi, num_classes=num_classes, weighted_bifpn=weighted_bifpn,
score_threshold=0.01)
prediction_model.load_weights(model_path, by_name=True)
evaluate(test_generator, prediction_model, threshold=0.01)
| 7,327 | 37.772487 | 117 | py |
EfficientDet | EfficientDet-master/augmentor/transform.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
identity_matrix = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
def colvec(*args):
"""
Create a numpy array representing a column vector.
"""
return np.array([args]).T
def transform_aabb(transform_matrix, aabb):
"""
Apply a transformation to an axis aligned bounding box.
The result is a new AABB in the same coordinate system as the original AABB.
The new AABB contains all corner points of the original AABB after applying the given transformation.
Args
transform: The transformation to apply.
x1: The minimum x value of the AABB.
y1: The minimum y value of the AABB.
x2: The maximum x value of the AABB.
y2: The maximum y value of the AABB.
Returns
The new AABB as tuple (x1, y1, x2, y2)
"""
x1, y1, x2, y2 = aabb
# Transform all 4 corners of the AABB.
points = transform_matrix.dot([
[x1, x2, x1, x2],
[y1, y2, y2, y1],
[1, 1, 1, 1],
])
# Extract the min and max corners again.
# (3, ) (min_x, min_y, 1)
min_corner = points.min(axis=1)
# (3, ) (max_x, max_y, 1)
max_corner = points.max(axis=1)
return [min_corner[0], min_corner[1], max_corner[0], max_corner[1]]
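# A minimal sketch (added for illustration, not part of the original module): transforming an
# axis aligned box with transform_aabb above. The 90 degree rotation about the origin and the
# box values are assumptions chosen so the result is easy to verify by hand.
def _example_transform_aabb():
    rotate_90 = np.array([[0, -1, 0],
                          [1, 0, 0],
                          [0, 0, 1]])
    # the box (10, 20, 30, 60) maps to (-60, 10, -20, 30)
    return transform_aabb(rotate_90, [10, 20, 30, 60])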
def random_value(min, max):
return np.random.uniform(min, max)
def random_vector(min, max):
"""
Construct a random vector between min and max.
Args
min: the minimum value for each component, (n, )
max: the maximum value for each component, (n, )
"""
min = np.array(min)
max = np.array(max)
assert min.shape == max.shape
assert len(min.shape) == 1
return np.random.uniform(min, max)
def rotation(min=0, max=0, prob=0.5):
"""
Construct a homogeneous 2D rotation matrix.
Args
min: a scalar for the minimum absolute angle in radians
max: a scalar for the maximum absolute angle in radians
Returns
the rotation matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# angle: the angle in radians
angle = random_value(min=min, max=max)
return np.array([
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]
])
else:
return identity_matrix
def translation_x(min=0, max=0, prob=0.5):
"""
Construct a homogeneous 2D translation matrix.
Args:
min: a scalar for the minimum translation for x axis
max: a scalar for the maximum translation for x axis
Returns:
the translation matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# translation: the translation 2D vector
translation = random_value(min=min, max=max)
return np.array([
[1, 0, translation],
            [0, 1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def translation_y(min=0, max=0, prob=0.5):
"""
Construct a homogeneous 2D translation matrix.
Args:
min: a scalar for the minimum translation for y axis
max: a scalar for the maximum translation for y axis
Returns:
the translation matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# translation: the translation 2D vector
translation = random_value(min=min, max=max)
return np.array([
            [1, 0, 0],
[0, 1, translation],
[0, 0, 1]
])
else:
return identity_matrix
def translation_xy(min=(0, 0), max=(0, 0), prob=0.5):
"""
Construct a homogeneous 2D translation matrix.
Args:
        min: a 2D vector for the minimum (x, y) translation
        max: a 2D vector for the maximum (x, y) translation
Returns:
the translation matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob < prob:
# translation: the translation 2D vector
dx = np.random.randint(min[0], max[0])
dy = np.random.randint(min[1], max[1])
return np.array([
[1, 0, dx],
[0, 1, dy],
[0, 0, 1]
])
else:
return identity_matrix
def shear_x(min=0, max=0, prob=0.5):
"""
Construct a homogeneous 2D shear matrix.
Args
min: the minimum shear angle in radians.
max: the maximum shear angle in radians.
Returns
the shear matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# angle: the shear angle in radians
angle = random_value(min=min, max=max)
return np.array([
[1, np.tan(angle), 0],
[0, 1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def shear_y(min, max, prob=0.5):
"""
Construct a homogeneous 2D shear matrix.
Args
min: the minimum shear angle in radians.
max: the maximum shear angle in radians.
Returns
the shear matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# angle: the shear angle in radians
angle = random_value(min=min, max=max)
return np.array([
[1, 0, 0],
[np.tan(angle), 1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def scaling_x(min=0.9, max=1.1, prob=0.5):
"""
Construct a homogeneous 2D scaling matrix.
Args
        min: the minimum scaling factor for the x axis
        max: the maximum scaling factor for the x axis
Returns
the zoom matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
        # factor: the scaling factor for the x axis
factor = random_value(min=min, max=max)
return np.array([
[factor, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def scaling_y(min=0.9, max=1.1, prob=0.5):
"""
Construct a homogeneous 2D scaling matrix.
Args
        min: the minimum scaling factor for the y axis
        max: the maximum scaling factor for the y axis
Returns
the zoom matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
        # factor: the scaling factor for the y axis
factor = random_value(min=min, max=max)
return np.array([
[1, 0, 0],
[0, factor, 0],
[0, 0, 1]
])
else:
return identity_matrix
def scaling_xy(min=(0.9, 0.9), max=(1.1, 1.1), prob=0.5):
"""
Construct a homogeneous 2D scaling matrix.
Args
min: a 2D vector containing the minimum scaling factor for X and Y.
        max: a 2D vector containing the maximum scaling factor for X and Y.
Returns
the zoom matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# factor: a 2D vector for X and Y scaling
factor = random_vector(min=min, max=max)
return np.array([
[factor[0], 0, 0],
[0, factor[1], 0],
[0, 0, 1]
])
else:
return identity_matrix
def flip_x(prob=0.8):
"""
Construct a transformation randomly containing X/Y flips (or not).
Args
        prob: threshold on a uniform random draw; the flip along the X axis is applied only when the draw exceeds prob.
Returns
a homogeneous 3 by 3 transformation matrix
"""
random_prob = np.random.uniform()
if random_prob > prob:
        # horizontal flip: negate the x coordinate
return np.array([
[-1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def flip_y(prob=0.8):
"""
Construct a transformation randomly containing X/Y flips (or not).
Args
        prob: threshold on a uniform random draw; the flip along the Y axis is applied only when the draw exceeds prob.
Returns
a homogeneous 3 by 3 transformation matrix
"""
random_prob = np.random.uniform()
if random_prob > prob:
        # vertical flip: negate the y coordinate
return np.array([
[1, 0, 0],
[0, -1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def change_transform_origin(transform, center):
"""
Create a new transform representing the same transformation, only with the origin of the linear part changed.
Args
transform: the transformation matrix
center: the new origin of the transformation
Returns
translate(center) * transform * translate(-center)
"""
center = np.array(center)
return np.linalg.multi_dot([np.array([[1, 0, center[0]], [0, 1, center[1]], [0, 0, 1]]),
transform,
np.array([[1, 0, -center[0]], [0, 1, -center[1]], [0, 0, 1]])])
def random_transform(
min_rotation=0,
max_rotation=0,
min_translation=(0, 0),
max_translation=(0, 0),
min_shear=0,
max_shear=0,
min_scaling=(1, 1),
max_scaling=(1, 1),
):
"""
Create a random transformation.
The transformation consists of the following operations in this order (from left to right):
* rotation
* translation
* shear
* scaling
* flip x (if applied)
* flip y (if applied)
Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation
as factor of the image size. So an X translation of 0.1 would translate the image by 10% of it's width.
Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret
the translation directly as pixel distances instead.
Args
min_rotation: The minimum rotation in radians for the transform as scalar.
max_rotation: The maximum rotation in radians for the transform as scalar.
min_translation: The minimum translation for the transform as 2D column vector.
max_translation: The maximum translation for the transform as 2D column vector.
min_shear: The minimum shear angle for the transform in radians.
max_shear: The maximum shear angle for the transform in radians.
min_scaling: The minimum scaling for the transform as 2D column vector.
max_scaling: The maximum scaling for the transform as 2D column vector.
"""
return np.linalg.multi_dot([
rotation(min_rotation, max_rotation),
translation_xy(min_translation, max_translation),
shear_x(min_shear, max_shear) if np.random.uniform() > 0.5 else shear_y(min_shear, max_shear),
scaling_xy(min_scaling, max_scaling),
flip_x() if np.random.uniform() > 0.5 else flip_y(),
])
def random_transform_generator(**kwargs):
"""
Create a random transform generator.
The transformation consists of the following operations in this order (from left to right):
* rotation
* translation
* shear
* scaling
* flip x (if applied)
* flip y (if applied)
Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation
as factor of the image size. So an X translation of 0.1 would translate the image by 10% of it's width.
Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret
the translation directly as pixel distances instead.
Args
min_rotation: The minimum rotation in radians for the transform as scalar.
max_rotation: The maximum rotation in radians for the transform as scalar.
min_translation: The minimum translation for the transform as 2D column vector.
max_translation: The maximum translation for the transform as 2D column vector.
min_shear: The minimum shear angle for the transform in radians.
max_shear: The maximum shear angle for the transform in radians.
min_scaling: The minimum scaling for the transform as 2D column vector.
max_scaling: The maximum scaling for the transform as 2D column vector.
"""
while True:
yield random_transform(**kwargs)
def adjust_transform_for_image(transform, image, relative_translation):
"""
Adjust a transformation for a specific image.
The translation of the matrix will be scaled with the size of the image.
    The linear part of the transformation will be adjusted so that the origin of the transformation will be at the center of the image.
"""
height, width, channels = image.shape
result = transform
# Scale the translation with the image size if specified.
if relative_translation:
result[0:2, 2] *= [width, height]
# Move the origin of transformation.
result = change_transform_origin(transform, (0.5 * width, 0.5 * height))
return result
class TransformParameters:
"""
Struct holding parameters determining how to apply a transformation to an image.
Args
fill_mode: One of: 'constant', 'nearest', 'reflect', 'wrap'
interpolation: One of: 'nearest', 'linear', 'cubic', 'area', 'lanczos4'
cval: Fill value to use with fill_mode='constant'
relative_translation: If true (the default), interpret translation as a factor of the image size.
If false, interpret it as absolute pixels.
"""
def __init__(
self,
fill_mode='nearest',
interpolation='linear',
cval=0,
relative_translation=True,
):
self.fill_mode = fill_mode
self.cval = cval
self.interpolation = interpolation
self.relative_translation = relative_translation
def cv_border_mode(self):
if self.fill_mode == 'constant':
return cv2.BORDER_CONSTANT
if self.fill_mode == 'nearest':
return cv2.BORDER_REPLICATE
if self.fill_mode == 'reflect':
return cv2.BORDER_REFLECT_101
if self.fill_mode == 'wrap':
return cv2.BORDER_WRAP
def cv_interpolation(self):
if self.interpolation == 'nearest':
return cv2.INTER_NEAREST
if self.interpolation == 'linear':
return cv2.INTER_LINEAR
if self.interpolation == 'cubic':
return cv2.INTER_CUBIC
if self.interpolation == 'area':
return cv2.INTER_AREA
if self.interpolation == 'lanczos4':
return cv2.INTER_LANCZOS4
def apply_transform(matrix, image, params):
"""
Apply a transformation to an image.
The origin of transformation is at the top left corner of the image.
The matrix is interpreted such that a point (x, y) on the original image is moved to transform * (x, y) in the generated image.
Mathematically speaking, that means that the matrix is a transformation from the transformed image space to the original image space.
Args
matrix: A homogeneous 3 by 3 matrix holding representing the transformation to apply.
image: The image to transform.
params: The transform parameters (see TransformParameters)
"""
output = cv2.warpAffine(
image,
matrix[:2, :],
dsize=(image.shape[1], image.shape[0]),
        flags=params.cv_interpolation(),
        borderMode=params.cv_border_mode(),
borderValue=params.cval,
)
return output
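# A hedged end-to-end sketch (added for illustration, not part of the original module): build a
# random transform with the helpers above, re-centre it on a dummy image and warp both the
# image and a box. All parameter ranges and the 200x300 image are assumptions for the example.
def _example_apply_random_transform():
    image = np.zeros((200, 300, 3), dtype=np.uint8)
    matrix = random_transform(
        min_rotation=-0.1, max_rotation=0.1,
        min_translation=(-10, -10), max_translation=(10, 10),
        min_shear=-0.1, max_shear=0.1,
        min_scaling=(0.9, 0.9), max_scaling=(1.1, 1.1),
    )
    params = TransformParameters(relative_translation=False)
    matrix = adjust_transform_for_image(matrix, image, params.relative_translation)
    warped_image = apply_transform(matrix, image, params)
    warped_box = transform_aabb(matrix, [50, 40, 120, 90])
    return warped_image, warped_box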
| 16,261 | 30.034351 | 137 | py |
EfficientDet | EfficientDet-master/utils/anchors.py | # import keras
import numpy as np
from tensorflow import keras
from utils.compute_overlap import compute_overlap
class AnchorParameters:
"""
The parameters that define how anchors are generated.
Args
sizes : List of sizes to use. Each size corresponds to one feature level.
strides : List of strides to use. Each stride correspond to one feature level.
ratios : List of ratios to use per location in a feature map.
scales : List of scales to use per location in a feature map.
"""
def __init__(self, sizes=(32, 64, 128, 256, 512),
strides=(8, 16, 32, 64, 128),
ratios=(1, 0.5, 2),
scales=(2 ** 0, 2 ** (1. / 3.), 2 ** (2. / 3.))):
self.sizes = sizes
self.strides = strides
self.ratios = np.array(ratios, dtype=keras.backend.floatx())
self.scales = np.array(scales, dtype=keras.backend.floatx())
def num_anchors(self):
return len(self.ratios) * len(self.scales)
"""
The default anchor parameters.
"""
AnchorParameters.default = AnchorParameters(
sizes=[32, 64, 128, 256, 512],
strides=[8, 16, 32, 64, 128],
# ratio=h/w
ratios=np.array([1, 0.5, 2], keras.backend.floatx()),
scales=np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)], keras.backend.floatx()),
)
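# A small sketch (added for illustration, not part of the original module): a custom anchor
# configuration of the kind used elsewhere in this repository for text detection, together with
# the number of anchors it yields per feature map location. The values mirror the
# AnchorParameters call in generators/common.py.
def _example_text_anchor_parameters():
    text_params = AnchorParameters(
        ratios=(0.25, 0.5, 1., 2.),
        sizes=(16, 32, 64, 128, 256))
    # 4 ratios x 3 default scales = 12 anchors per location
    return text_params, text_params.num_anchors()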
def anchor_targets_bbox(
anchors,
image_group,
annotations_group,
num_classes,
negative_overlap=0.4,
positive_overlap=0.5,
detect_quadrangle=False
):
"""
Generate anchor targets for bbox detection.
Args
anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
image_group: List of BGR images.
annotations_group: List of annotations (np.array of shape (N, 5) for (x1, y1, x2, y2, label)).
num_classes: Number of classes to predict.
mask_shape: If the image is padded with zeros, mask_shape can be used to mark the relevant part of the image.
negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).
Returns
labels_batch: batch that contains labels & anchor states (np.array of shape (batch_size, N, num_classes + 1),
where N is the number of anchors for an image and the last column defines the anchor state
(-1 for ignore, 0 for bg, 1 for fg).
regression_batch: batch that contains bounding-box regression targets for an image & anchor states
(np.array of shape (batch_size, N, 4 + 1), where N is the number of anchors for an image,
the first 4 columns define regression targets for (x1, y1, x2, y2) and the last column defines
anchor states (-1 for ignore, 0 for bg, 1 for fg).
"""
assert (len(image_group) == len(annotations_group)), "The length of the images and annotations need to be equal."
assert (len(annotations_group) > 0), "No data received to compute anchor targets for."
for annotations in annotations_group:
assert ('bboxes' in annotations), "Annotations should contain bboxes."
assert ('labels' in annotations), "Annotations should contain labels."
batch_size = len(image_group)
if detect_quadrangle:
regression_batch = np.zeros((batch_size, anchors.shape[0], 9 + 1), dtype=np.float32)
else:
regression_batch = np.zeros((batch_size, anchors.shape[0], 4 + 1), dtype=np.float32)
labels_batch = np.zeros((batch_size, anchors.shape[0], num_classes + 1), dtype=np.float32)
# compute labels and regression targets
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
if annotations['bboxes'].shape[0]:
# obtain indices of gt annotations with the greatest overlap
# argmax_overlaps_inds: id of ground truth box has greatest overlap with anchor
# (N, ), (N, ), (N, ) N is num_anchors
positive_indices, ignore_indices, argmax_overlaps_inds = compute_gt_annotations(anchors,
annotations['bboxes'],
negative_overlap,
positive_overlap)
labels_batch[index, ignore_indices, -1] = -1
labels_batch[index, positive_indices, -1] = 1
regression_batch[index, ignore_indices, -1] = -1
regression_batch[index, positive_indices, -1] = 1
# compute target class labels
labels_batch[
index, positive_indices, annotations['labels'][argmax_overlaps_inds[positive_indices]].astype(int)] = 1
regression_batch[index, :, :4] = bbox_transform(anchors, annotations['bboxes'][argmax_overlaps_inds, :])
if detect_quadrangle:
regression_batch[index, :, 4:8] = annotations['alphas'][argmax_overlaps_inds, :]
regression_batch[index, :, 8] = annotations['ratios'][argmax_overlaps_inds]
# ignore anchors outside of image
if image.shape:
anchors_centers = np.vstack([(anchors[:, 0] + anchors[:, 2]) / 2, (anchors[:, 1] + anchors[:, 3]) / 2]).T
indices = np.logical_or(anchors_centers[:, 0] >= image.shape[1], anchors_centers[:, 1] >= image.shape[0])
labels_batch[index, indices, -1] = -1
regression_batch[index, indices, -1] = -1
return labels_batch, regression_batch
def compute_gt_annotations(
anchors,
annotations,
negative_overlap=0.4,
positive_overlap=0.5
):
"""
Obtain indices of gt annotations with the greatest overlap.
Args
        anchors: np.array of anchors of shape (N, 4) for (x1, y1, x2, y2).
        annotations: np.array of ground-truth boxes of shape (K, 4) for (x1, y1, x2, y2).
        negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).
Returns
positive_indices: indices of positive anchors, (N, )
ignore_indices: indices of ignored anchors, (N, )
        argmax_overlaps_inds: index of the ground-truth box with the greatest overlap for each anchor, (N, )
"""
# (N, K)
overlaps = compute_overlap(anchors.astype(np.float64), annotations.astype(np.float64))
# (N, )
argmax_overlaps_inds = np.argmax(overlaps, axis=1)
# (N, )
max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]
# assign "dont care" labels
# (N, )
positive_indices = max_overlaps >= positive_overlap
    # adam: in case there are gt boxes with no matched positive anchors
# nonzero_inds = np.nonzero(overlaps == np.max(overlaps, axis=0))
# positive_indices[nonzero_inds[0]] = 1
# (N, )
ignore_indices = (max_overlaps > negative_overlap) & ~positive_indices
return positive_indices, ignore_indices, argmax_overlaps_inds
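# Illustrative sketch of the thresholding used above, assuming the per-anchor
# maximum IoUs are already known (the values below are made up): anchors with
# IoU >= positive_overlap become positive, anchors between negative_overlap and
# positive_overlap are ignored, and the rest stay negative background.
def _example_anchor_state_split():
    max_overlaps = np.array([0.10, 0.45, 0.60, 0.85])
    positive_indices = max_overlaps >= 0.5 # [False, False, True, True]
    ignore_indices = (max_overlaps > 0.4) & ~positive_indices # [False, True, False, False]
    return positive_indices, ignore_indices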
def layer_shapes(image_shape, model):
"""
Compute layer shapes given input image shape and the model.
Args
image_shape: The shape of the image.
model: The model to use for computing how the image shape is transformed in the pyramid.
Returns
A dictionary mapping layer names to image shapes.
"""
shape = {
model.layers[0].name: (None,) + image_shape,
}
for layer in model.layers[1:]:
nodes = layer._inbound_nodes
for node in nodes:
input_shapes = [shape[inbound_layer.name] for inbound_layer in node.inbound_layers]
if not input_shapes:
continue
shape[layer.name] = layer.compute_output_shape(input_shapes[0] if len(input_shapes) == 1 else input_shapes)
return shape
def make_shapes_callback(model):
"""
Make a function for getting the shape of the pyramid levels.
"""
def get_shapes(image_shape, pyramid_levels):
shape = layer_shapes(image_shape, model)
image_shapes = [shape["P{}".format(level)][1:3] for level in pyramid_levels]
return image_shapes
return get_shapes
def guess_shapes(image_shape, pyramid_levels):
"""
Guess shapes based on pyramid levels.
Args
image_shape: The shape of the image.
pyramid_levels: A list of what pyramid levels are used.
Returns
A list of image shapes at each pyramid level.
"""
image_shape = np.array(image_shape[:2])
image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in pyramid_levels]
return image_shapes
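# Illustrative sketch: for a 512x512 input and the default pyramid levels,
# each feature-map side is the ceiling of the image side divided by 2**level,
# i.e. 64, 32, 16, 8 and 4 pixels for levels 3 to 7.
def _example_guess_shapes():
    return guess_shapes((512, 512, 3), [3, 4, 5, 6, 7])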
def anchors_for_shape(
image_shape,
pyramid_levels=None,
anchor_params=None,
shapes_callback=None,
):
"""
    Generate anchors for a given image shape.
Args
image_shape: The shape of the image.
pyramid_levels: List of ints representing which pyramids to use (defaults to [3, 4, 5, 6, 7]).
anchor_params: Struct containing anchor parameters. If None, default values are used.
shapes_callback: Function to call for getting the shape of the image at different pyramid levels.
Returns
np.array of shape (N, 4) containing the (x1, y1, x2, y2) coordinates for the anchors.
"""
if pyramid_levels is None:
pyramid_levels = [3, 4, 5, 6, 7]
if anchor_params is None:
anchor_params = AnchorParameters.default
if shapes_callback is None:
shapes_callback = guess_shapes
feature_map_shapes = shapes_callback(image_shape, pyramid_levels)
# compute anchors over all pyramid levels
all_anchors = np.zeros((0, 4), dtype=np.float32)
for idx, p in enumerate(pyramid_levels):
anchors = generate_anchors(
base_size=anchor_params.sizes[idx],
ratios=anchor_params.ratios,
scales=anchor_params.scales
)
shifted_anchors = shift(feature_map_shapes[idx], anchor_params.strides[idx], anchors)
all_anchors = np.append(all_anchors, shifted_anchors, axis=0)
return all_anchors.astype(np.float32)
def shift(feature_map_shape, stride, anchors):
"""
Produce shifted anchors based on shape of the map and stride size.
Args
feature_map_shape : Shape to shift the anchors over.
stride : Stride to shift the anchors with over the shape.
anchors: The anchors to apply at each location.
"""
# create a grid starting from half stride from the top left corner
shift_x = (np.arange(0, feature_map_shape[1]) + 0.5) * stride
shift_y = (np.arange(0, feature_map_shape[0]) + 0.5) * stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((
shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel()
)).transpose()
A = anchors.shape[0]
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
return all_anchors
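# Illustrative sketch with a made-up 1x1 base anchor: shifting it over a 2x2
# feature map with stride 8 places one copy at each grid cell, centred half a
# stride in from the corner (x/y centres at 4 and 12).
def _example_shift():
    base_anchor = np.array([[-0.5, -0.5, 0.5, 0.5]])
    return shift((2, 2), 8, base_anchor) # (4, 4) array, centres (4,4), (12,4), (4,12), (12,12)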
def generate_anchors(base_size=16, ratios=None, scales=None):
"""
Generate anchor (reference) windows by enumerating aspect ratios X scales w.r.t. a reference window.
    Args:
        base_size: Base anchor size (side length of the reference window).
        ratios: Aspect ratios (height / width) to enumerate; defaults to AnchorParameters.default.ratios.
        scales: Scales to enumerate; defaults to AnchorParameters.default.scales.
    Returns:
        np.array of shape (len(ratios) * len(scales), 4) with anchors centred on the origin as (x1, y1, x2, y2).
"""
if ratios is None:
ratios = AnchorParameters.default.ratios
if scales is None:
scales = AnchorParameters.default.scales
num_anchors = len(ratios) * len(scales)
# initialize output anchors
anchors = np.zeros((num_anchors, 4))
anchors[:, 2:] = base_size * np.tile(np.repeat(scales, len(ratios))[None], (2, 1)).T
areas = anchors[:, 2] * anchors[:, 3]
# correct for ratios
anchors[:, 2] = np.sqrt(areas / np.tile(ratios, len(scales)))
anchors[:, 3] = anchors[:, 2] * np.tile(ratios, len(scales))
anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
return anchors
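# Illustrative sketch: the 9 base anchors for base_size=32 share (per scale)
# the same area but differ in aspect ratio, and are centred on the origin;
# shift() later moves them onto the feature grid.
def _example_generate_anchors():
    anchors = generate_anchors(base_size=32)
    widths = anchors[:, 2] - anchors[:, 0]
    heights = anchors[:, 3] - anchors[:, 1]
    return anchors.shape, widths * heights # (9, 4), areas == (32 * scale) ** 2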
def bbox_transform(anchors, gt_boxes, scale_factors=None):
    """
    Compute (ty, tx, th, tw) regression targets that map each anchor onto its ground-truth box.
    Centre offsets are normalised by the anchor size; width/height targets are log-scale ratios.
    """
    wa = anchors[:, 2] - anchors[:, 0]
ha = anchors[:, 3] - anchors[:, 1]
cxa = anchors[:, 0] + wa / 2.
cya = anchors[:, 1] + ha / 2.
w = gt_boxes[:, 2] - gt_boxes[:, 0]
h = gt_boxes[:, 3] - gt_boxes[:, 1]
cx = gt_boxes[:, 0] + w / 2.
cy = gt_boxes[:, 1] + h / 2.
# Avoid NaN in division and log below.
ha += 1e-7
wa += 1e-7
h += 1e-7
w += 1e-7
tx = (cx - cxa) / wa
ty = (cy - cya) / ha
tw = np.log(w / wa)
th = np.log(h / ha)
if scale_factors:
ty /= scale_factors[0]
tx /= scale_factors[1]
th /= scale_factors[2]
tw /= scale_factors[3]
targets = np.stack([ty, tx, th, tw], axis=1)
return targets
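# Illustrative sketch with a made-up anchor/box pair: when the ground-truth box
# coincides with its anchor the regression targets are zero; the offsets and
# log-scale terms grow as the boxes drift apart.
def _example_bbox_transform_identity():
    anchor = np.array([[0., 0., 10., 10.]])
    return bbox_transform(anchor, anchor.copy()) # [[0., 0., 0., 0.]]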
| 13,034 | 35.615169 | 119 | py |
EfficientDet | EfficientDet-master/utils/visualization.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from .colors import label_color
def draw_box(image, box, color, thickness=2):
""" Draws a box on an image with a given color.
# Arguments
image : The image to draw on.
box : A list of 4 elements (x1, y1, x2, y2).
color : The color of the box.
thickness : The thickness of the lines to draw a box with.
"""
b = np.array(box).astype(np.int32)
cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)
def draw_caption(image, box, caption):
""" Draws a caption above the box in an image.
# Arguments
image : The image to draw on.
box : A list of 4 elements (x1, y1, x2, y2).
caption : String containing the text to draw.
"""
b = np.array(box).astype(int)
cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
def draw_boxes(image, boxes, color, thickness=2):
""" Draws boxes on an image with a given color.
# Arguments
image : The image to draw on.
boxes : A [N, 4] matrix (x1, y1, x2, y2).
color : The color of the boxes.
thickness : The thickness of the lines to draw boxes with.
"""
for b in boxes:
draw_box(image, b, color, thickness=thickness)
def draw_detections(image, boxes, scores, labels, colors, label_to_name=None, score_threshold=0.5):
""" Draws detections in an image.
# Arguments
image : The image to draw on.
boxes : A [N, 4] matrix (x1, y1, x2, y2).
scores : A list of N classification scores.
labels : A list of N labels.
colors : The colors of the boxes.
label_to_name : (optional) Functor for mapping a label to a name.
score_threshold : Threshold used for determining what detections to draw.
"""
selection = np.where(scores > score_threshold)[0]
for i in selection:
c = colors[int(labels[i])]
draw_box(image, boxes[i, :], color=c)
# draw labels
        caption = '{}: {:.2f}'.format(label_to_name(labels[i]) if label_to_name else labels[i], scores[i])
draw_caption(image, boxes[i, :], caption)
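# Illustrative sketch with made-up detections: only boxes whose score clears the
# threshold are drawn. The colours and the label_to_name lambda are placeholders
# chosen for the example.
def _example_draw_detections():
    canvas = np.zeros((200, 200, 3), dtype=np.uint8)
    boxes = np.array([[10, 10, 80, 80], [50, 60, 150, 180]])
    scores = np.array([0.9, 0.3])
    labels = np.array([0, 1])
    colors = [(0, 255, 0), (0, 0, 255)]
    draw_detections(canvas, boxes, scores, labels, colors,
                    label_to_name=lambda label: 'class_{}'.format(label))
    return canvas # only the first box (score 0.9 > 0.5) is drawn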
def draw_annotations(image, annotations, color=(0, 255, 0), label_to_name=None):
""" Draws annotations in an image.
# Arguments
image : The image to draw on.
annotations : A [N, 5] matrix (x1, y1, x2, y2, label) or dictionary containing bboxes (shaped [N, 4]) and labels (shaped [N]).
color : The color of the boxes. By default the color from keras_retinanet.utils.colors.label_color will be used.
label_to_name : (optional) Functor for mapping a label to a name.
"""
if isinstance(annotations, np.ndarray):
annotations = {'bboxes': annotations[:, :4], 'labels': annotations[:, 4]}
assert('bboxes' in annotations)
assert('labels' in annotations)
assert(annotations['bboxes'].shape[0] == annotations['labels'].shape[0])
for i in range(annotations['bboxes'].shape[0]):
label = annotations['labels'][i]
c = color if color is not None else label_color(label)
caption = '{}'.format(label_to_name(label) if label_to_name else label)
draw_caption(image, annotations['bboxes'][i], caption)
draw_box(image, annotations['bboxes'][i], color=c)
| 4,112 | 37.439252 | 136 | py |
EfficientDet | EfficientDet-master/utils/image.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division
import numpy as np
import cv2
from PIL import Image
from .transform import change_transform_origin
def read_image_bgr(path):
"""
Read an image in BGR format.
Args
path: Path to the image.
"""
# We deliberately don't use cv2.imread here, since it gives no feedback on errors while reading the image.
image = np.asarray(Image.open(path).convert('RGB'))
return image[:, :, ::-1].copy()
def preprocess_image(x, mode='caffe'):
"""
Preprocess an image by subtracting the ImageNet mean.
Args
x: np.array of shape (None, None, 3) or (3, None, None).
mode: One of "caffe" or "tf".
- caffe: will zero-center each color channel with
respect to the ImageNet dataset, without scaling.
- tf: will scale pixels between -1 and 1, sample-wise.
Returns
The input with the ImageNet mean subtracted.
"""
# mostly identical to "https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py"
# except for converting RGB -> BGR since we assume BGR already
    # always convert to float32 to keep compatibility with opencv
x = x.astype(np.float32)
if mode == 'tf':
x /= 127.5
x -= 1.
elif mode == 'caffe':
x[..., 0] -= 103.939
x[..., 1] -= 116.779
x[..., 2] -= 123.68
return x
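# Illustrative sketch on a made-up uniform image: 'caffe' mode subtracts the
# per-channel ImageNet means (BGR order, no scaling), while 'tf' mode rescales
# pixel values to roughly [-1, 1].
def _example_preprocess_modes():
    dummy = np.full((4, 4, 3), 128, dtype=np.uint8)
    caffe_out = preprocess_image(dummy, mode='caffe') # channel 0 becomes 128 - 103.939
    tf_out = preprocess_image(dummy, mode='tf') # all values become ~0.004
    return caffe_out, tf_out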
def adjust_transform_for_image(transform, image, relative_translation):
"""
Adjust a transformation for a specific image.
The translation of the matrix will be scaled with the size of the image.
    The linear part of the transformation will be adjusted so that the origin of the transformation will be at the center of the image.
"""
height, width, channels = image.shape
result = transform
# Scale the translation with the image size if specified.
if relative_translation:
result[0:2, 2] *= [width, height]
# Move the origin of transformation.
result = change_transform_origin(transform, (0.5 * width, 0.5 * height))
return result
class TransformParameters:
"""
Struct holding parameters determining how to apply a transformation to an image.
Args
fill_mode: One of: 'constant', 'nearest', 'reflect', 'wrap'
interpolation: One of: 'nearest', 'linear', 'cubic', 'area', 'lanczos4'
cval: Fill value to use with fill_mode='constant'
relative_translation: If true (the default), interpret translation as a factor of the image size.
If false, interpret it as absolute pixels.
"""
def __init__(
self,
fill_mode='nearest',
interpolation='linear',
cval=0,
relative_translation=True,
):
self.fill_mode = fill_mode
self.cval = cval
self.interpolation = interpolation
self.relative_translation = relative_translation
def cvBorderMode(self):
if self.fill_mode == 'constant':
return cv2.BORDER_CONSTANT
if self.fill_mode == 'nearest':
return cv2.BORDER_REPLICATE
if self.fill_mode == 'reflect':
return cv2.BORDER_REFLECT_101
if self.fill_mode == 'wrap':
return cv2.BORDER_WRAP
def cvInterpolation(self):
if self.interpolation == 'nearest':
return cv2.INTER_NEAREST
if self.interpolation == 'linear':
return cv2.INTER_LINEAR
if self.interpolation == 'cubic':
return cv2.INTER_CUBIC
if self.interpolation == 'area':
return cv2.INTER_AREA
if self.interpolation == 'lanczos4':
return cv2.INTER_LANCZOS4
def apply_transform(matrix, image, params):
"""
Apply a transformation to an image.
The origin of transformation is at the top left corner of the image.
The matrix is interpreted such that a point (x, y) on the original image is moved to transform * (x, y) in the generated image.
Mathematically speaking, that means that the matrix is a transformation from the transformed image space to the original image space.
Args
        matrix: A homogeneous 3 by 3 matrix representing the transformation to apply.
image: The image to transform.
params: The transform parameters (see TransformParameters)
"""
output = cv2.warpAffine(
image,
matrix[:2, :],
dsize=(image.shape[1], image.shape[0]),
flags=params.cvInterpolation(),
borderMode=params.cvBorderMode(),
borderValue=params.cval,
)
return output
def compute_resize_scale(image_shape, min_side=800, max_side=1333):
"""
Compute an image scale such that the image size is constrained to min_side and max_side.
Args
min_side: The image's min side will be equal to min_side after resizing.
max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side.
Returns
A resizing scale.
"""
(rows, cols, _) = image_shape
smallest_side = min(rows, cols)
# rescale the image so the smallest side is min_side
scale = min_side / smallest_side
# check if the largest side is now greater than max_side, which can happen
# when images have a large aspect ratio
largest_side = max(rows, cols)
if largest_side * scale > max_side:
scale = max_side / largest_side
return scale
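# Illustrative sketch with made-up image shapes: a 480x640 image is scaled by
# 800/480 (its long side stays under 1333), while a 200x600 image is capped at
# 1333/600 because scaling its short side to 800 would overshoot the long side.
def _example_resize_scale():
    uncapped = compute_resize_scale((480, 640, 3)) # 800 / 480 ~= 1.667
    capped = compute_resize_scale((200, 600, 3)) # 1333 / 600 ~= 2.222
    return uncapped, capped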
def resize_image(img, min_side=800, max_side=1333):
"""
Resize an image such that the size is constrained to min_side and max_side.
Args
min_side: The image's min side will be equal to min_side after resizing.
max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side.
Returns
A resized image.
"""
# compute scale to resize the image
scale = compute_resize_scale(img.shape, min_side=min_side, max_side=max_side)
# resize the image with the computed scale
img = cv2.resize(img, None, fx=scale, fy=scale)
return img, scale
def _uniform(val_range):
"""
Uniformly sample from the given range.
Args
val_range: A pair of lower and upper bound.
"""
return np.random.uniform(val_range[0], val_range[1])
def _check_range(val_range, min_val=None, max_val=None):
"""
Check whether the range is a valid range.
Args
val_range: A pair of lower and upper bound.
min_val: Minimal value for the lower bound.
max_val: Maximal value for the upper bound.
"""
if val_range[0] > val_range[1]:
raise ValueError('interval lower bound > upper bound')
if min_val is not None and val_range[0] < min_val:
raise ValueError('invalid interval lower bound')
if max_val is not None and val_range[1] > max_val:
raise ValueError('invalid interval upper bound')
def _clip(image):
"""
Clip and convert an image to np.uint8.
Args
image: Image to clip.
"""
return np.clip(image, 0, 255).astype(np.uint8)
class VisualEffect:
"""
Struct holding parameters and applying image color transformation.
Args
contrast_factor: A factor for adjusting contrast. Should be between 0 and 3.
brightness_delta: Brightness offset between -1 and 1 added to the pixel values.
hue_delta: Hue offset between -1 and 1 added to the hue channel.
saturation_factor: A factor multiplying the saturation values of each pixel.
"""
def __init__(
self,
contrast_factor,
brightness_delta,
hue_delta,
saturation_factor,
):
self.contrast_factor = contrast_factor
self.brightness_delta = brightness_delta
self.hue_delta = hue_delta
self.saturation_factor = saturation_factor
def __call__(self, image):
"""
Apply a visual effect on the image.
Args
image: Image to adjust
"""
if self.contrast_factor:
image = adjust_contrast(image, self.contrast_factor)
if self.brightness_delta:
image = adjust_brightness(image, self.brightness_delta)
if self.hue_delta or self.saturation_factor:
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
if self.hue_delta:
image = adjust_hue(image, self.hue_delta)
if self.saturation_factor:
image = adjust_saturation(image, self.saturation_factor)
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
return image
def random_visual_effect_generator(
contrast_range=(0.9, 1.1),
brightness_range=(-.1, .1),
hue_range=(-0.05, 0.05),
saturation_range=(0.95, 1.05)
):
"""
Generate visual effect parameters uniformly sampled from the given intervals.
Args
        contrast_range: A factor interval for adjusting contrast. Should be between 0 and 3.
        brightness_range: An interval between -1 and 1 for the amount added to the pixel values.
        hue_range: An interval between -1 and 1 for the amount added to the hue channel.
            The values are rotated if they exceed 180.
        saturation_range: An interval for the factor multiplying the saturation values of each pixel.
"""
_check_range(contrast_range, 0)
_check_range(brightness_range, -1, 1)
_check_range(hue_range, -1, 1)
_check_range(saturation_range, 0)
def _generate():
while True:
yield VisualEffect(
contrast_factor=_uniform(contrast_range),
brightness_delta=_uniform(brightness_range),
hue_delta=_uniform(hue_range),
saturation_factor=_uniform(saturation_range),
)
return _generate()
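# Illustrative sketch: draw one randomly sampled effect from the generator and
# apply it to a made-up grey image; the result is again a uint8 image of the
# same shape, with contrast/brightness/hue/saturation jittered slightly.
def _example_random_visual_effect():
    effect = next(random_visual_effect_generator())
    image = np.full((32, 32, 3), 128, dtype=np.uint8)
    return effect(image)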
def adjust_contrast(image, factor):
"""
Adjust contrast of an image.
Args
image: Image to adjust.
factor: A factor for adjusting contrast.
"""
mean = image.mean(axis=0).mean(axis=0)
return _clip((image - mean) * factor + mean)
def adjust_brightness(image, delta):
"""
Adjust brightness of an image
Args
image: Image to adjust.
delta: Brightness offset between -1 and 1 added to the pixel values.
"""
return _clip(image + delta * 255)
def adjust_hue(image, delta):
"""
Adjust hue of an image.
Args
image: Image to adjust.
delta: An interval between -1 and 1 for the amount added to the hue channel.
The values are rotated if they exceed 180.
"""
image[..., 0] = np.mod(image[..., 0] + delta * 180, 180)
return image
def adjust_saturation(image, factor):
"""
Adjust saturation of an image.
Args
image: Image to adjust.
factor: An interval for the factor multiplying the saturation values of each pixel.
"""
image[..., 1] = np.clip(image[..., 1] * factor, 0, 255)
return image
| 11,667 | 30.281501 | 137 | py |
EfficientDet | EfficientDet-master/utils/transform.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
DEFAULT_PRNG = np.random
def colvec(*args):
"""
Create a numpy array representing a column vector.
"""
return np.array([args]).T
def transform_aabb(transform, aabb):
"""
Apply a transformation to an axis aligned bounding box.
The result is a new AABB in the same coordinate system as the original AABB.
The new AABB contains all corner points of the original AABB after applying the given transformation.
    Args
        transform: The transformation to apply.
        aabb: The AABB as a tuple (x1, y1, x2, y2), where (x1, y1) is the minimum corner and (x2, y2) the maximum corner.
Returns
The new AABB as tuple (x1, y1, x2, y2)
"""
x1, y1, x2, y2 = aabb
# Transform all 4 corners of the AABB.
points = transform.dot([
[x1, x2, x1, x2],
[y1, y2, y2, y1],
[1, 1, 1, 1],
])
# Extract the min and max corners again.
# (3, ) (min_x, min_y, 1)
min_corner = points.min(axis=1)
# (3, ) (max_x, max_y, 1)
max_corner = points.max(axis=1)
return [min_corner[0], min_corner[1], max_corner[0], max_corner[1]]
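# Illustrative sketch with a made-up box: rotating the AABB (10, 20, 30, 60) by
# 90 degrees about the origin maps (x, y) to (-y, x), so the resulting
# axis-aligned box is approximately (-60, 10, -20, 30).
def _example_transform_aabb():
    return transform_aabb(rotation(np.pi / 2), (10, 20, 30, 60))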
def _random_vector(min, max, prng=DEFAULT_PRNG):
"""
Construct a random vector between min and max.
Args
min: the minimum value for each component, (n, )
max: the maximum value for each component, (n, )
"""
min = np.array(min)
max = np.array(max)
assert min.shape == max.shape
assert len(min.shape) == 1
return prng.uniform(min, max)
def rotation(angle):
"""
Construct a homogeneous 2D rotation matrix.
Args
angle: the angle in radians
Returns
the rotation matrix as 3 by 3 numpy array
"""
return np.array([
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]
])
def random_rotation(min, max, prng=DEFAULT_PRNG):
"""
    Construct a random rotation between min and max.
Args
min: a scalar for the minimum absolute angle in radians
max: a scalar for the maximum absolute angle in radians
prng: the pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 rotation matrix
"""
return rotation(prng.uniform(min, max))
def translation(translation):
"""
Construct a homogeneous 2D translation matrix.
Args:
translation: the translation 2D vector
Returns:
the translation matrix as 3 by 3 numpy array
"""
return np.array([
[1, 0, translation[0]],
[0, 1, translation[1]],
[0, 0, 1]
])
def random_translation(min, max, prng=DEFAULT_PRNG):
"""
Construct a random 2D translation between min and max.
Args
min: a 2D vector with the minimum translation for each dimension
max: a 2D vector with the maximum translation for each dimension
prng: the pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 translation matrix
"""
return translation(_random_vector(min, max, prng))
def shear(angle):
"""
Construct a homogeneous 2D shear matrix.
Args
angle: the shear angle in radians
Returns
the shear matrix as 3 by 3 numpy array
"""
return np.array([
[1, -np.sin(angle), 0],
[0, np.cos(angle), 0],
[0, 0, 1]
])
def random_shear(min, max, prng=DEFAULT_PRNG):
"""
    Construct a random 2D shear matrix with shear angle between min and max.
Args
min: the minimum shear angle in radians.
max: the maximum shear angle in radians.
prng: the pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 shear matrix
"""
return shear(prng.uniform(min, max))
def scaling(factor):
"""
Construct a homogeneous 2D scaling matrix.
Args
factor: a 2D vector for X and Y scaling
Returns
the zoom matrix as 3 by 3 numpy array
"""
return np.array([
[factor[0], 0, 0],
[0, factor[1], 0],
[0, 0, 1]
])
def random_scaling(min, max, prng=DEFAULT_PRNG):
"""
    Construct a random 2D scaling matrix with scaling factors between min and max.
Args
min: a 2D vector containing the minimum scaling factor for X and Y.
        max: a 2D vector containing the maximum scaling factor for X and Y.
prng: the pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 scaling matrix
"""
return scaling(_random_vector(min, max, prng))
def random_flip(flip_x_chance, flip_y_chance, prng=DEFAULT_PRNG):
"""
Construct a transformation randomly containing X/Y flips (or not).
Args
flip_x_chance: The chance that the result will contain a flip along the X axis.
flip_y_chance: The chance that the result will contain a flip along the Y axis.
prng: The pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 transformation matrix
"""
flip_x = prng.uniform(0, 1) < flip_x_chance
flip_y = prng.uniform(0, 1) < flip_y_chance
# 1 - 2 * bool gives 1 for False and -1 for True.
return scaling((1 - 2 * flip_x, 1 - 2 * flip_y))
def change_transform_origin(transform, center):
"""
Create a new transform representing the same transformation, only with the origin of the linear part changed.
Args
transform: the transformation matrix
center: the new origin of the transformation
Returns
translate(center) * transform * translate(-center)
"""
center = np.array(center)
return np.linalg.multi_dot([translation(center), transform, translation(-center)])
def random_transform(
min_rotation=0,
max_rotation=0,
min_translation=(0, 0),
max_translation=(0, 0),
min_shear=0,
max_shear=0,
min_scaling=(1, 1),
max_scaling=(1, 1),
flip_x_chance=0,
flip_y_chance=0,
prng=DEFAULT_PRNG
):
"""
Create a random transformation.
The transformation consists of the following operations in this order (from left to right):
* rotation
* translation
* shear
* scaling
* flip x (if applied)
* flip y (if applied)
Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation
    as a factor of the image size. So an X translation of 0.1 would translate the image by 10% of its width.
Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret
the translation directly as pixel distances instead.
Args
min_rotation: The minimum rotation in radians for the transform as scalar.
max_rotation: The maximum rotation in radians for the transform as scalar.
min_translation: The minimum translation for the transform as 2D column vector.
max_translation: The maximum translation for the transform as 2D column vector.
min_shear: The minimum shear angle for the transform in radians.
max_shear: The maximum shear angle for the transform in radians.
min_scaling: The minimum scaling for the transform as 2D column vector.
max_scaling: The maximum scaling for the transform as 2D column vector.
flip_x_chance: The chance (0 to 1) that a transform will contain a flip along X direction.
flip_y_chance: The chance (0 to 1) that a transform will contain a flip along Y direction.
prng: The pseudo-random number generator to use.
"""
return np.linalg.multi_dot([
random_rotation(min_rotation, max_rotation, prng),
random_translation(min_translation, max_translation, prng),
random_shear(min_shear, max_shear, prng),
random_scaling(min_scaling, max_scaling, prng),
random_flip(flip_x_chance, flip_y_chance, prng)
])
def random_transform_generator(prng=None, **kwargs):
"""
Create a random transform generator.
Uses a dedicated, newly created, properly seeded PRNG by default instead of the global DEFAULT_PRNG.
The transformation consists of the following operations in this order (from left to right):
* rotation
* translation
* shear
* scaling
* flip x (if applied)
* flip y (if applied)
Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation
    as a factor of the image size. So an X translation of 0.1 would translate the image by 10% of its width.
Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret
the translation directly as pixel distances instead.
Args
min_rotation: The minimum rotation in radians for the transform as scalar.
max_rotation: The maximum rotation in radians for the transform as scalar.
min_translation: The minimum translation for the transform as 2D column vector.
max_translation: The maximum translation for the transform as 2D column vector.
min_shear: The minimum shear angle for the transform in radians.
max_shear: The maximum shear angle for the transform in radians.
min_scaling: The minimum scaling for the transform as 2D column vector.
max_scaling: The maximum scaling for the transform as 2D column vector.
flip_x_chance: The chance (0 to 1) that a transform will contain a flip along X direction.
flip_y_chance: The chance (0 to 1) that a transform will contain a flip along Y direction.
prng: The pseudo-random number generator to use.
"""
if prng is None:
# RandomState automatically seeds using the best available method.
prng = np.random.RandomState()
while True:
yield random_transform(prng=prng, **kwargs)
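# Illustrative sketch with made-up ranges: build a generator limited to small
# rotations and 50% horizontal flips, then draw one homogeneous 3x3 transform.
def _example_transform_generator():
    generator = random_transform_generator(min_rotation=-0.1,
                                           max_rotation=0.1,
                                           flip_x_chance=0.5)
    return next(generator)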
| 10,565 | 32.01875 | 117 | py |
EfficientDet | EfficientDet-master/utils/__init__.py | # Copyright 2019 The TensorFlow Authors, Pavel Yakubovskiy. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import functools
import cv2
import numpy as np
_KERAS_BACKEND = None
_KERAS_LAYERS = None
_KERAS_MODELS = None
_KERAS_UTILS = None
def get_submodules_from_kwargs(kwargs):
backend = kwargs.get('backend', _KERAS_BACKEND)
layers = kwargs.get('layers', _KERAS_LAYERS)
models = kwargs.get('models', _KERAS_MODELS)
utils = kwargs.get('utils', _KERAS_UTILS)
for key in kwargs.keys():
if key not in ['backend', 'layers', 'models', 'utils']:
            raise TypeError('Invalid keyword argument: %s' % key)
return backend, layers, models, utils
def inject_keras_modules(func):
import keras
@functools.wraps(func)
def wrapper(*args, **kwargs):
kwargs['backend'] = keras.backend
kwargs['layers'] = keras.layers
kwargs['models'] = keras.models
kwargs['utils'] = keras.utils
return func(*args, **kwargs)
return wrapper
def inject_tfkeras_modules(func):
import tensorflow.keras as tfkeras
@functools.wraps(func)
def wrapper(*args, **kwargs):
kwargs['backend'] = tfkeras.backend
kwargs['layers'] = tfkeras.layers
kwargs['models'] = tfkeras.models
kwargs['utils'] = tfkeras.utils
return func(*args, **kwargs)
return wrapper
def init_keras_custom_objects():
import keras
import efficientnet as model
custom_objects = {
'swish': inject_keras_modules(model.get_swish)(),
'FixedDropout': inject_keras_modules(model.get_dropout)()
}
keras.utils.generic_utils.get_custom_objects().update(custom_objects)
def init_tfkeras_custom_objects():
import tensorflow.keras as tfkeras
import efficientnet as model
custom_objects = {
'swish': inject_tfkeras_modules(model.get_swish)(),
'FixedDropout': inject_tfkeras_modules(model.get_dropout)()
}
tfkeras.utils.get_custom_objects().update(custom_objects)
def preprocess_image(image, image_size):
# image, RGB
image_height, image_width = image.shape[:2]
if image_height > image_width:
scale = image_size / image_height
resized_height = image_size
resized_width = int(image_width * scale)
else:
scale = image_size / image_width
resized_height = int(image_height * scale)
resized_width = image_size
image = cv2.resize(image, (resized_width, resized_height))
image = image.astype(np.float32)
image /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
image -= mean
image /= std
pad_h = image_size - resized_height
pad_w = image_size - resized_width
image = np.pad(image, [(0, pad_h), (0, pad_w), (0, 0)], mode='constant')
return image, scale
def rotate_image(image):
rotate_degree = np.random.uniform(low=-45, high=45)
h, w = image.shape[:2]
# Compute the rotation matrix.
M = cv2.getRotationMatrix2D(center=(w / 2, h / 2),
angle=rotate_degree,
scale=1)
# Get the sine and cosine from the rotation matrix.
abs_cos_angle = np.abs(M[0, 0])
abs_sin_angle = np.abs(M[0, 1])
# Compute the new bounding dimensions of the image.
new_w = int(h * abs_sin_angle + w * abs_cos_angle)
new_h = int(h * abs_cos_angle + w * abs_sin_angle)
# Adjust the rotation matrix to take into account the translation.
M[0, 2] += new_w // 2 - w // 2
M[1, 2] += new_h // 2 - h // 2
# Rotate the image.
image = cv2.warpAffine(image, M=M, dsize=(new_w, new_h), flags=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_CONSTANT,
borderValue=(128, 128, 128))
return image
def reorder_vertexes(vertexes):
"""
reorder vertexes as the paper shows, (top, right, bottom, left)
Args:
        vertexes: np.array (4, 2), the four vertexes, given in clockwise order
    Returns:
        np.array (4, 2), the vertexes reordered to start from the top vertex, each snapped to the corresponding edge of the bounding rectangle
"""
assert vertexes.shape == (4, 2)
xmin, ymin = np.min(vertexes, axis=0)
xmax, ymax = np.max(vertexes, axis=0)
# determine the first point with the smallest y,
    # if two vertexes have the same y, choose the one with the smaller x,
ordered_idxes = np.argsort(vertexes, axis=0)
ymin1_idx = ordered_idxes[0, 1]
ymin2_idx = ordered_idxes[1, 1]
if vertexes[ymin1_idx, 1] == vertexes[ymin2_idx, 1]:
if vertexes[ymin1_idx, 0] <= vertexes[ymin2_idx, 0]:
first_vertex_idx = ymin1_idx
else:
first_vertex_idx = ymin2_idx
else:
first_vertex_idx = ymin1_idx
ordered_idxes = [(first_vertex_idx + i) % 4 for i in range(4)]
ordered_vertexes = vertexes[ordered_idxes]
# drag the point to the corresponding edge
ordered_vertexes[0, 1] = ymin
ordered_vertexes[1, 0] = xmax
ordered_vertexes[2, 1] = ymax
ordered_vertexes[3, 0] = xmin
return ordered_vertexes
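# Illustrative sketch with a made-up clockwise quadrilateral: regardless of
# which vertex comes first in the input, the output starts from the vertex with
# the smallest y and keeps the clockwise order (top, right, bottom, left).
def _example_reorder_vertexes():
    quad = np.array([[10., 4.], [6., 9.], [0., 5.], [5., 0.]])
    return reorder_vertexes(quad) # [[5, 0], [10, 4], [6, 9], [0, 5]]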
def postprocess_boxes(boxes, scale, height, width):
boxes /= scale
boxes[:, 0] = np.clip(boxes[:, 0], 0, width - 1)
boxes[:, 1] = np.clip(boxes[:, 1], 0, height - 1)
boxes[:, 2] = np.clip(boxes[:, 2], 0, width - 1)
boxes[:, 3] = np.clip(boxes[:, 3], 0, height - 1)
return boxes
| 5,843 | 30.934426 | 83 | py |
MNC | MNC-master/tools/test_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# Standard module
import argparse
import sys
import os
import time
import pprint
# User-defined module
import _init_paths
import caffe
from mnc_config import cfg, cfg_from_file
from db.imdb import get_imdb
from caffeWrapper.TesterWrapper import TesterWrapper
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--task', dest='task_name',
help='set task name', default='sds',
type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print('Waiting for {} to exist...'.format(args.caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
imdb = get_imdb(args.imdb_name)
_tester = TesterWrapper(args.prototxt, imdb, args.caffemodel, args.task_name)
_tester.get_result()
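# Example invocation sketch; the prototxt/caffemodel paths below are hypothetical
# placeholders, only the flags come from parse_args above:
# python tools/test_net.py --gpu 0 \
# --def path/to/test.prototxt \
# --net path/to/final.caffemodel \
# --imdb voc_2007_test --task sds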
| 2,755 | 31.423529 | 81 | py |
MNC | MNC-master/tools/_init_paths.py |
import os.path
import sys
"""
Add lib paths and caffe path to system search path
"""
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
cur_dir = os.path.dirname(__file__)
# Add caffe python to PYTHONPATH
caffe_path = os.path.join(cur_dir, '..', 'caffe-mnc', 'python')
add_path(caffe_path)
# Add lib to PYTHONPATH
lib_path = os.path.join(cur_dir, '..', 'lib')
add_path(lib_path)
| 417 | 17.173913 | 63 | py |